X-Git-Url: http://mj.ucw.cz/gitweb/?a=blobdiff_plain;ds=inline;f=lib%2Fsorter%2Fs-internal.h;h=ef2da2402958cfa62bfdac3a69d575390ce7e0a8;hb=7cf300f543b5023ad46a909ed6fde0feba5d1acd;hp=c05ca077e574868bb7e27cd94c85ddc6cf24ee10;hpb=c15baf7b8cf0dc6a07b3c57ad346aee68ac6f19f;p=libucw.git

diff --git a/lib/sorter/s-internal.h b/lib/sorter/s-internal.h
index c05ca077..ef2da240 100644
--- a/lib/sorter/s-internal.h
+++ b/lib/sorter/s-internal.h
@@ -7,23 +7,98 @@
  * of the GNU Lesser General Public License.
  */
 
+#include "lib/stkstring.h"
+
+#ifdef SORT_INTERNAL_RADIX
+/* Keep copies of the items' hashes to save cache misses */
+#define SORT_COPY_HASH
+#endif
+
 typedef struct {
   P(key) *key;
-  // FIXME: Add the hash here to save cache misses
+#ifdef SORT_COPY_HASH
+  P(hash_t) hash;
+#endif
 } P(internal_item_t);
 
 #define ASORT_PREFIX(x) SORT_PREFIX(array_##x)
 #define ASORT_KEY_TYPE P(internal_item_t)
-#define ASORT_ELT(i) ary[i]
-#define ASORT_LT(x,y) (P(compare)((x).key, (y).key) < 0)
-#define ASORT_EXTRA_ARGS , P(internal_item_t) *ary
-#include "lib/arraysort.h"
+#ifdef SORT_COPY_HASH
+#  ifdef SORT_INT
+#    define ASORT_LT(x,y) ((x).hash < (y).hash)  // In this mode, the hash is the value
+#  else
+#    define ASORT_LT(x,y) ((x).hash < (y).hash || (x).hash == (y).hash && P(compare)((x).key, (y).key) < 0)
+#  endif
+#else
+#  define ASORT_LT(x,y) (P(compare)((x).key, (y).key) < 0)
+#endif
+#ifdef SORT_INTERNAL_RADIX
+#  ifdef SORT_COPY_HASH
+#    define ASORT_HASH(x) (x).hash
+#  else
+#    define ASORT_HASH(x) P(hash)((x).key)
+#  endif
+#  ifdef SORT_LONG_HASH
+#    define ASORT_LONG_HASH
+#  endif
+#endif
+#include "lib/sorter/array.h"
+
+/*
+ * The big_buf has the following layout:
+ *
+ *  +-------------------------------------------------------------------------------+
+ *  | array of internal_item's                                                       |
+ *  +-------------------------------------------------------------------------------+
+ *  | padding to make the following part page-aligned                                |
+ *  +--------------------------------+----------------------------------------------+
+ *  | shadow copy of item array      | array of pointers to data for write_merged() |
+ *  | used if radix-sorting          +----------------------------------------------+
+ *  |                                | workspace for write_merged()                 |
+ *  +--------------------------------+----------------------------------------------+
+ *  |                     +---------+                                               |
+ *  |                     | key     |                                               |
+ *  |                     +---------+                                               |
+ *  |   sequence of       | padding |                                               |
+ *  |   items             +---------+                                               |
+ *  |                     | data    |                                               |
+ *  |                     +---------+                                               |
+ *  |                     | padding |                                               |
+ *  |                     +---------+                                               |
+ *  +-------------------------------------------------------------------------------+
+ *
+ * (the data which are in different columns are never accessed simultaneously,
+ *  so we use a single buffer for both)
+ */
+
+static inline void *P(internal_get_data)(P(key) *key)
+{
+  uns ksize = SORT_KEY_SIZE(*key);
+#ifdef SORT_UNIFY
+  ksize = ALIGN_TO(ksize, CPU_STRUCT_ALIGN);
+#endif
+  return (byte *) key + ksize;
+}
+
+static inline size_t P(internal_workspace)(P(key) *key UNUSED)
+{
+  size_t ws = 0;
+#ifdef SORT_UNIFY
+  ws += sizeof(void *);
+#endif
+#ifdef SORT_UNIFY_WORKSPACE
+  ws += SORT_UNIFY_WORKSPACE(*key);
+#endif
+#ifdef SORT_INTERNAL_RADIX
+  ws = MAX(ws, sizeof(P(internal_item_t)));
+#endif
+  return ws;
+}
 
 static int P(internal)(struct sort_context *ctx, struct sort_bucket *bin, struct sort_bucket *bout, struct sort_bucket *bout_only)
 {
   sorter_alloc_buf(ctx);
-  ASSERT(bin->fb);   // Expects the input bucket to be already open for reading
-  struct fastbuf *in = bin->fb;
+  struct fastbuf *in = sbuck_read(bin);
   P(key) key, *keybuf = ctx->key_buf;
   if (!keybuf)
@@ -36,23 +111,22 @@ static int P(internal)(struct sort_context *ctx, struct sort_bucket *bin, struct
   else if (!P(read_key)(in, &key))
     return 0;
 
+  size_t bufsize = ctx->big_buf_size;
 #ifdef SORT_VAR_DATA
-  if (sizeof(key) + 1024 + SORT_DATA_SIZE(key) > ctx->big_buf_half_size)
+  if (sizeof(key) + 2*CPU_PAGE_SIZE + SORT_DATA_SIZE(key) + P(internal_workspace)(&key) > bufsize)
     {
-      SORT_XTRACE("s-internal: Generating a giant run");
-      struct fastbuf *out = sorter_open_write(bout); /* FIXME: Using a non-direct buffer would be nice here */
+      SORT_XTRACE(3, "s-internal: Generating a giant run");
+      struct fastbuf *out = sbuck_write(bout);
       P(copy_data)(&key, in, out);
       bout->runs++;
       return 1;   // We don't know, but 1 is always safe
     }
 #endif
 
-  size_t bufsize = ctx->big_buf_half_size;   /* FIXME: In some cases, we can use the whole buffer */
-  bufsize = MIN((u64)bufsize, (u64)~0U * sizeof(P(internal_item_t)));   // The number of records must fit in uns
-
-  SORT_XTRACE("s-internal: Reading (bufsize=%zd)", bufsize);
+  SORT_XTRACE(4, "s-internal: Reading");
   P(internal_item_t) *item_array = ctx->big_buf, *item = item_array, *last_item;
   byte *end = (byte *) ctx->big_buf + bufsize;
+  size_t remains = bufsize - CPU_PAGE_SIZE;
   do
     {
       uns ksize = SORT_KEY_SIZE(key);
@@ -63,44 +137,113 @@ static int P(internal)(struct sort_context *ctx, struct sort_bucket *bin, struct
 #endif
       uns dsize = SORT_DATA_SIZE(key);
       uns recsize = ALIGN_TO(ksize_aligned + dsize, CPU_STRUCT_ALIGN);
-      if (unlikely(sizeof(P(internal_item_t)) + recsize > (size_t)(end - (byte *) item)))
+      size_t totalsize = recsize + sizeof(P(internal_item_t) *) + P(internal_workspace)(&key);
+      if (unlikely(totalsize > remains
+#ifdef CPU_64BIT_POINTERS
+                   || item >= item_array + ~0U   // The number of items must fit in an uns
+#endif
+         ))
         {
           ctx->more_keys = 1;
           *keybuf = key;
           break;
         }
+      remains -= totalsize;
       end -= recsize;
       memcpy(end, &key, ksize);
 #ifdef SORT_VAR_DATA
       breadb(in, end + ksize_aligned, dsize);
 #endif
       item->key = (P(key)*) end;
+#ifdef SORT_COPY_HASH
+      item->hash = P(hash)(item->key);
+#endif
       item++;
     }
   while (P(read_key)(in, &key));
   last_item = item;
 
   uns count = last_item - item_array;
-  SORT_XTRACE("s-internal: Sorting %d items", count);
-  P(array_sort)(count, item_array);
+  void *workspace UNUSED = ALIGN_PTR(last_item, CPU_PAGE_SIZE);
+  SORT_XTRACE(3, "s-internal: Read %u items (%s items, %s workspace, %s data)",
+              count,
+              stk_fsize((byte*)last_item - (byte*)item_array),
+              stk_fsize(end - (byte*)last_item - remains),
+              stk_fsize((byte*)ctx->big_buf + bufsize - end));
+  timestamp_t timer;
+  init_timer(&timer);
+  item_array = P(array_sort)(item_array, count
+#ifdef SORT_INTERNAL_RADIX
+                             , workspace, bin->hash_bits
+#endif
+                             );
+  ctx->total_int_time += get_timer(&timer);
 
-  SORT_XTRACE("s-internal: Writing");
+  SORT_XTRACE(4, "s-internal: Writing");
   if (!ctx->more_keys)
     bout = bout_only;
   struct fastbuf *out = sbuck_write(bout);
   bout->runs++;
-  /* FIXME: No unification done yet */
+  uns merged UNUSED = 0;
   for (item = item_array; item < last_item; item++)
     {
-      P(write_key)(out, item->key);
-#ifdef SORT_VAR_DATA
-      uns ksize = SORT_KEY_SIZE(*item->key);
 #ifdef SORT_UNIFY
-      ksize = ALIGN_TO(ksize, CPU_STRUCT_ALIGN);
+      if (item < last_item - 1 && !P(compare)(item->key, item[1].key))
+        {
+          // Rewrite the item structures with just pointers to keys and place
+          // pointers to data in the workspace.
+          P(key) **key_array = (void *) item;
+          void **data_array = workspace;
+          key_array[0] = item[0].key;
+          data_array[0] = P(internal_get_data)(key_array[0]);
+          uns cnt;
+          for (cnt=1; item+cnt < last_item && !P(compare)(key_array[0], item[cnt].key); cnt++)
+            {
+              key_array[cnt] = item[cnt].key;
+              data_array[cnt] = P(internal_get_data)(key_array[cnt]);
+            }
+          P(write_merged)(out, key_array, data_array, cnt, data_array+cnt);
+          item += cnt - 1;
+          merged += cnt - 1;
+          continue;
+        }
+#endif
+#ifdef SORT_ASSERT_UNIQUE
+      ASSERT(item == last_item-1 || P(compare)(item->key, item[1].key) < 0);
 #endif
-      bwrite(out, (byte *) item->key + ksize, SORT_DATA_SIZE(*item->key));
+      P(write_key)(out, item->key);
+#ifdef SORT_VAR_DATA
+      bwrite(out, P(internal_get_data)(item->key), SORT_DATA_SIZE(*item->key));
 #endif
     }
+#ifdef SORT_UNIFY
+  SORT_XTRACE(3, "Merging reduced %u records", merged);
+#endif
 
   return ctx->more_keys;
 }
+
+static u64
+P(internal_estimate)(struct sort_context *ctx, struct sort_bucket *b UNUSED)
+{
+  // Most of this is just wild guesses
+#ifdef SORT_VAR_KEY
+  uns avg = ALIGN_TO(sizeof(P(key))/4, CPU_STRUCT_ALIGN);
+#else
+  uns avg = ALIGN_TO(sizeof(P(key)), CPU_STRUCT_ALIGN);
+#endif
+  uns ws = 0;
+#ifdef SORT_UNIFY
+  ws += sizeof(void *);
+#endif
+#ifdef SORT_UNIFY_WORKSPACE
+  ws += avg;
+#endif
+#ifdef SORT_INTERNAL_RADIX
+  ws = MAX(ws, sizeof(P(internal_item_t)));
+#endif
+  // We ignore the data part of records, it probably won't make the estimate much worse
+  return (ctx->big_buf_size / (avg + ws + sizeof(P(internal_item_t))) * avg);
+}
+
+#undef SORT_COPY_HASH
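
A note on the SORT_COPY_HASH part of the patch: caching each key's hash next to the key pointer lets most comparisons be decided from the item array alone, without dereferencing the keys (which is where the cache misses come from); only hash ties fall back to the full comparison, and records with equal keys still end up adjacent, which is all the unify pass needs. Below is a minimal standalone sketch of that comparator idea; struct item, hash_key() and cmp_item() are illustrative stand-ins, not libucw API.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct key {
  char name[16];
};

/* Illustrative counterpart of P(internal_item_t) with SORT_COPY_HASH:
 * a key pointer plus a cached copy of the key's hash. */
struct item {
  struct key *key;
  uint32_t hash;
};

/* FNV-1a, standing in for P(hash). */
static uint32_t hash_key(const struct key *k)
{
  uint32_t h = 2166136261u;
  for (const char *p = k->name; *p; p++)
    h = (h ^ (uint8_t) *p) * 16777619u;
  return h;
}

/* Counterpart of ASORT_LT: decide by the cached hashes and touch
 * the keys themselves only on a hash tie. */
static int cmp_item(const void *a, const void *b)
{
  const struct item *x = a, *y = b;
  if (x->hash != y->hash)
    return (x->hash < y->hash) ? -1 : 1;
  return strcmp(x->key->name, y->key->name);
}

int main(void)
{
  struct key keys[] = { {"cherry"}, {"apple"}, {"banana"}, {"apple"} };
  struct item items[4];
  for (int i = 0; i < 4; i++)
    items[i] = (struct item) { &keys[i], hash_key(&keys[i]) };
  qsort(items, 4, sizeof(items[0]), cmp_item);
  for (int i = 0; i < 4; i++)   /* hash order; equal keys are adjacent */
    printf("%s\n", items[i].key->name);
  return 0;
}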
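
The reading loop fills big_buf from both ends at once: the fixed-size items grow from the front while the variable-length records grow from the back, and the single counter `remains` guards the gap between them (less one page kept in reserve for the alignment padding shown in the layout diagram). Here is the same bookkeeping in a self-contained sketch, with the buffer size and record format invented for the example:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BUF_SIZE 4096   /* invented; stands in for ctx->big_buf_size */

/* One fixed-size entry at the front of the buffer per record,
 * pointing at the record's bytes near the back. */
struct slot {
  char *rec;
};

int main(void)
{
  char *buf = malloc(BUF_SIZE);
  struct slot *item = (struct slot *) buf;   /* item array grows upward */
  char *end = buf + BUF_SIZE;                /* records grow downward */
  size_t remains = BUF_SIZE;                 /* free gap between the two */

  const char *input[] = { "alpha", "beta", "gamma", NULL };
  for (int i = 0; input[i]; i++)
    {
      size_t recsize = strlen(input[i]) + 1;
      size_t totalsize = recsize + sizeof(struct slot);
      if (totalsize > remains)
        break;                               /* buffer full: flush this run */
      remains -= totalsize;
      end -= recsize;
      memcpy(end, input[i], recsize);
      item->rec = end;
      item++;
    }

  for (struct slot *s = (struct slot *) buf; s < item; s++)
    printf("%s\n", s->rec);
  free(buf);
  return 0;
}

Growing the two regions toward each other means a single size check per record covers both, which is why the loop in the patch gets away with the lone `totalsize > remains` test.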
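
Under SORT_UNIFY, the write loop detects each run of adjacent equal keys in the sorted array and hands the whole group to P(write_merged)() in a single call, counting the records saved in `merged`. The grouping itself is a plain run-length scan over the sorted array; the sketch below uses integer keys and a summing merge as a stand-in for the real write_merged().

#include <stdio.h>

/* Stand-in for P(write_merged): here merging just sums the data
 * of all records sharing a key. */
static void write_merged(const int *keys, const int *data, int cnt)
{
  int sum = 0;
  for (int i = 0; i < cnt; i++)
    sum += data[i];
  printf("key %d -> %d (%d records)\n", keys[0], sum, cnt);
}

int main(void)
{
  /* Already sorted by key, as after P(array_sort). */
  int keys[] = { 1, 2, 2, 2, 5, 5 };
  int data[] = { 10, 1, 2, 3, 7, 8 };
  int n = 6, merged = 0;

  for (int i = 0; i < n; )
    {
      int cnt = 1;   /* length of the run of equal keys */
      while (i + cnt < n && keys[i + cnt] == keys[i])
        cnt++;
      write_merged(&keys[i], &data[i], cnt);
      merged += cnt - 1;
      i += cnt;
    }
  printf("merging reduced %d records\n", merged);
  return 0;
}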