X-Git-Url: http://mj.ucw.cz/gitweb/?a=blobdiff_plain;ds=sidebyside;f=lib%2Fsorter%2Fs-internal.h;h=05fc7e982475495e2ba83f53a6b11eaa1ecb86ba;hb=cff818411879583e66b6985b4179be0e9d4c525c;hp=c05ca077e574868bb7e27cd94c85ddc6cf24ee10;hpb=c15baf7b8cf0dc6a07b3c57ad346aee68ac6f19f;p=libucw.git

diff --git a/lib/sorter/s-internal.h b/lib/sorter/s-internal.h
index c05ca077..05fc7e98 100644
--- a/lib/sorter/s-internal.h
+++ b/lib/sorter/s-internal.h
@@ -19,11 +19,19 @@ typedef struct {
 #define ASORT_EXTRA_ARGS , P(internal_item_t) *ary
 #include "lib/arraysort.h"
 
+static inline void *P(internal_get_data)(P(key) *key)
+{
+  uns ksize = SORT_KEY_SIZE(*key);
+#ifdef SORT_UNIFY
+  ksize = ALIGN_TO(ksize, CPU_STRUCT_ALIGN);
+#endif
+  return (byte *) key + ksize;
+}
+
 static int P(internal)(struct sort_context *ctx, struct sort_bucket *bin, struct sort_bucket *bout, struct sort_bucket *bout_only)
 {
   sorter_alloc_buf(ctx);
-  ASSERT(bin->fb);		// Expects the input bucket to be already open for reading
-  struct fastbuf *in = bin->fb;
+  struct fastbuf *in = sbuck_read(bin);
 
   P(key) key, *keybuf = ctx->key_buf;
   if (!keybuf)
@@ -39,8 +47,8 @@ static int P(internal)(struct sort_context *ctx, struct sort_bucket *bin, struct
 #ifdef SORT_VAR_DATA
   if (sizeof(key) + 1024 + SORT_DATA_SIZE(key) > ctx->big_buf_half_size)
     {
-      SORT_XTRACE("s-internal: Generating a giant run");
-      struct fastbuf *out = sorter_open_write(bout); /* FIXME: Using a non-direct buffer would be nice here */
+      SORT_XTRACE(3, "s-internal: Generating a giant run");
+      struct fastbuf *out = sbuck_write(bout); /* FIXME: Using a non-direct buffer would be nice here */
       P(copy_data)(&key, in, out);
       bout->runs++;
       return 1;		// We don't know, but 1 is always safe
@@ -48,9 +56,11 @@ static int P(internal)(struct sort_context *ctx, struct sort_bucket *bin, struct
 #endif
 
   size_t bufsize = ctx->big_buf_half_size;	/* FIXME: In some cases, we can use the whole buffer */
+#ifdef CPU_64BIT_POINTERS
   bufsize = MIN((u64)bufsize, (u64)~0U * sizeof(P(internal_item_t)));	// The number of records must fit in uns
+#endif
 
-  SORT_XTRACE("s-internal: Reading (bufsize=%zd)", bufsize);
+  SORT_XTRACE(3, "s-internal: Reading (bufsize=%zd)", bufsize);
   P(internal_item_t) *item_array = ctx->big_buf, *item = item_array, *last_item;
   byte *end = (byte *) ctx->big_buf + bufsize;
   do
@@ -81,26 +91,62 @@ static int P(internal)(struct sort_context *ctx, struct sort_bucket *bin, struct
   last_item = item;
 
   uns count = last_item - item_array;
-  SORT_XTRACE("s-internal: Sorting %d items", count);
+  SORT_XTRACE(3, "s-internal: Sorting %u items", count);
   P(array_sort)(count, item_array);
 
-  SORT_XTRACE("s-internal: Writing");
+  SORT_XTRACE(3, "s-internal: Writing");
   if (!ctx->more_keys)
     bout = bout_only;
   struct fastbuf *out = sbuck_write(bout);
   bout->runs++;
-  /* FIXME: No unification done yet */
+  uns merged UNUSED = 0;
   for (item = item_array; item < last_item; item++)
     {
-      P(write_key)(out, item->key);
-#ifdef SORT_VAR_DATA
-      uns ksize = SORT_KEY_SIZE(*item->key);
 #ifdef SORT_UNIFY
-      ksize = ALIGN_TO(ksize, CPU_STRUCT_ALIGN);
+      if (item < last_item - 1 && !P(compare)(item->key, item[1].key))
+        {
+          // Rewrite the item structures with just pointers to keys and place
+          // pointers to data in the secondary array.
+          P(key) **key_array = (void *) item;
+          void **data_array = (void **) ctx->big_buf_half;
+          key_array[0] = item[0].key;
+          data_array[0] = P(internal_get_data)(key_array[0]);
+          uns cnt;
+          for (cnt=1; item+cnt < last_item && !P(compare)(key_array[0], item[cnt].key); cnt++)
+            {
+              key_array[cnt] = item[cnt].key;
+              data_array[cnt] = P(internal_get_data)(key_array[cnt]);
+            }
+          P(write_merged)(out, key_array, data_array, cnt, data_array+cnt);
+          item += cnt - 1;
+          merged += cnt - 1;
+          continue;
+        }
 #endif
-      bwrite(out, (byte *) item->key + ksize, SORT_DATA_SIZE(*item->key));
+#ifdef SORT_ASSERT_UNIQUE
+      ASSERT(item == last_item-1 || P(compare)(item->key, item[1].key) < 0);
+#endif
+      P(write_key)(out, item->key);
+#ifdef SORT_VAR_DATA
+      bwrite(out, P(internal_get_data)(item->key), SORT_DATA_SIZE(*item->key));
 #endif
     }
+#ifdef SORT_UNIFY
+  SORT_XTRACE(3, "Merging reduced %u records", merged);
+#endif
 
   return ctx->more_keys;
 }
+
+static u64
+P(internal_estimate)(struct sort_context *ctx, struct sort_bucket *b UNUSED)
+{
+  uns avg;
+#ifdef SORT_VAR_KEY
+  avg = ALIGN_TO(sizeof(P(key))/4, CPU_STRUCT_ALIGN);	// Wild guess...
+#else
+  avg = ALIGN_TO(sizeof(P(key)), CPU_STRUCT_ALIGN);
+#endif
+  // We ignore the data part of records, it probably won't make the estimate much worse
+  return (ctx->big_buf_half_size / (avg + sizeof(P(internal_item_t))) * avg);
+}
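
As a rough, standalone illustration of the unification step added above (sort the
in-memory items, then collapse each run of equal keys into one output record), the
following sketch uses made-up types and a sum-the-data merge rule; nothing in it is
libucw API, it only mirrors the sort-then-merge-runs pattern of the patched loop:

    /* Sort an array and merge runs of equal keys while writing the output.
     * struct item and the merge-by-summing rule are illustrative only. */
    #include <stdio.h>
    #include <stdlib.h>

    struct item { int key; int data; };

    static int cmp_item(const void *a, const void *b)
    {
      const struct item *x = a, *y = b;
      return (x->key > y->key) - (x->key < y->key);
    }

    int main(void)
    {
      struct item a[] = { {3,1}, {1,4}, {3,2}, {2,5}, {1,6} };
      size_t n = sizeof(a) / sizeof(a[0]);

      qsort(a, n, sizeof(a[0]), cmp_item);        /* plays the role of P(array_sort) */

      for (size_t i = 0; i < n; )
        {
          size_t j = i + 1;
          int sum = a[i].data;
          while (j < n && a[j].key == a[i].key)   /* extend the run of equal keys */
            sum += a[j++].data;                   /* "unify" by summing the data */
          printf("%d -> %d\n", a[i].key, sum);    /* plays the role of write_merged / write_key */
          i = j;
        }
      return 0;
    }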