X-Git-Url: http://mj.ucw.cz/gitweb/?a=blobdiff_plain;f=lib%2Fsorter%2Fgovern.c;h=1211a81cfe55e912d87c1b9fab4bfb875681f4bd;hb=969ce845f3bf35bb7423bac705d46196bb863e72;hp=97942dee37bbc0d7604d3b2a6ed62f0e7fb631ca;hpb=baa9f9a3368c8d318b9711340727f822d8fc8a34;p=libucw.git

diff --git a/lib/sorter/govern.c b/lib/sorter/govern.c
index 97942dee..1211a81c 100644
--- a/lib/sorter/govern.c
+++ b/lib/sorter/govern.c
@@ -10,105 +10,48 @@
 #include "lib/lib.h"
 #include "lib/fastbuf.h"
 #include "lib/mempool.h"
+#include "lib/stkstring.h"
 #include "lib/sorter/common.h"
 
-void *
-sorter_alloc(struct sort_context *ctx, uns size)
-{
-  return mp_alloc_zero(ctx->pool, size);
-}
+#include <string.h>
+#include <sys/time.h>
+#include <time.h>
 
-struct sort_bucket *
-sbuck_new(struct sort_context *ctx)
-{
-  return sorter_alloc(ctx, sizeof(struct sort_bucket));
-}
+#define F_BSIZE(b) stk_fsize(sbuck_size(b))
 
-void
-sbuck_drop(struct sort_bucket *b)
+static u64
+sorter_clock(void)
 {
-  if (b)
-    {
-      if (b->n.prev)
-        clist_remove(&b->n);
-      bclose(b->fb);
-      bzero(b, sizeof(*b));
-    }
+  struct timeval tv;
+  gettimeofday(&tv, NULL);
+  return (u64)tv.tv_sec * 1000 + tv.tv_usec / 1000;
 }
 
-int
-sbuck_can_read(struct sort_bucket *b)
-{
-  return b && b->size;
-}
-
-struct fastbuf *
-sbuck_open_read(struct sort_bucket *b)
-{
-  /* FIXME: These functions should handle buckets with no fb and only name. */
-  ASSERT(b->fb);
-  return b->fb;
-}
-
-struct fastbuf *
-sbuck_open_write(struct sort_bucket *b)
-{
-  if (!b->fb)
-    b->fb = bopen_tmp(sorter_stream_bufsize);
-  return b->fb;
-}
-
-void
-sbuck_close_read(struct sort_bucket *b)
-{
-  if (!b)
-    return;
-  ASSERT(b->fb);
-  bclose(b->fb);
-  b->fb = NULL;
-}
-
-void
-sbuck_close_write(struct sort_bucket *b)
-{
-  if (b->fb)
-    {
-      b->size = btell(b->fb);
-      brewind(b->fb);
-    }
-}
-
-void
-sorter_alloc_buf(struct sort_context *ctx)
+static void
+sorter_start_timer(struct sort_context *ctx)
 {
-  if (ctx->big_buf)
-    return;
-  u64 bs = MAX(sorter_bufsize/2, 1);
-  bs = ALIGN_TO(bs, (u64)CPU_PAGE_SIZE);
-  ctx->big_buf = big_alloc(2*bs);
-  ctx->big_buf_size = 2*bs;
-  ctx->big_buf_half = ((byte*) ctx->big_buf) + bs;
-  ctx->big_buf_half_size = bs;
-  SORT_XTRACE("Allocated sorting buffer (%jd bytes)", (uintmax_t) bs);
+  ctx->start_time = sorter_clock();
 }
 
-void
-sorter_free_buf(struct sort_context *ctx)
+static uns
+sorter_speed(struct sort_context *ctx, u64 size)
 {
-  if (!ctx->big_buf)
-    return;
-  big_free(ctx->big_buf, ctx->big_buf_size);
-  ctx->big_buf = NULL;
-  SORT_XTRACE("Freed sorting buffer");
+  u64 stop_time = sorter_clock();
+  if (!size)
+    return 0;
+  if (stop_time <= ctx->start_time)
+    return -1;
+  return (uns)((double)size / (1<<20) * 1000 / (stop_time-ctx->start_time));
 }
 
-static int sorter_presort(struct sort_context *ctx, struct sort_bucket *in, struct sort_bucket *out, struct sort_bucket *out_only)
+static int
+sorter_presort(struct sort_context *ctx, struct sort_bucket *in, struct sort_bucket *out, struct sort_bucket *out_only)
 {
-  /* FIXME: Mode with no presorting (mostly for debugging) */
   sorter_alloc_buf(ctx);
   if (in->flags & SBF_CUSTOM_PRESORT)
     {
-      struct fastbuf *f = sbuck_open_write(out);
+      struct fastbuf *f = sbuck_write(out);
+      out->runs++;
       return ctx->custom_presort(f, ctx->big_buf, ctx->big_buf_size);  // FIXME: out_only optimization?
     }
   return ctx->internal_sort(ctx, in, out, out_only);
@@ -117,6 +60,9 @@ static int sorter_presort(struct sort_context *ctx, struct sort_bucket *in, stru
 static inline struct sort_bucket *
 sbuck_join_to(struct sort_bucket *b)
 {
+  if (sorter_debug & SORT_DEBUG_NO_JOIN)
+    return NULL;
+
   struct sort_bucket *out = (struct sort_bucket *) b->n.prev;  // Such bucket is guaranteed to exist
   return (out->flags & SBF_FINAL) ? out : NULL;
 }
@@ -124,74 +70,154 @@ sbuck_join_to(struct sort_bucket *b)
 static void
 sorter_join(struct sort_bucket *b)
 {
-  struct sort_bucket *join = sbuck_join_to(b);
-  ASSERT(join);
+  struct sort_bucket *join = (struct sort_bucket *) b->n.prev;
+  ASSERT(join->flags & SBF_FINAL);
+  ASSERT(b->runs == 1);
 
-  // FIXME: What if the final bucket doesn't contain any file yet?
-
-  SORT_TRACE("Copying %jd bytes to output file", (uintmax_t) b->size);
-  struct fastbuf *src = sbuck_open_read(b);
-  struct fastbuf *dest = sbuck_open_write(join);
-  bbcopy(src, dest, ~0U);
-  sbuck_drop(b);
+  if (!sbuck_has_file(join))
+    {
+      // The final bucket doesn't have any file associated yet, so replace
+      // it with the new bucket.
+      SORT_XTRACE(2, "Replaced final bucket");
+      b->flags |= SBF_FINAL;
+      sbuck_drop(join);
+    }
+  else
+    {
+      SORT_TRACE("Copying to output file: %s", F_BSIZE(b));
+      struct fastbuf *src = sbuck_read(b);
+      struct fastbuf *dest = sbuck_write(join);
+      bbcopy(src, dest, ~0U);
+      sbuck_drop(b);
+    }
 }
 
 static void
 sorter_twoway(struct sort_context *ctx, struct sort_bucket *b)
 {
-  struct sort_bucket *ins[3], *outs[3];
+  struct sort_bucket *ins[3] = { NULL }, *outs[3] = { NULL };
+  cnode *list_pos = b->n.prev;
   struct sort_bucket *join = sbuck_join_to(b);
 
-  SORT_TRACE("Presorting");
-  ins[0] = sbuck_new(ctx);
-  sbuck_open_read(b);
-  if (!sorter_presort(ctx, b, ins[0], join ? : ins[0]))
+  if (!(sorter_debug & SORT_DEBUG_NO_PRESORT) || (b->flags & SBF_CUSTOM_PRESORT))
     {
-      if (join)
-        sbuck_drop(ins[0]);
-      else
-        clist_insert_after(&ins[0]->n, &b->n);
+      SORT_XTRACE(3, "%s", ((b->flags & SBF_CUSTOM_PRESORT) ? "Custom presorting" : "Presorting"));
+      sorter_start_timer(ctx);
+      ins[0] = sbuck_new(ctx);
+      if (!sorter_presort(ctx, b, ins[0], join ? : ins[0]))
+        {
+          SORT_XTRACE(((b->flags & SBF_SOURCE) ? 1 : 2), "Sorted in memory");
+          if (join)
+            sbuck_drop(ins[0]);
+          else
+            clist_insert_after(&ins[0]->n, list_pos);
+          sbuck_drop(b);
+          return;
+        }
+
+      ins[1] = sbuck_new(ctx);
+      int i = 1;
+      while (sorter_presort(ctx, b, ins[i], ins[i]))
+        i = 1-i;
       sbuck_drop(b);
-      return;
+      SORT_TRACE("Presorting pass (%d+%d runs, %s+%s, %dMB/s)",
+                 ins[0]->runs, ins[1]->runs,
+                 F_BSIZE(ins[0]), F_BSIZE(ins[1]),
+                 sorter_speed(ctx, sbuck_size(ins[0]) + sbuck_size(ins[1])));
+    }
+  else
+    {
+      SORT_XTRACE(2, "Presorting disabled");
+      ins[0] = b;
     }
 
-  ins[1] = sbuck_new(ctx);
-  ins[2] = NULL;
-  int i = 1;
-  while (sorter_presort(ctx, b, ins[i], ins[i]))
-    i = 1-i;
-  sbuck_close_read(b);
-  sbuck_close_write(ins[0]);
-  sbuck_close_write(ins[1]);
-
-  SORT_TRACE("Main sorting");
+  SORT_XTRACE(3, "Main sorting");
+  uns pass = 0;
   do
     {
-      if (ins[0]->runs == 1 && ins[1]->runs == 1 && join)  // FIXME: Debug switch for disabling joining optimizations
+      ++pass;
+      sorter_start_timer(ctx);
+      if (ins[0]->runs == 1 && ins[1]->runs == 1 && join)
         {
           // This is guaranteed to produce a single run, so join if possible
+          sh_off_t join_size = sbuck_size(join);
           outs[0] = join;
           outs[1] = NULL;
           ctx->twoway_merge(ctx, ins, outs);
-          ASSERT(outs[0]->runs == 2);
-          outs[0]->runs--;
-          SORT_TRACE("Pass done (joined final run)");
-          sbuck_drop(b);
+          ASSERT(join->runs == 2);
+          join->runs--;
+          join_size = sbuck_size(join) - join_size;
+          SORT_TRACE("Mergesort pass %d (final run, %s, %dMB/s)", pass, stk_fsize(join_size), sorter_speed(ctx, join_size));
+          sbuck_drop(ins[0]);
+          sbuck_drop(ins[1]);
           return;
         }
       outs[0] = sbuck_new(ctx);
       outs[1] = sbuck_new(ctx);
       outs[2] = NULL;
       ctx->twoway_merge(ctx, ins, outs);
-      sbuck_close_write(outs[0]);
-      sbuck_close_write(outs[1]);
-      SORT_TRACE("Pass done (%d+%d runs, %jd+%jd bytes)", outs[0]->runs, outs[1]->runs, (uintmax_t) outs[0]->size, (uintmax_t) outs[1]->size);
+      SORT_TRACE("Mergesort pass %d (%d+%d runs, %s+%s, %dMB/s)", pass,
+                 outs[0]->runs, outs[1]->runs,
+                 F_BSIZE(outs[0]), F_BSIZE(outs[1]),
+                 sorter_speed(ctx, sbuck_size(outs[0]) + sbuck_size(outs[1])));
       sbuck_drop(ins[0]);
       sbuck_drop(ins[1]);
       memcpy(ins, outs, 3*sizeof(struct sort_bucket *));
-    } while (ins[1]->size);
+    } while (sbuck_have(ins[1]));
   sbuck_drop(ins[1]);
-  clist_insert_after(&ins[0]->n, &b->n);
+  clist_insert_after(&ins[0]->n, list_pos);
+}
+
+static uns
+sorter_radix_bits(struct sort_context *ctx, struct sort_bucket *b)
+{
+  if (!b->hash_bits || !ctx->radix_split ||
+      (b->flags & SBF_CUSTOM_PRESORT) ||
+      (sorter_debug & SORT_DEBUG_NO_RADIX))
+    return 0;
+
+  u64 in = sbuck_size(b);
+  u64 mem = ctx->internal_estimate(ctx, b) * 0.8;  // FIXME: Magical factor for hash non-uniformity
+  if (in <= mem)
+    return 0;
+
+  uns n = sorter_min_radix_bits;
+  while (n < sorter_max_radix_bits && n < b->hash_bits && (in >> n) > mem)
+    n++;
+  return n;
+}
+
+static void
+sorter_radix(struct sort_context *ctx, struct sort_bucket *b, uns bits)
+{
+  uns nbuck = 1 << bits;
+  SORT_XTRACE(2, "Running radix split on %s with hash %d bits of %d (expecting %s buckets)",
+              F_BSIZE(b), bits, b->hash_bits, stk_fsize(sbuck_size(b) / nbuck));
+  sorter_start_timer(ctx);
+
+  struct sort_bucket **outs = alloca(nbuck * sizeof(struct sort_bucket *));
+  for (uns i=nbuck; i--; )
+    {
+      outs[i] = sbuck_new(ctx);
+      outs[i]->hash_bits = b->hash_bits - bits;
+      clist_insert_after(&outs[i]->n, &b->n);
+    }
+
+  ctx->radix_split(ctx, b, outs, b->hash_bits - bits, bits);
+
+  u64 min = ~(u64)0, max = 0, sum = 0;
+  for (uns i=0; i<nbuck; i++)
+    {
+      u64 s = sbuck_size(outs[i]);
+      min = MIN(min, s);
+      max = MAX(max, s);
+      sum += s;
+      if (nbuck > 4)
+        sbuck_swap_out(outs[i]);
+    }
+
+  SORT_TRACE("Radix split (%d buckets, %s min, %s max, %s avg, %dMB/s)", nbuck,
+             stk_fsize(min), stk_fsize(max), stk_fsize(sum / nbuck), sorter_speed(ctx, sum));
   sbuck_drop(b);
 }
@@ -200,43 +226,47 @@ sorter_run(struct sort_context *ctx)
 {
   ctx->pool = mp_new(4096);
   clist_init(&ctx->bucket_list);
-
-  /* FIXME: There should be a way how to detect size of the input file */
-  /* FIXME: Remember to test sorting of empty files */
+  sorter_prepare_buf(ctx);
 
   // Create bucket containing the source
   struct sort_bucket *bin = sbuck_new(ctx);
-  bin->flags = SBF_SOURCE;
+  bin->flags = SBF_SOURCE | SBF_OPEN_READ;
   if (ctx->custom_presort)
     bin->flags |= SBF_CUSTOM_PRESORT;
   else
     bin->fb = ctx->in_fb;
   bin->ident = "in";
-  bin->size = ~(u64)0;
+  bin->size = ctx->in_size;
   bin->hash_bits = ctx->hash_bits;
   clist_add_tail(&ctx->bucket_list, &bin->n);
+  SORT_XTRACE(2, "Input size: %s", F_BSIZE(bin));
 
   // Create bucket for the output
   struct sort_bucket *bout = sbuck_new(ctx);
   bout->flags = SBF_FINAL;
-  bout->fb = ctx->out_fb;
+  if (bout->fb = ctx->out_fb)
+    bout->flags |= SBF_OPEN_WRITE;
   bout->ident = "out";
   bout->runs = 1;
   clist_add_head(&ctx->bucket_list, &bout->n);
 
   struct sort_bucket *b;
-  while (b = clist_next(&ctx->bucket_list, &bout->n))
+  uns bits;
+  while (bout = clist_head(&ctx->bucket_list), b = clist_next(&ctx->bucket_list, &bout->n))
     {
-      if (!b->size)
+      SORT_XTRACE(2, "Next block: %s, %d hash bits", F_BSIZE(b), b->hash_bits);
+      if (!sbuck_have(b))
         sbuck_drop(b);
       else if (b->runs == 1)
         sorter_join(b);
+      else if (bits = sorter_radix_bits(ctx, b))
+        sorter_radix(ctx, b, bits);
       else
         sorter_twoway(ctx, b);
     }
 
   sorter_free_buf(ctx);
-  sbuck_close_write(bout);
-  SORT_XTRACE("Final size: %jd", (uintmax_t) bout->size);
-  ctx->out_fb = sbuck_open_read(bout);
+  sbuck_write(bout);  // Force empty bucket to a file
+  SORT_XTRACE(2, "Final size: %s", F_BSIZE(bout));
+  ctx->out_fb = sbuck_read(bout);
 }