sorter_alloc_buf(ctx);
if (in->flags & SBF_CUSTOM_PRESORT)
{
+ /*
+ * The trick with automatic joining, which we use for the normal presorter,
+ * is not needed here, because the custom presorter is never called in the
+ * middle of the sorted data.
+ */
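// The custom presorter writes a single sorted run directly into the output bucket.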
struct fastbuf *f = sbuck_write(out);
out->runs++;
- return ctx->custom_presort(f, ctx->big_buf, ctx->big_buf_size); // FIXME: out_only optimization?
+ return ctx->custom_presort(f, ctx->big_buf, ctx->big_buf_size);
}
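// Otherwise fall back to the generic internal in-memory sorter.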
return ctx->internal_sort(ctx, in, out, out_only);
}
static void
sorter_radix(struct sort_context *ctx, struct sort_bucket *b, uns bits)
{
+ // Add more bits if requested and allowed.
+ bits = MIN(bits + sorter_add_radix_bits, sorter_max_radix_bits);
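+ // (e.g., a request for 9 bits with sorter_add_radix_bits=1 and sorter_max_radix_bits=10 yields 10)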
+
uns nbuck = 1 << bits;
SORT_XTRACE(2, "Running radix split on %s with hash %d bits of %d (expecting %s buckets)",
F_BSIZE(b), bits, b->hash_bits, stk_fsize(sbuck_size(b) / nbuck));
// By how many bits do we have to reduce the bucket size before it fits in RAM?
// (this is insanely large if the input size is unknown, but it serves our purpose)
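// (e.g., insize = 10 GB and mem = 0.8 GB give bits = 4, since 10 GB >> 4 = 640 MB)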
u64 insize = sbuck_size(b);
- u64 mem = ctx->internal_estimate(ctx, b) * 0.8; // FIXME: Magical factor for various non-uniformities
+ u64 mem = ctx->internal_estimate(ctx, b) * 0.8; // Magical factor accounting for various non-uniformities
uns bits = 0;
while ((insize >> bits) > mem)
bits++;
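// Create a memory pool for the sorter's metadata and an empty list of buckets.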
ctx->pool = mp_new(4096);
clist_init(&ctx->bucket_list);
sorter_prepare_buf(ctx);
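+ // Make the threads for parallel array sorting available.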
+ asort_start_threads(0);
// Create bucket containing the source
struct sort_bucket *bin = sbuck_new(ctx);
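// Process the bucket following the list head until the head (which collects the final output) is the only one left.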
while (bout = clist_head(&ctx->bucket_list), b = clist_next(&ctx->bucket_list, &bout->n))
sorter_decide(ctx, b);
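+ // All buckets have been processed; release the worker threads.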
+ asort_stop_threads();
sorter_free_buf(ctx);
sbuck_write(bout); // Force empty bucket to a file
SORT_XTRACE(2, "Final size: %s", F_BSIZE(bout));