/*
 * NOTE(review): fragment -- the enclosing function begins before this chunk
 * and its closing brace is not visible, so its name and full contract cannot
 * be confirmed from here. Also note the stray leading "+" markers below:
 * they look like unified-diff residue and make this text non-compiling C --
 * TODO confirm against the original file.
 */
sorter_alloc_buf(ctx);
if (in->flags & SBF_CUSTOM_PRESORT)
{
+ /*
+ * The trick with automatic joining, which we use for the normal presorter,
+ * is not necessary with the custom presorter, because the custom presorter
+ * is never called in the middle of the sorted data.
+ */
// Open the output bucket for writing and account for the run we emit.
struct fastbuf *f = sbuck_write(out);
out->runs++;
// Delegate the entire presorting step to the user-supplied callback,
// handing it the big buffer as scratch space. Its return value becomes
// ours -- presumably a success/size indication; verify against callers.
return ctx->custom_presort(f, ctx->big_buf, ctx->big_buf_size);
/*
 * sorter_radix -- one radix-splitting pass of the sorter.
 *
 * Splits bucket @b into 1 << bits sub-buckets keyed by hash bits (per the
 * trace message below). NOTE(review): the body is cut off past the trace
 * call in this chunk, so only the entry is documented here. The leading
 * "+" markers appear to be unified-diff residue -- TODO confirm.
 */
static void
sorter_radix(struct sort_context *ctx, struct sort_bucket *b, uns bits)
{
+ // Add more bits if requested and allowed.
+ bits = MIN(bits + sorter_add_radix_bits, sorter_max_radix_bits);
+
// Number of output sub-buckets produced by this pass.
uns nbuck = 1 << bits;
SORT_XTRACE(2, "Running radix split on %s with hash %d bits of %d (expecting %s buckets)",
F_BSIZE(b), bits, b->hash_bits, stk_fsize(sbuck_size(b) / nbuck));
// NOTE(review): fragment -- the enclosing function (input-bucket setup,
// presumably the top-level sorter driver) begins before this chunk.
// Propagate the context's hash-bit count to the freshly set up input bucket.
bin->hash_bits = ctx->hash_bits;
// Register the input bucket on the context's bucket list.
clist_add_tail(&ctx->bucket_list, &bin->n);
SORT_XTRACE(2, "Input size: %s, %d hash bits", F_BSIZE(bin), bin->hash_bits);
+ // Pick the smaller fastbuf parameter set for small inputs (diff-marked line;
+ // the "+" prefix looks like patch residue -- TODO confirm).
+ ctx->fb_params = (bin->size < sorter_small_input) ? &sorter_small_fb_params : &sorter_fb_params;
// Create bucket for the output
struct sort_bucket *bout = sbuck_new(ctx);