+/*
+ * Multi-way sorting pass: presorts the source bucket @b into a list of
+ * internally-sorted parts, then repeatedly merges up to max_ways parts per
+ * pass via ctx->multiway_merge() until a single bucket remains.  The result
+ * is inserted at @b's original position in the bucket list; @b is dropped.
+ * If @b has a join target, the first presorted run is emitted into it.
+ */
+static void
+sorter_multiway(struct sort_context *ctx, struct sort_bucket *b)
+{
+ clist parts;
+ cnode *list_pos = b->n.prev;	/* remember where to re-insert the result: right after @b's predecessor */
+ struct sort_bucket *join = sbuck_join_to(b);
+
+ clist_init(&parts);
+ ASSERT(!(sorter_debug & SORT_DEBUG_NO_PRESORT));
+ // FIXME: What if the parts will be too small?
+ SORT_XTRACE(3, "%s", ((b->flags & SBF_CUSTOM_PRESORT) ? "Custom presorting" : "Presorting"));
+ uns cont;
+ uns part_cnt = 0;
+ u64 total_size = 0;
+ sorter_start_timer(ctx);
+ /* Presort @b into as many parts as sorter_presort() needs; each part is
+  * swapped out to disk right away.  When a join target exists, the very
+  * first part is presorted directly into it instead of into @p. */
+ do
+ {
+ struct sort_bucket *p = sbuck_new(ctx);
+ clist_add_tail(&parts, &p->n);
+ cont = sorter_presort(ctx, b, p, (!part_cnt && join) ? join : p);
+ part_cnt++;
+ total_size += sbuck_size(p);
+ sbuck_swap_out(p);
+ }
+ while (cont);
+ sorter_stop_timer(ctx, &ctx->total_pre_time);
+
+ // FIXME: This is way too similar to the two-way case.
+ /* Everything fit in a single presorting pass -- no merging is needed. */
+ if (part_cnt == 1)
+ {
+ struct sort_bucket *p = clist_head(&parts);
+ SORT_XTRACE(((b->flags & SBF_SOURCE) ? 1 : 2), "Sorted in memory");
+ if (join)
+ {
+ /* The single run was already emitted into the join target above, so
+  * the (empty) part bucket is dropped and the run count corrected. */
+ ASSERT(join->runs == 2);
+ join->runs--;
+ sbuck_drop(p);
+ }
+ else
+ clist_insert_after(&p->n, list_pos);
+ sbuck_drop(b);
+ return;
+ }
+
+ SORT_TRACE("Multi-way presorting pass (%d parts, %s, %dMB/s)", part_cnt, stk_fsize(total_size), sorter_speed(ctx, total_size));
+ sbuck_drop(b);
+
+ uns max_ways = 16;	/* maximum number of parts merged in one pass */
+ struct sort_bucket *ways[max_ways+1];	/* NULL-terminated array of merge inputs */
+ SORT_XTRACE(2, "Starting up to %d-way merge", max_ways);
+ for (;;)
+ {
+ /* Pull up to max_ways parts off the head of the list... */
+ uns n = 0;
+ struct sort_bucket *p;
+ while (n < max_ways && (p = clist_head(&parts)))
+ {
+ clist_remove(&p->n);
+ ways[n++] = p;
+ }
+ ways[n] = NULL;
+ ASSERT(n > 1);
+
+ /* ...merge them into a fresh output bucket... */
+ struct sort_bucket *out;
+ out = sbuck_new(ctx); // FIXME: No joining so far
+ sorter_start_timer(ctx);
+ ctx->multiway_merge(ctx, ways, out);
+ sorter_stop_timer(ctx, &ctx->total_ext_time);
+
+ /* ...and release the consumed inputs. */
+ for (uns i=0; i<n; i++)
+ sbuck_drop(ways[i]);
+
+ if (clist_empty(&parts))
+ {
+ /* Final pass: the merged bucket is the overall result. */
+ clist_insert_after(&out->n, list_pos);
+ SORT_TRACE("Multi-way merge completed (%s, %dMB/s)", F_BSIZE(out), sorter_speed(ctx, sbuck_size(out)));
+ return;
+ }
+ else
+ {
+ /* More parts remain: swap the merged bucket out and queue it for
+  * another pass at the tail of the list. */
+ sbuck_swap_out(out);
+ clist_add_tail(&parts, &out->n);
+ SORT_TRACE("Multi-way merge pass (%d ways, %s, %dMB/s)", n, F_BSIZE(out), sorter_speed(ctx, sbuck_size(out)));
+ }
+ }
+}
+
+/*
+ * Decides whether bucket @b should be radix-split before sorting and, if so,
+ * how many hash bits to split on.  Returns 0 when radix splitting is not
+ * applicable or the bucket already fits in memory; otherwise returns the
+ * smallest usable bit count (within the configured min/max) for which the
+ * expected sub-bucket size fits in the in-memory estimate.
+ */
+static uns
+sorter_radix_bits(struct sort_context *ctx, struct sort_bucket *b)
+{
+  /* Splitting requires a radix_split callback, enough hash bits, no custom
+     presorter on the bucket, and radix splitting not being debug-disabled. */
+  if (!ctx->radix_split)
+    return 0;
+  if (!b->hash_bits || b->hash_bits < sorter_min_radix_bits)
+    return 0;
+  if (b->flags & SBF_CUSTOM_PRESORT)
+    return 0;
+  if (sorter_debug & SORT_DEBUG_NO_RADIX)
+    return 0;
+
+  u64 input_size = sbuck_size(b);
+  u64 mem_limit = ctx->internal_estimate(ctx, b) * 0.8; // FIXME: Magical factor for hash non-uniformity
+  if (input_size <= mem_limit)
+    return 0;
+
+  /* Grow the bit count until each expected 1/2^bits slice fits in memory,
+     or until the configured maximum / available hash bits are exhausted. */
+  uns bits = sorter_min_radix_bits;
+  while (bits < sorter_max_radix_bits && bits < b->hash_bits && (input_size >> bits) > mem_limit)
+    bits++;
+  return bits;
+}
+
+/*
+ * Radix-split pass: distributes bucket @b into 2^@bits sub-buckets using
+ * @bits bits of the hash via ctx->radix_split().  The sub-buckets are
+ * inserted into the bucket list right after @b (in ascending index order)
+ * and @b itself is dropped.
+ */
+static void
+sorter_radix(struct sort_context *ctx, struct sort_bucket *b, uns bits)
+{
+ uns nbuck = 1 << bits;
+ SORT_XTRACE(2, "Running radix split on %s with hash %d bits of %d (expecting %s buckets)",
+ F_BSIZE(b), bits, b->hash_bits, stk_fsize(sbuck_size(b) / nbuck));
+ sorter_free_buf(ctx);
+ sorter_start_timer(ctx);
+
+ /* Create the output buckets.  Iterating downwards while always inserting
+  * directly after @b leaves them in ascending index order in the list. */
+ struct sort_bucket **outs = alloca(nbuck * sizeof(struct sort_bucket *));
+ for (uns i=nbuck; i--; )
+ {
+ outs[i] = sbuck_new(ctx);
+ outs[i]->hash_bits = b->hash_bits - bits;	/* hash bits left for deeper splits */
+ clist_insert_after(&outs[i]->n, &b->n);
+ }
+
+ /* NOTE(review): third argument looks like the bit offset of the split
+  * field within the hash -- confirm against radix_split's contract. */
+ ctx->radix_split(ctx, b, outs, b->hash_bits - bits, bits);
+
+ /* Collect size statistics for the trace message below. */
+ u64 min = ~(u64)0, max = 0, sum = 0;
+ for (uns i=0; i<nbuck; i++)
+ {
+ u64 s = sbuck_size(outs[i]);
+ min = MIN(min, s);
+ max = MAX(max, s);
+ sum += s;
+ if (nbuck > 4)
+ sbuck_swap_out(outs[i]);	/* presumably limits open files/buffers for large splits -- confirm */
+ }
+
+ sorter_stop_timer(ctx, &ctx->total_ext_time);
+ SORT_TRACE("Radix split (%d buckets, %s min, %s max, %s avg, %dMB/s)", nbuck,
+ stk_fsize(min), stk_fsize(max), stk_fsize(sum / nbuck), sorter_speed(ctx, sum));
+ sbuck_drop(b);
+}
+