for (uns i=0; i<buckets; i++)
{
uns n = cnt[i] - pos;
- if (n < sorter_radix_threshold || shift < ASORT_MIN_SHIFT)
+ if (n * ctx->elt_size < sorter_radix_threshold || shift < ASORT_MIN_SHIFT)
{
ctx->quicksort(buffer, n);
if (!swapped_output)
rs_finish(struct worker_thread *thr UNUSED, struct work *ww)
{
struct rs_work *w = (struct rs_work *) ww;
- DBG("Thread %d: Finishing %d items, shift=%d", thr->id, w->num_elts, w->shift);
- if (w->shift < ASORT_MIN_SHIFT || w->num_elts < sorter_radix_threshold)
+ if (thr)
+ DBG("Thread %d: Finishing %d items, shift=%d", thr->id, w->num_elts, w->shift);
+ if (w->shift < ASORT_MIN_SHIFT || w->num_elts * w->ctx->elt_size < sorter_radix_threshold)
{
w->ctx->quicksort(w->in, w->num_elts);
if (w->swap_output)
memcpy(w->out, w->in, w->num_elts * w->ctx->elt_size);
}
else
asort_radix(w->ctx, w->in, w->out, w->num_elts, w->shift, w->swap_output);
- DBG("Thread %d: Finishing done", thr->id);
+ if (thr)
+ DBG("Thread %d: Finishing done", thr->id);
+}
+
+static void
+rs_wait_small(struct asort_context *ctx)
+{
+ struct rs_work *w;
+
+ while ((w = (struct rs_work *) work_wait(ctx->rs_work_queue)))
+ {
+ DBG("Reaping small chunk of %d items", w->num_elts);
+ ep_free(ctx->eltpool, w);
+ }
+}
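
Context for the two functions above: rs_finish() is now reachable both through the work queue, where a worker thread invokes it, and synchronously with no thread context, hence the new if (thr) guards around the DBG() calls. A minimal sketch of the two call shapes, assuming the work item has been set up to dispatch to rs_finish (that setup lies outside this excerpt):

/* Background path: a worker thread runs the handler, so thr != NULL
 * and the DBG() lines may safely print thr->id; rs_wait_small() later
 * reaps the finished item and returns it to ctx->eltpool. */
work_submit(ctx->rs_work_queue, &w->w);

/* Inline path, used for chunks under sorter_thread_chunk: there is no
 * worker thread, so rs_finish() gets a NULL thr and DBG() is skipped. */
rs_finish(NULL, &w->w);
ep_free(ctx->eltpool, w);
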
static void
rs_radix(struct asort_context *ctx, void *array, void *buffer, uns num_elts, uns hash_bits, uns swapped_output)
uns blksize = num_elts / sorter_threads;
DBG(">>> n=%d h=%d s=%d blk=%d sw=%d", num_elts, hash_bits, shift, blksize, swapped_output);
+ // If there are any small chunks in progress, wait for them to finish
+ rs_wait_small(ctx);
+
// Start parallel counting
void *iptr = array;
for (uns i=0; i<sorter_threads; i++)
for (uns i=0; i<buckets; i++)
{
uns n = cnt[i] - pos;
+ if (!n)
+ continue;
if (n * ctx->elt_size < sorter_thread_threshold)
{
struct rs_work *w = ep_alloc(ctx->eltpool);
w->num_elts = n;
w->shift = shift;
w->swap_output = !swapped_output;
- clist_add_tail(&ctx->rs_bits, &w->w.n);
- DBG("Scheduling block %d+%d", pos, n);
+ if (n * ctx->elt_size < sorter_thread_chunk)
+ {
+ DBG("Sorting block %d+%d inline", pos, n);
+ rs_finish(NULL, &w->w);
+ ep_free(ctx->eltpool, w);
+ }
+ else
+ {
+ DBG("Scheduling block %d+%d", pos, n);
+ work_submit(ctx->rs_work_queue, &w->w);
+ }
}
else
rs_radix(ctx, buffer, array, n, shift, !swapped_output);
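
Taken together, the bucket loop above implements a three-tier policy keyed on a chunk's size in bytes. Restated in simplified form (bytes is an illustrative local, not a name from the patch):

uns bytes = n * ctx->elt_size;
if (bytes >= sorter_thread_threshold)
  rs_radix(ctx, buffer, array, n, shift, !swapped_output); /* big: keep splitting in parallel */
else if (bytes >= sorter_thread_chunk)
  work_submit(ctx->rs_work_queue, &w->w);                  /* small: quicksort in the background */
else
  {
    rs_finish(NULL, &w->w);                                /* tiny: queueing overhead would dominate */
    ep_free(ctx->eltpool, w);
  }
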
for (uns i=0; i<sorter_threads; i++)
ctx->rs_works[i] = big_alloc(sizeof(struct rs_work) + sizeof(uns) * (1 << ctx->radix_bits));
- // Prepare work structures for all remaining small bits which will be sorted later.
- clist_init(&ctx->rs_bits);
+ // Prepare a pool for all remaining small bits which will be sorted in the background.
ctx->eltpool = ep_new(sizeof(struct rs_work), 1000);
// Do the big splitting
big_free(ctx->rs_works[i], sizeof(struct rs_work) + sizeof(uns) * (1 << ctx->radix_bits));
// Finish the small blocks
- struct rs_work *w, *tmp;
- CLIST_WALK_DELSAFE(w, ctx->rs_bits, tmp)
- work_submit(&q, &w->w);
- while (work_wait(&q))
- ;
+ rs_wait_small(ctx);
+ ASSERT(!ctx->eltpool->num_allocated);
ep_delete(ctx->eltpool);
work_queue_cleanup(&q);
asort_stop_threads();
ctx->num_elts * ctx->elt_size >= sorter_thread_threshold &&
!(sorter_debug & SORT_DEBUG_ASORT_NO_THREADS));
- if (ctx->num_elts < sorter_radix_threshold ||
+ if (ctx->num_elts * ctx->elt_size < sorter_radix_threshold ||
ctx->hash_bits <= ASORT_MIN_SHIFT ||
!ctx->radix_split ||
(sorter_debug & SORT_DEBUG_ASORT_NO_RADIX))
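
The final hunk follows the same theme as the rest of the patch: thresholds are compared against sizes in bytes (element count times elt_size) rather than raw element counts, so one limit behaves sensibly for both tiny and huge elements. The eligibility test, restated as a predicate (an illustrative helper, not a name from the patch):

static inline int
radix_worthwhile(struct asort_context *ctx)
{
  return ctx->num_elts * ctx->elt_size >= sorter_radix_threshold &&
         ctx->hash_bits > ASORT_MIN_SHIFT &&
         ctx->radix_split &&
         !(sorter_debug & SORT_DEBUG_ASORT_NO_RADIX);
}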