#define ASORT_MIN_SHIFT 2
+// Tracing helpers for the array sorter. ASORT_TRACE() logs at verbosity
+// level 1; ASORT_XTRACE(level, ...) logs only when sorter_trace_array is at
+// least the given level. Both use GNU-style named variadic macro arguments
+// ("x...") and forward to msg(L_DEBUG, ...).
+#define ASORT_TRACE(x...) ASORT_XTRACE(1, x)
+#define ASORT_XTRACE(level, x...) do { if (sorter_trace_array >= level) msg(L_DEBUG, x); } while(0)
+
static void
asort_radix(struct asort_context *ctx, void *array, void *buffer, uns num_elts, uns hash_bits, uns swapped_output)
{
static uns asort_threads_ready;
static struct worker_pool asort_thread_pool;
+static uns
+rs_estimate_stack(void)
+{
+  // Stack space needed by the recursive radix-sorter
+  // Each recursion level keeps one counter array of 2^CONFIG_UCW_RADIX_SORTER_BITS
+  // uns entries on the stack; the recursion depth is bounded by the number of
+  // radix passes over the hash (presumably at most 64 bits wide — hence the 64),
+  // plus one extra level for slack.
+ uns ctrsize = sizeof(uns) * (1 << CONFIG_UCW_RADIX_SORTER_BITS);
+ uns maxdepth = (64 / CONFIG_UCW_RADIX_SORTER_BITS) + 1;
+ return ctrsize * maxdepth;
+}
+
void
asort_start_threads(uns run)
{
asort_threads_use_count++;
if (run && !asort_threads_ready)
{
- SORT_XTRACE(2, "Initializing thread pool (%d threads)", sorter_threads);
+ // XXX: If somebody overrides the radix-sorter parameters to insane values,
+ // they should also override the stack size to correspondingly large values.
+ asort_thread_pool.stack_size = default_thread_stack_size + rs_estimate_stack();
asort_thread_pool.num_threads = sorter_threads;
+ ASORT_TRACE("Initializing thread pool (%d threads, %dK stack)", sorter_threads, asort_thread_pool.stack_size >> 10);
worker_pool_init(&asort_thread_pool);
asort_threads_ready = 1;
}
ucwlib_lock();
if (!--asort_threads_use_count && asort_threads_ready)
{
- SORT_XTRACE(2, "Shutting down thread pool");
+ ASORT_TRACE("Shutting down thread pool");
worker_pool_cleanup(&asort_thread_pool);
asort_threads_ready = 0;
}
uns n = cnt[i] - pos;
if (!n)
continue;
- if (n * ctx->elt_size < sorter_thread_threshold)
+ if (n * ctx->elt_size < sorter_thread_threshold || shift < ASORT_MIN_SHIFT)
{
struct rs_work *w = ep_alloc(ctx->eltpool);
w->w.priority = 0;
void
asort_run(struct asort_context *ctx)
{
- SORT_XTRACE(10, "Array-sorting %d items per %d bytes, hash_bits=%d", ctx->num_elts, ctx->elt_size, ctx->hash_bits);
+ ASORT_TRACE("Array-sorting %d items per %d bytes, hash_bits=%d", ctx->num_elts, ctx->elt_size, ctx->hash_bits);
uns allow_threads UNUSED = (sorter_threads > 1 &&
ctx->num_elts * ctx->elt_size >= sorter_thread_threshold &&
!(sorter_debug & SORT_DEBUG_ASORT_NO_THREADS));
#ifdef CONFIG_UCW_THREADS
if (allow_threads)
{
- SORT_XTRACE(12, "Decided to use parallel quicksort");
+ ASORT_XTRACE(2, "Decided to use parallel quicksort");
threaded_quicksort(ctx);
- return;
}
+ else
#endif
- SORT_XTRACE(12, "Decided to use sequential quicksort");
- ctx->quicksort(ctx->array, ctx->num_elts);
+ {
+ ASORT_XTRACE(2, "Decided to use sequential quicksort");
+ ctx->quicksort(ctx->array, ctx->num_elts);
+ }
}
else
{
#ifdef CONFIG_UCW_THREADS
if (allow_threads)
{
- SORT_XTRACE(12, "Decided to use parallel radix-sort (swap=%d)", swap);
+ ASORT_XTRACE(2, "Decided to use parallel radix-sort (swap=%d)", swap);
threaded_radixsort(ctx, swap);
return;
}
+ else
#endif
- SORT_XTRACE(12, "Decided to use sequential radix-sort (swap=%d)", swap);
- asort_radix(ctx, ctx->array, ctx->buffer, ctx->num_elts, ctx->hash_bits, swap);
+ {
+ ASORT_XTRACE(2, "Decided to use sequential radix-sort (swap=%d)", swap);
+ asort_radix(ctx, ctx->array, ctx->buffer, ctx->num_elts, ctx->hash_bits, swap);
+ }
if (swap)
ctx->array = ctx->buffer;
}
- SORT_XTRACE(11, "Array-sort finished");
+ ASORT_XTRACE(2, "Array-sort finished");
}