static int reported[64];
if (!reported[hash_bits]++)
#endif
- DBG(">>> n=%d h=%d s=%d sw=%d", num_elts, hash_bits, shift, swapped_output);
+ DBG(">>> n=%u h=%d s=%d sw=%d", num_elts, hash_bits, shift, swapped_output);
bzero(cnt, sizeof(cnt));
ctx->radix_count(array, num_elts, cnt, shift);
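The specifier changes above matter because the printed counts are of the unsigned `uns` type: once a count exceeds INT_MAX, printing it with %d yields a misleading (typically negative) number in the trace, and GCC's -Wformat-signedness can flag the mismatch. A minimal standalone sketch of the effect, not part of the patch:

/* Illustrative only, not part of the patch: why %u is the right conversion
 * for unsigned counts. With a count above INT_MAX, %d reinterprets the
 * value as a negative int on common platforms, while %u prints it as
 * intended. */
#include <stdio.h>

int main(void)
{
  unsigned num_elts = 3000000000u;          /* a count above INT_MAX */
  printf("with %%d: n=%d\n", num_elts);     /* typically prints -1294967296 */
  printf("with %%u: n=%u\n", num_elts);     /* prints 3000000000 */
  return 0;
}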
struct qs_work *w = (struct qs_work *) ww;
struct asort_context *ctx = w->ctx;
- DBG("Thread %d: got %d elts", thr->id, w->num_elts);
+ DBG("Thread %d: got %u elts", thr->id, w->num_elts);
if (w->num_elts < ctx->thread_threshold)
{
ctx->quicksort(w->array, w->num_elts);
}
else
ctx->quicksplit(w->array, w->num_elts, &w->left, &w->right);
- DBG("Thread %d: returning l=%d r=%d", thr->id, w->left, w->right);
+ DBG("Thread %d: returning l=%u r=%u", thr->id, w->left, w->right);
}
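For context on the l/r values printed above: quicksplit is expected to perform one partitioning pass over the chunk and report the boundaries of the two halves that still need sorting, so they can be handed to separate threads. Below is a rough standalone sketch over plain ints, using one common convention for the returned indices; it is not the library's implementation.

/* Illustrative sketch only (not the library's quicksplit): a single Hoare-style
 * partition over plain ints, assuming n >= 1. On return, elements [0, *right]
 * are <= pivot and elements [*left, n-1] are >= pivot, so the two ranges can
 * be sorted independently -- roughly what the l=%u r=%u trace above reports. */
#include <stdio.h>

static void quicksplit_ints(int *a, unsigned n, unsigned *left, unsigned *right)
{
  unsigned i = 0, j = n - 1;
  int pivot = a[n / 2];

  while (i <= j)
    {
      while (a[i] < pivot)
        i++;
      while (a[j] > pivot)
        j--;
      if (i <= j)
        {
          int tmp = a[i];
          a[i] = a[j];
          a[j] = tmp;
          i++;
          if (j == 0)                   /* guard against unsigned wrap-around */
            break;
          j--;
        }
    }
  *left = i;
  *right = j;
}

int main(void)
{
  int a[] = { 5, 2, 9, 1, 7, 3, 8, 4, 6, 0 };
  unsigned l, r;

  quicksplit_ints(a, 10, &l, &r);
  printf("l=%u r=%u\n", l, r);          /* here: l=4 r=3 */
  return 0;
}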
static struct qs_work *
{
struct rs_work *w = (struct rs_work *) ww;
- DBG("Thread %d: Counting %d items, shift=%d", thr->id, w->num_elts, w->shift);
+ DBG("Thread %d: Counting %u items, shift=%d", thr->id, w->num_elts, w->shift);
w->ctx->radix_count(w->array, w->num_elts, w->cnt, w->shift);
DBG("Thread %d: Counting done", thr->id);
}
{
struct rs_work *w = (struct rs_work *) ww;
- DBG("Thread %d: Splitting %d items, shift=%d", thr->id, w->num_elts, w->shift);
+ DBG("Thread %d: Splitting %u items, shift=%d", thr->id, w->num_elts, w->shift);
w->ctx->radix_split(w->array, w->buffer, w->num_elts, w->cnt, w->shift);
DBG("Thread %d: Splitting done", thr->id);
}
struct rs_work *w = (struct rs_work *) ww;
if (thr)
- DBG("Thread %d: Finishing %d items, shift=%d", thr->id, w->num_elts, w->shift);
+ DBG("Thread %d: Finishing %u items, shift=%d", thr->id, w->num_elts, w->shift);
if (w->shift < ASORT_MIN_SHIFT || w->num_elts < w->ctx->radix_threshold)
{
w->ctx->quicksort(w->array, w->num_elts);
while (w = (struct rs_work *) work_wait(ctx->rs_work_queue))
{
- DBG("Reaping small chunk of %d items", w->num_elts);
+ DBG("Reaping small chunk of %u items", w->num_elts);
ep_free(ctx->eltpool, w);
}
}
uns shift = (hash_bits > ctx->radix_bits) ? (hash_bits - ctx->radix_bits) : 0;
uns cnt[buckets];
uns blksize = num_elts / sorter_threads;
- DBG(">>> n=%d h=%d s=%d blk=%d sw=%d", num_elts, hash_bits, shift, blksize, swapped_output);
+ DBG(">>> n=%u h=%d s=%d blk=%u sw=%d", num_elts, hash_bits, shift, blksize, swapped_output);
// If there are any small chunks in progress, wait for them to finish
rs_wait_small(ctx);
w->swap_output = !swapped_output;
if (n < ctx->thread_chunk)
{
- DBG("Sorting block %d+%d inline", pos, n);
+ DBG("Sorting block %u+%u inline", pos, n);
rs_finish(NULL, &w->w);
ep_free(ctx->eltpool, w);
}
else
{
- DBG("Scheduling block %d+%d", pos, n);
+ DBG("Scheduling block %u+%u", pos, n);
work_submit(ctx->rs_work_queue, &w->w);
}
}
while (elts >= ctx->radix_threshold && bits >= ASORT_MIN_SHIFT)
{
- DBG("Predicting pass: %d elts, %d bits", elts, bits);
+ DBG("Predicting pass: %u elts, %d bits", elts, bits);
swap = !swap;
elts >>= ctx->radix_bits;
bits = MAX(bits, ctx->radix_bits) - ctx->radix_bits;
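The prediction loop above can be read as follows: each radix pass consumes radix_bits of the hash and is expected to shrink chunk sizes by a factor of 2^radix_bits, and passes are predicted until chunks fall below radix_threshold or fewer than ASORT_MIN_SHIFT bits remain, after which quicksort takes over; `swap` flips once per pass, here read as tracking whether the last split leaves the data in the original array or in the auxiliary buffer. A standalone sketch with assumed constants (not the library's defaults):

/* Illustrative sketch; radix_bits, radix_threshold and ASORT_MIN_SHIFT below
 * are assumptions for the example, not the library's configured values.
 * Mirrors the prediction loop: each pass strips radix_bits from the hash and
 * divides the expected chunk size by 2^radix_bits; `swap` records whether an
 * odd number of passes would leave the result in the auxiliary buffer. */
#include <stdio.h>

#define ASORT_MIN_SHIFT 11              /* assumed value */
#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
  unsigned radix_bits = 10;             /* assumed */
  unsigned radix_threshold = 5000;      /* assumed: min chunk size for radix */
  unsigned elts = 10000000;             /* 10M elements to sort */
  unsigned bits = 32;                   /* hash_bits */
  unsigned swap = 0, passes = 0;

  while (elts >= radix_threshold && bits >= ASORT_MIN_SHIFT)
    {
      swap = !swap;
      elts >>= radix_bits;
      bits = MAX(bits, radix_bits) - radix_bits;
      passes++;
    }
  printf("predicted %u radix passes, output %s\n",
         passes, swap ? "in the buffer" : "in the original array");
  return 0;
}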
ctx->thread_chunk = MIN(sorter_thread_chunk / ctx->elt_size, ~0U);
ctx->radix_threshold = MIN(sorter_radix_threshold / ctx->elt_size, ~0U);
- ASORT_TRACE("Array-sorting %d items per %d bytes, hash_bits=%d", ctx->num_elts, ctx->elt_size, ctx->hash_bits);
+ ASORT_TRACE("Array-sorting %u items per %u bytes, hash_bits=%d", ctx->num_elts, ctx->elt_size, ctx->hash_bits);
ASORT_XTRACE(2, "Limits: thread_threshold=%u, thread_chunk=%u, radix_threshold=%u",
ctx->thread_threshold, ctx->thread_chunk, ctx->radix_threshold);
uns allow_threads UNUSED = (sorter_threads > 1 &&