diff --git a/lib/sorter/array.c b/lib/sorter/array.c
index 2eae93ed..43b02fab 100644
--- a/lib/sorter/array.c
+++ b/lib/sorter/array.c
@@ -15,12 +15,15 @@
 #include <string.h>
 #include <alloca.h>
 
-#define ASORT_MIN_RADIX 5000	// FIXME: var?
 #define ASORT_MIN_SHIFT 2
 
+#define ASORT_TRACE(x...) ASORT_XTRACE(1, x)
+#define ASORT_XTRACE(level, x...) do { if (sorter_trace_array >= level) msg(L_DEBUG, x); } while(0)
+
 static void
 asort_radix(struct asort_context *ctx, void *array, void *buffer, uns num_elts, uns hash_bits, uns swapped_output)
 {
+  // swapped_output == 0 if result should be returned in `array', otherwise in `buffer'
   uns buckets = (1 << ctx->radix_bits);
   uns shift = (hash_bits > ctx->radix_bits) ? (hash_bits - ctx->radix_bits) : 0;
   uns cnt[buckets];
@@ -29,7 +32,7 @@ asort_radix(struct asort_context *ctx, void *array, void *buffer, uns num_elts,
   static int reported[64];
   if (!reported[hash_bits]++)
 #endif
-  DBG(">>> n=%d h=%d s=%d sw=%d", num_elts, hash_bits, shift, swapped_output);
+  DBG(">>> n=%u h=%d s=%d sw=%d", num_elts, hash_bits, shift, swapped_output);
 
   bzero(cnt, sizeof(cnt));
   ctx->radix_count(array, num_elts, cnt, shift);
@@ -48,7 +51,7 @@ asort_radix(struct asort_context *ctx, void *array, void *buffer, uns num_elts,
   for (uns i=0; i<buckets; i++)
     {
       uns n = cnt[i] - pos;
-      if (n < ASORT_MIN_RADIX || shift < ASORT_MIN_SHIFT)
+      if (n < ctx->radix_threshold || shift < ASORT_MIN_SHIFT)
 	{
 	  ctx->quicksort(buffer, n);
 	  if (!swapped_output)
@@ -72,6 +75,15 @@ static uns asort_threads_use_count;
 static uns asort_threads_ready;
 static struct worker_pool asort_thread_pool;
 
+static uns
+rs_estimate_stack(void)
+{
+  // Stack space needed by the recursive radix-sorter
+  uns ctrsize = sizeof(uns) * (1 << CONFIG_UCW_RADIX_SORTER_BITS);
+  uns maxdepth = (64 / CONFIG_UCW_RADIX_SORTER_BITS) + 1;
+  return ctrsize * maxdepth;
+}
+
 void
 asort_start_threads(uns run)
 {
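(Aside: a quick sanity check of the estimate above. This is a standalone sketch, not part of the patch; CONFIG_UCW_RADIX_SORTER_BITS is assumed to be 10 here, a plausible value that this diff does not state.)

#include <stdio.h>

#define CONFIG_UCW_RADIX_SORTER_BITS 10   /* assumed value, configuration-dependent */

int main(void)
{
  unsigned ctrsize = sizeof(unsigned) * (1 << CONFIG_UCW_RADIX_SORTER_BITS);
  unsigned maxdepth = 64 / CONFIG_UCW_RADIX_SORTER_BITS + 1;
  /* Each recursion level keeps one cnt[] array on the stack and a 64-bit
   * hash needs at most 64/radix_bits + 1 levels, hence the extra headroom
   * added to the default stack size below. */
  printf("counter array: %u bytes, depth: %u, extra stack: %u bytes\n",
         ctrsize, maxdepth, ctrsize * maxdepth);   /* 4096, 7, 28672 */
  return 0;
}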
@@ -79,8 +91,11 @@ asort_start_threads(uns run)
   asort_threads_use_count++;
   if (run && !asort_threads_ready)
     {
-      SORT_XTRACE(2, "Initializing thread pool (%d threads)", sorter_threads);
+      // XXX: If somebody overrides the radix-sorter parameters to insane values,
+      // they should also override the stack size to insane values.
+      asort_thread_pool.stack_size = default_thread_stack_size + rs_estimate_stack();
       asort_thread_pool.num_threads = sorter_threads;
+      ASORT_TRACE("Initializing thread pool (%d threads, %dK stack)", sorter_threads, asort_thread_pool.stack_size >> 10);
       worker_pool_init(&asort_thread_pool);
       asort_threads_ready = 1;
     }
@@ -93,7 +108,7 @@ asort_stop_threads(void)
   ucwlib_lock();
   if (!--asort_threads_use_count && asort_threads_ready)
     {
-      SORT_XTRACE(2, "Shutting down thread pool");
+      ASORT_TRACE("Shutting down thread pool");
       worker_pool_cleanup(&asort_thread_pool);
       asort_threads_ready = 0;
     }
@@ -113,16 +128,17 @@ static void
 qs_handle_work(struct worker_thread *thr UNUSED, struct work *ww)
 {
   struct qs_work *w = (struct qs_work *) ww;
+  struct asort_context *ctx = w->ctx;
 
-  DBG("Thread %d: got %d elts", thr->id, w->num_elts);
-  if (w->num_elts * w->ctx->elt_size < sorter_thread_threshold)
+  DBG("Thread %d: got %u elts", thr->id, w->num_elts);
+  if (w->num_elts < ctx->thread_threshold)
     {
-      w->ctx->quicksort(w->array, w->num_elts);
+      ctx->quicksort(w->array, w->num_elts);
       w->left = w->right = LR_UNDEF;
     }
   else
-    w->ctx->quicksplit(w->array, w->num_elts, &w->left, &w->right);
-  DBG("Thread %d: returning l=%d r=%d", thr->id, w->left, w->right);
+    ctx->quicksplit(w->array, w->num_elts, &w->left, &w->right);
+  DBG("Thread %d: returning l=%u r=%u", thr->id, w->left, w->right);
 }
 
 static struct qs_work *
@@ -182,7 +198,7 @@ threaded_quicksort(struct asort_context *ctx)
 struct rs_work {
   struct work w;
   struct asort_context *ctx;
-  void *in, *out;
+  void *array, *buffer;		// Like asort_radix().
   uns num_elts;
   uns shift;
   uns swap_output;
@@ -194,8 +210,8 @@ rs_count(struct worker_thread *thr UNUSED, struct work *ww)
 {
   struct rs_work *w = (struct rs_work *) ww;
 
-  DBG("Thread %d: Counting %d items, shift=%d", thr->id, w->num_elts, w->shift);
-  w->ctx->radix_count(w->in, w->num_elts, w->cnt, w->shift);
+  DBG("Thread %d: Counting %u items, shift=%d", thr->id, w->num_elts, w->shift);
+  w->ctx->radix_count(w->array, w->num_elts, w->cnt, w->shift);
   DBG("Thread %d: Counting done", thr->id);
 }
 
@@ -204,8 +220,8 @@ rs_split(struct worker_thread *thr UNUSED, struct work *ww)
 {
   struct rs_work *w = (struct rs_work *) ww;
 
-  DBG("Thread %d: Splitting %d items, shift=%d", thr->id, w->num_elts, w->shift);
-  w->ctx->radix_split(w->in, w->out, w->num_elts, w->cnt, w->shift);
+  DBG("Thread %d: Splitting %u items, shift=%d", thr->id, w->num_elts, w->shift);
+  w->ctx->radix_split(w->array, w->buffer, w->num_elts, w->cnt, w->shift);
   DBG("Thread %d: Splitting done", thr->id);
 }
 
@@ -214,16 +230,30 @@ rs_finish(struct worker_thread *thr UNUSED, struct work *ww)
 {
   struct rs_work *w = (struct rs_work *) ww;
 
-  DBG("Thread %d: Finishing %d items, shift=%d", thr->id, w->num_elts, w->shift);
-  if (w->shift < ASORT_MIN_SHIFT || w->num_elts < ASORT_MIN_RADIX)
+  if (thr)
+    DBG("Thread %d: Finishing %u items, shift=%d", thr->id, w->num_elts, w->shift);
+  if (w->shift < ASORT_MIN_SHIFT || w->num_elts < w->ctx->radix_threshold)
     {
-      w->ctx->quicksort(w->out, w->num_elts);
-      if (!w->swap_output)
-	memcpy(w->in, w->out, w->num_elts * w->ctx->elt_size);
+      w->ctx->quicksort(w->array, w->num_elts);
+      if (w->swap_output)
+	memcpy(w->buffer, w->array, w->num_elts * w->ctx->elt_size);
     }
   else
-    asort_radix(w->ctx, w->out, w->in, w->num_elts, w->shift, !w->swap_output);
-  DBG("Thread %d: Finishing done", thr->id);
+    asort_radix(w->ctx, w->array, w->buffer, w->num_elts, w->shift, w->swap_output);
+  if (thr)
+    DBG("Thread %d: Finishing done", thr->id);
+}
+
+static void
+rs_wait_small(struct asort_context *ctx)
+{
+  struct rs_work *w;
+
+  while (w = (struct rs_work *) work_wait(ctx->rs_work_queue))
+    {
+      DBG("Reaping small chunk of %u items", w->num_elts);
+      ep_free(ctx->eltpool, w);
+    }
 }
 
 static void
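(Aside: for readers who have not seen the generated radix_count/radix_split callbacks, here is a self-contained sketch of one radix pass over plain 32-bit keys, following the same count / prefix-sum / scatter structure. The helper below is illustrative only and is not part of libucw.)

#include <string.h>

#define RADIX_BITS 10
#define BUCKETS (1 << RADIX_BITS)

static void radix_pass(unsigned *array, unsigned *buffer, unsigned n, unsigned shift)
{
  unsigned cnt[BUCKETS];
  memset(cnt, 0, sizeof(cnt));

  /* counting phase: how many keys fall into each bucket (radix_count) */
  for (unsigned i = 0; i < n; i++)
    cnt[(array[i] >> shift) & (BUCKETS - 1)]++;

  /* turn counts into exclusive prefix sums = bucket start offsets */
  unsigned pos = 0;
  for (unsigned i = 0; i < BUCKETS; i++)
    {
      unsigned j = cnt[i];
      cnt[i] = pos;
      pos += j;
    }

  /* splitting phase: scatter each key into its bucket (radix_split) */
  for (unsigned i = 0; i < n; i++)
    buffer[cnt[(array[i] >> shift) & (BUCKETS - 1)]++] = array[i];
}

Afterwards `buffer' holds the keys grouped by the radix_bits below `shift', and each bucket is then handled recursively (or by quicksort once it is small enough), with the two arrays alternating roles on every pass.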
@@ -233,7 +263,10 @@ rs_radix(struct asort_context *ctx, void *array, void *buffer, uns num_elts, uns
   uns shift = (hash_bits > ctx->radix_bits) ? (hash_bits - ctx->radix_bits) : 0;
   uns cnt[buckets];
   uns blksize = num_elts / sorter_threads;
-  DBG(">>> n=%d h=%d s=%d blk=%d sw=%d", num_elts, hash_bits, shift, blksize, swapped_output);
+  DBG(">>> n=%u h=%d s=%d blk=%u sw=%d", num_elts, hash_bits, shift, blksize, swapped_output);
+
+  // If there are any small chunks in progress, wait for them to finish
+  rs_wait_small(ctx);
 
   // Start parallel counting
   void *iptr = array;
@@ -243,13 +276,14 @@ rs_radix(struct asort_context *ctx, void *array, void *buffer, uns num_elts, uns
       w->w.priority = 0;
       w->w.go = rs_count;
       w->ctx = ctx;
-      w->in = iptr;
-      w->out = ctx->buffer;
+      w->array = iptr;
+      w->buffer = buffer;
       w->num_elts = blksize;
       if (i == sorter_threads-1)
 	w->num_elts += num_elts % sorter_threads;
       w->shift = shift;
       iptr += w->num_elts * ctx->elt_size;
+      bzero(w->cnt, sizeof(uns) * buckets);
       work_submit(ctx->rs_work_queue, &w->w);
     }
 
@@ -297,19 +331,30 @@ rs_radix(struct asort_context *ctx, void *array, void *buffer, uns num_elts, uns
   for (uns i=0; i<buckets; i++)
     {
       uns n = cnt[i] - pos;
-      if (n < ASORT_MIN_RADIX || shift < ASORT_MIN_SHIFT)
+      if (n < ctx->thread_threshold || shift < ASORT_MIN_SHIFT)
 	{
 	  struct rs_work *w = ep_alloc(ctx->eltpool);
 	  w->w.priority = 0;
 	  w->w.go = rs_finish;
 	  w->ctx = ctx;
-	  w->in = array;
-	  w->out = buffer;
+	  w->array = buffer;
+	  w->buffer = array;
 	  w->num_elts = n;
 	  w->shift = shift;
-	  w->swap_output = swapped_output;
-	  clist_add_tail(&ctx->rs_bits, &w->w.n);
-	  DBG("Scheduling block %d+%d", pos, n);
+	  w->swap_output = !swapped_output;
+	  if (n < ctx->thread_chunk)
+	    {
+	      DBG("Sorting block %u+%u inline", pos, n);
+	      rs_finish(NULL, &w->w);
+	      ep_free(ctx->eltpool, w);
+	    }
+	  else
+	    {
+	      DBG("Scheduling block %u+%u", pos, n);
+	      work_submit(ctx->rs_work_queue, &w->w);
+	    }
 	}
       else
 	rs_radix(ctx, buffer, array, n, shift, !swapped_output);
@@ -320,7 +365,7 @@ rs_radix(struct asort_context *ctx, void *array, void *buffer, uns num_elts, uns
 }
 
 static void
-threaded_radixsort(struct asort_context *ctx)
+threaded_radixsort(struct asort_context *ctx, uns swap)
 {
   struct work_queue q;
 
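(Aside: between the parallel counting phase and the scheduling loop above, rs_radix merges the per-thread counters into global split offsets; that code is unchanged by this patch, so it does not appear in the diff. A sketch of the idea, with illustrative names only:)

/* After counting, thread_cnt[t][b] holds thread t's population of bucket b.
 * Assign each thread a disjoint output range per bucket, bucket-major, so
 * the splitting threads can scatter concurrently without locking. */
static void merge_counts(unsigned *thread_cnt[], unsigned threads, unsigned buckets)
{
  unsigned pos = 0;
  for (unsigned b = 0; b < buckets; b++)
    for (unsigned t = 0; t < threads; t++)
      {
        unsigned n = thread_cnt[t][b];
        thread_cnt[t][b] = pos;   /* thread t's write offset for bucket b */
        pos += n;
      }
}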
@@ -334,23 +379,18 @@ threaded_radixsort(struct asort_context *ctx)
   for (uns i=0; i<sorter_threads; i++)
     ctx->rs_works[i] = big_alloc(sizeof(struct rs_work) + sizeof(uns) * (1 << ctx->radix_bits));
 
-  // Prepare work structures for all remaining small bits which will be sorted later.
-  clist_init(&ctx->rs_bits);
+  // Prepare a pool for all remaining small blocks which will be sorted in the background.
   ctx->eltpool = ep_new(sizeof(struct rs_work), 1000);
 
   // Do the big splitting
-  // FIXME: Set the swap bit carefully.
-  rs_radix(ctx, ctx->array, ctx->buffer, ctx->num_elts, ctx->hash_bits, 0);
+  rs_radix(ctx, ctx->array, ctx->buffer, ctx->num_elts, ctx->hash_bits, swap);
 
   for (uns i=0; i<sorter_threads; i++)
     big_free(ctx->rs_works[i], sizeof(struct rs_work) + sizeof(uns) * (1 << ctx->radix_bits));
 
   // Finish the small blocks
-  struct rs_work *w, *tmp;
-  CLIST_WALK_DELSAFE(w, ctx->rs_bits, tmp)
-    work_submit(&q, &w->w);
-  while (work_wait(&q))
-    ;
+  rs_wait_small(ctx);
+  ASSERT(!ctx->eltpool->num_allocated);
   ep_delete(ctx->eltpool);
   work_queue_cleanup(&q);
   asort_stop_threads();
@@ -363,15 +403,38 @@ void asort_stop_threads(void) { }
 #endif
 
+static uns
+predict_swap(struct asort_context *ctx)
+{
+  uns bits = ctx->hash_bits;
+  uns elts = ctx->num_elts;
+  uns swap = 0;
+
+  while (elts >= ctx->radix_threshold && bits >= ASORT_MIN_SHIFT)
+    {
+      DBG("Predicting pass: %u elts, %d bits", elts, bits);
+      swap = !swap;
+      elts >>= ctx->radix_bits;
+      bits = MAX(bits, ctx->radix_bits) - ctx->radix_bits;
+    }
+  return swap;
+}
+
 void
 asort_run(struct asort_context *ctx)
 {
-  SORT_XTRACE(10, "Array-sorting %d items per %d bytes, hash_bits=%d", ctx->num_elts, ctx->elt_size, ctx->hash_bits);
+  ctx->thread_threshold = MIN(sorter_thread_threshold / ctx->elt_size, ~0U);
+  ctx->thread_chunk = MIN(sorter_thread_chunk / ctx->elt_size, ~0U);
+  ctx->radix_threshold = MIN(sorter_radix_threshold / ctx->elt_size, ~0U);
+
+  ASORT_TRACE("Array-sorting %u items per %u bytes, hash_bits=%d", ctx->num_elts, ctx->elt_size, ctx->hash_bits);
+  ASORT_XTRACE(2, "Limits: thread_threshold=%u, thread_chunk=%u, radix_threshold=%u",
+	       ctx->thread_threshold, ctx->thread_chunk, ctx->radix_threshold);
   uns allow_threads UNUSED = (sorter_threads > 1 &&
-			      ctx->num_elts * ctx->elt_size >= sorter_thread_threshold &&
+			      ctx->num_elts >= ctx->thread_threshold &&
 			      !(sorter_debug & SORT_DEBUG_ASORT_NO_THREADS));
 
-  if (ctx->num_elts < ASORT_MIN_RADIX ||
+  if (ctx->num_elts < ctx->radix_threshold ||
       ctx->hash_bits <= ASORT_MIN_SHIFT ||
       !ctx->radix_split ||
       (sorter_debug & SORT_DEBUG_ASORT_NO_RADIX))
@@ -379,28 +442,35 @@ asort_run(struct asort_context *ctx)
 #ifdef CONFIG_UCW_THREADS
       if (allow_threads)
 	{
-	  SORT_XTRACE(12, "Decided to use parallel quicksort");
+	  ASORT_XTRACE(2, "Decided to use parallel quicksort");
 	  threaded_quicksort(ctx);
-	  return;
 	}
+      else
 #endif
-      SORT_XTRACE(12, "Decided to use sequential quicksort");
-      ctx->quicksort(ctx->array, ctx->num_elts);
+	{
+	  ASORT_XTRACE(2, "Decided to use sequential quicksort");
+	  ctx->quicksort(ctx->array, ctx->num_elts);
+	}
     }
   else
     {
+      uns swap = predict_swap(ctx);
 #ifdef CONFIG_UCW_THREADS
      if (allow_threads)
	{
-	  SORT_XTRACE(12, "Decided to use parallel radix-sort");
-	  threaded_radixsort(ctx);
+	  ASORT_XTRACE(2, "Decided to use parallel radix-sort (swap=%d)", swap);
+	  threaded_radixsort(ctx, swap);
 	  return;
 	}
+      else
 #endif
-      SORT_XTRACE(12, "Decided to use sequential radix-sort");
-      // FIXME: select dest buffer
-      asort_radix(ctx, ctx->array, ctx->buffer, ctx->num_elts, ctx->hash_bits, 0);
+	{
+	  ASORT_XTRACE(2, "Decided to use sequential radix-sort (swap=%d)", swap);
+	  asort_radix(ctx, ctx->array, ctx->buffer, ctx->num_elts, ctx->hash_bits, swap);
+	}
+      if (swap)
+	ctx->array = ctx->buffer;
     }
-  SORT_XTRACE(11, "Array-sort finished");
+  ASORT_XTRACE(2, "Array-sort finished");
 }
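(Aside: predict_swap() replays the pass structure of the radix sort without touching the data. Every pass ping-pongs the elements between `array' and `buffer', so the final resting place depends only on the parity of the number of passes; asort_run then repoints ctx->array at whichever buffer won. A standalone replay with concrete numbers; all parameter values below are illustrative, the real thresholds are configuration-dependent.)

#include <stdio.h>

#define ASORT_MIN_SHIFT 2
#define MAX(a, b) ((a) > (b) ? (a) : (b))

static unsigned predict(unsigned elts, unsigned hash_bits,
                        unsigned radix_bits, unsigned radix_threshold)
{
  unsigned bits = hash_bits, swap = 0;
  while (elts >= radix_threshold && bits >= ASORT_MIN_SHIFT)
    {
      swap = !swap;          /* each pass flips array <-> buffer */
      elts >>= radix_bits;   /* expected bucket size after the pass */
      bits = MAX(bits, radix_bits) - radix_bits;
    }
  return swap;
}

int main(void)
{
  /* 1M elements, 32-bit hash, 10-bit passes, threshold 5000:
   * one pass, so the result lands in the buffer */
  printf("%u\n", predict(1000000, 32, 10, 5000));   /* prints 1 */
  /* 20M elements: two passes, so the result is back in the array */
  printf("%u\n", predict(20000000, 32, 10, 5000));  /* prints 0 */
  return 0;
}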