X-Git-Url: http://mj.ucw.cz/gitweb/?a=blobdiff_plain;f=lib%2Fsorter%2Farray.c;h=5bc60276d5046fc8fca71dd29dbd6f40f4e21258;hb=1a3cd3005f2a5cda8dbdaaf8f153ae5703845876;hp=2eae93edbbe09352ffdadafd8ad68efd13cd3c59;hpb=d02d92d8402023e96465e7f8edbabd70fe96df06;p=libucw.git

diff --git a/lib/sorter/array.c b/lib/sorter/array.c
index 2eae93ed..5bc60276 100644
--- a/lib/sorter/array.c
+++ b/lib/sorter/array.c
@@ -15,12 +15,12 @@
 #include <string.h>
 #include <alloca.h>
 
-#define ASORT_MIN_RADIX 5000		// FIXME: var?
 #define ASORT_MIN_SHIFT 2
 
 static void
 asort_radix(struct asort_context *ctx, void *array, void *buffer, uns num_elts, uns hash_bits, uns swapped_output)
 {
+  // swapped_output == 0 if result should be returned in `array', otherwise in `buffer'
   uns buckets = (1 << ctx->radix_bits);
   uns shift = (hash_bits > ctx->radix_bits) ? (hash_bits - ctx->radix_bits) : 0;
   uns cnt[buckets];
@@ -48,7 +48,7 @@ asort_radix(struct asort_context *ctx, void *array, void *buffer, uns num_elts,
   for (uns i=0; i<buckets; i++)
     {
       uns n = cnt[i] - pos;
-      if (n < ASORT_MIN_RADIX || shift < ASORT_MIN_SHIFT)
+      if (n * ctx->elt_size < sorter_radix_threshold || shift < ASORT_MIN_SHIFT)
 	{
 	  ctx->quicksort(buffer, n);
 	  if (!swapped_output)
@@ -214,16 +214,30 @@ rs_finish(struct worker_thread *thr UNUSED, struct work *ww)
 {
   struct rs_work *w = (struct rs_work *) ww;
 
-  DBG("Thread %d: Finishing %d items, shift=%d", thr->id, w->num_elts, w->shift);
-  if (w->shift < ASORT_MIN_SHIFT || w->num_elts < ASORT_MIN_RADIX)
+  if (thr)
+    DBG("Thread %d: Finishing %d items, shift=%d", thr->id, w->num_elts, w->shift);
+  if (w->shift < ASORT_MIN_SHIFT || w->num_elts * w->ctx->elt_size < sorter_radix_threshold)
     {
-      w->ctx->quicksort(w->out, w->num_elts);
-      if (!w->swap_output)
-	memcpy(w->in, w->out, w->num_elts * w->ctx->elt_size);
+      w->ctx->quicksort(w->in, w->num_elts);
+      if (w->swap_output)
+	memcpy(w->out, w->in, w->num_elts * w->ctx->elt_size);
     }
   else
-    asort_radix(w->ctx, w->out, w->in, w->num_elts, w->shift, !w->swap_output);
-  DBG("Thread %d: Finishing done", thr->id);
+    asort_radix(w->ctx, w->in, w->out, w->num_elts, w->shift, w->swap_output);
+  if (thr)
+    DBG("Thread %d: Finishing done", thr->id);
+}
+
+static void
+rs_wait_small(struct asort_context *ctx)
+{
+  struct rs_work *w;
+
+  while (w = (struct rs_work *) work_wait(ctx->rs_work_queue))
+    {
+      DBG("Reaping small chunk of %d items", w->num_elts);
+      ep_free(ctx->eltpool, w);
+    }
 }
 
 static void
@@ -235,6 +249,9 @@ rs_radix(struct asort_context *ctx, void *array, void *buffer, uns num_elts, uns
   uns blksize = num_elts / sorter_threads;
   DBG(">>> n=%d h=%d s=%d blk=%d sw=%d", num_elts, hash_bits, shift, blksize, swapped_output);
 
+  // If there are any small chunks in progress, wait for them to finish
+  rs_wait_small(ctx);
+
   // Start parallel counting
   void *iptr = array;
   for (uns i=0; i<sorter_threads; i++)
@@ -244,12 +261,13 @@ rs_radix(struct asort_context *ctx, void *array, void *buffer, uns num_elts, uns
       w->w.go = rs_count;
       w->ctx = ctx;
       w->in = iptr;
-      w->out = ctx->buffer;
+      w->out = buffer;
       w->num_elts = blksize;
       if (i == sorter_threads-1)
 	w->num_elts += num_elts % sorter_threads;
       w->shift = shift;
       iptr += w->num_elts * ctx->elt_size;
+      bzero(w->cnt, sizeof(uns) * buckets);
       work_submit(ctx->rs_work_queue, &w->w);
     }
 
@@ -297,19 +315,30 @@ rs_radix(struct asort_context *ctx, void *array, void *buffer, uns num_elts, uns
   for (uns i=0; i<buckets; i++)
     {
       uns n = cnt[i] - pos;
-      if (n < ASORT_MIN_RADIX)
+      // Small chunks are sorted either in the background or immediately;
+      // only the big ones are radix-split recursively
+      if (n * ctx->elt_size < sorter_thread_threshold)
 	{
 	  struct rs_work *w = ep_alloc(ctx->eltpool);
 	  w->w.priority = 0;
 	  w->w.go = rs_finish;
 	  w->ctx = ctx;
-	  w->in = array;
-	  w->out = buffer;
+	  w->in = buffer;
+	  w->out = array;
 	  w->num_elts = n;
 	  w->shift = shift;
-	  w->swap_output = swapped_output;
-	  clist_add_tail(&ctx->rs_bits, &w->w.n);
-	  DBG("Scheduling block %d+%d", pos, n);
+	  w->swap_output = !swapped_output;
+	  if (n * ctx->elt_size < sorter_thread_chunk)
+	    {
+	      DBG("Sorting block %d+%d inline", pos, n);
+	      rs_finish(NULL, &w->w);
+	      ep_free(ctx->eltpool, w);
+	    }
+	  else
+	    {
+	      DBG("Scheduling block %d+%d", pos, n);
+	      work_submit(ctx->rs_work_queue, &w->w);
+	    }
 	}
       else
 	rs_radix(ctx, buffer, array, n, shift, !swapped_output);
@@ -334,8 +363,7 @@ threaded_radixsort(struct asort_context *ctx)
   for (uns i=0; i<sorter_threads; i++)
     ctx->rs_works[i] = big_alloc(sizeof(struct rs_work) + sizeof(uns) * (1 << ctx->radix_bits));
 
-  // Prepare work structures for all remaining small bits which will be sorted later.
-  clist_init(&ctx->rs_bits);
+  // Prepare a pool for all remaining small bits which will be sorted on background.
   ctx->eltpool = ep_new(sizeof(struct rs_work), 1000);
 
   // Do the big splitting
@@ -345,12 +373,9 @@ threaded_radixsort(struct asort_context *ctx)
     big_free(ctx->rs_works[i], sizeof(struct rs_work) + sizeof(uns) * (1 << ctx->radix_bits));
 
   // Finish the small blocks
-  struct rs_work *w, *tmp;
-  CLIST_WALK_DELSAFE(w, ctx->rs_bits, tmp)
-    work_submit(&q, &w->w);
-  while (work_wait(&q))
-    ;
+  rs_wait_small(ctx);
 
+  ASSERT(!ctx->eltpool->num_allocated);
   ep_delete(ctx->eltpool);
   work_queue_cleanup(&q);
   asort_stop_threads();
@@ -371,7 +396,7 @@ asort_run(struct asort_context *ctx)
 			      ctx->num_elts * ctx->elt_size >= sorter_thread_threshold &&
 			      !(sorter_debug & SORT_DEBUG_ASORT_NO_THREADS));
 
-  if (ctx->num_elts < ASORT_MIN_RADIX ||
+  if (ctx->num_elts * ctx->elt_size < sorter_radix_threshold ||
       ctx->hash_bits <= ASORT_MIN_SHIFT ||
       !ctx->radix_split ||
       (sorter_debug & SORT_DEBUG_ASORT_NO_RADIX))
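
The patch above replaces the fixed ASORT_MIN_RADIX element count with the byte-based sorter_radix_threshold and teaches the parallel radix sorter to finish small chunks either inline (below sorter_thread_chunk) or in the background, reaped later by rs_wait_small(). For readers coming to the patch cold, here is a minimal standalone sketch of the underlying two-buffer radix split for 32-bit keys. It is an illustration only, not libucw code: RADIX_BITS, THRESHOLD_BYTES, radix_sort_rec and cmp_u32 are made-up names, qsort() stands in for ctx->quicksort(), and the swapped_output flag has the same meaning as the asort_radix() parameter above.

/* Sketch of a two-buffer radix split; assumed names, not the libucw API.
 * swapped_output == 0 if the result should end up in `array',
 * otherwise in `buffer' (mirrors asort_radix() in the patch). */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define RADIX_BITS 8			// one split produces 256 buckets
#define THRESHOLD_BYTES 4096		// small blocks fall back to qsort()

static int cmp_u32(const void *a, const void *b)
{
  uint32_t x = *(const uint32_t *)a, y = *(const uint32_t *)b;
  return (x > y) - (x < y);
}

static void radix_sort_rec(uint32_t *array, uint32_t *buffer, size_t n,
			   unsigned hash_bits, int swapped_output)
{
  size_t buckets = (size_t)1 << RADIX_BITS;
  unsigned shift = (hash_bits > RADIX_BITS) ? hash_bits - RADIX_BITS : 0;
  size_t cnt[1 << RADIX_BITS];

  memset(cnt, 0, sizeof(cnt));
  for (size_t i = 0; i < n; i++)	// counting pass
    cnt[(array[i] >> shift) & (buckets - 1)]++;

  size_t pos = 0;			// turn counts into start offsets
  for (size_t i = 0; i < buckets; i++)
    {
      size_t j = cnt[i];
      cnt[i] = pos;
      pos += j;
    }

  for (size_t i = 0; i < n; i++)	// split pass: array -> buffer
    buffer[cnt[(array[i] >> shift) & (buckets - 1)]++] = array[i];
  // cnt[i] now holds the end offset of bucket i, as in the patch

  pos = 0;
  for (size_t i = 0; i < buckets; i++)
    {
      size_t len = cnt[i] - pos;
      if (len * sizeof(uint32_t) < THRESHOLD_BYTES || shift == 0)
	{
	  // Small (or fully split) bucket: finish it by comparison sort
	  // and copy back only if the caller expects it in `array'
	  qsort(buffer + pos, len, sizeof(uint32_t), cmp_u32);
	  if (!swapped_output)
	    memcpy(array + pos, buffer + pos, len * sizeof(uint32_t));
	}
      else
	// Big bucket: recurse with the roles of the two arrays exchanged
	radix_sort_rec(buffer + pos, array + pos, len, shift, !swapped_output);
      pos = cnt[i];
    }
}

int main(void)
{
  enum { N = 100000 };
  uint32_t *a = malloc(N * sizeof(*a)), *b = malloc(N * sizeof(*b));
  if (!a || !b)
    return 1;
  for (size_t i = 0; i < N; i++)
    a[i] = (uint32_t)rand();
  radix_sort_rec(a, b, N, 32, 0);	// result lands in a[]
  for (size_t i = 1; i < N; i++)
    if (a[i-1] > a[i])
      {
	puts("FAILED");
	return 1;
      }
  puts("OK");
  free(a);
  free(b);
  return 0;
}

Note how each recursion level exchanges the roles of the two arrays instead of copying data back; that is exactly what the swapped_output parameter implements in the patch, and a copy is needed only when a block is finished by quicksort in the "wrong" array. Expressing the cut-offs in bytes rather than element counts, as the patch does with sorter_radix_threshold and sorter_thread_chunk, keeps the thresholds aligned with cache and scheduling costs independently of elt_size.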