/*
 *  UCW Library -- Optimized Array Sorter
 *
 *  (c) 2003--2007 Martin Mares <mj@ucw.cz>
 *
 *  This software may be freely distributed and used according to the terms
 *  of the GNU Lesser General Public License.
 */

#undef LOCAL_DEBUG

#include "lib/lib.h"
#include "lib/sorter/common.h"

#include <string.h>
#include <alloca.h>

#define ASORT_MIN_SHIFT 2

#define ASORT_TRACE(x...) ASORT_XTRACE(1, x)
#define ASORT_XTRACE(level, x...) do { if (sorter_trace_array >= level) msg(L_DEBUG, x); } while(0)

static void
asort_radix(struct asort_context *ctx, void *array, void *buffer, uns num_elts, uns hash_bits, uns swapped_output)
{
  // swapped_output == 0 if the result should be returned in `array', otherwise in `buffer'
  uns buckets = (1 << ctx->radix_bits);
  uns shift = (hash_bits > ctx->radix_bits) ? (hash_bits - ctx->radix_bits) : 0;
  uns cnt[buckets];

#if 0
  /* Debugging aid: report each hash_bits value only once */
  static int reported[64];
  if (!reported[hash_bits]++)
#endif
  DBG(">>> n=%d h=%d s=%d sw=%d", num_elts, hash_bits, shift, swapped_output);

  bzero(cnt, sizeof(cnt));
  ctx->radix_count(array, num_elts, cnt, shift);

  // Turn the per-bucket counts into bucket start offsets (exclusive prefix sum)
  uns pos = 0;
  for (uns i=0; i<buckets; i++)
    {
      uns j = cnt[i];
      cnt[i] = pos;
      pos += j;
    }
  ASSERT(pos == num_elts);
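  /*
   * An illustrative example (not an original comment): with 4 buckets and
   * per-bucket counts {3, 0, 2, 1}, the loop above rewrites cnt[] to the
   * start offsets {0, 3, 3, 5} and ends with pos == 6 == num_elts.
   * radix_split() then uses cnt[] as write cursors, so after the split
   * each cnt[i] points just past the end of bucket i.
   */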

  ctx->radix_split(array, buffer, num_elts, cnt, shift);
  pos = 0;
  for (uns i=0; i<buckets; i++)
    {
      uns n = cnt[i] - pos;
      pos = cnt[i];
      if (n * ctx->elt_size < sorter_radix_threshold || shift < ASORT_MIN_SHIFT)
        {
          // Small bucket: quicksort it where the split left it and copy it
          // back only if the caller expects the result in `array'
          ctx->quicksort(buffer, n);
          if (!swapped_output)
            memcpy(array, buffer, n * ctx->elt_size);
        }
      else
        asort_radix(ctx, buffer, array, n, shift, !swapped_output);
      array += n * ctx->elt_size;
      buffer += n * ctx->elt_size;
    }
}
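
/*
 * A sketch of the buffer ping-pong (my summary, not an original comment):
 * every radix pass splits the data from one of the two buffers into the
 * other, so each level of recursion swaps the roles of `array' and `buffer'
 * and negates swapped_output. For example, with hash_bits=32 and
 * radix_bits=10, successive passes split with shift = 22, 12 and 2; a bucket
 * that bottoms out after an odd number of passes has its data in the other
 * buffer, and the quicksort leaf above copies it back when needed.
 */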

#ifdef CONFIG_UCW_THREADS

#include "lib/threads.h"
#include "lib/workqueue.h"
#include "lib/eltpool.h"

static uns asort_threads_use_count;
static uns asort_threads_ready;
static struct worker_pool asort_thread_pool;

void
asort_start_threads(uns run)
{
  ucwlib_lock();
  asort_threads_use_count++;
  if (run && !asort_threads_ready)
    {
      ASORT_TRACE("Initializing thread pool (%d threads)", sorter_threads);
      asort_thread_pool.num_threads = sorter_threads;
      worker_pool_init(&asort_thread_pool);
      asort_threads_ready = 1;
    }
  ucwlib_unlock();
}

void
asort_stop_threads(void)
{
  ucwlib_lock();
  if (!--asort_threads_use_count && asort_threads_ready)
    {
      ASORT_TRACE("Shutting down thread pool");
      worker_pool_cleanup(&asort_thread_pool);
      asort_threads_ready = 0;
    }
  ucwlib_unlock();
}

struct qs_work {
  struct work w;
  struct asort_context *ctx;
  void *array;
  uns num_elts;
  int left, right;              // Output of quicksplit(); LR_UNDEF if the block was sorted directly
#define LR_UNDEF -100
};

static void
qs_handle_work(struct worker_thread *thr UNUSED, struct work *ww)
{
  struct qs_work *w = (struct qs_work *) ww;

  DBG("Thread %d: got %d elts", thr->id, w->num_elts);
  if (w->num_elts * w->ctx->elt_size < sorter_thread_threshold)
    {
      w->ctx->quicksort(w->array, w->num_elts);
      w->left = w->right = LR_UNDEF;
    }
  else
    w->ctx->quicksplit(w->array, w->num_elts, &w->left, &w->right);
  DBG("Thread %d: returning l=%d r=%d", thr->id, w->left, w->right);
}
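
/*
 * A note on the partition contract (my reading of the code, not an original
 * comment): quicksplit() is assumed to partition the block around a pivot in
 * the usual Hoare fashion, ending with `left' >= `right' and the sub-ranges
 * [0, right] and [left, num_elts-1] still unsorted; anything between the two
 * indices is already in its final position. threaded_quicksort() below turns
 * exactly those two sub-ranges into new work items.
 */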

static struct qs_work *
qs_alloc_work(struct asort_context *ctx)
{
  struct qs_work *w = ep_alloc(ctx->eltpool);
  w->w.priority = 0;
  w->w.go = qs_handle_work;
  w->ctx = ctx;
  return w;
}

static void
threaded_quicksort(struct asort_context *ctx)
{
  struct work_queue q;
  struct qs_work *v, *w;

  asort_start_threads(1);
  work_queue_init(&asort_thread_pool, &q);
  ctx->eltpool = ep_new(sizeof(struct qs_work), 1000);

  w = qs_alloc_work(ctx);
  w->array = ctx->array;
  w->num_elts = ctx->num_elts;
  work_submit(&q, &w->w);

  while (v = (struct qs_work *) work_wait(&q))
    {
      if (v->left != LR_UNDEF)
        {
          if (v->right > 0)
            {
              w = qs_alloc_work(ctx);
              w->array = v->array;
              w->num_elts = v->right + 1;
              w->w.priority = v->w.priority + 1;
              work_submit(&q, &w->w);
            }
          if (v->left < (int)v->num_elts - 1)
            {
              w = qs_alloc_work(ctx);
              w->array = v->array + v->left * ctx->elt_size;
              w->num_elts = v->num_elts - v->left;
              w->w.priority = v->w.priority + 1;
              work_submit(&q, &w->w);
            }
        }
      ep_free(ctx->eltpool, v);
    }

  ep_delete(ctx->eltpool);
  work_queue_cleanup(&q);
  asort_stop_threads();
}
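
/*
 * A scheduling note (my interpretation, not an original comment): every
 * sub-block is submitted with its parent's priority + 1, so if the work
 * queue prefers higher priorities, deeper (and therefore smaller) blocks
 * are processed first. This makes the traversal roughly depth-first and
 * keeps the number of simultaneously pending work items small.
 */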

struct rs_work {
  struct work w;
  struct asort_context *ctx;
  void *array, *buffer;         // Like asort_radix().
  uns num_elts;
  uns shift;
  uns swap_output;
  uns cnt[];                    // Per-thread bucket counters
};

static void
rs_count(struct worker_thread *thr UNUSED, struct work *ww)
{
  struct rs_work *w = (struct rs_work *) ww;

  DBG("Thread %d: Counting %d items, shift=%d", thr->id, w->num_elts, w->shift);
  w->ctx->radix_count(w->array, w->num_elts, w->cnt, w->shift);
  DBG("Thread %d: Counting done", thr->id);
}

static void
rs_split(struct worker_thread *thr UNUSED, struct work *ww)
{
  struct rs_work *w = (struct rs_work *) ww;

  DBG("Thread %d: Splitting %d items, shift=%d", thr->id, w->num_elts, w->shift);
  w->ctx->radix_split(w->array, w->buffer, w->num_elts, w->cnt, w->shift);
  DBG("Thread %d: Splitting done", thr->id);
}

static void
rs_finish(struct worker_thread *thr UNUSED, struct work *ww)
{
  struct rs_work *w = (struct rs_work *) ww;

  if (thr)      // Can be called inline with thr == NULL, see rs_radix()
    DBG("Thread %d: Finishing %d items, shift=%d", thr->id, w->num_elts, w->shift);
  if (w->shift < ASORT_MIN_SHIFT || w->num_elts * w->ctx->elt_size < sorter_radix_threshold)
    {
      w->ctx->quicksort(w->array, w->num_elts);
      if (w->swap_output)
        memcpy(w->buffer, w->array, w->num_elts * w->ctx->elt_size);
    }
  else
    asort_radix(w->ctx, w->array, w->buffer, w->num_elts, w->shift, w->swap_output);
  if (thr)
    DBG("Thread %d: Finishing done", thr->id);
}

static void
rs_wait_small(struct asort_context *ctx)
{
  struct rs_work *w;

  while (w = (struct rs_work *) work_wait(ctx->rs_work_queue))
    {
      DBG("Reaping small chunk of %d items", w->num_elts);
      ep_free(ctx->eltpool, w);
    }
}
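
/*
 * Lifecycle note (my summary, not an original comment): small chunks are
 * allocated from ctx->eltpool in rs_radix() and either finished inline (and
 * freed immediately) or submitted to the work queue. Every later rs_radix()
 * invocation and the final threaded_radixsort() call rs_wait_small() to
 * drain the queue and return the work items to the pool, which is why the
 * ASSERT on eltpool->num_allocated below must hold before the pool is
 * deleted.
 */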

static void
rs_radix(struct asort_context *ctx, void *array, void *buffer, uns num_elts, uns hash_bits, uns swapped_output)
{
  uns buckets = (1 << ctx->radix_bits);
  uns shift = (hash_bits > ctx->radix_bits) ? (hash_bits - ctx->radix_bits) : 0;
  uns cnt[buckets];
  uns blksize = num_elts / sorter_threads;
  DBG(">>> n=%d h=%d s=%d blk=%d sw=%d", num_elts, hash_bits, shift, blksize, swapped_output);

  // If there are any small chunks in progress, wait for them to finish
  rs_wait_small(ctx);

  // Start parallel counting
  void *iptr = array;
  for (uns i=0; i<sorter_threads; i++)
    {
      struct rs_work *w = ctx->rs_works[i];
      w->w.priority = 0;
      w->w.go = rs_count;
      w->ctx = ctx;
      w->array = iptr;
      w->buffer = buffer;
      w->num_elts = blksize;
      if (i == sorter_threads-1)
        w->num_elts += num_elts % sorter_threads;
      w->shift = shift;
      iptr += w->num_elts * ctx->elt_size;
      bzero(w->cnt, sizeof(uns) * buckets);
      work_submit(ctx->rs_work_queue, &w->w);
    }

  // Get bucket sizes from the counts
  bzero(cnt, sizeof(cnt));
  for (uns i=0; i<sorter_threads; i++)
    {
      struct rs_work *w = (struct rs_work *) work_wait(ctx->rs_work_queue);
      ASSERT(w);
      for (uns j=0; j<buckets; j++)
        cnt[j] += w->cnt[j];
    }

  // Calculate bucket starts
  uns pos = 0;
  for (uns i=0; i<buckets; i++)
    {
      uns j = cnt[i];
      cnt[i] = pos;
      pos += j;
    }
  ASSERT(pos == num_elts);

  // Start parallel splitting: rebase each thread's private counters to the
  // global bucket starts, so that the threads write to disjoint slices
  for (uns i=0; i<sorter_threads; i++)
    {
      struct rs_work *w = ctx->rs_works[i];
      w->w.go = rs_split;
      for (uns j=0; j<buckets; j++)
        {
          uns k = w->cnt[j];
          w->cnt[j] = cnt[j];
          cnt[j] += k;
        }
      work_submit(ctx->rs_work_queue, &w->w);
    }
  ASSERT(cnt[buckets-1] == num_elts);
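  /*
   * Illustrative example (not an original comment): suppose 2 threads and a
   * bucket b with global start cnt[b] == 10, where thread 0 counted 4
   * elements of b and thread 1 counted 3. The rebasing loop above gives
   * thread 0 the write cursor 10 and thread 1 the cursor 14, and leaves
   * cnt[b] == 17, i.e. the end of bucket b. The threads therefore split into
   * disjoint slices of the output without any locking, and cnt[] afterwards
   * holds bucket end positions, just like in asort_radix().
   */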

  // Wait for the splits to finish
  while (work_wait(ctx->rs_work_queue))
    ;

  // Recurse on buckets
  pos = 0;
  for (uns i=0; i<buckets; i++)
    {
      uns n = cnt[i] - pos;
      if (!n)
        continue;
      if (n * ctx->elt_size < sorter_thread_threshold)
        {
          struct rs_work *w = ep_alloc(ctx->eltpool);
          w->w.priority = 0;
          w->w.go = rs_finish;
          w->ctx = ctx;
          w->array = buffer;
          w->buffer = array;
          w->num_elts = n;
          w->shift = shift;
          w->swap_output = !swapped_output;
          if (n * ctx->elt_size < sorter_thread_chunk)
            {
              DBG("Sorting block %d+%d inline", pos, n);
              rs_finish(NULL, &w->w);
              ep_free(ctx->eltpool, w);
            }
          else
            {
              DBG("Scheduling block %d+%d", pos, n);
              work_submit(ctx->rs_work_queue, &w->w);
            }
        }
      else
        rs_radix(ctx, buffer, array, n, shift, !swapped_output);
      pos = cnt[i];
      array += n * ctx->elt_size;
      buffer += n * ctx->elt_size;
    }
}

static void
threaded_radixsort(struct asort_context *ctx, uns swap)
{
  struct work_queue q;

  asort_start_threads(1);
  work_queue_init(&asort_thread_pool, &q);

  // Prepare work structures for counting and splitting.
  // We use big_alloc(), because we want to avoid cacheline aliasing between threads.
  ctx->rs_work_queue = &q;
  ctx->rs_works = alloca(sizeof(struct rs_work *) * sorter_threads);
  for (uns i=0; i<sorter_threads; i++)
    ctx->rs_works[i] = big_alloc(sizeof(struct rs_work) + sizeof(uns) * (1 << ctx->radix_bits));

  // Prepare a pool for all remaining small bits which will be sorted in the background.
  ctx->eltpool = ep_new(sizeof(struct rs_work), 1000);

  // Do the big splitting
  rs_radix(ctx, ctx->array, ctx->buffer, ctx->num_elts, ctx->hash_bits, swap);
  for (uns i=0; i<sorter_threads; i++)
    big_free(ctx->rs_works[i], sizeof(struct rs_work) + sizeof(uns) * (1 << ctx->radix_bits));

  // Finish the small blocks
  rs_wait_small(ctx);

  ASSERT(!ctx->eltpool->num_allocated);
  ep_delete(ctx->eltpool);
  work_queue_cleanup(&q);
  asort_stop_threads();
}

#else

void asort_start_threads(uns run UNUSED) { }
void asort_stop_threads(void) { }

#endif

static uns
predict_swap(struct asort_context *ctx)
{
  // Simulate the recursion of asort_radix() to predict whether the sorted
  // data will end up in ctx->array (0) or in ctx->buffer (1)
  uns bits = ctx->hash_bits;
  uns elts = ctx->num_elts;
  uns swap = 0;

  while (elts * ctx->elt_size >= sorter_radix_threshold && bits >= ASORT_MIN_SHIFT)
    {
      DBG("Predicting pass: %d elts, %d bits", elts, bits);
      swap = !swap;
      elts >>= ctx->radix_bits;
      bits = MAX(bits, ctx->radix_bits) - ctx->radix_bits;
    }
  return swap;
}
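
/*
 * Illustrative trace (not an original comment): with 1M elements of 4 bytes,
 * hash_bits=32, radix_bits=10 and sorter_radix_threshold=4096, the loop
 * simulates pass 1 (1M elts, 32 bits) and pass 2 (1024 elts, 22 bits); the
 * third level (a typical bucket of 1 elt) falls under the threshold, so
 * predict_swap() returns 0 and the result is expected back in ctx->array.
 * Individual buckets may bottom out earlier or later; the leaves honor
 * swapped_output so that every bucket really lands in the predicted buffer.
 */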

void
asort_run(struct asort_context *ctx)
{
  ASORT_TRACE("Array-sorting %d items per %d bytes, hash_bits=%d", ctx->num_elts, ctx->elt_size, ctx->hash_bits);
  uns allow_threads UNUSED = (sorter_threads > 1 &&
                              ctx->num_elts * ctx->elt_size >= sorter_thread_threshold &&
                              !(sorter_debug & SORT_DEBUG_ASORT_NO_THREADS));

  if (ctx->num_elts * ctx->elt_size < sorter_radix_threshold ||
      ctx->hash_bits <= ASORT_MIN_SHIFT ||
      !ctx->radix_split ||
      (sorter_debug & SORT_DEBUG_ASORT_NO_RADIX))
    {
#ifdef CONFIG_UCW_THREADS
      if (allow_threads)
        {
          ASORT_XTRACE(2, "Decided to use parallel quicksort");
          threaded_quicksort(ctx);
        }
      else
#endif
        {
          ASORT_XTRACE(2, "Decided to use sequential quicksort");
          ctx->quicksort(ctx->array, ctx->num_elts);
        }
    }
  else
    {
      uns swap = predict_swap(ctx);
#ifdef CONFIG_UCW_THREADS
      if (allow_threads)
        {
          ASORT_XTRACE(2, "Decided to use parallel radix-sort (swap=%d)", swap);
          threaded_radixsort(ctx, swap);
        }
      else
#endif
        {
          ASORT_XTRACE(2, "Decided to use sequential radix-sort (swap=%d)", swap);
          asort_radix(ctx, ctx->array, ctx->buffer, ctx->num_elts, ctx->hash_bits, swap);
        }
      if (swap)         // The sorted data ended up in the other buffer
        ctx->array = ctx->buffer;
    }
  ASORT_XTRACE(2, "Array-sort finished");
}
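
/*
 * A hedged usage sketch (not part of the original file): callers normally do
 * not fill in struct asort_context by hand. Instead, they include
 * lib/sorter/array.h with ASORT_PREFIX and friends defined, which generates
 * the quicksort/quicksplit/radix_count/radix_split callbacks and calls
 * asort_run(). Conceptually, a direct invocation would look like this (the
 * my_* callbacks are hypothetical stand-ins for the generated functions):
 *
 *   struct asort_context ctx = {
 *     .array = keys,              // the items to sort
 *     .buffer = tmp,              // scratch buffer of the same size
 *     .num_elts = n,
 *     .elt_size = sizeof(u32),
 *     .hash_bits = 32,            // significant bits of the key/hash
 *     .radix_bits = 10,           // width of one radix pass
 *     .quicksort = my_quicksort,
 *     .quicksplit = my_quicksplit,
 *     .radix_count = my_radix_count,
 *     .radix_split = my_radix_split,
 *   };
 *   asort_run(&ctx);
 *   // Sorted data is in ctx.array, which may now point to `tmp'.
 */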