/*
 *	UCW Library -- Optimized Array Sorter
 *
 *	(c) 2003--2007 Martin Mares <mj@ucw.cz>
 *
 *	This software may be freely distributed and used according to the terms
 *	of the GNU Lesser General Public License.
 */
#include <ucw/lib.h>
#include <ucw/sorter/common.h>

#include <string.h>
#include <alloca.h>
#define ASORT_MIN_SHIFT 2

#define ASORT_TRACE(x...) ASORT_XTRACE(1, x)
#define ASORT_XTRACE(level, x...) do { if (sorter_trace_array >= level) msg(L_DEBUG, x); } while(0)
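/*
 *  Sequential radix-sort pass: count the bucket sizes, turn them into bucket
 *  start positions, split the items from `array' into `buffer' and then sort
 *  each bucket, either by another recursive pass or by quicksort once the
 *  bucket is small enough or runs out of significant bits.
 */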
static void
asort_radix(struct asort_context *ctx, void *array, void *buffer, uint num_elts, uint hash_bits, uint swapped_output)
{
  // swapped_output == 0 if the result should be returned in `array', otherwise in `buffer'
  uint buckets = (1 << ctx->radix_bits);
  uint shift = (hash_bits > ctx->radix_bits) ? (hash_bits - ctx->radix_bits) : 0;
  uint cnt[buckets];

#if 0
  static int reported[64];
  if (!reported[hash_bits]++)
#endif
    DBG(">>> n=%u h=%d s=%d sw=%d", num_elts, hash_bits, shift, swapped_output);

  bzero(cnt, sizeof(cnt));
  ctx->radix_count(array, num_elts, cnt, shift);
  // Turn the bucket sizes into bucket start positions
  uint pos = 0;
  for (uint i=0; i<buckets; i++)
    {
      uint j = cnt[i];
      cnt[i] = pos;
      pos += j;
    }
  ASSERT(pos == num_elts);
  ctx->radix_split(array, buffer, num_elts, cnt, shift);
  pos = 0;
  for (uint i=0; i<buckets; i++)
    {
      uint n = cnt[i] - pos;
      if (n < ctx->radix_threshold || shift < ASORT_MIN_SHIFT)
	{
	  ctx->quicksort(buffer, n);
	  if (!swapped_output)
	    memcpy(array, buffer, n * ctx->elt_size);
	}
      else
	asort_radix(ctx, buffer, array, n, shift, !swapped_output);
      array += n * ctx->elt_size;
      buffer += n * ctx->elt_size;
      pos = cnt[i];
    }
}
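/*
 *  Multi-threaded back-ends. They are compiled in only when the library is
 *  built with CONFIG_UCW_THREADS. All of them share a single worker pool,
 *  which is started lazily and reference-counted by asort_start_threads()
 *  and asort_stop_threads().
 */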
#ifdef CONFIG_UCW_THREADS

#include <ucw/threads.h>
#include <ucw/workqueue.h>
#include <ucw/eltpool.h>

static uint asort_threads_use_count;
static uint asort_threads_ready;
static struct worker_pool asort_thread_pool;
static uint
rs_estimate_stack(void)
{
  // Stack space needed by the recursive radix-sorter
  uint ctrsize = sizeof(uint) * (1 << CONFIG_UCW_RADIX_SORTER_BITS);
  uint maxdepth = (64 / CONFIG_UCW_RADIX_SORTER_BITS) + 1;
  return ctrsize * maxdepth;
}
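/*
 *  For example, if CONFIG_UCW_RADIX_SORTER_BITS were 10 (an illustrative
 *  value, not necessarily the configured one), the counter array takes
 *  4 bytes * 1024 = 4 KB and the recursion is at most 64/10 + 1 = 7 levels
 *  deep, so roughly 28 KB of extra stack are reserved per worker thread.
 */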
void
asort_start_threads(uint run)
{
  ucwlib_lock();
  asort_threads_use_count++;
  if (run && !asort_threads_ready)
    {
      // XXX: If somebody overrides the radix-sorter parameters to insane values,
      // they should also override the thread stack size accordingly.
      asort_thread_pool.stack_size = ucwlib_thread_stack_size + rs_estimate_stack();
      asort_thread_pool.num_threads = sorter_threads;
      ASORT_TRACE("Initializing thread pool (%d threads, %dK stack)", sorter_threads, asort_thread_pool.stack_size >> 10);
      worker_pool_init(&asort_thread_pool);
      asort_threads_ready = 1;
    }
  ucwlib_unlock();
}
void
asort_stop_threads(void)
{
  ucwlib_lock();
  if (!--asort_threads_use_count && asort_threads_ready)
    {
      ASORT_TRACE("Shutting down thread pool");
      worker_pool_cleanup(&asort_thread_pool);
      asort_threads_ready = 0;
    }
  ucwlib_unlock();
}
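/*
 *  Parallel quicksort. Each unit of work (struct qs_work) describes one
 *  contiguous block of the array. A worker either sorts the block directly
 *  (small blocks) or partitions it with ctx->quicksplit() and reports the
 *  partition boundaries back in `left' and `right', so that the main loop
 *  can submit the two halves as new pieces of work.
 */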
struct qs_work {
  struct work w;
  struct asort_context *ctx;
  void *array;
  uint num_elts;
  int left, right;
#define LR_UNDEF -100
};
static void
qs_handle_work(struct worker_thread *thr UNUSED, struct work *ww)
{
  struct qs_work *w = (struct qs_work *) ww;
  struct asort_context *ctx = w->ctx;

  DBG("Thread %d: got %u elts", thr->id, w->num_elts);
  if (w->num_elts < ctx->thread_threshold)
    {
      ctx->quicksort(w->array, w->num_elts);
      w->left = w->right = LR_UNDEF;
    }
  else
    ctx->quicksplit(w->array, w->num_elts, &w->left, &w->right);
  DBG("Thread %d: returning l=%u r=%u", thr->id, w->left, w->right);
}
static struct qs_work *
qs_alloc_work(struct asort_context *ctx)
{
  struct qs_work *w = ep_alloc(ctx->eltpool);
  w->w.priority = 0;
  w->w.go = qs_handle_work;
  w->ctx = ctx;
  return w;
}
static void
threaded_quicksort(struct asort_context *ctx)
{
  struct work_queue q;
  struct qs_work *v, *w;

  asort_start_threads(1);
  work_queue_init(&asort_thread_pool, &q);
  ctx->eltpool = ep_new(sizeof(struct qs_work), 1000);

  // Submit the whole array as the first piece of work
  w = qs_alloc_work(ctx);
  w->array = ctx->array;
  w->num_elts = ctx->num_elts;
  work_submit(&q, &w->w);
  // Process finished pieces and schedule the sub-partitions produced by quicksplit
  while (v = (struct qs_work *) work_wait(&q))
    {
      if (v->left != LR_UNDEF)
	{
	  if (v->right > 0)
	    {
	      w = qs_alloc_work(ctx);
	      w->array = v->array;
	      w->num_elts = v->right + 1;
	      w->w.priority = v->w.priority + 1;
	      work_submit(&q, &w->w);
	    }
	  if (v->left < (int)v->num_elts - 1)
	    {
	      w = qs_alloc_work(ctx);
	      w->array = v->array + v->left * ctx->elt_size;
	      w->num_elts = v->num_elts - v->left;
	      w->w.priority = v->w.priority + 1;
	      work_submit(&q, &w->w);
	    }
	}
      ep_free(ctx->eltpool, v);
    }

  ep_delete(ctx->eltpool);
  work_queue_cleanup(&q);
  asort_stop_threads();
}
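/*
 *  Parallel radix-sort. Every worker thread owns one pre-allocated struct
 *  rs_work with its private counter array (cnt[]), which is reused for the
 *  counting and splitting phases of each pass; small leftover blocks are
 *  finished by rs_finish() jobs allocated from an eltpool.
 */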
struct rs_work {
  struct work w;
  struct asort_context *ctx;
  void *array, *buffer;		// Like asort_radix().
  uint num_elts;
  uint shift;
  uint swap_output;
  uint cnt[0];			// Per-work bucket counters, allocated after the structure
};
static void
rs_count(struct worker_thread *thr UNUSED, struct work *ww)
{
  struct rs_work *w = (struct rs_work *) ww;

  DBG("Thread %d: Counting %u items, shift=%d", thr->id, w->num_elts, w->shift);
  w->ctx->radix_count(w->array, w->num_elts, w->cnt, w->shift);
  DBG("Thread %d: Counting done", thr->id);
}
static void
rs_split(struct worker_thread *thr UNUSED, struct work *ww)
{
  struct rs_work *w = (struct rs_work *) ww;

  DBG("Thread %d: Splitting %u items, shift=%d", thr->id, w->num_elts, w->shift);
  w->ctx->radix_split(w->array, w->buffer, w->num_elts, w->cnt, w->shift);
  DBG("Thread %d: Splitting done", thr->id);
}
static void
rs_finish(struct worker_thread *thr UNUSED, struct work *ww)
{
  struct rs_work *w = (struct rs_work *) ww;

  if (thr)
    DBG("Thread %d: Finishing %u items, shift=%d", thr->id, w->num_elts, w->shift);
  if (w->shift < ASORT_MIN_SHIFT || w->num_elts < w->ctx->radix_threshold)
    {
      w->ctx->quicksort(w->array, w->num_elts);
      if (w->swap_output)
	memcpy(w->buffer, w->array, w->num_elts * w->ctx->elt_size);
    }
  else
    asort_radix(w->ctx, w->array, w->buffer, w->num_elts, w->shift, w->swap_output);
  if (thr)
    DBG("Thread %d: Finishing done", thr->id);
}
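/*
 *  Drain the work queue of the small "finish" jobs scheduled by a previous
 *  rs_radix() pass, returning their work structures to the pool.
 */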
static void
rs_wait_small(struct asort_context *ctx)
{
  struct rs_work *w;

  while (w = (struct rs_work *) work_wait(ctx->rs_work_queue))
    {
      DBG("Reaping small chunk of %u items", w->num_elts);
      ep_free(ctx->eltpool, w);
    }
}
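/*
 *  One parallel radix-sort pass. The input is cut into one chunk per thread;
 *  the threads count bucket sizes in parallel, the counts are merged and
 *  turned into bucket start positions, the threads then split their chunks
 *  into the shared buffer in parallel, and finally each bucket is either
 *  recursed on, finished inline, or scheduled as a small background job.
 */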
static void
rs_radix(struct asort_context *ctx, void *array, void *buffer, uint num_elts, uint hash_bits, uint swapped_output)
{
  uint buckets = (1 << ctx->radix_bits);
  uint shift = (hash_bits > ctx->radix_bits) ? (hash_bits - ctx->radix_bits) : 0;
  uint cnt[buckets];
  uint blksize = num_elts / sorter_threads;
  DBG(">>> n=%u h=%d s=%d blk=%u sw=%d", num_elts, hash_bits, shift, blksize, swapped_output);

  // If there are any small chunks in progress, wait for them to finish
  rs_wait_small(ctx);

  // Start parallel counting
  void *iptr = array;
  for (uint i=0; i<sorter_threads; i++)
    {
      struct rs_work *w = ctx->rs_works[i];
      w->w.priority = 0;
      w->w.go = rs_count;
      w->ctx = ctx;
      w->array = iptr;
      w->buffer = buffer;
      w->num_elts = blksize;
      if (i == sorter_threads-1)
	w->num_elts += num_elts % sorter_threads;
      w->shift = shift;
      iptr += w->num_elts * ctx->elt_size;
      bzero(w->cnt, sizeof(uint) * buckets);
      work_submit(ctx->rs_work_queue, &w->w);
    }
  // Get bucket sizes from the counts
  bzero(cnt, sizeof(cnt));
  for (uint i=0; i<sorter_threads; i++)
    {
      struct rs_work *w = (struct rs_work *) work_wait(ctx->rs_work_queue);
      ASSERT(w);
      for (uint j=0; j<buckets; j++)
	cnt[j] += w->cnt[j];
    }

  // Calculate bucket starts
  uint pos = 0;
  for (uint i=0; i<buckets; i++)
    {
      uint j = cnt[i];
      cnt[i] = pos;
      pos += j;
    }
  ASSERT(pos == num_elts);
  // Start parallel splitting: give each thread its own starting offsets inside the buckets
  for (uint i=0; i<sorter_threads; i++)
    {
      struct rs_work *w = ctx->rs_works[i];
      w->w.go = rs_split;
      for (uint j=0; j<buckets; j++)
	{
	  uint k = w->cnt[j];
	  w->cnt[j] = cnt[j];
	  cnt[j] += k;
	}
      work_submit(ctx->rs_work_queue, &w->w);
    }
  ASSERT(cnt[buckets-1] == num_elts);

  // Wait for splits to finish
  while (work_wait(ctx->rs_work_queue))
    ;
  // Recurse on buckets
  pos = 0;
  for (uint i=0; i<buckets; i++)
    {
      uint n = cnt[i] - pos;
      if (!n)
	continue;
      if (n < ctx->thread_threshold || shift < ASORT_MIN_SHIFT)
	{
	  struct rs_work *w = ep_alloc(ctx->eltpool);
	  w->w.priority = 0;
	  w->w.go = rs_finish;
	  w->ctx = ctx;
	  w->array = buffer;
	  w->buffer = array;
	  w->num_elts = n;
	  w->shift = shift;
	  w->swap_output = !swapped_output;
	  if (n < ctx->thread_chunk)
	    {
	      DBG("Sorting block %u+%u inline", pos, n);
	      rs_finish(NULL, &w->w);
	      ep_free(ctx->eltpool, w);
	    }
	  else
	    {
	      DBG("Scheduling block %u+%u", pos, n);
	      work_submit(ctx->rs_work_queue, &w->w);
	    }
	}
      else
	rs_radix(ctx, buffer, array, n, shift, !swapped_output);
      pos = cnt[i];
      array += n * ctx->elt_size;
      buffer += n * ctx->elt_size;
    }
}
static void
threaded_radixsort(struct asort_context *ctx, uint swap)
{
  struct work_queue q;

  asort_start_threads(1);
  work_queue_init(&asort_thread_pool, &q);

  // Prepare work structures for counting and splitting.
  // We use big_alloc(), because we want to avoid cache-line aliasing between threads.
  ctx->rs_work_queue = &q;
  ctx->rs_works = alloca(sizeof(struct rs_work *) * sorter_threads);
  for (uint i=0; i<sorter_threads; i++)
    ctx->rs_works[i] = big_alloc(sizeof(struct rs_work) + sizeof(uint) * (1 << ctx->radix_bits));

  // Prepare a pool for all remaining small bits which will be sorted in the background.
  ctx->eltpool = ep_new(sizeof(struct rs_work), 1000);

  // Do the big splitting
  rs_radix(ctx, ctx->array, ctx->buffer, ctx->num_elts, ctx->hash_bits, swap);
  for (uint i=0; i<sorter_threads; i++)
    big_free(ctx->rs_works[i], sizeof(struct rs_work) + sizeof(uint) * (1 << ctx->radix_bits));

  // Finish the small blocks
  rs_wait_small(ctx);

  ASSERT(!ctx->eltpool->num_allocated);
  ep_delete(ctx->eltpool);
  work_queue_cleanup(&q);
  asort_stop_threads();
}
#else

void asort_start_threads(uint run UNUSED) { }
void asort_stop_threads(void) { }

#endif
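/*
 *  Predict whether the radix sort will perform an odd or even number of
 *  passes, i.e., whether the sorted data will finally land in ctx->array
 *  or in ctx->buffer. Knowing this in advance avoids a final copy.
 */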
static uint
predict_swap(struct asort_context *ctx)
{
  uint bits = ctx->hash_bits;		// Significant key bits remaining; each pass consumes radix_bits of them
  uint elts = ctx->num_elts;
  uint swap = 0;

  while (elts >= ctx->radix_threshold && bits >= ASORT_MIN_SHIFT)
    {
      DBG("Predicting pass: %u elts, %d bits", elts, bits);
      swap = !swap;
      elts >>= ctx->radix_bits;
      bits = MAX(bits, ctx->radix_bits) - ctx->radix_bits;
    }
  return swap;
}
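/*
 *  A worked example with made-up numbers: with radix_bits=10, hash_bits=32,
 *  radix_threshold=5000 and 10,000,000 elements, the first pass leaves about
 *  10M >> 10 = 9765 elements per bucket with 22 bits remaining, the second
 *  pass leaves about 9 elements with 12 bits remaining, and the loop stops
 *  there. Two passes means swap == 0, so the result ends up back in the
 *  original array.
 */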
void
asort_run(struct asort_context *ctx)
{
  ctx->thread_threshold = MIN(sorter_thread_threshold / ctx->elt_size, ~0U);
  ctx->thread_chunk = MIN(sorter_thread_chunk / ctx->elt_size, ~0U);
  ctx->radix_threshold = MIN(sorter_radix_threshold / ctx->elt_size, ~0U);

  ASORT_TRACE("Array-sorting %u items per %u bytes, hash_bits=%d", ctx->num_elts, ctx->elt_size, ctx->hash_bits);
  ASORT_XTRACE(2, "Limits: thread_threshold=%u, thread_chunk=%u, radix_threshold=%u",
	ctx->thread_threshold, ctx->thread_chunk, ctx->radix_threshold);
  uint allow_threads UNUSED = (sorter_threads > 1 &&
			       ctx->num_elts >= ctx->thread_threshold &&
			       !(sorter_debug & SORT_DEBUG_ASORT_NO_THREADS));

  if (ctx->num_elts < ctx->radix_threshold ||
      ctx->hash_bits <= ASORT_MIN_SHIFT ||
      !ctx->radix_split ||
      (sorter_debug & SORT_DEBUG_ASORT_NO_RADIX))
    {
#ifdef CONFIG_UCW_THREADS
      if (allow_threads)
	{
	  ASORT_XTRACE(2, "Decided to use parallel quicksort");
	  threaded_quicksort(ctx);
	}
      else
#endif
	{
	  ASORT_XTRACE(2, "Decided to use sequential quicksort");
	  ctx->quicksort(ctx->array, ctx->num_elts);
	}
    }
  else
    {
      uint swap = predict_swap(ctx);
#ifdef CONFIG_UCW_THREADS
      if (allow_threads)
	{
	  ASORT_XTRACE(2, "Decided to use parallel radix-sort (swap=%d)", swap);
	  threaded_radixsort(ctx, swap);
	}
      else
#endif
	{
	  ASORT_XTRACE(2, "Decided to use sequential radix-sort (swap=%d)", swap);
	  asort_radix(ctx, ctx->array, ctx->buffer, ctx->num_elts, ctx->hash_bits, swap);
	}
      if (swap)
	ctx->array = ctx->buffer;
    }

  ASORT_XTRACE(2, "Array-sort finished");
}