/*
 * UCW Library -- Optimized Array Sorter
 *
 * (c) 2003--2007 Martin Mares <mj@ucw.cz>
 *
 * This software may be freely distributed and used according to the terms
 * of the GNU Lesser General Public License.
 */

#undef LOCAL_DEBUG

#include "lib/lib.h"
#include "lib/sorter/common.h"

#include <string.h>
#include <alloca.h>

#define ASORT_MIN_SHIFT 2
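
/*
 * One pass of the sequential radix sort: count the bucket sizes, turn
 * them into bucket start offsets, split the items from `array' into
 * `buffer' according to the current radix digit, and recurse on each
 * bucket with the roles of the two arrays exchanged. Buckets which are
 * small enough are finished by quicksort instead.
 */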
static void
asort_radix(struct asort_context *ctx, void *array, void *buffer, uns num_elts, uns hash_bits, uns swapped_output)
{
  // swapped_output == 0 if the result should be returned in `array', otherwise in `buffer'
  uns buckets = (1 << ctx->radix_bits);
  uns shift = (hash_bits > ctx->radix_bits) ? (hash_bits - ctx->radix_bits) : 0;
  uns cnt[buckets];

#if 0
  static int reported[64];
  if (!reported[hash_bits]++)
#endif
  DBG(">>> n=%d h=%d s=%d sw=%d", num_elts, hash_bits, shift, swapped_output);

  bzero(cnt, sizeof(cnt));
  ctx->radix_count(array, num_elts, cnt, shift);

  uns pos = 0;
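  // Turn the bucket counts into bucket start offsets (exclusive prefix sum)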
  for (uns i=0; i<buckets; i++)
    {
      uns j = cnt[i];
      cnt[i] = pos;
      pos += j;
    }
  ASSERT(pos == num_elts);

  ctx->radix_split(array, buffer, num_elts, cnt, shift);
  pos = 0;
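  // After the split, cnt[i] holds the end position of the i-th bucket,
  // so walk the buckets and finish each of them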
  for (uns i=0; i<buckets; i++)
    {
      uns n = cnt[i] - pos;
      if (n < sorter_radix_threshold || shift < ASORT_MIN_SHIFT)
        {
          ctx->quicksort(buffer, n);
          if (!swapped_output)
            memcpy(array, buffer, n * ctx->elt_size);
        }
      else
        asort_radix(ctx, buffer, array, n, shift, !swapped_output);
      array += n * ctx->elt_size;
      buffer += n * ctx->elt_size;
      pos = cnt[i];
    }
}

#ifdef CONFIG_UCW_THREADS

#include "lib/threads.h"
#include "lib/workqueue.h"
#include "lib/eltpool.h"

static uns asort_threads_use_count;
static uns asort_threads_ready;
static struct worker_pool asort_thread_pool;
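
/*
 * The thread pool is shared by all array sorts in the process and
 * reference-counted: it is created when the first user asks for it
 * and torn down when the last user stops it.
 */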
void
asort_start_threads(uns run)
{
  ucwlib_lock();
  asort_threads_use_count++;
  if (run && !asort_threads_ready)
    {
      SORT_XTRACE(2, "Initializing thread pool (%d threads)", sorter_threads);
      asort_thread_pool.num_threads = sorter_threads;
      worker_pool_init(&asort_thread_pool);
      asort_threads_ready = 1;
    }
  ucwlib_unlock();
}

void
asort_stop_threads(void)
{
  ucwlib_lock();
  if (!--asort_threads_use_count && asort_threads_ready)
    {
      SORT_XTRACE(2, "Shutting down thread pool");
      worker_pool_cleanup(&asort_thread_pool);
      asort_threads_ready = 0;
    }
  ucwlib_unlock();
}
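
/*
 * Parallel quicksort: every work item either sorts its chunk directly
 * (if it is small enough) or quicksplits it around a pivot and submits
 * the two halves as new work items.
 */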

struct qs_work {
  struct work w;
  struct asort_context *ctx;
  void *array;
  uns num_elts;
  int left, right;
#define LR_UNDEF -100
};

static void
qs_handle_work(struct worker_thread *thr UNUSED, struct work *ww)
{
  struct qs_work *w = (struct qs_work *) ww;

  DBG("Thread %d: got %d elts", thr->id, w->num_elts);
  if (w->num_elts * w->ctx->elt_size < sorter_thread_threshold)
    {
      w->ctx->quicksort(w->array, w->num_elts);
      w->left = w->right = LR_UNDEF;
    }
  else
    w->ctx->quicksplit(w->array, w->num_elts, &w->left, &w->right);
  DBG("Thread %d: returning l=%d r=%d", thr->id, w->left, w->right);
}

static struct qs_work *
qs_alloc_work(struct asort_context *ctx)
{
  struct qs_work *w = ep_alloc(ctx->eltpool);
  w->w.priority = 0;
  w->w.go = qs_handle_work;
  w->ctx = ctx;
  return w;
}

static void
threaded_quicksort(struct asort_context *ctx)
{
  struct work_queue q;
  struct qs_work *v, *w;

  asort_start_threads(1);
  work_queue_init(&asort_thread_pool, &q);
  ctx->eltpool = ep_new(sizeof(struct qs_work), 1000);

  w = qs_alloc_work(ctx);
  w->array = ctx->array;
  w->num_elts = ctx->num_elts;
  work_submit(&q, &w->w);
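
  // Reap finished work items; a chunk which was only quicksplit
  // (left/right are valid) yields up to two new work items, submitted
  // with an increased priority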
  while ((v = (struct qs_work *) work_wait(&q)))
    {
      if (v->left != LR_UNDEF)
        {
          if (v->right > 0)
            {
              w = qs_alloc_work(ctx);
              w->array = v->array;
              w->num_elts = v->right + 1;
              w->w.priority = v->w.priority + 1;
              work_submit(&q, &w->w);
            }
          if (v->left < (int)v->num_elts - 1)
            {
              w = qs_alloc_work(ctx);
              w->array = v->array + v->left * ctx->elt_size;
              w->num_elts = v->num_elts - v->left;
              w->w.priority = v->w.priority + 1;
              work_submit(&q, &w->w);
            }
        }
      ep_free(ctx->eltpool, v);
    }

  ep_delete(ctx->eltpool);
  work_queue_cleanup(&q);
  asort_stop_threads();
}
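
/*
 * Parallel radix sort: the input is cut into sorter_threads blocks which
 * are counted and split in parallel; the resulting buckets are then
 * recursed on, sorted inline, or handed to the thread pool as
 * independent work items, depending on their size.
 */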

struct rs_work {
  struct work w;
  struct asort_context *ctx;
  void *in, *out;
  uns num_elts;
  uns shift;
  uns swap_output;
  uns cnt[0];   // Per-bucket counters, allocated past the end of the structure
};

static void
rs_count(struct worker_thread *thr UNUSED, struct work *ww)
{
  struct rs_work *w = (struct rs_work *) ww;

  DBG("Thread %d: Counting %d items, shift=%d", thr->id, w->num_elts, w->shift);
  w->ctx->radix_count(w->in, w->num_elts, w->cnt, w->shift);
  DBG("Thread %d: Counting done", thr->id);
}

static void
rs_split(struct worker_thread *thr UNUSED, struct work *ww)
{
  struct rs_work *w = (struct rs_work *) ww;

  DBG("Thread %d: Splitting %d items, shift=%d", thr->id, w->num_elts, w->shift);
  w->ctx->radix_split(w->in, w->out, w->num_elts, w->cnt, w->shift);
  DBG("Thread %d: Splitting done", thr->id);
}
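
// Finish a single bucket: either quicksort it (and copy the result to
// the output buffer if required) or run the sequential radix sort on it.
// Also called inline with thr == NULL for the smallest buckets.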
static void
rs_finish(struct worker_thread *thr UNUSED, struct work *ww)
{
  struct rs_work *w = (struct rs_work *) ww;

  if (thr)
    DBG("Thread %d: Finishing %d items, shift=%d", thr->id, w->num_elts, w->shift);
  if (w->shift < ASORT_MIN_SHIFT || w->num_elts < sorter_radix_threshold)
    {
      w->ctx->quicksort(w->in, w->num_elts);
      if (w->swap_output)
        memcpy(w->out, w->in, w->num_elts * w->ctx->elt_size);
    }
  else
    asort_radix(w->ctx, w->in, w->out, w->num_elts, w->shift, w->swap_output);
  if (thr)
    DBG("Thread %d: Finishing done", thr->id);
}

static void
rs_wait_small(struct asort_context *ctx)
{
  struct rs_work *w;

  while ((w = (struct rs_work *) work_wait(ctx->rs_work_queue)))
    {
      DBG("Reaping small chunk of %d items", w->num_elts);
      ep_free(ctx->eltpool, w);
    }
}

static void
rs_radix(struct asort_context *ctx, void *array, void *buffer, uns num_elts, uns hash_bits, uns swapped_output)
{
  uns buckets = (1 << ctx->radix_bits);
  uns shift = (hash_bits > ctx->radix_bits) ? (hash_bits - ctx->radix_bits) : 0;
  uns cnt[buckets];
  uns blksize = num_elts / sorter_threads;
  DBG(">>> n=%d h=%d s=%d blk=%d sw=%d", num_elts, hash_bits, shift, blksize, swapped_output);

  // If there are any small chunks in progress, wait for them to finish
  rs_wait_small(ctx);

  // Start parallel counting
  void *iptr = array;
  for (uns i=0; i<sorter_threads; i++)
    {
      struct rs_work *w = ctx->rs_works[i];
      w->w.priority = 0;
      w->w.go = rs_count;
      w->ctx = ctx;
      w->in = iptr;
      w->out = buffer;
      w->num_elts = blksize;
      if (i == sorter_threads-1)
        w->num_elts += num_elts % sorter_threads;   // The last block takes the remainder
      w->shift = shift;
      iptr += w->num_elts * ctx->elt_size;
      bzero(w->cnt, sizeof(uns) * buckets);
      work_submit(ctx->rs_work_queue, &w->w);
    }

  // Get bucket sizes from the counts
  bzero(cnt, sizeof(cnt));
  for (uns i=0; i<sorter_threads; i++)
    {
      struct rs_work *w = (struct rs_work *) work_wait(ctx->rs_work_queue);
      ASSERT(w);
      for (uns j=0; j<buckets; j++)
        cnt[j] += w->cnt[j];
    }

  // Calculate bucket starts
  uns pos = 0;
  for (uns i=0; i<buckets; i++)
    {
      uns j = cnt[i];
      cnt[i] = pos;
      pos += j;
    }
  ASSERT(pos == num_elts);

  // Start parallel splitting
  for (uns i=0; i<sorter_threads; i++)
    {
      struct rs_work *w = ctx->rs_works[i];
      w->w.go = rs_split;
      for (uns j=0; j<buckets; j++)
        {
          // Give each thread its own starting position within bucket j
          uns k = w->cnt[j];
          w->cnt[j] = cnt[j];
          cnt[j] += k;
        }
      work_submit(ctx->rs_work_queue, &w->w);
    }
  ASSERT(cnt[buckets-1] == num_elts);

  // Wait for the splits to finish
  while (work_wait(ctx->rs_work_queue))
    ;

  // Recurse on the buckets
  pos = 0;
  for (uns i=0; i<buckets; i++)
    {
      uns n = cnt[i] - pos;
      if (!n)
        continue;
      if (n * ctx->elt_size < sorter_thread_threshold)
        {
          struct rs_work *w = ep_alloc(ctx->eltpool);
          w->w.priority = 0;
          w->w.go = rs_finish;
          w->ctx = ctx;
          w->in = buffer;
          w->out = array;
          w->num_elts = n;
          w->shift = shift;
          w->swap_output = !swapped_output;
          if (n * ctx->elt_size < sorter_thread_chunk)
            {
              DBG("Sorting block %d+%d inline", pos, n);
              rs_finish(NULL, &w->w);
              ep_free(ctx->eltpool, w);
            }
          else
            {
              DBG("Scheduling block %d+%d", pos, n);
              work_submit(ctx->rs_work_queue, &w->w);
            }
        }
      else
        rs_radix(ctx, buffer, array, n, shift, !swapped_output);
      pos = cnt[i];
      array += n * ctx->elt_size;
      buffer += n * ctx->elt_size;
    }
}

static void
threaded_radixsort(struct asort_context *ctx)
{
  struct work_queue q;

  asort_start_threads(1);
  work_queue_init(&asort_thread_pool, &q);

  // Prepare work structures for counting and splitting.
  // We use big_alloc(), because we want to avoid cache-line aliasing between threads.
  ctx->rs_work_queue = &q;
  ctx->rs_works = alloca(sizeof(struct rs_work *) * sorter_threads);
  for (uns i=0; i<sorter_threads; i++)
    ctx->rs_works[i] = big_alloc(sizeof(struct rs_work) + sizeof(uns) * (1 << ctx->radix_bits));

  // Prepare a pool for all remaining small bits which will be sorted in the background.
  ctx->eltpool = ep_new(sizeof(struct rs_work), 1000);

  // Do the big splitting
  // FIXME: Set the swap bit carefully.
  rs_radix(ctx, ctx->array, ctx->buffer, ctx->num_elts, ctx->hash_bits, 0);
  for (uns i=0; i<sorter_threads; i++)
    big_free(ctx->rs_works[i], sizeof(struct rs_work) + sizeof(uns) * (1 << ctx->radix_bits));

  // Finish the small blocks
  rs_wait_small(ctx);

  ASSERT(!ctx->eltpool->num_allocated);
  ep_delete(ctx->eltpool);
  work_queue_cleanup(&q);
  asort_stop_threads();
}

#else

void asort_start_threads(uns run UNUSED) { }
void asort_stop_threads(void) { }

#endif

void
asort_run(struct asort_context *ctx)
{
  SORT_XTRACE(10, "Array-sorting %d items per %d bytes, hash_bits=%d", ctx->num_elts, ctx->elt_size, ctx->hash_bits);
  uns allow_threads UNUSED = (sorter_threads > 1 &&
                              ctx->num_elts * ctx->elt_size >= sorter_thread_threshold &&
                              !(sorter_debug & SORT_DEBUG_ASORT_NO_THREADS));
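
  // Use radix sort only when the input is large enough, there are enough
  // hash bits to split on, the radix callbacks exist, and radix sorting
  // is not disabled for debugging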
  if (ctx->num_elts < sorter_radix_threshold ||
      ctx->hash_bits <= ASORT_MIN_SHIFT ||
      !ctx->radix_split ||
      (sorter_debug & SORT_DEBUG_ASORT_NO_RADIX))
    {
#ifdef CONFIG_UCW_THREADS
      if (allow_threads)
        {
          SORT_XTRACE(12, "Decided to use parallel quicksort");
          threaded_quicksort(ctx);
          return;
        }
#endif
      SORT_XTRACE(12, "Decided to use sequential quicksort");
      ctx->quicksort(ctx->array, ctx->num_elts);
    }
  else
    {
#ifdef CONFIG_UCW_THREADS
      if (allow_threads)
        {
          SORT_XTRACE(12, "Decided to use parallel radix-sort");
          threaded_radixsort(ctx);
          return;
        }
#endif
      SORT_XTRACE(12, "Decided to use sequential radix-sort");
      // FIXME: select dest buffer
      asort_radix(ctx, ctx->array, ctx->buffer, ctx->num_elts, ctx->hash_bits, 0);
    }

  SORT_XTRACE(11, "Array-sort finished");
}
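
/*
 * Usage sketch (illustration only, not part of the library): the fields
 * of struct asort_context referenced above are normally filled in by the
 * generated sorters (see the ASORT machinery in lib/sorter/). A
 * hypothetical caller sorting 32-bit keys could look roughly like this,
 * assuming callbacks my_quicksort(), my_quicksplit(), my_radix_count()
 * and my_radix_split() with the signatures used above:
 *
 *	struct asort_context ctx = {
 *	  .array = keys,              // Items to sort
 *	  .buffer = tmp,              // Auxiliary buffer of the same size
 *	  .num_elts = n,
 *	  .elt_size = sizeof(u32),
 *	  .hash_bits = 32,            // Significant bits of the sorting key
 *	  .radix_bits = 10,           // Bits consumed per radix pass
 *	  .quicksort = my_quicksort,
 *	  .quicksplit = my_quicksplit,
 *	  .radix_count = my_radix_count,
 *	  .radix_split = my_radix_split,
 *	};
 *	asort_run(&ctx);
 */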