 * UCW Library -- Universal Sorter: Governing Routines
 *
 * (c) 2007 Martin Mares <mj@ucw.cz>
 *
 * This software may be freely distributed and used according to the terms
 * of the GNU Lesser General Public License.
11 #include <ucw/fastbuf.h>
12 #include <ucw/mempool.h>
13 #include <ucw/stkstring.h>
15 #include <ucw/sorter/common.h>
/* Format the size of bucket `b` as a human-readable string on the stack
 * (stk_fsize comes from <ucw/stkstring.h>), for use in trace messages. */
21 #define F_BSIZE(b) stk_fsize(sbuck_size(b))
/* Start the per-pass stopwatch kept in the sort context. */
24 sorter_start_timer(struct sort_context *ctx)
26 init_timer(&ctx->start_time);
/* Stop the stopwatch: remember how long the last pass took and add it to
 * the given accumulator (e.g. ctx->total_pre_time or ctx->total_ext_time). */
30 sorter_stop_timer(struct sort_context *ctx, uint *account_to)
32 ctx->last_pass_time = get_timer(&ctx->start_time);
33 *account_to += ctx->last_pass_time;
/* Convert the size processed by the last pass to a MB/s figure for tracing.
 * NOTE(review): the zero check guards the division below; the value returned
 * for a zero-duration pass is on an elided line — presumably 0, confirm. */
37 sorter_speed(struct sort_context *ctx, u64 size)
41 if (!ctx->last_pass_time)
/* size is in bytes, last_pass_time appears to be in milliseconds
 * (see the /1000. conversions in sorter_run), hence the *1000 factor. */
43 return (uint)((double)size / (1<<20) * 1000 / ctx->last_pass_time);
/* Run one presorting pass on bucket `in`, producing sorted output in `out`
 * (or `out_only` for the internal sorter — the distinction is presumably the
 * automatic-join trick described below; part of the logic is not visible here).
 * Returns the internal sorter's / custom presorter's status (nonzero seems to
 * mean "more input remains", judging by the loops in the callers). */
47 sorter_presort(struct sort_context *ctx, struct sort_bucket *in, struct sort_bucket *out, struct sort_bucket *out_only)
49 sorter_alloc_buf(ctx);
50 if (in->flags & SBF_CUSTOM_PRESORT)
53 * The trick with automatic joining, which we use for the normal presorter,
54 * is not necessary with the custom presorter, because the custom presorter
55 * is never called in the middle of the sorted data.
57 struct fastbuf *f = sbuck_write(out);
59 return ctx->custom_presort(f, ctx->big_buf, ctx->big_buf_size);
61 return ctx->internal_sort(ctx, in, out, out_only);
/* Decide whether the output of sorting bucket `b` can be appended ("joined")
 * directly to the preceding bucket in the list. Returns the join target and
 * stores its current size in *sizep, or NULL when joining is not possible
 * (the NULL-returning paths are on elided lines). */
64 static struct sort_bucket *
65 sbuck_join_to(struct sort_bucket *b, ucw_off_t *sizep)
67 *sizep = 0; // Not needed, just to silence false warnings in some compilers about uninitialized sizep (it's OK for NULL result)
/* Debug knob: joining can be disabled to exercise the non-joining paths. */
69 if (sorter_debug & SORT_DEBUG_NO_JOIN)
72 struct sort_bucket *out = (struct sort_bucket *) b->n.prev; // Such bucket is guaranteed to exist
/* Only the final output bucket is a valid join target. */
73 if (!(out->flags & SBF_FINAL))
75 ASSERT(out->runs == 1);
/* Remember the size before the join so the caller can report how much was appended. */
76 *sizep = sbuck_size(out);
/* Either account for data already joined to the final bucket, or insert the
 * freshly produced bucket `b` after `list_pos` in the bucket list.
 * When a join happened, returns the number of bytes appended
 * (current size minus the size recorded before the join). */
81 sbuck_ins_or_join(struct sort_bucket *b, cnode *list_pos, struct sort_bucket *join, ucw_off_t join_size)
83 if (join && join->runs >= 2)
/* NOTE(review): the join target had 1 run before (see sbuck_join_to);
 * after appending one sorted run it has exactly 2 — which then presumably
 * gets reset on an elided line. Confirm against the full source. */
87 ASSERT(join->runs == 2);
89 return sbuck_size(join) - join_size;
/* No join: link the new bucket into the list at the requested position. */
93 clist_insert_after(&b->n, list_pos);
/* The input is a single sorted run: deliver it to the final output bucket,
 * either by promoting it to the final bucket or by copying its contents. */
101 sorter_join(struct sort_bucket *b)
103 struct sort_bucket *join = (struct sort_bucket *) b->n.prev;
104 ASSERT(join->flags & SBF_FINAL);
105 ASSERT(b->runs == 1);
107 if (!sbuck_has_file(join))
109 // The final bucket doesn't have any file associated yet, so replace
110 // it with the new bucket.
111 SORT_XTRACE(3, "Replaced final bucket");
112 b->flags |= SBF_FINAL;
/* Otherwise physically copy the run to the already-open output file. */
117 SORT_TRACE("Copying to output file: %s", F_BSIZE(b));
118 struct fastbuf *src = sbuck_read(b);
119 struct fastbuf *dest = sbuck_write(join);
/* ~0U asks bbcopy for the maximum length, i.e. copy until EOF — presumably
 * the fastbuf convention for "everything"; confirm against bbcopy docs. */
120 bbcopy(src, dest, ~0U);
/* Two-way strategy: presort the input into at most two buckets of sorted runs,
 * then repeatedly run 2-way merge passes until a single run remains. If the
 * whole input fits in memory, the presorting pass alone finishes the job.
 * Several control-flow lines (braces, loop headers, error paths) are elided
 * in this view; comments below describe only what is visible. */
126 sorter_twoway(struct sort_context *ctx, struct sort_bucket *b)
128 struct sort_bucket *ins[3] = { NULL }, *outs[3] = { NULL };
129 cnode *list_pos = b->n.prev;
131 struct sort_bucket *join = sbuck_join_to(b, &join_size);
/* Presorting can be disabled for debugging, except with a custom presorter,
 * which is the only way to read custom source data. */
133 if (!(sorter_debug & SORT_DEBUG_NO_PRESORT) || (b->flags & SBF_CUSTOM_PRESORT))
135 SORT_XTRACE(3, "%s", ((b->flags & SBF_CUSTOM_PRESORT) ? "Custom presorting" : "Presorting"));
136 sorter_start_timer(ctx);
137 ins[0] = sbuck_new(ctx);
/* `join ? : ins[0]`: if a join target exists, the single-run fast path can
 * write directly to the final output (GNU ?: elided-operand extension). */
138 if (!sorter_presort(ctx, b, ins[0], join ? : ins[0]))
/* Presorter consumed everything in one pass => data was sorted in memory. */
140 sorter_stop_timer(ctx, &ctx->total_pre_time);
141 ucw_off_t size = sbuck_ins_or_join(ins[0], list_pos, join, join_size);
142 SORT_XTRACE(((b->flags & SBF_SOURCE) ? 1 : 3), "Sorted in memory (%s, %dMB/s)", stk_fsize(size), sorter_speed(ctx, size));
/* More data remains: keep presorting, alternating between ins[0] and ins[1]
 * (the alternation logic is on elided lines). */
147 ins[1] = sbuck_new(ctx);
149 while (sorter_presort(ctx, b, ins[i], ins[i]))
152 sorter_stop_timer(ctx, &ctx->total_pre_time);
153 SORT_TRACE("Presorting pass (%d+%d runs, %s+%s, %dMB/s)",
154 ins[0]->runs, ins[1]->runs,
155 F_BSIZE(ins[0]), F_BSIZE(ins[1]),
156 sorter_speed(ctx, sbuck_size(ins[0]) + sbuck_size(ins[1])));
160 SORT_XTRACE(2, "Presorting disabled");
/* External merge phase: repeat 2-way merge passes. */
164 SORT_XTRACE(3, "Main sorting");
168 sorter_start_timer(ctx);
169 if (ins[0]->runs <= 1 && ins[1]->runs <= 1 && join)
171 // This is guaranteed to produce a single run, so join if possible
174 ctx->twoway_merge(ctx, ins, outs);
/* NULL bucket: the data went to the join target, only account for the size. */
175 ucw_off_t size = sbuck_ins_or_join(NULL, NULL, join, join_size);
176 sorter_stop_timer(ctx, &ctx->total_ext_time);
177 SORT_TRACE("Mergesort pass %d (final run, %s, %dMB/s)", pass, stk_fsize(size), sorter_speed(ctx, size));
/* Normal merge pass: two fresh output buckets. */
182 outs[0] = sbuck_new(ctx);
183 outs[1] = sbuck_new(ctx);
185 ctx->twoway_merge(ctx, ins, outs);
186 sorter_stop_timer(ctx, &ctx->total_ext_time);
187 SORT_TRACE("Mergesort pass %d (%d+%d runs, %s+%s, %dMB/s)", pass,
188 outs[0]->runs, outs[1]->runs,
189 F_BSIZE(outs[0]), F_BSIZE(outs[1]),
190 sorter_speed(ctx, sbuck_size(outs[0]) + sbuck_size(outs[1])));
/* The outputs become the inputs of the next pass (3 pointers: the arrays
 * are NULL-terminated, see their declaration above). */
193 memcpy(ins, outs, 3*sizeof(struct sort_bucket *));
/* Loop until all runs collapse into ins[0] and ins[1] is empty. */
194 } while (sbuck_have(ins[1]));
/* Publish the final single-run bucket at the original list position. */
197 clist_insert_after(&ins[0]->n, list_pos);
/* Multi-way strategy: presort the input into a list of sorted parts, then
 * merge up to 2^sorter_max_multiway_bits parts at a time until one remains.
 * Control-flow lines (loop headers, braces) are elided in this view. */
201 sorter_multiway(struct sort_context *ctx, struct sort_bucket *b)
204 cnode *list_pos = b->n.prev;
206 struct sort_bucket *join = sbuck_join_to(b, &join_size);
/* Source buckets are traced at a higher priority than intermediate ones. */
207 uint trace_level = (b->flags & SBF_SOURCE) ? 1 : 3;
/* sorter_decide never picks multiway with presorting disabled (see there). */
210 ASSERT(!(sorter_debug & SORT_DEBUG_NO_PRESORT));
211 SORT_XTRACE(3, "%s", ((b->flags & SBF_CUSTOM_PRESORT) ? "Custom presorting" : "Presorting"));
215 sorter_start_timer(ctx);
/* Presort loop body: each iteration yields one sorted part. */
218 struct sort_bucket *p = sbuck_new(ctx);
/* Only the very first part may take the direct-join fast path. */
219 cont = sorter_presort(ctx, b, p, (!part_cnt && join) ? join : p);
223 clist_add_tail(&parts, &p->n);
224 total_size += sbuck_size(p);
231 sorter_stop_timer(ctx, &ctx->total_pre_time);
/* The big buffer is no longer needed; free it before the merge phase. */
232 sorter_free_buf(ctx);
/* Single part => already sorted in memory (possibly joined to the output). */
237 ucw_off_t size = sbuck_ins_or_join(clist_head(&parts), list_pos, (part_cnt ? NULL : join), join_size);
238 SORT_XTRACE(trace_level, "Sorted in memory (%s, %dMB/s)", stk_fsize(size), sorter_speed(ctx, size));
242 SORT_TRACE("Multi-way presorting pass (%d parts, %s, %dMB/s)", part_cnt, stk_fsize(total_size), sorter_speed(ctx, total_size));
244 uint max_ways = 1 << sorter_max_multiway_bits;
/* VLA sized by a config knob; +1 presumably for a NULL terminator — confirm. */
245 struct sort_bucket *ways[max_ways+1];
246 SORT_XTRACE(3, "Starting up to %d-way merge", max_ways);
/* Gather up to max_ways parts from the head of the list for this pass. */
250 struct sort_bucket *p;
251 while (n < max_ways && (p = clist_head(&parts)))
/* If this pass consumes the remaining parts, it can merge straight into
 * the join target (out selection partially elided). */
259 struct sort_bucket *out;
260 if (clist_empty(&parts) && join)
263 out = sbuck_new(ctx);
264 sorter_start_timer(ctx);
265 ctx->multiway_merge(ctx, ways, out);
266 sorter_stop_timer(ctx, &ctx->total_ext_time);
/* Dispose of the consumed input parts. */
268 for (uint i=0; i<n; i++)
271 if (clist_empty(&parts))
/* Last pass: publish the result (or account for the joined bytes). */
273 ucw_off_t size = sbuck_ins_or_join((join ? NULL : out), list_pos, join, join_size);
274 SORT_TRACE("Multi-way merge completed (%d ways, %s, %dMB/s)", n, stk_fsize(size), sorter_speed(ctx, size));
/* Not done yet: the merged bucket goes to the tail to be merged again. */
280 clist_add_tail(&parts, &out->n);
281 SORT_TRACE("Multi-way merge pass (%d ways, %s, %dMB/s)", n, F_BSIZE(out), sorter_speed(ctx, sbuck_size(out)));
/* Radix strategy: split bucket `b` into 2^bits sub-buckets by the top `bits`
 * of the hash, so each sub-bucket can later be sorted independently. */
287 sorter_radix(struct sort_context *ctx, struct sort_bucket *b, uint bits)
289 // Add more bits if requested and allowed.
290 bits = MIN(bits + sorter_add_radix_bits, sorter_max_radix_bits);
292 uint nbuck = 1 << bits;
293 SORT_XTRACE(3, "Running radix split on %s with hash %d bits of %d (expecting %s buckets)",
294 F_BSIZE(b), bits, b->hash_bits, stk_fsize(sbuck_size(b) / nbuck));
/* The split works on files, not in memory — release the big buffer first. */
295 sorter_free_buf(ctx);
296 sorter_start_timer(ctx);
298 struct sort_bucket **outs = alloca(nbuck * sizeof(struct sort_bucket *));
/* Insert in reverse so the sub-buckets end up in ascending hash order after b. */
299 for (uint i=nbuck; i--; )
301 outs[i] = sbuck_new(ctx);
/* Each sub-bucket has `bits` fewer undecided hash bits than its parent. */
302 outs[i]->hash_bits = b->hash_bits - bits;
303 clist_insert_after(&outs[i]->n, &b->n);
306 ctx->radix_split(ctx, b, outs, b->hash_bits - bits, bits);
/* Gather min/max/total sizes of the sub-buckets for the trace message. */
308 u64 min = ~(u64)0, max = 0, sum = 0;
309 for (uint i=0; i<nbuck; i++)
311 u64 s = sbuck_size(outs[i]);
/* Flush each sub-bucket to disk so its memory can be reused. */
316 sbuck_swap_out(outs[i]);
319 sorter_stop_timer(ctx, &ctx->total_ext_time);
320 SORT_TRACE("Radix split (%d buckets, %s min, %s max, %s avg, %dMB/s)", nbuck,
321 stk_fsize(min), stk_fsize(max), stk_fsize(sum / nbuck), sorter_speed(ctx, sum));
/* Central dispatcher: examine bucket `b` and pick a strategy — drop it if
 * empty, join a single run, sort in memory via the 2-way path, or reduce the
 * problem with a radix split or a multi-way merge. Radix splits are preferred
 * when available. Several branch conditions are on elided lines. */
326 sorter_decide(struct sort_context *ctx, struct sort_bucket *b)
328 // Drop empty buckets
331 SORT_XTRACE(4, "Dropping empty bucket");
336 // How many bits of bucket size we have to reduce before it fits in the RAM?
337 // (this is insanely large if the input size is unknown, but it serves our purpose)
338 u64 insize = sbuck_size(b);
339 u64 mem = ctx->internal_estimate(ctx, b) * 0.8; // Magical factor accounting for various non-uniformities
341 while ((insize >> bits) > mem)
344 // Calculate the possibilities of radix splits
/* Radix split is unavailable without a split callback, with a custom
 * presorter (hashes unknown for custom data), or when disabled for debugging. */
346 if (!ctx->radix_split ||
347 (b->flags & SBF_CUSTOM_PRESORT) ||
348 (sorter_debug & SORT_DEBUG_NO_RADIX))
352 radix_bits = MIN(bits, b->hash_bits);
353 radix_bits = MIN(radix_bits, sorter_max_radix_bits);
/* Too few bits to be worth a split pass. */
354 if (radix_bits < sorter_min_radix_bits)
358 // The same for multi-way merges
/* Multi-way merging needs a merge callback and relies on presorting. */
360 if (!ctx->multiway_merge ||
361 (sorter_debug & SORT_DEBUG_NO_MULTIWAY) ||
362 (sorter_debug & SORT_DEBUG_NO_PRESORT))
366 multiway_bits = MIN(bits, sorter_max_multiway_bits);
367 if (multiway_bits < sorter_min_multiway_bits)
371 SORT_XTRACE(3, "Decisions: size=%s max=%s runs=%d bits=%d hash=%d -> radix=%d multi=%d",
372 stk_fsize(insize), stk_fsize(mem), b->runs, bits, b->hash_bits,
373 radix_bits, multiway_bits);
375 // If the input already consists of a single run, just join it
377 return sorter_join(b);
379 // If everything fits in memory, the 2-way strategy will sort it in memory
381 return sorter_twoway(ctx, b);
383 // If we can reduce everything in one pass, do so and prefer radix splits
384 if (radix_bits == bits)
385 return sorter_radix(ctx, b, radix_bits);
386 if (multiway_bits == bits)
387 return sorter_multiway(ctx, b);
389 // Otherwise, reduce as much as possible and again prefer radix splits
391 return sorter_radix(ctx, b, radix_bits);
393 return sorter_multiway(ctx, b);
395 // Fall back to 2-way strategy if nothing else applies
396 return sorter_twoway(ctx, b);
/* Entry point of the sorter: set up the bucket list with a source bucket and
 * a final output bucket, repeatedly let sorter_decide() process the bucket
 * next to the output until only the output remains, then tear down and hand
 * the result back to the caller via ctx->out_fb. */
400 sorter_run(struct sort_context *ctx)
402 ctx->pool = mp_new(4096);
403 clist_init(&ctx->bucket_list);
404 sorter_prepare_buf(ctx);
/* 0 = use the configured default number of sorting threads — confirm. */
405 asort_start_threads(0);
407 // Create bucket containing the source
408 struct sort_bucket *bin = sbuck_new(ctx);
409 bin->flags = SBF_SOURCE | SBF_OPEN_READ;
410 if (ctx->custom_presort)
411 bin->flags |= SBF_CUSTOM_PRESORT;
413 bin->fb = ctx->in_fb;
415 bin->size = ctx->in_size;
416 bin->hash_bits = ctx->hash_bits;
417 clist_add_tail(&ctx->bucket_list, &bin->n);
418 SORT_XTRACE(2, "Input size: %s, %d hash bits", F_BSIZE(bin), bin->hash_bits);
/* Small inputs use smaller fastbuf parameters to save memory. */
419 ctx->fb_params = (bin->size < sorter_small_input) ? &sorter_small_fb_params : &sorter_fb_params;
421 // Create bucket for the output
422 struct sort_bucket *bout = sbuck_new(ctx);
423 bout->flags = SBF_FINAL;
/* Intentional assignment-in-condition: adopt the caller's output fastbuf
 * (if any) and mark the bucket writable. */
424 if (bout->fb = ctx->out_fb)
425 bout->flags |= SBF_OPEN_WRITE;
/* The output bucket stays at the head; everything sorted joins onto it. */
428 clist_add_head(&ctx->bucket_list, &bout->n);
430 // Repeatedly sort buckets
431 struct sort_bucket *b;
/* Comma expression: re-read the (possibly replaced) output bucket at the
 * head, then process the bucket right after it until none is left. */
432 while (bout = clist_head(&ctx->bucket_list), b = clist_next(&ctx->bucket_list, &bout->n))
433 sorter_decide(ctx, b);
435 asort_stop_threads();
436 sorter_free_buf(ctx);
437 sbuck_write(bout); // Force empty bucket to a file
438 SORT_XTRACE(2, "Final size: %s", F_BSIZE(bout));
439 SORT_XTRACE(2, "Final timings: %.3fs external sorting, %.3fs presorting, %.3fs internal sorting",
440 ctx->total_ext_time/1000., ctx->total_pre_time/1000., ctx->total_int_time/1000.);
441 ctx->out_fb = sbuck_read(bout);
442 mp_delete(ctx->pool);