/*
 *  UCW Library -- Universal Sorter: Governing Routines
 *
 *  (c) 2007 Martin Mares <mj@ucw.cz>
 *
 *  This software may be freely distributed and used according to the terms
 *  of the GNU Lesser General Public License.
 */
#include "lib/lib.h"
#include "lib/fastbuf.h"
#include "lib/mempool.h"
#include "lib/stkstring.h"
#include "lib/sorter/common.h"

#include <string.h>
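/* Format the size of a bucket as a human-readable string (stack-allocated) */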
#define F_BSIZE(b) stk_fsize(sbuck_size(b))
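
/*
 * Timing helpers: every pass is measured by the timer embedded in the
 * sort context and its duration is accounted to one of the total_*_time
 * counters. sorter_speed() converts the size processed by the last pass
 * to an MB/s figure for the trace messages.
 */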
static void
sorter_start_timer(struct sort_context *ctx)
{
  init_timer(&ctx->start_time);
}
static void
sorter_stop_timer(struct sort_context *ctx, uns *account_to)
{
  ctx->last_pass_time = get_timer(&ctx->start_time);
  *account_to += ctx->last_pass_time;
}
static uns
sorter_speed(struct sort_context *ctx, u64 size)
{
  if (!size)
    return 0;
  if (!ctx->last_pass_time)
    return 0;			// Avoid division by zero on sub-millisecond passes
  return (uns)((double)size / (1<<20) * 1000 / ctx->last_pass_time);
}
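
/*
 * Run a single presorting pass: either the user-supplied custom presorter
 * or the internal in-memory sort. Returns non-zero if some input is still
 * left unprocessed. If the whole input fits into a single run, it is
 * written to out_only instead of out, which allows sorting directly into
 * the final bucket.
 */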
static int
sorter_presort(struct sort_context *ctx, struct sort_bucket *in, struct sort_bucket *out, struct sort_bucket *out_only)
{
  sorter_alloc_buf(ctx);
  if (in->flags & SBF_CUSTOM_PRESORT)
    {
      struct fastbuf *f = sbuck_write(out);
      out->runs++;			// The custom presorter produces a single run
      return ctx->custom_presort(f, ctx->big_buf, ctx->big_buf_size);	// FIXME: out_only optimization?
    }
  return ctx->internal_sort(ctx, in, out, out_only);
}
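
/*
 * Check whether the result of sorting bucket b can be appended directly
 * to the final output bucket, which (if it exists) is b's predecessor in
 * the bucket list. Returns the output bucket or NULL.
 */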
static inline struct sort_bucket *
sbuck_join_to(struct sort_bucket *b)
{
  if (sorter_debug & SORT_DEBUG_NO_JOIN)
    return NULL;

  struct sort_bucket *out = (struct sort_bucket *) b->n.prev;	// Such bucket is guaranteed to exist
  if (!(out->flags & SBF_FINAL))
    return NULL;
  ASSERT(out->runs == 1);
  return out;
}
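
/*
 * Join a single-run bucket to the final output: either copy its contents
 * to the output file, or, if the output has no file associated yet,
 * promote the bucket itself to be the final one.
 */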
static void
sorter_join(struct sort_bucket *b)
{
  struct sort_bucket *join = (struct sort_bucket *) b->n.prev;
  ASSERT(join->flags & SBF_FINAL);

  if (!sbuck_has_file(join))
    {
      // The final bucket doesn't have any file associated yet, so replace
      // it with the new bucket.
      SORT_XTRACE(2, "Replaced final bucket");
      b->flags |= SBF_FINAL;
      sbuck_drop(join);
    }
  else
    {
      SORT_TRACE("Copying to output file: %s", F_BSIZE(b));
      struct fastbuf *src = sbuck_read(b);
      struct fastbuf *dest = sbuck_write(join);
      bbcopy(src, dest, ~0U);
      sbuck_drop(b);
    }
}
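
/*
 * The 2-way strategy: presort the input into two buckets carrying the
 * initial runs (odd runs in one bucket, even runs in the other) and then
 * merge pairs of runs repeatedly until a single run remains, joining it
 * to the final bucket whenever possible.
 */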
static void
sorter_twoway(struct sort_context *ctx, struct sort_bucket *b)
{
  struct sort_bucket *ins[3] = { NULL }, *outs[3] = { NULL };
  cnode *list_pos = b->n.prev;
  struct sort_bucket *join = sbuck_join_to(b);

  if (!(sorter_debug & SORT_DEBUG_NO_PRESORT) || (b->flags & SBF_CUSTOM_PRESORT))
    {
      SORT_XTRACE(3, "%s", ((b->flags & SBF_CUSTOM_PRESORT) ? "Custom presorting" : "Presorting"));
      sorter_start_timer(ctx);
      ins[0] = sbuck_new(ctx);
      if (!sorter_presort(ctx, b, ins[0], join ? : ins[0]))
	{
	  // Everything fit into memory and was sorted in a single pass
	  sorter_stop_timer(ctx, &ctx->total_pre_time);
	  SORT_XTRACE(((b->flags & SBF_SOURCE) ? 1 : 2), "Sorted in memory");
	  if (join)
	    {
	      // The run went directly to the final bucket
	      ASSERT(join->runs == 2);
	      join->runs--;
	      sbuck_drop(ins[0]);
	    }
	  else
	    clist_insert_after(&ins[0]->n, list_pos);
	  sbuck_drop(b);
	  return;
	}

      // Distribute the remaining runs between two buckets alternately
      ins[1] = sbuck_new(ctx);
      int i = 1;
      while (sorter_presort(ctx, b, ins[i], ins[i]))
	i = 1-i;
      sbuck_drop(b);
      sorter_stop_timer(ctx, &ctx->total_pre_time);
      SORT_TRACE("Presorting pass (%d+%d runs, %s+%s, %dMB/s)",
		 ins[0]->runs, ins[1]->runs,
		 F_BSIZE(ins[0]), F_BSIZE(ins[1]),
		 sorter_speed(ctx, sbuck_size(ins[0]) + sbuck_size(ins[1])));
    }
  else
    {
      SORT_XTRACE(2, "Presorting disabled");
      ins[0] = b;
    }

  SORT_XTRACE(3, "Main sorting");
  uns pass = 0;
  do
    {
      pass++;
      sorter_start_timer(ctx);
      if (ins[0]->runs == 1 && ins[1]->runs == 1 && join)
	{
	  // This is guaranteed to produce a single run, so join if possible
	  sh_off_t join_size = sbuck_size(join);
	  outs[0] = join;
	  outs[1] = NULL;
	  ctx->twoway_merge(ctx, ins, outs);
	  ASSERT(join->runs == 2);
	  join->runs--;
	  join_size = sbuck_size(join) - join_size;
	  sorter_stop_timer(ctx, &ctx->total_ext_time);
	  SORT_TRACE("Mergesort pass %d (final run, %s, %dMB/s)", pass, stk_fsize(join_size), sorter_speed(ctx, join_size));
	  sbuck_drop(ins[0]);
	  sbuck_drop(ins[1]);
	  return;
	}
      outs[0] = sbuck_new(ctx);
      outs[1] = sbuck_new(ctx);
      outs[2] = NULL;
      ctx->twoway_merge(ctx, ins, outs);
      sorter_stop_timer(ctx, &ctx->total_ext_time);
      SORT_TRACE("Mergesort pass %d (%d+%d runs, %s+%s, %dMB/s)", pass,
		 outs[0]->runs, outs[1]->runs,
		 F_BSIZE(outs[0]), F_BSIZE(outs[1]),
		 sorter_speed(ctx, sbuck_size(outs[0]) + sbuck_size(outs[1])));
      sbuck_drop(ins[0]);
      sbuck_drop(ins[1]);
      memcpy(ins, outs, 3*sizeof(struct sort_bucket *));
    }
  while (sbuck_have(ins[1]));

  sbuck_drop(ins[1]);
  clist_insert_after(&ins[0]->n, list_pos);
}
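
/*
 * The multi-way strategy: presort the input into a list of single-run
 * parts and then merge up to 2^sorter_max_multiway_bits parts at a time.
 */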
static void
sorter_multiway(struct sort_context *ctx, struct sort_bucket *b)
{
  clist parts;
  cnode *list_pos = b->n.prev;
  struct sort_bucket *join = sbuck_join_to(b);
  uns trace_level = (b->flags & SBF_SOURCE) ? 1 : 2;

  clist_init(&parts);
  ASSERT(!(sorter_debug & SORT_DEBUG_NO_PRESORT));
  SORT_XTRACE(3, "%s", ((b->flags & SBF_CUSTOM_PRESORT) ? "Custom presorting" : "Presorting"));
  uns part_cnt = 0;
  u64 total_size = 0;
  sorter_start_timer(ctx);
  int cont;
  do
    {
      struct sort_bucket *p = sbuck_new(ctx);
      cont = sorter_presort(ctx, b, p, (!part_cnt && join) ? join : p);
      if (sbuck_have(p))
	{
	  part_cnt++;
	  clist_add_tail(&parts, &p->n);
	  total_size += sbuck_size(p);
	  sbuck_swap_out(p);
	}
      else
	sbuck_drop(p);
    }
  while (cont);
  sorter_stop_timer(ctx, &ctx->total_pre_time);
  sorter_free_buf(ctx);
  sbuck_drop(b);

  // FIXME: This is way too similar to the two-way case.
  if (!part_cnt)
    {
      // Everything was presorted directly to the final bucket
      SORT_XTRACE(trace_level, "Sorted in memory and joined");
      ASSERT(join->runs == 2);
      join->runs--;
      return;
    }
  if (part_cnt == 1)
    {
      struct sort_bucket *p = clist_head(&parts);
      SORT_XTRACE(trace_level, "Sorted in memory");
      clist_insert_after(&p->n, list_pos);
      return;
    }

  SORT_TRACE("Multi-way presorting pass (%d parts, %s, %dMB/s)", part_cnt, stk_fsize(total_size), sorter_speed(ctx, total_size));

  uns max_ways = 1 << sorter_max_multiway_bits;
  struct sort_bucket *ways[max_ways+1];
  SORT_XTRACE(2, "Starting up to %d-way merge", max_ways);
  for (;;)
    {
      uns n = 0;
      struct sort_bucket *p;
      while (n < max_ways && (p = clist_head(&parts)))
	{
	  clist_remove(&p->n);
	  ways[n++] = p;
	}
      ways[n] = NULL;
      ASSERT(n > 1);

      struct sort_bucket *out;
      out = sbuck_new(ctx);		// FIXME: No joining so far
      sorter_start_timer(ctx);
      ctx->multiway_merge(ctx, ways, out);
      sorter_stop_timer(ctx, &ctx->total_ext_time);

      for (uns i=0; i<n; i++)
	sbuck_drop(ways[i]);

      if (clist_empty(&parts))
	{
	  clist_insert_after(&out->n, list_pos);
	  SORT_TRACE("Multi-way merge completed (%s, %dMB/s)", F_BSIZE(out), sorter_speed(ctx, sbuck_size(out)));
	  return;
	}
      sbuck_swap_out(out);
      clist_add_tail(&parts, &out->n);
      SORT_TRACE("Multi-way merge pass (%d ways, %s, %dMB/s)", n, F_BSIZE(out), sorter_speed(ctx, sbuck_size(out)));
    }
}
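
/*
 * Radix splitting: distribute the bucket into 2^bits sub-buckets according
 * to the top bits of the remaining hash value, so that each sub-bucket can
 * be processed independently (and, with luck, sorted in memory).
 */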
static void
sorter_radix(struct sort_context *ctx, struct sort_bucket *b, uns bits)
{
  uns nbuck = 1 << bits;
  SORT_XTRACE(2, "Running radix split on %s with hash %d bits of %d (expecting %s buckets)",
	      F_BSIZE(b), bits, b->hash_bits, stk_fsize(sbuck_size(b) / nbuck));
  sorter_free_buf(ctx);
  sorter_start_timer(ctx);

  // Insert in reverse order, so the sub-buckets end up after b in ascending order
  struct sort_bucket **outs = alloca(nbuck * sizeof(struct sort_bucket *));
  for (uns i=nbuck; i--; )
    {
      outs[i] = sbuck_new(ctx);
      outs[i]->hash_bits = b->hash_bits - bits;
      clist_insert_after(&outs[i]->n, &b->n);
    }

  ctx->radix_split(ctx, b, outs, b->hash_bits - bits, bits);

  u64 min = ~(u64)0, max = 0, sum = 0;
  for (uns i=0; i<nbuck; i++)
    {
      u64 s = sbuck_size(outs[i]);
      min = MIN(min, s);
      max = MAX(max, s);
      sum += s;
      sbuck_swap_out(outs[i]);
    }

  sorter_stop_timer(ctx, &ctx->total_ext_time);
  SORT_TRACE("Radix split (%d buckets, %s min, %s max, %s avg, %dMB/s)", nbuck,
	     stk_fsize(min), stk_fsize(max), stk_fsize(sum / nbuck), sorter_speed(ctx, sum));
  sbuck_drop(b);
}
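
/*
 * The governing decision: pick the strategy (join, 2-way merge, radix
 * split or multi-way merge) which brings the given bucket closest to a
 * fully sorted state.
 */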
static void
sorter_decide(struct sort_context *ctx, struct sort_bucket *b)
{
  // Drop empty buckets
  if (!sbuck_have(b))
    {
      SORT_XTRACE(3, "Dropping empty bucket");
      sbuck_drop(b);
      return;
    }

  // How many bits of bucket size we have to reduce before it fits in the RAM?
  // (this is insanely large if the input size is unknown, but it serves our purpose)
  u64 insize = sbuck_size(b);
  u64 mem = ctx->internal_estimate(ctx, b) * 0.8;	// FIXME: Magical factor for various non-uniformities
  uns bits = 0;
  while ((insize >> bits) > mem)
    bits++;

  // Calculate the possibilities of radix splits
  uns radix_bits;
  if (!ctx->radix_split ||
      (b->flags & SBF_CUSTOM_PRESORT) ||
      (sorter_debug & SORT_DEBUG_NO_RADIX))
    radix_bits = 0;
  else
    {
      radix_bits = MIN(bits, b->hash_bits);
      radix_bits = MIN(radix_bits, sorter_max_radix_bits);
      if (radix_bits < sorter_min_radix_bits)
	radix_bits = 0;
    }

  // The same for multi-way merges
  uns multiway_bits;
  if (!ctx->multiway_merge ||
      (sorter_debug & SORT_DEBUG_NO_MULTIWAY) ||
      (sorter_debug & SORT_DEBUG_NO_PRESORT))
    multiway_bits = 0;
  else
    {
      multiway_bits = MIN(bits, sorter_max_multiway_bits);
      if (multiway_bits < sorter_min_multiway_bits)
	multiway_bits = 0;
    }

  SORT_XTRACE(2, "Decisions: size=%s max=%s runs=%d bits=%d hash=%d -> radix=%d multi=%d",
	      stk_fsize(insize), stk_fsize(mem), b->runs, bits, b->hash_bits,
	      radix_bits, multiway_bits);

  // If the input already consists of a single run, just join it
  if (b->runs == 1)
    return sorter_join(b);

  // If everything fits in memory, the 2-way strategy will sort it in memory
  if (!bits)
    return sorter_twoway(ctx, b);

  // If we can reduce everything in one pass, do so and prefer radix splits
  if (radix_bits == bits)
    return sorter_radix(ctx, b, radix_bits);
  if (multiway_bits == bits)
    return sorter_multiway(ctx, b);

  // Otherwise, reduce as much as possible and again prefer radix splits
  if (radix_bits)
    return sorter_radix(ctx, b, radix_bits);
  if (multiway_bits)
    return sorter_multiway(ctx, b);

  // Fall back to 2-way strategy if nothing else applies
  return sorter_twoway(ctx, b);
}
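
/*
 * The entry point of the sorter: create the source and final buckets and
 * keep applying sorter_decide() to the first unsorted bucket until only
 * the final bucket remains.
 */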
void
sorter_run(struct sort_context *ctx)
{
  ctx->pool = mp_new(4096);
  clist_init(&ctx->bucket_list);
  sorter_prepare_buf(ctx);

  // Create bucket containing the source
  struct sort_bucket *bin = sbuck_new(ctx);
  bin->flags = SBF_SOURCE | SBF_OPEN_READ;
  if (ctx->custom_presort)
    bin->flags |= SBF_CUSTOM_PRESORT;
  else
    bin->fb = ctx->in_fb;
  bin->size = ctx->in_size;
  bin->hash_bits = ctx->hash_bits;
  clist_add_tail(&ctx->bucket_list, &bin->n);
  SORT_XTRACE(2, "Input size: %s, %d hash bits", F_BSIZE(bin), bin->hash_bits);

  // Create bucket for the output
  struct sort_bucket *bout = sbuck_new(ctx);
  bout->flags = SBF_FINAL;
  if ((bout->fb = ctx->out_fb))
    bout->flags |= SBF_OPEN_WRITE;
  bout->runs = 1;
  clist_add_head(&ctx->bucket_list, &bout->n);

  // Repeatedly sort buckets
  struct sort_bucket *b;
  while (bout = clist_head(&ctx->bucket_list), b = clist_next(&ctx->bucket_list, &bout->n))
    sorter_decide(ctx, b);

  sorter_free_buf(ctx);
  sbuck_write(bout);		// Force empty bucket to a file
  SORT_XTRACE(2, "Final size: %s", F_BSIZE(bout));
  SORT_XTRACE(2, "Final timings: %.3fs external sorting, %.3fs presorting, %.3fs internal sorting",
	      ctx->total_ext_time/1000., ctx->total_pre_time/1000., ctx->total_int_time/1000.);
  ctx->out_fb = sbuck_read(bout);
}
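
/*
 * A sketch of typical use (normally hidden behind the SORT_PREFIX template
 * machinery, which fills in the internal_sort/twoway_merge/... hooks):
 *
 *	struct sort_context ctx = { ... };	// hooks set up by the template
 *	ctx.in_fb = in;				// input fastbuf
 *	ctx.in_size = in_size;			// huge value if unknown
 *	ctx.hash_bits = 0;			// >0 enables radix splitting
 *	sorter_run(&ctx);
 *	// The sorted output can now be read from ctx.out_fb
 */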