2 * UCW Library -- Universal Sorter: Governing Routines
4 * (c) 2007 Martin Mares <mj@ucw.cz>
6 * This software may be freely distributed and used according to the terms
7 * of the GNU Lesser General Public License.
11 #include "lib/fastbuf.h"
12 #include "lib/mempool.h"
13 #include "lib/sorter/common.h"
// Format a byte count as a human-readable string in a temporary buffer.
// NOTE(review): the buffer comes from alloca() inside a GCC statement
// expression -- its lifetime lasts until the enclosing function returns,
// so every use of F_SIZE/F_BSIZE in a loop grows the caller's stack.
// 16 bytes is assumed to be enough for format_size() output -- confirm.
20 #define F_SIZE(x) ({ byte *buf = alloca(16); format_size(buf, x); buf; })
// Shorthand: formatted on-disk size of a sort bucket.
21 #define F_BSIZE(b) F_SIZE(sbuck_size(b))
27 gettimeofday(&tv, NULL);
28 return (u64)tv.tv_sec * 1000 + tv.tv_usec / 1000;
// Mark the beginning of a timed sorter phase; the elapsed time is later
// read back by sorter_speed().  (Return type and braces elided here.)
32 sorter_start_timer(struct sort_context *ctx)
34 ctx->start_time = sorter_clock();
// Throughput of the phase started by sorter_start_timer(), in MB/s,
// given the number of bytes processed.  (Return type line elided.)
38 sorter_speed(struct sort_context *ctx, u64 size)
40 u64 stop_time = sorter_clock();
// Guard against a zero or negative interval (the clock has only 1 ms
// granularity, so fast phases can measure 0).  The branch body is
// elided here -- presumably `return 0`; confirm against full source.
43 if (stop_time <= ctx->start_time)
// bytes -> MiB, ms -> s; double arithmetic avoids integer truncation.
45 return (uns)((double)size / (1<<20) * 1000 / (stop_time-ctx->start_time));
// Presort one chunk of bucket `in`, writing sorted runs to `out`.
// `out_only` is a hint bucket for records that need no further merging
// (passed through to the internal sorter).  Returns nonzero while more
// input remains -- inferred from the loop at the call sites; confirm.
49 sorter_presort(struct sort_context *ctx, struct sort_bucket *in, struct sort_bucket *out, struct sort_bucket *out_only)
// Make sure the big presorting buffer is available.
51 sorter_alloc_buf(ctx);
// Source with a user-supplied presorter: hand it the output fastbuf
// and the big buffer directly, bypassing internal_sort.
52 if (in->flags & SBF_CUSTOM_PRESORT)
54 struct fastbuf *f = sbuck_write(out);
56 return ctx->custom_presort(f, ctx->big_buf, ctx->big_buf_size); // FIXME: out_only optimization?
// Default path: the context's internal in-memory sorter.
58 return ctx->internal_sort(ctx, in, out, out_only);
// If the bucket preceding `b` on the bucket list is the final output
// bucket, return it so freshly sorted data can be joined (appended)
// directly; otherwise return NULL.  Joining can be disabled for
// debugging via SORT_DEBUG_NO_JOIN (the early-return line is elided).
61 static inline struct sort_bucket *
62 sbuck_join_to(struct sort_bucket *b)
64 if (sorter_debug & SORT_DEBUG_NO_JOIN)
67 struct sort_bucket *out = (struct sort_bucket *) b->n.prev; // Such bucket is guaranteed to exist
68 return (out->flags & SBF_FINAL) ? out : NULL;
// Merge sorted bucket `b` into the final output bucket that precedes
// it on the bucket list.  (Return type and several lines are elided.)
72 sorter_join(struct sort_bucket *b)
74 struct sort_bucket *join = (struct sort_bucket *) b->n.prev;
// The caller guarantees the predecessor is the final bucket.
75 ASSERT(join->flags & SBF_FINAL);
78 if (!sbuck_has_file(join))
80 // The final bucket doesn't have any file associated yet, so replace
81 // it with the new bucket.
82 SORT_XTRACE(2, "Replaced final bucket");
// `b` takes over the role of the final bucket; the list surgery and
// disposal of `join` are in the elided lines that follow.
83 b->flags |= SBF_FINAL;
// Otherwise append `b`'s contents to the existing output file.
88 SORT_TRACE("Copying to output file: %s", F_BSIZE(b));
89 struct fastbuf *src = sbuck_read(b);
90 struct fastbuf *dest = sbuck_write(join);
// ~0U = copy everything until EOF.
91 bbcopy(src, dest, ~0U);
// Two-way merge sort of bucket `b`: presort the input into (up to) two
// run buckets, then repeatedly merge pairs of runs until a single run
// remains.  Many interior lines (loop headers, bucket disposal, the
// final join) are elided in this view.
97 sorter_twoway(struct sort_context *ctx, struct sort_bucket *b)
99 struct sort_bucket *ins[3] = { NULL }, *outs[3] = { NULL };
// Remember where `b` sat on the bucket list so the result can be
// re-inserted at the same position.
100 cnode *list_pos = b->n.prev;
// Non-NULL if the result may be appended straight to the final bucket.
101 struct sort_bucket *join = sbuck_join_to(b);
// Presorting phase (can be disabled for debugging, except when the
// source insists on a custom presorter).
103 if (!(sorter_debug & SORT_DEBUG_NO_PRESORT) || (b->flags & SBF_CUSTOM_PRESORT))
105 SORT_XTRACE(2, "%s", ((b->flags & SBF_CUSTOM_PRESORT) ? "Custom presorting" : "Presorting"));
106 sorter_start_timer(ctx);
107 ins[0] = sbuck_new(ctx);
// First presorting pass; if it consumes all input, the data fit in
// memory and no merging is needed.  `join ? : ins[0]` (GCC extension)
// routes single-run output directly to the final bucket when possible.
108 if (!sorter_presort(ctx, b, ins[0], join ? : ins[0]))
110 SORT_XTRACE(((b->flags & SBF_SOURCE) ? 1 : 2), "Sorted in memory");
// (Cleanup/join handling elided.)
114 clist_insert_after(&ins[0]->n, list_pos);
// More input remains: distribute the rest of the runs alternately
// between two buckets.  (Loop header with index `i` is elided.)
119 ins[1] = sbuck_new(ctx);
121 while (sorter_presort(ctx, b, ins[i], ins[i]))
124 SORT_TRACE("Presorting pass (%d+%d runs, %s+%s, %dMB/s)",
125 ins[0]->runs, ins[1]->runs,
126 F_BSIZE(ins[0]), F_BSIZE(ins[1]),
127 sorter_speed(ctx, sbuck_size(ins[0]) + sbuck_size(ins[1])));
// Presorting disabled: the elided branch presumably seeds ins[] from
// the raw input bucket instead -- confirm against full source.
131 SORT_XTRACE(2, "Presorting disabled");
// Merging phase: repeat two-way merges until one run remains.
135 SORT_XTRACE(2, "Main sorting");
139 sorter_start_timer(ctx);
140 if (ins[0]->runs == 1 && ins[1]->runs == 1 && join)
142 // This is guaranteed to produce a single run, so join if possible
// (Setup of outs[] pointing at the join bucket is elided; the
// runs==2 assertion checks both input runs landed in the join.)
145 ctx->twoway_merge(ctx, ins, outs);
146 ASSERT(outs[0]->runs == 2);
148 SORT_TRACE("Mergesort pass %d (final run, %s, %dMB/s)", pass, F_BSIZE(outs[0]), sorter_speed(ctx, sbuck_size(outs[0])));
// Regular pass: merge into two fresh buckets.
153 outs[0] = sbuck_new(ctx);
154 outs[1] = sbuck_new(ctx);
156 ctx->twoway_merge(ctx, ins, outs);
157 SORT_TRACE("Mergesort pass %d (%d+%d runs, %s+%s, %dMB/s)", pass,
158 outs[0]->runs, outs[1]->runs,
159 F_BSIZE(outs[0]), F_BSIZE(outs[1]),
160 sorter_speed(ctx, sbuck_size(outs[0]) + sbuck_size(outs[1])));
// Outputs become next pass's inputs (3 slots incl. NULL terminator).
163 memcpy(ins, outs, 3*sizeof(struct sort_bucket *));
// Done when the second bucket is empty, i.e. only one run remains.
164 } while (sbuck_have(ins[1]));
// Put the single remaining sorted bucket back where `b` was.
167 clist_insert_after(&ins[0]->n, list_pos);
// Predicate: should bucket `b` be split by radix sort?  True when the
// bucket still has hash bits to split on, the context provides a
// radix_split callback, radix sorting is not disabled for debugging,
// and the bucket does not fit in the sorting buffer.
171 sorter_radix_p(struct sort_context *ctx, struct sort_bucket *b)
173 return b->hash_bits && ctx->radix_split &&
174 !(sorter_debug & SORT_DEBUG_NO_RADIX) &&
175 sbuck_size(b) > (sh_off_t)sorter_bufsize;
// Split bucket `b` into 2^bits sub-buckets by the top `bits` of the
// hash, inserting them after `b` on the bucket list for further
// processing.  (Return type and the min/max/sum accumulation lines
// are elided in this view.)
179 sorter_radix(struct sort_context *ctx, struct sort_bucket *b)
// At most 4 bits (16 sub-buckets) per pass for now.
181 uns bits = MIN(b->hash_bits, 4); /* FIXME */
182 uns nbuck = 1 << bits;
183 SORT_XTRACE(2, "Running radix sort on %s with %d bits of %d", F_BSIZE(b), bits, b->hash_bits);
184 sorter_start_timer(ctx);
// Sub-bucket pointer array lives on the stack (nbuck <= 16 here).
186 struct sort_bucket **outs = alloca(nbuck * sizeof(struct sort_bucket *));
// Created in reverse so insertion after `b` leaves them in ascending
// hash order on the list.
187 for (uns i=nbuck; i--; )
189 outs[i] = sbuck_new(ctx);
// Each sub-bucket has `bits` fewer hash bits left to split on.
190 outs[i]->hash_bits = b->hash_bits - bits;
191 clist_insert_after(&outs[i]->n, &b->n);
// Distribute records; the shift argument selects the bit field used.
194 ctx->radix_split(ctx, b, outs, b->hash_bits - bits, bits);
// Gather size statistics for the trace line (accumulation elided).
196 u64 min = ~(u64)0, max = 0, sum = 0;
197 for (uns i=0; i<nbuck; i++)
199 u64 s = sbuck_size(outs[i]);
205 SORT_TRACE("Radix split (%d buckets, %s min, %s max, %s avg, %dMB/s)", nbuck,
206 F_SIZE(min), F_SIZE(max), F_SIZE(sum / nbuck), sorter_speed(ctx, sum));
// Main driver: build the bucket list from the input, then repeatedly
// process the bucket nearest the head (join / radix-split / two-way
// sort) until only the final output bucket remains.  Several dispatch
// lines are elided in this view.
211 sorter_run(struct sort_context *ctx)
213 ctx->pool = mp_new(4096);
214 clist_init(&ctx->bucket_list);
216 /* FIXME: Remember to test sorting of empty files */
218 // Create bucket containing the source
219 struct sort_bucket *bin = sbuck_new(ctx);
220 bin->flags = SBF_SOURCE | SBF_OPEN_READ;
221 if (ctx->custom_presort)
222 bin->flags |= SBF_CUSTOM_PRESORT;
// (The else-branch attaching ctx->in_fb is partially elided.)
224 bin->fb = ctx->in_fb;
226 bin->size = ctx->in_size; /* Sizes should be either sh_off_t or u64, not both; beware of ~0U */
227 bin->hash_bits = ctx->hash_bits;
228 clist_add_tail(&ctx->bucket_list, &bin->n);
// ~(u64)0 is the "size unknown" sentinel for streamed input.
229 SORT_XTRACE(2, "Input size: %s", (ctx->in_size == ~(u64)0 ? (byte*)"unknown" : F_BSIZE(bin)));
231 // Create bucket for the output
232 struct sort_bucket *bout = sbuck_new(ctx);
233 bout->flags = SBF_FINAL;
// Intentional assignment-in-condition: adopt the caller's output
// fastbuf if one was supplied.
234 if (bout->fb = ctx->out_fb)
235 bout->flags |= SBF_OPEN_WRITE;
238 clist_add_head(&ctx->bucket_list, &bout->n);
240 struct sort_bucket *b;
// Loop while any bucket follows the (always head) final bucket.
// Dispatch: empty bucket / single run -> join to output (branch bodies
// elided); splittable -> radix sort; otherwise -> two-way merge sort.
241 while (bout = clist_head(&ctx->bucket_list), b = clist_next(&ctx->bucket_list, &bout->n))
245 else if (b->runs == 1)
247 else if (sorter_radix_p(ctx, b))
248 sorter_radix(ctx, b);
250 sorter_twoway(ctx, b);
// All buckets consumed: release the big buffer and hand the result
// back to the caller as a readable fastbuf.
253 sorter_free_buf(ctx);
254 sbuck_write(bout); // Force empty bucket to a file
255 SORT_XTRACE(2, "Final size: %s", F_BSIZE(bout));
256 ctx->out_fb = sbuck_read(bout);