/*
 *  UCW Library -- Universal Sorter: Internal Sorting Module
 *
 *  (c) 2007 Martin Mares <mj@ucw.cz>
 *
 *  This software may be freely distributed and used according to the terms
 *  of the GNU Lesser General Public License.
 */
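/*
 *  A note for readers: this file is never compiled on its own. The generic
 *  sorter includes it after the user has defined the parameter macros
 *  (SORT_PREFIX, SORT_KEY or SORT_KEY_REGULAR, SORT_UNIFY, SORT_INT, ...),
 *  and P(x) expands to SORT_PREFIX(x). A minimal sketch of an instantiation
 *  follows -- the key struct and field are made up for illustration; see the
 *  sorter documentation for the authoritative macro list:
 *
 *	#define SORT_PREFIX(x) my_##x
 *	#define SORT_KEY_REGULAR struct my_key
 *	#define SORT_INT(k) (k).id		// an integer field to sort by
 *	#define SORT_INPUT_FILE
 *	#define SORT_OUTPUT_FILE
 *	#include <ucw/sorter/sorter.h>
 *
 *  This generates a my_sort() entry point (the exact signature depends on
 *  the chosen input/output macros).
 */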
#include <ucw/stkstring.h>
#include <ucw/time.h>		// init_timer() and get_timer() used below
#ifdef SORT_INTERNAL_RADIX
/* Keep copies of the items' hashes to save cache misses */
#define SORT_COPY_HASH
#endif

typedef struct {
  P(key) *key;
#ifdef SORT_COPY_HASH
#  ifdef SORT_LONG_HASH
  u64 hash;
#  else
  uns hash;
#  endif
#endif
} P(internal_item_t);
#define ASORT_PREFIX(x) SORT_PREFIX(array_##x)
#define ASORT_KEY_TYPE P(internal_item_t)
#ifdef SORT_COPY_HASH
#  ifdef SORT_INT
#    define ASORT_LT(x,y) ((x).hash < (y).hash)		// In this mode, the hash is the value
#  else
#    define ASORT_LT(x,y) ((x).hash < (y).hash || (x).hash == (y).hash && P(compare)((x).key, (y).key) < 0)
#  endif
#else
#  define ASORT_LT(x,y) (P(compare)((x).key, (y).key) < 0)
#endif
#ifdef SORT_INTERNAL_RADIX
#  ifdef SORT_COPY_HASH
#    define ASORT_HASH(x) (x).hash
#  else
#    define ASORT_HASH(x) P(hash)((x).key)
#  endif
#  ifdef SORT_LONG_HASH
#    define ASORT_LONG_HASH
#  endif
#endif
#include <ucw/sorter/array.h>
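/* The include above instantiates the generic array sorter with the macros
 * just defined, producing P(array_sort)(), which is called below. */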
/*
 *  The big_buf has the following layout:
 *
 *	+---------------------------------------------------------------------------------+
 *	| array of internal_item's                                                        |
 *	+---------------------------------------------------------------------------------+
 *	| padding to make the following part page-aligned                                 |
 *	+--------------------------------+------------------------------------------------+
 *	| shadow copy of item array      | array of pointers to data for write_merged()   |
 *	| used if radix-sorting          +------------------------------------------------+
 *	|                                | workspace for write_merged()                   |
 *	+--------------------------------+------------------------------------------------+
 *	|                                                                                 |
 *	|                                    unused                                       |
 *	|                                                                                 |
 *	+---------------------------------------------------------------------------------+
 *	| sequence of | padding |                                                         |
 *	| items       +---------+      <-- grows backwards from the end                   |
 *	+---------------------------------------------------------------------------------+
 *
 *	(the data which are in different columns are never accessed simultaneously,
 *	 so we use a single buffer for both)
 */
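/* In the code below: item_array sits at the start of big_buf, workspace is
 * the page-aligned region just past the last item, and end walks backwards
 * from big_buf + bufsize as keys and data are stored. */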
static inline void *P(internal_get_data)(P(key) *key)
{
  uns ksize = SORT_KEY_SIZE(*key);
#ifdef SORT_UNIFY
  ksize = ALIGN_TO(ksize, CPU_STRUCT_ALIGN);
#endif
  return (byte *) key + ksize;
}
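/* Example: with CPU_STRUCT_ALIGN == 8 and SORT_UNIFY defined, the data of a
 * 21-byte key start at key + 24; without SORT_UNIFY they follow immediately
 * at key + 21. This matches the read loop below, which stores the data at
 * end + ksize_aligned. */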
static inline size_t P(internal_workspace)(P(key) *key UNUSED)
{
  size_t ws = 0;
#ifdef SORT_UNIFY
  ws += sizeof(void *);
#endif
#ifdef SORT_UNIFY_WORKSPACE
  ws += SORT_UNIFY_WORKSPACE(*key);
#endif
#ifdef SORT_INTERNAL_RADIX
  ws = MAX(ws, sizeof(P(internal_item_t)));
#endif
  return ws;
}
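/* The workspace accounts for the per-item scratch shown in the layout
 * diagram: a data pointer (plus any SORT_UNIFY_WORKSPACE) consumed by
 * write_merged() when unifying, or a shadow copy of the item when
 * radix-sorting.
 *
 * P(internal) loads one run of records from bin, sorts it in memory and
 * writes it out. It returns nonzero if more keys may follow (so the caller
 * should invoke it again) and zero when the input is exhausted. */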
static int P(internal)(struct sort_context *ctx, struct sort_bucket *bin, struct sort_bucket *bout, struct sort_bucket *bout_only)
{
  sorter_alloc_buf(ctx);
  struct fastbuf *in = sbuck_read(bin);

  P(key) key, *keybuf = ctx->key_buf;
  if (!keybuf)
    keybuf = ctx->key_buf = sorter_alloc(ctx, sizeof(key));
  if (ctx->more_keys)
    {
      key = *keybuf;
      ctx->more_keys = 0;
    }
  else if (!P(read_key)(in, &key))
    return 0;

  size_t bufsize = ctx->big_buf_size;
#ifdef SORT_VAR_DATA
  if (sizeof(key) + 2*CPU_PAGE_SIZE + SORT_DATA_SIZE(key) + P(internal_workspace)(&key) > bufsize)
    {
      SORT_XTRACE(4, "s-internal: Generating a giant run");
      struct fastbuf *out = sbuck_write(bout);
      P(copy_data)(&key, in, out);
      bout->runs++;
      return 1;				// We don't know, but 1 is always safe
    }
#endif
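/* A single record too large to fit in the buffer (together with its
 * workspace and some slack for page alignment) is streamed straight through
 * to the output as a run of its own instead of being loaded into memory. */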
  SORT_XTRACE(5, "s-internal: Reading");
  P(internal_item_t) *item_array = ctx->big_buf, *item = item_array, *last_item;
  byte *end = (byte *) ctx->big_buf + bufsize;
  size_t remains = bufsize - CPU_PAGE_SIZE;
  do
    {
      uns ksize = SORT_KEY_SIZE(key);
#ifdef SORT_UNIFY
      uns ksize_aligned = ALIGN_TO(ksize, CPU_STRUCT_ALIGN);
#else
      uns ksize_aligned = ksize;
#endif
      uns dsize = SORT_DATA_SIZE(key);
      uns recsize = ALIGN_TO(ksize_aligned + dsize, CPU_STRUCT_ALIGN);
      size_t totalsize = recsize + sizeof(P(internal_item_t)) + P(internal_workspace)(&key);
      if (unlikely(totalsize > remains
#ifdef CPU_64BIT_POINTERS
		   || item >= item_array + ~0U		// The number of items must fit in an uns
#endif
	 ))
	{
	  ctx->more_keys = 1;
	  *keybuf = key;
	  break;
	}
      remains -= totalsize;
      end -= recsize;
      memcpy(end, &key, ksize);
#ifdef SORT_VAR_DATA
      breadb(in, end + ksize_aligned, dsize);
#endif
      item->key = (P(key)*) end;
#ifdef SORT_COPY_HASH
      item->hash = P(hash)(item->key);
#endif
      item++;
    }
  while (P(read_key)(in, &key));
  last_item = item;
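/* Keys and data grow backwards from the end of the buffer while the item
 * array grows forwards from its start; `remains' keeps one page in reserve
 * and pre-charges the per-item workspace, so the page-aligned workspace
 * carved out below is guaranteed to fit between the two. */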
  uns count = last_item - item_array;
  void *workspace UNUSED = ALIGN_PTR(last_item, CPU_PAGE_SIZE);
  SORT_XTRACE(4, "s-internal: Read %u items (%s items, %s workspace, %s data)",
	      count,
	      stk_fsize((byte*)last_item - (byte*)item_array),
	      stk_fsize(end - (byte*)last_item - remains),
	      stk_fsize((byte*)ctx->big_buf + bufsize - end));
  timestamp_t timer;
  init_timer(&timer);
  item_array = P(array_sort)(item_array, count
#ifdef SORT_INTERNAL_RADIX
			     , workspace, bin->hash_bits
#endif
			     );
  if ((void *)item_array != ctx->big_buf)
    workspace = ctx->big_buf;
  last_item = item_array + count;
  ctx->total_int_time += get_timer(&timer);
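/* When radix-sorting, P(array_sort) ping-pongs between the item array and
 * its shadow copy, so the sorted result can end up in the workspace area;
 * in that case the original array takes over the role of the workspace. */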
  SORT_XTRACE(5, "s-internal: Writing");
  if (!ctx->more_keys)
    bout = bout_only;
  struct fastbuf *out = sbuck_write(bout);
  bout->runs++;
  uns merged UNUSED = 0;
  for (item = item_array; item < last_item; item++)
    {
#ifdef SORT_UNIFY
      if (item < last_item - 1 && !P(compare)(item->key, item[1].key))
	{
	  // Rewrite the item structures with just pointers to keys and place
	  // pointers to data in the workspace.
	  P(key) **key_array = (void *) item;
	  void **data_array = workspace;
	  key_array[0] = item[0].key;
	  data_array[0] = P(internal_get_data)(key_array[0]);
	  uns cnt;
	  for (cnt=1; item+cnt < last_item && !P(compare)(key_array[0], item[cnt].key); cnt++)
	    {
	      key_array[cnt] = item[cnt].key;
	      data_array[cnt] = P(internal_get_data)(key_array[cnt]);
	    }
	  P(write_merged)(out, key_array, data_array, cnt, data_array+cnt);
	  item += cnt - 1;
	  merged += cnt - 1;
	  continue;
	}
#endif
#ifdef SORT_ASSERT_UNIQUE
      ASSERT(item == last_item-1 || P(compare)(item->key, item[1].key) < 0);
#endif
      P(write_key)(out, item->key);
#ifdef SORT_VAR_DATA
      bwrite(out, P(internal_get_data)(item->key), SORT_DATA_SIZE(*item->key));
#endif
    }
#ifdef SORT_UNIFY
  SORT_XTRACE(4, "Merging reduced %u records", merged);
#endif
  return ctx->more_keys;
}
static u64
P(internal_estimate)(struct sort_context *ctx, struct sort_bucket *b UNUSED)
{
  // Most of this is just wild guesses
#ifdef SORT_VAR_KEY
  uns avg = ALIGN_TO(sizeof(P(key))/4, CPU_STRUCT_ALIGN);
#else
  uns avg = ALIGN_TO(sizeof(P(key)), CPU_STRUCT_ALIGN);
#endif
  uns ws = 0;
#ifdef SORT_UNIFY
  ws += sizeof(void *);
#endif
#ifdef SORT_UNIFY_WORKSPACE
  ws += avg;			// A guess standing in for SORT_UNIFY_WORKSPACE(key)
#endif
#ifdef SORT_INTERNAL_RADIX
  ws = MAX(ws, sizeof(P(internal_item_t)));
#endif
  // We ignore the data part of records, it probably won't make the estimate much worse
  return (ctx->big_buf_size / (avg + ws + sizeof(P(internal_item_t))) * avg);
}
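/* The estimate divides the buffer by the guessed per-record footprint
 * (average key + per-item workspace + item struct) and reports the number
 * of key bytes expected to fit. */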
#undef SORT_COPY_HASH