/*
 * UCW Library -- Universal Sorter: Internal Sorting Module
 *
 * (c) 2007 Martin Mares <mj@ucw.cz>
 *
 * This software may be freely distributed and used according to the terms
 * of the GNU Lesser General Public License.
 */

// FIXME: Add the hash here to save cache misses
/*
 * Instantiate the generic array sorter (lib/arraysort.h) for this sorter's
 * item type.  Items are P(internal_item_t) records compared by their keys
 * via P(compare); the item array itself is passed in as an extra argument
 * rather than being a global.  The generated entry point is P(array_sort).
 */
15 #define ASORT_PREFIX(x) SORT_PREFIX(array_##x)
16 #define ASORT_KEY_TYPE P(internal_item_t)
17 #define ASORT_ELT(i) ary[i]
// Order items by comparing the keys they point to, never by pointer value.
18 #define ASORT_LT(x,y) (P(compare)((x).key, (y).key) < 0)
19 #define ASORT_EXTRA_ARGS , P(internal_item_t) *ary
20 #include "lib/arraysort.h"
/*
 * Return a pointer to the data part of a record: the data is stored
 * immediately after the key, with the key padded to CPU_STRUCT_ALIGN
 * so the data starts on an aligned boundary.
 * NOTE(review): this extraction is missing intervening lines (braces and
 * a probable #ifdef around the alignment step) — restore before building.
 */
22 static inline void *P(internal_get_data)(P(key) *key)
24 uns ksize = SORT_KEY_SIZE(*key);
// Skip the alignment padding that was inserted after the key.
26 ksize = ALIGN_TO(ksize, CPU_STRUCT_ALIGN);
28 return (byte *) key + ksize;
/*
 * Compute how much of the sorter's big buffer one internal-sorting pass may
 * use.  Currently limited to half the buffer (the other half is needed for
 * the key/data pointer arrays built during the merge-write phase below).
 * NOTE(review): the return statement and #endif are missing from this
 * extraction — restore before building.
 */
31 static size_t P(internal_buf_size)(struct sort_context *ctx)
33 size_t bufsize = ctx->big_buf_half_size; /* FIXME: In some cases, we can use the whole buffer */
34 #ifdef CPU_64BIT_POINTERS
// Item counts are kept in `uns`; cap the buffer so the record count fits.
35 bufsize = MIN((u64)bufsize, (u64)~0U * sizeof(P(internal_item_t))); // The number of records must fit in uns
/*
 * One pass of internal (in-memory) sorting: read as many records from the
 * input bucket `bin` as fit into the big buffer, sort them with the
 * instantiated array sorter, and write the resulting run to `bout`.
 * Returns ctx->more_keys, i.e. non-zero iff the input was not exhausted
 * and another pass is needed.
 * NOTE(review): `bout_only` is not referenced in the visible lines —
 * presumably used by code missing from this extraction; confirm.
 * NOTE(review): many intervening lines (braces, loop headers, the
 * statements that advance `item` and move `end` downwards) are missing
 * from this extraction — restore before building.
 */
40 static int P(internal)(struct sort_context *ctx, struct sort_bucket *bin, struct sort_bucket *bout, struct sort_bucket *bout_only)
42 sorter_alloc_buf(ctx);
43 struct fastbuf *in = sbuck_read(bin);
// Lazily allocate the one-key staging buffer kept in the context.
45 P(key) key, *keybuf = ctx->key_buf;
47 keybuf = ctx->key_buf = sorter_alloc(ctx, sizeof(key));
53 else if (!P(read_key)(in, &key))
56 size_t bufsize = P(internal_buf_size)(ctx);
// A single record too large for the buffer cannot be sorted in memory:
// stream it straight through to the output as its own run.
58 if (sizeof(key) + 1024 + SORT_DATA_SIZE(key) > bufsize)
60 SORT_XTRACE(3, "s-internal: Generating a giant run");
61 struct fastbuf *out = sbuck_write(bout);
62 P(copy_data)(&key, in, out);
64 return 1; // We don't know, but 1 is always safe
/*
 * Reading phase: item descriptors grow upwards from the start of big_buf
 * while the key+data records are placed at `end`, which (in the lines
 * missing here) presumably moves downwards from the top of the buffer —
 * TODO confirm against the full source.
 */
68 SORT_XTRACE(3, "s-internal: Reading (bufsize=%zd)", bufsize);
69 P(internal_item_t) *item_array = ctx->big_buf, *item = item_array, *last_item;
70 byte *end = (byte *) ctx->big_buf + bufsize;
73 uns ksize = SORT_KEY_SIZE(key);
// Key size is padded to CPU_STRUCT_ALIGN so the data part is aligned
// (matches the offset arithmetic in P(internal_get_data)).
75 uns ksize_aligned = ALIGN_TO(ksize, CPU_STRUCT_ALIGN);
77 uns ksize_aligned = ksize;
79 uns dsize = SORT_DATA_SIZE(key);
80 uns recsize = ALIGN_TO(ksize_aligned + dsize, CPU_STRUCT_ALIGN);
// Stop filling when the next item descriptor and its record would collide.
81 if (unlikely(sizeof(P(internal_item_t)) + recsize > (size_t)(end - (byte *) item)))
// Copy the staged key, then read its data directly into place after it.
88 memcpy(end, &key, ksize);
90 breadb(in, end + ksize_aligned, dsize);
92 item->key = (P(key)*) end;
95 while (P(read_key)(in, &key));
/*
 * Sorting phase: sort the item descriptors (pointers only, so records
 * are never moved).
 */
98 uns count = last_item - item_array;
99 SORT_XTRACE(3, "s-internal: Sorting %u items", count);
100 P(array_sort)(count, item_array);
/*
 * Writing phase: emit the sorted run, merging runs of equal keys through
 * P(write_merged) when duplicates are adjacent after the sort.
 */
102 SORT_XTRACE(3, "s-internal: Writing");
105 struct fastbuf *out = sbuck_write(bout);
107 uns merged UNUSED = 0;
108 for (item = item_array; item < last_item; item++)
// A duplicate key follows: gather the whole equal-key group and merge it.
111 if (item < last_item - 1 && !P(compare)(item->key, item[1].key))
113 // Rewrite the item structures with just pointers to keys and place
114 // pointers to data in the secondary array.
// Reuses the item array in place for key pointers; the data-pointer array
// lives in the other half of the big buffer, which is free at this point.
115 P(key) **key_array = (void *) item;
116 void **data_array = (void **) ctx->big_buf_half;
117 key_array[0] = item[0].key;
118 data_array[0] = P(internal_get_data)(key_array[0]);
120 for (cnt=1; item+cnt < last_item && !P(compare)(key_array[0], item[cnt].key); cnt++)
122 key_array[cnt] = item[cnt].key;
123 data_array[cnt] = P(internal_get_data)(key_array[cnt]);
// data_array+cnt serves as scratch space for the merge — TODO confirm.
125 P(write_merged)(out, key_array, data_array, cnt, data_array+cnt);
131 #ifdef SORT_ASSERT_UNIQUE
132 ASSERT(item == last_item-1 || P(compare)(item->key, item[1].key) < 0);
// Unique key: write it out verbatim, key first, then its data.
134 P(write_key)(out, item->key);
136 bwrite(out, P(internal_get_data)(item->key), SORT_DATA_SIZE(*item->key));
140 SORT_XTRACE(3, "Merging reduced %u records", merged);
// Non-zero when the input bucket still has unread keys (another pass needed).
143 return ctx->more_keys;
/*
 * Rough estimate of how many bytes of key material one internal pass can
 * absorb: buffer size divided by the per-record footprint (average key
 * size plus one item descriptor), times the average key size.  The two
 * `avg` lines are alternatives of an #if whose directives are missing
 * from this extraction (variable-size vs. fixed-size keys, presumably) —
 * restore before building.  The return-type line is also missing.
 */
147 P(internal_estimate)(struct sort_context *ctx, struct sort_bucket *b UNUSED)
150 uns avg = ALIGN_TO(sizeof(P(key))/4, CPU_STRUCT_ALIGN); // Wild guess...
152 uns avg = ALIGN_TO(sizeof(P(key)), CPU_STRUCT_ALIGN);
154 // We ignore the data part of records, it probably won't make the estimate much worse
155 return (P(internal_buf_size)(ctx) / (avg + sizeof(P(internal_item_t))) * avg);