struct mempool *pool;
clist bucket_list;
- void *big_buf, *big_buf_half;
- size_t big_buf_size, big_buf_half_size;
+ void *big_buf;
+ size_t big_buf_size;
int (*custom_presort)(struct fastbuf *dest, void *buf, size_t bufsize);
* of the GNU Lesser General Public License.
*/
+#include "lib/stkstring.h"
+
#define ASORT_PREFIX(x) SORT_PREFIX(array_##x)
#define ASORT_KEY_TYPE P(key)
#define ASORT_ELT(i) ary[i]
#define ASORT_EXTRA_ARGS , P(key) *ary
#include "lib/arraysort.h"
-static int P(internal_num_keys)(struct sort_context *ctx)
+/*
+ * This is a more efficient implementation of the internal sorter,
+ * which runs under the following assumptions:
+ *
+ * - the keys have fixed (and small) size
+ * - no data are present after the key
+ * - unification does not require any workspace
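+ *
+ * Under these assumptions, the keys can be read into a plain array and
+ * sorted in place, with no per-record item structures needed.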
+ */
+
+static size_t P(internal_workspace)(void)
{
- size_t bufsize = ctx->big_buf_half_size;
-#ifdef SORT_UNIFY
- // When we promise unification, we have to reduce the number of records
- // to be sure that both pointers and merged records fit in the 2nd half
- // of the big_buf. So we eat as much memory as s-internal.h, but still
- // we are faster.
- u64 maxkeys = bufsize / (sizeof(P(key)) + sizeof(void *));
-#else
- u64 maxkeys = bufsize / sizeof(P(key));
+ size_t workspace = 0;
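+ // Unification rewrites each run of equal keys to an array of pointers
+ // to those keys (see the merging loop below), so it needs one pointer
+ // of workspace per record.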
+#ifdef SORT_UNIFY
+ workspace = sizeof(P(key) *);
+#endif
+#if 0 // FIXME: Workspace for radix-sort if needed
+ workspace = MAX(workspace, sizeof(P(key)));
#endif
+ return workspace;
+}
+
+static uns P(internal_num_keys)(struct sort_context *ctx)
+{
+ size_t bufsize = ctx->big_buf_size;
+ size_t workspace = P(internal_workspace)();
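+ // If any workspace is needed, one page is set aside so that the
+ // workspace can start at a page boundary just past the key array.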
+ if (workspace)
+ bufsize -= CPU_PAGE_SIZE;
+ u64 maxkeys = bufsize / (sizeof(P(key)) + workspace);
return MIN(maxkeys, ~0U); // The number of records must fit in uns
}
P(key) *buf = ctx->big_buf;
uns maxkeys = P(internal_num_keys)(ctx);
- SORT_XTRACE(3, "s-fixint: Reading (maxkeys=%u)", maxkeys);
+ SORT_XTRACE(4, "s-fixint: Reading (maxkeys=%u)", maxkeys);
uns n = 0;
while (n < maxkeys && P(read_key)(in, &buf[n]))
n++;
if (!n)
return 0;
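+ // The workspace begins at the first page boundary past the keys;
+ // internal_num_keys() has reserved a page for this alignment.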
+ void *workspace UNUSED = ALIGN_PTR(&buf[n], CPU_PAGE_SIZE);
- SORT_XTRACE(3, "s-fixint: Sorting %u items", n);
+ SORT_XTRACE(3, "s-fixint: Sorting %u items (%s items, %s workspace)",
+ n,
+ stk_fsize(n * sizeof(P(key))),
+ stk_fsize(n * P(internal_workspace)()));
timestamp_t timer;
init_timer(&timer);
P(array_sort)(n, buf);
ctx->total_int_time += get_timer(&timer);
- SORT_XTRACE(3, "s-fixint: Writing");
+ SORT_XTRACE(4, "s-fixint: Writing");
if (n < maxkeys)
bout = bout_only;
struct fastbuf *out = sbuck_write(bout);
#ifdef SORT_UNIFY
if (i < n-1 && !P(compare)(&buf[i], &buf[i+1]))
{
- P(key) **keys = ctx->big_buf_half;
+ P(key) **keys = workspace;
uns n = 2;
keys[0] = &buf[i];
keys[1] = &buf[i+1];
keys[n] = &buf[i+n];
n++;
}
- P(write_merged)(out, keys, NULL, n, keys+n);
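+	  // Unification is assumed not to need any workspace for the merged
+	  // record (see the assumptions above), so no buffer is passed.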
+ P(write_merged)(out, keys, NULL, n, NULL);
merged += n - 1;
i += n - 1;
continue;
* of the GNU Lesser General Public License.
*/
+#include "lib/stkstring.h"
+
typedef struct {
P(key) *key;
// FIXME: Add the hash here to save cache misses
#define ASORT_EXTRA_ARGS , P(internal_item_t) *ary
#include "lib/arraysort.h"
+/*
+ * The big_buf has the following layout:
+ *
+ * +-------------------------------------------------------------------------------+
+ * | array of internal_item's |
+ * +-------------------------------------------------------------------------------+
+ * | padding to make the following part page-aligned |
+ * +--------------------------------+----------------------------------------------+
+ * | shadow copy of item array | array of pointers to data for write_merged() |
+ * | used if radix-sorting +----------------------------------------------+
+ * | | workspace for write_merged() |
+ * +--------------------------------+----------------------------------------------+
+ * | +---------+ |
+ * | | key | |
+ * | +---------+ |
+ * | sequence of | padding | |
+ * | items +---------+ |
+ * | | data | |
+ * | +---------+ |
+ * | | padding | |
+ * | +---------+ |
+ * +-------------------------------------------------------------------------------+
+ *
+ * (the regions shown in different columns are never accessed at the same
+ * time, so they can share a single buffer)
+ */
+
static inline void *P(internal_get_data)(P(key) *key)
{
uns ksize = SORT_KEY_SIZE(*key);
return (byte *) key + ksize;
}
-static size_t P(internal_buf_size)(struct sort_context *ctx)
+static inline size_t P(internal_workspace)(P(key) *key UNUSED)
{
- size_t bufsize = ctx->big_buf_half_size; /* FIXME: In some cases, we can use the whole buffer */
-#ifdef CPU_64BIT_POINTERS
- bufsize = MIN((u64)bufsize, (u64)~0U * sizeof(P(internal_item_t))); // The number of records must fit in uns
+ size_t ws = 0;
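+ // write_merged() is given an array with one data pointer per record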
+#ifdef SORT_UNIFY
+ ws += sizeof(void *);
+#endif
+#ifdef SORT_UNIFY_WORKSPACE
+ ws += SORT_UNIFY_WORKSPACE(*key);
+#endif
+#if 0 /* FIXME: Shadow copy if radix-sorting */
+ ws = MAX(ws, sizeof(P(key) *));
#endif
- return bufsize;
+ return ws;
}
static int P(internal)(struct sort_context *ctx, struct sort_bucket *bin, struct sort_bucket *bout, struct sort_bucket *bout_only)
else if (!P(read_key)(in, &key))
return 0;
- size_t bufsize = P(internal_buf_size)(ctx);
+ size_t bufsize = ctx->big_buf_size;
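+ // If even a single record does not fit into the buffer together with its
+ // workspace, fall back to a giant run; the two extra pages conservatively
+ // cover the page reserved for workspace alignment and rounding overhead.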
#ifdef SORT_VAR_DATA
- if (sizeof(key) + 1024 + SORT_DATA_SIZE(key) > bufsize)
+ if (sizeof(key) + 2*CPU_PAGE_SIZE + SORT_DATA_SIZE(key) + P(internal_workspace)(&key) > bufsize)
{
SORT_XTRACE(3, "s-internal: Generating a giant run");
struct fastbuf *out = sbuck_write(bout);
}
#endif
- SORT_XTRACE(3, "s-internal: Reading (bufsize=%zd)", bufsize);
+ SORT_XTRACE(4, "s-internal: Reading");
P(internal_item_t) *item_array = ctx->big_buf, *item = item_array, *last_item;
byte *end = (byte *) ctx->big_buf + bufsize;
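+ // One page is set aside so that the workspace can later be aligned
+ // to a page boundary (see the ALIGN_PTR below).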
+ size_t remains = bufsize - CPU_PAGE_SIZE;
do
{
uns ksize = SORT_KEY_SIZE(key);
#endif
uns dsize = SORT_DATA_SIZE(key);
uns recsize = ALIGN_TO(ksize_aligned + dsize, CPU_STRUCT_ALIGN);
- if (unlikely(sizeof(P(internal_item_t)) + recsize > (size_t)(end - (byte *) item)))
+ size_t totalsize = recsize + sizeof(P(internal_item_t)) + P(internal_workspace)(&key);
+ if (unlikely(totalsize > remains
+#ifdef CPU_64BIT_POINTERS
+ || item >= item_array + ~0U // The number of items must fit in an uns
+#endif
+ ))
{
ctx->more_keys = 1;
*keybuf = key;
break;
}
+ remains -= totalsize;
end -= recsize;
memcpy(end, &key, ksize);
#ifdef SORT_VAR_DATA
last_item = item;
uns count = last_item - item_array;
- SORT_XTRACE(3, "s-internal: Sorting %u items", count);
+ void *workspace UNUSED = ALIGN_PTR(last_item, CPU_PAGE_SIZE);
+ SORT_XTRACE(3, "s-internal: Read %u items (%s items, %s workspace, %s data)",
+ count,
+ stk_fsize((byte*)last_item - (byte*)item_array),
+ stk_fsize(end - (byte*)last_item - remains),
+ stk_fsize((byte*)ctx->big_buf + bufsize - end));
timestamp_t timer;
init_timer(&timer);
P(array_sort)(count, item_array);
ctx->total_int_time += get_timer(&timer);
- SORT_XTRACE(3, "s-internal: Writing");
+ SORT_XTRACE(4, "s-internal: Writing");
if (!ctx->more_keys)
bout = bout_only;
struct fastbuf *out = sbuck_write(bout);
if (item < last_item - 1 && !P(compare)(item->key, item[1].key))
{
// Rewrite the item structures with just pointers to keys and place
- // pointers to data in the secondary array.
+ // pointers to data in the workspace.
P(key) **key_array = (void *) item;
- void **data_array = (void **) ctx->big_buf_half;
+ void **data_array = workspace;
key_array[0] = item[0].key;
data_array[0] = P(internal_get_data)(key_array[0]);
uns cnt;
uns avg = ALIGN_TO(sizeof(P(key)), CPU_STRUCT_ALIGN);
#endif
// We ignore the data part of records; it probably won't make the estimate much worse
- return (P(internal_buf_size)(ctx) / (avg + sizeof(P(internal_item_t))) * avg);
+ size_t bufsize = ctx->big_buf_size;
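+ // When a workspace is needed, it can consume memory comparable to the
+ // records themselves (e.g., when SORT_UNIFY_WORKSPACE matches the data
+ // size), so halve the estimate to stay conservative.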
+#ifdef SORT_UNIFY_WORKSPACE // FIXME: Or if radix-sorting
+ bufsize /= 2;
+#endif
+ return (bufsize / (avg + sizeof(P(internal_item_t))) * avg);
}
void
sorter_prepare_buf(struct sort_context *ctx)
{
- u64 bs = MAX(sorter_bufsize/2, 1);
+ u64 bs = sorter_bufsize;
bs = ALIGN_TO(bs, (u64)CPU_PAGE_SIZE);
- ctx->big_buf_size = 2*bs;
- ctx->big_buf_half_size = bs;
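+ // Use at least two pages: one for the records and one reserved for
+ // page-aligning the workspace.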
+ bs = MAX(bs, 2*(u64)CPU_PAGE_SIZE);
+ ctx->big_buf_size = bs;
}
void
if (ctx->big_buf)
return;
ctx->big_buf = big_alloc(ctx->big_buf_size);
- ctx->big_buf_half = ((byte*) ctx->big_buf) + ctx->big_buf_half_size;
- SORT_XTRACE(2, "Allocated sorting buffer (2*%s)", stk_fsize(ctx->big_buf_half_size));
+ SORT_XTRACE(2, "Allocated sorting buffer (%s)", stk_fsize(ctx->big_buf_size));
}
void
#define SORT_PREFIX(x) s5_##x
#define SORT_DATA_SIZE(k) (4*(k).cnt)
#define SORT_UNIFY
+#define SORT_UNIFY_WORKSPACE(k) SORT_DATA_SIZE(k)
#define SORT_INPUT_PRESORT
#define SORT_OUTPUT_THIS_FB
#define SORT_INT(k) (k).x
#define SORT_PREFIX(x) s5b_##x
#define SORT_DATA_SIZE(k) (4*(k).cnt)
#define SORT_UNIFY
+#define SORT_UNIFY_WORKSPACE(k) SORT_DATA_SIZE(k)
#define SORT_INPUT_FB
#define SORT_OUTPUT_THIS_FB
#define SORT_INT(k) (k).x
* void PREFIX_write_merged(struct fastbuf *f, SORT_KEY **keys, void **data, uns n, void *buf)
* takes n records in memory with keys which compare equal and writes
* a single record to the given fastbuf. `buf' points to a buffer which
- * is guaranteed to hold all given records.
+ * is guaranteed to hold the sum of workspace requirements (see below)
+ * over all given records.
* void PREFIX_copy_merged(SORT_KEY **keys, struct fastbuf **data, uns n, struct fastbuf *dest)
* takes n records with keys in memory and data in fastbufs and writes
* a single record.
+ * SORT_UNIFY_WORKSPACE(key) gets a key and returns the amount of workspace required when merging
+ * the given record. Defaults to 0.
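+ * For example, a merge which combines the data of all records with
+ * equal keys can define SORT_UNIFY_WORKSPACE(k) as SORT_DATA_SIZE(k),
+ * as the s5 and s5b tests do, and assemble the result in `buf'.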
*
* Input (choose one of these):
*
#endif
}
-#if defined(SORT_VAR_KEY) || defined(SORT_VAR_DATA)
+#if defined(SORT_VAR_KEY) || defined(SORT_VAR_DATA) || defined(SORT_UNIFY_WORKSPACE)
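+// Sorters which define SORT_UNIFY_WORKSPACE cannot use s-fixint.h, because
+// it assumes that unification needs no workspace.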
#include "lib/sorter/s-internal.h"
#else
#include "lib/sorter/s-fixint.h"
#undef SORT_INT
#undef SORT_HASH_BITS
#undef SORT_UNIFY
+#undef SORT_UNIFY_WORKSPACE
#undef SORT_INPUT_FILE
#undef SORT_INPUT_FB
#undef SORT_INPUT_PRESORT