void *big_buf, *big_buf_half;
size_t big_buf_size, big_buf_half_size;
- int (*custom_presort)(struct fastbuf *dest, byte *buf, size_t bufsize);
+ int (*custom_presort)(struct fastbuf *dest, void *buf, size_t bufsize);
// Take as much as possible from the source bucket, sort it in memory and dump to destination bucket.
// Return 1 if there is more data available in the source bucket.
int (*internal_sort)(struct sort_context *ctx, struct sort_bucket *in, struct sort_bucket *out, struct sort_bucket *out_only);
static int sorter_presort(struct sort_context *ctx, struct sort_bucket *in, struct sort_bucket *out, struct sort_bucket *out_only)
{
- /* FIXME: Mode with no presorting (mostly for debugging) */
sorter_alloc_buf(ctx);
if (in->flags & SBF_CUSTOM_PRESORT)
{
#define ASORT_EXTRA_ARGS , P(internal_item_t) *ary
#include "lib/arraysort.h"
+/* Return a pointer to the variable-size data block stored immediately
+ * after the key in the in-memory item buffer.  With SORT_UNIFY the key
+ * size is padded to CPU_STRUCT_ALIGN, so the data always starts at an
+ * aligned offset. */
+static inline void *P(internal_get_data)(P(key) *key)
+{
+ uns ksize = SORT_KEY_SIZE(*key);
+#ifdef SORT_UNIFY
+ ksize = ALIGN_TO(ksize, CPU_STRUCT_ALIGN);
+#endif
+ return (byte *) key + ksize;
+}
+
static int P(internal)(struct sort_context *ctx, struct sort_bucket *bin, struct sort_bucket *bout, struct sort_bucket *bout_only)
{
sorter_alloc_buf(ctx);
if (sizeof(key) + 1024 + SORT_DATA_SIZE(key) > ctx->big_buf_half_size)
{
SORT_XTRACE("s-internal: Generating a giant run");
- struct fastbuf *out = sorter_open_write(bout); /* FIXME: Using a non-direct buffer would be nice here */
+ struct fastbuf *out = sbuck_write(bout); /* FIXME: Using a non-direct buffer would be nice here */
P(copy_data)(&key, in, out);
bout->runs++;
return 1; // We don't know, but 1 is always safe
bout = bout_only;
struct fastbuf *out = sbuck_write(bout);
bout->runs++;
- /* FIXME: No unification done yet */
+ uns merged = 0;
for (item = item_array; item < last_item; item++)
{
- P(write_key)(out, item->key);
-#ifdef SORT_VAR_DATA
- uns ksize = SORT_KEY_SIZE(*item->key);
#ifdef SORT_UNIFY
- ksize = ALIGN_TO(ksize, CPU_STRUCT_ALIGN);
+ if (item < last_item - 1 && !P(compare)(item->key, item[1].key))
+ {
+ // Rewrite the item structures with just pointers to keys and place
+ // pointers to data in the secondary array.
+ P(key) **key_array = (void *) item;
+ void **data_array = (void **) ctx->big_buf_half;
+ key_array[0] = item[0].key;
+ data_array[0] = P(internal_get_data)(key_array[0]);
+ uns cnt;
+ for (cnt=1; item+cnt < last_item && !P(compare)(key_array[0], item[cnt].key); cnt++)
+ {
+ key_array[cnt] = item[cnt].key;
+ data_array[cnt] = P(internal_get_data)(key_array[cnt]);
+ }
+ P(write_merged)(out, key_array, data_array, cnt, data_array+cnt);
+ item += cnt - 1;
+ merged += cnt - 1;
+ continue;
+ }
#endif
- bwrite(out, (byte *) item->key + ksize, SORT_DATA_SIZE(*item->key));
+#ifdef SORT_ASSERT_UNIQUE
+ ASSERT(item == last_item-1 || P(compare)(item->key, item[1].key) < 0);
+#endif
+ P(write_key)(out, item->key);
+#ifdef SORT_VAR_DATA
+ bwrite(out, P(internal_get_data)(item->key), SORT_DATA_SIZE(*item->key));
#endif
}
+#ifdef SORT_UNIFY
+ SORT_XTRACE("Merging reduced %d records", merged);
+#endif
return ctx->more_keys;
}
}
run_count++;
}
+#ifdef SORT_ASSERT_UNIQUE
+ ASSERT(comp != 0);
+#endif
if (comp LESS 0)
{
P(copy_data)(kin1, fin1, fout1);
run1 = next1 && (P(compare)(kprev1, kin1) LESS 0);
kout = kprev1;
}
-#ifdef SORT_MERGE
+#ifdef SORT_UNIFY
else if (comp == 0)
{
P(key) *mkeys[] = { kin1, kin2 };
run2 = next2 && (P(compare)(kprev2, kin2) LESS 0);
kout = kprev2;
}
-#endif
-#ifdef SORT_ASSERT_UNIQUE
- else if (unlikely(comp == 0))
- ASSERT(0);
#endif
else
{
uns x;
};
+/* write_merged hook for the test sorter: given n in-memory records whose
+ * keys compare equal, emit a single record — the first key plus its
+ * fixed 5-byte data block; the other n-1 duplicates are dropped.
+ * `buf' (scratch space guaranteed to hold all n records) is unused here. */
+static inline void s_write_merged(struct fastbuf *f, struct key **k, void **d, uns n, void *buf)
+{
+ bwrite(f, k[0], sizeof(struct key));
+ bwrite(f, d[0], 5);
+}
+
+/* copy_merged hook for the test sorter: keys are in memory but the data
+ * still sit in their source fastbufs.  Write the first key and copy its
+ * 5 data bytes to dest, then skip the 5-byte data of each remaining
+ * duplicate so every source stream stays positioned at its next record. */
+static inline void s_copy_merged(struct key **keys, struct fastbuf **data, uns n, struct fastbuf *dest)
+{
+ bwrite(dest, keys[0], sizeof(struct key));
+ bbcopy(data[0], dest, 5);
+ for (uns i=1; i<n; i++)
+ bskip(data[i], 5);
+}
+
#define SORT_KEY_REGULAR struct key
#define SORT_PREFIX(x) s_##x
#define SORT_INPUT_FB
#define SORT_OUTPUT_FB
+//#define SORT_KEY_SIZE(k) 4
+#define SORT_DATA_SIZE(k) 5
+//#define SORT_UNIQUE
+#define SORT_UNIFY
#define SORT_INT(k) (k).x
#include "lib/sorter/sorter.h"
struct fastbuf *f = bopen(argv[optind], O_RDWR | O_CREAT | O_TRUNC, 65536);
#define N 259309
#define K 199483
- for (uns i=0; i<N; i++)
- bputl(f, ((u64)i * K + 17) % N);
+ for (uns i=0; i<2*N; i++)
+ {
+ bputl(f, ((u64)i * K + 17) % N);
+ bputs(f, "12345");
+ bputl(f, ((u64)i * K + 17) % N);
+ bputs(f, "12345");
+ }
brewind(f);
log(L_INFO, "Sorting");
uns j = bgetl(f);
if (i != j)
die("Discrepancy: %d instead of %d", j, i);
+ for (uns i='1'; i<='5'; i++)
+ if (bgetc(f) != i)
+ ASSERT(0);
}
bclose(f);
* Unification:
*
* SORT_UNIFY merge items with identical keys, needs the following functions:
- * void PREFIX_write_merged(struct fastbuf *f, SORT_KEY **keys, uns n, byte *buf)
+ * void PREFIX_write_merged(struct fastbuf *f, SORT_KEY **keys, void **data, uns n, void *buf)
* takes n records in memory with keys which compare equal and writes
- * a single record to the given fastbuf. Data for each key can
- * be accessed by the SORT_GET_DATA(*key) macro. `buf' points
- * to a buffer which is guaranteed to hold all given records.
+ * a single record to the given fastbuf. `buf' points to a buffer which
+ * is guaranteed to hold all given records.
 * void PREFIX_copy_merged(SORT_KEY **keys, struct fastbuf **data, uns n, struct fastbuf *dest)
 *			takes n records whose keys are in memory and whose data are
 *			readable from the given fastbufs, writes a single merged record
 *			to `dest', and consumes the data of all n records from their fastbufs.
* SORT_INPUT_FILE file of a given name
* SORT_INPUT_FB fastbuf stream
* SORT_INPUT_PRESORT custom presorter. Calls function
- * int PREFIX_presort(struct fastbuf *dest, byte *buf, size_t bufsize);
+ * int PREFIX_presort(struct fastbuf *dest, void *buf, size_t bufsize);
* to get successive batches of pre-sorted data.
* The function is passed a page-aligned presorting buffer.
* It returns 1 on success or 0 on EOF.