bin->size = ctx->in_size;
bin->hash_bits = ctx->hash_bits;
clist_add_tail(&ctx->bucket_list, &bin->n);
- SORT_XTRACE(2, "Input size: %s", F_BSIZE(bin));
+ SORT_XTRACE(2, "Input size: %s, %d hash bits", F_BSIZE(bin), bin->hash_bits);
// Create bucket for the output
struct sort_bucket *bout = sbuck_new(ctx);
while (P(read_key)(in, &k))
{
- uns h = P(hash)(&k);
+ P(hash_t) h = P(hash)(&k);
uns i = (h >> bitpos) & mask;
if (unlikely(!outs[i]))
outs[i] = sbuck_write(bouts[i]);
bclose(f);
}
+/*** Simple 8-byte integer keys ***/
+
+/* A minimal fixed-size key record: just the 64-bit integer to sort by. */
+struct key6 {
+ u64 x;
+};
+
+/* Generate a sorter called s6_sort() for plain 64-bit integer keys:
+ * fastbuf input and output, duplicate keys merged (SORT_UNIQUE),
+ * ordering given by the integer value itself (SORT_INT64). */
+#define SORT_KEY_REGULAR struct key6
+#define SORT_PREFIX(x) s6_##x
+#define SORT_INPUT_FB
+#define SORT_OUTPUT_FB
+#define SORT_UNIQUE
+#define SORT_INT64(k) (k).x
+
+#include "lib/sorter/sorter.h"
+
+/* Test the generated s6_sort() on N 64-bit keys.
+ * mode 0 = increasing input, 1 = decreasing, 2 = pseudo-random permutation
+ * (i*K+17 mod N visits every residue, since N is prime and 0 < K < N).
+ * Every key is a multiple of 777777, so the verification pass can
+ * recompute the expected sorted sequence directly. */
+static void
+test_int64(int mode, u64 size)
+{
+ u64 N = size ? nextprime(MIN(size/8, 0xffff0000)) : 0;
+ u64 K = N/4*3;
+ log(L_INFO, ">>> 64-bit integers (%s, N=%llu)", ((char *[]) { "increasing", "decreasing", "random" })[mode], N);
+
+ struct fastbuf *f = bopen_tmp(65536);
+ for (u64 i=0; i<N; i++)
+ bputq(f, 777777*((mode==0) ? i : (mode==1) ? N-1-i : ((u64)i * K + 17) % N));
+ brewind(f);
+
+ start();
+ /* Pass the true maximum key as the integer range.  For N==0 the
+ * expression 777777*(N-1) would wrap around to a huge bogus range,
+ * so guard the empty case explicitly. */
+ f = s6_sort(f, NULL, N ? 777777*(N-1) : 0);
+ stop();
+
+ SORT_XTRACE(2, "Verifying");
+ for (u64 i=0; i<N; i++)
+ {
+ u64 j = bgetq(f);
+ if (777777*i != j)
+ die("Discrepancy: %llu instead of %llu", j, 777777*i);
+ }
+ bclose(f);
+}
+
/*** Main ***/
static void
test_graph(0, size); break;
case 12:
test_graph(1, size); break;
-#define TMAX 13
+ case 13:
+ test_int64(0, size); break;
+ case 14:
+ test_int64(1, size); break;
+ case 15:
+ test_int64(2, size); break;
+#define TMAX 16
}
}
* is supplied automatically and the sorting function gets an extra
* parameter specifying a range of the integers. The better the range
* fits, the faster we sort. Sets up SORT_HASH_xxx automatically.
+ * SORT_INT64(key) the same as SORT_INT, but for 64-bit integers; the range parameter of the sorting function is then a u64.
*
* Hashing (optional, but it can speed sorting up):
*
#error Missing definition of sorting key.
#endif
+/* Type used for hash values: 64-bit keys need 64-bit hashes, otherwise
+ * a plain uns suffices.  SORT_INT64 is treated as SORT_INT from here on. */
+#ifdef SORT_INT64
+typedef u64 P(hash_t);
+#define SORT_INT SORT_INT64
+#else
+typedef uns P(hash_t);
+#endif
+
#ifdef SORT_INT
static inline int P(compare) (P(key) *x, P(key) *y)
{
}
#ifndef SORT_HASH_BITS
-static inline int P(hash) (P(key) *x)
+/* Default hash of an integer key is the key itself; it must be returned
+ * as P(hash_t) so that 64-bit keys (SORT_INT64) are not truncated. */
+static inline P(hash_t) P(hash) (P(key) *x)
{
return SORT_INT((*x));
}
struct fastbuf *out
#endif
#ifdef SORT_INT
- , uns int_range
+ , u64 int_range
#endif
)
{
ctx.radix_split = P(radix_split);
#elif defined(SORT_INT)
ctx.hash_bits = 0;
- while (ctx.hash_bits < 32 && (int_range >> ctx.hash_bits))
+ /* Count the significant bits of int_range; the limit is now 64,
+ * since the range can be a full 64-bit value with SORT_INT64. */
+ while (ctx.hash_bits < 64 && (int_range >> ctx.hash_bits))
ctx.hash_bits++;
ctx.radix_split = P(radix_split);
#endif
#undef SORT_VAR_KEY
#undef SORT_VAR_DATA
#undef SORT_INT
+#undef SORT_INT64
#undef SORT_HASH_BITS
#undef SORT_UNIFY
#undef SORT_UNIFY_WORKSPACE