/*
* Sherlock Library -- Universal Hash Table
*
- * (c) 2002 Martin Mares <mj@ucw.cz>
+ * (c) 2002--2004 Martin Mares <mj@ucw.cz>
+ * (c) 2002 Robert Spalek <robert@ucw.cz>
+ *
+ * This software may be freely distributed and used according to the terms
+ * of the GNU Lesser General Public License.
*/
/*
*
* <always defined> init() -- initialize the hash table.
* HASH_WANT_CLEANUP cleanup() -- deallocate the hash table.
- * HASH_WANT_FIND node *find(key) -- find node with the specified
+ * HASH_WANT_FIND node *find(key) -- find first node with the specified
* key, return NULL if no such node exists.
+ * HASH_WANT_FIND_NEXT node *find_next(node *start) -- find next node with the
+ * specified key, return NULL if no such node exists.
* HASH_WANT_NEW node *new(key) -- create new node with given key.
* Doesn't check whether it already exists.
* HASH_WANT_LOOKUP node *lookup(key) -- find node with given key,
* ... and a couple of extra parameters:
*
* HASH_NOCASE string comparisons should be case-insensitive.
- * HASH_DEFAULT_SIZE=n initially, use hash table of `n' entries.
- * The `n' has to be a power of two.
+ * HASH_DEFAULT_SIZE=n initially, use hash table of approx. `n' entries.
* HASH_CONSERVE_SPACE use as little space as possible.
* HASH_FN_BITS=n The hash function gives only `n' significant bits.
* HASH_ATOMIC_TYPE=t Atomic values are of type `t' instead of int.
* HASH_USE_POOL=pool Allocate all nodes from given mempool.
* Collides with delete/remove functions.
+ * HASH_AUTO_POOL=size Create a pool of the given block size automatically.
*
* You also get an iterator macro at no extra charge:
*
* }
* HASH_END_FOR;
*
- * Then include <lib/hashtable.h> and voila, you have a hash table
+ * Then include "lib/hashtable.h" and voila, you have a hash table
* suiting all your needs (at least those which you've revealed :) ).
*
* After including this file, all parameter macros are automatically
/* Hash table descriptor: bucket counts, resize thresholds and the bucket array.
 * NOTE(review): hash_mask is dropped in this change — table sizes are no longer
 * powers of two (see alloc_table below), so slots are selected by modulo. */
struct P(table) {
uns hash_size;
- uns hash_count, hash_max, hash_min, hash_hard_max, hash_mask;
+ uns hash_count, hash_max, hash_min, hash_hard_max;
P(bucket) **ht;
} P(table);
/* Node allocation: one of four strategies, selected at include time by the
 * parameter macros.  Only the default branch defines P(free) — the pool-backed
 * branches cannot free individual nodes, which is why HASH_USE_POOL is
 * documented above as colliding with the delete/remove functions. */
#include <stdlib.h>
-#ifndef HASH_GIVE_ALLOC
-#ifdef HASH_USE_POOL
-
-static inline void * P(alloc) (unsigned int size)
-{ return mp_alloc_fast(HASH_USE_POOL, size); }
+#ifdef HASH_GIVE_ALLOC
+/* If the caller has requested to use his own allocation functions, do so */
+static inline void P(init_alloc) (void) { }
+static inline void P(cleanup_alloc) (void) { }
+
+#elif defined(HASH_USE_POOL)
+/* If the caller has requested to use his mempool, do so */
+#include "lib/mempool.h"
+static inline void * P(alloc) (unsigned int size) { return mp_alloc_fast(HASH_USE_POOL, size); }
+static inline void P(init_alloc) (void) { }
+static inline void P(cleanup_alloc) (void) { }
+
+#elif defined(HASH_AUTO_POOL)
+/* Use our own pools */
+#include "lib/mempool.h"
+static struct mempool *P(pool);
+static inline void * P(alloc) (unsigned int size) { return mp_alloc_fast(P(pool), size); }
+static inline void P(init_alloc) (void) { P(pool) = mp_new(HASH_AUTO_POOL); }
+/* Destroying the pool releases all nodes at once. */
+static inline void P(cleanup_alloc) (void) { mp_delete(P(pool)); }
#else
+/* The default allocation method */
+static inline void * P(alloc) (unsigned int size) { return xmalloc(size); }
+static inline void P(free) (void *x) { xfree(x); }
+static inline void P(init_alloc) (void) { }
+static inline void P(cleanup_alloc) (void) { }
-static inline void * P(alloc) (unsigned int size)
-{ return xmalloc(size); }
-
-static inline void P(free) (void *x)
-{ xfree(x); }
-
-#endif
#endif
#ifndef HASH_DEFAULT_SIZE
/* Allocate and zero the bucket array for T.hash_size slots and recompute the
 * grow/shrink thresholds (hash_max / hash_min) used by the resize logic. */
static void P(alloc_table) (void)
{
+ /* Round the requested size up to a prime, so that `hash % hash_size'
+  * distributes nodes well; the old code required a power of two and masked. */
+ T.hash_size = nextprime(T.hash_size);
T.ht = xmalloc(sizeof(void *) * T.hash_size);
bzero(T.ht, sizeof(void *) * T.hash_size);
- T.hash_max = T.hash_size * 2;
- if (T.hash_max > T.hash_hard_max)
- T.hash_max = T.hash_hard_max;
- T.hash_min = T.hash_size / 4;
- T.hash_mask = T.hash_size - 1;
+ /* Grow when the node count reaches twice the table size; once doubling
+  * would exceed hash_hard_max, never grow again (~0U = unreachable). */
+ if (2*T.hash_size < T.hash_hard_max)
+ T.hash_max = 2*T.hash_size;
+ else
+ T.hash_max = ~0U;
+ /* Shrink when the count drops below a quarter of the size, but never
+  * shrink the table below the configured default size. */
+ if (T.hash_size/2 > HASH_DEFAULT_SIZE)
+ T.hash_min = T.hash_size/4;
+ else
+ T.hash_min = 0;
}
static void P(init) (void)
T.hash_hard_max = 1 << 28;
#endif
P(alloc_table)();
+ P(init_alloc)();
}
#ifdef HASH_WANT_CLEANUP
P(free)(b);
}
#endif
+ P(cleanup_alloc)();
xfree(T.ht);
}
#endif
while (b)
{
nb = b->next;
- h = P(bucket_hash)(b) & T.hash_mask;
+ h = P(bucket_hash)(b) % T.hash_size;
b->next = newt[h];
newt[h] = b;
b = nb;
static P(node) * P(find) (HASH_KEY_DECL)
{
uns h0 = P(hash) (HASH_KEY( ));
- uns h = h0 & T.hash_mask;
+ uns h = h0 % T.hash_size;
P(bucket) *b;
for (b=T.ht[h]; b; b=b->next)
}
#endif
+#ifdef HASH_WANT_FIND_NEXT
+/* find_next(start) -- return the next node whose key equals start's key,
+ * or NULL when no further duplicate exists.  Equal keys hash equally and
+ * therefore live in the same bucket chain, so scanning the remainder of
+ * start's chain is sufficient.  Pair with find() to visit all duplicates. */
+static P(node) * P(find_next) (P(node) *start)
+{
+#ifndef HASH_CONSERVE_SPACE
+ /* Stored per-bucket hashes allow cheap rejection before the full
+  * key comparison; with HASH_CONSERVE_SPACE they are not kept. */
+ uns h0 = P(hash) (HASH_KEY(start->));
+#endif
+ /* Step from the embedded node back to its enclosing bucket. */
+ P(bucket) *b = SKIP_BACK(P(bucket), n, start);
+
+ for (b=b->next; b; b=b->next)
+ {
+ if (
+#ifndef HASH_CONSERVE_SPACE
+ b->hash == h0 &&
+#endif
+ P(eq)(HASH_KEY(start->), HASH_KEY(b->n.)))
+ return &b->n;
+ }
+ return NULL;
+}
+#endif
+
#ifdef HASH_WANT_NEW
static P(node) * P(new) (HASH_KEY_DECL)
{
P(bucket) *b;
h0 = P(hash) (HASH_KEY( ));
- h = h0 & T.hash_mask;
+ h = h0 % T.hash_size;
b = P(alloc) (sizeof(struct P(bucket)) + HASH_EXTRA_SIZE(HASH_KEY( )));
b->next = T.ht[h];
T.ht[h] = b;
static P(node) * P(lookup) (HASH_KEY_DECL)
{
uns h0 = P(hash) (HASH_KEY( ));
- uns h = h0 & T.hash_mask;
+ uns h = h0 % T.hash_size;
P(bucket) *b;
for (b=T.ht[h]; b; b=b->next)
static int P(delete) (HASH_KEY_DECL)
{
uns h0 = P(hash) (HASH_KEY( ));
- uns h = h0 & T.hash_mask;
+ uns h = h0 % T.hash_size;
P(bucket) *b, **bb;
for (bb=&T.ht[h]; b=*bb; bb=&b->next)
{
P(bucket) *x = SKIP_BACK(struct P(bucket), n, n);
uns h0 = P(bucket_hash)(x);
- uns h = h0 & T.hash_mask;
+ uns h = h0 % T.hash_size;
P(bucket) *b, **bb;
for (bb=&T.ht[h]; (b=*bb) && b != x; bb=&b->next)
/* Iterator: HASH_FOR_ALL(prefix, var) { ...use var... } HASH_END_FOR;
 * walks every slot of the table and every bucket chained in it.
 * NOTE(review): token pasting now uses the library-wide GLUE_ macro;
 * the private HASH_GLUE helper below is removed. */
#define HASH_FOR_ALL(h_px, h_var) \
do { \
uns h_slot; \
- struct HASH_GLUE(h_px,bucket) *h_buck; \
- for (h_slot=0; h_slot < HASH_GLUE(h_px,table).hash_size; h_slot++) \
- for (h_buck = HASH_GLUE(h_px,table).ht[h_slot]; h_buck; h_buck = h_buck->next) \
+ struct GLUE_(h_px,bucket) *h_buck; \
+ for (h_slot=0; h_slot < GLUE_(h_px,table).hash_size; h_slot++) \
+ for (h_buck = GLUE_(h_px,table).ht[h_slot]; h_buck; h_buck = h_buck->next) \
{ \
- HASH_GLUE(h_px,node) *h_var = &h_buck->n;
+ GLUE_(h_px,node) *h_var = &h_buck->n;
/* HASH_END_FOR closes both the inner loops' block and the do-while. */
#define HASH_END_FOR } } while(0)
#define HASH_BREAK
#define HASH_CONTINUE continue
-#define HASH_GLUE(x,y) x##_##y
#endif
/* Undefine all parameter macros so this template header can be included
 * again with a different configuration (cf. the note in the header above). */
#undef HASH_NODE
#undef HASH_PREFIX
#undef HASH_USE_POOL
+#undef HASH_AUTO_POOL
#undef HASH_WANT_CLEANUP
#undef HASH_WANT_DELETE
#undef HASH_WANT_FIND
+#undef HASH_WANT_FIND_NEXT
#undef HASH_WANT_LOOKUP
#undef HASH_WANT_NEW
#undef HASH_WANT_REMOVE