* HASH_WANT_LOOKUP node *lookup(key) -- find node with given key;
* if it doesn't exist, create it. Defining
* HASH_GIVE_INIT_DATA is strongly recommended.
- * HASH_LOOKUP_DETECT_NEW
- * the prototype for lookup is changed to node *lookup(key, int *new_item)
- * new_item must not be NULL and returns 1 whether lookup
- * just created a new item in the hashtable or 0 otherwise
+ * Use HASH_LOOKUP_DETECT_NEW if you want to know
+ * whether the node was newly created or not.
* HASH_WANT_DELETE int delete(key) -- delete and deallocate node
* with given key. Returns success.
* HASH_WANT_REMOVE remove(node *) -- delete and deallocate given node.
*
* You can also supply several functions:
*
- * HASH_GIVE_HASHFN unsigned int hash(key) -- calculate hash value of key.
+ * HASH_GIVE_HASHFN uint hash(key) -- calculate hash value of key.
* We have sensible default hash functions for strings
* and integers.
* HASH_GIVE_EQ int eq(key1, key2) -- return whether keys are equal.
* and static strings, strcpy for end-allocated strings.
* HASH_GIVE_INIT_DATA void init_data(node *) -- initialize data fields in a
* newly created node. Very useful for lookup operations.
- * HASH_GIVE_ALLOC void *alloc(unsigned int size) -- allocate space for
+ * HASH_GIVE_ALLOC void *alloc(uint size) -- allocate space for
* a node. Default is xmalloc() or pooled allocation, depending
* on HASH_USE_POOL, HASH_AUTO_POOL, HASH_USE_ELTPOOL
* and HASH_AUTO_ELTPOOL switches. void free(void *) -- the converse.
- * HASH_GIVE_TABLE_ALLOC void *table_alloc(unsigned int size), void *table_free(void *)
+ * HASH_GIVE_TABLE_ALLOC void *table_alloc(uint size), void *table_free(void *)
* Allocate or free space for the table itself. Default is xmalloc()
* or the functions defined by HASH_GIVE_ALLOC if HASH_TABLE_ALLOC is set.
*
* HASH_TABLE_DYNAMIC Support multiple hash tables; the first parameter of all
* hash table operations is struct HASH_PREFIX(table) *.
* HASH_TABLE_VARS Extra variables to be defined in table structure
+ * HASH_LOOKUP_DETECT_NEW
+ * the prototype for lookup is changed to node *lookup(key, int *new_item)
+ * new_item must not be NULL; it is set to 1 if lookup
+ * has just created a new item in the hashtable, or to 0 otherwise.
*
* You also get an iterator macro at no extra charge:
*
typedef struct P(bucket) {
struct P(bucket) *next;
#ifndef HASH_CONSERVE_SPACE
- uns hash;
+ uint hash;
#endif
P(node) n;
} P(bucket);
#ifdef HASH_TABLE_VARS
HASH_TABLE_VARS
#endif
- uns hash_size;
- uns hash_count, hash_max, hash_min, hash_hard_max;
+ uint hash_size;
+ uint hash_count, hash_max, hash_min, hash_hard_max;
P(bucket) **ht;
#ifdef HASH_AUTO_POOL
struct mempool *pool;
#ifndef HASH_GIVE_HASHFN
#define HASH_GIVE_HASHFN
- static inline uns P(hash) (TAUC char *k)
+ static inline uint P(hash) (TAUC char *k)
{
# ifdef HASH_NOCASE
return hash_string_nocase(k);
#elif defined(HASH_USE_POOL)
/* If the caller has requested to use his mempool, do so */
#include <ucw/mempool.h>
-static inline void * P(alloc) (TAUC unsigned int size) { return mp_alloc_fast(HASH_USE_POOL, size); }
+static inline void * P(alloc) (TAUC uint size) { return mp_alloc_fast(HASH_USE_POOL, size); }
static inline void P(free) (TAUC void *x UNUSED) { }
static inline void P(init_alloc) (TAU) { }
static inline void P(cleanup_alloc) (TAU) { }
#elif defined(HASH_AUTO_POOL)
/* Use our own pools */
#include <ucw/mempool.h>
-static inline void * P(alloc) (TAUC unsigned int size) { return mp_alloc_fast(T.pool, size); }
+static inline void * P(alloc) (TAUC uint size) { return mp_alloc_fast(T.pool, size); }
static inline void P(free) (TAUC void *x UNUSED) { }
static inline void P(init_alloc) (TAU) { T.pool = mp_new(HASH_AUTO_POOL); }
static inline void P(cleanup_alloc) (TAU) { mp_delete(T.pool); }
#elif defined(HASH_USE_ELTPOOL)
/* If the caller has requested to use his eltpool, do so */
#include <ucw/eltpool.h>
-static inline void * P(alloc) (TAUC unsigned int size UNUSED) { ASSERT(size <= (HASH_USE_ELTPOOL)->elt_size); return ep_alloc(HASH_USE_ELTPOOL); }
+static inline void * P(alloc) (TAUC uint size UNUSED) { ASSERT(size <= (HASH_USE_ELTPOOL)->elt_size); return ep_alloc(HASH_USE_ELTPOOL); }
static inline void P(free) (TAUC void *x) { ep_free(HASH_USE_ELTPOOL, x); }
static inline void P(init_alloc) (TAU) { }
static inline void P(cleanup_alloc) (TAU) { }
#elif defined(HASH_AUTO_ELTPOOL)
/* Use our own eltpools */
#include <ucw/eltpool.h>
-static inline void * P(alloc) (TAUC unsigned int size UNUSED) { return ep_alloc(T.eltpool); }
+static inline void * P(alloc) (TAUC uint size UNUSED) { return ep_alloc(T.eltpool); }
static inline void P(free) (TAUC void *x) { ep_free(T.eltpool, x); }
static inline void P(init_alloc) (TAU) { T.eltpool = ep_new(sizeof(P(bucket)), HASH_AUTO_ELTPOOL); }
static inline void P(cleanup_alloc) (TAU) { ep_delete(T.eltpool); }
#else
/* The default allocation method */
-static inline void * P(alloc) (TAUC unsigned int size) { return xmalloc(size); }
+static inline void * P(alloc) (TAUC uint size) { return xmalloc(size); }
static inline void P(free) (TAUC void *x) { xfree(x); }
static inline void P(init_alloc) (TAU) { }
static inline void P(cleanup_alloc) (TAU) { }
#ifdef HASH_USE_ELTPOOL
#error HASH_TABLE_ALLOC not supported in combination with eltpools
#endif
-static inline void * P(table_alloc) (TAUC unsigned int size) { return P(alloc)(TTC size); }
+static inline void * P(table_alloc) (TAUC uint size) { return P(alloc)(TTC size); }
static inline void P(table_free) (TAUC void *x) { P(free)(TTC x); }
#else
-static inline void * P(table_alloc) (TAUC unsigned int size) { return xmalloc(size); }
+static inline void * P(table_alloc) (TAUC uint size) { return xmalloc(size); }
static inline void P(table_free) (TAUC void *x) { xfree(x); }
#endif
#endif
#ifdef HASH_ZERO_FILL
-static inline void * P(new_bucket)(TAUC uns size)
+static inline void * P(new_bucket)(TAUC uint size)
{
byte *buck = P(alloc)(TTC size);
bzero(buck, size);
return buck;
}
#else
-static inline void * P(new_bucket)(TAUC uns size) { return P(alloc)(TTC size); }
+static inline void * P(new_bucket)(TAUC uint size) { return P(alloc)(TTC size); }
#endif
/* Now the operations */
static void HASH_PREFIX(cleanup)(TA)
{
#ifndef HASH_USE_POOL
- uns i;
+ uint i;
P(bucket) *b, *bb;
for (i=0; i<T.hash_size; i++)
}
#endif
-static inline uns P(bucket_hash) (TAUC P(bucket) *b)
+static inline uint P(bucket_hash) (TAUC P(bucket) *b)
{
#ifdef HASH_CONSERVE_SPACE
return P(hash)(TTC HASH_KEY(b->n.));
#endif
}
-static void P(rehash) (TAC uns size)
+static void P(rehash) (TAC uint size)
{
P(bucket) *b, *nb;
P(bucket) **oldt = T.ht, **newt;
- uns oldsize = T.hash_size;
- uns i, h;
+ uint oldsize = T.hash_size;
+ uint i, h;
DBG("Rehashing %d->%d at count %d", oldsize, size, T.hash_count);
T.hash_size = size;
**/
static HASH_NODE* HASH_PREFIX(find)(TAC HASH_KEY_DECL)
{
- uns h0 = P(hash) (TTC HASH_KEY( ));
- uns h = h0 % T.hash_size;
+ uint h0 = P(hash) (TTC HASH_KEY( ));
+ uint h = h0 % T.hash_size;
P(bucket) *b;
for (b=T.ht[h]; b; b=b->next)
static HASH_NODE* HASH_PREFIX(find_next)(TAC P(node) *start)
{
#ifndef HASH_CONSERVE_SPACE
- uns h0 = P(hash) (TTC HASH_KEY(start->));
+ uint h0 = P(hash) (TTC HASH_KEY(start->));
#endif
P(bucket) *b = SKIP_BACK(P(bucket), n, start);
**/
static HASH_NODE * HASH_PREFIX(new)(TAC HASH_KEY_DECL)
{
- uns h0, h;
+ uint h0, h;
P(bucket) *b;
h0 = P(hash) (TTC HASH_KEY( ));
#endif
#ifdef HASH_WANT_LOOKUP
+#ifdef HASH_LOOKUP_DETECT_NEW
/**
* Finds a node with a given key. If it does not exist, a new one is created.
* It is strongly recommended to use <<give_init_data,`HASH_GIVE_INIT_DATA`>>.
*
* This one is enabled by the <<want_lookup,`HASH_WANT_LOOKUP`>> macro.
+ * The @new_item argument is available only if <<lookup_detect_new,`HASH_LOOKUP_DETECT_NEW`>> was given.
**/
-#ifdef HASH_LOOKUP_DETECT_NEW
static HASH_NODE* HASH_PREFIX(lookup)(TAC HASH_KEY_DECL, int *new_item)
#else
static HASH_NODE* HASH_PREFIX(lookup)(TAC HASH_KEY_DECL)
#endif
{
- uns h0 = P(hash) (TTC HASH_KEY( ));
- uns h = h0 % T.hash_size;
+ uint h0 = P(hash) (TTC HASH_KEY( ));
+ uint h = h0 % T.hash_size;
P(bucket) *b;
for (b=T.ht[h]; b; b=b->next)
**/
static int HASH_PREFIX(delete)(TAC HASH_KEY_DECL)
{
- uns h0 = P(hash) (TTC HASH_KEY( ));
- uns h = h0 % T.hash_size;
+ uint h0 = P(hash) (TTC HASH_KEY( ));
+ uint h = h0 % T.hash_size;
P(bucket) *b, **bb;
for (bb=&T.ht[h]; b=*bb; bb=&b->next)
static void HASH_PREFIX(remove)(TAC HASH_NODE *n)
{
P(bucket) *x = SKIP_BACK(struct P(bucket), n, n);
- uns h0 = P(bucket_hash)(TTC x);
- uns h = h0 % T.hash_size;
+ uint h0 = P(bucket_hash)(TTC x);
+ uint h = h0 % T.hash_size;
P(bucket) *b, **bb;
for (bb=&T.ht[h]; (b=*bb) && b != x; bb=&b->next)
#define HASH_FOR_ALL_DYNAMIC(h_px, h_table, h_var) \
do { \
- uns h_slot; \
+ uint h_slot; \
struct GLUE_(h_px,bucket) *h_buck; \
for (h_slot=0; h_slot < (h_table)->hash_size; h_slot++) \
for (h_buck = (h_table)->ht[h_slot]; h_buck; h_buck = h_buck->next) \