X-Git-Url: http://mj.ucw.cz/gitweb/?a=blobdiff_plain;f=lib%2Fhashtable.h;h=0ef283caa44f34f87142a3a82006a04330d5cef8;hb=dca5f3368d079186be487724d6521f222b7991e9;hp=33a7cc3ef1c68e58302c3108a6f3bfc1fada2ea7;hpb=cad27e97e6370f96903d42aaf345c099af0a03bd;p=libucw.git

diff --git a/lib/hashtable.h b/lib/hashtable.h
index 33a7cc3e..0ef283ca 100644
--- a/lib/hashtable.h
+++ b/lib/hashtable.h
@@ -2,7 +2,7 @@
  * UCW Library -- Universal Hash Table
  *
  * (c) 2002--2004 Martin Mares
- * (c) 2002 Robert Spalek
+ * (c) 2002--2005 Robert Spalek
  *
  * This software may be freely distributed and used according to the terms
  * of the GNU Lesser General Public License.
@@ -39,6 +39,9 @@
  *    That is, `type1 k1, type2 k2, ... typen kn'.
  *    With complex keys, HASH_GIVE_HASHFN and HASH_GIVE_EQ
  *    are mandatory.
+ * | HASH_KEY_MEMORY=f  use node->f as a raw data key, compared using
+ *                      memcmp
+ *   HASH_KEY_SIZE      the length of the key block
  *
  * Then specify what operations you request (all names are automatically
  * prefixed by calling HASH_PREFIX):
@@ -90,6 +93,7 @@
  *                      deallocation is not supported by mempools, so delete/remove
  *                      will leak pool memory.
  *  HASH_AUTO_POOL=size Create a pool of the given block size automatically.
+ *  HASH_ZERO_FILL      New entries should be initialized to all zeroes.
  *  HASH_TABLE_ALLOC    The hash table itself will be allocated and freed using
  *                      the same allocation functions as the nodes instead of
  *                      the default xmalloc().
@@ -150,6 +154,9 @@ struct P(table) {
   uns hash_size;
   uns hash_count, hash_max, hash_min, hash_hard_max;
   P(bucket) **ht;
+#ifdef HASH_AUTO_POOL
+  struct mempool *pool;
+#endif
 };

 #ifdef HASH_TABLE_DYNAMIC
@@ -185,7 +192,7 @@ struct P(table) P(table);
 #ifndef HASH_GIVE_HASHFN
 #  define HASH_GIVE_HASHFN
    static inline int P(hash) (TAUC HASH_ATOMIC_TYPE x)
-  { return hash_int(x); }
+  { return ((sizeof(x) <= 4) ? hash_u32(x) : hash_u64(x)); }
 #endif

 #ifndef HASH_GIVE_EQ
@@ -200,6 +207,30 @@ struct P(table) P(table);
    { HASH_KEY(n->) = k; }
 #endif

+#elif defined(HASH_KEY_MEMORY)
+
+#define HASH_KEY(x) x HASH_KEY_MEMORY
+
+#define HASH_KEY_DECL byte HASH_KEY( )[HASH_KEY_SIZE]
+
+#ifndef HASH_GIVE_HASHFN
+#  define HASH_GIVE_HASHFN
+  static inline int P(hash) (TAUC byte *x)
+  { return hash_block(x, HASH_KEY_SIZE); }
+#endif
+
+#ifndef HASH_GIVE_EQ
+#  define HASH_GIVE_EQ
+  static inline int P(eq) (TAUC byte *x, byte *y)
+  { return !memcmp(x, y, HASH_KEY_SIZE); }
+#endif
+
+#ifndef HASH_GIVE_INIT_KEY
+#  define HASH_GIVE_INIT_KEY
+  static inline void P(init_key) (TAUC P(node) *n, byte *k)
+  { memcpy(HASH_KEY(n->), k, HASH_KEY_SIZE); }
+#endif
+
 #elif defined(HASH_KEY_STRING) || defined(HASH_KEY_ENDSTRING)

 #ifdef HASH_KEY_STRING
@@ -304,11 +335,11 @@ static inline void P(cleanup_alloc) (TAU) { }
 #elif defined(HASH_AUTO_POOL)
 /* Use our own pools */
 #include "lib/mempool.h"
-static struct mempool *P(pool);
-static inline void * P(alloc) (TAUC unsigned int size) { return mp_alloc_fast(P(pool), size); }
+static inline void * P(alloc) (TAUC unsigned int size) { return mp_alloc_fast(T.pool, size); }
 static inline void P(free) (TAUC void *x UNUSED) { }
-static inline void P(init_alloc) (TAU) { P(pool) = mp_new(HASH_AUTO_POOL); }
-static inline void P(cleanup_alloc) (TAU) { mp_delete(P(pool)); }
+static inline void P(init_alloc) (TAU) { T.pool = mp_new(HASH_AUTO_POOL); }
+static inline void P(cleanup_alloc) (TAU) { mp_delete(T.pool); }
+#define HASH_USE_POOL

 #else
 /* The default allocation method */
@@ -335,11 +366,22 @@ static inline void P(table_free) (TAUC void *x) { xfree(x); }
 #define HASH_FN_BITS 32
 #endif

+#ifdef HASH_ZERO_FILL
+static inline void * P(new_bucket)(TAUC uns size)
+{
+  byte *buck = P(alloc)(TTC size);
+  bzero(buck, size);
+  return buck;
+}
+#else
+static inline void * P(new_bucket)(TAUC uns size) { return P(alloc)(TTC size); }
+#endif
+
 /* Now the operations */

 static void P(alloc_table) (TAU)
 {
-  T.hash_size = nextprime(T.hash_size);
+  T.hash_size = next_table_prime(T.hash_size);
   T.ht = P(table_alloc)(TTC sizeof(void *) * T.hash_size);
   bzero(T.ht, sizeof(void *) * T.hash_size);
   if (2*T.hash_size < T.hash_hard_max)
@@ -468,7 +510,7 @@ static P(node) * P(new) (TAC HASH_KEY_DECL)

   h0 = P(hash) (TTC HASH_KEY( ));
   h = h0 % T.hash_size;
-  b = P(alloc) (TTC sizeof(struct P(bucket)) + HASH_EXTRA_SIZE(HASH_KEY( )));
+  b = P(new_bucket) (TTC sizeof(struct P(bucket)) + HASH_EXTRA_SIZE(HASH_KEY( )));
   b->next = T.ht[h];
   T.ht[h] = b;
 #ifndef HASH_CONSERVE_SPACE
@@ -499,7 +541,7 @@ static P(node) * P(lookup) (TAC HASH_KEY_DECL)
      return &b->n;
     }

-  b = P(alloc) (TTC sizeof(struct P(bucket)) + HASH_EXTRA_SIZE(HASH_KEY( )));
+  b = P(new_bucket) (TTC sizeof(struct P(bucket)) + HASH_EXTRA_SIZE(HASH_KEY( )));
   b->next = T.ht[h];
   T.ht[h] = b;
 #ifndef HASH_CONSERVE_SPACE
@@ -583,7 +625,7 @@ do { \
 #undef TA
 #undef TAC
 #undef TAU
-#undef TACU
+#undef TAUC
 #undef TT
 #undef TTC

@@ -618,3 +660,4 @@ do { \
 #undef HASH_WANT_REMOVE
 #undef HASH_TABLE_ALLOC
 #undef HASH_TABLE_DYNAMIC
+#undef HASH_ZERO_FILL
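
Below is a minimal usage sketch (not part of the patch) showing how the options introduced here -- HASH_KEY_MEMORY, HASH_KEY_SIZE, HASH_ZERO_FILL and the per-table HASH_AUTO_POOL -- might be instantiated, following the generator conventions documented at the top of hashtable.h (HASH_NODE, HASH_PREFIX, HASH_WANT_*). The node structure, prefix, key size and pool size are hypothetical, chosen only for illustration.

/* Hypothetical example: count 16-byte fingerprints in a memory-keyed table. */
#include "lib/lib.h"            /* assumed libucw header providing byte, uns */

struct fp_node {
  byte fp[16];                  /* raw key block, compared with memcmp() */
  uns count;                    /* left at 0 by HASH_ZERO_FILL for new nodes */
};

#define HASH_NODE struct fp_node
#define HASH_PREFIX(x) fp_##x   /* generated names: fp_init, fp_lookup, ... */
#define HASH_KEY_MEMORY fp      /* use node->fp as a raw data key */
#define HASH_KEY_SIZE 16        /* length of the key block */
#define HASH_ZERO_FILL          /* new buckets are returned zero-filled */
#define HASH_AUTO_POOL 4096     /* per-table mempool, now kept in T.pool */
#define HASH_WANT_LOOKUP
#define HASH_WANT_CLEANUP
#include "lib/hashtable.h"

static void fp_count(byte *fp)
{
  struct fp_node *n = fp_lookup(fp);   /* creates the node if it is missing */
  n->count++;                          /* no explicit init needed: zero-filled */
}

/* Call fp_init() once before the first lookup and fp_cleanup() when done. */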