*
* (c) 2002--2004 Martin Mares <mj@ucw.cz>
* (c) 2002--2005 Robert Spalek <robert@ucw.cz>
+ * (c) 2010 Pavel Charvat <pchar@ucw.cz>
+ * (c) 2012 Tomas Valla <tom@ucw.cz>
*
* This software may be freely distributed and used according to the terms
* of the GNU Lesser General Public License.
* HASH_WANT_LOOKUP node *lookup(key) -- find node with given key,
* if it doesn't exist, create it. Defining
* HASH_GIVE_INIT_DATA is strongly recommended.
+ * Use HASH_LOOKUP_DETECT_NEW if you want to know
+ * whether the node was newly created or not.
* HASH_WANT_DELETE int delete(key) -- delete and deallocate node
* with given key. Returns success.
* HASH_WANT_REMOVE remove(node *) -- delete and deallocate given node.
* newly created node. Very useful for lookup operations.
* HASH_GIVE_ALLOC void *alloc(unsigned int size) -- allocate space for
* a node. Default is xmalloc() or pooled allocation, depending
- * on HASH_USE_POOL and HASH_AUTO_POOL switches.
- * void free(void *) -- the converse.
+ * on HASH_USE_POOL, HASH_AUTO_POOL, HASH_USE_ELTPOOL
+ * and HASH_AUTO_ELTPOOL switches. void free(void *) -- the converse.
+ * HASH_GIVE_TABLE_ALLOC void *table_alloc(unsigned int size), void table_free(void *)
+ * Allocate or free space for the table itself. Default is xmalloc()
+ * or the functions defined by HASH_GIVE_ALLOC if HASH_TABLE_ALLOC is set.
*
* ... and a couple of extra parameters:
*
* deallocation is not supported by mempools, so delete/remove
* will leak pool memory.
* HASH_AUTO_POOL=size Create a pool of the given block size automatically.
+ * HASH_USE_ELTPOOL=pool Allocate all nodes from given eltpool.
+ * HASH_AUTO_ELTPOOL=count Create an eltpool of the given number of elements in each chunk.
* HASH_ZERO_FILL New entries should be initialized to all zeroes.
* HASH_TABLE_ALLOC The hash table itself will be allocated and freed using
* the same allocation functions as the nodes instead of
* the default xmalloc().
+ * HASH_TABLE_GROWING Never decrease the size of the hash table itself
* HASH_TABLE_DYNAMIC Support multiple hash tables; the first parameter of all
* hash table operations is struct HASH_PREFIX(table) *.
+ * HASH_TABLE_VARS Extra variables to be defined in table structure
+ * HASH_LOOKUP_DETECT_NEW
+ * the prototype for lookup is changed to node *lookup(key, int *new_item)
+ * new_item must not be NULL; *new_item is set to 1 if lookup
+ * just created a new item in the hashtable, or to 0 otherwise.
*
* You also get a iterator macro at no extra charge:
*
*
* (For dynamic tables, use HASH_FOR_ALL_DYNAMIC(hash_prefix, hash_table, variable) instead.)
*
- * Then include "ucw/hashtable.h" and voila, you have a hash table
+ * Then include <ucw/hashtable.h> and voila, you have a hash table
* suiting all your needs (at least those which you've revealed :) ).
*
* After including this file, all parameter macros are automatically
*/
#ifndef _UCW_HASHFUNC_H
-#include "ucw/hashfunc.h"
+#include <ucw/hashfunc.h>
#endif
-#include "ucw/prime.h"
+#include <ucw/prime.h>
#include <string.h>
} P(bucket);
struct P(table) {
+#ifdef HASH_TABLE_VARS
+  /* User-supplied extra per-table fields (see HASH_TABLE_VARS above) */
+ HASH_TABLE_VARS
+#endif
uns hash_size;
uns hash_count, hash_max, hash_min, hash_hard_max;
P(bucket) **ht;
#ifdef HASH_AUTO_POOL
struct mempool *pool;
#endif
+#ifdef HASH_AUTO_ELTPOOL
+  /* Automatically created eltpool holding all nodes (HASH_AUTO_ELTPOOL) */
+ struct eltpool *eltpool;
+#endif
};
#ifdef HASH_TABLE_DYNAMIC
#elif defined(HASH_USE_POOL)
/* If the caller has requested to use his mempool, do so */
-#include "ucw/mempool.h"
+#include <ucw/mempool.h>
static inline void * P(alloc) (TAUC unsigned int size) { return mp_alloc_fast(HASH_USE_POOL, size); }
static inline void P(free) (TAUC void *x UNUSED) { }
static inline void P(init_alloc) (TAU) { }
#elif defined(HASH_AUTO_POOL)
/* Use our own pools */
-#include "ucw/mempool.h"
+#include <ucw/mempool.h>
static inline void * P(alloc) (TAUC unsigned int size) { return mp_alloc_fast(T.pool, size); }
static inline void P(free) (TAUC void *x UNUSED) { }
static inline void P(init_alloc) (TAU) { T.pool = mp_new(HASH_AUTO_POOL); }
static inline void P(cleanup_alloc) (TAU) { mp_delete(T.pool); }
#define HASH_USE_POOL
+#elif defined(HASH_USE_ELTPOOL)
+/* If the caller has requested to use his eltpool, do so */
+#include <ucw/eltpool.h>
+/* Eltpool entries have a fixed size, so assert that the bucket fits; the size argument is otherwise unused */
+static inline void * P(alloc) (TAUC unsigned int size UNUSED) { ASSERT(size <= (HASH_USE_ELTPOOL)->elt_size); return ep_alloc(HASH_USE_ELTPOOL); }
+/* Unlike mempools, eltpools can really free nodes, so delete/remove do not leak here */
+static inline void P(free) (TAUC void *x) { ep_free(HASH_USE_ELTPOOL, x); }
+static inline void P(init_alloc) (TAU) { }
+static inline void P(cleanup_alloc) (TAU) { }
+
+#elif defined(HASH_AUTO_ELTPOOL)
+/* Use our own eltpools */
+#include <ucw/eltpool.h>
+static inline void * P(alloc) (TAUC unsigned int size UNUSED) { return ep_alloc(T.eltpool); }
+static inline void P(free) (TAUC void *x) { ep_free(T.eltpool, x); }
+/* The pool is sized for a plain bucket; variable-sized nodes (HASH_GIVE_EXTRA_SIZE) are rejected below */
+static inline void P(init_alloc) (TAU) { T.eltpool = ep_new(sizeof(P(bucket)), HASH_AUTO_ELTPOOL); }
+static inline void P(cleanup_alloc) (TAU) { ep_delete(T.eltpool); }
+#define HASH_USE_ELTPOOL
+
#else
/* The default allocation method */
static inline void * P(alloc) (TAUC unsigned int size) { return xmalloc(size); }
#endif
-#ifdef HASH_TABLE_ALLOC
+#if defined(HASH_USE_ELTPOOL) && defined(HASH_GIVE_EXTRA_SIZE)
+#error Eltpools not supported in combination with variable-sized nodes
+#endif
+
+#ifdef HASH_GIVE_TABLE_ALLOC
+/* If the caller has requested to use his own allocation functions, do so */
+#elif defined(HASH_TABLE_ALLOC)
+#ifdef HASH_USE_ELTPOOL
+/* The table is a variable-sized pointer array, which fixed-size eltpool entries cannot serve */
+#error HASH_TABLE_ALLOC not supported in combination with eltpools
+#endif
static inline void * P(table_alloc) (TAUC unsigned int size) { return P(alloc)(TTC size); }
static inline void P(table_free) (TAUC void *x) { P(free)(TTC x); }
#else
static inline void P(table_free) (TAUC void *x) { xfree(x); }
#endif
+/* Mempool free is a no-op (see the allocator section above), so a pool-allocated table
+ * must never shrink: each rehash to a smaller size would leak the old table. */
+#if defined(HASH_USE_POOL) && defined(HASH_TABLE_ALLOC) && !defined(HASH_TABLE_GROWING)
+#define HASH_TABLE_GROWING
+#endif
+
#ifndef HASH_DEFAULT_SIZE
#define HASH_DEFAULT_SIZE 32
#endif
T.hash_max = 2*T.hash_size;
else
T.hash_max = ~0U;
+#ifndef HASH_TABLE_GROWING
+/* The shrink watermark hash_min matters only when the table is allowed to shrink */
if (T.hash_size/2 > HASH_DEFAULT_SIZE)
T.hash_min = T.hash_size/4;
else
+#endif
T.hash_min = 0;
}
#endif
#ifdef HASH_WANT_LOOKUP
+#ifdef HASH_LOOKUP_DETECT_NEW
/**
* Finds a node with a given key. If it does not exist, a new one is created.
* It is strongly recommended to use <<give_init_data,`HASH_GIVE_INIT_DATA`>>.
*
* This one is enabled by the <<want_lookup,`HASH_WANT_LOOKUP`>> macro.
+ * The @new_item argument is available only if <<lookup_detect_new,`HASH_LOOKUP_DETECT_NEW`>> was given.
**/
+static HASH_NODE* HASH_PREFIX(lookup)(TAC HASH_KEY_DECL, int *new_item)
+#else
static HASH_NODE* HASH_PREFIX(lookup)(TAC HASH_KEY_DECL)
+#endif
{
uns h0 = P(hash) (TTC HASH_KEY( ));
uns h = h0 % T.hash_size;
#ifndef HASH_CONSERVE_SPACE
b->hash == h0 &&
#endif
- P(eq)(TTC HASH_KEY( ), HASH_KEY(b->n.)))
+ P(eq)(TTC HASH_KEY( ), HASH_KEY(b->n.))) {
+#ifdef HASH_LOOKUP_DETECT_NEW
+ *new_item = 0;
+#endif
return &b->n;
+ }
}
b = P(new_bucket) (TTC sizeof(struct P(bucket)) + HASH_EXTRA_SIZE(HASH_KEY( )));
P(init_data)(TTC &b->n);
if (T.hash_count++ >= T.hash_max)
P(rehash)(TTC 2*T.hash_size);
+#ifdef HASH_LOOKUP_DETECT_NEW
+ *new_item = 1;
+#endif
return &b->n;
}
#endif
{
*bb = b->next;
P(free)(TTC b);
- if (--T.hash_count < T.hash_min)
+ T.hash_count--;
+#ifndef HASH_TABLE_GROWING
+ /* Shrink sparse tables, unless shrinking was disabled via HASH_TABLE_GROWING */
+ if (T.hash_count < T.hash_min)
P(rehash)(TTC T.hash_size/2);
+#endif
return 1;
}
}
ASSERT(b);
*bb = b->next;
P(free)(TTC b);
- if (--T.hash_count < T.hash_min)
+ T.hash_count--;
+#ifndef HASH_TABLE_GROWING
+ /* Shrink sparse tables, unless shrinking was disabled via HASH_TABLE_GROWING */
+ if (T.hash_count < T.hash_min)
P(rehash)(TTC T.hash_size/2);
+#endif
}
#endif
#undef HASH_EXTRA_SIZE
#undef HASH_FN_BITS
#undef HASH_GIVE_ALLOC
+#undef HASH_GIVE_TABLE_ALLOC
#undef HASH_GIVE_EQ
#undef HASH_GIVE_EXTRA_SIZE
#undef HASH_GIVE_HASHFN
#undef HASH_PREFIX
#undef HASH_USE_POOL
#undef HASH_AUTO_POOL
+#undef HASH_USE_ELTPOOL
+#undef HASH_AUTO_ELTPOOL
#undef HASH_WANT_CLEANUP
#undef HASH_WANT_DELETE
#undef HASH_WANT_FIND
#undef HASH_WANT_NEW
#undef HASH_WANT_REMOVE
#undef HASH_TABLE_ALLOC
+#undef HASH_TABLE_GROWING
#undef HASH_TABLE_DYNAMIC
+#undef HASH_TABLE_VARS
#undef HASH_ZERO_FILL
+#undef HASH_LOOKUP_DETECT_NEW