X-Git-Url: http://mj.ucw.cz/gitweb/?a=blobdiff_plain;f=ucw%2Fmempool.c;h=f9e268dab5c9144745eb80b6bc735abcb9a30933;hb=f1155256f7a168f5e2c0097cb4e7197b79c4f041;hp=521724a8b582accde6f68a649f793c0d5c05e6ba;hpb=b541de765129b6bf28c5601bb355779652001150;p=libucw.git

diff --git a/ucw/mempool.c b/ucw/mempool.c
index 521724a8..f9e268da 100644
--- a/ucw/mempool.c
+++ b/ucw/mempool.c
@@ -1,8 +1,8 @@
 /*
  *	UCW Library -- Memory Pools (One-Time Allocation)
  *
- *	(c) 1997--2001 Martin Mares <mj@ucw.cz>
- *	(c) 2007 Pavel Charvat <pchar@ucw.cz>
+ *	(c) 1997--2014 Martin Mares <mj@ucw.cz>
+ *	(c) 2007--2014 Pavel Charvat <pchar@ucw.cz>
 *
 *	This software may be freely distributed and used according to the terms
 *	of the GNU Lesser General Public License.
@@ -11,20 +11,24 @@
 #undef LOCAL_DEBUG
 
 #include <ucw/lib.h>
+#include <ucw/alloc.h>
 #include <ucw/mempool.h>
 
 #include <string.h>
 
 #define MP_CHUNK_TAIL ALIGN_TO(sizeof(struct mempool_chunk), CPU_STRUCT_ALIGN)
-#define MP_SIZE_MAX (~0U - MP_CHUNK_TAIL - CPU_PAGE_SIZE)
+#define MP_SIZE_MAX (SIZE_MAX - MP_CHUNK_TAIL - CPU_PAGE_SIZE)
 
 struct mempool_chunk {
+#ifdef CONFIG_DEBUG
+  struct mempool *pool;		// Can be useful when analysing coredump for memory leaks
+#endif
   struct mempool_chunk *next;
-  uns size;
+  size_t size;
 };
 
-static uns
-mp_align_size(uns size)
+static size_t
+mp_align_size(size_t size)
 {
 #ifdef CONFIG_UCW_POOL_IS_MMAP
   return ALIGN_TO(size + MP_CHUNK_TAIL, CPU_PAGE_SIZE) - MP_CHUNK_TAIL;
@@ -33,88 +37,136 @@ mp_align_size(uns size)
 #endif
 }
 
+static void *mp_allocator_alloc(struct ucw_allocator *a, size_t size)
+{
+  struct mempool *mp = (struct mempool *) a;
+  return mp_alloc_fast(mp, size);
+}
+
+static void *mp_allocator_realloc(struct ucw_allocator *a, void *ptr, size_t old_size, size_t new_size)
+{
+  if (new_size <= old_size)
+    return ptr;
+
+  /*
+   *  In the future, we might want to do something like mp_realloc(),
+   *  but we have to check that it is indeed the last block in the pool.
+   */
+  struct mempool *mp = (struct mempool *) a;
+  void *new = mp_alloc_fast(mp, new_size);
+  memcpy(new, ptr, old_size);
+  return new;
+}
+
+static void mp_allocator_free(struct ucw_allocator *a UNUSED, void *ptr UNUSED)
+{
+  // Does nothing
+}
+
 void
-mp_init(struct mempool *pool, uns chunk_size)
+mp_init(struct mempool *pool, size_t chunk_size)
 {
   chunk_size = mp_align_size(MAX(sizeof(struct mempool), chunk_size));
   *pool = (struct mempool) {
+    .allocator = {
+      .alloc = mp_allocator_alloc,
+      .realloc = mp_allocator_realloc,
+      .free = mp_allocator_free,
+    },
     .chunk_size = chunk_size,
     .threshold = chunk_size >> 1,
-    .last_big = &pool->last_big };
+    .last_big = &pool->last_big
+  };
 }
 
 static void *
-mp_new_big_chunk(uns size)
+mp_new_big_chunk(struct mempool *pool, size_t size)
 {
   struct mempool_chunk *chunk;
   chunk = xmalloc(size + MP_CHUNK_TAIL) + size;
   chunk->size = size;
+  if (pool)
+    pool->total_size += size + MP_CHUNK_TAIL;
   return chunk;
 }
 
 static void
-mp_free_big_chunk(struct mempool_chunk *chunk)
+mp_free_big_chunk(struct mempool *pool, struct mempool_chunk *chunk)
 {
+  pool->total_size -= chunk->size + MP_CHUNK_TAIL;
   xfree((void *)chunk - chunk->size);
 }
 
 static void *
-mp_new_chunk(uns size)
+mp_new_chunk(struct mempool *pool, size_t size)
 {
 #ifdef CONFIG_UCW_POOL_IS_MMAP
   struct mempool_chunk *chunk;
   chunk = page_alloc(size + MP_CHUNK_TAIL) + size;
   chunk->size = size;
+  if (pool)
+    pool->total_size += size + MP_CHUNK_TAIL;
   return chunk;
 #else
-  return mp_new_big_chunk(size);
+  return mp_new_big_chunk(pool, size);
 #endif
 }
 
 static void
-mp_free_chunk(struct mempool_chunk *chunk)
+mp_free_chunk(struct mempool *pool, struct mempool_chunk *chunk)
 {
 #ifdef CONFIG_UCW_POOL_IS_MMAP
+  pool->total_size -= chunk->size + MP_CHUNK_TAIL;
   page_free((void *)chunk - chunk->size, chunk->size + MP_CHUNK_TAIL);
 #else
-  mp_free_big_chunk(chunk);
+  mp_free_big_chunk(pool, chunk);
 #endif
 }
 
 struct mempool *
-mp_new(uns chunk_size)
+mp_new(size_t chunk_size)
 {
   chunk_size = mp_align_size(MAX(sizeof(struct mempool), chunk_size));
-  struct mempool_chunk *chunk = mp_new_chunk(chunk_size);
+  struct mempool_chunk *chunk = mp_new_chunk(NULL, chunk_size);
   struct mempool *pool = (void *)chunk - chunk_size;
   DBG("Creating mempool %p with %u bytes long chunks", pool, chunk_size);
   chunk->next = NULL;
+#ifdef CONFIG_DEBUG
+  chunk->pool = pool;
+#endif
   *pool = (struct mempool) {
+    .allocator = {
+      .alloc = mp_allocator_alloc,
+      .realloc = mp_allocator_realloc,
+      .free = mp_allocator_free,
+    },
     .state = { .free = { chunk_size - sizeof(*pool) }, .last = { chunk } },
     .chunk_size = chunk_size,
     .threshold = chunk_size >> 1,
-    .last_big = &pool->last_big };
+    .last_big = &pool->last_big,
+    .total_size = chunk->size + MP_CHUNK_TAIL,
+  };
   return pool;
 }
 
 static void
-mp_free_chain(struct mempool_chunk *chunk)
+mp_free_chain(struct mempool *pool, struct mempool_chunk *chunk)
 {
   while (chunk)
     {
      struct mempool_chunk *next = chunk->next;
-      mp_free_chunk(chunk);
+      mp_free_chunk(pool, chunk);
      chunk = next;
    }
 }
 
 static void
-mp_free_big_chain(struct mempool_chunk *chunk)
+mp_free_big_chain(struct mempool *pool, struct mempool_chunk *chunk)
 {
   while (chunk)
    {
      struct mempool_chunk *next = chunk->next;
-      mp_free_big_chunk(chunk);
+      mp_free_big_chunk(pool, chunk);
      chunk = next;
    }
 }
 
@@ -123,15 +175,15 @@ void
 mp_delete(struct mempool *pool)
 {
   DBG("Deleting mempool %p", pool);
-  mp_free_big_chain(pool->state.last[1]);
-  mp_free_chain(pool->unused);
-  mp_free_chain(pool->state.last[0]); // can contain the mempool structure
+  mp_free_big_chain(pool, pool->state.last[1]);
+  mp_free_chain(pool, pool->unused);
+  mp_free_chain(pool, pool->state.last[0]); // can contain the mempool structure
 }
 
 void
 mp_flush(struct mempool *pool)
 {
-  mp_free_big_chain(pool->state.last[1]);
+  mp_free_big_chain(pool, pool->state.last[1]);
 
   struct mempool_chunk *chunk, *next;
   for (chunk = pool->state.last[0]; chunk && (void *)chunk - chunk->size != pool; chunk = next)
     {
@@ -148,12 +200,18 @@ mp_flush(struct mempool *pool)
 
 static void
-mp_stats_chain(struct mempool_chunk *chunk, struct mempool_stats *stats, uns idx)
+mp_stats_chain(struct mempool *pool, struct mempool_chunk *chunk, struct mempool_stats *stats, uint idx)
 {
   while (chunk)
     {
-      stats->chain_size[idx] += chunk->size + sizeof(*chunk);
+      stats->chain_size[idx] += chunk->size + MP_CHUNK_TAIL;
       stats->chain_count[idx]++;
+      if (idx < 2)
+	{
+	  stats->used_size += chunk->size;
+	  if ((byte *)pool == (byte *)chunk - chunk->size)
+	    stats->used_size -= sizeof(*pool);
+	}
       chunk = chunk->next;
     }
   stats->total_size += stats->chain_size[idx];
@@ -163,21 +221,35 @@ void
 mp_stats(struct mempool *pool, struct mempool_stats *stats)
 {
   bzero(stats, sizeof(*stats));
-  mp_stats_chain(pool->state.last[0], stats, 0);
-  mp_stats_chain(pool->state.last[1], stats, 1);
-  mp_stats_chain(pool->unused, stats, 2);
+  mp_stats_chain(pool, pool->state.last[0], stats, 0);
+  mp_stats_chain(pool, pool->state.last[1], stats, 1);
+  mp_stats_chain(pool, pool->unused, stats, 2);
+  stats->used_size -= pool->state.free[0] + pool->state.free[1];
+  ASSERT(stats->total_size == pool->total_size);
+  ASSERT(stats->used_size <= stats->total_size);
 }
 
 u64
 mp_total_size(struct mempool *pool)
 {
-  struct mempool_stats stats;
-  mp_stats(pool, &stats);
-  return stats.total_size;
+  return pool->total_size;
+}
+
+void
+mp_shrink(struct mempool *pool, u64 min_total_size)
+{
+  while (1)
+    {
+      struct mempool_chunk *chunk = pool->unused;
+      if (!chunk || pool->total_size - (chunk->size + MP_CHUNK_TAIL) < min_total_size)
+	break;
+      pool->unused = chunk->next;
+      mp_free_chunk(pool, chunk);
+    }
 }
 
 void *
-mp_alloc_internal(struct mempool *pool, uns size)
+mp_alloc_internal(struct mempool *pool, size_t size)
 {
   struct mempool_chunk *chunk;
   if (size <= pool->threshold)
@@ -189,7 +261,12 @@ mp_alloc_internal(struct mempool *pool, uns size)
	  pool->unused = chunk->next;
	}
       else
-	chunk = mp_new_chunk(pool->chunk_size);
+	{
+	  chunk = mp_new_chunk(pool, pool->chunk_size);
+#ifdef CONFIG_DEBUG
+	  chunk->pool = pool;
+#endif
+	}
       chunk->next = pool->state.last[0];
       pool->state.last[0] = chunk;
       pool->state.free[0] = pool->chunk_size - size;
@@ -198,31 +275,34 @@ mp_alloc_internal(struct mempool *pool, uns size)
   else if (likely(size <= MP_SIZE_MAX))
     {
       pool->idx = 1;
-      uns aligned = ALIGN_TO(size, CPU_STRUCT_ALIGN);
-      chunk = mp_new_big_chunk(aligned);
+      size_t aligned = ALIGN_TO(size, CPU_STRUCT_ALIGN);
+      chunk = mp_new_big_chunk(pool, aligned);
       chunk->next = pool->state.last[1];
+#ifdef CONFIG_DEBUG
+      chunk->pool = pool;
+#endif
       pool->state.last[1] = chunk;
       pool->state.free[1] = aligned - size;
       return pool->last_big = (void *)chunk - aligned;
     }
   else
-    die("Cannot allocate %u bytes from a mempool", size);
+    die("Cannot allocate %zu bytes from a mempool", size);
 }
 
 void *
-mp_alloc(struct mempool *pool, uns size)
+mp_alloc(struct mempool *pool, size_t size)
 {
   return mp_alloc_fast(pool, size);
 }
 
 void *
-mp_alloc_noalign(struct mempool *pool, uns size)
+mp_alloc_noalign(struct mempool *pool, size_t size)
 {
   return mp_alloc_fast_noalign(pool, size);
 }
 
 void *
-mp_alloc_zero(struct mempool *pool, uns size)
+mp_alloc_zero(struct mempool *pool, size_t size)
 {
   void *ptr = mp_alloc_fast(pool, size);
   bzero(ptr, size);
@@ -230,7 +310,7 @@ mp_alloc_zero(struct mempool *pool, uns size)
 }
 
 void *
-mp_start_internal(struct mempool *pool, uns size)
+mp_start_internal(struct mempool *pool, size_t size)
 {
   void *ptr = mp_alloc_internal(pool, size);
   pool->state.free[pool->idx] += size;
@@ -238,30 +318,31 @@ mp_start_internal(struct mempool *pool, uns size)
 }
 
 void *
-mp_start(struct mempool *pool, uns size)
+mp_start(struct mempool *pool, size_t size)
 {
   return mp_start_fast(pool, size);
 }
 
 void *
-mp_start_noalign(struct mempool *pool, uns size)
+mp_start_noalign(struct mempool *pool, size_t size)
 {
   return mp_start_fast_noalign(pool, size);
 }
 
 void *
-mp_grow_internal(struct mempool *pool, uns size)
+mp_grow_internal(struct mempool *pool, size_t size)
 {
   if (unlikely(size > MP_SIZE_MAX))
-    die("Cannot allocate %u bytes of memory", size);
-  uns avail = mp_avail(pool);
+    die("Cannot allocate %zu bytes of memory", size);
+  size_t avail = mp_avail(pool);
   void *ptr = mp_ptr(pool);
   if (pool->idx)
     {
-      uns amortized = likely(avail <= MP_SIZE_MAX / 2) ? avail * 2 : MP_SIZE_MAX;
+      size_t amortized = likely(avail <= MP_SIZE_MAX / 2) ? avail * 2 : MP_SIZE_MAX;
       amortized = MAX(amortized, size);
       amortized = ALIGN_TO(amortized, CPU_STRUCT_ALIGN);
       struct mempool_chunk *chunk = pool->state.last[1], *next = chunk->next;
+      pool->total_size = pool->total_size - chunk->size + amortized;
       ptr = xrealloc(ptr, amortized + MP_CHUNK_TAIL);
       chunk = ptr + amortized;
       chunk->next = next;
@@ -279,22 +360,22 @@ mp_grow_internal(struct mempool *pool, uns size)
     }
 }
 
-uns
+size_t
 mp_open(struct mempool *pool, void *ptr)
 {
   return mp_open_fast(pool, ptr);
 }
 
 void *
-mp_realloc(struct mempool *pool, void *ptr, uns size)
+mp_realloc(struct mempool *pool, void *ptr, size_t size)
 {
   return mp_realloc_fast(pool, ptr, size);
 }
 
 void *
-mp_realloc_zero(struct mempool *pool, void *ptr, uns size)
+mp_realloc_zero(struct mempool *pool, void *ptr, size_t size)
 {
-  uns old_size = mp_open_fast(pool, ptr);
+  size_t old_size = mp_open_fast(pool, ptr);
   ptr = mp_grow(pool, size);
   if (size > old_size)
     bzero(ptr + old_size, size - old_size);
@@ -303,7 +384,7 @@ mp_realloc_zero(struct mempool *pool, void *ptr, uns size)
 }
 
 void *
-mp_spread_internal(struct mempool *pool, void *p, uns size)
+mp_spread_internal(struct mempool *pool, void *p, size_t size)
 {
   void *old = mp_ptr(pool);
   void *new = mp_grow_internal(pool, p-old+size);
@@ -324,7 +405,7 @@ mp_restore(struct mempool *pool, struct mempool_state *state)
   for (chunk = pool->state.last[1]; chunk != s.last[1]; chunk = next)
     {
       next = chunk->next;
-      mp_free_big_chunk(chunk);
+      mp_free_big_chunk(pool, chunk);
     }
   pool->state = s;
   pool->last_big = &pool->last_big;
@@ -355,14 +436,14 @@ mp_pop(struct mempool *pool)
 
 #include <ucw/getopt.h>
 
 static void
-fill(byte *ptr, uns len, uns magic)
+fill(byte *ptr, uint len, uint magic)
 {
   while (len--)
     *ptr++ = (magic++ & 255);
 }
 
 static void
-check(byte *ptr, uns len, uns magic, uns align)
+check(byte *ptr, uint len, uint magic, uint align)
 {
   ASSERT(!((uintptr_t)ptr & (align - 1)));
   while (len--)
@@ -378,15 +459,15 @@ int main(int argc, char **argv)
 
   if (cf_getopt(argc, argv, CF_SHORT_OPTS, CF_NO_LONG_OPTS, NULL) >= 0 || argc != optind)
     die("Invalid usage");
 
-  uns max = 1000, n = 0, m = 0, can_realloc = 0;
+  uint max = 1000, n = 0, m = 0, can_realloc = 0;
   void *ptr[max];
   struct mempool_state *state[max];
-  uns len[max], num[max], align[max];
+  uint len[max], num[max], align[max];
   struct mempool *mp = mp_new(128), mp_static;
 
-  for (uns i = 0; i < 5000; i++)
+  for (uint i = 0; i < 5000; i++)
     {
-      for (uns j = 0; j < n; j++)
+      for (uint j = 0; j < n; j++)
	check(ptr[j], len[j], j, align[j]);
 #if 0
       DBG("free_small=%u free_big=%u idx=%u chunk_size=%u last_big=%p", mp->state.free[0], mp->state.free[1], mp->idx, mp->chunk_size, mp->last_big);
 #endif
@@ -441,10 +522,10 @@ int main(int argc, char **argv)
	ASSERT(0);
 grow:
	{
-	  uns k = n - 1;
-	  for (uns i = random_max(4); i--; )
+	  uint k = n - 1;
+	  for (uint i = random_max(4); i--; )
	    {
-	      uns l = len[k];
+	      uint l = len[k];
	      len[k] = random_max(0x2000);
	      DBG("grow(%u)", len[k]);
	      ptr[k] = mp_grow(mp, len[k]);
@@ -457,7 +538,7 @@ grow:
	}
       else if (can_realloc && n && (r -= 20) < 0)
	{
-	  uns i = n - 1, l = len[i];
+	  uint i = n - 1, l = len[i];
	  DBG("realloc(%p, %u)", ptr[i], len[i]);
	  ptr[i] = mp_realloc(mp, ptr[i], len[i] = random_max(0x2000));
	  DBG(" -> (%p, %u)", ptr[i], len[i]);
@@ -481,7 +562,7 @@ grow:
	}
       else if (m && (r -= 1) < 0)
	{
-	  uns i = random_max(m);
+	  uint i = random_max(m);
	  DBG("restore(%u)", i);
	  mp_restore(mp, state[i]);
	  n = num[m = i];
@@ -489,6 +570,11 @@ grow:
	}
       else if (can_realloc && n && (r -= 5) < 0)
	ASSERT(mp_size(mp, ptr[n - 1]) == len[n - 1]);
+      else
+	{
+	  struct mempool_stats stats;
+	  mp_stats(mp, &stats);
+	}
     }
 
   mp_delete(mp);
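
The ucw_allocator block that this patch embeds at the start of struct mempool is what makes the casts in mp_allocator_alloc() and friends legal: the allocator must be the first member, so a pointer to it is a pointer to the pool. Code written against struct ucw_allocator can therefore draw memory from a pool without knowing mempools exist. A minimal sketch of such a consumer, assuming only the vtable fields visible in the diff (alloc, realloc, free); push_string() is a hypothetical helper, not part of the patch:

#include <string.h>
#include <ucw/lib.h>
#include <ucw/alloc.h>
#include <ucw/mempool.h>

/* Hypothetical helper: copies a string through any ucw_allocator. */
static char *push_string(struct ucw_allocator *a, const char *s)
{
  size_t len = strlen(s) + 1;
  char *copy = a->alloc(a, len);   // for a mempool this dispatches to mp_allocator_alloc()
  memcpy(copy, s, len);
  return copy;
}

int main(void)
{
  struct mempool *mp = mp_new(4096);
  char *hello = push_string(&mp->allocator, "hello");
  ASSERT(!strcmp(hello, "hello"));
  mp_delete(mp);                   // a->free() was a no-op; everything dies with the pool
  return 0;
}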
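The new total_size field turns mp_total_size() into an O(1) lookup where it previously had to walk all three chains through mp_stats(), and it is also what lets the new mp_shrink() decide how many cached chunks it may hand back to the system: it frees chunks from pool->unused only while the pool would stay at or above min_total_size. A sketch of the intended usage, assuming mp_flush() still recycles exhausted chunks onto pool->unused as in earlier versions of this file; the 64 KB limit is an arbitrary example value:

#include <ucw/lib.h>
#include <ucw/mempool.h>

int main(void)
{
  struct mempool *mp = mp_new(4096);
  for (uint i = 0; i < 1000; i++)
    mp_alloc(mp, 256);            // forces the pool to allocate a long chain of chunks
  mp_flush(mp);                   // chunks move to mp->unused; total_size is unchanged
  mp_shrink(mp, 64 << 10);        // free unused chunks, but keep at least 64 KB cached
  ASSERT(mp_total_size(mp) >= 64 << 10);
  mp_delete(mp);
  return 0;
}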
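The comment inside mp_allocator_realloc() is worth spelling out: growing a block through the generic interface always copies it into fresh pool memory, and the old block stays allocated until the pool is flushed, so a buffer grown n times leaves n dead copies behind. When the growing object is the pool's most recent allocation, the open-block interface declared in ucw/mempool.h (mp_start()/mp_grow()/mp_end()) extends the open block instead; for big blocks this is the xrealloc() path visible in mp_grow_internal() above. A sketch contrasting the two, under those assumptions:

#include <ucw/lib.h>
#include <ucw/alloc.h>
#include <ucw/mempool.h>

int main(void)
{
  struct mempool *mp = mp_new(4096);

  // Generic allocator: each growth step copies, the old block becomes garbage
  struct ucw_allocator *a = &mp->allocator;
  void *buf = a->alloc(a, 16);
  buf = a->realloc(a, buf, 16, 32);   // mp_allocator_realloc() copies all 16 bytes

  // Open-block interface: the open allocation is extended, not abandoned
  char *s = mp_start(mp, 16);
  s = mp_grow(mp, 32);                // may move, but no dead copy is left in the pool
  mp_end(mp, s + 32);                 // close the block at its final size

  mp_delete(mp);
  return 0;
}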