/*
 *	UCW Library -- Memory Pools (One-Time Allocation)
 *
 *	(c) 1997--2014 Martin Mares <mj@ucw.cz>
 *	(c) 2007--2014 Pavel Charvat <pchar@ucw.cz>
 *
 *	This software may be freely distributed and used according to the terms
 *	of the GNU Lesser General Public License.
 */

#include <ucw/lib.h>
#include <ucw/alloc.h>
#include <ucw/mempool.h>

#include <string.h>

#define MP_CHUNK_TAIL ALIGN_TO(sizeof(struct mempool_chunk), CPU_STRUCT_ALIGN)
#define MP_SIZE_MAX (~0U - MP_CHUNK_TAIL - CPU_PAGE_SIZE)	// Upper bound on a single allocation

struct mempool_chunk {
  struct mempool *pool;			// Can be useful when analysing coredump for memory leaks
  struct mempool_chunk *next;		// Older chunks in the same chain
  uns size;				// Usable size; this header is stored just past the usable area
};

static uns
mp_align_size(uns size)
{
#ifdef CONFIG_UCW_POOL_IS_MMAP
  return ALIGN_TO(size + MP_CHUNK_TAIL, CPU_PAGE_SIZE) - MP_CHUNK_TAIL;
#else
  return ALIGN_TO(size, CPU_STRUCT_ALIGN);
#endif
}

static void *mp_allocator_alloc(struct ucw_allocator *a, size_t size)
{
  struct mempool *mp = (struct mempool *) a;
  return mp_alloc_fast(mp, size);
}

static void *mp_allocator_realloc(struct ucw_allocator *a, void *ptr, size_t old_size, size_t new_size)
{
  if (new_size <= old_size)
    return ptr;

  /*
   * In the future, we might want to do something like mp_realloc(),
   * but we have to check that it is indeed the last block in the pool.
   */
  struct mempool *mp = (struct mempool *) a;
  void *new = mp_alloc_fast(mp, new_size);
  memcpy(new, ptr, old_size);
  return new;
}

static void mp_allocator_free(struct ucw_allocator *a UNUSED, void *ptr UNUSED)
{
  // Individual objects cannot be freed; the whole pool is freed at once
}

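/*
 * Usage sketch (illustrative, not compiled here): the three callbacks above
 * let a mempool stand in for a generic ucw_allocator.  They cast the
 * allocator pointer back to the pool, which assumes the allocator is
 * embedded at the start of struct mempool; the reverse cast below relies on
 * the same assumption.
 *
 *   struct mempool *mp = mp_new(4096);
 *   struct ucw_allocator *a = (struct ucw_allocator *) mp;
 *   int *v = a->alloc(a, 16 * sizeof(*v));                  // ends up in mp_alloc_fast()
 *   v = a->realloc(a, v, 16 * sizeof(*v), 32 * sizeof(*v)); // copies into a fresh block
 *   a->free(a, v);                                          // no-op, see mp_allocator_free()
 *   mp_delete(mp);                                          // everything is released at once
 */
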
void
mp_init(struct mempool *pool, uns chunk_size)
{
  chunk_size = mp_align_size(MAX(sizeof(struct mempool), chunk_size));
  *pool = (struct mempool) {
    .allocator = {
      .alloc = mp_allocator_alloc,
      .realloc = mp_allocator_realloc,
      .free = mp_allocator_free,
    },
    .chunk_size = chunk_size,
    .threshold = chunk_size >> 1,
    .last_big = &pool->last_big
  };
}

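/*
 * Minimal sketch of mp_init(): it prepares a caller-provided struct mempool
 * (for example one embedded in a larger structure), unlike mp_new() below,
 * which places the pool inside its own first chunk.  Sizes assume the
 * default (non-mmap) configuration.
 *
 *   struct mempool mp;
 *   mp_init(&mp, 1024);              // ~1 KB chunks, threshold ~512 bytes
 *   char *buf = mp_alloc(&mp, 100);
 *   mp_delete(&mp);                  // frees the chunks; the struct itself survives
 */
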
static void *
mp_new_big_chunk(uns size)
{
  struct mempool_chunk *chunk;
  chunk = xmalloc(size + MP_CHUNK_TAIL) + size;
  chunk->size = size;
  return chunk;
}

static void
mp_free_big_chunk(struct mempool_chunk *chunk)
{
  xfree((void *)chunk - chunk->size);
}

static void *
mp_new_chunk(uns size)
{
#ifdef CONFIG_UCW_POOL_IS_MMAP
  struct mempool_chunk *chunk;
  chunk = page_alloc(size + MP_CHUNK_TAIL) + size;
  chunk->size = size;
  return chunk;
#else
  return mp_new_big_chunk(size);
#endif
}

static void
mp_free_chunk(struct mempool_chunk *chunk)
{
#ifdef CONFIG_UCW_POOL_IS_MMAP
  page_free((void *)chunk - chunk->size, chunk->size + MP_CHUNK_TAIL);
#else
  mp_free_big_chunk(chunk);
#endif
}

struct mempool *
mp_new(uns chunk_size)
{
  chunk_size = mp_align_size(MAX(sizeof(struct mempool), chunk_size));
  struct mempool_chunk *chunk = mp_new_chunk(chunk_size);
  struct mempool *pool = (void *)chunk - chunk_size;
  DBG("Creating mempool %p with %u bytes long chunks", pool, chunk_size);
  chunk->next = NULL;
  *pool = (struct mempool) {
    .allocator = {
      .alloc = mp_allocator_alloc,
      .realloc = mp_allocator_realloc,
      .free = mp_allocator_free,
    },
    .state = { .free = { chunk_size - sizeof(*pool) }, .last = { chunk } },
    .chunk_size = chunk_size,
    .threshold = chunk_size >> 1,
    .last_big = &pool->last_big };
  return pool;
}

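/*
 * Note on the layout built above: the pool descriptor occupies the first
 * bytes of the pool's own first chunk, so mp_delete() (defined below) frees
 * the descriptor together with the chunk chains and no separate free is
 * needed.  A minimal lifecycle sketch; struct foo is a hypothetical type:
 *
 *   struct mempool *mp = mp_new(4096);
 *   struct foo *f = mp_alloc_zero(mp, sizeof(*f));   // zero-filled, aligned
 *   char *name = mp_alloc(mp, 16);                   // aligned, uninitialized
 *   mp_delete(mp);                                   // releases f, name and mp itself
 */
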
static void
mp_free_chain(struct mempool_chunk *chunk)
{
  while (chunk)
    {
      struct mempool_chunk *next = chunk->next;
      mp_free_chunk(chunk);
      chunk = next;
    }
}

static void
mp_free_big_chain(struct mempool_chunk *chunk)
{
  while (chunk)
    {
      struct mempool_chunk *next = chunk->next;
      mp_free_big_chunk(chunk);
      chunk = next;
    }
}

void
mp_delete(struct mempool *pool)
{
  DBG("Deleting mempool %p", pool);
  mp_free_big_chain(pool->state.last[1]);
  mp_free_chain(pool->unused);
  mp_free_chain(pool->state.last[0]);		// can contain the mempool structure
}

void
mp_flush(struct mempool *pool)
{
  mp_free_big_chain(pool->state.last[1]);
  struct mempool_chunk *chunk, *next;
  for (chunk = pool->state.last[0]; chunk && (void *)chunk - chunk->size != pool; chunk = next)
    {
      next = chunk->next;
      chunk->next = pool->unused;
      pool->unused = chunk;
    }
  pool->state.last[0] = chunk;
  pool->state.free[0] = chunk ? chunk->size - sizeof(*pool) : 0;
  pool->state.last[1] = NULL;
  pool->state.free[1] = 0;
  pool->state.next = NULL;
  pool->last_big = &pool->last_big;
}

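/*
 * Typical reset pattern (a sketch, not part of the library): a long-lived
 * pool provides per-request scratch memory and is wiped between requests.
 * mp_flush() keeps the small chunks on the unused list, so a steady state
 * mostly avoids the system allocator; struct request is hypothetical.
 *
 *   struct mempool *scratch = mp_new(8192);
 *   for (;;)
 *     {
 *       struct request *rq = mp_alloc_zero(scratch, sizeof(*rq));
 *       // ... handle one request, allocating freely from scratch ...
 *       mp_flush(scratch);           // everything allocated above is gone
 *     }
 */
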
static void
mp_stats_chain(struct mempool_chunk *chunk, struct mempool_stats *stats, uns idx)
{
  while (chunk)
    {
      stats->chain_size[idx] += chunk->size + sizeof(*chunk);
      stats->chain_count[idx]++;
      chunk = chunk->next;
    }
  stats->total_size += stats->chain_size[idx];
}

void
mp_stats(struct mempool *pool, struct mempool_stats *stats)
{
  bzero(stats, sizeof(*stats));
  mp_stats_chain(pool->state.last[0], stats, 0);
  mp_stats_chain(pool->state.last[1], stats, 1);
  mp_stats_chain(pool->unused, stats, 2);
}

u64
mp_total_size(struct mempool *pool)
{
  struct mempool_stats stats;
  mp_stats(pool, &stats);
  return stats.total_size;
}

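/*
 * Introspection sketch: mp_stats() fills chain 0 with the small chunks,
 * chain 1 with the oversized ("big") chunks and chain 2 with the unused
 * chunks kept by mp_flush().  The exact field types live in <ucw/mempool.h>
 * and are not shown here, hence the defensive casts.
 *
 *   struct mempool_stats st;
 *   mp_stats(mp, &st);
 *   printf("total %llu bytes in %u+%u+%u chunks\n",
 *          (unsigned long long) st.total_size,
 *          (unsigned) st.chain_count[0],
 *          (unsigned) st.chain_count[1],
 *          (unsigned) st.chain_count[2]);
 */
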
void *
mp_alloc_internal(struct mempool *pool, uns size)
{
  struct mempool_chunk *chunk;
  if (size <= pool->threshold)
    {
      pool->idx = 0;
      if (pool->unused)
        {
          chunk = pool->unused;
          pool->unused = chunk->next;
        }
      else
        chunk = mp_new_chunk(pool->chunk_size);
      chunk->next = pool->state.last[0];
      pool->state.last[0] = chunk;
      pool->state.free[0] = pool->chunk_size - size;
      return (void *)chunk - pool->chunk_size;
    }
  else if (likely(size <= MP_SIZE_MAX))
    {
      pool->idx = 1;
      uns aligned = ALIGN_TO(size, CPU_STRUCT_ALIGN);
      chunk = mp_new_big_chunk(aligned);
      chunk->next = pool->state.last[1];
      pool->state.last[1] = chunk;
      pool->state.free[1] = aligned - size;
      return pool->last_big = (void *)chunk - aligned;
    }
  else
    die("Cannot allocate %u bytes from a mempool", size);
}

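/*
 * Behaviour sketch: with mp_new(4096) the threshold is roughly half the
 * chunk size, so the first allocation below is carved out of a shared small
 * chunk while the second gets its own "big" chunk that goes back to the
 * system on mp_flush()/mp_delete().  Exact numbers depend on
 * CPU_STRUCT_ALIGN and CONFIG_UCW_POOL_IS_MMAP.
 *
 *   struct mempool *mp = mp_new(4096);
 *   void *small = mp_alloc(mp, 100);      // chain 0, shared chunk
 *   void *big   = mp_alloc(mp, 100000);   // chain 1, dedicated chunk
 */
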
void *
mp_alloc(struct mempool *pool, uns size)
{
  return mp_alloc_fast(pool, size);
}

void *
mp_alloc_noalign(struct mempool *pool, uns size)
{
  return mp_alloc_fast_noalign(pool, size);
}

void *
mp_alloc_zero(struct mempool *pool, uns size)
{
  void *ptr = mp_alloc_fast(pool, size);
  bzero(ptr, size);
  return ptr;
}

void *
mp_start_internal(struct mempool *pool, uns size)
{
  void *ptr = mp_alloc_internal(pool, size);
  pool->state.free[pool->idx] += size;
  return ptr;
}

void *
mp_start(struct mempool *pool, uns size)
{
  return mp_start_fast(pool, size);
}

void *
mp_start_noalign(struct mempool *pool, uns size)
{
  return mp_start_fast_noalign(pool, size);
}

void *
mp_grow_internal(struct mempool *pool, uns size)
{
  if (unlikely(size > MP_SIZE_MAX))
    die("Cannot allocate %u bytes of memory", size);
  uns avail = mp_avail(pool);
  void *ptr = mp_ptr(pool);
  if (pool->idx)
    {
      uns amortized = likely(avail <= MP_SIZE_MAX / 2) ? avail * 2 : MP_SIZE_MAX;
      amortized = MAX(amortized, size);
      amortized = ALIGN_TO(amortized, CPU_STRUCT_ALIGN);
      struct mempool_chunk *chunk = pool->state.last[1], *next = chunk->next;
      ptr = xrealloc(ptr, amortized + MP_CHUNK_TAIL);
      chunk = ptr + amortized;
      chunk->next = next;
      chunk->size = amortized;
      pool->state.last[1] = chunk;
      pool->state.free[1] = amortized;
      pool->last_big = ptr;
      return ptr;
    }
  else
    {
      void *p = mp_start_internal(pool, size);
      memcpy(p, ptr, avail);
      return p;
    }
}

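/*
 * Open-ended allocation sketch: mp_start() opens a growing block, mp_grow()
 * (backed by mp_grow_internal() above once the block must be enlarged)
 * extends it with amortized doubling, and mp_end() closes it at its final
 * size.  read_byte() is a stand-in for whatever input routine the caller
 * has.
 *
 *   char *buf = mp_start(mp, 1);
 *   uns n = 0;
 *   int c;
 *   while ((c = read_byte()) >= 0 && c != '\n')
 *     {
 *       buf = mp_grow(mp, n + 2);     // keep room for the terminating NUL
 *       buf[n++] = c;
 *     }
 *   buf[n] = 0;
 *   mp_end(mp, buf + n + 1);          // buf now holds the finished line
 */
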
uns
mp_open(struct mempool *pool, void *ptr)
{
  return mp_open_fast(pool, ptr);
}

void *
mp_realloc(struct mempool *pool, void *ptr, uns size)
{
  return mp_realloc_fast(pool, ptr, size);
}

void *
mp_realloc_zero(struct mempool *pool, void *ptr, uns size)
{
  uns old_size = mp_open_fast(pool, ptr);
  ptr = mp_grow(pool, size);
  if (size > old_size)
    bzero(ptr + old_size, size - old_size);
  mp_end(pool, ptr + size);
  return ptr;
}

void *
mp_spread_internal(struct mempool *pool, void *p, uns size)
{
  void *old = mp_ptr(pool);
  void *new = mp_grow_internal(pool, p-old+size);
  return p-old+new;
}

void
mp_restore(struct mempool *pool, struct mempool_state *state)
{
  struct mempool_chunk *chunk, *next;
  struct mempool_state s = *state;
  for (chunk = pool->state.last[0]; chunk != s.last[0]; chunk = next)
    {
      next = chunk->next;
      chunk->next = pool->unused;
      pool->unused = chunk;
    }
  for (chunk = pool->state.last[1]; chunk != s.last[1]; chunk = next)
    {
      next = chunk->next;
      mp_free_big_chunk(chunk);
    }
  pool->state = s;
  pool->last_big = &pool->last_big;
}

struct mempool_state *
mp_push(struct mempool *pool)
{
  struct mempool_state state = pool->state;
  struct mempool_state *p = mp_alloc_fast(pool, sizeof(*p));
  *p = state;
  pool->state.next = p;
  return p;
}

void
mp_pop(struct mempool *pool)
{
  ASSERT(pool->state.next);
  mp_restore(pool, pool->state.next);
}

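/*
 * Sub-pool sketch: mp_push() records the current state inside the pool and
 * mp_pop() rolls back to it, so temporary allocations can be thrown away
 * without flushing the whole pool.  States nest like a stack.
 *
 *   mp_push(mp);
 *   void *tmp = mp_alloc(mp, 4096);   // scratch for one phase of processing
 *   // ... use tmp ...
 *   mp_pop(mp);                       // tmp and anything newer is gone
 */
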
#ifdef TEST

// Randomized self-test of the pool operations (built only with -DTEST)

#include <ucw/getopt.h>

static void
fill(byte *ptr, uns len, uns magic)
{
  while (len--)
    *ptr++ = (magic++ & 255);
}

static void
check(byte *ptr, uns len, uns magic, uns align)
{
  ASSERT(!((uintptr_t)ptr & (align - 1)));
  while (len--)
    if (*ptr++ != (magic++ & 255))
      ASSERT(0);
}

int main(int argc, char **argv)
{
  if (cf_getopt(argc, argv, CF_SHORT_OPTS, CF_NO_LONG_OPTS, NULL) >= 0 || argc != optind)
    die("Invalid usage");

  uns max = 1000, n = 0, m = 0, can_realloc = 0;
  void *ptr[max];
  struct mempool_state *state[max];
  uns len[max], num[max], align[max];
  struct mempool *mp = mp_new(128), mp_static;

  for (uns i = 0; i < 5000; i++)
    {
      for (uns j = 0; j < n; j++)
        check(ptr[j], len[j], j, align[j]);
      DBG("free_small=%u free_big=%u idx=%u chunk_size=%u last_big=%p", mp->state.free[0], mp->state.free[1], mp->idx, mp->chunk_size, mp->last_big);
      for (struct mempool_chunk *ch = mp->state.last[0]; ch; ch = ch->next)
        DBG("small %p %p %p %d", (byte *)ch - ch->size, ch, ch + 1, ch->size);
      for (struct mempool_chunk *ch = mp->state.last[1]; ch; ch = ch->next)
        DBG("big %p %p %p %d", (byte *)ch - ch->size, ch, ch + 1, ch->size);
      int r = random_max(100);
      if ((r -= 1) < 0)
        {
          DBG("flush");
          mp_flush(mp);
          n = m = 0;
          can_realloc = 0;
        }
      else if ((r -= 1) < 0)
        {
          DBG("delete & new");
          mp_delete(mp);
          if (random_max(2))
            mp = mp_new(random_max(0x1000) + 1);
          else
            mp = &mp_static, mp_init(mp, random_max(512) + 1);
          n = m = 0;
          can_realloc = 0;
        }
      else if (n < max && (r -= 30) < 0)
        {
          len[n] = random_max(0x2000);
          DBG("alloc(%u)", len[n]);
          align[n] = random_max(2) ? CPU_STRUCT_ALIGN : 1;
          ptr[n] = (align[n] == 1) ? mp_alloc_fast_noalign(mp, len[n]) : mp_alloc_fast(mp, len[n]);
          DBG(" -> (%p)", ptr[n]);
          fill(ptr[n], len[n], n);
          n++;
          can_realloc = 1;
        }
      else if (n < max && (r -= 20) < 0)
        {
          len[n] = random_max(0x2000);
          DBG("start(%u)", len[n]);
          align[n] = random_max(2) ? CPU_STRUCT_ALIGN : 1;
          ptr[n] = (align[n] == 1) ? mp_start_fast_noalign(mp, len[n]) : mp_start_fast(mp, len[n]);
          DBG(" -> (%p)", ptr[n]);
          fill(ptr[n], len[n], n);
          n++;
          can_realloc = 1;
          goto grow;
        }
      else if (can_realloc && n && (r -= 10) < 0)
        {
          if (mp_open(mp, ptr[n - 1]) != len[n - 1])
            ASSERT(0);
grow:
          {
            uns k = n - 1;
            for (uns i = random_max(4); i--; )
              {
                uns l = len[k];
                len[k] = random_max(0x2000);
                DBG("grow(%u)", len[k]);
                ptr[k] = mp_grow(mp, len[k]);
                DBG(" -> (%p)", ptr[k]);
                check(ptr[k], MIN(l, len[k]), k, align[k]);
                fill(ptr[k], len[k], k);
              }
            mp_end(mp, ptr[k] + len[k]);
          }
        }
      else if (can_realloc && n && (r -= 20) < 0)
        {
          uns i = n - 1, l = len[i];
          DBG("realloc(%p, %u)", ptr[i], len[i]);
          ptr[i] = mp_realloc(mp, ptr[i], len[i] = random_max(0x2000));
          DBG(" -> (%p, %u)", ptr[i], len[i]);
          check(ptr[i], MIN(len[i], l), i, align[i]);
          fill(ptr[i], len[i], i);
        }
      else if (m < max && (r -= 5) < 0)
        {
          DBG("push(%u)", m);
          num[m] = n;
          state[m++] = mp_push(mp);
          can_realloc = 0;
        }
      else if (m && (r -= 2) < 0)
        {
          m--;
          DBG("pop(%u)", m);
          mp_pop(mp);
          n = num[m];
          can_realloc = 0;
        }
      else if (m && (r -= 1) < 0)
        {
          uns i = random_max(m);
          DBG("restore(%u)", i);
          mp_restore(mp, state[i]);
          n = num[m = i];
          can_realloc = 0;
        }
      else if (can_realloc && n && (r -= 5) < 0)
        ASSERT(mp_size(mp, ptr[n - 1]) == len[n - 1]);