2 * Simple and Quick Shared Memory Cache
4 * (c) 2005 Martin Mares <mj@ucw.cz>
10 #include "lib/fastbuf.h"
11 #include "lib/qache.h"
20 * The cache lives in a mmapped file of the following format:
22 * qache_entry[max_entries] table of entries and their keys
23 * u32 qache_hash[hash_size] hash table pointing to keys
24 * u32 block_next[num_blocks] next block pointers
25 * padding to a multiple of block size
26 * blocks[] data blocks
/*
 * NOTE(review): this excerpt is line-sampled (each line carries its original
 * line number) — the `struct qache_header { ... }`, `struct qache_entry { ... }`
 * and `struct qache { ... }` openers/closers are not visible here.
 */
/* Fields of the on-disk/mmapped cache header: */
30 u32 magic; /* QCACHE_MAGIC */
31 u32 block_size; /* Parameters as in qache_params */
32 u32 block_shift; /* block_size = 1 << block_shift */
35 u32 entry_table_start; /* Array of qache_entry's */
37 u32 hash_table_start; /* Hash table containing all keys */
39 u32 next_table_start; /* Array of next pointers */
43 #define QACHE_MAGIC 0xb79f6d12
/* Fields of a cache entry; entry #0 is special and heads the free lists / LRU: */
46 u32 lru_prev, lru_next; /* Entry #0: head of the cyclic LRU list */
47 u32 data_len; /* Entry #0: number of free blocks, Free entries: ~0U */
48 u32 first_data_block; /* Entry #0: first free block */
50 u32 hash_next; /* Entry #0: first free entry, Free entries: next free */
/* In-memory handle fields (pointers into the mmapped region): */
54 struct qache_header *hdr;
55 struct qache_entry *entry_table;
/* Convenience accessors for the special entry #0 (expect a `struct qache *` in scope as the implicit base): */
65 #define first_free_entry entry_table[0].hash_next
66 #define first_free_block entry_table[0].first_data_block
67 #define num_free_blocks entry_table[0].data_len
70 format_key(qache_key_t *key)
72 static byte keybuf[2*sizeof(qache_key_t)+1];
73 for (uns i=0; i<sizeof(qache_key_t); i++)
74 sprintf(keybuf+2*i, "%02x", (*key)[i]);
/*
 * Try to attach to an existing cache file and validate it against the
 * requested parameters (size, block size, entry count, format id).
 * On any mismatch the mapping is discarded and the caller presumably falls
 * back to qache_create().
 * NOTE(review): the excerpt is line-sampled — the `goto` statements, error
 * labels, braces and return statements are not visible here.
 */
79 qache_open_existing(struct qache *q, struct qache_params *par)
81 if ((q->fd = open(q->file_name, O_RDWR, 0)) < 0)
85 byte *err = "stat failed";
86 if (fstat(q->fd, &st) < 0)
89 err = "invalid file size";
/* File must hold at least the header and be a whole number of blocks. */
90 if (st.st_size < (int)sizeof(struct qache_header) || (st.st_size % par->block_size))
92 q->file_size = st.st_size;
94 err = "requested size change";
95 if (q->file_size != par->cache_size)
99 if ((q->mmap_data = mmap(NULL, q->file_size, PROT_READ | PROT_WRITE, MAP_SHARED, q->fd, 0)) == MAP_FAILED)
101 struct qache_header *h = (struct qache_header *) q->mmap_data;
103 err = "incompatible format";
104 if (h->magic != QACHE_MAGIC ||
105 h->block_size != par->block_size ||
106 h->max_entries != par->max_entries ||
107 h->format_id != par->format_id)
110 err = "incomplete file";
111 if (h->num_blocks*h->block_size != q->file_size)
114 /* FIXME: Audit cache file contents */
116 log(L_INFO, "Cache %s: using existing data", q->file_name);
/* Error path: tear down the mapping and report why the old data was dropped. */
120 munmap(q->mmap_data, q->file_size);
122 log(L_INFO, "Cache %s: ignoring old contents (%s)", q->file_name, err);
/*
 * Create a fresh cache file from scratch: write the header, the entry table
 * (entry #0 heading the LRU and free lists, the rest chained as free), the
 * empty hash table, the block next-pointer table, padding, and zeroed data
 * blocks; then mmap the result.
 * NOTE(review): the excerpt is line-sampled — several braces, bputl() calls
 * and the bclose()/final lines are not visible here.
 */
128 qache_create(struct qache *q, struct qache_params *par)
130 q->fd = open(q->file_name, O_RDWR | O_CREAT | O_TRUNC, 0666);
132 die("Cache %s: unable to create (%m)", q->file_name);
133 struct fastbuf *fb = bfdopen_shared(q->fd, 16384);
135 struct qache_header h;
136 bzero(&h, sizeof(h));
137 h.magic = QACHE_MAGIC;
138 h.block_size = par->block_size;
139 h.block_shift = fls(h.block_size);
140 h.num_blocks = par->cache_size >> h.block_shift;
141 h.format_id = par->format_id;
142 h.entry_table_start = sizeof(h);
143 h.max_entries = par->max_entries;
/* +1: entry #0 is the special list-head entry, not a usable slot. */
144 h.hash_table_start = h.entry_table_start + (h.max_entries+1) * sizeof(struct qache_entry);
146 while (h.hash_size < h.max_entries)
148 h.next_table_start = h.hash_table_start + h.hash_size * 4;
/* First data block: everything before it holds the maintenance structures, rounded up to a block. */
149 h.first_data_block = (h.next_table_start + 4*h.num_blocks + h.block_size - 1) >> h.block_shift;
150 if (h.first_data_block >= h.num_blocks)
151 die("Cache %s: Requested size is too small even to hold the maintenance structures", q->file_name);
152 bwrite(fb, &h, sizeof(h));
154 /* Entry #0: heads of all lists */
155 ASSERT(btell(fb) == h.entry_table_start);
156 struct qache_entry ent;
157 bzero(&ent, sizeof(ent));
158 ent.first_data_block = h.first_data_block;
159 ent.data_len = h.num_blocks - h.first_data_block;
161 bwrite(fb, &ent, sizeof(ent));
/* Remaining entries: chained into the free-entry list via hash_next. */
164 bzero(&ent, sizeof(ent));
166 for (uns i=1; i<=h.max_entries; i++)
168 ent.hash_next = (i == h.max_entries ? 0 : i+1);
169 bwrite(fb, &ent, sizeof(ent));
173 ASSERT(btell(fb) == h.hash_table_start);
174 for (uns i=0; i<h.hash_size; i++)
177 /* The next pointers */
178 ASSERT(btell(fb) == h.next_table_start);
/* Blocks below first_data_block and the last block terminate their chains with 0. */
179 for (uns i=0; i<h.num_blocks; i++)
180 bputl(fb, (i < h.first_data_block || i == h.num_blocks-1) ? 0 : i+1);
183 ASSERT(btell(fb) <= h.first_data_block << h.block_shift);
184 while (btell(fb) < h.first_data_block << h.block_shift)
188 for (uns i=h.first_data_block; i<h.num_blocks; i++)
189 for (uns j=0; j<h.block_size; j+=4)
192 ASSERT(btell(fb) == par->cache_size);
194 log(L_INFO, "Cache %s: created (%d bytes, %d slots, %d buckets)", q->file_name, par->cache_size, h.max_entries, h.hash_size);
196 if ((q->mmap_data = mmap(NULL, par->cache_size, PROT_READ | PROT_WRITE, MAP_SHARED, q->fd, 0)) == MAP_FAILED)
/* FIXME(review): format string expects "%s" for the cache name but is passed
 * par->cache_size (an integer) — should be q->file_name. */
197 die("Cache %s: mmap failed (%m)", par->cache_size);
198 q->file_size = par->cache_size;
/*
 * Public constructor: allocate the handle, reuse an existing cache file when
 * allowed and compatible, otherwise create a new one, then set up the
 * in-memory pointers into the mapped region.
 * NOTE(review): the excerpt is line-sampled — some ASSERTs, braces and the
 * `return q;` line are not visible here.
 */
202 qache_open(struct qache_params *par)
204 struct qache *q = xmalloc_zero(sizeof(*q));
205 q->file_name = xstrdup(par->file_name);
/* Block size must be a power of two >= 8 (shift arithmetic relies on it). */
207 ASSERT(par->block_size >= 8 && !(par->block_size & (par->block_size-1)));
208 par->cache_size = ALIGN(par->cache_size, par->block_size);
210 if (par->force_reset <= 0 && qache_open_existing(q, par))
212 else if (par->force_reset < 0)
/* FIXME(review): format string contains "%s" but no argument is passed —
 * should be q->file_name (varargs mismatch is undefined behavior). */
213 die("Cache %s: read-only access requested, but no data available");
215 qache_create(q, par);
217 /* FIXME: Remember `closed correctly' status */
/* Derive the table pointers from the offsets recorded in the header. */
219 q->hdr = (struct qache_header *) q->mmap_data;
220 q->entry_table = (struct qache_entry *) (q->mmap_data + q->hdr->entry_table_start);
221 q->hash_table = (u32 *) (q->mmap_data + q->hdr->hash_table_start);
222 q->next_table = (u32 *) (q->mmap_data + q->hdr->next_table_start);
/*
 * Destructor: unmap the cache and, unless retain_data is set, unlink the
 * backing file.
 * NOTE(review): the excerpt is line-sampled — the close(q->fd) / free()
 * teardown lines are not visible here.
 */
227 qache_close(struct qache *q, uns retain_data)
229 munmap(q->mmap_data, q->file_size);
231 if (!retain_data && unlink(q->file_name) < 0)
232 log(L_ERROR, "Cache %s: unlink failed (%m)", q->file_name);
/*
 * Flush a byte range of the mapping to the backing file.  The range is
 * widened to page boundaries first, as msync() requires a page-aligned start.
 */
238 qache_msync(struct qache *q, uns start, uns len)
240 len += (start % PAGE_SIZE);
241 start -= start % PAGE_SIZE;
242 len = ALIGN(len, PAGE_SIZE);
/* Asynchronous flush; failure is logged, not fatal. */
243 if (msync(q->mmap_data + start, len, MS_ASYNC | MS_INVALIDATE) < 0)
244 log(L_ERROR, "Cache %s: msync failed: %m", q->file_name);
245 /* FIXME: Do we need this on Linux? */
249 qache_msync_block(struct qache *q, uns blk)
251 DBG("\tSyncing block %d", blk);
252 qache_msync(q, blk << q->hdr->block_shift, q->hdr->block_size);
/*
 * Take an exclusive fcntl() record lock over the header region, serializing
 * all cache mutators across processes.
 * NOTE(review): the excerpt is line-sampled — braces and the line setting
 * q->locked (asserted elsewhere) are not visible here.
 */
256 qache_lock(struct qache *q)
258 /* We cannot use flock() since it happily permits locking a shared fd (e.g., after fork()) multiple times */
260 struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET, .l_start = 0, .l_len = sizeof(struct qache_header) };
261 if (fcntl(q->fd, F_SETLKW, &fl) < 0)
262 die("fcntl lock on %s: %m", q->file_name);
264 DBG("Locked cache %s", q->file_name);
/*
 * Release the fcntl() lock; when dirty, first flush the maintenance area
 * (header + entry table + hash table + next table) so other processes see a
 * consistent view.
 */
268 qache_unlock(struct qache *q, uns dirty)
271 if (dirty) /* Sync header, entry table and hash table */
272 qache_msync(q, 0, q->hdr->first_data_block << q->hdr->block_shift);
273 struct flock fl = { .l_type = F_UNLCK, .l_whence = SEEK_SET, .l_start = 0, .l_len = sizeof(struct qache_header) };
274 if (fcntl(q->fd, F_SETLKW, &fl) < 0)
275 die("fcntl unlock on %s: %m", q->file_name);
277 DBG("Unlocked cache %s (dirty=%d)", q->file_name, dirty);
281 qache_hash(struct qache *q, qache_key_t *key)
283 uns h = ((*key)[0] << 24) | ((*key)[1] << 16) | ((*key)[2] << 8) | (*key)[3];
284 return h % q->hdr->hash_size;
/*
 * Find the entry holding the given key, first trying the caller's position
 * hint, then walking the hash chain.
 * NOTE(review): the excerpt is line-sampled — the return statements (hint hit,
 * chain hit, not-found) are not visible here.
 */
288 qache_hash_find(struct qache *q, qache_key_t *key, uns pos_hint)
/* Fast path: the hint is a valid entry index and its key matches. */
292 if (pos_hint && pos_hint <= q->hdr->max_entries && !memcmp(q->entry_table[pos_hint].key, key, sizeof(*key)))
295 uns h = qache_hash(q, key);
296 for (uns e = q->hash_table[h]; e; e=q->entry_table[e].hash_next)
297 if (!memcmp(q->entry_table[e].key, key, sizeof(*key)))
303 qache_hash_insert(struct qache *q, uns e)
305 uns h = qache_hash(q, &q->entry_table[e].key);
306 q->entry_table[e].hash_next = q->hash_table[h];
307 q->hash_table[h] = e;
/*
 * Unlink entry e from its hash bucket's chain by walking with a pointer to
 * the previous link.
 * NOTE(review): the excerpt is line-sampled — the declarations of hh/f, the
 * return after unlinking, and the not-found fallback are not visible here.
 */
311 qache_hash_remove(struct qache *q, uns e)
313 struct qache_entry *entry = &q->entry_table[e];
/* hh points at the slot holding the current link; assignment-in-condition is intentional. */
315 for (hh=&q->hash_table[qache_hash(q, &entry->key)]; f=*hh; hh=&(q->entry_table[f].hash_next))
316 if (!memcmp(q->entry_table[f].key, entry->key, sizeof(qache_key_t)))
318 *hh = entry->hash_next;
/*
 * Pop an entry slot off the free-entry list (linked through hash_next of
 * entry #0).  Free entries are marked with data_len == ~0U.
 * NOTE(review): the excerpt is line-sampled — the `return e;` (and any DBG)
 * lines are not visible here.
 */
325 qache_alloc_entry(struct qache *q)
327 uns e = q->first_free_entry;
/* Caller must hold the lock and the free list must be non-empty. */
328 ASSERT(q->locked && e);
329 struct qache_entry *entry = &q->entry_table[e];
330 ASSERT(entry->data_len == ~0U);
331 q->first_free_entry = entry->hash_next;
/*
 * Return entry e to the free-entry list: mark it free (data_len = ~0U) and
 * push it on the list headed at entry #0.
 */
337 qache_free_entry(struct qache *q, uns e)
339 struct qache_entry *entry = &q->entry_table[e];
/* Must hold the lock; double-free would trip the data_len check. */
340 ASSERT(q->locked && entry->data_len != ~0U);
341 entry->data_len = ~0U;
342 entry->hash_next = q->first_free_entry;
343 q->first_free_entry = e;
347 get_block_start(struct qache *q, uns block)
349 ASSERT(block && block < q->hdr->num_blocks);
350 return q->mmap_data + (block << q->hdr->block_shift);
/*
 * Pop a block off the free-block list (linked through next_table, headed at
 * entry #0's first_data_block) and update the free count.
 * NOTE(review): the excerpt is line-sampled — the `return blk;` line is not
 * visible here.
 */
354 qache_alloc_block(struct qache *q)
/* Caller must hold the lock and have verified that a free block exists. */
356 ASSERT(q->locked && q->num_free_blocks);
357 uns blk = q->first_free_block;
358 q->first_free_block = q->next_table[blk];
359 q->num_free_blocks--;
360 DBG("\tAllocated block %d", blk);
/*
 * Push block blk back on the free-block list and bump the free count.
 * NOTE(review): the excerpt is line-sampled — the opening brace / any ASSERT
 * on q->locked is not visible here.
 */
365 qache_free_block(struct qache *q, uns blk)
368 q->next_table[blk] = q->first_free_block;
369 q->first_free_block = blk;
370 q->num_free_blocks++;
371 DBG("\tFreed block %d", blk);
/*
 * Insert entry e at the head of the cyclic LRU list (entry #0 is the
 * sentinel head; head->lru_next is the most recently used entry).
 * NOTE(review): the excerpt is line-sampled — the remaining link fixups
 * (entry->lru_prev, head->lru_next, succ->lru_prev) are not visible here.
 */
375 qache_lru_insert(struct qache *q, uns e)
377 struct qache_entry *head = &q->entry_table[0];
378 struct qache_entry *entry = &q->entry_table[e];
/* Entry must currently be unlinked (both links zero) and the lock held. */
379 ASSERT(q->locked && !entry->lru_prev && !entry->lru_next);
380 uns succe = head->lru_next;
381 struct qache_entry *succ = &q->entry_table[succe];
384 entry->lru_next = succe;
/*
 * Unlink entry e from the cyclic LRU list and zero its links (the
 * zeroed state is what qache_lru_insert asserts on).
 */
389 qache_lru_remove(struct qache *q, uns e)
392 struct qache_entry *entry = &q->entry_table[e];
393 q->entry_table[entry->lru_prev].lru_next = entry->lru_next;
394 q->entry_table[entry->lru_next].lru_prev = entry->lru_prev;
395 entry->lru_prev = entry->lru_next = 0;
399 qache_lru_get(struct qache *q)
401 return q->entry_table[0].lru_prev;
/*
 * Low-level delete: free all data blocks of entry e (walking the next_table
 * chain, decrementing data_len a block at a time), then unlink it from the
 * LRU and hash structures and return the slot to the free list.
 * NOTE(review): the excerpt is line-sampled — the `blk = next;` advance and
 * the else-branch zeroing data_len are not visible here.
 */
405 qache_ll_delete(struct qache *q, uns e)
407 struct qache_entry *entry = &q->entry_table[e];
408 uns blk = entry->first_data_block;
409 while (entry->data_len)
/* Save the chain link before the block is recycled onto the free list. */
411 uns next = q->next_table[blk];
412 qache_free_block(q, blk);
414 if (entry->data_len >= q->hdr->block_size)
415 entry->data_len -= q->hdr->block_size;
419 qache_lru_remove(q, e);
420 qache_hash_remove(q, e);
421 qache_free_entry(q, e);
/*
 * Insert (or replace) an item: delete any existing entry with the same key,
 * evict LRU victims until enough free blocks and a free entry slot exist,
 * then copy the data into a chain of blocks and link the entry into the LRU
 * and hash table.
 * NOTE(review): the excerpt is line-sampled — locking calls, braces, the
 * size/data-copy loop header and the return of the entry id are not visible.
 */
425 qache_insert(struct qache *q, qache_key_t *key, uns pos_hint, void *data, uns size)
429 uns e = qache_hash_find(q, key, pos_hint);
/* Replace semantics: drop the old entry first.  (Style nit: "q ,e" spacing.) */
432 qache_ll_delete(q ,e);
433 DBG("Insert <%s>: deleting old entry %d", format_key(key), e);
/* Round the payload up to whole blocks. */
436 uns blocks = (size + q->hdr->block_size - 1) >> q->hdr->block_shift;
437 if (blocks > q->hdr->num_blocks - q->hdr->first_data_block)
/* Evict LRU entries until we have both enough blocks and a free slot. */
442 while (q->num_free_blocks < blocks || !q->first_free_entry)
444 e = qache_lru_get(q);
445 DBG("Insert <%s>: evicting entry %d to make room for %d blocks", format_key(key), e, blocks);
447 qache_ll_delete(q, e);
449 e = qache_alloc_entry(q);
450 struct qache_entry *entry = &q->entry_table[e];
451 entry->data_len = size;
452 memcpy(entry->key, key, sizeof(*key));
453 DBG("Insert <%s>: created entry %d with %d data blocks", format_key(key), e, blocks);
455 entry->first_data_block = 0;
/* Blocks are filled back-to-front so the chain ends up in forward order;
 * the first (possibly partial) chunk is the payload tail modulo block size. */
458 uns chunk = (size & (q->hdr->block_size-1)) ? : q->hdr->block_size;
459 uns blk = qache_alloc_block(q);
460 q->next_table[blk] = entry->first_data_block;
461 memcpy(get_block_start(q, blk), data+size-chunk, chunk);
462 qache_msync_block(q, blk);
463 entry->first_data_block = blk;
467 qache_lru_insert(q, e);
468 qache_hash_insert(q, e);
/*
 * Look up a key and optionally copy out (part of) its data starting at byte
 * offset `start`; a hit is refreshed in the LRU (remove + reinsert at head).
 * NOTE(review): the excerpt is line-sampled — the declarations of
 * size/data, loop braces, the copy-loop advance and the return statements
 * are not visible here.
 */
474 qache_lookup(struct qache *q, qache_key_t *key, uns pos_hint, void **datap, uns *sizep, uns start)
477 uns e = qache_hash_find(q, key, pos_hint);
480 struct qache_entry *entry = &q->entry_table[e];
481 DBG("Lookup <%s>: found entry %d", format_key(key), e);
/* Touch: move the entry to the MRU end of the list. */
482 qache_lru_remove(q, e);
483 qache_lru_insert(q, e);
/* Clamp the transfer to what lies beyond `start` (presumably size==start here — confirm). */
487 uns avail = (size > entry->data_len) ? 0 : entry->data_len - size;
488 uns xfer = MIN(*sizep, avail);
493 *datap = xmalloc(xfer);
494 uns blk = entry->first_data_block;
/* Skip whole blocks preceding the requested offset. */
495 while (start >= q->hdr->block_size)
497 blk = q->next_table[blk];
498 start -= q->hdr->block_size;
503 uns len = MIN(xfer, q->hdr->block_size - start);
504 memcpy(data, get_block_start(q, blk), len);
505 blk = q->next_table[blk];
514 qache_unlock(q, 1); /* Yes, modified -- we update the LRU */
518 DBG("Lookup <%s>: not found", format_key(key));
/*
 * Delete the entry matching the key (if any) and report which slot it held.
 * NOTE(review): the excerpt is line-sampled — locking calls and return
 * statements are not visible here.
 */
525 qache_delete(struct qache *q, qache_key_t *key, uns pos_hint)
528 uns e = qache_hash_find(q, key, pos_hint);
/* NOTE(review): debug string has unbalanced "<%s" — missing closing '>'. */
531 DBG("Delete <%s: deleting entry %d", format_key(key), e);
532 qache_ll_delete(q, e);
535 DBG("Delete <%s>: No match", format_key(key));
/*
 * Dump the complete cache state (header parameters, entry table, hash table
 * and next-pointer table) to the debug log.
 */
541 qache_debug(struct qache *q)
/* NOTE(review): block_size is passed for both "%d" and "(%d data)" — the
 * second was presumably meant to be the usable payload size; confirm. */
543 log(L_DEBUG, "Cache %s: block_size=%d (%d data), num_blocks=%d (%d first data), %d slots, %d hash buckets",
544 q->file_name, q->hdr->block_size, q->hdr->block_size, q->hdr->num_blocks, q->hdr->first_data_block,
545 q->hdr->max_entries, q->hdr->hash_size);
547 log(L_DEBUG, "Table of cache entries:");
548 log(L_DEBUG, "\tEntry\tLruPrev\tLruNext\tDataLen\tDataBlk\tHashNxt\tKey");
549 for (uns e=0; e<=q->hdr->max_entries; e++)
551 struct qache_entry *ent = &q->entry_table[e];
552 log(L_DEBUG, "\t%d\t%d\t%d\t%d\t%d\t%d\t%s", e, ent->lru_prev, ent->lru_next, ent->data_len,
553 ent->first_data_block, ent->hash_next, format_key(&ent->key));
556 log(L_DEBUG, "Hash table:");
557 for (uns h=0; h<q->hdr->hash_size; h++)
558 log(L_DEBUG, "\t%04x\t%d", h, q->hash_table[h]);
560 log(L_DEBUG, "Next pointers:");
561 for (uns blk=q->hdr->first_data_block; blk<q->hdr->num_blocks; blk++)
562 log(L_DEBUG, "\t%d\t%d", blk, q->next_table[blk]);
/*
 * Self-test driver: open a cache under tmp/, repeatedly insert growing
 * payloads under one key, then look it up.
 * NOTE(review): the excerpt is line-sampled — several qache_params fields,
 * loop braces and the return statement are not visible here.
 */
567 int main(int argc UNUSED, char **argv UNUSED)
569 struct qache_params par = {
570 .file_name = "tmp/test",
575 .format_id = 0xfeedcafe
577 struct qache *q = qache_open(&par);
579 qache_key_t key = { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0, 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef };
580 byte data[1000] = { '1', '2', '3', '4', '5' };
581 for (uns i=0; i<100; i++)
/* Payload size grows 0,10,...,990 — exercises multi-block chains and eviction. */
583 qache_insert(q, &key, 0, data, 10*i);
588 qache_lookup(q, &key, 0, NULL, NULL, 0);