* (c) 2005 Martin Mares <mj@ucw.cz>
*/
-#define LOCAL_DEBUG
+#undef LOCAL_DEBUG
#include "lib/lib.h"
#include "lib/fastbuf.h"
return keybuf;
}
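+/* Propagate a modified byte range of the mapped cache file to disk.
+ * Compiled out on Linux, where shared mappings are guaranteed to be coherent. */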
+static void
+qache_msync(struct qache *q UNUSED, uns start UNUSED, uns len UNUSED)
+{
+#ifndef CONFIG_LINUX
+ /* We don't need msyncing on Linux, since the mappings are guaranteed to be coherent */
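+  /* msync() wants page-aligned addresses, so round the range out to whole pages */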
+ len += (start % PAGE_SIZE);
+ start -= start % PAGE_SIZE;
+ len = ALIGN(len, PAGE_SIZE);
+ if (msync(q->mmap_data + start, len, MS_ASYNC | MS_INVALIDATE) < 0)
+ log(L_ERROR, "Cache %s: msync failed: %m", q->file_name);
+#endif
+}
+
+static void
+qache_msync_block(struct qache *q, uns blk)
+{
+ DBG("\tSyncing block %d", blk);
+ qache_msync(q, blk << q->hdr->block_shift, q->hdr->block_size);
+}
+
+static void
+qache_lock(struct qache *q)
+{
+ /* We cannot use flock() since it happily permits locking a shared fd (e.g., after fork()) multiple times */
+ ASSERT(!q->locked);
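+  /* An fcntl() lock on the header region acts as a lock for the whole cache,
+   * since all processes contend for the same byte range */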
+ struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET, .l_start = 0, .l_len = sizeof(struct qache_header) };
+ if (fcntl(q->fd, F_SETLKW, &fl) < 0)
+ die("fcntl lock on %s: %m", q->file_name);
+ q->locked = 1;
+ DBG("Locked cache %s", q->file_name);
+}
+
+static void
+qache_unlock(struct qache *q, uns dirty)
+{
+ ASSERT(q->locked);
+  if (dirty)			/* Sync the header and all tables preceding the data area */
+ qache_msync(q, 0, q->hdr->first_data_block << q->hdr->block_shift);
+ struct flock fl = { .l_type = F_UNLCK, .l_whence = SEEK_SET, .l_start = 0, .l_len = sizeof(struct qache_header) };
+ if (fcntl(q->fd, F_SETLKW, &fl) < 0)
+ die("fcntl unlock on %s: %m", q->file_name);
+ q->locked = 0;
+ DBG("Unlocked cache %s (dirty=%d)", q->file_name, dirty);
+}
+
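+/* On which lists an entry has been seen during the audit */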
+enum entry_audit_flags {
+ ET_FREE_LIST = 1,
+ ET_LRU = 2,
+ ET_HASH = 4
+};
+
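+/* Walk the free list, the hash chains and the LRU, marking every entry
+ * visited, and verify that each entry is linked exactly where its state
+ * says it should be. Returns an error message or NULL. */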
+static byte *
+audit_entries(struct qache *q, byte *entrymap)
+{
+ uns i, j;
+
+ DBG("Auditing entries");
+
+ /* Check the free list */
+ i = q->first_free_entry;
+ while (i)
+ {
+ if (i >= q->hdr->max_entries || (entrymap[i] & ET_FREE_LIST) || q->entry_table[i].data_len != ~0U)
+ return "inconsistent free entry list";
+ entrymap[i] |= ET_FREE_LIST;
+ i = q->entry_table[i].hash_next;
+ }
+
+ /* Check the hash table */
+ for (i=0; i<q->hdr->hash_size; i++)
+ {
+ j = q->hash_table[i];
+ while (j)
+ {
+ if (j >= q->hdr->max_entries || (entrymap[j] & (ET_HASH | ET_FREE_LIST)))
+ return "inconsistent hash chains";
+ entrymap[j] |= ET_HASH;
+ j = q->entry_table[j].hash_next;
+ }
+ }
+
+ /* Check the LRU */
+ i = 0;
+ do
+ {
+ j = q->entry_table[i].lru_next;
+ if ((entrymap[i] & (ET_LRU | ET_FREE_LIST)) || j >= q->hdr->max_entries || q->entry_table[j].lru_prev != i)
+ return "inconsistent LRU list";
+ entrymap[i] |= ET_LRU;
+ i = j;
+ }
+ while (i);
+
+ /* Check if all non-free items are in all lists */
+ for (i=1; i<q->hdr->max_entries; i++)
+ {
+ if (entrymap[i] != ((q->entry_table[i].data_len == ~0U) ? ET_FREE_LIST : (ET_LRU | ET_HASH)))
+ return "inconsistent lists";
+ }
+ return NULL;
+}
+
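+/* How a data block is referenced during the audit */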
+enum block_audit_flags {
+ BT_FREE_LIST = 1,
+ BT_ALLOC = 2
+};
+
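+/* Mark all blocks on the free list and in the per-entry chains and verify
+ * that every block in the data area is referenced exactly once. */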
+static byte *
+audit_blocks(struct qache *q, byte *entrymap, byte *blockmap)
+{
+ uns i, j;
+
+ DBG("Auditing blocks");
+
+ /* Check the free list */
+ for (i=q->first_free_block; i; i=q->next_table[i])
+ {
+ if (i < q->hdr->first_data_block || i >= q->hdr->num_blocks || (blockmap[i] & BT_FREE_LIST))
+ return "inconsistent free block list";
+ blockmap[i] |= BT_FREE_LIST;
+ }
+
+ /* Check allocation lists of entries */
+ for (i=1; i<q->hdr->max_entries; i++)
+ if (!(entrymap[i] & ET_FREE_LIST))
+ {
+ uns blocks = 0;
+ for (j=q->entry_table[i].first_data_block; j; j=q->next_table[j])
+ {
+        if (j < q->hdr->first_data_block || j >= q->hdr->num_blocks || blockmap[j])
+ return "inconsistent entry block list";
+ blockmap[j] |= BT_ALLOC;
+ blocks++;
+ }
+ if (((q->entry_table[i].data_len + q->hdr->block_size - 1) >> q->hdr->block_shift) != blocks)
+ return "inconsistent entry data length";
+ }
+
+ /* Check if all blocks belong somewhere */
+ for (i=q->hdr->first_data_block; i < q->hdr->num_blocks; i++)
+ if (!blockmap[i])
+ {
+ DBG("Block %d unreferenced", i);
+ return "unreferenced blocks found";
+ }
+
+ return NULL;
+}
+
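+/* Run a complete consistency check of the cache. Returns a static error
+ * message or NULL if everything is sound. Called with the cache locked. */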
+static byte *
+do_audit(struct qache *q)
+{
+ byte *entry_map = xmalloc_zero(q->hdr->max_entries);
+ byte *block_map = xmalloc_zero(q->hdr->num_blocks);
+ byte *err = audit_entries(q, entry_map);
+ if (!err)
+ err = audit_blocks(q, entry_map, block_map);
+ xfree(block_map);
+ xfree(entry_map);
+ return err;
+}
+
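+/* Recompute pointers to the entry, hash and next tables from the offsets
+ * stored in the mapped header. */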
+static void
+qache_setup_pointers(struct qache *q)
+{
+ q->hdr = (struct qache_header *) q->mmap_data;
+ q->entry_table = (struct qache_entry *) (q->mmap_data + q->hdr->entry_table_start);
+ q->hash_table = (u32 *) (q->mmap_data + q->hdr->hash_table_start);
+ q->next_table = (u32 *) (q->mmap_data + q->hdr->next_table_start);
+}
+
static int
qache_open_existing(struct qache *q, struct qache_params *par)
{
goto close_and_fail;
struct qache_header *h = (struct qache_header *) q->mmap_data;
+ qache_setup_pointers(q);
+ qache_lock(q);
+
err = "incompatible format";
if (h->magic != QACHE_MAGIC ||
h->block_size != par->block_size ||
h->max_entries != par->max_entries ||
h->format_id != par->format_id)
- goto unmap_and_fail;
+ goto unlock_and_fail;
err = "incomplete file";
if (h->num_blocks*h->block_size != q->file_size)
- goto unmap_and_fail;
+ goto unlock_and_fail;
- /* FIXME: Audit cache file contents */
+  if ((err = do_audit(q)))
+ goto unlock_and_fail;
+ qache_unlock(q, 0);
log(L_INFO, "Cache %s: using existing data", q->file_name);
return 1;
- unmap_and_fail:
+ unlock_and_fail:
+ qache_unlock(q, 0);
munmap(q->mmap_data, q->file_size);
close_and_fail:
log(L_INFO, "Cache %s: ignoring old contents (%s)", q->file_name, err);
h.format_id = par->format_id;
h.entry_table_start = sizeof(h);
h.max_entries = par->max_entries;
- h.hash_table_start = h.entry_table_start + (h.max_entries+1) * sizeof(struct qache_entry);
+ h.hash_table_start = h.entry_table_start + h.max_entries * sizeof(struct qache_entry);
h.hash_size = 1;
while (h.hash_size < h.max_entries)
h.hash_size *= 2;
/* Other entries */
bzero(&ent, sizeof(ent));
ent.data_len = ~0U;
- for (uns i=1; i<=h.max_entries; i++)
+ for (uns i=1; i<h.max_entries; i++)
{
- ent.hash_next = (i == h.max_entries ? 0 : i+1);
+ ent.hash_next = (i == h.max_entries-1 ? 0 : i+1);
bwrite(fb, &ent, sizeof(ent));
}
if ((q->mmap_data = mmap(NULL, par->cache_size, PROT_READ | PROT_WRITE, MAP_SHARED, q->fd, 0)) == MAP_FAILED)
die("Cache %s: mmap failed (%m)", par->cache_size);
q->file_size = par->cache_size;
+ qache_setup_pointers(q);
}
struct qache *
die("Cache %s: read-only access requested, but no data available");
else
qache_create(q, par);
-
- /* FIXME: Remember `closed correctly' status */
-
- q->hdr = (struct qache_header *) q->mmap_data;
- q->entry_table = (struct qache_entry *) (q->mmap_data + q->hdr->entry_table_start);
- q->hash_table = (u32 *) (q->mmap_data + q->hdr->hash_table_start);
- q->next_table = (u32 *) (q->mmap_data + q->hdr->next_table_start);
return q;
}
xfree(q);
}
-static void
-qache_msync(struct qache *q, uns start, uns len)
-{
- len += (start % PAGE_SIZE);
- start -= start % PAGE_SIZE;
- len = ALIGN(len, PAGE_SIZE);
- if (msync(q->mmap_data + start, len, MS_ASYNC | MS_INVALIDATE) < 0)
- log(L_ERROR, "Cache %s: msync failed: %m", q->file_name);
- /* FIXME: Do we need this on Linux? */
-}
-
-static void
-qache_msync_block(struct qache *q, uns blk)
-{
- DBG("\tSyncing block %d", blk);
- qache_msync(q, blk << q->hdr->block_shift, q->hdr->block_size);
-}
-
-static void
-qache_lock(struct qache *q)
-{
- /* We cannot use flock() since it happily permits locking a shared fd (e.g., after fork()) multiple times */
- ASSERT(!q->locked);
- struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET, .l_start = 0, .l_len = sizeof(struct qache_header) };
- if (fcntl(q->fd, F_SETLKW, &fl) < 0)
- die("fcntl lock on %s: %m", q->file_name);
- q->locked = 1;
- DBG("Locked cache %s", q->file_name);
-}
-
-static void
-qache_unlock(struct qache *q, uns dirty)
-{
- ASSERT(q->locked);
- if (dirty) /* Sync header, entry table and hash table */
- qache_msync(q, 0, q->hdr->first_data_block << q->hdr->block_shift);
- struct flock fl = { .l_type = F_UNLCK, .l_whence = SEEK_SET, .l_start = 0, .l_len = sizeof(struct qache_header) };
- if (fcntl(q->fd, F_SETLKW, &fl) < 0)
- die("fcntl unlock on %s: %m", q->file_name);
- q->locked = 0;
- DBG("Unlocked cache %s (dirty=%d)", q->file_name, dirty);
-}
-
static uns
qache_hash(struct qache *q, qache_key_t *key)
{
{
ASSERT(q->locked);
- if (pos_hint && pos_hint <= q->hdr->max_entries && !memcmp(q->entry_table[pos_hint].key, key, sizeof(*key)))
+  if (pos_hint && pos_hint < q->hdr->max_entries && q->entry_table[pos_hint].data_len != ~0U && !memcmp(q->entry_table[pos_hint].key, key, sizeof(*key)))
return pos_hint;
uns h = qache_hash(q, key);
}
uns
-qache_lookup(struct qache *q, qache_key_t *key, uns pos_hint, void **datap, uns *sizep, uns start)
+qache_lookup(struct qache *q, qache_key_t *key, uns pos_hint, byte **datap, uns *sizep, uns start)
{
qache_lock(q);
uns e = qache_hash_find(q, key, pos_hint);
if (sizep)
{
uns size = *sizep;
- uns avail = (size > entry->data_len) ? 0 : entry->data_len - size;
- uns xfer = MIN(*sizep, avail);
+ uns avail = (start > entry->data_len) ? 0 : entry->data_len - start;
+ uns xfer = MIN(size, avail);
*sizep = avail;
if (datap)
{
log(L_DEBUG, "Table of cache entries:");
log(L_DEBUG, "\tEntry\tLruPrev\tLruNext\tDataLen\tDataBlk\tHashNxt\tKey");
- for (uns e=0; e<=q->hdr->max_entries; e++)
+ for (uns e=0; e<q->hdr->max_entries; e++)
{
struct qache_entry *ent = &q->entry_table[e];
log(L_DEBUG, "\t%d\t%d\t%d\t%d\t%d\t%d\t%s", e, ent->lru_prev, ent->lru_next, ent->data_len,
log(L_DEBUG, "\t%d\t%d", blk, q->next_table[blk]);
}
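+/* Audit the cache under the lock and die if any corruption is found. */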
+void
+qache_audit(struct qache *q)
+{
+ byte *err;
+ qache_lock(q);
+  if ((err = do_audit(q)))
+ die("Cache %s: %s", q->file_name, err);
+ qache_unlock(q, 0);
+}
+
#ifdef TEST
int main(int argc UNUSED, char **argv UNUSED)
struct qache *q = qache_open(&par);
qache_key_t key = { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0, 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef };
- byte data[1000] = { '1', '2', '3', '4', '5' };
- for (uns i=0; i<100; i++)
+#define N 100
+ uns i, j;
+ byte data[11*N];
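+  /* Insert N entries with distinct keys and a deterministic data pattern */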
+ for (i=0; i<N; i++)
{
- qache_insert(q, &key, 0, data, 10*i);
- key[15]++;
+ key[3] = i / 16; key[15] = i % 16;
+ for (j=0; j<11*i; j++)
+ data[j] = 0x33 + i*j;
+ qache_insert(q, &key, 0, data, 11*i);
}
qache_debug(q);
+ qache_audit(q);
- qache_lookup(q, &key, 0, NULL, NULL, 0);
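+  /* Re-read all entries and verify their contents; early entries may have
+   * been evicted by the LRU replacement, so not all need to be found */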
+ uns found = 0;
+  for (i=0; i<N; i++)
+ {
+ key[3] = i / 16; key[15] = i % 16;
+ byte *dptr = data;
+ uns sz = sizeof(data);
+ uns e = qache_lookup(q, &key, 0, &dptr, &sz, 0);
+ if (e)
+ {
+ ASSERT(sz == 11*i);
+ for (j=0; j<sz; j++)
+ ASSERT(data[j] == (byte)(0x33 + i*j));
+ found++;
+ }
+ }
+ log(L_INFO, "Found %d of %d entries", found, N);
qache_close(q, 1);
return 0;