X-Git-Url: http://mj.ucw.cz/gitweb/?a=blobdiff_plain;f=lib%2Fqache.c;h=3c4be273ab1c1bd0b24d100cc4439781da75a61c;hb=b4d79987a979bcbf749294c706fdc8c4ae8f9304;hp=eeda8fd1a47268b7a8e87e870906f7ff6448c917;hpb=53a240dfbe2707acc43c6883886e06463756ee48;p=libucw.git

diff --git a/lib/qache.c b/lib/qache.c
index eeda8fd1..3c4be273 100644
--- a/lib/qache.c
+++ b/lib/qache.c
@@ -4,15 +4,16 @@
  * (c) 2005 Martin Mares <mj@ucw.cz>
  */
 
-#define LOCAL_DEBUG
+#undef LOCAL_DEBUG
 
 #include "lib/lib.h"
+#include "lib/bitops.h"
 #include "lib/fastbuf.h"
 #include "lib/qache.h"
 
-#include 
 #include 
 #include 
+#include 
 #include 
 #include 
 
@@ -75,6 +76,177 @@ format_key(qache_key_t *key)
   return keybuf;
 }
 
+static void
+qache_msync(struct qache *q UNUSED, uns start UNUSED, uns len UNUSED)
+{
+#ifndef CONFIG_LINUX
+  /* We don't need msyncing on Linux, since the mappings are guaranteed to be coherent */
+  len += (start % PAGE_SIZE);
+  start -= start % PAGE_SIZE;
+  len = ALIGN_TO(len, PAGE_SIZE);
+  if (msync(q->mmap_data + start, len, MS_ASYNC | MS_INVALIDATE) < 0)
+    log(L_ERROR, "Cache %s: msync failed: %m", q->file_name);
+#endif
+}
+
+static void
+qache_msync_block(struct qache *q, uns blk)
+{
+  DBG("\tSyncing block %d", blk);
+  qache_msync(q, blk << q->hdr->block_shift, q->hdr->block_size);
+}
+
+static void
+qache_lock(struct qache *q)
+{
+  /* We cannot use flock() since it happily permits locking a shared fd (e.g., after fork()) multiple times */
+  ASSERT(!q->locked);
+  struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET, .l_start = 0, .l_len = sizeof(struct qache_header) };
+  if (fcntl(q->fd, F_SETLKW, &fl) < 0)
+    die("fcntl lock on %s: %m", q->file_name);
+  q->locked = 1;
+  DBG("Locked cache %s", q->file_name);
+}
+
+static void
+qache_unlock(struct qache *q, uns dirty)
+{
+  ASSERT(q->locked);
+  if (dirty)			/* Sync header, entry table and hash table */
+    qache_msync(q, 0, q->hdr->first_data_block << q->hdr->block_shift);
+  struct flock fl = { .l_type = F_UNLCK, .l_whence = SEEK_SET, .l_start = 0, .l_len = sizeof(struct qache_header) };
+  if (fcntl(q->fd, F_SETLKW, &fl) < 0)
+    die("fcntl unlock on %s: %m", q->file_name);
+  q->locked = 0;
+  DBG("Unlocked cache %s (dirty=%d)", q->file_name, dirty);
+}
+
+enum entry_audit_flags {
+  ET_FREE_LIST = 1,
+  ET_LRU = 2,
+  ET_HASH = 4
+};
+
+static byte *
+audit_entries(struct qache *q, byte *entrymap)
+{
+  uns i, j;
+
+  DBG("Auditing entries");
+
+  /* Check the free list */
+  i = q->first_free_entry;
+  while (i)
+    {
+      if (i >= q->hdr->max_entries || (entrymap[i] & ET_FREE_LIST) || q->entry_table[i].data_len != ~0U)
+        return "inconsistent free entry list";
+      entrymap[i] |= ET_FREE_LIST;
+      i = q->entry_table[i].hash_next;
+    }
+
+  /* Check the hash table */
+  for (i=0; i<q->hdr->hash_size; i++)
+    {
+      j = q->hash_table[i];
+      while (j)
+        {
+          if (j >= q->hdr->max_entries || (entrymap[j] & (ET_HASH | ET_FREE_LIST)))
+            return "inconsistent hash chains";
+          entrymap[j] |= ET_HASH;
+          j = q->entry_table[j].hash_next;
+        }
+    }
+
+  /* Check the LRU */
+  i = 0;
+  do
+    {
+      j = q->entry_table[i].lru_next;
+      if ((entrymap[i] & (ET_LRU | ET_FREE_LIST)) || j >= q->hdr->max_entries || q->entry_table[j].lru_prev != i)
+        return "inconsistent LRU list";
+      entrymap[i] |= ET_LRU;
+      i = j;
+    }
+  while (i);
+
+  /* Check if all non-free items are in all lists */
+  for (i=1; i<q->hdr->max_entries; i++)
+    {
+      if (entrymap[i] != ((q->entry_table[i].data_len == ~0U) ? ET_FREE_LIST : (ET_LRU | ET_HASH)))
+        return "inconsistent lists";
+    }
+  return NULL;
+}
+
+enum block_audit_flags {
+  BT_FREE_LIST = 1,
+  BT_ALLOC = 2
+};
+
+static byte *
+audit_blocks(struct qache *q, byte *entrymap, byte *blockmap)
+{
+  uns i, j;
+
+  DBG("Auditing blocks");
+
+  /* Check the free list */
+  for (i=q->first_free_block; i; i=q->next_table[i])
+    {
+      if (i < q->hdr->first_data_block || i >= q->hdr->num_blocks || (blockmap[i] & BT_FREE_LIST))
+        return "inconsistent free block list";
+      blockmap[i] |= BT_FREE_LIST;
+    }
+
+  /* Check allocation lists of entries */
+  for (i=1; i<q->hdr->max_entries; i++)
+    if (!(entrymap[i] & ET_FREE_LIST))
+      {
+        uns blocks = 0;
+        for (j=q->entry_table[i].first_data_block; j; j=q->next_table[j])
+          {
+            if (blockmap[j])
+              return "inconsistent entry block list";
+            blockmap[j] |= BT_ALLOC;
+            blocks++;
+          }
+        if (((q->entry_table[i].data_len + q->hdr->block_size - 1) >> q->hdr->block_shift) != blocks)
+          return "inconsistent entry data length";
+      }
+
+  /* Check if all blocks belong somewhere */
+  for (i=q->hdr->first_data_block; i < q->hdr->num_blocks; i++)
+    if (!blockmap[i])
+      {
+        DBG("Block %d unreferenced", i);
+        return "unreferenced blocks found";
+      }
+
+  return NULL;
+}
+
+static byte *
+do_audit(struct qache *q)
+{
+  byte *entry_map = xmalloc_zero(q->hdr->max_entries);
+  byte *block_map = xmalloc_zero(q->hdr->num_blocks);
+  byte *err = audit_entries(q, entry_map);
+  if (!err)
+    err = audit_blocks(q, entry_map, block_map);
+  xfree(block_map);
+  xfree(entry_map);
+  return err;
+}
+
+static void
+qache_setup_pointers(struct qache *q)
+{
+  q->hdr = (struct qache_header *) q->mmap_data;
+  q->entry_table = (struct qache_entry *) (q->mmap_data + q->hdr->entry_table_start);
+  q->hash_table = (u32 *) (q->mmap_data + q->hdr->hash_table_start);
+  q->next_table = (u32 *) (q->mmap_data + q->hdr->next_table_start);
+}
+
 static int
 qache_open_existing(struct qache *q, struct qache_params *par)
 {
@@ -100,23 +272,29 @@ qache_open_existing(struct qache *q, struct qache_params *par)
     goto close_and_fail;
 
   struct qache_header *h = (struct qache_header *) q->mmap_data;
+  qache_setup_pointers(q);
+  qache_lock(q);
+
   err = "incompatible format";
   if (h->magic != QACHE_MAGIC || h->block_size != par->block_size || h->max_entries != par->max_entries || h->format_id != par->format_id)
-    goto unmap_and_fail;
+    goto unlock_and_fail;
 
   err = "incomplete file";
   if (h->num_blocks*h->block_size != q->file_size)
-    goto unmap_and_fail;
+    goto unlock_and_fail;
 
-  /* FIXME: Audit cache file contents */
+  if (err = do_audit(q))
+    goto unlock_and_fail;
 
+  qache_unlock(q, 0);
   log(L_INFO, "Cache %s: using existing data", q->file_name);
   return 1;
 
- unmap_and_fail:
+ unlock_and_fail:
+  qache_unlock(q, 0);
   munmap(q->mmap_data, q->file_size);
  close_and_fail:
   log(L_INFO, "Cache %s: ignoring old contents (%s)", q->file_name, err);
@@ -136,12 +314,12 @@ qache_create(struct qache *q, struct qache_params *par)
   bzero(&h, sizeof(h));
   h.magic = QACHE_MAGIC;
   h.block_size = par->block_size;
-  h.block_shift = fls(h.block_size);
+  h.block_shift = bit_fls(h.block_size);
   h.num_blocks = par->cache_size >> h.block_shift;
   h.format_id = par->format_id;
   h.entry_table_start = sizeof(h);
   h.max_entries = par->max_entries;
-  h.hash_table_start = h.entry_table_start + (h.max_entries+1) * sizeof(struct qache_entry);
+  h.hash_table_start = h.entry_table_start + h.max_entries * sizeof(struct qache_entry);
   h.hash_size = 1;
   while (h.hash_size < h.max_entries)
     h.hash_size *= 2;
@@ -163,9 +341,9 @@ qache_create(struct qache *q, struct qache_params *par)
   /* Other entries */
   bzero(&ent, sizeof(ent));
   ent.data_len = ~0U;
-  for (uns i=1; i<=h.max_entries; i++)
+  for (uns i=1; i<h.max_entries; i++)
     bwrite(fb, &ent, sizeof(ent));
 
   log(L_INFO, "Cache %s: created (%d bytes, %d entries, %d hash slots)", par->file_name, par->cache_size, h.max_entries, h.hash_size);
 
   if ((q->mmap_data = mmap(NULL, par->cache_size, PROT_READ | PROT_WRITE, MAP_SHARED, q->fd, 0)) == MAP_FAILED)
-    die("Cache %s: mmap failed (%m)", par->cache_size);
+    die("Cache %s: mmap failed (%m)", par->file_name);
   q->file_size = par->cache_size;
+  qache_setup_pointers(q);
 }
 
 struct qache *
@@ -205,21 +384,14 @@ qache_open(struct qache_params *par)
   q->file_name = xstrdup(par->file_name);
 
   ASSERT(par->block_size >= 8 && !(par->block_size & (par->block_size-1)));
-  par->cache_size = ALIGN(par->cache_size, par->block_size);
+  par->cache_size = ALIGN_TO(par->cache_size, par->block_size);
 
   if (par->force_reset <= 0 && qache_open_existing(q, par))
     ;
   else if (par->force_reset < 0)
-    die("Cache %s: read-only access requested, but no data available");
+    die("Cache %s: read-only access requested, but no data available", q->file_name);
   else
     qache_create(q, par);
-
-  /* FIXME: Remember `closed correctly' status */
-
-  q->hdr = (struct qache_header *) q->mmap_data;
-  q->entry_table = (struct qache_entry *) (q->mmap_data + q->hdr->entry_table_start);
-  q->hash_table = (u32 *) (q->mmap_data + q->hdr->hash_table_start);
-  q->next_table = (u32 *) (q->mmap_data + q->hdr->next_table_start);
 
   return q;
 }
@@ -234,49 +406,6 @@ qache_close(struct qache *q, uns retain_data)
   xfree(q);
 }
 
-static void
-qache_msync(struct qache *q, uns start, uns len)
-{
-  len += (start % PAGE_SIZE);
-  start -= start % PAGE_SIZE;
-  len = ALIGN(len, PAGE_SIZE);
-  if (msync(q->mmap_data + start, len, MS_ASYNC | MS_INVALIDATE) < 0)
-    log(L_ERROR, "Cache %s: msync failed: %m", q->file_name);
-  /* FIXME: Do we need this on Linux? */
-}
-
-static void
-qache_msync_block(struct qache *q, uns blk)
-{
-  DBG("\tSyncing block %d", blk);
-  qache_msync(q, blk << q->hdr->block_shift, q->hdr->block_size);
-}
-
-static void
-qache_lock(struct qache *q)
-{
-  /* We cannot use flock() since it happily permits locking a shared fd (e.g., after fork()) multiple times */
-  ASSERT(!q->locked);
-  struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET, .l_start = 0, .l_len = sizeof(struct qache_header) };
-  if (fcntl(q->fd, F_SETLKW, &fl) < 0)
-    die("fcntl lock on %s: %m", q->file_name);
-  q->locked = 1;
-  DBG("Locked cache %s", q->file_name);
-}
-
-static void
-qache_unlock(struct qache *q, uns dirty)
-{
-  ASSERT(q->locked);
-  if (dirty)			/* Sync header, entry table and hash table */
-    qache_msync(q, 0, q->hdr->first_data_block << q->hdr->block_shift);
-  struct flock fl = { .l_type = F_UNLCK, .l_whence = SEEK_SET, .l_start = 0, .l_len = sizeof(struct qache_header) };
-  if (fcntl(q->fd, F_SETLKW, &fl) < 0)
-    die("fcntl unlock on %s: %m", q->file_name);
-  q->locked = 0;
-  DBG("Unlocked cache %s (dirty=%d)", q->file_name, dirty);
-}
-
 static uns
 qache_hash(struct qache *q, qache_key_t *key)
 {
@@ -289,7 +418,7 @@ qache_hash_find(struct qache *q, qache_key_t *key, uns pos_hint)
 {
   ASSERT(q->locked);
 
-  if (pos_hint && pos_hint <= q->hdr->max_entries && !memcmp(q->entry_table[pos_hint].key, key, sizeof(*key)))
+  if (pos_hint && pos_hint < q->hdr->max_entries && q->entry_table[pos_hint].data_len != ~0U && !memcmp(q->entry_table[pos_hint].key, key, sizeof(*key)))
     return pos_hint;
 
   uns h = qache_hash(q, key);
@@ -470,8 +599,43 @@ qache_insert(struct qache *q, qache_key_t *key, uns pos_hint, void *data, uns si
   return e;
 }
 
+static void
+copy_out(struct qache *q, struct qache_entry *entry, byte **datap, uns *sizep, uns start)
+{
+  if (sizep)
+    {
+      uns size = *sizep;
+      uns avail = (start > entry->data_len) ? 0 : entry->data_len - start;
+      uns xfer = MIN(size, avail);
+      *sizep = avail;
+      if (datap)
+        {
+          if (!*datap)
+            *datap = xmalloc(xfer);
+          uns blk = entry->first_data_block;
+          while (start >= q->hdr->block_size)
+            {
+              blk = q->next_table[blk];
+              start -= q->hdr->block_size;
+            }
+          byte *data = *datap;
+          while (xfer)
+            {
+              uns len = MIN(xfer, q->hdr->block_size - start);
+              memcpy(data, get_block_start(q, blk), len);
+              blk = q->next_table[blk];
+              data += len;
+              xfer -= len;
+              start = 0;
+            }
+        }
+    }
+  else
+    ASSERT(!datap);
+}
+
 uns
-qache_lookup(struct qache *q, qache_key_t *key, uns pos_hint, void **datap, uns *sizep, uns start)
+qache_lookup(struct qache *q, qache_key_t *key, uns pos_hint, byte **datap, uns *sizep, uns start)
 {
   qache_lock(q);
   uns e = qache_hash_find(q, key, pos_hint);
@@ -481,36 +645,7 @@ qache_lookup(struct qache *q, qache_key_t *key, uns pos_hint, void **datap, uns
       DBG("Lookup <%s>: found entry %d", format_key(key), e);
       qache_lru_remove(q, e);
       qache_lru_insert(q, e);
-      if (sizep)
-        {
-          uns size = *sizep;
-          uns avail = (size > entry->data_len) ? 0 : entry->data_len - size;
-          uns xfer = MIN(*sizep, avail);
-          *sizep = avail;
-          if (datap)
-            {
-              if (!*datap)
-                *datap = xmalloc(xfer);
-              uns blk = entry->first_data_block;
-              while (start >= q->hdr->block_size)
-                {
-                  blk = q->next_table[blk];
-                  start -= q->hdr->block_size;
-                }
-              byte *data = *datap;
-              while (xfer)
-                {
-                  uns len = MIN(xfer, q->hdr->block_size - start);
-                  memcpy(data, get_block_start(q, blk), len);
-                  blk = q->next_table[blk];
-                  data += len;
-                  xfer -= len;
-                  start = 0;
-                }
-            }
-        }
-      else
-        ASSERT(!datap);
+      copy_out(q, entry, datap, sizep, start);
       qache_unlock(q, 1);		/* Yes, modified -- we update the LRU */
     }
   else
@@ -521,6 +656,32 @@ qache_lookup(struct qache *q, qache_key_t *key, uns pos_hint, void **datap, uns
   return e;
 }
 
+uns
+qache_probe(struct qache *q, qache_key_t *key, uns pos, byte **datap, uns *sizep, uns start)
+{
+  if (!pos || pos >= q->hdr->max_entries)
+    {
+      DBG("Probe %d: Out of range", pos);
+      return ~0U;
+    }
+
+  qache_lock(q);
+  uns ret = 0;
+  struct qache_entry *entry = &q->entry_table[pos];
+  if (entry->data_len != ~0U)
+    {
+      DBG("Probe %d: Found key <%s>", pos, format_key(entry->key));
+      if (key)
+        memcpy(key, entry->key, sizeof(qache_key_t));
+      copy_out(q, entry, datap, sizep, start);
+      ret = pos;
+    }
+  else
+    DBG("Probe %d: Empty", pos);
+  qache_unlock(q, 0);
+  return ret;
+}
+
 uns
 qache_delete(struct qache *q, qache_key_t *key, uns pos_hint)
 {
@@ -546,7 +707,7 @@ qache_debug(struct qache *q)
 
   log(L_DEBUG, "Table of cache entries:");
   log(L_DEBUG, "\tEntry\tLruPrev\tLruNext\tDataLen\tDataBlk\tHashNxt\tKey");
-  for (uns e=0; e<=q->hdr->max_entries; e++)
+  for (uns e=0; e<q->hdr->max_entries; e++)
     {
       struct qache_entry *ent = &q->entry_table[e];
       log(L_DEBUG, "\t%d\t%d\t%d\t%d\t%d\t%d\t%s", e, ent->lru_prev, ent->lru_next, ent->data_len,
@@ -562,6 +723,16 @@ qache_debug(struct qache *q)
     log(L_DEBUG, "\t%d\t%d", blk, q->next_table[blk]);
 }
 
+void
+qache_audit(struct qache *q)
+{
+  byte *err;
+  qache_lock(q);
+  if (err = do_audit(q))
+    die("Cache %s: %s", q->file_name, err);
+  qache_unlock(q, 0);
+}
+
 #ifdef TEST
 int main(int argc UNUSED, char **argv UNUSED)
@@ -577,15 +748,35 @@ int main(int argc UNUSED, char **argv UNUSED)
   struct qache *q = qache_open(&par);
 
   qache_key_t key = { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0, 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef };
-  byte data[1000] = { '1', '2', '3', '4', '5' };
-  for (uns i=0; i<100; i++)
+#define N 100
+  uns i, j;
+  byte data[11*N];
+  for (i=0; i<N; i++)
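
For reference, here is a minimal usage sketch of the public API as it stands after this patch: qache_lookup() now takes byte **datap, and qache_probe() and qache_audit() are new. This is an editorial illustration, not part of the patch; the cache path and parameter values are made up (loosely following the TEST block above), and qache_insert() is assumed to return the entry position, usable as a pos_hint in later calls.

#include "lib/lib.h"
#include "lib/qache.h"

#include <string.h>

static void
example(void)
{
  struct qache_params par = {
    .file_name = "/tmp/qache-example",  /* hypothetical path */
    .block_size = 256,                  /* must be a power of two >= 8 */
    .cache_size = 1 << 20,              /* qache_open() aligns this to a multiple of block_size */
    .max_entries = 123,
    .force_reset = 0,                   /* <= 0: reuse existing data if the audit passes */
    .format_id = 0xfeedcafe
  };
  struct qache *q = qache_open(&par);   /* die()s on hard errors */

  qache_key_t key;
  memset(&key, 0x42, sizeof(key));
  byte data[100];
  memset(data, 'x', sizeof(data));

  /* Store an object; remember the returned position as a hint for later calls */
  uns pos = qache_insert(q, &key, 0, data, sizeof(data));

  /* Fetch it back. With *datap == NULL, the cache xmalloc()s the buffer for us;
   * on input *sizep limits the transfer, on output it holds the available size. */
  byte *buf = NULL;
  uns size = sizeof(data);
  if (qache_lookup(q, &key, pos, &buf, &size, 0))
    log(L_INFO, "Cache hit: %d bytes available", size);

  qache_audit(q);                       /* run the new consistency checks; die()s on corruption */
  qache_close(q, 1);                    /* retain_data=1 keeps the backing file */
}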