+static inline byte *
+format_key(qache_key_t *key)
+{
+ static byte keybuf[2*sizeof(qache_key_t)+1];
+ for (uns i=0; i<sizeof(qache_key_t); i++)
+ sprintf(keybuf+2*i, "%02x", (*key)[i]);
+ return keybuf;
+}
+
+static void
+qache_msync(struct qache *q UNUSED, uns start UNUSED, uns len UNUSED)
+{
+#ifndef CONFIG_LINUX
+ /* We don't need msyncing on Linux, since the mappings are guaranteed to be coherent */
+ len += (start % CPU_PAGE_SIZE);
+ start -= start % CPU_PAGE_SIZE;
+ len = ALIGN_TO(len, CPU_PAGE_SIZE);
+ if (msync(q->mmap_data + start, len, MS_ASYNC | MS_INVALIDATE) < 0)
+ log(L_ERROR, "Cache %s: msync failed: %m", q->file_name);
+#endif
+}
+
+static void
+qache_msync_block(struct qache *q, uns blk)
+{
+ DBG("\tSyncing block %d", blk);
+ qache_msync(q, blk << q->hdr->block_shift, q->hdr->block_size);
+}
+
+static void
+qache_lock(struct qache *q)
+{
+ /* We cannot use flock() since it happily permits locking a shared fd (e.g., after fork()) multiple times */
+ ASSERT(!q->locked);
+ struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET, .l_start = 0, .l_len = sizeof(struct qache_header) };
+ if (fcntl(q->fd, F_SETLKW, &fl) < 0)
+ die("fcntl lock on %s: %m", q->file_name);
+ q->locked = 1;
+ DBG("Locked cache %s", q->file_name);
+}
+
+static void
+qache_unlock(struct qache *q, uns dirty)
+{
+ ASSERT(q->locked);
+ if (dirty) /* Sync header, entry table and hash table */
+ qache_msync(q, 0, q->hdr->first_data_block << q->hdr->block_shift);
+ struct flock fl = { .l_type = F_UNLCK, .l_whence = SEEK_SET, .l_start = 0, .l_len = sizeof(struct qache_header) };
+ if (fcntl(q->fd, F_SETLKW, &fl) < 0)
+ die("fcntl unlock on %s: %m", q->file_name);
+ q->locked = 0;
+ DBG("Unlocked cache %s (dirty=%d)", q->file_name, dirty);
+}
+
/* Per-entry markers used by audit_entries() to record which lists an
 * entry has been seen on, so duplicates and omissions can be detected. */
enum entry_audit_flags {
  ET_FREE_LIST = 1,			/* seen on the free entry list */
  ET_LRU = 2,				/* seen on the LRU list */
  ET_HASH = 4				/* seen in a hash chain */
};
+
/*
 * Consistency check of the entry tables: walks the free list, all hash
 * chains and the LRU list, marking each visited entry in entrymap (one
 * byte per entry, must be zeroed by the caller), then verifies that every
 * entry appears on exactly the lists it should.  Returns NULL on success
 * or a static error message describing the first inconsistency found.
 */
static byte *
audit_entries(struct qache *q, byte *entrymap)
{
  uns i, j;

  DBG("Auditing entries");

  /* Check the free list: free entries are marked by data_len == ~0U and
   * chained through hash_next; the ET_FREE_LIST mark also catches cycles. */
  i = q->first_free_entry;
  while (i)
    {
      if (i >= q->hdr->max_entries || (entrymap[i] & ET_FREE_LIST) || q->entry_table[i].data_len != ~0U)
	return "inconsistent free entry list";
      entrymap[i] |= ET_FREE_LIST;
      i = q->entry_table[i].hash_next;
    }

  /* Check the hash table: no entry may be on both a hash chain and the
   * free list, nor on two chains (or twice on one — that would be a cycle). */
  for (i=0; i<q->hdr->hash_size; i++)
    {
      j = q->hash_table[i];
      while (j)
	{
	  if (j >= q->hdr->max_entries || (entrymap[j] & (ET_HASH | ET_FREE_LIST)))
	    return "inconsistent hash chains";
	  entrymap[j] |= ET_HASH;
	  j = q->entry_table[j].hash_next;
	}
    }

  /* Check the LRU: entry 0 serves as the head of a circular doubly-linked
   * list, so we start there and verify each back-link as we go. */
  i = 0;
  do
    {
      j = q->entry_table[i].lru_next;
      if ((entrymap[i] & (ET_LRU | ET_FREE_LIST)) || j >= q->hdr->max_entries || q->entry_table[j].lru_prev != i)
	return "inconsistent LRU list";
      entrymap[i] |= ET_LRU;
      i = j;
    }
  while (i);

  /* Cross-check: every real entry (1..max) is either free, or on both the
   * LRU and exactly one hash chain — nothing else is allowed. */
  for (i=1; i<q->hdr->max_entries; i++)
    {
      if (entrymap[i] != ((q->entry_table[i].data_len == ~0U) ? ET_FREE_LIST : (ET_LRU | ET_HASH)))
	return "inconsistent lists";
    }
  return NULL;
}
+
/* Per-block markers used by audit_blocks() to record how each data block
 * is accounted for. */
enum block_audit_flags {
  BT_FREE_LIST = 1,			/* block is on the free block list */
  BT_ALLOC = 2				/* block belongs to some entry's chain */
};
+
+static byte *
+audit_blocks(struct qache *q, byte *entrymap, byte *blockmap)
+{
+ uns i, j;
+
+ DBG("Auditing blocks");
+
+ /* Check the free list */
+ for (i=q->first_free_block; i; i=q->next_table[i])
+ {
+ if (i < q->hdr->first_data_block || i >= q->hdr->num_blocks || (blockmap[i] & BT_FREE_LIST))
+ return "inconsistent free block list";
+ blockmap[i] |= BT_FREE_LIST;
+ }
+
+ /* Check allocation lists of entries */
+ for (i=1; i<q->hdr->max_entries; i++)
+ if (!(entrymap[i] & ET_FREE_LIST))
+ {
+ uns blocks = 0;
+ for (j=q->entry_table[i].first_data_block; j; j=q->next_table[j])
+ {
+ if (blockmap[j])
+ return "inconsistent entry block list";
+ blockmap[j] |= BT_ALLOC;
+ blocks++;
+ }
+ if (((q->entry_table[i].data_len + q->hdr->block_size - 1) >> q->hdr->block_shift) != blocks)
+ return "inconsistent entry data length";
+ }
+
+ /* Check if all blocks belong somewhere */
+ for (i=q->hdr->first_data_block; i < q->hdr->num_blocks; i++)
+ if (!blockmap[i])
+ {
+ DBG("Block %d unreferenced", i);
+ return "unreferenced blocks found";
+ }
+
+ return NULL;
+}
+
+static byte *
+do_audit(struct qache *q)
+{
+ byte *entry_map = xmalloc_zero(q->hdr->max_entries);
+ byte *block_map = xmalloc_zero(q->hdr->num_blocks);
+ byte *err = audit_entries(q, entry_map);
+ if (!err)
+ err = audit_blocks(q, entry_map, block_map);
+ xfree(block_map);
+ xfree(entry_map);
+ return err;
+}
+
+static void
+qache_setup_pointers(struct qache *q)
+{
+ q->hdr = (struct qache_header *) q->mmap_data;
+ q->entry_table = (struct qache_entry *) (q->mmap_data + q->hdr->entry_table_start);
+ q->hash_table = (u32 *) (q->mmap_data + q->hdr->hash_table_start);
+ q->next_table = (u32 *) (q->mmap_data + q->hdr->next_table_start);
+}
+