No need to use our own address-sized integer type (addr_int_t) when we already rely on C99 anyway: <stdint.h> gives us uintptr_t for exactly this purpose.
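
As a reminder of the guarantee this relies on: C99's <stdint.h> defines uintptr_t as an unsigned integer type that any valid void * can round-trip through unchanged (7.18.1.4; technically optional in the standard, but present on every platform we build on). A minimal standalone check:

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
      int x;
      void *p = &x;
      uintptr_t u = (uintptr_t) p;  /* pointer -> integer */
      void *q = (void *) u;         /* integer -> pointer */
      assert(q == p);               /* guaranteed to compare equal */
      return 0;
    }
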
ASSERT(i1->pixel_size == 4);
ASSERT(IMAGE_SSE_ALIGN_SIZE >= 16);
ASSERT(!(i1->row_size & (IMAGE_SSE_ALIGN_SIZE - 1)));
- ASSERT(!((addr_int_t)i1->pixels & (IMAGE_SSE_ALIGN_SIZE - 1)));
+ ASSERT(!((uintptr_t)i1->pixels & (IMAGE_SSE_ALIGN_SIZE - 1)));
image_destroy(i1);
i1 = image_new(&ctx, 283, 329, COLOR_SPACE_RGB, NULL);
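
All of these alignment ASSERTs use one idiom: when A is a power of two, an address is A-aligned exactly when its bits below A's single set bit are zero, i.e. when `addr & (A - 1)` is 0. A small sketch of just that test (the IS_ALIGNED macro exists only for this example):

    #include <stdint.h>
    #include <stdio.h>

    /* Valid only when align is a power of two. */
    #define IS_ALIGNED(p, align) (!((uintptr_t)(p) & ((uintptr_t)(align) - 1)))

    int main(void)
    {
      uintptr_t a = 0x1230;
      printf("%d %d\n", IS_ALIGNED(a, 16), IS_ALIGNED(a + 4, 16)); /* 1 0 */
      return 0;
    }
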
big_free(void *start, unsigned int len)
{
byte *p = start;
- ASSERT(!((addr_int_t) p & (CPU_PAGE_SIZE-1)));
+ ASSERT(!((uintptr_t) p & (CPU_PAGE_SIZE-1)));
len = big_round(len);
#ifdef CONFIG_DEBUG
p -= CPU_PAGE_SIZE;
static void
dump_item(struct fastbuf *fb, struct cf_item *item, int level, void *ptr)
{
- ptr += (addr_int_t) item->ptr;
+ ptr += (uintptr_t) item->ptr;
enum cf_type type = item->type;
uns size = cf_type_size(item->type, item->u.utype);
int i;
for (struct cf_item *ci=sec->cfg; ci->cls; ci++)
{
int taken;
- byte *msg = interpret_set_item(ci, number, pars, &taken, ptr + (addr_int_t) ci->ptr, allow_dynamic && !ci[1].cls);
+ byte *msg = interpret_set_item(ci, number, pars, &taken, ptr + (uintptr_t) ci->ptr, allow_dynamic && !ci[1].cls);
if (msg)
return cf_printf("Item %s: %s", ci->name, msg);
*processed += taken;
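
The pattern behind all these casts: struct cf_item smuggles a byte offset through its void * field, and `ptr + (uintptr_t) ci->ptr` turns it back into a member address relative to whatever base the caller supplies. A compilable sketch of the same trick with hypothetical names (struct settings and struct item are not part of the real config code):

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    struct settings { int retries; const char *host; };

    /* The ptr field carries a byte offset disguised as a pointer,
     * just like item->ptr in the hunks above. */
    struct item { const char *name; void *ptr; };

    static struct item items[] = {
      { "retries", (void *)(uintptr_t) offsetof(struct settings, retries) },
      { "host",    (void *)(uintptr_t) offsetof(struct settings, host)    },
    };

    int main(void)
    {
      struct settings s = { 3, "example.org" };
      /* Same arithmetic as `ptr + (uintptr_t) ci->ptr`. */
      int *retries = (int *)((char *) &s + (uintptr_t) items[0].ptr);
      printf("%s = %d\n", items[0].name, *retries); /* retries = 3 */
      return 0;
    }
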
cmp_items(void *i1, void *i2, struct cf_item *item)
{
ASSERT(item->cls == CC_STATIC);
- i1 += (addr_int_t) item->ptr;
- i2 += (addr_int_t) item->ptr;
+ i1 += (uintptr_t) item->ptr;
+ i2 += (uintptr_t) item->ptr;
if (item->type == CT_STRING)
return strcmp(* (byte**) i1, * (byte**) i2);
else // all numeric types
*msg = cf_printf("Unknown item %s", name);
return NULL;
}
- *ptr += (addr_int_t) ci->ptr;
+ *ptr += (uintptr_t) ci->ptr;
if (!c)
return ci;
if (ci->cls != CC_SECTION)
}
for (struct cf_item *ci=sec->cfg; ci->cls; ci++)
if (ci->cls == CC_SECTION)
- cf_init_section(ci->name, ci->u.sec, ptr + (addr_int_t) ci->ptr, 0);
+ cf_init_section(ci->name, ci->u.sec, ptr + (uintptr_t) ci->ptr, 0);
else if (ci->cls == CC_LIST)
- clist_init(ptr + (addr_int_t) ci->ptr);
+ clist_init(ptr + (uintptr_t) ci->ptr);
else if (ci->cls == CC_DYNAMIC) {
- void **dyn = ptr + (addr_int_t) ci->ptr;
+ void **dyn = ptr + (uintptr_t) ci->ptr;
if (!*dyn) { // replace NULL by an empty array
static uns zero = 0;
*dyn = (&zero) + 1;
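
The `(&zero) + 1` line works because these dynamic arrays keep their element count in the machine word just before the data pointer; pointing one uns past a static zero therefore looks like a valid array whose length word reads 0, with no allocation needed. A sketch (the DARY_LEN accessor is assumed to match libucw's, i.e. `((uns *)a)[-1]`):

    #include <stdio.h>

    typedef unsigned int uns;

    /* Length of a dynamic array lives just before its data. */
    #define DARY_LEN(a) (((uns *)(a))[-1])

    int main(void)
    {
      static uns zero = 0;
      void *dyn = (&zero) + 1;             /* the trick from the hunk */
      printf("len = %u\n", DARY_LEN(dyn)); /* prints 0 */
      return 0;
    }
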
byte *err;
for (struct cf_item *ci=sec->cfg; ci->cls; ci++)
if (ci->cls == CC_SECTION) {
- if ((err = commit_section(ci->u.sec, ptr + (addr_int_t) ci->ptr, commit_all))) {
+ if ((err = commit_section(ci->u.sec, ptr + (uintptr_t) ci->ptr, commit_all))) {
log(L_ERROR, "Cannot commit section %s: %s", ci->name, err);
return "commit of a subsection failed";
}
} else if (ci->cls == CC_LIST) {
uns idx = 0;
- CLIST_FOR_EACH(cnode *, n, * (clist*) (ptr + (addr_int_t) ci->ptr))
+ CLIST_FOR_EACH(cnode *, n, * (clist*) (ptr + (uintptr_t) ci->ptr))
if (idx++, err = commit_section(ci->u.sec, n, commit_all)) {
log(L_ERROR, "Cannot commit node #%d of list %s: %s", idx, ci->name, err);
return "commit of a list failed";
return bskip_slow(f, len);
}
-/* I/O on addr_int_t */
+/* I/O on uintptr_t */
#ifdef CPU_64BIT_POINTERS
#define bputa(x,p) bputq(x,p)
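
Only the 64-bit branch appears in this hunk; presumably the elided #else maps bputa to the 32-bit writer bputl. The full picture would then look like this (the fallback is a guess, not taken from the patch):

    #ifdef CPU_64BIT_POINTERS
    #define bputa(x,p) bputq(x,p)   /* 64-bit write, as in the hunk */
    #else
    #define bputa(x,p) bputl(x,p)   /* assumed 32-bit fallback */
    #endif
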
*/
static inline uns CONST hash_u32(uns x) { return 0x01008041*x; }
static inline uns CONST hash_u64(u64 x) { return hash_u32((uns)x ^ (uns)(x >> 32)); }
-static inline uns CONST hash_pointer(void *x) { return ((sizeof(x) <= 4) ? hash_u32((uns)(addr_int_t)x) : hash_u64((u64)(addr_int_t)x)); }
+static inline uns CONST hash_pointer(void *x) { return ((sizeof(x) <= 4) ? hash_u32((uns)(uintptr_t)x) : hash_u64((u64)(uintptr_t)x)); }
#endif
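
Since sizeof(x) is a compile-time constant, the ternary in hash_pointer folds away and each build emits exactly one of the two hash routines; casting through uintptr_t first keeps the pointer-to-integer conversion well-defined at both widths. The same logic as a standalone program (ucw typedefs spelled with <stdint.h> names, the CONST attribute dropped):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t uns;
    typedef uint64_t u64;

    static inline uns hash_u32(uns x) { return 0x01008041 * x; }
    static inline uns hash_u64(u64 x) { return hash_u32((uns) x ^ (uns)(x >> 32)); }

    /* The branch is resolved at compile time. */
    static inline uns hash_pointer(void *x)
    {
      return (sizeof(x) <= 4) ? hash_u32((uns)(uintptr_t) x)
                              : hash_u64((u64)(uintptr_t) x);
    }

    int main(void)
    {
      int obj;
      printf("%08x\n", (unsigned) hash_pointer(&obj));
      return 0;
    }
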
static inline uns
kmp4_hash(struct kmp4_struct *kmp UNUSED, struct kmp4_state *s, byte *c)
{
- return (c ? (*c << 16) : 0) + (uns)(addr_int_t)s;
+ return (c ? (*c << 16) : 0) + (uns)(uintptr_t)s;
}
#define KMP_PREFIX(x) kmp4_##x
static inline uns
P(hash_hash) (struct P(hash_table) *t UNUSED, struct P(state) *f, P(char_t) c)
{
- return (((uns)c) << 16) + (uns)(addr_int_t)f;
+ return (((uns)c) << 16) + (uns)(uintptr_t)f;
}
#endif
{
if (s <= p->threshold)
{
- byte *x = (byte *)(((addr_int_t) p->free + POOL_ALIGN - 1) & ~(addr_int_t)(POOL_ALIGN - 1));
+ byte *x = (byte *)(((uintptr_t) p->free + POOL_ALIGN - 1) & ~(uintptr_t)(POOL_ALIGN - 1));
if (x + s > p->last)
{
struct memchunk *c;
static inline void * LIKE_MALLOC
mp_alloc_fast(struct mempool *p, uns l)
{
- byte *f = (void *) (((addr_int_t) p->free + POOL_ALIGN - 1) & ~(addr_int_t)(POOL_ALIGN - 1));
+ byte *f = (void *) (((uintptr_t) p->free + POOL_ALIGN - 1) & ~(uintptr_t)(POOL_ALIGN - 1));
byte *ee = f + l;
if (ee > p->last)
return mp_alloc(p, l);
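
Both allocation paths round p->free up to the pool alignment with the classic mask expression: adding POOL_ALIGN - 1 and clearing the low bits yields the next multiple of a power-of-two alignment. Isolated into a helper (align_up is just for the example):

    #include <stdint.h>
    #include <stdio.h>

    /* Round p up to the next multiple of align (a power of two),
     * exactly the expression used in mp_alloc_fast above. */
    static void *align_up(void *p, uintptr_t align)
    {
      return (void *)(((uintptr_t) p + align - 1) & ~(align - 1));
    }

    int main(void)
    {
      char buf[64];
      printf("%p -> %p\n", (void *)(buf + 1), align_up(buf + 1, 16));
      return 0;
    }
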
#else
/* Pointers are aligned, hence we can use lower bits. */
static inline uns P(red_flag) (P(bucket) *node)
- { return ((addr_int_t) node->son[0]) & 1L; }
+ { return ((uintptr_t) node->son[0]) & 1L; }
static inline void P(set_red_flag) (P(bucket) *node, uns flag)
- { node->son[0] = (void*) ( (((addr_int_t) node->son[0]) & ~1L) | (flag & 1L) ); }
+ { node->son[0] = (void*) ( (((uintptr_t) node->son[0]) & ~1L) | (flag & 1L) ); }
static inline P(bucket) * P(tree_son) (P(bucket) *node, uns id)
- { return (void *) (((addr_int_t) node->son[id]) & ~1L); }
+ { return (void *) (((uintptr_t) node->son[id]) & ~1L); }
static inline void P(set_tree_son) (P(bucket) *node, uns id, P(bucket) *son)
- { node->son[id] = (void *) ((addr_int_t) son | (((addr_int_t) node->son[id]) & 1L) ); }
+ { node->son[id] = (void *) ((uintptr_t) son | (((uintptr_t) node->son[id]) & 1L) ); }
#endif
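
The tree code leans on the same fact as the ASSERTs earlier: buckets are at least word-aligned, so bit 0 of a son pointer is always zero and can store the node's red/black colour for free, as long as every reader masks it off before dereferencing. A compilable miniature of the accessors (names simplified from the P() macros):

    #include <stdint.h>
    #include <stdio.h>

    struct node { struct node *son[2]; };

    static unsigned get_flag(struct node *n)
    { return (uintptr_t) n->son[0] & 1u; }

    static void set_flag(struct node *n, unsigned flag)
    { n->son[0] = (void *)(((uintptr_t) n->son[0] & ~(uintptr_t) 1) | (flag & 1u)); }

    static struct node *get_son(struct node *n, unsigned id)
    { return (void *)((uintptr_t) n->son[id] & ~(uintptr_t) 1); }

    int main(void)
    {
      struct node a = {{ 0, 0 }}, root = {{ &a, 0 }};
      set_flag(&root, 1);                       /* colour bit set... */
      printf("flag=%u son_ok=%d\n", get_flag(&root),
             get_son(&root, 0) == &a);          /* ...and masked off cleanly */
      return 0;
    }
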
/* Defaults for missing parameters. */
}
if (section->item.cf.cls == CC_LIST)
{
- item->cf.ptr = (void *)(addr_int_t)section->size;
+ item->cf.ptr = (void *)(uintptr_t)section->size;
section->size += sizeof(union value);
}
else
{
if (item->flags & FLAG_HIDE)
return;
- byte *val = (byte *)((addr_int_t)ptr + (addr_int_t)item->cf.ptr);
+ byte *val = (byte *)((uintptr_t)ptr + (uintptr_t)item->cf.ptr);
if (item->cf.cls == CC_LIST)
{
uns len = strlen(item->cf.name);
}
for(;;)
{
- current = (byte *) ALIGN_TO((addr_int_t) current, CPU_STRUCT_ALIGN);
+ current = (byte *) ALIGN_TO((uintptr_t) current, CPU_STRUCT_ALIGN);
if (current + sizeof(*this) > bufend)
break;
this = (SORT_NODE *) current;
};
struct dfa_state {
- addr_int_t edge[256]; /* Outgoing DFA edges. Bit 0 is set for incomplete edges which
+ uintptr_t edge[256]; /* Outgoing DFA edges. Bit 0 is set for incomplete edges, which
* contain just a state set, and clear for complete ones, which point
* to other states. NULL means `no match'.
*/
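
This is low-bit tagging again, with the tag distinguishing two payload kinds: bit 0 set means the slot still holds a tagged state-set handle to be materialized on first use, bit 0 clear means it already points at the destination state (or is NULL for no match). The loop below resolves such an edge once and memoizes the real pointer. A compilable sketch of that lookup (names hypothetical; resolve_state stands in for wp_new_state):

    #include <stdint.h>
    #include <stdio.h>

    struct dfa_state { uintptr_t edge[256]; };

    static struct dfa_state pool[2];

    static struct dfa_state *resolve_state(uintptr_t state_set)
    {
      (void) state_set;  /* a real DFA would build the state from the set here */
      return &pool[1];
    }

    static struct dfa_state *step(struct dfa_state *d, unsigned char c)
    {
      uintptr_t next = d->edge[c];
      if (next & 1)                              /* bit 0 set: incomplete edge */
        {
          struct dfa_state *nd = resolve_state(next & ~(uintptr_t) 1);
          d->edge[c] = (uintptr_t) nd;           /* memoize the real pointer */
          return nd;
        }
      return (struct dfa_state *) next;          /* complete edge, or NULL */
    }

    int main(void)
    {
      pool[0].edge['a'] = 0x10 | 1;              /* pretend state-set handle, tagged */
      struct dfa_state *s = step(&pool[0], 'a');
      printf("memoized=%d\n", pool[0].edge['a'] == (uintptr_t) s); /* 1 */
      return 0;
    }
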
d = w->dfa_start;
while (*s)
{
- addr_int_t next = d->edge[*s];
+ uintptr_t next = d->edge[*s];
if (next & 1)
{
/* Need to lookup/create the destination state */
struct dfa_state *new = wp_new_state(w, next & ~1);
- d->edge[*s] = (addr_int_t) new;
+ d->edge[*s] = (uintptr_t) new;
d = new;
}
else if (!next)