else
align = 1;
row_pixels_size = cols * pixel_size;
- row_size = ALIGN(row_pixels_size, align);
+ row_size = ALIGN_TO(row_pixels_size, align);
u64 image_size_64 = (u64)row_size * rows;
u64 bytes_64 = image_size_64 + (sizeof(struct image) + IMAGE_SSE_ALIGN_SIZE - 1 + sizeof(uns));
if (unlikely(bytes_64 > image_max_bytes))
#undef T
byte *cf_type_names[] = { "int", "u64", "double", "ip", "string", "lookup", "user" };
-#define DARY_HDR_SIZE ALIGN(sizeof(uns), CPU_STRUCT_ALIGN)
+#define DARY_HDR_SIZE ALIGN_TO(sizeof(uns), CPU_STRUCT_ALIGN)
static byte *
interpret_set_dynamic(struct cf_item *item, int number, byte **pars, void **ptr)
struct fb_mmap *F = FB_MMAP(f);
sh_off_t pos0 = f->pos & ~(sh_off_t)(PAGE_SIZE-1);
int l = MIN((sh_off_t)mmap_window_size, F->file_extend - pos0);
- uns ll = ALIGN(l, PAGE_SIZE);
- uns oll = ALIGN(f->bufend - f->buffer, PAGE_SIZE);
+ uns ll = ALIGN_TO(l, PAGE_SIZE);
+ uns oll = ALIGN_TO(f->bufend - f->buffer, PAGE_SIZE);
int prot = ((F->mode & O_ACCMODE) == O_RDONLY) ? PROT_READ : (PROT_READ | PROT_WRITE);
DBG(" ... Mapping %x(%x)+%x(%x) len=%x extend=%x", (int)pos0, (int)f->pos, ll, l, (int)F->file_size, (int)F->file_extend);
f->pos = end;
if (f->pos >= F->file_extend)
{
- F->file_extend = ALIGN(F->file_extend + mmap_extend_size, (sh_off_t)PAGE_SIZE);
+ F->file_extend = ALIGN_TO(F->file_extend + mmap_extend_size, (sh_off_t)PAGE_SIZE);
if (sh_ftruncate(F->fd, F->file_extend))
die("ftruncate(%s): %m", f->name);
}
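
To make the fb-mmap context easier to follow: when the write position outruns the region backed by the file, the file is grown in page-aligned steps before remapping, so the next window is fully backed. A minimal standalone sketch of that growth step, using plain POSIX ftruncate() and the renamed macro; every other name here is illustrative, not from the patch:

#include <unistd.h>
#include <sys/types.h>

#define ALIGN_TO(s, a) (((s)+(a)-1)&~((a)-1))

/* Grow the file behind fd so it covers at least `needed` bytes,
 * rounded up to a whole page.  Returns 0 on success, -1 with errno set. */
static int extend_file(int fd, off_t needed, off_t page_size)
{
  off_t new_size = ALIGN_TO(needed, page_size);
  return ftruncate(fd, new_size);
}
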
struct fb_mmap *F = FB_MMAP(f);
if (f->buffer)
- munmap(f->buffer, ALIGN(f->bufend-f->buffer, PAGE_SIZE));
+ munmap(f->buffer, ALIGN_TO(f->bufend-f->buffer, PAGE_SIZE));
if (F->file_extend > F->file_size &&
sh_ftruncate(F->fd, F->file_size))
die("ftruncate(%s): %m", f->name);
#define PTR_TO(s, i) &((s*)0)->i
#define OFFSETOF(s, i) ((unsigned int) PTR_TO(s, i))
#define SKIP_BACK(s, i, p) ((s *)((char *)p - OFFSETOF(s, i)))
-#define ALIGN(s, a) (((s)+a-1)&~(a-1))
+#define ALIGN_TO(s, a) (((s)+(a)-1)&~((a)-1))	/* round s up to a multiple of a; a must be a power of two */
#define ALIGN_PTR(p, s) ((addr_int_t)(p) % (s) ? (typeof(p))((addr_int_t)(p) + (s) - (addr_int_t)(p) % (s)) : (p))
#define UNALIGNED_PART(ptr, type) (((addr_int_t) (ptr)) % sizeof(type))
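
Since every hunk above is driven by this one macro, a standalone sketch of what ALIGN_TO computes may help reviewers; the assertion values are illustrative and assume the alignment is a power of two:

#include <assert.h>

#define ALIGN_TO(s, a) (((s)+(a)-1)&~((a)-1))

int main(void)
{
  assert(ALIGN_TO(13, 8) == 16);   /* rounds up to the next multiple of 8 */
  assert(ALIGN_TO(16, 8) == 16);   /* already aligned values are unchanged */
  assert(ALIGN_TO(0, 8) == 0);     /* zero stays zero */
  return 0;
}
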
* case of buffer-overflow. The function is not re-entrant because of a
* static longjmp handler. */
{
- uns lock_offset = ALIGN(expected_length + 3, PAGE_SIZE); // +3 due to the unaligned access
+ uns lock_offset = ALIGN_TO(expected_length + 3, PAGE_SIZE); // +3 due to the unaligned access
if (lock_offset > buf->len)
lizard_realloc(buf, lock_offset);
volatile sh_sighandler_t old_handler = signal_handler[SIGSEGV];
sh_off_t win_start = start/PAGE_SIZE * PAGE_SIZE;
size_t win_len = PARTMAP_WINDOW;
if ((sh_off_t) (win_start+win_len) > p->file_size)
- win_len = ALIGN(p->file_size - win_start, PAGE_SIZE);
+ win_len = ALIGN_TO(p->file_size - win_start, PAGE_SIZE);
if ((sh_off_t) (win_start+win_len) < end)
die("partmap_map: Window is too small for mapping %d bytes", size);
p->start_map = sh_mmap(NULL, win_len, p->writeable ? (PROT_READ | PROT_WRITE) : PROT_READ, MAP_SHARED, p->fd, win_start);
/* We don't need msyncing on Linux, since the mappings are guaranteed to be coherent */
len += (start % PAGE_SIZE);
start -= start % PAGE_SIZE;
- len = ALIGN(len, PAGE_SIZE);
+ len = ALIGN_TO(len, PAGE_SIZE);
if (msync(q->mmap_data + start, len, MS_ASYNC | MS_INVALIDATE) < 0)
log(L_ERROR, "Cache %s: msync failed: %m", q->file_name);
#endif
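
The three lines before the msync() call follow the usual pattern for page-granular syscalls: extend the range downwards to the start of its first page, then round the length up with ALIGN_TO. A self-contained sketch of just that arithmetic; the function name and page_size parameter are illustrative, and real code would take the page size from sysconf(_SC_PAGESIZE):

#include <sys/mman.h>
#include <stddef.h>

#define ALIGN_TO(s, a) (((s)+(a)-1)&~((a)-1))

/* Flush a byte range of a shared mapping.  msync() insists on a
 * page-aligned address, so widen the range to whole pages first. */
static int sync_range(unsigned char *map, size_t start, size_t len, size_t page_size)
{
  len += start % page_size;          /* grow downwards to the page boundary */
  start -= start % page_size;
  len = ALIGN_TO(len, page_size);    /* and upwards to a whole page */
  return msync(map + start, len, MS_ASYNC | MS_INVALIDATE);
}
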
q->file_name = xstrdup(par->file_name);
ASSERT(par->block_size >= 8 && !(par->block_size & (par->block_size-1)));
- par->cache_size = ALIGN(par->cache_size, par->block_size);
+ par->cache_size = ALIGN_TO(par->cache_size, par->block_size);
if (par->force_reset <= 0 && qache_open_existing(q, par))
;
}
for(;;)
{
- current = (byte *) ALIGN((addr_int_t) current, CPU_STRUCT_ALIGN);
+ current = (byte *) ALIGN_TO((addr_int_t) current, CPU_STRUCT_ALIGN);
if (current + sizeof(*this) > bufend)
break;
this = (SORT_NODE *) current;
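
The sorter hunk is easiest to read as a traversal of variable-length records packed into a byte buffer, where the cursor is re-aligned before each record so that struct accesses stay naturally aligned. A minimal sketch of that traversal follows; the record layout and all names except ALIGN_TO are hypothetical:

#include <stdint.h>
#include <stddef.h>

#define CPU_STRUCT_ALIGN sizeof(void *)           /* assumption: word alignment */
#define ALIGN_TO(s, a) (((s)+(a)-1)&~((a)-1))

struct record {
  uint32_t len;                                   /* payload length in bytes */
  /* len bytes of payload follow */
};

static void walk(unsigned char *buf, unsigned char *bufend)
{
  unsigned char *current = buf;
  for (;;)
    {
      current = (unsigned char *) ALIGN_TO((uintptr_t) current, CPU_STRUCT_ALIGN);
      if (current + sizeof(struct record) > bufend)
        break;                                    /* no room for another header */
      struct record *r = (struct record *) current;
      if (current + sizeof(struct record) + r->len > bufend)
        break;                                    /* truncated payload */
      /* ... process the record here ... */
      current += sizeof(struct record) + r->len;  /* step past the payload */
    }
}
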