Rename the sh_* identifiers to ucw_* and update all uses across the tree.
Macros:
sh_fdatasync -> ucw_fdatasync
sh_fstat -> ucw_fstat
sh_ftruncate -> ucw_ftruncate
sh_mmap -> ucw_mmap
sh_open -> ucw_open
sh_pread -> ucw_pread
sh_pwrite -> ucw_pwrite
sh_seek -> ucw_seek
sh_stat -> ucw_stat
Functions:
sh_file_size -> ucw_file_size
Types:
sh_off_t -> ucw_off_t
sh_sighandler_t -> ucw_sighandler_t
sh_stat_t -> ucw_stat_t
sh_time_t -> ucw_time_t
Internals:
sh_getopt -> ucw_getopt
sh_getopt_long -> ucw_getopt_long
sh_getopt_longonly -> ucw_getopt_longonly
sh_optarg -> ucw_optarg
sh_opterr -> ucw_opterr
sh_optind -> ucw_optind
sh_optopt -> ucw_optopt
sh_xfree -> ucw_xfree
sh_xmalloc -> ucw_xmalloc
sh_xrealloc -> ucw_xrealloc
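For illustration, a call site after the rename might look like the following minimal sketch; the headers, file name and helper are placeholders, not part of the patch:

#include <fcntl.h>
#include <unistd.h>
#include "ucw/lib.h"	/* assumed to provide die(), msg() and the ucw_* wrappers */

static void check_file(const char *name)
{
  int fd = ucw_open(name, O_RDONLY);
  if (fd < 0)
    die("Cannot open %s: %m", name);
  ucw_off_t len = ucw_seek(fd, 0, SEEK_END);	/* 64-bit safe with large-file support */
  msg(L_INFO, "%s: %lld bytes", name, (long long)len);
  close(fd);
}

The updated hunks follow.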
#ifdef COPY
msg(L_INFO, "Creating input file");
- int in_fd = sh_open("tmp/ft-in", O_RDWR | O_CREAT | O_TRUNC | DIRECT, 0666);
+ int in_fd = ucw_open("tmp/ft-in", O_RDWR | O_CREAT | O_TRUNC | DIRECT, 0666);
ASSERT(in_fd >= 0);
ASSERT(!(total_size % bufsize));
P_INIT;
for (uns i=0; i<files; i++)
{
sprintf(name[i], "tmp/ft-%d", i);
- fd[i] = sh_open(name[i], O_RDWR | O_CREAT | O_TRUNC | DIRECT, 0666);
+ fd[i] = ucw_open(name[i], O_RDWR | O_CREAT | O_TRUNC | DIRECT, 0666);
if (fd[i] < 0)
die("Cannot create %s: %m", name[i]);
}
#ifdef COPY
msg(L_INFO, "Creating input file");
- int in_fd = sh_open("tmp/ft-in", O_RDWR | O_CREAT | O_TRUNC | DIRECT, 0666);
+ int in_fd = ucw_open("tmp/ft-in", O_RDWR | O_CREAT | O_TRUNC | DIRECT, 0666);
ASSERT(in_fd >= 0);
ASSERT(!(total_size % xbufsize));
P_INIT;
for (uns i=0; i<files; i++)
{
sprintf(name[i], "tmp/ft-%d", i);
- fd[i] = sh_open(name[i], O_RDWR | O_CREAT | O_TRUNC | DIRECT, 0666);
+ fd[i] = ucw_open(name[i], O_RDWR | O_CREAT | O_TRUNC | DIRECT, 0666);
if (fd[i] < 0)
die("Cannot create %s: %m", name[i]);
buf[i] = big_alloc(bufsize);
DBG("libmagick_read_header()");
/* Read entire stream */
- sh_off_t file_size = bfilesize(io->fastbuf) - btell(io->fastbuf);
+ ucw_off_t file_size = bfilesize(io->fastbuf) - btell(io->fastbuf);
if (unlikely(file_size > MAX_FILE_SIZE))
{
IMAGE_ERROR(io->context, IMAGE_ERROR_READ_FAILED, "Too long stream.");
typedef int64_t s64; /* exactly 64 bits, signed */
typedef unsigned int uns; /* at least 32 bits */
-typedef u32 sh_time_t; /* seconds since UNIX epoch */
+typedef u32 ucw_time_t; /* seconds since UNIX epoch */
typedef s64 timestamp_t; /* milliseconds since UNIX epoch */
#ifdef CONFIG_LARGE_FILES /* File positions */
-typedef s64 sh_off_t;
+typedef s64 ucw_off_t;
#else
-typedef s32 sh_off_t;
+typedef s32 ucw_off_t;
#endif
#endif
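Since ucw_off_t is 32-bit without large-file support and 64-bit with it, the rest of the patch casts it explicitly whenever it is formatted. A minimal sketch of that convention, assuming "ucw/fastbuf.h" declares btell() as shown further below:

#include "ucw/lib.h"
#include "ucw/fastbuf.h"

static void report_position(struct fastbuf *f)
{
  ucw_off_t pos = btell(f);	/* may exceed 32 bits with large files enabled */
  msg(L_INFO, "Current position: %lld", (long long)pos);
}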
f->bptr = f->bstop = f->buffer;
}
-inline void bsetpos(struct fastbuf *f, sh_off_t pos)
+inline void bsetpos(struct fastbuf *f, ucw_off_t pos)
{
/* We can optimize seeks only when reading */
if (pos >= f->pos - (f->bstop - f->buffer) && pos <= f->pos)
}
}
-void bseek(struct fastbuf *f, sh_off_t pos, int whence)
+void bseek(struct fastbuf *f, ucw_off_t pos, int whence)
{
switch (whence)
{
return 1;
}
-sh_off_t
+ucw_off_t
bfilesize(struct fastbuf *f)
{
if (!f)
return 0;
- sh_off_t pos = btell(f);
+ ucw_off_t pos = btell(f);
bflush(f);
if (!f->seek(f, 0, SEEK_END))
return -1;
- sh_off_t len = btell(f);
+ ucw_off_t len = btell(f);
bsetpos(f, pos);
return len;
}
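A hedged sketch of a typical bfilesize() caller, mirroring the libmagick hunk above; the helper name is made up:

#include "ucw/lib.h"
#include "ucw/fastbuf.h"

/* Bytes left between the current position and the end of a seekable stream;
 * returns -1 when the stream is not seekable. */
static ucw_off_t bytes_remaining(struct fastbuf *f)
{
  ucw_off_t total = bfilesize(f);
  return (total < 0) ? -1 : total - btell(f);
}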
byte *bptr, *bstop; /* Access pointers */
byte *buffer, *bufend; /* Start and end of the buffer */
char *name; /* File name for error messages */
- sh_off_t pos; /* Position of bstop in the file */
+ ucw_off_t pos; /* Position of bstop in the file */
int (*refill)(struct fastbuf *); /* Get a buffer with new data */
void (*spout)(struct fastbuf *); /* Write buffer data to the file */
- int (*seek)(struct fastbuf *, sh_off_t, int); /* Slow path for bseek(), buffer already flushed; returns success */
+ int (*seek)(struct fastbuf *, ucw_off_t, int); /* Slow path for bseek(), buffer already flushed; returns success */
void (*close)(struct fastbuf *); /* Close the stream */
int (*config)(struct fastbuf *, uns, int); /* Configure the stream */
int can_overwrite_buffer; /* Can the buffer be altered? (see discussion above) 0=never, 1=temporarily, 2=permanently */
void bclose(struct fastbuf *f);
void bflush(struct fastbuf *f);
-void bseek(struct fastbuf *f, sh_off_t pos, int whence);
-void bsetpos(struct fastbuf *f, sh_off_t pos);
+void bseek(struct fastbuf *f, ucw_off_t pos, int whence);
+void bsetpos(struct fastbuf *f, ucw_off_t pos);
void brewind(struct fastbuf *f);
-sh_off_t bfilesize(struct fastbuf *f); /* -1 if not seekable */
+ucw_off_t bfilesize(struct fastbuf *f); /* -1 if not seekable */
-static inline sh_off_t btell(struct fastbuf *f)
+static inline ucw_off_t btell(struct fastbuf *f)
{
return f->pos + (f->bptr - f->bstop);
}
else
{
af = xmalloc_zero(sizeof(*af) + strlen(name));
- if ((af->fd = sh_open(name, O_WRONLY | O_CREAT | O_TRUNC | O_APPEND, 0666)) < 0)
+ if ((af->fd = ucw_open(name, O_WRONLY | O_CREAT | O_TRUNC | O_APPEND, 0666)) < 0)
die("Cannot create %s: %m", name);
af->use_count = 1;
af->record_len = record_len;
}
static int
-fbbuf_seek(struct fastbuf *f, sh_off_t pos, int whence)
+fbbuf_seek(struct fastbuf *f, ucw_off_t pos, int whence)
{
/* Somebody might want to seek to the end of buffer, try to be nice to him. */
- sh_off_t len = f->bufend - f->buffer;
+ ucw_off_t len = f->bufend - f->buffer;
if (whence == SEEK_END)
pos += len;
ASSERT(pos >= 0 && pos <= len);
asio_submit(r);
asio_sync(F->io_queue);
DBG("FB-DIRECT: Truncating at %llu", (long long)f->pos);
- if (sh_ftruncate(F->fd, f->pos) < 0)
+ if (ucw_ftruncate(F->fd, f->pos) < 0)
die("Error truncating %s: %m", f->name);
}
else
}
static int
-fbdir_seek(struct fastbuf *f, sh_off_t pos, int whence)
+fbdir_seek(struct fastbuf *f, ucw_off_t pos, int whence)
{
DBG("FB-DIRECT: Seek %llu %d", (long long)pos, whence);
return 1;
fbdir_change_mode(FB_DIRECT(f), M_NULL); // Wait for all async requests to finish
- sh_off_t l = sh_seek(FB_DIRECT(f)->fd, pos, whence);
+ ucw_off_t l = ucw_seek(FB_DIRECT(f)->fd, pos, whence);
if (l < 0)
return 0;
f->pos = l;
int fd; /* File descriptor */
int is_temp_file;
int keep_back_buf; /* Optimize for backwards reading */
- sh_off_t wpos; /* Real file position */
+ ucw_off_t wpos; /* Real file position */
uns wlen; /* Window size */
};
#define FB_FILE(f) ((struct fb_file *)(f)->is_fastbuf)
/* Forward or no seek */
if (F->wpos <= f->pos)
{
- sh_off_t diff = f->pos - F->wpos;
+ ucw_off_t diff = f->pos - F->wpos;
/* Formula for long forward seeks (prefer lseek()) */
- if (diff > ((sh_off_t)blen << 2))
+ if (diff > ((ucw_off_t)blen << 2))
{
long_seek:
f->bptr = f->buffer + back;
/* Backwards seek */
else
{
- sh_off_t diff = F->wpos - f->pos;
+ ucw_off_t diff = F->wpos - f->pos;
/* Formula for long backwards seeks (keep smaller backbuffer than for shorter seeks ) */
- if (diff > ((sh_off_t)blen << 1))
+ if (diff > ((ucw_off_t)blen << 1))
{
- if ((sh_off_t)back > f->pos)
+ if ((ucw_off_t)back > f->pos)
back = f->pos;
goto long_seek;
}
return 1;
}
back *= 3;
- if ((sh_off_t)back > f->pos)
+ if ((ucw_off_t)back > f->pos)
back = f->pos;
f->bptr = f->buffer + back;
read_len = blen;
seek:
/* Do lseek() */
F->wpos = f->pos + (f->buffer - f->bptr);
- if (sh_seek(F->fd, F->wpos, SEEK_SET) < 0)
+ if (ucw_seek(F->fd, F->wpos, SEEK_SET) < 0)
die("Error seeking %s: %m", f->name);
}
/* Read (part of) buffer */
bfd_spout(struct fastbuf *f)
{
/* Do delayed lseek() if needed */
- if (FB_FILE(f)->wpos != f->pos && sh_seek(FB_FILE(f)->fd, f->pos, SEEK_SET) < 0)
+ if (FB_FILE(f)->wpos != f->pos && ucw_seek(FB_FILE(f)->fd, f->pos, SEEK_SET) < 0)
die("Error seeking %s: %m", f->name);
int l = f->bptr - f->buffer;
}
static int
-bfd_seek(struct fastbuf *f, sh_off_t pos, int whence)
+bfd_seek(struct fastbuf *f, ucw_off_t pos, int whence)
{
/* Delay the seek for the next refill() or spout() call (if whence != SEEK_END). */
- sh_off_t l;
+ ucw_off_t l;
switch (whence)
{
case SEEK_SET:
f->pos = l;
return 1;
case SEEK_END:
- l = sh_seek(FB_FILE(f)->fd, pos, SEEK_END);
+ l = ucw_seek(FB_FILE(f)->fd, pos, SEEK_END);
if (l < 0)
return 0;
FB_FILE(f)->wpos = f->pos = l;
}
static int
-fbgrow_seek(struct fastbuf *b, sh_off_t pos, int whence)
+fbgrow_seek(struct fastbuf *b, ucw_off_t pos, int whence)
{
ASSERT(FB_GBUF(b)->last_written); /* Seeks allowed only in read mode */
- sh_off_t len = FB_GBUF(b)->last_written - b->buffer;
+ ucw_off_t len = FB_GBUF(b)->last_written - b->buffer;
if (whence == SEEK_END)
pos += len;
ASSERT(pos >= 0 && pos <= len);
struct msblock {
struct msblock *next;
- sh_off_t pos;
+ ucw_off_t pos;
unsigned size;
byte data[0];
};
}
static int
-fbmem_seek(struct fastbuf *f, sh_off_t pos, int whence)
+fbmem_seek(struct fastbuf *f, ucw_off_t pos, int whence)
{
struct memstream *m = FB_MEM(f)->stream;
struct msblock *b;
/* Yes, this is linear. But considering the average number of buckets, it doesn't matter. */
for (b=m->first; b; b=b->next)
{
- if (pos <= b->pos + (sh_off_t)b->size) /* <=, because we need to be able to seek just after file end */
+ if (pos <= b->pos + (ucw_off_t)b->size) /* <=, because we need to be able to seek just after file end */
{
f->buffer = b->data;
f->bptr = b->data + (pos - b->pos);
struct fastbuf fb;
int fd;
int is_temp_file;
- sh_off_t file_size;
- sh_off_t file_extend;
- sh_off_t window_pos;
+ ucw_off_t file_size;
+ ucw_off_t file_extend;
+ ucw_off_t window_pos;
uns window_size;
int mode;
};
bfmm_map_window(struct fastbuf *f)
{
struct fb_mmap *F = FB_MMAP(f);
- sh_off_t pos0 = f->pos & ~(sh_off_t)(CPU_PAGE_SIZE-1);
- int l = MIN((sh_off_t)mmap_window_size, F->file_extend - pos0);
+ ucw_off_t pos0 = f->pos & ~(ucw_off_t)(CPU_PAGE_SIZE-1);
+ int l = MIN((ucw_off_t)mmap_window_size, F->file_extend - pos0);
uns ll = ALIGN_TO(l, CPU_PAGE_SIZE);
int prot = ((F->mode & O_ACCMODE) == O_RDONLY) ? PROT_READ : (PROT_READ | PROT_WRITE);
}
F->window_size = ll;
if (!f->buffer)
- f->buffer = sh_mmap(NULL, ll, prot, MAP_SHARED, F->fd, pos0);
+ f->buffer = ucw_mmap(NULL, ll, prot, MAP_SHARED, F->fd, pos0);
else
- f->buffer = sh_mmap(f->buffer, ll, prot, MAP_SHARED | MAP_FIXED, F->fd, pos0);
+ f->buffer = ucw_mmap(f->buffer, ll, prot, MAP_SHARED | MAP_FIXED, F->fd, pos0);
if (f->buffer == (byte *) MAP_FAILED)
die("mmap(%s): %m", f->name);
#ifdef MADV_SEQUENTIAL
bfmm_spout(struct fastbuf *f)
{
struct fb_mmap *F = FB_MMAP(f);
- sh_off_t end = f->pos + (f->bptr - f->bstop);
+ ucw_off_t end = f->pos + (f->bptr - f->bstop);
DBG("Spout <- %p %p %p %p", f->buffer, f->bptr, f->bstop, f->bufend);
if (end > F->file_size)
f->pos = end;
if (f->pos >= F->file_extend)
{
- F->file_extend = ALIGN_TO(F->file_extend + mmap_extend_size, (sh_off_t)CPU_PAGE_SIZE);
- if (sh_ftruncate(F->fd, F->file_extend))
+ F->file_extend = ALIGN_TO(F->file_extend + mmap_extend_size, (ucw_off_t)CPU_PAGE_SIZE);
+ if (ucw_ftruncate(F->fd, F->file_extend))
die("ftruncate(%s): %m", f->name);
}
bfmm_map_window(f);
}
static int
-bfmm_seek(struct fastbuf *f, sh_off_t pos, int whence)
+bfmm_seek(struct fastbuf *f, ucw_off_t pos, int whence)
{
if (whence == SEEK_END)
pos += FB_MMAP(f)->file_size;
if (f->buffer)
munmap(f->buffer, F->window_size);
if (F->file_extend > F->file_size &&
- sh_ftruncate(F->fd, F->file_size))
+ ucw_ftruncate(F->fd, F->file_size))
die("ftruncate(%s): %m", f->name);
bclose_file_helper(f, F->fd, F->is_temp_file);
xfree(f);
f->name = (byte *)(F+1);
memcpy(f->name, name, namelen);
F->fd = fd;
- F->file_extend = F->file_size = sh_seek(fd, 0, SEEK_END);
+ F->file_extend = F->file_size = ucw_seek(fd, 0, SEEK_END);
if (F->file_size < 0)
die("seek(%s): %m", name);
if (mode & O_APPEND)
#endif
if (params->type == FB_MMAP && (mode & O_ACCMODE) == O_WRONLY)
mode = (mode & ~O_ACCMODE) | O_RDWR;
- int fd = sh_open(name, mode, 0666);
+ int fd = ucw_open(name, mode, 0666);
if (fd < 0)
if (try)
return NULL;
int create_flags, fd, retry = 10;
do
{
temp_file_name(name_buf, &create_flags);
- fd = open(name_buf, open_flags | create_flags, mode);
+ fd = ucw_open(name_buf, open_flags | create_flags, mode);
}
while (fd < 0 && errno == EEXIST && retry --);
if (fd < 0)
#ifndef _UCW_GETOPT_GETOPT_SH_H
#define _UCW_GETOPT_GETOPT_SH_H
-#define getopt sh_getopt
-#define getopt_long sh_getopt_long
-#define getopt_long_only sh_getopt_longonly
-#define optarg sh_optarg
-#define optind sh_optind
-#define opterr sh_opterr
-#define optopt sh_optopt
+#define getopt ucw_getopt
+#define getopt_long ucw_getopt_long
+#define getopt_long_only ucw_getopt_longonly
+#define optarg ucw_optarg
+#define optind ucw_optind
+#define opterr ucw_opterr
+#define optopt ucw_optopt
#include "ucw/getopt/getopt.h"
for (i=0; i<TESTS; i++)
{
uns idx = random()%COUNT;
- sh_off_t ofs = idx*BLOCK;
+ ucw_off_t ofs = idx*BLOCK;
bseek(b, ofs, SEEK_SET);
bread(b, block, BLOCK);
if (block[17] != (idx & 0xff))
#ifdef CONFIG_LFS
-#define sh_open open64
-#define sh_seek lseek64
-#define sh_pread pread64
-#define sh_pwrite pwrite64
-#define sh_ftruncate ftruncate64
-#define sh_mmap(a,l,p,f,d,o) mmap64(a,l,p,f,d,o)
-#define sh_pread pread64
-#define sh_pwrite pwrite64
-#define sh_stat stat64
-#define sh_fstat fstat64
-typedef struct stat64 sh_stat_t;
+#define ucw_open open64
+#define ucw_seek lseek64
+#define ucw_pread pread64
+#define ucw_pwrite pwrite64
+#define ucw_ftruncate ftruncate64
+#define ucw_mmap(a,l,p,f,d,o) mmap64(a,l,p,f,d,o)
+#define ucw_pread pread64
+#define ucw_pwrite pwrite64
+#define ucw_stat stat64
+#define ucw_fstat fstat64
+typedef struct stat64 ucw_stat_t;
#else /* !CONFIG_LFS */
-#define sh_open open
-#define sh_seek(f,o,w) lseek(f,o,w)
-#define sh_ftruncate(f,o) ftruncate(f,o)
-#define sh_mmap(a,l,p,f,d,o) mmap(a,l,p,f,d,o)
-#define sh_pread pread
-#define sh_pwrite pwrite
-#define sh_stat stat
-#define sh_fstat fstat
-typedef struct stat sh_stat_t;
+#define ucw_open open
+#define ucw_seek(f,o,w) lseek(f,o,w)
+#define ucw_ftruncate(f,o) ftruncate(f,o)
+#define ucw_mmap(a,l,p,f,d,o) mmap(a,l,p,f,d,o)
+#define ucw_pread pread
+#define ucw_pwrite pwrite
+#define ucw_stat stat
+#define ucw_fstat fstat
+typedef struct stat ucw_stat_t;
#endif /* !CONFIG_LFS */
#if defined(_POSIX_SYNCHRONIZED_IO) && (_POSIX_SYNCHRONIZED_IO > 0)
-#define sh_fdatasync fdatasync
+#define ucw_fdatasync fdatasync
#else
-#define sh_fdatasync fsync
+#define ucw_fdatasync fsync
#endif
#define HAVE_PREAD
-static inline sh_off_t
-sh_file_size(const char *name)
+static inline ucw_off_t
+ucw_file_size(const char *name)
{
- int fd = sh_open(name, O_RDONLY);
+ int fd = ucw_open(name, O_RDONLY);
if (fd < 0)
die("Cannot open %s: %m", name);
- sh_off_t len = sh_seek(fd, 0, SEEK_END);
+ ucw_off_t len = ucw_seek(fd, 0, SEEK_END);
close(fd);
return len;
}
/* Memory allocation */
-#define xmalloc sh_xmalloc
-#define xrealloc sh_xrealloc
-#define xfree sh_xfree
+#define xmalloc ucw_xmalloc
+#define xrealloc ucw_xrealloc
+#define xfree ucw_xfree
/*
* Unfortunately, several libraries we might want to link to define
/* sighandler.c */
-typedef int (*sh_sighandler_t)(int); // gets signum, returns nonzero if abort() should be called
+typedef int (*ucw_sighandler_t)(int); // gets signum, returns nonzero if abort() should be called
void handle_signal(int signum);
void unhandle_signal(int signum);
-sh_sighandler_t set_signal_handler(int signum, sh_sighandler_t new);
+ucw_sighandler_t set_signal_handler(int signum, ucw_sighandler_t new);
/* bigalloc.c */
uns lock_offset = ALIGN_TO(expected_length + 3, CPU_PAGE_SIZE); // +3 due to the unaligned access
if (lock_offset > buf->len)
lizard_realloc(buf, lock_offset);
- volatile sh_sighandler_t old_handler = set_signal_handler(SIGSEGV, sigsegv_handler);
+ volatile ucw_sighandler_t old_handler = set_signal_handler(SIGSEGV, sigsegv_handler);
byte *ptr;
if (!setjmp(safe_decompress_jump))
{
if (strcmp(name, log_filename))
{
strcpy(log_filename, name);
- fd = sh_open(name, O_WRONLY | O_CREAT | O_APPEND, 0666);
+ fd = ucw_open(name, O_WRONLY | O_CREAT | O_APPEND, 0666);
if (fd < 0)
die("Unable to open log file %s: %m", name);
dup2(fd, 2);
#include <sys/time.h>
timestamp_t main_now;
-sh_time_t main_now_seconds;
+ucw_time_t main_now_seconds;
uns main_shutdown;
clist main_timer_list, main_file_list, main_hook_list, main_process_list;
#include "ucw/clists.h"
extern timestamp_t main_now; /* Current time in milliseconds since UNIX epoch */
-extern sh_time_t main_now_seconds; /* Current time in seconds since the epoch */
+extern ucw_time_t main_now_seconds; /* Current time in seconds since the epoch */
extern uns main_shutdown;
extern clist main_timer_list, main_file_list, main_hook_list, main_process_list;
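The two mainloop clocks above differ only in unit; a hedged sketch of working with them (the header path and helper are assumptions):

#include "ucw/lib.h"
#include "ucw/mainloop.h"	/* assumed to declare main_now and main_now_seconds */

static timestamp_t deadline_in(uns msec)
{
  return main_now + msec;	/* main_now is in milliseconds since the epoch */
}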
{
struct partmap *p = xmalloc_zero(sizeof(struct partmap));
- p->fd = sh_open(name, writeable ? O_RDWR : O_RDONLY);
+ p->fd = ucw_open(name, writeable ? O_RDWR : O_RDONLY);
if (p->fd < 0)
die("open(%s): %m", name);
- if ((p->file_size = sh_seek(p->fd, 0, SEEK_END)) < 0)
+ if ((p->file_size = ucw_seek(p->fd, 0, SEEK_END)) < 0)
die("lseek(%s): %m", name);
p->writeable = writeable;
#ifdef CONFIG_PARTMAP_IS_MMAP
return p;
}
-sh_off_t
+ucw_off_t
partmap_size(struct partmap *p)
{
return p->file_size;
}
void
-partmap_load(struct partmap *p, sh_off_t start, uns size)
+partmap_load(struct partmap *p, ucw_off_t start, uns size)
{
if (p->start_map)
munmap(p->start_map, p->end_off - p->start_off);
- sh_off_t end = start + size;
- sh_off_t win_start = start/CPU_PAGE_SIZE * CPU_PAGE_SIZE;
+ ucw_off_t end = start + size;
+ ucw_off_t win_start = start/CPU_PAGE_SIZE * CPU_PAGE_SIZE;
size_t win_len = PARTMAP_WINDOW;
- if ((sh_off_t) (win_start+win_len) > p->file_size)
+ if ((ucw_off_t) (win_start+win_len) > p->file_size)
win_len = ALIGN_TO(p->file_size - win_start, CPU_PAGE_SIZE);
- if ((sh_off_t) (win_start+win_len) < end)
+ if ((ucw_off_t) (win_start+win_len) < end)
die("partmap_map: Window is too small for mapping %d bytes", size);
- p->start_map = sh_mmap(NULL, win_len, p->writeable ? (PROT_READ | PROT_WRITE) : PROT_READ, MAP_SHARED, p->fd, win_start);
+ p->start_map = ucw_mmap(NULL, win_len, p->writeable ? (PROT_READ | PROT_WRITE) : PROT_READ, MAP_SHARED, p->fd, win_start);
if (p->start_map == MAP_FAILED)
die("mmap failed at position %lld: %m", (long long)win_start);
p->start_off = win_start;
struct partmap {
int fd;
- sh_off_t file_size;
- sh_off_t start_off, end_off;
+ ucw_off_t file_size;
+ ucw_off_t start_off, end_off;
byte *start_map;
int writeable;
};
struct partmap *partmap_open(char *name, int writeable);
void partmap_close(struct partmap *p);
-sh_off_t partmap_size(struct partmap *p);
-void partmap_load(struct partmap *p, sh_off_t start, uns size);
+ucw_off_t partmap_size(struct partmap *p);
+void partmap_load(struct partmap *p, ucw_off_t start, uns size);
static inline void *
-partmap_map(struct partmap *p, sh_off_t start, uns size UNUSED)
+partmap_map(struct partmap *p, ucw_off_t start, uns size UNUSED)
{
#ifndef CONFIG_PARTMAP_IS_MMAP
- if (unlikely(!p->start_map || start < p->start_off || (sh_off_t) (start+size) > p->end_off))
+ if (unlikely(!p->start_map || start < p->start_off || (ucw_off_t) (start+size) > p->end_off))
partmap_load(p, start, size);
#endif
return p->start_map + (start - p->start_off);
}
static inline void *
-partmap_map_forward(struct partmap *p, sh_off_t start, uns size UNUSED)
+partmap_map_forward(struct partmap *p, ucw_off_t start, uns size UNUSED)
{
#ifndef CONFIG_PARTMAP_IS_MMAP
- if (unlikely((sh_off_t) (start+size) > p->end_off))
+ if (unlikely((ucw_off_t) (start+size) > p->end_off))
partmap_load(p, start, size);
#endif
return p->start_map + (start - p->start_off);
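To show how the partmap interface above fits together, a hedged usage sketch; the header path, file argument and helper are assumptions:

#include "ucw/lib.h"
#include "ucw/partmap.h"	/* assumed header for the declarations above */

static void dump_head(char *file)
{
  struct partmap *p = partmap_open(file, 0);	/* read-only */
  ucw_off_t size = partmap_size(p);
  uns want = MIN(size, 16);
  byte *ptr = partmap_map(p, 0, want);		/* maps a window on demand */
  for (uns i = 0; i < want; i++)
    msg(L_INFO, "byte[%u] = 0x%02x", i, ptr[i]);
  partmap_close(p);
}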
bwrite(fb, &h, sizeof(h));
/* Entry #0: heads of all lists */
- ASSERT(btell(fb) == (sh_off_t)h.entry_table_start);
+ ASSERT(btell(fb) == (ucw_off_t)h.entry_table_start);
struct qache_entry ent;
bzero(&ent, sizeof(ent));
ent.first_data_block = h.first_data_block;
}
/* The hash table */
- ASSERT(btell(fb) == (sh_off_t)h.hash_table_start);
+ ASSERT(btell(fb) == (ucw_off_t)h.hash_table_start);
for (uns i=0; i<h.hash_size; i++)
bputl(fb, 0);
/* The next pointers */
- ASSERT(btell(fb) == (sh_off_t)h.next_table_start);
+ ASSERT(btell(fb) == (ucw_off_t)h.next_table_start);
for (uns i=0; i<h.num_blocks; i++)
bputl(fb, (i < h.first_data_block || i == h.num_blocks-1) ? 0 : i+1);
/* Padding */
- ASSERT(btell(fb) <= (sh_off_t)(h.first_data_block << h.block_shift));
- while (btell(fb) < (sh_off_t)(h.first_data_block << h.block_shift))
+ ASSERT(btell(fb) <= (ucw_off_t)(h.first_data_block << h.block_shift));
+ while (btell(fb) < (ucw_off_t)(h.first_data_block << h.block_shift))
bputc(fb, 0);
/* Data blocks */
for (uns j=0; j<h.block_size; j+=4)
bputl(fb, 0);
- ASSERT(btell(fb) == (sh_off_t)par->cache_size);
+ ASSERT(btell(fb) == (ucw_off_t)par->cache_size);
bclose(fb);
msg(L_INFO, "Cache %s: created (%d bytes, %d slots, %d buckets)", q->file_name, par->cache_size, h.max_entries, h.hash_size);
ucwlib_unlock();
}
-sh_sighandler_t
-set_signal_handler(int signum, sh_sighandler_t new)
+ucw_sighandler_t
+set_signal_handler(int signum, ucw_sighandler_t new)
{
struct ucwlib_context *ctx = ucwlib_thread_context();
if (!ctx->signal_handlers)
- ctx->signal_handlers = xmalloc_zero(NSIG * sizeof(sh_sighandler_t));
- sh_sighandler_t old = ctx->signal_handlers[signum];
+ ctx->signal_handlers = xmalloc_zero(NSIG * sizeof(ucw_sighandler_t));
+ ucw_sighandler_t old = ctx->signal_handlers[signum];
ctx->signal_handlers[signum] = new;
return old;
}
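A hedged sketch of the handler contract noted earlier (the handler receives the signal number and returns nonzero if abort() should follow); handle_signal(SIGSEGV) is assumed to have been called during initialization, and the guarded function is made up:

#include <signal.h>
#include "ucw/lib.h"	/* assumed to declare ucw_sighandler_t and set_signal_handler() */

static int sigsegv_seen(int signum UNUSED)
{
  return 0;	/* report as handled; nonzero would request abort() */
}

static void guarded_section(void)
{
  ucw_sighandler_t old = set_signal_handler(SIGSEGV, sigsegv_seen);
  /* ... code that may fault goes here ... */
  set_signal_handler(SIGSEGV, old);
}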
void sbuck_drop(struct sort_bucket *b);
int sbuck_have(struct sort_bucket *b);
int sbuck_has_file(struct sort_bucket *b);
-sh_off_t sbuck_size(struct sort_bucket *b);
+ucw_off_t sbuck_size(struct sort_bucket *b);
struct fastbuf *sbuck_read(struct sort_bucket *b);
struct fastbuf *sbuck_write(struct sort_bucket *b);
void sbuck_swap_out(struct sort_bucket *b);
}
static struct sort_bucket *
-sbuck_join_to(struct sort_bucket *b, sh_off_t *sizep)
+sbuck_join_to(struct sort_bucket *b, ucw_off_t *sizep)
{
if (sorter_debug & SORT_DEBUG_NO_JOIN)
return NULL;
return out;
}
-static sh_off_t
-sbuck_ins_or_join(struct sort_bucket *b, cnode *list_pos, struct sort_bucket *join, sh_off_t join_size)
+static ucw_off_t
+sbuck_ins_or_join(struct sort_bucket *b, cnode *list_pos, struct sort_bucket *join, ucw_off_t join_size)
{
if (join && join->runs >= 2)
{
{
struct sort_bucket *ins[3] = { NULL }, *outs[3] = { NULL };
cnode *list_pos = b->n.prev;
- sh_off_t join_size;
+ ucw_off_t join_size;
struct sort_bucket *join = sbuck_join_to(b, &join_size);
if (!(sorter_debug & SORT_DEBUG_NO_PRESORT) || (b->flags & SBF_CUSTOM_PRESORT))
if (!sorter_presort(ctx, b, ins[0], join ? : ins[0]))
{
sorter_stop_timer(ctx, &ctx->total_pre_time);
- sh_off_t size = sbuck_ins_or_join(ins[0], list_pos, join, join_size);
+ ucw_off_t size = sbuck_ins_or_join(ins[0], list_pos, join, join_size);
SORT_XTRACE(((b->flags & SBF_SOURCE) ? 1 : 3), "Sorted in memory (%s, %dMB/s)", stk_fsize(size), sorter_speed(ctx, size));
sbuck_drop(b);
return;
outs[0] = join;
outs[1] = NULL;
ctx->twoway_merge(ctx, ins, outs);
- sh_off_t size = sbuck_ins_or_join(NULL, NULL, join, join_size);
+ ucw_off_t size = sbuck_ins_or_join(NULL, NULL, join, join_size);
sorter_stop_timer(ctx, &ctx->total_ext_time);
SORT_TRACE("Mergesort pass %d (final run, %s, %dMB/s)", pass, stk_fsize(size), sorter_speed(ctx, size));
sbuck_drop(ins[0]);
{
clist parts;
cnode *list_pos = b->n.prev;
- sh_off_t join_size;
+ ucw_off_t join_size;
struct sort_bucket *join = sbuck_join_to(b, &join_size);
uns trace_level = (b->flags & SBF_SOURCE) ? 1 : 3;
if (part_cnt <= 1)
{
- sh_off_t size = sbuck_ins_or_join(clist_head(&parts), list_pos, (part_cnt ? NULL : join), join_size);
+ ucw_off_t size = sbuck_ins_or_join(clist_head(&parts), list_pos, (part_cnt ? NULL : join), join_size);
SORT_XTRACE(trace_level, "Sorted in memory (%s, %dMB/s)", stk_fsize(size), sorter_speed(ctx, size));
return;
}
if (clist_empty(&parts))
{
- sh_off_t size = sbuck_ins_or_join((join ? NULL : out), list_pos, join, join_size);
+ ucw_off_t size = sbuck_ins_or_join((join ? NULL : out), list_pos, join, join_size);
SORT_TRACE("Multi-way merge completed (%d ways, %s, %dMB/s)", n, stk_fsize(size), sorter_speed(ctx, size));
return;
}
}
}
-sh_off_t
+ucw_off_t
sbuck_size(struct sort_bucket *b)
{
if ((b->flags & SBF_OPEN_WRITE) && !(b->flags & SBF_SWAPPED_OUT))
int thread_id; // Thread ID (either kernel tid or a counter)
int temp_counter; // Counter for fb-temp.c
struct asio_queue *io_queue; // Async I/O queue for fb-direct.c
- sh_sighandler_t *signal_handlers; // Signal handlers for sighandler.c
+ ucw_sighandler_t *signal_handlers; // Signal handlers for sighandler.c
};
struct ucwlib_context *ucwlib_thread_context(void);