#define MOD(a) a %= BASE
uns
-update_adler32(uns adler, byte *buf, uns len)
+update_adler32(uns adler, const byte *buf, uns len)
{
uns s1 = adler & 0xffff;
uns s2 = (adler >> 16) & 0xffff;
/* Text file parser */
-static byte *name_parse_fb;
+static const byte *name_parse_fb;
static struct fastbuf *parse_fb;
static uns line_num;
/* Parsing multiple files */
static byte *
-parse_fastbuf(byte *name_fb, struct fastbuf *fb, uns depth)
+parse_fastbuf(const byte *name_fb, struct fastbuf *fb, uns depth)
{
byte *msg;
name_parse_fb = name_fb;
}
static int
-load_file(byte *file)
+load_file(const byte *file)
{
cf_init_stack();
struct fastbuf *fb = bopen_try(file, O_RDONLY, 1<<14);
}
static int
-load_string(byte *string)
+load_string(const byte *string)
{
cf_init_stack();
struct fastbuf fb;
- fbbuf_init_read(&fb, string, strlen(string), 0);
+ fbbuf_init_read(&fb, (byte *)string, strlen(string), 0);
byte *msg = parse_fastbuf(NULL, &fb, 0);
return !!msg || done_stack();
}
/* Safe loading and reloading */
int
-cf_reload(byte *file)
+cf_reload(const byte *file)
{
cf_journal_swap();
struct cf_journal_item *oldj = cf_journal_new_transaction(1);
}
int
-cf_load(byte *file)
+cf_load(const byte *file)
{
struct cf_journal_item *oldj = cf_journal_new_transaction(1);
int err = load_file(file);
}
int
-cf_set(byte *string)
+cf_set(const byte *string)
{
struct cf_journal_item *oldj = cf_journal_new_transaction(0);
int err = load_string(string);
enum cf_commit_mode { CF_NO_COMMIT, CF_COMMIT, CF_COMMIT_ALL };
extern struct cf_section cf_sections;
-struct cf_item *cf_find_subitem(struct cf_section *sec, byte *name);
+struct cf_item *cf_find_subitem(struct cf_section *sec, const byte *name);
int cf_commit_all(enum cf_commit_mode cm);
void cf_add_dirty(struct cf_section *sec, void *ptr);
}
static struct cf_item *
-find_item(struct cf_section *curr_sec, byte *name, byte **msg, void **ptr)
+find_item(struct cf_section *curr_sec, const byte *name, byte **msg, void **ptr)
{
*msg = NULL;
if (name[0] == '^') // absolute name instead of relative
}
byte *
-cf_find_item(byte *name, struct cf_item *item)
+cf_find_item(const byte *name, struct cf_item *item)
{
byte *msg;
void *ptr = NULL;
struct cf_section cf_sections; // root section
struct cf_item *
-cf_find_subitem(struct cf_section *sec, byte *name)
+cf_find_subitem(struct cf_section *sec, const byte *name)
{
struct cf_item *ci = sec->cfg;
for (; ci->cls; ci++)
return total;
}
-void bwrite_slow(struct fastbuf *f, void *b, uns l)
+void bwrite_slow(struct fastbuf *f, const void *b, uns l)
{
while (l)
{
/* FastIO on standard files (specify buffer size 0 to enable mmapping) */
-struct fastbuf *bopen(byte *name, uns mode, uns buflen);
-struct fastbuf *bopen_try(byte *name, uns mode, uns buflen);
+struct fastbuf *bopen(const byte *name, uns mode, uns buflen);
+struct fastbuf *bopen_try(const byte *name, uns mode, uns buflen);
struct fastbuf *bopen_tmp(uns buflen);
struct fastbuf *bfdopen(int fd, uns buflen);
struct fastbuf *bfdopen_shared(int fd, uns buflen);
/* FastIO on memory mapped files */
-struct fastbuf *bopen_mm(byte *name, uns mode);
+struct fastbuf *bopen_mm(const byte *name, uns mode);
/* FastIO on file descriptors with limit */
};
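A minimal usage sketch for the open/close declarations above; bgetc() and bclose() belong to the same fastbuf interface and are assumed available here, and O_RDONLY comes from <fcntl.h>:

static uns
count_bytes(const byte *name)
{
  struct fastbuf *f = bopen_try(name, O_RDONLY, 1<<14);  /* 16 KB buffer */
  if (!f)
    return 0;                          /* open failed, errno is set */
  uns total = 0;
  while (bgetc(f) >= 0)                /* bgetc() yields -1 at end of file */
    total++;
  bclose(f);
  return total;
}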
#define FB_ATOMIC(f) ((struct fb_atomic *)(f)->is_fastbuf)
-struct fastbuf *fbatomic_open(byte *name, struct fastbuf *master, uns bufsize, int record_len);
+struct fastbuf *fbatomic_open(const byte *name, struct fastbuf *master, uns bufsize, int record_len);
void fbatomic_internal_write(struct fastbuf *b);
static inline void
return bread_slow(f, b, l, 1);
}
-void bwrite_slow(struct fastbuf *f, void *b, uns l);
-static inline void bwrite(struct fastbuf *f, void *b, uns l)
+void bwrite_slow(struct fastbuf *f, const void *b, uns l);
+static inline void bwrite(struct fastbuf *f, const void *b, uns l)
{
if (bavailw(f) >= l)
{
#define bgets_stk(fb) ({ struct bgets_stk_struct _s; _s.f = (fb); for (bgets_stk_init(&_s); _s.cur_len; _s.cur_buf = alloca(_s.cur_len), bgets_stk_step(&_s)); _s.cur_buf; })
static inline void
-bputs(struct fastbuf *f, byte *b)
+bputs(struct fastbuf *f, const byte *b)
{
bwrite(f, b, strlen(b));
}
static inline void
-bputs0(struct fastbuf *f, byte *b)
+bputs0(struct fastbuf *f, const byte *b)
{
bwrite(f, b, strlen(b)+1);
}
static inline void
-bputsn(struct fastbuf *f, byte *b)
+bputsn(struct fastbuf *f, const byte *b)
{
bputs(f, b);
bputc(f, '\n');
/* Formatted output */
-int bprintf(struct fastbuf *b, char *msg, ...) FORMAT_CHECK(printf,2,3);
-int vbprintf(struct fastbuf *b, char *msg, va_list args);
+int bprintf(struct fastbuf *b, const char *msg, ...) FORMAT_CHECK(printf,2,3);
+int vbprintf(struct fastbuf *b, const char *msg, va_list args);
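For illustration, a hedged sketch of the string-output helpers declared above; only bputs(), bputsn() and bprintf() from this header are used:

static void
write_greeting(struct fastbuf *f, const byte *who)
{
  bputs(f, "Hello, ");               /* raw string, no terminator written */
  bputsn(f, who);                    /* string followed by a newline */
  bprintf(f, "answer=%d\n", 42);     /* printf-style formatted output */
}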
#endif
}
struct fastbuf *
-fbatomic_open(byte *name, struct fastbuf *master, uns bufsize, int record_len)
+fbatomic_open(const byte *name, struct fastbuf *master, uns bufsize, int record_len)
{
struct fb_atomic *F = xmalloc_zero(sizeof(*F));
struct fastbuf *f = &F->fb;
}
static struct fastbuf *
-bfdopen_internal(int fd, uns buflen, byte *name)
+bfdopen_internal(int fd, uns buflen, const byte *name)
{
int namelen = strlen(name) + 1;
struct fb_file *F = xmalloc(sizeof(struct fb_file) + buflen + namelen);
}
struct fastbuf *
-bopen_try(byte *name, uns mode, uns buflen)
+bopen_try(const byte *name, uns mode, uns buflen)
{
int fd = sh_open(name, mode, 0666);
if (fd < 0)
}
struct fastbuf *
-bopen(byte *name, uns mode, uns buflen)
+bopen(const byte *name, uns mode, uns buflen)
{
if (!buflen)
return bopen_mm(name, mode);
}
static struct fastbuf *
-bfmmopen_internal(int fd, byte *name, uns mode)
+bfmmopen_internal(int fd, const byte *name, uns mode)
{
int namelen = strlen(name) + 1;
struct fb_mmap *F = xmalloc(sizeof(struct fb_mmap) + namelen);
}
struct fastbuf *
-bopen_mm(byte *name, uns mode)
+bopen_mm(const byte *name, uns mode)
{
int fd;
#include <alloca.h>
int
-vbprintf(struct fastbuf *b, char *msg, va_list args)
+vbprintf(struct fastbuf *b, const char *msg, va_list args)
{
byte *buf;
int len, r;
}
int
-bprintf(struct fastbuf *b, char *msg, ...)
+bprintf(struct fastbuf *b, const char *msg, ...)
{
va_list args;
int res;
/* Safe loading and reloading of configuration files: conf-input.c */
extern byte *cf_def_file; /* DEFAULT_CONFIG; NULL if already loaded */
-int cf_reload(byte *file);
-int cf_load(byte *file);
-int cf_set(byte *string);
+int cf_reload(const byte *file);
+int cf_load(const byte *file);
+int cf_set(const byte *string);
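A brief sketch of how the loading interface above is typically driven from a program's startup code; die() is the library's usual fatal-error helper (assumed available here), and a non-zero return value is treated as failure:

static void
init_config(const byte *config_file, const byte *set_expr)
{
  /* Fall back to the compiled-in default when no file was given on the
   * command line; cf_set() applies a single "item=value" override. */
  if (cf_load(config_file ? config_file : cf_def_file))
    die("Cannot load configuration");
  if (set_expr && cf_set(set_expr))
    die("Invalid configuration override");
}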
/* Direct access to configuration items: conf-intr.c */
#undef T
struct cf_item;
-byte *cf_find_item(byte *name, struct cf_item *item);
+byte *cf_find_item(const byte *name, struct cf_item *item);
byte *cf_write_item(struct cf_item *item, enum cf_operation op, int number, byte **pars);
/* Debug dumping: conf-dump.c */
#define HAVE_PREAD
static inline sh_off_t
-sh_file_size(byte *name)
+sh_file_size(const byte *name)
{
int fd = sh_open(name, O_RDONLY);
if (fd < 0)
}
byte *
-lizard_decompress_safe(byte *in, struct lizard_buffer *buf, uns expected_length)
+lizard_decompress_safe(const byte *in, struct lizard_buffer *buf, uns expected_length)
/* Decompresses in into buf, sets *ptr to the data, and returns the
* uncompressed length. If an error has occurred, -1 is returned and errno is
* set. The buffer buf is automatically reallocated. SIGSEGV is caught in
#define CHAIN_GOOD_MATCH 32 // we already have a good match => end
static inline uns
-hashf(byte *string)
+hashf(const byte *string)
/* 0..HASH_SIZE-1 */
{
return string[0] ^ (string[1]<<3) ^ (string[2]<<6);
}
static inline byte *
-locate_string(byte *string, int record_id, int head)
+locate_string(const byte *string, int record_id, int head)
/* The strings are recorded into the hash-table regularly, hence there is no
* need to store the pointer there. */
{
string += record_id - head;
if (record_id >= head)
string -= HASH_RECORDS-1;
- return string;
+ return (byte *)string;
}
static inline uns
-find_match(uns record_id, struct hash_record *hash_rec, byte *string, byte *string_end, byte **best_ptr, uns head)
+find_match(uns record_id, struct hash_record *hash_rec, const byte *string, const byte *string_end, byte **best_ptr, uns head)
/* hash_tab[hash] == record_id points to the head of the doubly-linked
* list of strings with the same hash. The records are statically
* stored in circular array hash_rec (with the 1st entry unused), and the
if (*cmp++ == string[4] && *cmp++ == string[5]
&& *cmp++ == string[6] && *cmp++ == string[7])
{
- byte *str = string + 8;
+ const byte *str = string + 8;
while (str <= string_end && *cmp++ == *str++);
}
}
}
static byte *
-flush_copy_command(uns bof, byte *out, byte *start, uns len)
+flush_copy_command(uns bof, byte *out, const byte *start, uns len)
{
if (bof && len <= 238)
*out++ = len + 17;
}
int
-lizard_compress(byte *in, uns in_len, byte *out)
+lizard_compress(const byte *in, uns in_len, byte *out)
/* Requires out to be allocated for at least in_len * LIZARD_MAX_MULTIPLY +
* LIZARD_MAX_ADD. There must be at least LIZARD_NEEDS_CHARS characters
* allocated after in. Returns the actual compressed length. */
{
hash_ptr_t hash_tab[HASH_SIZE];
struct hash_record hash_rec[HASH_RECORDS];
- byte *in_end = in + in_len;
+ const byte *in_end = in + in_len;
byte *out_start = out;
- byte *copy_start = in;
+ const byte *copy_start = in;
uns head = 1; /* 0 is unused */
uns to_delete = 0, bof = 1;
bzero(hash_tab, sizeof(hash_tab)); /* init the hash-table */
}
static inline byte *
-read_unary_value(byte *in, uns *val)
+read_unary_value(const byte *in, uns *val)
{
uns l = 0;
while (!*in++)
l += 255;
l += in[-1];
*val = l;
- return in;
+ return (byte *)in;
}
int
-lizard_decompress(byte *in, byte *out)
+lizard_decompress(const byte *in, byte *out)
/* Requires out to be allocated for the decompressed length, which must be known
* beforehand. It is desirable to lock the following memory page for
* read-only access to prevent buffer overflow. Returns the actual
*/
/* lizard.c */
-int lizard_compress(byte *in, uns in_len, byte *out);
-int lizard_decompress(byte *in, byte *out);
+int lizard_compress(const byte *in, uns in_len, byte *out);
+int lizard_decompress(const byte *in, byte *out);
/* lizard-safe.c */
struct lizard_buffer;
struct lizard_buffer *lizard_alloc(void);
void lizard_free(struct lizard_buffer *buf);
-byte *lizard_decompress_safe(byte *in, struct lizard_buffer *buf, uns expected_length);
+byte *lizard_decompress_safe(const byte *in, struct lizard_buffer *buf, uns expected_length);
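To make the buffer-size contract described above concrete, a hedged round-trip sketch; xmalloc()/xfree() are the library's checked allocators (assumed available), and the caller is expected to have sized restored to the original length:

static int
lizard_roundtrip(const byte *data, uns len, byte *restored)
{
  /* Compression may expand the data slightly, hence the extra room; the
   * input buffer must also have LIZARD_NEEDS_CHARS bytes of slack after it,
   * and decompression needs the original length known out of band. */
  byte *packed = xmalloc(len * LIZARD_MAX_MULTIPLY + LIZARD_MAX_ADD);
  int packed_len = lizard_compress(data, len, packed);
  int restored_len = lizard_decompress(packed, restored);
  xfree(packed);
  return restored_len == (int)len ? packed_len : -1;
}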
/* adler32.c */
-uns update_adler32(uns adler, byte *ptr, uns len);
+uns update_adler32(uns adler, const byte *ptr, uns len);
static inline uns
-adler32(byte *buf, uns len)
+adler32(const byte *buf, uns len)
{
return update_adler32(1, buf, len);
}
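A small sketch showing that the incremental interface above composes: feeding the buffer in chunks to update_adler32(), starting from the same seed 1 that adler32() uses, yields the same checksum as a single call:

static uns
checksum_in_two_parts(const byte *buf, uns len)
{
  uns split = len / 2;
  uns a = update_adler32(1, buf, split);            /* seed 1, first half */
  a = update_adler32(a, buf + split, len - split);  /* second half */
  return a;                                         /* equals adler32(buf, len) */
}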
#define stk_strdup(s) ({ const char *_s=(s); uns _l=strlen(_s)+1; char *_x=alloca(_l); memcpy(_x, _s, _l); _x; })
#define stk_strndup(s,n) ({ const char *_s=(s); uns _l=strnlen(_s,(n)); char *_x=alloca(_l+1); memcpy(_x, _s, _l); _x[_l]=0; _x; })
#define stk_strcat(s1,s2) ({ const char *_s1=(s1); const char *_s2=(s2); uns _l1=strlen(_s1); uns _l2=strlen(_s2); char *_x=alloca(_l1+_l2+1); memcpy(_x,_s1,_l1); memcpy(_x+_l1,_s2,_l2+1); _x; })
-#define stk_strmulticat(s...) ({ const char *_s[]={s}; char *_x=alloca(stk_array_len(_s, ARRAY_SIZE(_s)-1)); stk_array_join(_x, _s, ARRAY_SIZE(_s)-1, 0); _x; })
+#define stk_strmulticat(s...) ({ char *_s[]={s}; char *_x=alloca(stk_array_len(_s, ARRAY_SIZE(_s)-1)); stk_array_join(_x, _s, ARRAY_SIZE(_s)-1, 0); _x; })
#define stk_strarraycat(s,n) ({ char **_s=(s); int _n=(n); char *_x=alloca(stk_array_len(_s,_n)); stk_array_join(_x, _s, _n, 0); _x; })
#define stk_strjoin(s,n,sep) ({ char **_s=(s); int _n=(n); char *_x=alloca(stk_array_len(_s,_n)+_n-1); stk_array_join(_x, _s, _n, (sep)); _x; })
#define stk_printf(f...) ({ uns _l=stk_printf_internal(f); char *_x=alloca(_l); sprintf(_x, f); _x; })
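For illustration, typical uses of the alloca-backed helpers above; the results live in the caller's stack frame and must not be returned or stored past it:

static void
stk_example(const char *dir, const char *file)
{
  char *path = stk_strcat(dir, file);            /* concatenation on the stack */
  char *msg = stk_printf("opening %s", path);    /* formatted, stack-allocated */
  (void)msg;                                     /* sketch only */
}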