const byte *s, *se;
byte *d, *de;
- uns code;
+ uint code;
int e;
#ifdef CONV_READ_STD
#endif
#ifdef CONV_READ_UTF8
- uns cc;
+ uint cc;
#endif
if (unlikely(c->state))
else
{
byte *k = string_table + code - 0x100;
- uns len = *k++;
- if (unlikely((uns)(de - d) < len))
+ uint len = *k++;
+ if (unlikely((uint)(de - d) < len))
{
c->state = SEQ_WRITE;
c->string_at = k;
{
void *p = &c->code;
c->string_at = p;
- uns code = c->code;
+ uint code = c->code;
c->string_at = p;
if (code < 0xd800 || code - 0xe000 < 0x2000)
{}
{
void *p = &c->code;
c->string_at = p;
- uns code = c->code;
+ uint code = c->code;
c->string_at = p;
if (code < 0xd800 || code - 0xe000 < 0x2000)
{}
unsigned short *x_to_out = c->x_to_out;
const unsigned char *s, *se;
unsigned char *d, *de, *k;
- unsigned int len, e;
+ uint len, e;
if (unlikely(c->state))
goto slow;
de = c->dest_end;
while (s < se)
{
- unsigned int code = x_to_out[in_to_x[*s]];
+ uint code = x_to_out[in_to_x[*s]];
if (code < 0x100)
{
if (unlikely(d >= de))
}
else
{
- static uns lookup[] = {
+ static uint lookup[] = {
[CONV_CHARSET_UTF8] = 1,
[CONV_CHARSET_UTF16_BE] = 2,
[CONV_CHARSET_UTF16_LE] = 3,
{ conv_utf16_be_to_std, conv_utf16_be_to_utf8, conv_none, conv_utf16_be_to_utf16_le },
{ conv_utf16_le_to_std, conv_utf16_le_to_utf8, conv_utf16_be_to_utf16_le, conv_none },
};
- uns src_idx = ((uns)src < ARRAY_SIZE(lookup)) ? lookup[src] : 0;
- uns dest_idx = ((uns)dest < ARRAY_SIZE(lookup)) ? lookup[dest] : 0;
+ uint src_idx = ((uint)src < ARRAY_SIZE(lookup)) ? lookup[src] : 0;
+ uint dest_idx = ((uint)dest < ARRAY_SIZE(lookup)) ? lookup[dest] : 0;
c->convert = tab[src_idx][dest_idx];
c->in_to_x = src_idx ? NULL : input_to_x[src];
c->x_to_out = dest_idx ? NULL : x_to_output[dest];
c->state = 0;
}
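/* A minimal sketch (not part of the patch) of what this dispatch selects,
 * using only the table rows visible in this hunk: */
struct conv_context c;
conv_init(&c);
conv_set_charset(&c, CONV_CHARSET_UTF16_BE, CONV_CHARSET_UTF16_LE);
/* => src_idx = 2, dest_idx = 3, hence c.convert = tab[2][3] =
 * conv_utf16_be_to_utf16_le, while c.in_to_x and c.x_to_out stay NULL --
 * the x-code translation tables are needed only on the generic index-0 path. */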
-unsigned int
-conv_x_to_ucs(unsigned int x)
+uint
+conv_x_to_ucs(uint x)
{
return x_to_uni[x];
}
-unsigned int
-conv_ucs_to_x(unsigned int ucs)
+uint
+conv_ucs_to_x(uint ucs)
{
return uni_to_x[ucs >> 8U][ucs & 0xff];
}
-unsigned int
+uint
conv_x_count(void)
{
return sizeof(x_to_uni) / sizeof(x_to_uni[0]);
}
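/* The x-code is a compact intermediate code shared by the table-driven
 * charsets; uni_to_x[] is a two-level page table indexed by the high and
 * low byte of the UCS value, with 256 meaning "no mapping" (as tested in
 * conv_ucs_to_out() below). A round-trip sketch, assuming the character
 * exists in the compiled-in tables: */
uint x = conv_ucs_to_x(0x00e9);          /* U+00E9, LATIN SMALL LETTER E WITH ACUTE */
if (x != 256)
  ASSERT(conv_x_to_ucs(x) == 0x00e9);    /* x-codes map back to the same UCS value */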
int
-conv_in_to_ucs(struct conv_context *c, unsigned int y)
+conv_in_to_ucs(struct conv_context *c, uint y)
{
return x_to_uni[c->in_to_x[y]];
}
-int conv_ucs_to_out(struct conv_context *c, unsigned int ucs)
+int conv_ucs_to_out(struct conv_context *c, uint ucs)
{
- uns x = uni_to_x[ucs >> 8U][ucs & 0xff];
+ uint x = uni_to_x[ucs >> 8U][ucs & 0xff];
if (x == 256 || c->x_to_out[x] >= 256)
return -1;
else
int source_charset, dest_charset;
unsigned short int *in_to_x;
unsigned short int *x_to_out;
- unsigned int state, code, remains;
+ uint state, code, remains;
unsigned char *string_at;
};
};
/* Conversion of a single character between current non-UTF8 charset and Unicode */
-int conv_in_to_ucs(struct conv_context *c, unsigned int y);
-int conv_ucs_to_out(struct conv_context *c, unsigned int ucs);
+int conv_in_to_ucs(struct conv_context *c, uint y);
+int conv_ucs_to_out(struct conv_context *c, uint ucs);
/* For those brave ones who want to mess with charconv internals */
-unsigned int conv_x_to_ucs(unsigned int x);
-unsigned int conv_ucs_to_x(unsigned int ucs);
-unsigned int conv_x_count(void);
+uint conv_x_to_ucs(uint x);
+uint conv_ucs_to_x(uint ucs);
+uint conv_x_count(void);
/* Charset names */
flags = conv_run(ct);
if (ct->dest > ct->dest_start)
bdirect_write_commit(FB_CC(f)->orig, ct->dest);
- uns l = bdirect_write_prepare(FB_CC(f)->orig, &ct->dest_start);
+ uint l = bdirect_write_prepare(FB_CC(f)->orig, &ct->dest_start);
ct->dest = ct->dest_start;
ct->dest_end = ct->dest + l;
}
do
{
byte *src;
- uns len = bdirect_read_prepare(FB_CC(f)->orig, &src);
+ uint len = bdirect_read_prepare(FB_CC(f)->orig, &src);
if (!len)
break;
ct->source = src;
#include <alloca.h>
byte *
-mp_strconv(struct mempool *mp, const byte *s, uns in_cs, uns out_cs)
+mp_strconv(struct mempool *mp, const byte *s, uint in_cs, uint out_cs)
{
if (in_cs == out_cs)
return mp_strdup(mp, s);
struct conv_context c;
char *b[32];
- uns bs[32], n = 0, sum = 0;
- uns l = strlen(s) + 1;
+ uint bs[32], n = 0, sum = 0;
+ uint l = strlen(s) + 1;
conv_init(&c);
conv_set_charset(&c, in_cs, out_cs);
l <<= 1;
c.dest_start = c.dest = b[n] = alloca(l);
c.dest_end = c.dest_start + l;
- uns r = conv_run(&c);
+ uint r = conv_run(&c);
sum += bs[n++] = c.dest - c.dest_start;
if (r & CONV_SOURCE_END)
{
c.dest_start = c.dest = mp_alloc(mp, sum);
- for (uns i = 0; i < n; i++)
+ for (uint i = 0; i < n; i++)
{
memcpy(c.dest, b[i], bs[i]);
c.dest += bs[i];
#define mp_strconv ucw_mp_strconv
#endif
-byte *mp_strconv(struct mempool *mp, const byte *s, uns cs_in, uns cs_out);
+byte *mp_strconv(struct mempool *mp, const byte *s, uint cs_in, uint cs_out);
-static inline byte *mp_strconv_to_utf8(struct mempool *mp, const byte *s, uns cs_in)
+static inline byte *mp_strconv_to_utf8(struct mempool *mp, const byte *s, uint cs_in)
{ return mp_strconv(mp, s, cs_in, CONV_CHARSET_UTF8); }
-static inline byte *mp_strconv_from_utf8(struct mempool *mp, const byte *s, uns cs_out)
+static inline byte *mp_strconv_from_utf8(struct mempool *mp, const byte *s, uint cs_out)
{ return mp_strconv(mp, s, CONV_CHARSET_UTF8, cs_out); }
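/* A hypothetical usage sketch: mp_strconv() converts through temporary
 * alloca() buffers (growing the size estimate as needed) and then copies
 * the collected pieces into one mempool allocation, so the result lives as
 * long as the pool. CONV_CHARSET_LATIN1 is assumed to be among the
 * compiled-in charsets: */
struct mempool *mp = mp_new(4096);
const byte *latin1 = (const byte *)"na\xefve";   /* "naive" with i-diaeresis, ISO-8859-1 */
byte *utf8 = mp_strconv_to_utf8(mp, latin1, CONV_CHARSET_LATIN1);
/* utf8 now points into mp and holds the UTF-8 bytes "na\xc3\xafve" */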
#endif
int
find_charset_by_name(const char *c)
{
- unsigned int i;
+ uint i;
for (i = 0; i < CONV_NUM_CHARSETS; i++)
if (!strcasecmp(cs_names[i], c))
#define INITIAL_MIN_SIZE 16
#define INITIAL_SCALE 2
-uns
-stk_strconv_init(struct conv_context *c, const byte *s, uns in_cs, uns out_cs)
+uint
+stk_strconv_init(struct conv_context *c, const byte *s, uint in_cs, uint out_cs)
{
- uns l = strlen(s);
+ uint l = strlen(s);
if (in_cs == out_cs)
{
c->source = s;
return l * INITIAL_SCALE + 1;
}
-uns
-stk_strconv_step(struct conv_context *c, byte *buf, uns len)
+uint
+stk_strconv_step(struct conv_context *c, byte *buf, uint len)
{
if (!c->source_end)
{
}
if (c->dest_start)
{
- uns l = c->dest_end - c->dest_start;
+ uint l = c->dest_end - c->dest_start;
memcpy(buf, c->dest_start, l);
c->dest = buf + l;
}
/* The following macros convert strings between given charsets (CONV_CHARSET_x). */
#define stk_strconv(s, cs_in, cs_out) \
- ({ struct conv_context _c; uns _l=stk_strconv_init(&_c, (s), (cs_in), (cs_out)); \
+ ({ struct conv_context _c; uint _l=stk_strconv_init(&_c, (s), (cs_in), (cs_out)); \
while (_l) _l=stk_strconv_step(&_c, alloca(_l), _l); _c.dest_start; })
#define stk_strconv_to_utf8(s, cs_in) stk_strconv(s, cs_in, CONV_CHARSET_UTF8)
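/* Usage sketch: the statement expression expands in the caller's frame, so
 * the alloca()ted result stays valid until the calling function returns --
 * nothing to free (CONV_CHARSET_LATIN2 assumed to exist in this build): */
byte *utf8 = stk_strconv(input, CONV_CHARSET_LATIN2, CONV_CHARSET_UTF8);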
/* Internals */
-uns stk_strconv_init(struct conv_context *c, const byte *s, uns cs_in, uns cs_out);
-uns stk_strconv_step(struct conv_context *c, byte *buf, uns len);
+uint stk_strconv_init(struct conv_context *c, const byte *s, uint cs_in, uint cs_out);
+uint stk_strconv_step(struct conv_context *c, byte *buf, uint len);
#endif
#include <charset/U-ligatures.h>
const u16 *
-Uexpand_lig(uns x)
+Uexpand_lig(uint x)
{
return _U_lig_hash[x % LIG_HASH_SIZE];
}
extern const byte *_U_cat[];
extern const u16 *_U_upper[], *_U_lower[], *_U_unaccent[];
-static inline uns Ucategory(uns x)
+static inline uint Ucategory(uint x)
{
if (_U_cat[x >> 8U])
return _U_cat[x >> 8U][x & 0xff];
return 0;
}
-static inline uns Utoupper(uns x)
+static inline uint Utoupper(uint x)
{
- uns w = (_U_upper[x >> 8U]) ? _U_upper[x >> 8U][x & 0xff] : 0;
+ uint w = (_U_upper[x >> 8U]) ? _U_upper[x >> 8U][x & 0xff] : 0;
return w ? w : x;
}
-static inline uns Utolower(uns x)
+static inline uint Utolower(uint x)
{
- uns w = (_U_lower[x >> 8U]) ? _U_lower[x >> 8U][x & 0xff] : 0;
+ uint w = (_U_lower[x >> 8U]) ? _U_lower[x >> 8U][x & 0xff] : 0;
return w ? w : x;
}
-static inline uns Uunaccent(uns x)
+static inline uint Uunaccent(uint x)
{
- uns w = (_U_unaccent[x >> 8U]) ? _U_unaccent[x >> 8U][x & 0xff] : 0;
+ uint w = (_U_unaccent[x >> 8U]) ? _U_unaccent[x >> 8U][x & 0xff] : 0;
return w ? w : x;
}
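/* All the _U_* tables share this two-level layout: the top level is
 * indexed by the high byte of the code point and contains NULL for pages
 * with no data, so a lookup is at most two loads, and the "w ? w : x"
 * idiom returns the input unchanged where no mapping exists. Illustrative
 * checks, assuming the usual Unicode tables are compiled in: */
ASSERT(Utoupper('a') == 'A');
ASSERT(Utolower(0x00c9) == 0x00e9);      /* U+00C9 -> U+00E9 (E with acute) */
ASSERT(Uunaccent(0x00e9) == 'e');        /* accent stripped */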
-extern const u16 *Uexpand_lig(uns x);
+extern const u16 *Uexpand_lig(uint x);
enum unicode_char_type {
_U_LETTER = 1, /* Letters */
#include <string.h>
#include <math.h>
-uns color_space_channels[COLOR_SPACE_MAX] = {
+uint color_space_channels[COLOR_SPACE_MAX] = {
[COLOR_SPACE_UNKNOWN] = 0,
[COLOR_SPACE_UNKNOWN_1] = 1,
[COLOR_SPACE_UNKNOWN_2] = 2,
};
byte *
-color_space_id_to_name(uns id)
+color_space_id_to_name(uint id)
{
ASSERT(id < COLOR_SPACE_MAX);
return color_space_name[id];
}
-uns
+uint
color_space_name_to_id(byte *name)
{
- for (uns i = 1; i < COLOR_SPACE_MAX; i++)
+ for (uint i = 1; i < COLOR_SPACE_MAX; i++)
if (color_space_name[i] && !strcasecmp(name, color_space_name[i]))
return i;
return 0;
struct color color_white = { .c = { 255 }, .color_space = COLOR_SPACE_GRAYSCALE };
int
-color_get(struct color *color, byte *src, uns src_space)
+color_get(struct color *color, byte *src, uint src_space)
{
color->color_space = src_space;
memcpy(color->c, src, color_space_channels[src_space]);
}
int
-color_put(struct image_context *ctx, struct color *color, byte *dest, uns dest_space)
+color_put(struct image_context *ctx, struct color *color, byte *dest, uint dest_space)
{
switch (dest_space)
{
case COLOR_SPACE_CMYK:
{
double rgb[3], cmyk[4];
- for (uns i = 0; i < 4; i++)
+ for (uint i = 0; i < 4; i++)
cmyk[i] = color->c[i] * (1.0 / 255);
cmyk_to_rgb_exact(rgb, cmyk);
- for (uns i = 0; i < 3; i++)
+ for (uint i = 0; i < 3; i++)
dest[i] = CLAMP(rgb[i] * 255, 0, 255);
}
return 1;
case COLOR_SPACE_RGB:
{
double rgb[3], cmyk[4];
- for (uns i = 0; i < 3; i++)
+ for (uint i = 0; i < 3; i++)
rgb[i] = color->c[i] * (1.0 / 255);
rgb_to_cmyk_exact(cmyk, rgb);
- for (uns i = 0; i < 4; i++)
+ for (uint i = 0; i < 4; i++)
dest[i] = CLAMP(cmyk[i] * 255, 0, 255);
}
return 1;
/* Y = 0.29900 * R + 0.58700 * G + 0.11400 * B
* Cb = -0.16874 * R - 0.33126 * G + 0.50000 * B + CENTER
* Cr = 0.50000 * R - 0.41869 * G - 0.08131 * B + CENTER */
- uns r = src[0], g = src[1], b = src[2];
+ uint r = src[0], g = src[1], b = src[2];
dest[0] = (19595 * r + 38470 * g + 7471 * b) / 0x10000;
dest[1] = (0x800000 + 0x8000 * b - 11058 * r - 21710 * g) / 0x10000;
dest[2] = (0x800000 + 0x8000 * r - 27439 * g - 5329 * b) / 0x10000;
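/* A sketch, not part of the patch: the constants above are the weights
 * from the comment scaled by 0x10000 (19595 = round(0.29900 * 65536) and
 * so on), and the Cb/Cr constants are nudged off the exact roundings so
 * that each pair sums to exactly 0x8000: */
ASSERT(19595 + 38470 + 7471 == 0x10000);  /* white maps to Y = 255 exactly */
ASSERT(11058 + 21710 == 0x8000);          /* Cb stays at the 128 center for gray */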
static inline void
pixel_conv_cmyk_to_rgb(byte *dest, byte *src)
{
- uns d = (255 - src[3]) * (0xffffffffU / 255 /255);
+ uint d = (255 - src[3]) * (0xffffffffU / 255 / 255);
dest[0] = d * (255 - src[0]) >> 24;
dest[1] = d * (255 - src[1]) >> 24;
dest[2] = d * (255 - src[2]) >> 24;
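/* The magic multiplier is 0xffffffffU / 255 / 255 == 66051: it folds the
 * division in the exact formula RGB = (255 - C) * (255 - K) / 255 into a
 * multiply and a shift. (255 - K) * 66051 * (255 - C) is at most
 * 4294966275, which still fits in 32 bits, and >> 24 lands in 0..255;
 * e.g. K = 0, C = 0 gives d = 255 * 66051 and d * 255 >> 24 == 255. */
ASSERT(0xffffffffU / 255 / 255 == 66051);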
static inline void
pixel_conv_rgb_to_cmyk(byte *dest, byte *src)
{
- uns k = MAX(src[0], src[1]);
+ uint k = MAX(src[0], src[1]);
k = MAX(k, src[2]);
- uns d = fast_div_u32_u8(0x7fffffffU, k); /* == 0 for zero K */
+ uint d = fast_div_u32_u8(0x7fffffffU, k); /* == 0 for zero K */
dest[0] = (d * (k - src[0])) >> 23;
dest[1] = (d * (k - src[1])) >> 23;
dest[2] = (d * (k - src[2])) >> 23;
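/* Reasoning sketch: k is the brightest channel (255 - K in CMYK terms) and
 * fast_div_u32_u8() looks up an approximate reciprocal, d ~= 2^31 / k, so
 * (d * (k - src[i])) >> 23 ~= 255 * (k - src[i]) / k -- the standard
 * RGB->CMYK formula with the per-pixel division replaced by a table lookup
 * (fast_div_tab[] is declared in the internals header at the end of this
 * patch). E.g. pure red (255,0,0): k = 255, d = 8421504, so C = 0 and
 * M = Y = (d * 255) >> 23 == 255; the K = 255 - k store is outside this
 * excerpt. */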
pixel_conv_ycck_to_rgb(byte *dest, byte *src)
{
int y = src[0], cb = src[1] - 128, cr = src[2] - 128;
- uns d = (255 - src[3]) * (0xffffffffU / 255 /255);
+ uint d = (255 - src[3]) * (0xffffffffU / 255 / 255);
dest[0] = (d * CLAMP(y + (91881 * cr) / 0x10000, 0, 255) >> 24);
dest[1] = (d * CLAMP(y - (22553 * cb + 46801 * cr) / 0x10000, 0, 255) >> 24);
dest[2] = (d * CLAMP(y + (116129 * cb) / 0x10000, 0, 255) >> 24);
static inline void
pixel_conv_rgb_to_ycck(byte *dest, byte *src)
{
- uns k = MAX(src[0], src[1]);
+ uint k = MAX(src[0], src[1]);
k = MAX(k, src[2]);
- uns d = fast_div_u32_u8(0x7fffffffU, k); /* == 0 for zero K */
- uns r = 255 - ((d * (k - src[0])) >> 23);
- uns g = 255 - ((d * (k - src[1])) >> 23);
- uns b = 255 - ((d * (k - src[2])) >> 23);
+ uint d = fast_div_u32_u8(0x7fffffffU, k); /* == 0 for zero K */
+ uint r = 255 - ((d * (k - src[0])) >> 23);
+ uint g = 255 - ((d * (k - src[1])) >> 23);
+ uint b = 255 - ((d * (k - src[2])) >> 23);
dest[0] = (19595 * r + 38470 * g + 7471 * b) / 0x10000;
dest[1] = (0x800000 + 0x8000 * b - 11058 * r - 21710 * g) / 0x10000;
dest[2] = (0x800000 + 0x8000 * r - 27439 * g - 5329 * b) / 0x10000;
return;
else if (dest->pixel_size != src->pixel_size)
{
- uns channels = MIN(dest->channels, src->channels);
+ uint channels = MIN(dest->channels, src->channels);
switch (channels)
{
case 1:
# define IMAGE_WALK_DOUBLE
# define IMAGE_WALK_IMAGE dest
# define IMAGE_WALK_SEC_IMAGE src
-# define IMAGE_WALK_DO_STEP do{ for (uns i = 0; i < channels; i++) walk_pos[i] = walk_sec_pos[i]; }while(0)
+# define IMAGE_WALK_DO_STEP do{ for (uint i = 0; i < channels; i++) walk_pos[i] = walk_sec_pos[i]; }while(0)
# include <images/image-walk.h>
return;
}
{
byte *s = src->pixels;
byte *d = dest->pixels;
- for (uns row = src->rows; row--; )
+ for (uint row = src->rows; row--; )
{
memcpy(d, s, src->row_pixels_size);
d += dest->row_size;
}
}
-static inline uns
-image_conv_alpha_func(uns value, uns alpha, uns acoef, uns bcoef)
+static inline uint
+image_conv_alpha_func(uint value, uint alpha, uint acoef, uint bcoef)
{
- return ((uns)(acoef + (int)alpha * (int)(value - bcoef)) * (0xffffffffU / 255 / 255)) >> 24;
+ return ((uint)(acoef + (int)alpha * (int)(value - bcoef)) * (0xffffffffU / 255 / 255)) >> 24;
}
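/* With the coefficients the callers set up below (acoef = 255 * background,
 * bcoef = background), this evaluates (alpha * value + (255 - alpha) *
 * background) / 255 -- ordinary compositing over an opaque background --
 * reusing the multiply-by-(0xffffffffU / 255 / 255) substitute for the
 * division by 255. E.g. a half-transparent white pixel over black: */
ASSERT(image_conv_alpha_func(255, 128, 0, 0) == 128);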
static int
byte background[IMAGE_MAX_CHANNELS];
if (unlikely(!color_put(ctx, &opt->background, background, dest->flags & IMAGE_COLOR_SPACE)))
return 0;
- uns a[IMAGE_MAX_CHANNELS], b[IMAGE_MAX_CHANNELS];
- for (uns i = 0; i < dest->channels; i++)
+ uint a[IMAGE_MAX_CHANNELS], b[IMAGE_MAX_CHANNELS];
+ for (uint i = 0; i < dest->channels; i++)
a[i] = 255 * (b[i] = background[i]);
switch (dest->channels)
{
# define IMAGE_WALK_IMAGE dest
# define IMAGE_WALK_SEC_IMAGE src
# define IMAGE_WALK_DOUBLE
-# define IMAGE_WALK_DO_STEP do{ for (uns i = 0; i < dest->channels; i++) \
+# define IMAGE_WALK_DO_STEP do{ for (uint i = 0; i < dest->channels; i++) \
walk_pos[i] = image_conv_alpha_func(walk_pos[i], walk_sec_pos[src->channels - 1], a[i], b[i]); }while(0)
# include <images/image-walk.h>
}
byte background[IMAGE_MAX_CHANNELS];
if (unlikely(!color_put(ctx, &opt->background, background, dest->flags & IMAGE_COLOR_SPACE)))
return 0;
- uns a[IMAGE_MAX_CHANNELS], b[IMAGE_MAX_CHANNELS];
- for (uns i = 0; i < dest->channels; i++)
+ uint a[IMAGE_MAX_CHANNELS], b[IMAGE_MAX_CHANNELS];
+ for (uint i = 0; i < dest->channels; i++)
a[i] = 255 * (b[i] = background[i]);
switch (dest->channels)
{
# define IMAGE_WALK_IMAGE dest
# define IMAGE_WALK_SEC_IMAGE src
# define IMAGE_WALK_DOUBLE
-# define IMAGE_WALK_DO_STEP do{ for (uns i = 0; i < dest->channels; i++) \
+# define IMAGE_WALK_DO_STEP do{ for (uint i = 0; i < dest->channels; i++) \
walk_pos[i] = image_conv_alpha_func(walk_sec_pos[i], walk_sec_pos[src->channels - 1], a[i], b[i]); }while(0)
# include <images/image-walk.h>
}
static inline void
correct_gamma_detailed(double dest[3], double src[3], const struct color_space_gamma_info *info)
{
- for (uns i = 0; i < 3; i++)
+ for (uint i = 0; i < 3; i++)
if (src[i] > info->transition)
dest[i] = (1 + info->offset) * pow(src[i], info->detailed_gamma) - info->offset;
else
static inline void
invert_gamma_detailed(double dest[3], double src[3], const struct color_space_gamma_info *info)
{
- for (uns i = 0; i < 3; i++)
+ for (uint i = 0; i < 3; i++)
if (src[i] > info->transition * info->slope)
dest[i] = pow((src[i] + info->offset) / (1 + info->offset), 1 / info->detailed_gamma);
else
else
{
double d = 1 / (1 - cmyk[3]);
- for (uns i = 0; i < 3; i++)
+ for (uint i = 0; i < 3; i++)
cmyk[i] = d * (cmyk[i] - cmyk[3]);
}
}
cmyk_to_rgb_exact(double rgb[3], double cmyk[4])
{
double d = 1 - cmyk[1];
- for (uns i = 0; i < 3; i++)
+ for (uint i = 0; i < 3; i++)
rgb[i] = d * (1 - cmyk[i]);
}
srgb_to_luv_init(void)
{
DBG("Initializing sRGB -> Luv table");
- for (uns i = 0; i < 256; i++)
+ for (uint i = 0; i < 256; i++)
{
double t = i / 255.;
if (t > 0.04045)
t = t * (1 / 12.92);
srgb_to_luv_tab1[i] = CLAMP(t * 0xfff + 0.5, 0, 0xfff);
}
- for (uns i = 0; i < (9 << SRGB_TO_LUV_TAB2_SIZE); i++)
+ for (uint i = 0; i < (9 << SRGB_TO_LUV_TAB2_SIZE); i++)
{
double t = i / (double)((9 << SRGB_TO_LUV_TAB2_SIZE) - 1);
if (t > 0.008856)
CLAMP(t * ((1 << SRGB_TO_LUV_TAB2_SCALE) - 1) + 0.5,
0, (1 << SRGB_TO_LUV_TAB2_SCALE) - 1);
}
- for (uns i = 0; i < (20 << SRGB_TO_LUV_TAB3_SIZE); i++)
+ for (uint i = 0; i < (20 << SRGB_TO_LUV_TAB3_SIZE); i++)
{
srgb_to_luv_tab3[i] = i ? (13 << (SRGB_TO_LUV_TAB3_SCALE + SRGB_TO_LUV_TAB3_SIZE)) / i : 0;
}
}
void
-srgb_to_luv_pixels(byte *dest, byte *src, uns count)
+srgb_to_luv_pixels(byte *dest, byte *src, uint count)
{
while (count--)
{
struct color_interpolation_node *color_interpolation_table;
/* Returns volume of a given tetrahedron multiplied by 6 */
-static inline uns
-tetrahedron_volume(uns *v1, uns *v2, uns *v3, uns *v4)
+static inline uint
+tetrahedron_volume(uint *v1, uint *v2, uint *v3, uint *v4)
{
int a[3], b[3], c[3];
- for (uns i = 0; i < 3; i++)
+ for (uint i = 0; i < 3; i++)
{
a[i] = v2[i] - v1[i];
b[i] = v3[i] - v1[i];
}
static void
-interpolate_tetrahedron(struct color_interpolation_node *n, uns *p, const uns *c)
+interpolate_tetrahedron(struct color_interpolation_node *n, uint *p, const uint *c)
{
- uns v[4][3];
- for (uns i = 0; i < 4; i++)
+ uint v[4][3];
+ for (uint i = 0; i < 4; i++)
{
v[i][0] = (c[i] & 0001) ? (1 << COLOR_CONV_OFS) : 0;
v[i][1] = (c[i] & 0010) ? (1 << COLOR_CONV_OFS) : 0;
((c[i] & 0010) ? (1 << COLOR_CONV_SIZE) : 0) +
((c[i] & 0100) ? (1 << (COLOR_CONV_SIZE * 2)) : 0);
}
- uns vol = tetrahedron_volume(v[0], v[1], v[2], v[3]);
+ uint vol = tetrahedron_volume(v[0], v[1], v[2], v[3]);
n->mul[0] = ((tetrahedron_volume(p, v[1], v[2], v[3]) << 8) + (vol >> 1)) / vol;
n->mul[1] = ((tetrahedron_volume(v[0], p, v[2], v[3]) << 8) + (vol >> 1)) / vol;
n->mul[2] = ((tetrahedron_volume(v[0], v[1], p, v[3]) << 8) + (vol >> 1)) / vol;
n->mul[3] = ((tetrahedron_volume(v[0], v[1], v[2], p) << 8) + (vol >> 1)) / vol;
- uns j;
+ uint j;
for (j = 0; j < 4; j++)
if (n->mul[j])
break;
- for (uns i = 0; i < 4; i++)
+ for (uint i = 0; i < 4; i++)
if (n->mul[i] == 0)
n->ofs[i] = n->ofs[j];
}
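/* The four n->mul[] weights are barycentric coordinates of p inside the
 * tetrahedron in 8.8 fixed point: each is the volume of the sub-tetrahedron
 * opposite a corner over the total volume, scaled by 256 and rounded by the
 * + (vol >> 1) term, so they sum to roughly 256 and the per-pixel stage can
 * presumably blend the corner samples as (mul[0]*s0 + ... + mul[3]*s3) >> 8.
 * For p at the centroid every sub-volume is a quarter of the total, so each
 * mul[i] comes out as 64. The trailing loop points zero-weight corners at a
 * used offset, presumably so all four taps can be read unconditionally. */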
DBG("Initializing color interpolation table");
struct color_interpolation_node *n = color_interpolation_table =
xmalloc(sizeof(struct color_interpolation_node) << (COLOR_CONV_OFS * 3));
- uns p[3];
+ uint p[3];
for (p[2] = 0; p[2] < (1 << COLOR_CONV_OFS); p[2]++)
for (p[1] = 0; p[1] < (1 << COLOR_CONV_OFS); p[1]++)
for (p[0] = 0; p[0] < (1 << COLOR_CONV_OFS); p[0]++)
{
- uns index;
- static const uns tetrahedra[5][4] = {
+ uint index;
+ static const uint tetrahedra[5][4] = {
{0000, 0001, 0010, 0100},
{0110, 0111, 0100, 0010},
{0101, 0100, 0111, 0001},
return;
struct color_grid_node *g = *grid = xmalloc((sizeof(struct color_grid_node)) << (COLOR_CONV_SIZE * 3));
double src[3], dest[3];
- for (uns k = 0; k < (1 << COLOR_CONV_SIZE); k++)
+ for (uint k = 0; k < (1 << COLOR_CONV_SIZE); k++)
{
src[2] = k * (255 / (double)((1 << COLOR_CONV_SIZE) - 1));
- for (uns j = 0; j < (1 << COLOR_CONV_SIZE); j++)
+ for (uint j = 0; j < (1 << COLOR_CONV_SIZE); j++)
{
src[1] = j * (255 / (double)((1 << COLOR_CONV_SIZE) - 1));
- for (uns i = 0; i < (1 << COLOR_CONV_SIZE); i++)
+ for (uint i = 0; i < (1 << COLOR_CONV_SIZE); i++)
{
src[0] = i * (255 / (double)((1 << COLOR_CONV_SIZE) - 1));
func(dest, src);
}
void
-color_conv_pixels(byte *dest, byte *src, uns count, struct color_grid_node *grid)
+color_conv_pixels(byte *dest, byte *src, uint count, struct color_grid_node *grid)
{
while (count--)
{
src[2] = (color >> 16) & 255;
color_conv_pixel(dest, src, grid);
double src2[3], dest2[3];
- for (uns i = 0; i < 3; i++)
+ for (uint i = 0; i < 3; i++)
src2[i] = src[i];
func(dest2, src2);
double err = 0;
- for (uns i = 0; i < 3; i++)
+ for (uint i = 0; i < 3; i++)
err += (dest[i] - dest2[i]) * (dest[i] - dest2[i]);
return err;
}
src[2] = (color >> 16) & 255;
test(dest, src);
double src2[3], dest2[3];
- for (uns i = 0; i < 3; i++)
+ for (uint i = 0; i < 3; i++)
src2[i] = src[i];
func(dest2, src2);
double err = 0;
- for (uns i = 0; i < 3; i++)
+ for (uint i = 0; i < 3; i++)
err += (dest[i] - dest2[i]) * (dest[i] - dest2[i]);
return err;
}
test_grid(byte *name, struct color_grid_node *grid, color_conv_func func)
{
double max_err = 0, sum_err = 0;
- uns count = 100000;
- for (uns i = 0; i < count; i++)
+ uint count = 100000;
+ for (uint i = 0; i < count; i++)
{
double err = conv_error(random_max(0x1000000), grid, func);
max_err = MAX(err, max_err);
test_func(byte *name, test_fn test, color_conv_func func)
{
double max_err = 0, sum_err = 0;
- uns count = 100000;
- for (uns i = 0; i < count; i++)
+ uint count = 100000;
+ for (uint i = 0; i < count; i++)
{
double err = func_error(random_max(0x1000000), test, func);
max_err = MAX(err, max_err);
#define CNT 1000000
#define TESTS 10
byte *a = xmalloc(3 * CNT), *b = xmalloc(3 * CNT);
- for (uns i = 0; i < 3 * CNT; i++)
+ for (uint i = 0; i < 3 * CNT; i++)
a[i] = random_max(256);
timestamp_t timer;
init_timer(&timer);
- for (uns i = 0; i < TESTS; i++)
+ for (uint i = 0; i < TESTS; i++)
memcpy(b, a, CNT * 3);
DBG("memcpy time=%d", get_timer(&timer));
init_timer(&timer);
- for (uns i = 0; i < TESTS; i++)
+ for (uint i = 0; i < TESTS; i++)
srgb_to_luv_pixels(b, a, CNT);
DBG("direct time=%d", get_timer(&timer));
init_timer(&timer);
- for (uns i = 0; i < TESTS; i++)
+ for (uint i = 0; i < TESTS; i++)
color_conv_pixels(b, a, CNT, srgb_to_luv_grid);
DBG("grid time=%d", get_timer(&timer));
#endif
COLOR_SPACE_MAX
};
-extern uns color_space_channels[COLOR_SPACE_MAX];
+extern uint color_space_channels[COLOR_SPACE_MAX];
extern byte *color_space_name[COLOR_SPACE_MAX];
/* Color space ID <-> name conversions */
-byte *color_space_id_to_name(uns id);
-uns color_space_name_to_id(byte *name);
+byte *color_space_id_to_name(uint id);
+uint color_space_name_to_id(byte *name);
/* Struct color manipulation */
-int color_get(struct color *color, byte *src, uns src_space);
-int color_put(struct image_context *ctx, struct color *color, byte *dest, uns dest_space);
+int color_get(struct color *color, byte *src, uint src_space);
+int color_put(struct image_context *ctx, struct color *color, byte *dest, uint dest_space);
-static inline void color_make_gray(struct color *color, uns gray)
+static inline void color_make_gray(struct color *color, uint gray)
{
color->c[0] = gray;
color->color_space = COLOR_SPACE_GRAYSCALE;
}
-static inline void color_make_rgb(struct color *color, uns r, uns g, uns b)
+static inline void color_make_rgb(struct color *color, uint r, uint g, uint b)
{
color->c[0] = r;
color->c[1] = g;
};
struct image_conv_options {
- uns flags;
+ uint flags;
struct color background;
};
void color_compute_color_spaces_conversion_matrix(double matrix[9], const struct color_space_chromacity_info *src, const struct color_space_chromacity_info *dest);
void color_invert_matrix(double dest[9], double matrix[9]);
-static inline uns rgb_to_gray_func(uns r, uns g, uns b)
+static inline uint rgb_to_gray_func(uint r, uint g, uint b)
{
return (r * 19660 + g * 38666 + b * 7210) >> 16;
}
extern u32 srgb_to_luv_tab3[20 << SRGB_TO_LUV_TAB3_SIZE];
void srgb_to_luv_init(void);
-void srgb_to_luv_pixels(byte *dest, byte *src, uns count);
+void srgb_to_luv_pixels(byte *dest, byte *src, uint count);
/* L covers the interval [0..255]; u and v are centered at 128 and scaled by 1/4 with respect to L */
static inline void srgb_to_luv_pixel(byte *dest, byte *src)
{
- uns r = srgb_to_luv_tab1[src[0]];
- uns g = srgb_to_luv_tab1[src[1]];
- uns b = srgb_to_luv_tab1[src[2]];
- uns x =
- (uns)(4 * SRGB_XYZ_XR * 0xffff) * r +
- (uns)(4 * SRGB_XYZ_XG * 0xffff) * g +
- (uns)(4 * SRGB_XYZ_XB * 0xffff) * b;
- uns y =
- (uns)(9 * SRGB_XYZ_YR * 0xffff) * r +
- (uns)(9 * SRGB_XYZ_YG * 0xffff) * g +
- (uns)(9 * SRGB_XYZ_YB * 0xffff) * b;
- uns l = srgb_to_luv_tab2[y >> (28 - SRGB_TO_LUV_TAB2_SIZE)];
+ uint r = srgb_to_luv_tab1[src[0]];
+ uint g = srgb_to_luv_tab1[src[1]];
+ uint b = srgb_to_luv_tab1[src[2]];
+ uint x =
+ (uint)(4 * SRGB_XYZ_XR * 0xffff) * r +
+ (uint)(4 * SRGB_XYZ_XG * 0xffff) * g +
+ (uint)(4 * SRGB_XYZ_XB * 0xffff) * b;
+ uint y =
+ (uint)(9 * SRGB_XYZ_YR * 0xffff) * r +
+ (uint)(9 * SRGB_XYZ_YG * 0xffff) * g +
+ (uint)(9 * SRGB_XYZ_YB * 0xffff) * b;
+ uint l = srgb_to_luv_tab2[y >> (28 - SRGB_TO_LUV_TAB2_SIZE)];
dest[0] = l >> (SRGB_TO_LUV_TAB2_SCALE - 8);
- uns sum =
- (uns)((SRGB_XYZ_XR + 15 * SRGB_XYZ_YR + 3 * SRGB_XYZ_ZR) * 0x7fff) * r +
- (uns)((SRGB_XYZ_XG + 15 * SRGB_XYZ_YG + 3 * SRGB_XYZ_ZG) * 0x7fff) * g +
- (uns)((SRGB_XYZ_XB + 15 * SRGB_XYZ_YB + 3 * SRGB_XYZ_ZB) * 0x7fff) * b;
- uns s = srgb_to_luv_tab3[sum >> (27 - SRGB_TO_LUV_TAB3_SIZE)];
+ uint sum =
+ (uint)((SRGB_XYZ_XR + 15 * SRGB_XYZ_YR + 3 * SRGB_XYZ_ZR) * 0x7fff) * r +
+ (uint)((SRGB_XYZ_XG + 15 * SRGB_XYZ_YG + 3 * SRGB_XYZ_ZG) * 0x7fff) * g +
+ (uint)((SRGB_XYZ_XB + 15 * SRGB_XYZ_YB + 3 * SRGB_XYZ_ZB) * 0x7fff) * b;
+ uint s = srgb_to_luv_tab3[sum >> (27 - SRGB_TO_LUV_TAB3_SIZE)];
int xs = ((u64)x * s) >> 32;
int ys = ((u64)y * s) >> 32;
int xw = ((4 * 13) << (SRGB_TO_LUV_TAB3_SCALE - 4)) *
extern struct color_interpolation_node *color_interpolation_table;
void color_conv_init(void);
-void color_conv_pixels(byte *dest, byte *src, uns count, struct color_grid_node *grid);
+void color_conv_pixels(byte *dest, byte *src, uint count, struct color_grid_node *grid);
#define COLOR_CONV_SCALE_CONST (((((1 << COLOR_CONV_SIZE) - 1) << 16) + (1 << (16 - COLOR_CONV_OFS))) / 255)
static inline void color_conv_pixel(byte *dest, byte *src, struct color_grid_node *grid)
{
- uns s0 = src[0] * COLOR_CONV_SCALE_CONST;
- uns s1 = src[1] * COLOR_CONV_SCALE_CONST;
- uns s2 = src[2] * COLOR_CONV_SCALE_CONST;
+ uint s0 = src[0] * COLOR_CONV_SCALE_CONST;
+ uint s1 = src[1] * COLOR_CONV_SCALE_CONST;
+ uint s2 = src[2] * COLOR_CONV_SCALE_CONST;
struct color_grid_node *g0, *g1, *g2, *g3, *g = grid +
((s0 >> 16) + ((s1 >> 16) << COLOR_CONV_SIZE) + ((s2 >> 16) << (2 * COLOR_CONV_SIZE)));
struct color_interpolation_node *n = color_interpolation_table +
#include <string.h>
/* ImageLib section */
-uns image_trace;
-uns image_max_dim = 0xffff;
-uns image_max_bytes = ~0U;
+uint image_trace;
+uint image_max_dim = 0xffff;
+uint image_max_bytes = ~0U;
#if defined(CONFIG_IMAGES_SIM) || defined(CONFIG_IMAGES_DUP)
/* ImageSig section */
-uns image_sig_min_width;
-uns image_sig_min_height;
-uns *image_sig_prequant_thresholds;
-uns image_sig_postquant_min_steps;
-uns image_sig_postquant_max_steps;
-uns image_sig_postquant_threshold;
+uint image_sig_min_width;
+uint image_sig_min_height;
+uint *image_sig_prequant_thresholds;
+uint image_sig_postquant_min_steps;
+uint image_sig_postquant_max_steps;
+uint image_sig_postquant_threshold;
double image_sig_border_size;
int image_sig_border_bonus;
double image_sig_inertia_scale[3];
double image_sig_textured_threshold;
int image_sig_compare_method;
-uns image_sig_cmp_features_weights[IMAGE_REG_F + IMAGE_REG_H];
+uint image_sig_cmp_features_weights[IMAGE_REG_F + IMAGE_REG_H];
#endif
static struct cf_section image_lib_config = {
CF_ITEMS{
- CF_UNS("Trace", &image_trace),
- CF_UNS("ImageMaxDim", &image_max_dim),
- CF_UNS("ImageMaxBytes", &image_max_bytes),
+ CF_UINT("Trace", &image_trace),
+ CF_UINT("ImageMaxDim", &image_max_dim),
+ CF_UINT("ImageMaxBytes", &image_max_bytes),
CF_END
}
};
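/* A hypothetical configuration snippet matching this section (the section
 * name "ImageLib" is an assumption, based on the ImageLib.Trace and
 * ImageLib.ImageMaxDim comments in the public header):
 *
 *   ImageLib {
 *     Trace 1
 *     ImageMaxDim 65535
 *     ImageMaxBytes 268435456
 *   }
 */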
#if defined(CONFIG_IMAGES_SIM) || defined(CONFIG_IMAGES_DUP)
static struct cf_section image_sig_config = {
CF_ITEMS{
- CF_UNS("MinWidth", &image_sig_min_width),
- CF_UNS("MinHeight", &image_sig_min_height),
- CF_UNS_DYN("PreQuantThresholds", &image_sig_prequant_thresholds, CF_ANY_NUM),
- CF_UNS("PostQuantMinSteps", &image_sig_postquant_min_steps),
- CF_UNS("PostQuantMaxSteps", &image_sig_postquant_max_steps),
- CF_UNS("PostQuantThreshold", &image_sig_postquant_threshold),
+ CF_UINT("MinWidth", &image_sig_min_width),
+ CF_UINT("MinHeight", &image_sig_min_height),
+ CF_UINT_DYN("PreQuantThresholds", &image_sig_prequant_thresholds, CF_ANY_NUM),
+ CF_UINT("PostQuantMinSteps", &image_sig_postquant_min_steps),
+ CF_UINT("PostQuantMaxSteps", &image_sig_postquant_max_steps),
+ CF_UINT("PostQuantThreshold", &image_sig_postquant_threshold),
CF_DOUBLE("BorderSize", &image_sig_border_size),
CF_INT("BorderBonus", &image_sig_border_bonus),
CF_DOUBLE_ARY("InertiaScale", image_sig_inertia_scale, 3),
CF_DOUBLE("TexturedThreshold", &image_sig_textured_threshold),
CF_LOOKUP("CompareMethod", &image_sig_compare_method, ((const char * const []){"integrated", "fuzzy", "average", NULL})),
- CF_UNS_ARY("CompareFeaturesWeights", image_sig_cmp_features_weights, IMAGE_REG_F + IMAGE_REG_H),
+ CF_UINT_ARY("CompareFeaturesWeights", image_sig_cmp_features_weights, IMAGE_REG_F + IMAGE_REG_H),
CF_END
}
};
}
void
-image_context_msg(struct image_context *ctx, uns code, char *msg, ...)
+image_context_msg(struct image_context *ctx, uint code, char *msg, ...)
{
va_list args;
va_start(args, msg);
}
void
-image_context_vmsg(struct image_context *ctx, uns code, char *msg, va_list args)
+image_context_vmsg(struct image_context *ctx, uint code, char *msg, va_list args)
{
ctx->msg_code = code;
ctx->msg = bb_vprintf(&ctx->msg_buf, msg, args);
#include <fcntl.h>
-static inline uns
+static inline uint
err(int a, int b)
{
a -= b;
}
static inline u64
-err_sum(byte *pos1, byte *pos2, uns count)
+err_sum(byte *pos1, byte *pos2, uint count)
{
- uns e64 = 0;
+ u64 e64 = 0; /* 64-bit accumulator: the sum can exceed 32 bits for large blocks */
while (count--)
{
- uns e = err(*pos1++, *pos2++);
+ uint e = err(*pos1++, *pos2++);
e += err(*pos1++, *pos2++);
e += err(*pos1++, *pos2++);
e64 += e;
}
static inline u64
-err_sum_transformed(byte *pos1, byte *pos2, uns cols, uns rows, int row_step_1, int col_step_2, int row_step_2)
+err_sum_transformed(byte *pos1, byte *pos2, uint cols, uint rows, int row_step_1, int col_step_2, int row_step_2)
{
DBG("err_sum_transformed(pos1=%p pos2=%p cols=%u rows=%u row_step_1=%d col_step_2=%d row_step_2=%d)",
pos1, pos2, cols, rows, row_step_1, col_step_2, row_step_2);
u64 e64 = 0;
- for (uns j = rows; j--; )
+ for (uint j = rows; j--; )
{
byte *p1 = pos1;
byte *p2 = pos2;
- uns e = 0;
- for (uns i = cols; i--; )
+ uint e = 0;
+ for (uint i = cols; i--; )
{
e += err(p1[0], p2[0]);
e += err(p1[1], p2[1]);
}
static inline int
-aspect_ratio_test(struct image_dup_context *ctx, uns cols1, uns rows1, uns cols2, uns rows2)
+aspect_ratio_test(struct image_dup_context *ctx, uint cols1, uint rows1, uint cols2, uint rows2)
{
DBG("aspect_ratio_test(cols1=%u rows1=%u cols2=%u rows2=%u)", cols1, rows1, cols2, rows2);
- uns r1 = cols1 * rows2;
- uns r2 = rows1 * cols2;
+ uint r1 = cols1 * rows2;
+ uint r2 = rows1 * cols2;
return
r1 <= ((r2 * ctx->ratio_threshold) >> 7) &&
r2 <= ((r1 * ctx->ratio_threshold) >> 7);
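/* ratio_threshold is fixed-point with 7 fractional bits, so 128 means
 * "identical aspect ratio only" and the test tolerates a deviation of up to
 * threshold/128 in either direction. Worked example with a hypothetical
 * threshold of 140 (~9%): comparing 400x300 against 410x300 gives
 * r1 = 120000 and r2 = 123000; both 120000 <= (123000 * 140) >> 7 == 134531
 * and 123000 <= (120000 * 140) >> 7 == 131250 hold, so the pair passes. */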
{
byte *block1 = image_dup_block(dup1, 0, 0);
byte *block2 = image_dup_block(dup2, 0, 0);
- uns e =
+ uint e =
err(block1[0], block2[0]) +
err(block1[1], block2[1]) +
err(block1[2], block2[2]);
}
static int
-blocks_compare(struct image_dup_context *ctx, struct image_dup *dup1, struct image_dup *dup2, uns tab_col, uns tab_row, uns trans)
+blocks_compare(struct image_dup_context *ctx, struct image_dup *dup1, struct image_dup *dup2, uint tab_col, uint tab_row, uint trans)
{
DBG("blocks_compare(tab_col=%d tab_row=%d trans=%d)", tab_col, tab_row, trans);
ctx->sum_pixels += 1 << (tab_col + tab_row);
switch (trans)
{
case 0: ;
- uns err = (err_sum(block1, block2, 1 << (tab_col + tab_row)) >> (tab_col + tab_row));
+ uint err = (err_sum(block1, block2, 1 << (tab_col + tab_row)) >> (tab_col + tab_row));
DBG("average error=%d", err);
ctx->error = err;
return err <= ctx->error_threshold;
default:
ASSERT(0);
}
- uns err = (err_sum_transformed(block1, block2, (1 << tab_col), (1 << tab_row), (3 << tab_col), col_step, row_step) >> (tab_col + tab_row));
+ uint err = (err_sum_transformed(block1, block2, (1 << tab_col), (1 << tab_row), (3 << tab_col), col_step, row_step) >> (tab_col + tab_row));
DBG("average error=%d", err);
ctx->error = err;
return err <= ctx->error_threshold;
}
static int
-same_size_compare(struct image_dup_context *ctx, struct image_dup *dup1, struct image_dup *dup2, uns trans)
+same_size_compare(struct image_dup_context *ctx, struct image_dup *dup1, struct image_dup *dup2, uint trans)
{
struct image *img1 = &dup1->image;
struct image *img2 = &dup2->image;
default:
ASSERT(0);
}
- uns err = (err_sum_transformed(block1, block2, img1->cols, img1->rows, img1->row_size, col_step, row_step) / ((u64)img1->cols * img1->rows));
+ uint err = (err_sum_transformed(block1, block2, img1->cols, img1->rows, img1->row_size, col_step, row_step) / ((u64)img1->cols * img1->rows));
DBG("average error=%d", err);
ctx->error = err;
return err <= ctx->error_threshold;
}
-uns
+uint
image_dup_compare(struct image_dup_context *ctx, struct image_dup *dup1, struct image_dup *dup2)
{
DBG("image_dup_compare(%p, %p)", dup1, dup2);
return 0;
struct image *img1 = &dup1->image;
struct image *img2 = &dup2->image;
- uns flags = ctx->flags;
+ uint flags = ctx->flags;
if (flags & IMAGE_DUP_SCALE)
{
DBG("Scale support");
}
if (!(flags & 0xff))
return 0;
- uns result = 0;
+ uint result = 0;
if (flags & 0x0f)
{
- uns cols = MIN(dup1->tab_cols, dup2->tab_cols);
- uns rows = MIN(dup1->tab_rows, dup2->tab_rows);
- for (uns t = 0; t < 4; t++)
+ uint cols = MIN(dup1->tab_cols, dup2->tab_cols);
+ uint rows = MIN(dup1->tab_rows, dup2->tab_rows);
+ for (uint t = 0; t < 4; t++)
if (flags & (1 << t))
{
DBG("Testing trans %d", t);
- uns i = MAX(cols, rows), depth = 1;
+ uint i = MAX(cols, rows), depth = 1;
while (i--)
{
depth++;
- uns col = MAX(0, (int)(cols - i));
- uns row = MAX(0, (int)(rows - i));
+ uint col = MAX(0, (int)(cols - i));
+ uint row = MAX(0, (int)(rows - i));
if (!blocks_compare(ctx, dup1, dup2, col, row, t))
break;
if (!i &&
}
if (flags & 0xf0)
{
- uns cols = MIN(dup1->tab_cols, dup2->tab_rows);
- uns rows = MIN(dup1->tab_rows, dup2->tab_cols);
- for (uns t = 4; t < 8; t++)
+ uint cols = MIN(dup1->tab_cols, dup2->tab_rows);
+ uint rows = MIN(dup1->tab_rows, dup2->tab_cols);
+ for (uint t = 4; t < 8; t++)
if (flags & (1 << t))
{
DBG("Testing trans %d", t);
- uns i = MAX(cols, rows), depth = 1;
+ uint i = MAX(cols, rows), depth = 1;
while (i--)
{
depth++;
- uns col = MAX(0, (int)(cols - i));
- uns row = MAX(0, (int)(rows - i));
+ uint col = MAX(0, (int)(cols - i));
+ uint row = MAX(0, (int)(rows - i));
if (!blocks_compare(ctx, dup1, dup2, col, row, t))
break;
if (!i &&
}
static inline struct image *
-image_dup_subimage(struct image_context *ctx, struct image_dup *dup, struct image *block, uns tab_col, uns tab_row)
+image_dup_subimage(struct image_context *ctx, struct image_dup *dup, struct image *block, uint tab_col, uint tab_row)
{
return image_init_matrix(ctx, block, image_dup_block(dup, tab_col, tab_row),
1 << tab_col, 1 << tab_row, 3 << tab_col, COLOR_SPACE_RGB);
static inline void
pixels_average(byte *dest, byte *src1, byte *src2)
{
- dest[0] = ((uns)src1[0] + (uns)src2[0]) >> 1;
- dest[1] = ((uns)src1[1] + (uns)src2[1]) >> 1;
- dest[2] = ((uns)src1[2] + (uns)src2[2]) >> 1;
+ dest[0] = ((uint)src1[0] + (uint)src2[0]) >> 1;
+ dest[1] = ((uint)src1[1] + (uint)src2[1]) >> 1;
+ dest[2] = ((uint)src1[2] + (uint)src2[2]) >> 1;
}
-uns
-image_dup_estimate_size(uns cols, uns rows, uns same_size_compare, uns qtree_limit)
+uint
+image_dup_estimate_size(uint cols, uint rows, uint same_size_compare, uint qtree_limit)
{
- uns tab_cols, tab_rows;
- for (tab_cols = 0; (uns)(2 << tab_cols) < cols && tab_cols < qtree_limit; tab_cols++);
- for (tab_rows = 0; (uns)(2 << tab_rows) < rows && tab_rows < qtree_limit; tab_rows++);
- uns size = sizeof(struct image_dup) + (12 << (tab_cols + tab_rows)) + 2 * CPU_STRUCT_ALIGN;
+ uint tab_cols, tab_rows;
+ for (tab_cols = 0; (uint)(2 << tab_cols) < cols && tab_cols < qtree_limit; tab_cols++);
+ for (tab_rows = 0; (uint)(2 << tab_rows) < rows && tab_rows < qtree_limit; tab_rows++);
+ uint size = sizeof(struct image_dup) + (12 << (tab_cols + tab_rows)) + 2 * CPU_STRUCT_ALIGN;
if (same_size_compare)
size += cols * rows * 3 + CPU_STRUCT_ALIGN;
return ALIGN_TO(size, CPU_STRUCT_ALIGN);
}
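/* The 12 << (tab_cols + tab_rows) term is the whole scaling pyramid: the
 * blocks for all i <= tab_cols, j <= tab_rows take sum(3 << (i + j)) =
 * 3 * (2^(tab_cols+1) - 1) * (2^(tab_rows+1) - 1) bytes, which is bounded
 * by 12 << (tab_cols + tab_rows). Worked example: cols = 100, rows = 60,
 * qtree_limit = 8 yields tab_cols = 6 and tab_rows = 5 (largest block
 * 64x32 pixels), i.e. 12 << 11 = 24576 bytes of pyramid plus the struct,
 * the optional same-size copy and the alignment padding. */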
-uns
-image_dup_new(struct image_dup_context *ctx, struct image *img, void *buffer, uns same_size_compare)
+uint
+image_dup_new(struct image_dup_context *ctx, struct image *img, void *buffer, uint same_size_compare)
{
DBG("image_dup_init()");
ASSERT(!((uintptr_t)buffer & (CPU_STRUCT_ALIGN - 1)));
{
if (!image_init_matrix(ctx->ic, &dup->image, ptr, img->cols, img->rows, img->cols * 3, COLOR_SPACE_RGB))
return 0;
- uns size = img->rows * img->cols * 3;
+ uint size = img->rows * img->cols * 3;
ptr += ALIGN_TO(size, CPU_STRUCT_ALIGN);
byte *s = img->pixels;
byte *d = dup->image.pixels;
- for (uns row = img->rows; row--; )
+ for (uint row = img->rows; row--; )
{
memcpy(d, s, img->row_pixels_size);
d += dup->image.row_size;
dup->image.rows = img->rows;
}
- for (dup->tab_cols = 0; (uns)(2 << dup->tab_cols) < img->cols && dup->tab_cols < ctx->qtree_limit; dup->tab_cols++);
- for (dup->tab_rows = 0; (uns)(2 << dup->tab_rows) < img->rows && dup->tab_rows < ctx->qtree_limit; dup->tab_rows++);
+ for (dup->tab_cols = 0; (uint)(2 << dup->tab_cols) < img->cols && dup->tab_cols < ctx->qtree_limit; dup->tab_cols++);
+ for (dup->tab_rows = 0; (uint)(2 << dup->tab_rows) < img->rows && dup->tab_rows < ctx->qtree_limit; dup->tab_rows++);
dup->tab_row_size = 6 << dup->tab_cols;
dup->tab_pixels = ptr;
- uns size = 12 << (dup->tab_cols + dup->tab_rows);
+ uint size = 12 << (dup->tab_cols + dup->tab_rows);
ptr += ALIGN_TO(size, CPU_STRUCT_ALIGN);
/* Scale original image to right bottom block */
}
/* Complete bottom row */
- for (uns i = dup->tab_cols; i--; )
+ for (uint i = dup->tab_cols; i--; )
{
byte *d = image_dup_block(dup, i, dup->tab_rows);
byte *s = image_dup_block(dup, i + 1, dup->tab_rows);
- for (uns y = 0; y < (uns)(1 << dup->tab_rows); y++)
- for (uns x = 0; x < (uns)(1 << i); x++)
+ for (uint y = 0; y < (uint)(1 << dup->tab_rows); y++)
+ for (uint x = 0; x < (uint)(1 << i); x++)
{
pixels_average(d, s, s + 3);
d += 3;
}
/* Complete remaining blocks */
- for (uns i = 0; i <= dup->tab_cols; i++)
+ for (uint i = 0; i <= dup->tab_cols; i++)
{
- uns line_size = (3 << i);
- for (uns j = dup->tab_rows; j--; )
+ uint line_size = (3 << i);
+ for (uint j = dup->tab_rows; j--; )
{
byte *d = image_dup_block(dup, i, j);
byte *s = image_dup_block(dup, i, j + 1);
- for (uns y = 0; y < (uns)(1 << j); y++)
+ for (uint y = 0; y < (uint)(1 << j); y++)
{
- for (uns x = 0; x < (uns)(1 << i); x++)
+ for (uint x = 0; x < (uint)(1 << i); x++)
{
pixels_average(d, s, s + line_size);
d += 3;
struct image_dup_context {
struct image_context *ic;
- uns flags;
- uns ratio_threshold;
- uns error_threshold;
- uns qtree_limit;
+ uint flags;
+ uint ratio_threshold;
+ uint error_threshold;
+ uint qtree_limit;
u64 sum_depth;
u64 sum_pixels;
- uns error;
+ uint error;
};
struct image_dup {
void image_dup_context_init(struct image_context *ic, struct image_dup_context *ctx);
void image_dup_context_cleanup(struct image_dup_context *ctx);
-uns image_dup_estimate_size(uns cols, uns rows, uns same_size_compare, uns qtree_limit);
-uns image_dup_new(struct image_dup_context *ctx, struct image *image, void *buffer, uns same_size_compare);
+uint image_dup_estimate_size(uint cols, uint rows, uint same_size_compare, uint qtree_limit);
+uint image_dup_new(struct image_dup_context *ctx, struct image *image, void *buffer, uint same_size_compare);
/* dup-cmp.c */
-uns image_dup_compare(struct image_dup_context *ctx, struct image_dup *dup1, struct image_dup *dup2);
+uint image_dup_compare(struct image_dup_context *ctx, struct image_dup *dup1, struct image_dup *dup2);
/* internals */
-static inline byte *image_dup_block(struct image_dup *dup, uns tab_col, uns tab_row)
+static inline byte *image_dup_block(struct image_dup *dup, uint tab_col, uint tab_row)
{
return dup->tab_pixels + (dup->tab_row_size << tab_row) + (3 << (tab_row + tab_col));
}
#define image_trace ucw_image_trace
#endif
-extern uns image_trace; /* ImageLib.Trace */
+extern uint image_trace; /* ImageLib.Trace */
/* Error codes */
#define IMAGE_ERROR(ctx, type, msg...) image_context_msg((ctx), IMAGE_MSG_ERROR | (type), msg)
#define IMAGE_TRACE(ctx, level, msg...) do { \
- struct image_context *_ctx = (ctx); uns _level = (level); \
+ struct image_context *_ctx = (ctx); uint _level = (level); \
if (_level < _ctx->tracing_level) image_context_msg(_ctx, IMAGE_MSG_TRACE | _level, msg); } while (0)
#endif
#include <time.h>
#include <unistd.h>
-static uns want_image_iface;
-static uns want_threads;
+static uint want_image_iface;
+static uint want_threads;
#define TRY(x) do { if (!(x)) ASSERT(0); } while (0)
image_context_init(&ctx);
TRY(image_io_init(&ctx, &io));
- for (uns num = 0; num < 200; num++)
+ for (uint num = 0; num < 200; num++)
{
int r0 = random_max(100);
struct fastbuf *wfb = fbmem_create(10000);
struct fastbuf *rfb;
- uns format = 0;
+ uint format = 0;
while (!format)
{
switch (random_max(3))
if (pthread_attr_init(&attr) < 0 ||
pthread_attr_setstacksize(&attr, ucwlib_thread_stack_size) < 0)
ASSERT(0);
- for (uns i = 0; i < TEST_THREADS_COUNT - 1; i++)
+ for (uint i = 0; i < TEST_THREADS_COUNT - 1; i++)
{
if (pthread_create(threads + i, &attr, test_threads_thread, NULL) < 0)
die("Unable to create thread: %m");
}
test_threads_thread(NULL);
- for (uns i = 0; i < TEST_THREADS_COUNT - 1; i++)
+ for (uint i = 0; i < TEST_THREADS_COUNT - 1; i++)
if (pthread_join(threads[i], NULL) < 0)
die("Cannot join thread: %m");
#else
)
#endif
{
- uns P(cols) = IMAGE_WALK_COLS;
- uns P(rows) = IMAGE_WALK_ROWS;
+ uint P(cols) = IMAGE_WALK_COLS;
+ uint P(rows) = IMAGE_WALK_ROWS;
# if IMAGE_WALK_UNROLL > 1
- uns P(cols_unroll_block_count) = P(cols) / IMAGE_WALK_UNROLL;
- uns P(cols_unroll_end_count) = P(cols) % IMAGE_WALK_UNROLL;
+ uint P(cols_unroll_block_count) = P(cols) / IMAGE_WALK_UNROLL;
+ uint P(cols_unroll_end_count) = P(cols) % IMAGE_WALK_UNROLL;
# endif
byte *P(pos) = IMAGE_WALK_PIXELS, *P(row_start) = P(pos);
int P(col_step) = IMAGE_WALK_COL_STEP;
{
IMAGE_WALK_DO_ROW_START;
# if IMAGE_WALK_UNROLL == 1
- for (uns P(_i) = P(cols); P(_i)--; )
+ for (uint P(_i) = P(cols); P(_i)--; )
# else
- for (uns P(_i) = P(cols_unroll_block_count); P(_i)--; )
+ for (uint P(_i) = P(cols_unroll_block_count); P(_i)--; )
# endif
{
# if IMAGE_WALK_UNROLL >= 4
IMAGE_WALK__STEP;
}
# if IMAGE_WALK_UNROLL > 1
- for (uns P(_i) = P(cols_unroll_end_count); P(_i)--; )
+ for (uint P(_i) = P(cols_unroll_end_count); P(_i)--; )
{
IMAGE_WALK__STEP;
}
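/* A typical instantiation, modeled on the uses visible earlier in this
 * patch (dest, src and channels assumed in scope): parameterize the
 * walker, then include the generator header. With IMAGE_WALK_DOUBLE the
 * two images are walked in lockstep and walk_pos / walk_sec_pos point at
 * the current pixel of each:
 *
 *   #define IMAGE_WALK_PREFIX(x) walk_##x
 *   #define IMAGE_WALK_INLINE
 *   #define IMAGE_WALK_DOUBLE
 *   #define IMAGE_WALK_IMAGE dest
 *   #define IMAGE_WALK_SEC_IMAGE src
 *   #define IMAGE_WALK_DO_STEP do{ for (uint i = 0; i < channels; i++) \
 *       walk_pos[i] = walk_sec_pos[i]; }while(0)
 *   #include <images/image-walk.h>
 */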
#include <string.h>
-static inline uns
-flags_to_pixel_size(uns flags)
+static inline uint
+flags_to_pixel_size(uint flags)
{
- uns pixel_size = color_space_channels[flags & IMAGE_COLOR_SPACE];
+ uint pixel_size = color_space_channels[flags & IMAGE_COLOR_SPACE];
if (flags & IMAGE_ALPHA)
pixel_size++;
return pixel_size;
}
struct image *
-image_new(struct image_context *ctx, uns cols, uns rows, uns flags, struct mempool *pool)
+image_new(struct image_context *ctx, uint cols, uint rows, uint flags, struct mempool *pool)
{
DBG("image_new(cols=%u rows=%u flags=0x%x pool=%p)", cols, rows, flags, pool);
flags &= IMAGE_NEW_FLAGS;
return NULL;
}
struct image *img;
- uns channels, pixel_size, row_pixels_size, row_size, align;
+ uint channels, pixel_size, row_pixels_size, row_size, align;
pixel_size = channels = flags_to_pixel_size(flags);
if (!channels || channels > 4)
{
row_pixels_size = cols * pixel_size;
row_size = ALIGN_TO(row_pixels_size, align);
u64 image_size_64 = (u64)row_size * rows;
- u64 bytes_64 = image_size_64 + (sizeof(struct image) + IMAGE_SSE_ALIGN_SIZE - 1 + sizeof(uns));
+ u64 bytes_64 = image_size_64 + (sizeof(struct image) + IMAGE_SSE_ALIGN_SIZE - 1 + sizeof(uint));
if (unlikely(bytes_64 > image_max_bytes))
{
IMAGE_ERROR(ctx, IMAGE_ERROR_INVALID_DIMENSIONS, "Image does not fit in memory");
}
struct image *
-image_clone(struct image_context *ctx, struct image *src, uns flags, struct mempool *pool)
+image_clone(struct image_context *ctx, struct image *src, uint flags, struct mempool *pool)
{
DBG("image_clone(src=%p flags=0x%x pool=%p)", src, src->flags, pool);
struct image *img;
{
byte *s = src->pixels;
byte *d = img->pixels;
- for (uns row = src->rows; row--; )
+ for (uint row = src->rows; row--; )
{
memcpy(d, s, src->row_pixels_size);
d += img->row_size;
if (img->flags & IMAGE_GAPS_PROTECTED)
{
byte *p = img->pixels;
- uns bytes = img->cols * img->pixel_size;
- for (uns row = img->rows; row--; p += img->row_size)
+ uint bytes = img->cols * img->pixel_size;
+ for (uint row = img->rows; row--; p += img->row_size)
bzero(p, bytes);
}
else
}
struct image *
-image_init_matrix(struct image_context *ctx, struct image *img, byte *pixels, uns cols, uns rows, uns row_size, uns flags)
+image_init_matrix(struct image_context *ctx, struct image *img, byte *pixels, uint cols, uint rows, uint row_size, uint flags)
{
DBG("image_init_matrix(img=%p pixels=%p cols=%u rows=%u row_size=%u flags=0x%x)", img, pixels, cols, rows, row_size, flags);
if (unlikely(!image_dimensions_valid(cols, rows)))
}
struct image *
-image_init_subimage(struct image_context *ctx UNUSED, struct image *img, struct image *src, uns left, uns top, uns cols, uns rows)
+image_init_subimage(struct image_context *ctx UNUSED, struct image *img, struct image *src, uint left, uint top, uint cols, uint rows)
{
DBG("image_init_subimage(img=%p src=%p left=%u top=%u cols=%u rows=%u)", img, src, left, top, cols, rows);
ASSERT(left + cols <= src->cols && top + rows <= src->rows);
}
byte *
-image_channels_format_to_name(uns format, byte *buf)
+image_channels_format_to_name(uint format, byte *buf)
{
byte *cs_name = color_space_id_to_name(format & IMAGE_COLOR_SPACE);
- uns l = strlen(cs_name);
+ uint l = strlen(cs_name);
memcpy(buf, cs_name, l + 1);
if (format & IMAGE_ALPHA)
strcpy(buf + l, "+Alpha");
return buf;
}
-uns
+uint
image_name_to_channels_format(byte *name)
{
- uns i;
+ uint i;
if (i = color_space_name_to_id(name))
return i;
- uns l = strlen(name);
+ uint l = strlen(name);
if (l > 6 && !strcasecmp(name + l - 6, "+alpha"))
{
byte buf[l + 1];
struct image_context {
byte *msg; /* last message */
- uns msg_code; /* last message code (see images/error.h for details) */
+ uint msg_code; /* last message code (see images/error.h for details) */
bb_t msg_buf; /* message buffer */
void (*msg_callback)(struct image_context *ctx); /* called for each message (in msg_{str,code}) */
- uns tracing_level; /* tracing level (zero to disable) */
+ uint tracing_level; /* tracing level (zero to disable) */
};
/* initialization/cleanup */
void image_context_cleanup(struct image_context *ctx);
/* message handling, see images/error.h for useful macros */
-void image_context_msg(struct image_context *ctx, uns code, char *msg, ...);
-void image_context_vmsg(struct image_context *ctx, uns code, char *msg, va_list args);
+void image_context_msg(struct image_context *ctx, uint code, char *msg, ...);
+void image_context_vmsg(struct image_context *ctx, uint code, char *msg, va_list args);
/* default callback, displays messages with standard libucw's log() routine */
void image_context_msg_default(struct image_context *ctx);
* - an image structure is not directly connected to a single context,
* but the manipulation routines are (the user must synchronize access themselves)! */
-extern uns image_max_dim; /* ImageLib.ImageMaxDim */
-extern uns image_max_bytes; /* ImageLib.ImageMaxBytes */
+extern uint image_max_dim; /* ImageLib.ImageMaxDim */
+extern uint image_max_bytes; /* ImageLib.ImageMaxBytes */
/* SSE aligning size, see IMAGE_SSE_ALIGNED */
#define IMAGE_SSE_ALIGN_SIZE 16
#define IMAGE_MAX_CHANNELS 4
#define IMAGE_CHANNELS_FORMAT_MAX_SIZE 128
-byte *image_channels_format_to_name(uns format, byte *buf);
-uns image_name_to_channels_format(byte *name);
+byte *image_channels_format_to_name(uint format, byte *buf);
+uint image_name_to_channels_format(byte *name);
struct color {
byte c[IMAGE_MAX_CHANNELS];
};
struct image {
- byte *pixels; /* aligned top left pixel, there are at least sizeof(uns)
+ byte *pixels; /* aligned top left pixel, there are at least sizeof(uint)
unused bytes after the buffer (possible optimizations) */
- uns cols; /* number of columns */
- uns rows; /* number of rows */
- uns channels; /* number of color channels including the alpha channel */
- uns pixel_size; /* size of pixel in bytes (1, 2, 3 or 4) */
- uns row_size; /* scanline size in bytes */
- uns row_pixels_size; /* scanline size in bytes excluding rows gaps */
- uns image_size; /* rows * row_size */
- uns flags; /* enum image_flag */
+ uint cols; /* number of columns */
+ uint rows; /* number of rows */
+ uint channels; /* number of color channels including the alpha channel */
+ uint pixel_size; /* size of pixel in bytes (1, 2, 3 or 4) */
+ uint row_size; /* scanline size in bytes */
+ uint row_pixels_size; /* scanline size in bytes excluding rows gaps */
+ uint image_size; /* rows * row_size */
+ uint flags; /* enum image_flag */
};
-struct image *image_new(struct image_context *ctx, uns cols, uns rows, uns flags, struct mempool *pool);
-struct image *image_clone(struct image_context *ctx, struct image *src, uns flags, struct mempool *pool);
+struct image *image_new(struct image_context *ctx, uint cols, uint rows, uint flags, struct mempool *pool);
+struct image *image_clone(struct image_context *ctx, struct image *src, uint flags, struct mempool *pool);
void image_destroy(struct image *img);
void image_clear(struct image_context *ctx, struct image *img);
-struct image *image_init_matrix(struct image_context *ctx, struct image *img, byte *pixels, uns cols, uns rows, uns row_size, uns flags);
-struct image *image_init_subimage(struct image_context *ctx, struct image *img, struct image *src, uns left, uns top, uns cols, uns rows);
+struct image *image_init_matrix(struct image_context *ctx, struct image *img, byte *pixels, uint cols, uint rows, uint row_size, uint flags);
+struct image *image_init_subimage(struct image_context *ctx, struct image *img, struct image *src, uint left, uint top, uint cols, uint rows);
-static inline int image_dimensions_valid(uns cols, uns rows)
+static inline int image_dimensions_valid(uint cols, uint rows)
{
return cols && rows && cols <= image_max_dim && rows <= image_max_dim;
}
/* scale.c */
int image_scale(struct image_context *ctx, struct image *dest, struct image *src);
-void image_dimensions_fit_to_box(uns *cols, uns *rows, uns max_cols, uns max_rows, uns upsample);
+void image_dimensions_fit_to_box(uint *cols, uint *rows, uint max_cols, uint max_rows, uint upsample);
/* image-io.c */
enum image_format format; /* [R W] - file format (IMAGE_FORMAT_x) */
struct fastbuf *fastbuf; /* [R W] - source/destination stream */
struct mempool *pool; /* [ I ] - parameter to image_new */
- uns cols; /* [ HI ] - number of columns, parameter to image_new */
- uns rows; /* [ HI ] - number of rows, parameter to image_new */
- uns flags; /* [ HI ] - see enum image_io_flags */
- uns jpeg_quality; /* [ W] - JPEG compression quality (1..100) */
- uns number_of_colors; /* [ H ] - number of image colors */
+ uint cols; /* [ HI ] - number of columns, parameter to image_new */
+ uint rows; /* [ HI ] - number of rows, parameter to image_new */
+ uint flags; /* [ HI ] - see enum image_io_flags */
+ uint jpeg_quality; /* [ W] - JPEG compression quality (1..100) */
+ uint number_of_colors; /* [ H ] - number of image colors */
struct color background_color; /* [ HI ] - background color, zero if undefined */
- uns exif_size; /* [ H W] - EXIF size in bytes (zero if not present) */
+ uint exif_size; /* [ H W] - EXIF size in bytes (zero if not present) */
byte *exif_data; /* [ H W] - EXIF data */
/* internals */
#endif
}
-static inline uns
+static inline uint
libjpeg_fastbuf_read_prepare(struct libjpeg_read_internals *i)
{
DBG("libjpeg_fb_read_prepare()");
byte *start;
- uns len = bdirect_read_prepare(i->fastbuf, &start);
+ uint len = bdirect_read_prepare(i->fastbuf, &start);
DBG("readed %u bytes at %p", len, start);
if (!len)
{
libjpeg_fastbuf_write_prepare(struct libjpeg_write_internals *i)
{
byte *start;
- uns len = bdirect_write_prepare(i->fastbuf, &start);
+ uint len = bdirect_write_prepare(i->fastbuf, &start);
i->fastbuf_pos = start + len;
i->dest.next_output_byte = start;
i->dest.free_in_buffer = len;
return TRUE;
}
-static inline uns
+static inline uint
libjpeg_read_byte(struct libjpeg_read_internals *i)
{
DBG("libjpeg_read_byte()");
}
static inline void
-libjpeg_read_buf(struct libjpeg_read_internals *i, byte *buf, uns len)
+libjpeg_read_buf(struct libjpeg_read_internals *i, byte *buf, uint len)
{
DBG("libjpeg_read_buf(len=%u)", len);
while (len)
if (!i->src.bytes_in_buffer)
if (!libjpeg_fill_input_buffer(&i->cinfo))
ERREXIT(&i->cinfo, JERR_CANT_SUSPEND);
- uns buf_size = i->src.bytes_in_buffer;
- uns read_size = MIN(buf_size, len);
+ uint buf_size = i->src.bytes_in_buffer;
+ uint read_size = MIN(buf_size, len);
memcpy(buf, i->src.next_input_byte, read_size);
i->src.bytes_in_buffer -= read_size;
i->src.next_input_byte += read_size;
{
struct libjpeg_read_internals *i = (struct libjpeg_read_internals *)cinfo;
struct image_io *io = i->err.io;
- uns len = libjpeg_read_byte(i) << 8;
+ uint len = libjpeg_read_byte(i) << 8;
len += libjpeg_read_byte(i);
DBG("Found APP1 marker, len=%u", len);
if (len < 2)
DBG("libjpeg_read_data()");
struct libjpeg_read_internals *i = io->read_data;
- uns read_flags = io->flags;
+ uint read_flags = io->flags;
/* Select color space */
switch (i->cinfo.jpeg_color_space)
i->cinfo.scale_denom = 2;
}
jpeg_calc_output_dimensions(&i->cinfo);
- DBG("Output dimensions %ux%u", (uns)i->cinfo.output_width, (uns)i->cinfo.output_height);
+ DBG("Output dimensions %ux%u", (uint)i->cinfo.output_width, (uint)i->cinfo.output_height);
if (unlikely(!image_io_read_data_prepare(&rdi, io, i->cinfo.output_width, i->cinfo.output_height, read_flags)))
{
jpeg_destroy_decompress(&i->cinfo);
if ((int)img->pixel_size == i->cinfo.output_components)
{
byte *pixels = img->pixels;
- for (uns r = img->rows; r--; )
+ for (uint r = img->rows; r--; )
{
jpeg_read_scanlines(&i->cinfo, (JSAMPLE **)&pixels, 1);
pixels += img->row_size;
if ((int)img->pixel_size == i.cinfo.input_components)
{
byte *pixels = img->pixels;
- for (uns r = img->rows; r--; )
+ for (uint r = img->rows; r--; )
{
jpeg_write_scanlines(&i.cinfo, (JSAMPLE **)&pixels, 1);
pixels += img->row_size;
#define MAX_FILE_SIZE (1 << 30)
#define QUANTUM_SCALE (QuantumDepth - 8)
-#define QUANTUM_TO_BYTE(x) ((uns)(x) >> QUANTUM_SCALE)
-#define BYTE_TO_QUANTUM(x) ((uns)(x) << QUANTUM_SCALE)
+#define QUANTUM_TO_BYTE(x) ((uint)(x) >> QUANTUM_SCALE)
+#define BYTE_TO_QUANTUM(x) ((uint)(x) << QUANTUM_SCALE)
#define ALPHA_TO_BYTE(x) (255 - QUANTUM_TO_BYTE(x))
#define BYTE_TO_ALPHA(x) (BYTE_TO_QUANTUM(255 - (x)))
static pthread_mutex_t libmagick_mutex = PTHREAD_MUTEX_INITIALIZER;
-static uns libmagick_counter;
+static uint libmagick_counter;
struct magick_read_data {
ExceptionInfo exception;
IMAGE_ERROR(io->context, IMAGE_ERROR_READ_FAILED, "Too long stream.");
return 0;
}
- uns buf_size = file_size;
+ uint buf_size = file_size;
byte *buf = xmalloc(buf_size);
breadb(io->fastbuf, buf, buf_size);
/* Prepare the image */
struct image_io_read_data_internals rdi;
- uns read_flags = io->flags;
- uns cs = read_flags & IMAGE_COLOR_SPACE;
+ uint read_flags = io->flags;
+ uint cs = read_flags & IMAGE_COLOR_SPACE;
if (cs != COLOR_SPACE_GRAYSCALE && cs != COLOR_SPACE_RGB)
read_flags = (read_flags & ~IMAGE_COLOR_SPACE & IMAGE_PIXEL_FORMAT) | COLOR_SPACE_RGB;
if ((read_flags & IMAGE_IO_USE_BACKGROUND) && !(read_flags & IMAGE_ALPHA))
static png_voidp
libpng_malloc(png_structp png_ptr, png_size_t size)
{
- DBG("libpng_malloc(size=%u)", (uns)size);
+ DBG("libpng_malloc(size=%u)", (uint)size);
return mp_alloc(png_get_mem_ptr(png_ptr), size);
}
static void
libpng_read_fn(png_structp png_ptr, png_bytep data, png_size_t length)
{
- DBG("libpng_read_fn(len=%u)", (uns)length);
+ DBG("libpng_read_fn(len=%u)", (uint)length);
if (unlikely(bread((struct fastbuf *)png_get_io_ptr(png_ptr), (byte *)data, length) < length))
png_error(png_ptr, "Incomplete data");
}
static void
libpng_write_fn(png_structp png_ptr, png_bytep data, png_size_t length)
{
- DBG("libpng_write_fn(len=%u)", (uns)length);
+ DBG("libpng_write_fn(len=%u)", (uint)length);
bwrite((struct fastbuf *)png_get_io_ptr(png_ptr), (byte *)data, length);
}
return 0;
}
- uns read_flags = io->flags;
+ uint read_flags = io->flags;
/* Apply transformations */
if (rd->bit_depth == 16)
struct image *img = rdi.image;
byte *pixels = img->pixels;
png_bytep rows[img->rows];
- for (uns r = 0; r < img->rows; r++, pixels += img->row_size)
+ for (uint r = 0; r < img->rows; r++, pixels += img->row_size)
rows[r] = (png_bytep)pixels;
png_read_image(rd->png_ptr, rows);
png_read_end(rd->png_ptr, rd->end_ptr);
/* Write pixels */
byte *pixels = img->pixels;
png_bytep rows[img->rows];
- for (uns r = 0; r < img->rows; r++, pixels += img->row_size)
+ for (uint r = 0; r < img->rows; r++, pixels += img->row_size)
rows[r] = (png_bytep)pixels;
png_write_image(png_ptr, rows);
png_write_end(png_ptr, info_ptr);
/* Prepare image */
struct image_io_read_data_internals rdi;
- uns read_flags = io->flags;
- uns cs = read_flags & IMAGE_COLOR_SPACE;
+ uint read_flags = io->flags;
+ uint cs = read_flags & IMAGE_COLOR_SPACE;
if (cs != COLOR_SPACE_GRAYSCALE && cs != COLOR_SPACE_RGB)
read_flags = (read_flags & ~IMAGE_COLOR_SPACE & IMAGE_CHANNELS_FORMAT) | COLOR_SPACE_RGB;
if (unlikely(!image_io_read_data_prepare(&rdi, io, image->ImageDesc.Width, image->ImageDesc.Height, read_flags)))
byte *img_end = rdi.image->pixels + rdi.image->image_size;
/* Handle deinterlacing */
- uns dein_step, dein_next;
+ uint dein_step, dein_next;
if (image->ImageDesc.Interlace)
{
DBG("Deinterlaced image");
case 1:
{
byte pal[256], *pal_pos = pal, *pal_end = pal + 256;
- for (uns i = 0; i < (uns)color_map->ColorCount; i++, pal_pos++, palette++)
+ for (uint i = 0; i < (uint)color_map->ColorCount; i++, pal_pos++, palette++)
*pal_pos = rgb_to_gray_func(palette->Red, palette->Green, palette->Blue);
if (pal_pos != pal_end)
bzero(pal_pos, pal_end - pal_pos);
# define DO_ROW_END do{ \
walk_row_start += dein_step; \
while (walk_row_start >= img_end) \
- { uns n = dein_next >> 1; walk_row_start = rdi.image->pixels + n, dein_step = dein_next; dein_next = n; } \
+ { uint n = dein_next >> 1; walk_row_start = rdi.image->pixels + n, dein_step = dein_next; dein_next = n; } \
}while(0)
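/* The macro above walks rows in GIF interlace order without an index
 * array. Assuming dein_step and dein_next both start at eight rows' worth
 * of bytes, every overflow past img_end restarts at half the previous
 * offset with twice the row density, which reproduces the standard GIF
 * passes: rows 0,8,16,..., then 4,12,..., then 2,6,..., then 1,3,... */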
# define IMAGE_WALK_PREFIX(x) walk_##x
# define IMAGE_WALK_INLINE
case 2:
{
byte pal[256 * 2], *pal_pos = pal, *pal_end = pal + 256 * 2;
- for (uns i = 0; i < (uns)color_map->ColorCount; i++, pal_pos += 2, palette++)
+ for (uint i = 0; i < (uint)color_map->ColorCount; i++, pal_pos += 2, palette++)
{
pal_pos[0] = rgb_to_gray_func(palette->Red, palette->Green, palette->Blue);
pal_pos[1] = 255;
case 3:
{
byte pal[256 * 4], *pal_pos = pal, *pal_end = pal + 256 * 4;
- for (uns i = 0; i < (uns)color_map->ColorCount; i++, pal_pos += 4, palette++)
+ for (uint i = 0; i < (uint)color_map->ColorCount; i++, pal_pos += 4, palette++)
{
pal_pos[0] = palette->Red;
pal_pos[1] = palette->Green;
case 4:
{
byte pal[256 * 4], *pal_pos = pal, *pal_end = pal + 256 * 4;
- for (uns i = 0; i < (uns)color_map->ColorCount; i++, pal_pos += 4, palette++)
+ for (uint i = 0; i < (uint)color_map->ColorCount; i++, pal_pos += 4, palette++)
{
pal_pos[0] = palette->Red;
pal_pos[1] = palette->Green;
}
struct image *
-image_io_read_data_prepare(struct image_io_read_data_internals *rdi, struct image_io *io, uns cols, uns rows, uns flags)
+image_io_read_data_prepare(struct image_io_read_data_internals *rdi, struct image_io *io, uint cols, uint rows, uint flags)
{
DBG("image_io_read_data_prepare()");
if (rdi->need_transformations = io->cols != cols || io->rows != rows ||
if (io->cols != rdi->image->cols || io->rows != rdi->image->rows)
{
DBG("Scaling image");
- uns flags = rdi->image->flags;
+ uint flags = rdi->image->flags;
if (!(rdi->need_transformations = ((io->flags ^ rdi->image->flags) & (IMAGE_NEW_FLAGS & ~IMAGE_PIXELS_ALIGNED))))
flags = io->flags;
struct image *img = image_new(io->context, io->cols, io->rows, flags, rdi->need_transformations ? NULL : io->pool);
int need_transformations;
};
-struct image *image_io_read_data_prepare(struct image_io_read_data_internals *rdi, struct image_io *io, uns cols, uns rows, uns flags);
+struct image *image_io_read_data_prepare(struct image_io_read_data_internals *rdi, struct image_io *io, uint cols, uint rows, uint flags);
int image_io_read_data_finish(struct image_io_read_data_internals *rdi, struct image_io *io);
void image_io_read_data_break(struct image_io_read_data_internals *rdi, struct image_io *io);
extern const u32 fast_div_tab[];
extern const byte fast_sqrt_tab[];
-static inline uns isqr(int x)
+static inline uint isqr(int x)
{
return x * x;
}
-static inline uns fast_div_u32_u8(uns x, uns y)
+static inline uint fast_div_u32_u8(uint x, uint y)
{
return ((u64)(x) * fast_div_tab[y]) >> 32;
}
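/* fast_div_u32_u8() trades the division for a multiplication by a
 * precomputed reciprocal: assuming fast_div_tab[y] ~ 2^32 / y, the 64-bit
 * product shifted right by 32 yields x / y for any 8-bit divisor, e.g.
 * fast_div_u32_u8(1000, 8) == 125. */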
-static inline uns fast_sqrt_u16(uns x)
+static inline uint fast_sqrt_u16(uint x)
{
- uns y;
+ uint y;
if (x < (1 << 10) - 3)
y = fast_sqrt_tab[(x + 3) >> 2] >> 3;
else if (x < (1 << 14) - 28)
return (x < y * y) ? y - 1 : y;
}
-static inline uns fast_sqrt_u32(uns x)
+static inline uint fast_sqrt_u32(uint x)
{
- uns y;
+ uint y;
if (x < (1 << 16))
{
if (x < (1 << 10) - 3)
#include <stdio.h>
#include <string.h>
-uns
+uint
get_image_obj_info(struct image_obj_info *ioi, struct odes *o)
{
byte *v = obj_find_aval(o, 'G');
return 0;
}
byte color_space[16], thumb_format[16];
- UNUSED uns cnt = sscanf(v, "%d%d%s%d%d%d%s", &ioi->cols, &ioi->rows, color_space,
+ UNUSED uint cnt = sscanf(v, "%d%d%s%d%d%d%s", &ioi->cols, &ioi->rows, color_space,
&ioi->colors, &ioi->thumb_cols, &ioi->thumb_rows, thumb_format);
ASSERT(cnt == 7);
ioi->thumb_format = (*thumb_format == 'p') ? IMAGE_FORMAT_PNG : IMAGE_FORMAT_JPEG;
return 1;
}
-uns
+uint
get_image_obj_thumb(struct image_obj_info *ioi, struct odes *o, struct mempool *pool)
{
struct oattr *a = obj_find_attr(o, 'N');
DBG("Missing image thumbnail attribute");
return 0;
}
- uns count = 0;
- uns max_len = 0;
+ uint count = 0;
+ uint max_len = 0;
for (struct oattr *b = a; b; b = b->same)
{
count++;
put_image_obj_signature(struct odes *o, struct image_signature *sig)
{
/* signatures should be short enough to fit in a single attribute */
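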
- uns size = image_signature_size(sig->len);
+ uint size = image_signature_size(sig->len);
byte buf[BASE224_ENC_LENGTH(size) + 1];
buf[base224_encode(buf, (byte *)sig, size)] = 0;
obj_set_attr(o, 'H', buf);
}
-uns
+uint
get_image_obj_signature(struct image_signature *sig, struct odes *o)
{
byte *a = obj_find_aval(o, 'H');
if (!a)
return 0;
- UNUSED uns size = base224_decode((byte *)sig, a, strlen(a));
+ UNUSED uint size = base224_decode((byte *)sig, a, strlen(a));
ASSERT(size == image_signature_size(sig->len));
return 1;
}
#endif
struct image_obj_info {
- uns cols;
- uns rows;
- uns colors;
+ uint cols;
+ uint rows;
+ uint colors;
enum image_format thumb_format;
- uns thumb_cols;
- uns thumb_rows;
- uns thumb_size;
+ uint thumb_cols;
+ uint thumb_rows;
+ uint thumb_size;
byte *thumb_data;
};
struct mempool;
struct image_signature;
-uns get_image_obj_info(struct image_obj_info *ioi, struct odes *o);
-uns get_image_obj_thumb(struct image_obj_info *ioi, struct odes *o, struct mempool *pool);
+uint get_image_obj_info(struct image_obj_info *ioi, struct odes *o);
+uint get_image_obj_thumb(struct image_obj_info *ioi, struct odes *o, struct mempool *pool);
struct image *read_image_obj_thumb(struct image_obj_info *ioi, struct fastbuf *fb, struct image_io *io, struct mempool *pool);
void put_image_obj_signature(struct odes *o, struct image_signature *sig);
-uns get_image_obj_signature(struct image_signature *sig, struct odes *o);
+uint get_image_obj_signature(struct image_signature *sig, struct odes *o);
#endif
static void
IMAGE_SCALE_PREFIX(nearest_xy)(struct image *dest, struct image *src)
{
- uns x_inc = (src->cols << 16) / dest->cols;
- uns y_inc = (src->rows << 16) / dest->rows;
- uns x_start = x_inc >> 1, x_pos;
- uns y_pos = y_inc >> 1;
+ uint x_inc = (src->cols << 16) / dest->cols;
+ uint y_inc = (src->rows << 16) / dest->rows;
+ uint x_start = x_inc >> 1, x_pos;
+ uint y_pos = y_inc >> 1;
byte *row_start;
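/* All positions above use 16.16 fixed point: x_inc/y_inc is the source
 * step per destination pixel, and starting at half a step (x_inc >> 1)
 * samples the center of each destination pixel instead of its corner. */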
# define IMAGE_WALK_PREFIX(x) walk_##x
# define IMAGE_WALK_INLINE
byte *dest_row = dest->pixels;
if (src->cols == 1)
{
- for (uns y_counter = dest->rows; y_counter--; )
+ for (uint y_counter = dest->rows; y_counter--; )
{
// FIXME
ASSERT(0);
return;
}
/* Initialize the main loop */
- uns x_inc = ((src->cols - 1) << 16) / (dest->cols - 1);
+ uint x_inc = ((src->cols - 1) << 16) / (dest->cols - 1);
# define COLS_AT_ONCE 256
byte pixel_buf[COLS_AT_ONCE * 2 * IMAGE_SCALE_PIXEL_SIZE]; /* Buffers should fit in cache */
u16 coef_buf[COLS_AT_ONCE * IMAGE_SCALE_PIXEL_SIZE];
/* Main loop */
- for (uns y_counter = dest->rows; y_counter--; )
+ for (uint y_counter = dest->rows; y_counter--; )
{
- uns x_pos = 0;
+ uint x_pos = 0;
byte *dest_pos = dest_row;
- for (uns x_counter = dest->cols; --x_counter; )
- for (uns x_counter = dest->cols; x_counter > COLS_AT_ONCE; x_counter -= COLS_AT_ONCE)
+ for (uint x_counter = dest->cols; --x_counter; )
+ for (uint x_counter = dest->cols; x_counter > COLS_AT_ONCE; x_counter -= COLS_AT_ONCE)
{
byte *pixel_buf_pos = pixel_buf;
u16 *coef_buf_pos = coef_buf;
- for (uns i = 0; i < COLS_AT_ONCE / 2; i++)
+ for (uint i = 0; i < COLS_AT_ONCE / 2; i++)
{
byte *src_pos = src_row + (x_pos >> 16) * IMAGE_SCALE_PIXEL_SIZE;
- uns ofs = x_pos & 0xffff;
+ uint ofs = x_pos & 0xffff;
x_pos += x_inc;
byte *src_pos_2 = src_row + (x_pos >> 16) * IMAGE_SCALE_PIXEL_SIZE;
- uns ofs_2 = x_pos & 0xffff;
+ uint ofs_2 = x_pos & 0xffff;
x_pos += x_inc;
*coef_buf_pos++ = ofs;
byte *pixel_buf_pos_2 = pixel_buf_pos + IMAGE_SCALE_PIXEL_SIZE;
}
/*
byte *src_pos = src_row + (x_pos >> 16) * IMAGE_SCALE_PIXEL_SIZE;
- uns ofs = x_pos & 0xffff;
+ uint ofs = x_pos & 0xffff;
x_pos += x_inc;
dest_pos[0] = LINEAR_INTERPOLATE(src_pos[0], src_pos[0 + IMAGE_SCALE_PIXEL_SIZE], ofs);
# if IMAGE_SCALE_CHANNELS >= 2
static void
IMAGE_SCALE_PREFIX(bilinear_xy)(struct image *dest, struct image *src)
{
- uns x_inc = (((src->cols - 1) << 16) - 1) / (dest->cols);
- uns y_inc = (((src->rows - 1) << 16) - 1) / (dest->rows);
- uns y_pos = 0x10000;
+ uint x_inc = (((src->cols - 1) << 16) - 1) / (dest->cols);
+ uint y_inc = (((src->rows - 1) << 16) - 1) / (dest->rows);
+ uint y_pos = 0x10000;
byte *cache[2], buf1[dest->row_pixels_size + 16], buf2[dest->row_pixels_size + 16], *pbuf[2];
byte *dest_row = dest->pixels, *dest_pos;
- uns cache_index = ~0U, cache_i = 0;
+ uint cache_index = ~0U, cache_i = 0;
pbuf[0] = cache[0] = ALIGN_PTR((void *)buf1, 16);
pbuf[1] = cache[1] = ALIGN_PTR((void *)buf2, 16);
#ifdef __SSE2__
__m128i zero = _mm_setzero_si128();
#endif
- for (uns row_counter = dest->rows; row_counter--; )
+ for (uint row_counter = dest->rows; row_counter--; )
{
dest_pos = dest_row;
- uns y_index = y_pos >> 16;
- uns y_ofs = y_pos & 0xffff;
+ uint y_index = y_pos >> 16;
+ uint y_ofs = y_pos & 0xffff;
y_pos += y_inc;
- uns x_pos = 0;
- if (y_index > (uns)(cache_index + 1))
+ uint x_pos = 0;
+ if (y_index > (uint)(cache_index + 1))
cache_index = y_index - 1;
while (y_index > cache_index)
{
cache_index++;
byte *src_row = src->pixels + cache_index * src->row_size;
byte *cache_pos = cache[1];
- for (uns col_counter = dest->cols; --col_counter; )
+ for (uint col_counter = dest->cols; --col_counter; )
{
byte *c1 = src_row + (x_pos >> 16) * IMAGE_SCALE_PIXEL_SIZE;
byte *c2 = c1 + IMAGE_SCALE_PIXEL_SIZE;
- uns ofs = x_pos & 0xffff;
+ uint ofs = x_pos & 0xffff;
cache_pos[0] = LINEAR_INTERPOLATE(c1[0], c2[0], ofs);
# if IMAGE_SCALE_CHANNELS >= 2
cache_pos[1] = LINEAR_INTERPOLATE(c1[1], c2[1], ofs);
}
IMAGE_COPY_PIXEL(cache_pos, src_row + src->row_pixels_size - IMAGE_SCALE_PIXEL_SIZE);
}
- uns i = 0;
+ uint i = 0;
#ifdef __SSE2__
__m128i coef = _mm_set1_epi16(y_ofs >> 9);
for (; (int)i < (int)dest->row_pixels_size - 15; i += 16)
byte *rdest = dest->pixels, *pdest;
u64 x_inc = ((u64)dest->cols << 32) / src->cols, x_pos;
u64 y_inc = ((u64)dest->rows << 32) / src->rows, y_pos = 0;
- uns x_inc_frac = (u64)0xffffffffff / x_inc;
- uns y_inc_frac = (u64)0xffffffffff / y_inc;
- uns final_mul = ((u64)(x_inc >> 16) * (y_inc >> 16)) >> 16;
- uns buf_size = dest->cols * IMAGE_SCALE_CHANNELS;
+ uint x_inc_frac = (u64)0xffffffffff / x_inc;
+ uint y_inc_frac = (u64)0xffffffffff / y_inc;
+ uint final_mul = ((u64)(x_inc >> 16) * (y_inc >> 16)) >> 16;
+ uint buf_size = dest->cols * IMAGE_SCALE_CHANNELS;
u32 buf[buf_size], *pbuf;
buf_size *= sizeof(u32);
bzero(buf, buf_size);
- for (uns rows_counter = src->rows; rows_counter--; )
+ for (uint rows_counter = src->rows; rows_counter--; )
{
pbuf = buf;
psrc = rsrc;
y_pos += y_inc;
if (y_pos <= 0x100000000)
{
- for (uns cols_counter = src->cols; cols_counter--; )
+ for (uint cols_counter = src->cols; cols_counter--; )
{
x_pos += x_inc;
if (x_pos <= 0x100000000)
else
{
x_pos -= 0x100000000;
- uns mul2 = (uns)(x_pos >> 16) * x_inc_frac;
- uns mul1 = 0xffffff - mul2;
+ uint mul2 = (uint)(x_pos >> 16) * x_inc_frac;
+ uint mul1 = 0xffffff - mul2;
pbuf[0] += (psrc[0] * mul1) >> 24;
pbuf[0 + IMAGE_SCALE_CHANNELS] += (psrc[0] * mul2) >> 24;
# if IMAGE_SCALE_CHANNELS >= 2
y_pos -= 0x100000000;
pdest = rdest;
rdest += dest->row_size;
- uns mul2 = (uns)(y_pos >> 16) * y_inc_frac;
- uns mul1 = 0xffffff - mul2;
- uns a0 = 0;
+ uint mul2 = (uint)(y_pos >> 16) * y_inc_frac;
+ uint mul1 = 0xffffff - mul2;
+ uint a0 = 0;
# if IMAGE_SCALE_CHANNELS >= 2
- uns a1 = 0;
+ uint a1 = 0;
# endif
# if IMAGE_SCALE_CHANNELS >= 3
- uns a2 = 0;
+ uint a2 = 0;
# endif
# if IMAGE_SCALE_CHANNELS >= 4
- uns a3 = 0;
+ uint a3 = 0;
# endif
- for (uns cols_counter = src->cols; cols_counter--; )
+ for (uint cols_counter = src->cols; cols_counter--; )
{
x_pos += x_inc;
if (x_pos <= 0x100000000)
else
{
x_pos -= 0x100000000;
- uns mul4 = (uns)(x_pos >> 16) * x_inc_frac;
- uns mul3 = 0xffffff - mul4;
- uns mul13 = ((u64)mul1 * mul3) >> 24;
- uns mul23 = ((u64)mul2 * mul3) >> 24;
- uns mul14 = ((u64)mul1 * mul4) >> 24;
- uns mul24 = ((u64)mul2 * mul4) >> 24;
+ uint mul4 = (uint)(x_pos >> 16) * x_inc_frac;
+ uint mul3 = 0xffffff - mul4;
+ uint mul13 = ((u64)mul1 * mul3) >> 24;
+ uint mul23 = ((u64)mul2 * mul3) >> 24;
+ uint mul14 = ((u64)mul1 * mul4) >> 24;
+ uint mul24 = ((u64)mul2 * mul4) >> 24;
pdest[0] = ((((psrc[0] * mul13) >> 24) + pbuf[0]) * final_mul) >> 16;
pbuf[0] = ((psrc[0] * mul23) >> 24) + a0;
pbuf[0 + IMAGE_SCALE_CHANNELS] += ((psrc[0 + IMAGE_SCALE_PIXEL_SIZE] * mul14) >> 24);
}
pdest = rdest;
pbuf = buf;
- for (uns cols_counter = dest->cols; cols_counter--; )
+ for (uint cols_counter = dest->cols; cols_counter--; )
{
pdest[0] = (pbuf[0] * final_mul) >> 16;
# if IMAGE_SCALE_CHANNELS >= 2
static void
image_scale_nearest_y(struct image *dest, struct image *src)
{
- uns y_inc = (src->rows << 16) / dest->rows;
- uns y_pos = y_inc >> 1;
+ uint y_inc = (src->rows << 16) / dest->rows;
+ uint y_pos = y_inc >> 1;
byte *dest_pos = dest->pixels;
- for (uns row_counter = dest->rows; row_counter--; )
+ for (uint row_counter = dest->rows; row_counter--; )
{
byte *src_pos = src->pixels + (y_pos >> 16) * src->row_size;
y_pos += y_inc;
/* Handle problematic special case */
if (src->rows == 1)
{
- for (uns y_counter = dest->rows; y_counter--; dest_row += dest->row_size)
+ for (uint y_counter = dest->rows; y_counter--; dest_row += dest->row_size)
memcpy(dest_row, src->pixels, src->row_pixels_size);
return;
}
/* Initialize the main loop */
- uns y_inc = ((src->rows - 1) << 16) / (dest->rows - 1), y_pos = 0;
+ uint y_inc = ((src->rows - 1) << 16) / (dest->rows - 1), y_pos = 0;
#ifdef __SSE2__
__m128i zero = _mm_setzero_si128();
#endif
/* Main loop */
- for (uns y_counter = dest->rows; --y_counter; )
+ for (uint y_counter = dest->rows; --y_counter; )
{
- uns coef = y_pos & 0xffff;
+ uint coef = y_pos & 0xffff;
byte *src_row_1 = src->pixels + (y_pos >> 16) * src->row_size;
byte *src_row_2 = src_row_1 + src->row_size;
- uns i = 0;
+ uint i = 0;
#ifdef __SSE2__
/* SSE2 */
__m128i sse_coef = _mm_set1_epi16(coef >> 9);
}
void
-image_dimensions_fit_to_box(uns *cols, uns *rows, uns max_cols, uns max_rows, uns upsample)
+image_dimensions_fit_to_box(uint *cols, uint *rows, uint max_cols, uint max_rows, uint upsample)
{
ASSERT(image_dimensions_valid(*cols, *rows));
ASSERT(image_dimensions_valid(max_cols, max_rows));
{
byte buf[1024], *line = buf;
MSG("signature: flags=0x%x df=%u dh=%u f=(%u", sig->flags, sig->df, sig->dh, sig->vec.f[0]);
- for (uns i = 1; i < IMAGE_VEC_F; i++)
+ for (uint i = 1; i < IMAGE_VEC_F; i++)
MSG(" %u", sig->vec.f[i]);
MSG(")");
LINE;
- for (uns j = 0; j < sig->len; j++)
+ for (uint j = 0; j < sig->len; j++)
{
struct image_region *reg = sig->reg + j;
MSG("region %u: wa=%u wb=%u f=(%u", j, reg->wa, reg->wb, reg->f[0]);
- for (uns i = 1; i < IMAGE_VEC_F; i++)
+ for (uint i = 1; i < IMAGE_VEC_F; i++)
MSG(" %u", reg->f[i]);
MSG(") h=(%u", reg->h[0]);
- for (uns i = 1; i < IMAGE_REG_H; i++)
+ for (uint i = 1; i < IMAGE_REG_H; i++)
MSG(" %u", reg->h[i]);
MSG(")");
LINE;
#define MSGL(x...) do{ MSG(x); LINE; }while(0)
#ifndef EXPLAIN
-static uns image_signatures_dist_integrated(struct image_signature *sig1, struct image_signature *sig2)
+static uint image_signatures_dist_integrated(struct image_signature *sig1, struct image_signature *sig2)
#else
-static uns image_signatures_dist_integrated_explain(struct image_signature *sig1, struct image_signature *sig2, void (*msg)(byte *text, void *param), void *param)
+static uint image_signatures_dist_integrated_explain(struct image_signature *sig1, struct image_signature *sig2, void (*msg)(byte *text, void *param), void *param)
#endif
{
- uns dist[IMAGE_REG_MAX * IMAGE_REG_MAX], p[IMAGE_REG_MAX], q[IMAGE_REG_MAX];
- uns n, i, j, k, l, s, d;
+ uint dist[IMAGE_REG_MAX * IMAGE_REG_MAX], p[IMAGE_REG_MAX], q[IMAGE_REG_MAX];
+ uint n, i, j, k, l, s, d;
struct image_region *reg1, *reg2;
#ifdef EXPLAIN
byte buf[1024], *line = buf;
for (j = 0, reg2 = sig2->reg; j < sig2->len; j++, reg2++)
for (i = 0, reg1 = sig1->reg; i < sig1->len; i++, reg1++)
{
- uns dt = 0, ds = 0, dp = 0, d;
- for (uns i = 0; i < IMAGE_VEC_F; i++)
+ uint dt = 0, ds = 0, dp = 0, d;
+ for (uint i = 0; i < IMAGE_VEC_F; i++)
dt += image_sig_cmp_features_weights[i] * isqr((int)reg1->f[i] - (int)reg2->f[i]);
- for (uns i = 0; i < 3; i++)
+ for (uint i = 0; i < 3; i++)
ds += image_sig_cmp_features_weights[IMAGE_VEC_F + i] * isqr((int)reg1->h[i] - (int)reg2->h[i]);
- for (uns i = 3; i < 5; i++)
+ for (uint i = 3; i < 5; i++)
dp += image_sig_cmp_features_weights[IMAGE_VEC_F + i] * isqr((int)reg1->h[i] - (int)reg2->h[i]);
#if 0
int x1, y1, x2, y2;
}
MSGL("%d %d %d %d", x1, y1, x2, y2);
dp = image_sig_cmp_features_weights[IMAGE_VEC_F + 3] * isqr(x1 - x2) +
- image_sig_cmp_features_weights[IMAGE_VEC_F + 4] * isqr(y1 - y2);
+ image_sig_cmp_features_weights[IMAGE_VEC_F + 4] * isqr(y1 - y2);
#endif
#if 0
d = dt * (4 + MIN(8, (ds >> 12))) * (4 + MIN(8, (dp >> 10))) + (ds >> 11) + (dp >> 10);
dist[n++] = (d << 8) + i + (j << 4);
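/* Each entry packs the region pair into the low byte under the distance
 * (i in bits 0-3, j in bits 4-7), so the array can be sorted by distance
 * while the matching regions stay recoverable as d & 15 and (d >> 4) & 15. */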
MSG("[%u, %u] d=%u dt=%u ds=%u dp=%u df=(%d", i, j, d, dt, ds, dp, (int)reg1->f[0] - (int)reg2->f[0]);
#ifdef EXPLAIN
- for (uns i = 1; i < IMAGE_VEC_F; i++)
+ for (uint i = 1; i < IMAGE_VEC_F; i++)
MSG(" %d", (int)reg1->f[i] - (int)reg2->f[i]);
MSG(") dh=(%d", (int)reg1->h[0] - (int)reg2->h[0]);
- for (uns i = 1; i < IMAGE_REG_H; i++)
+ for (uint i = 1; i < IMAGE_REG_H; i++)
MSG(" %d", (int)reg1->h[i] - (int)reg2->h[i]);
MSGL(")");
#endif
for (j = 0, reg2 = sig2->reg; j < sig2->len; j++, reg2++)
for (i = 0, reg1 = sig1->reg; i < sig1->len; i++, reg1++)
{
- uns dt = 0;
- for (uns i = 0; i < IMAGE_VEC_F; i++)
+ uint dt = 0;
+ for (uint i = 0; i < IMAGE_VEC_F; i++)
dt += image_sig_cmp_features_weights[i] * isqr((int)reg1->f[i] - (int)reg2->f[i]);
dist[n++] = (dt << 12) + i + (j << 4);
#ifdef EXPLAIN
MSG("[%u, %u] dt=%u df=(%d", i, j, dt, (int)reg1->f[0] - (int)reg2->f[0]);
- for (uns i = 1; i < IMAGE_VEC_F; i++)
+ for (uint i = 1; i < IMAGE_VEC_F; i++)
MSG(" %d", (int)reg1->f[i] - (int)reg2->f[i]);
MSGL(")");
#endif
image_signatures_dist_integrated_sort(dist, n);
/* Compute significance matrix and resulting distance */
- uns sum = 0;
+ uint sum = 0;
MSGL("Significance matrix:");
for (k = 0, l = 128; l; k++)
{
reg1 = sig1->reg + i;
reg2 = sig2->reg + j;
MSG("[%u, %u] s=%u d=%u df=(%d", i, j, s, d, (int)reg1->f[0] - (int)reg2->f[0]);
- for (uns i = 1; i < IMAGE_VEC_F; i++)
+ for (uint i = 1; i < IMAGE_VEC_F; i++)
MSG(" %d", (int)reg1->f[i] - (int)reg2->f[i]);
if (!((sig1->flags | sig2->flags) & IMAGE_SIG_TEXTURED))
{
MSG(") dh=(%d", (int)reg1->h[0] - (int)reg2->h[0]);
- for (uns i = 1; i < IMAGE_REG_H; i++)
+ for (uint i = 1; i < IMAGE_REG_H; i++)
MSG(" %d", (int)reg1->h[i] - (int)reg2->h[i]);
}
MSGL(")");
d = sum / 32;
- uns a = sig1->cols * sig2->rows;
- uns b = sig1->rows * sig2->cols;
+ uint a = sig1->cols * sig2->rows;
+ uint b = sig1->rows * sig2->cols;
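/* a/b equals the ratio of the two aspect ratios (cols1/rows1 versus
 * cols2/rows2), so the branches below penalize the distance more the
 * further apart the image shapes are. */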
if (a < 2 * b && b < 2 * a)
d = d * 2;
else if (a < 4 * b && b < 4 * a)
}
#ifndef EXPLAIN
-static uns image_signatures_dist_fuzzy(struct image_signature *sig1, struct image_signature *sig2)
+static uint image_signatures_dist_fuzzy(struct image_signature *sig1, struct image_signature *sig2)
#else
-static uns image_signatures_dist_fuzzy_explain(struct image_signature *sig1, struct image_signature *sig2, void (*msg)(byte *text, void *param), void *param)
+static uint image_signatures_dist_fuzzy_explain(struct image_signature *sig1, struct image_signature *sig2, void (*msg)(byte *text, void *param), void *param)
#endif
{
#ifdef EXPLAIN
return ~0U;
}
- uns cnt1 = sig1->len;
- uns cnt2 = sig2->len;
+ uint cnt1 = sig1->len;
+ uint cnt2 = sig2->len;
struct image_region *reg1 = sig1->reg;
struct image_region *reg2 = sig2->reg;
- uns mf[IMAGE_REG_MAX][IMAGE_REG_MAX], mh[IMAGE_REG_MAX][IMAGE_REG_MAX];
- uns lf[IMAGE_REG_MAX * 2], lh[IMAGE_REG_MAX * 2];
- uns df = sig1->df + sig2->df, dh = sig1->dh + sig2->dh;
+ uint mf[IMAGE_REG_MAX][IMAGE_REG_MAX], mh[IMAGE_REG_MAX][IMAGE_REG_MAX];
+ uint lf[IMAGE_REG_MAX * 2], lh[IMAGE_REG_MAX * 2];
+ uint df = sig1->df + sig2->df, dh = sig1->dh + sig2->dh;
/* Compute distance matrix */
- for (uns i = 0; i < cnt1; i++)
- for (uns j = 0; j < cnt2; j++)
+ for (uint i = 0; i < cnt1; i++)
+ for (uint j = 0; j < cnt2; j++)
{
- uns d = 0;
- for (uns k = 0; k < IMAGE_VEC_F; k++)
+ uint d = 0;
+ for (uint k = 0; k < IMAGE_VEC_F; k++)
{
int dif = reg1[i].f[k] - reg2[j].f[k];
d += image_sig_cmp_features_weights[k] * dif * dif;
}
mf[i][j] = d;
d = 0;
- for (uns k = 0; k < IMAGE_REG_H; k++)
+ for (uint k = 0; k < IMAGE_REG_H; k++)
{
int dif = reg1[i].h[k] - reg2[j].h[k];
d += image_sig_cmp_features_weights[k + IMAGE_VEC_F] * dif * dif;
mh[i][j] = d;
}
- uns lfs = 0, lhs = 0;
- for (uns i = 0; i < cnt1; i++)
+ uint lfs = 0, lhs = 0;
+ for (uint i = 0; i < cnt1; i++)
{
- uns f = mf[i][0], h = mh[i][0];
- for (uns j = 1; j < cnt2; j++)
+ uint f = mf[i][0], h = mh[i][0];
+ for (uint j = 1; j < cnt2; j++)
{
f = MIN(f, mf[i][j]);
h = MIN(h, mh[i][j]);
lfs += lf[i] * (6 * reg1[i].wa + 2 * reg1[i].wb);
lhs += lh[i] * reg1[i].wa;
}
- for (uns i = 0; i < cnt2; i++)
+ for (uint i = 0; i < cnt2; i++)
{
- uns f = mf[0][i], h = mh[0][i];
- for (uns j = 1; j < cnt1; j++)
+ uint f = mf[0][i], h = mh[0][i];
+ for (uint j = 1; j < cnt1; j++)
{
f = MIN(f, mf[j][i]);
h = MIN(h, mh[j][i]);
lhs += lh[i] * reg2[i].wa;
}
- uns measure = lfs * 6 + lhs * 2 * 8;
+ uint measure = lfs * 6 + lhs * 2 * 8;
#ifdef EXPLAIN
/* Display similarity vectors */
MSG("Lf=(");
- for (uns i = 0; i < cnt1 + cnt2; i++)
+ for (uint i = 0; i < cnt1 + cnt2; i++)
{
if (i)
MSG(" ");
}
MSGL(")");
MSG("Lh=(");
- for (uns i = 0; i < cnt1 + cnt2; i++)
+ for (uint i = 0; i < cnt1 + cnt2; i++)
{
if (i)
MSG(" ");
}
#ifndef EXPLAIN
-static uns image_signatures_dist_average(struct image_signature *sig1, struct image_signature *sig2)
+static uint image_signatures_dist_average(struct image_signature *sig1, struct image_signature *sig2)
#else
-static uns image_signatures_dist_average_explain(struct image_signature *sig1, struct image_signature *sig2, void (*msg)(byte *text, void *param), void *param)
+static uint image_signatures_dist_average_explain(struct image_signature *sig1, struct image_signature *sig2, void (*msg)(byte *text, void *param), void *param)
#endif
{
#ifdef EXPLAIN
MSGL("Average matching");
#endif
- uns dist = 0;
- for (uns i = 0; i < IMAGE_VEC_F; i++)
+ uint dist = 0;
+ for (uint i = 0; i < IMAGE_VEC_F; i++)
{
- uns d = image_sig_cmp_features_weights[0] * isqr((int)sig1->vec.f[i] - (int)sig2->vec.f[i]);
+ uint d = image_sig_cmp_features_weights[0] * isqr((int)sig1->vec.f[i] - (int)sig2->vec.f[i]);
MSGL("feature %u: d=%u (%u %u)", i, d, sig1->vec.f[i], sig2->vec.f[i]);
dist += d;
}
#ifndef EXPLAIN
#define CALL(x) image_signatures_dist_##x(sig1, sig2)
-uns image_signatures_dist(struct image_signature *sig1, struct image_signature *sig2)
+uint image_signatures_dist(struct image_signature *sig1, struct image_signature *sig2)
#else
#define CALL(x) image_signatures_dist_##x##_explain(sig1, sig2, msg, param)
-uns image_signatures_dist_explain(struct image_signature *sig1, struct image_signature *sig2, void (*msg)(byte *text, void *param), void *param)
+uint image_signatures_dist_explain(struct image_signature *sig1, struct image_signature *sig2, void (*msg)(byte *text, void *param), void *param)
#endif
{
if (!sig1->len)
#include <stdio.h>
#define ASORT_PREFIX(x) image_signatures_dist_integrated_##x
-#define ASORT_KEY_TYPE uns
+#define ASORT_KEY_TYPE uint
#include <ucw/sorter/array-simple.h>
#define EXPLAIN
{
byte *p = buf;
*p++ = '(';
- for (uns i = 0; i < IMAGE_VEC_F; i++)
+ for (uint i = 0; i < IMAGE_VEC_F; i++)
{
if (i)
*p++ = ' ';
{
byte *p = buf;
p += sprintf(p, "(txt=");
- for (uns i = 0; i < IMAGE_REG_F; i++)
+ for (uint i = 0; i < IMAGE_REG_F; i++)
{
if (i)
*p++ = ' ';
p += sprintf(p, "%u", reg->f[i]);
}
p += sprintf(p, " shp=");
- for (uns i = 0; i < IMAGE_REG_H; i++)
+ for (uint i = 0; i < IMAGE_REG_H; i++)
{
if (i)
*p++ = ' ';
{
struct image *image = data->image;
struct image_sig_block *block = data->blocks;
- uns sum[IMAGE_VEC_F];
+ uint sum[IMAGE_VEC_F];
bzero(sum, sizeof(sum));
/* Every block of 4x4 pixels */
byte *row_start = image->pixels;
- for (uns block_y = 0; block_y < data->rows; block_y++, row_start += image->row_size * 4)
+ for (uint block_y = 0; block_y < data->rows; block_y++, row_start += image->row_size * 4)
{
byte *p = row_start;
- for (uns block_x = 0; block_x < data->cols; block_x++, p += 12, block++)
+ for (uint block_x = 0; block_x < data->cols; block_x++, p += 12, block++)
{
int t[16], s[16], *tp = t;
block->x = block_x;
block->y = block_y;
/* Convert pixels to Luv color space and compute average coefficients */
- uns l_sum = 0, u_sum = 0, v_sum = 0;
+ uint l_sum = 0, u_sum = 0, v_sum = 0;
byte *p2 = p;
if (block_x < data->full_cols && block_y < data->full_rows)
{
- for (uns y = 0; y < 4; y++, p2 += image->row_size - 12)
- for (uns x = 0; x < 4; x++, p2 += 3)
+ for (uint y = 0; y < 4; y++, p2 += image->row_size - 12)
+ for (uint x = 0; x < 4; x++, p2 += 3)
{
byte luv[3];
srgb_to_luv_pixel(luv, p2);
/* Incomplete square near the edge */
else
{
- uns x, y;
- uns square_cols = (block_x < data->full_cols) ? 4 : image->cols & 3;
- uns square_rows = (block_y < data->full_rows) ? 4 : image->rows & 3;
+ uint x, y;
+ uint square_cols = (block_x < data->full_cols) ? 4 : image->cols & 3;
+ uint square_rows = (block_y < data->full_rows) ? 4 : image->rows & 3;
for (y = 0; y < square_rows; y++, p2 += image->row_size)
{
byte *p3 = p2;
tp++;
}
block->area = square_cols * square_rows;
- uns inv = 0x10000 / block->area;
+ uint inv = 0x10000 / block->area;
sum[0] += l_sum;
sum[1] += u_sum;
sum[2] += v_sum;
# define DAUB_3 -8481 /* (1 - sqrt 3) / (4 * sqrt 2) * 0x10000 */
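/* Sanity check of the constant above: (1 - sqrt(3)) / (4 * sqrt(2)) is
 * roughly -0.1294095, and -0.1294095 * 0x10000 rounds to -8481 as used;
 * DAUB_0..DAUB_3 are the four Daubechies-4 filter taps in 16.16 fixed
 * point. */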
/* ... to the rows */
- uns i;
+ uint i;
for (i = 0; i < 16; i += 4)
{
s[i + 0] = (DAUB_0 * t[i + 2] + DAUB_1 * t[i + 3] + DAUB_2 * t[i + 0] + DAUB_3 * t[i + 1]) / 0x10000;
}
/* Compute features average */
- uns inv = 0xffffffffU / data->area;
- for (uns i = 0; i < IMAGE_VEC_F; i++)
+ uint inv = 0xffffffffU / data->area;
+ for (uint i = 0; i < IMAGE_VEC_F; i++)
data->f[i] = ((u64)sum[i] * inv) >> 32;
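/* inv ~ 2^32 / area, so ((u64)sum * inv) >> 32 approximates sum / area
 * without a per-feature division; for the sums involved the result
 * differs from exact division by at most one. */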
if (image->cols < image_sig_min_width || image->rows < image_sig_min_height)
void
image_sig_finish(struct image_sig_data *data, struct image_signature *sig)
{
- for (uns i = 0; i < IMAGE_VEC_F; i++)
+ for (uint i = 0; i < IMAGE_VEC_F; i++)
sig->vec.f[i] = data->f[i];
sig->len = data->regions_count;
sig->flags = data->flags;
/* For each region */
u64 w_total = 0;
- uns w_border = MIN(data->cols, data->rows) * image_sig_border_size;
+ uint w_border = MIN(data->cols, data->rows) * image_sig_border_size;
int w_mul = w_border ? image_sig_border_bonus * 256 / (int)w_border : 0;
- for (uns i = 0; i < sig->len; i++)
+ for (uint i = 0; i < sig->len; i++)
{
struct image_sig_region *r = data->regions + i;
DBG("Processing region %u: count=%u", i, r->count);
{
x_sum += b->x;
y_sum += b->y;
- uns d = b->x;
+ uint d = b->x;
d = MIN(d, b->y);
d = MIN(d, data->cols - b->x - 1);
d = MIN(d, data->rows - b->y - 1);
}
w_total += w_sum;
r->w_sum = w_sum;
- uns x_avg = x_sum / r->count;
- uns y_avg = y_sum / r->count;
+ uint x_avg = x_sum / r->count;
+ uint y_avg = y_sum / r->count;
DBG(" centroid=(%u %u)", x_avg, y_avg);
/* Compute normalized inertia */
u64 sum1 = 0, sum2 = 0, sum3 = 0;
for (struct image_sig_block *b = r->blocks; b; b = b->next)
{
- uns inc2 = isqr(x_avg - b->x) + isqr(y_avg - b->y);
- uns inc1 = fast_sqrt_u32(inc2);
+ uint inc2 = isqr(x_avg - b->x) + isqr(y_avg - b->y);
+ uint inc1 = fast_sqrt_u32(inc2);
sum1 += inc1;
sum2 += inc2;
sum3 += inc1 * inc2;
sig->reg[i].h[0] = CLAMP(image_sig_inertia_scale[0] * sum1 * ((3 * M_PI * M_PI) / 2) * pow(r->count, -1.5), 0, 255);
sig->reg[i].h[1] = CLAMP(image_sig_inertia_scale[1] * sum2 * ((4 * M_PI * M_PI * M_PI) / 2) / ((u64)r->count * r->count), 0, 255);
sig->reg[i].h[2] = CLAMP(image_sig_inertia_scale[2] * sum3 * ((5 * M_PI * M_PI * M_PI * M_PI) / 2) * pow(r->count, -2.5), 0, 255);
- sig->reg[i].h[3] = (uns)x_avg * 127 / data->cols;
- sig->reg[i].h[4] = (uns)y_avg * 127 / data->rows;
+ sig->reg[i].h[3] = (uint)x_avg * 127 / data->cols;
+ sig->reg[i].h[4] = (uint)y_avg * 127 / data->rows;
}
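/* Summary of the shape vector filled above: h[0..2] are the scaled
 * normalized inertia moments of the region, h[3..4] its centroid mapped
 * into 0..127 independently of the image dimensions. */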
/* Compute average differences */
}
else
{
- uns cnt = 0;
- for (uns i = 0; i < sig->len; i++)
- for (uns j = i + 1; j < sig->len; j++)
+ uint cnt = 0;
+ for (uint i = 0; i < sig->len; i++)
+ for (uint j = i + 1; j < sig->len; j++)
{
- uns d = 0;
- for (uns k = 0; k < IMAGE_REG_F; k++)
+ uint d = 0;
+ for (uint k = 0; k < IMAGE_REG_F; k++)
d += image_sig_cmp_features_weights[k] * isqr(sig->reg[i].f[k] - sig->reg[j].f[k]);
df += fast_sqrt_u32(d);
d = 0;
- for (uns k = 0; k < IMAGE_REG_H; k++)
+ for (uint k = 0; k < IMAGE_REG_H; k++)
d += image_sig_cmp_features_weights[k + IMAGE_REG_F] * isqr(sig->reg[i].h[k] - sig->reg[j].h[k]);
dh += fast_sqrt_u32(d);
cnt++;
DBG("Average regions difs: df=%u dh=%u", sig->df, sig->dh);
/* Compute normalized weights */
- uns wa = 128, wb = 128;
- for (uns i = sig->len; --i > 0; )
+ uint wa = 128, wb = 128;
+ for (uint i = sig->len; --i > 0; )
{
struct image_sig_region *r = data->regions + i;
wa -= sig->reg[i].wa = CLAMP(r->count * 128 / data->blocks_count, 1, (int)(wa - i));
/* Dump regions features */
#ifdef LOCAL_DEBUG
- for (uns i = 0; i < sig->len; i++)
+ for (uint i = 0; i < sig->len; i++)
{
byte buf[IMAGE_REGION_DUMP_MAX];
image_region_dump(buf, sig->reg + i);
#ifdef LOCAL_DEBUG
static void
-dump_segmentation(struct image_sig_region *regions, uns regions_count)
+dump_segmentation(struct image_sig_region *regions, uint regions_count)
{
- uns cols = 0, rows = 0;
- for (uns i = 0; i < regions_count; i++)
+ uint cols = 0, rows = 0;
+ for (uint i = 0; i < regions_count; i++)
for (struct image_sig_block *b = regions[i].blocks; b; b = b->next)
{
cols = MAX(cols, b->x + 1);
rows = MAX(rows, b->y + 1);
}
- uns size = (cols + 1) * rows;
+ uint size = (cols + 1) * rows;
byte buf[size];
bzero(buf, size);
- for (uns i = 0; i < regions_count; i++)
+ for (uint i = 0; i < regions_count; i++)
{
byte c = (i < 10) ? '0' + i : 'A' - 10 + i;
for (struct image_sig_block *b = regions[i].blocks; b; b = b->next)
buf[b->x + b->y * (cols + 1)] = c;
}
- for (uns i = 0; i < rows; i++)
+ for (uint i = 0; i < rows; i++)
log(L_DEBUG, "%s", &buf[i * (cols + 1)]);
}
#endif
block->next = region->blocks;
region->blocks = block;
region->count++;
- for (uns i = 0; i < IMAGE_VEC_F; i++)
+ for (uint i = 0; i < IMAGE_VEC_F; i++)
{
region->b[i] += block->v[i];
region->c[i] += isqr(block->v[i]);
{
u64 a = 0;
region->e = 0;
- for (uns i = 0; i < IMAGE_VEC_F; i++)
+ for (uint i = 0; i < IMAGE_VEC_F; i++)
{
region->e += region->c[i];
a += (u64)region->b[i] * region->b[i];
}
region->e -= a / region->count;
- DBG("Finished region %u", (uns)region->e / region->count);
+ DBG("Finished region %u", (uint)region->e / region->count);
}
}
-static inline uns
+static inline uint
prequant_heap_cmp(struct image_sig_region *a, struct image_sig_region *b)
{
return a->e > b->e;
}
#define ASORT_PREFIX(x) prequant_##x
-#define ASORT_KEY_TYPE uns
+#define ASORT_KEY_TYPE uint
#include <ucw/sorter/array-simple.h>
-static uns
-prequant(struct image_sig_block *blocks, uns blocks_count, struct image_sig_region *regions)
+static uint
+prequant(struct image_sig_block *blocks, uint blocks_count, struct image_sig_region *regions)
{
DBG("Starting pre-quantization");
- uns regions_count, heap_count, axis;
+ uint regions_count, heap_count, axis;
struct image_sig_block *blocks_end = blocks + blocks_count, *block, *block2;
struct image_sig_region *heap[IMAGE_REG_MAX + 1], *region, *region2;
{
region = heap[1];
DBG("Step... regions_count=%u heap_count=%u region->count=%u, region->e=%u",
- regions_count, heap_count, region->count, (uns)region->e);
+ regions_count, heap_count, region->count, (uint)region->e);
if (region->count < 2 ||
region->e < image_sig_prequant_thresholds[regions_count - 1] * blocks_count)
{
/* Select axis to split - the one with maximum average quadratic error */
axis = 0;
u64 cov = (u64)region->count * region->c[0] - (u64)region->b[0] * region->b[0];
- for (uns i = 1; i < 6; i++)
+ for (uint i = 1; i < 6; i++)
{
- uns j = (u64)region->count * region->c[i] - (u64)region->b[i] * region->b[i];
+ uint j = (u64)region->count * region->c[i] - (u64)region->b[i] * region->b[i];
if (j > cov)
{
axis = i;
cov = j;
}
}
- DBG("Splitting axis %u with average quadratic error %u", axis, (uns)(cov / (region->count * region->count)));
+ DBG("Splitting axis %u with average quadratic error %u", axis, (uint)(cov / (region->count * region->count)));
/* Sort values on the split axis */
- uns val[256], cnt[256], cval;
+ uint val[256], cnt[256], cval;
if (region->count > 64)
{
bzero(cnt, sizeof(cnt));
for (block = region->blocks; block; block = block->next)
cnt[block->v[axis]]++;
cval = 0;
- for (uns i = 0; i < 256; i++)
+ for (uint i = 0; i < 256; i++)
if (cnt[i])
{
val[cval] = i;
else
{
block = region->blocks;
- for (uns i = 0; i < region->count; i++, block = block->next)
+ for (uint i = 0; i < region->count; i++, block = block->next)
val[i] = block->v[axis];
prequant_sort(val, region->count);
cval = 1;
cnt[0] = 1;
- for (uns i = 1; i < region->count; i++)
+ for (uint i = 1; i < region->count; i++)
if (val[i] == val[cval - 1])
cnt[cval - 1]++;
else
}
/* Select split value - to minimize error */
- uns b1 = val[0] * cnt[0];
- uns c1 = isqr(val[0]) * cnt[0];
- uns b2 = region->b[axis] - b1;
- uns c2 = region->c[axis] - c1;
- uns i = cnt[0], j = region->count - cnt[0];
+ uint b1 = val[0] * cnt[0];
+ uint c1 = isqr(val[0]) * cnt[0];
+ uint b2 = region->b[axis] - b1;
+ uint c2 = region->c[axis] - c1;
+ uint i = cnt[0], j = region->count - cnt[0];
u64 best_err = c1 - (u64)b1 * b1 / i + c2 - (u64)b2 * b2 / j;
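/* Each side of a candidate split contributes sum(x^2) - sum(x)^2 / n,
 * i.e. c - b*b/n, to the total squared error; the loop below slides the
 * boundary one distinct value at a time and keeps the split that
 * minimizes this total. */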
- uns split_val = val[0];
- for (uns k = 1; k < cval - 1; k++)
+ uint split_val = val[0];
+ for (uint k = 1; k < cval - 1; k++)
{
- uns b0 = val[k] * cnt[k];
- uns c0 = isqr(val[k]) * cnt[k];
+ uint b0 = val[k] * cnt[k];
+ uint c0 = isqr(val[k]) * cnt[k];
b1 += b0;
b2 -= b0;
c1 += c0;
/* Post-quantization - run a few K-mean iterations to improve pre-quantized regions */
-static uns
-postquant(struct image_sig_block *blocks, uns blocks_count, struct image_sig_region *regions, uns regions_count)
+static uint
+postquant(struct image_sig_block *blocks, uint blocks_count, struct image_sig_region *regions, uint regions_count)
{
DBG("Starting post-quantization");
struct image_sig_block *blocks_end = blocks + blocks_count, *block;
struct image_sig_region *regions_end = regions + regions_count, *region;
- uns error = 0, last_error;
+ uint error = 0, last_error;
/* Initialize regions and initial segmentation error */
for (region = regions; region != regions_end; )
{
- uns inv = 0xffffffffU / region->count;
- for (uns i = 0; i < IMAGE_VEC_F; i++)
+ uint inv = 0xffffffffU / region->count;
+ for (uint i = 0; i < IMAGE_VEC_F; i++)
{
region->a[i] = ((u64)region->b[i] * inv) >> 32;
error += region->c[i] - region->a[i] * region->b[i];
}
/* Convergence cycle */
- for (uns step = 0; step < image_sig_postquant_max_steps; step++)
+ for (uint step = 0; step < image_sig_postquant_max_steps; step++)
{
DBG("Step...");
for (block = blocks; block != blocks_end; block++)
{
struct image_sig_region *best_region = NULL;
- uns best_dist = ~0U;
+ uint best_dist = ~0U;
for (region = regions; region != regions_end; region++)
{
- uns dist =
+ uint dist =
isqr(block->v[0] - region->a[0]) +
isqr(block->v[1] - region->a[1]) +
isqr(block->v[2] - region->a[2]) +
region->count++;
block->next = region->blocks;
region->blocks = block;
- for (uns i = 0; i < IMAGE_VEC_F; i++)
+ for (uint i = 0; i < IMAGE_VEC_F; i++)
{
region->b[i] += block->v[i];
region->c[i] += isqr(block->v[i]);
for (region = regions; region != regions_end; )
if (region->count)
{
- uns inv = 0xffffffffU / region->count;
- for (uns i = 0; i < IMAGE_VEC_F; i++)
+ uint inv = 0xffffffffU / region->count;
+ for (uint i = 0; i < IMAGE_VEC_F; i++)
{
region->a[i] = ((u64)region->b[i] * inv) >> 32;
error += region->c[i] - region->a[i] * region->b[i];
return;
}
- uns cols = data->cols;
- uns rows = data->rows;
- uns cell_cols = MIN((cols + 1) / 2, MAX_CELLS_COLS);
- uns cell_rows = MIN((rows + 1) / 2, MAX_CELLS_ROWS);
- uns cell_x[MAX_CELLS_COLS + 1];
- uns cell_y[MAX_CELLS_ROWS + 1];
- uns i, j;
+ uint cols = data->cols;
+ uint rows = data->rows;
+ uint cell_cols = MIN((cols + 1) / 2, MAX_CELLS_COLS);
+ uint cell_rows = MIN((rows + 1) / 2, MAX_CELLS_ROWS);
+ uint cell_x[MAX_CELLS_COLS + 1];
+ uint cell_y[MAX_CELLS_ROWS + 1];
+ uint i, j;
u32 cnt[IMAGE_REG_MAX];
if (cell_cols * cell_rows < 4)
cell_y[cell_rows] = rows;
/* Preprocess blocks */
- for (uns i = 0; i < data->regions_count; i++)
+ for (uint i = 0; i < data->regions_count; i++)
for (struct image_sig_block *block = data->regions[i].blocks; block; block = block->next)
block->region = i;
/* Process cells */
double e = 0;
- for (uns j = 0; j < cell_rows; j++)
- for (uns i = 0; i < cell_cols; i++)
+ for (uint j = 0; j < cell_rows; j++)
+ for (uint i = 0; i < cell_cols; i++)
{
- uns cell_area = 0;
+ uint cell_area = 0;
bzero(cnt, data->regions_count * sizeof(u32));
struct image_sig_block *b1 = data->blocks + cell_x[i] + cell_y[j] * cols, *b2;
- for (uns y = cell_y[j]; y < cell_y[j + 1]; y++, b1 += cols)
+ for (uint y = cell_y[j]; y < cell_y[j + 1]; y++, b1 += cols)
{
b2 = b1;
- for (uns x = cell_x[i]; x < cell_x[i + 1]; x++, b2++)
+ for (uint x = cell_x[i]; x < cell_x[i + 1]; x++, b2++)
{
cnt[b2->region]++;
cell_area++;
}
}
- for (uns k = 0; k < data->regions_count; k++)
+ for (uint k = 0; k < data->regions_count; k++)
{
int a = data->blocks_count * cnt[k] - cell_area * data->regions[k].count;
e += (double)a * a / ((double)isqr(data->regions[k].count) * cell_area);
#endif
/* Configuration */
-extern uns image_sig_min_width, image_sig_min_height;
-extern uns *image_sig_prequant_thresholds;
-extern uns image_sig_postquant_min_steps, image_sig_postquant_max_steps, image_sig_postquant_threshold;
+extern uint image_sig_min_width, image_sig_min_height;
+extern uint *image_sig_prequant_thresholds;
+extern uint image_sig_postquant_min_steps, image_sig_postquant_max_steps, image_sig_postquant_threshold;
extern double image_sig_border_size;
extern int image_sig_border_bonus;
extern double image_sig_inertia_scale[];
extern double image_sig_textured_threshold;
extern int image_sig_compare_method;
-extern uns image_sig_cmp_features_weights[];
+extern uint image_sig_cmp_features_weights[];
#define IMAGE_VEC_F 6
#define IMAGE_REG_F IMAGE_VEC_F
};
};
-static inline uns image_signature_size(uns len)
+static inline uint image_signature_size(uint len)
{
return OFFSETOF(struct image_signature, reg) + len * sizeof(struct image_region);
}
/* sig-cmp.c */
-uns image_signatures_dist(struct image_signature *sig1, struct image_signature *sig2);
-uns image_signatures_dist_explain(struct image_signature *sig1, struct image_signature *sig2, void (*msg)(byte *text, void *param), void *param);
+uint image_signatures_dist(struct image_signature *sig1, struct image_signature *sig2);
+uint image_signatures_dist_explain(struct image_signature *sig1, struct image_signature *sig2, void (*msg)(byte *text, void *param), void *param);
#endif
static void
print_matrix(double m[9])
{
- for (uns j = 0; j < 3; j++)
+ for (uint j = 0; j < 3; j++)
{
- for (uns i = 0; i < 3; i++)
+ for (uint i = 0; i < 3; i++)
printf(" %12.8f", m[i + j * 3]);
printf("\n");
}
{ NULL, 0, 0, 0 }
};
-static uns verbose = 1;
+static uint verbose = 1;
static byte *file_name_1;
static byte *file_name_2;
static enum image_format format_1;
static enum image_format format_2;
static struct color background_color;
-static uns transformations = IMAGE_DUP_TRANS_ALL;
+static uint transformations = IMAGE_DUP_TRANS_ALL;
#define MSG(x...) do{ if (verbose) msg(L_INFO, ##x); }while(0)
struct mempool *pool = mp_new(1 << 18);
MSG("Creating internal structures");
dup1 = mp_start(pool, image_dup_estimate_size(img1->cols, img1->rows, 1, idc.qtree_limit));
- uns size = image_dup_new(&idc, img1, dup1, 1);
+ uint size = image_dup_new(&idc, img1, dup1, 1);
TRY(size);
mp_end(pool, (void *)dup1 + size);
dup2 = mp_start(pool, image_dup_estimate_size(img2->cols, img2->rows, 1, idc.qtree_limit));
{ NULL, 0, 0, 0 }
};
-static uns verbose = 1;
+static uint verbose = 1;
static byte *file_name_1;
static byte *file_name_2;
static enum image_format format_1;
static struct color background_color;
static byte *segmentation_name_1;
static byte *segmentation_name_2;
-static uns display_base64;
-static uns display_base224;
+static uint display_base64;
+static uint display_base224;
#define MSG(x...) do{ if (verbose) msg(L_INFO, ##x); }while(0)
#define TRY(x) do{ if (!(x)) exit(1); }while(0)
byte buf[MAX(IMAGE_VECTOR_DUMP_MAX, IMAGE_REGION_DUMP_MAX)];
image_vector_dump(buf, &sig->vec);
MSG("vector: %s", buf);
- for (uns i = 0; i < sig->len; i++)
+ for (uint i = 0; i < sig->len; i++)
{
image_region_dump(buf, sig->reg + i);
MSG("region %u: %s", i, buf);
}
- uns sig_size = image_signature_size(sig->len);
+ uint sig_size = image_signature_size(sig->len);
if (display_base64)
{
byte buf[BASE64_ENC_LENGTH(sig_size) + 1];
- uns enc_size = base64_encode(buf, (byte *)sig, sig_size);
+ uint enc_size = base64_encode(buf, (byte *)sig, sig_size);
buf[enc_size] = 0;
MSG("base64 encoded: %s", buf);
}
if (display_base224)
{
byte buf[BASE224_ENC_LENGTH(sig_size) + 1];
- uns enc_size = base224_encode(buf, (byte *)sig, sig_size);
+ uint enc_size = base224_encode(buf, (byte *)sig, sig_size);
buf[enc_size] = 0;
MSG("base224 encoded: %s", buf);
}
TRY(img = image_new(&ctx, data->image->cols, data->image->rows, COLOR_SPACE_RGB, NULL));
image_clear(&ctx, img);
- for (uns i = 0; i < data->regions_count; i++)
+ for (uint i = 0; i < data->regions_count; i++)
{
byte c[3];
double luv[3], xyz[3], srgb[3];
c[2] = CLAMP(srgb[2] * 255, 0, 255);
for (struct image_sig_block *block = data->regions[i].blocks; block; block = block->next)
{
- uns x1 = block->x * 4;
- uns y1 = block->y * 4;
- uns x2 = MIN(x1 + 4, img->cols);
- uns y2 = MIN(y1 + 4, img->rows);
+ uint x1 = block->x * 4;
+ uint y1 = block->y * 4;
+ uint x2 = MIN(x1 + 4, img->cols);
+ uint y2 = MIN(y1 + 4, img->rows);
byte *p = img->pixels + x1 * 3 + y1 * img->row_size;
- for (uns y = y1; y < y2; y++, p += img->row_size)
+ for (uint y = y1; y < y2; y++, p += img->row_size)
{
byte *p2 = p;
- for (uns x = x1; x < x2; x++, p2 += 3)
+ for (uint x = x1; x < x2; x++, p2 += 3)
{
p2[0] = c[0];
p2[1] = c[1];
if (img1 && img2)
{
- uns dist;
+ uint dist;
if (verbose)
{
struct fastbuf *fb = bfdopen(0, 4096);
{ NULL, 0, 0, 0 }
};
-static uns verbose = 1;
+static uint verbose = 1;
static byte *input_file_name;
static enum image_format input_format;
static byte *output_file_name;
static enum image_format output_format;
-static uns cols;
-static uns rows;
-static uns fit_to_box;
-static uns channels_format;
-static uns jpeg_quality;
+static uint cols;
+static uint rows;
+static uint fit_to_box;
+static uint channels_format;
+static uint jpeg_quality;
static struct color background_color;
static struct color default_background_color;
-static uns remove_alpha;
-static uns exif;
+static uint remove_alpha;
+static uint exif;
static void
parse_color(struct color *color, byte *s)
io.flags |= IMAGE_IO_USE_BACKGROUND;
if (jpeg_quality)
io.jpeg_quality = jpeg_quality;
- uns output_fmt = output_format ? : image_file_name_to_format(output_file_name);
- uns output_cs = io.flags & IMAGE_COLOR_SPACE;
+ uint output_fmt = output_format ? : image_file_name_to_format(output_file_name);
+ uint output_cs = io.flags & IMAGE_COLOR_SPACE;
if (output_fmt != IMAGE_FORMAT_JPEG &&
output_cs != COLOR_SPACE_GRAYSCALE &&
output_cs != COLOR_SPACE_RGB)
str_hier_suffix
# ucw/strtonum.h
str_to_uintmax
-str_to_uns
+str_to_uint
# ucw/tbf.h
tbf_init
tbf_limit
#define DO16(buf) DO8(buf,0); DO8(buf,8);
#define MOD(a) a %= BASE
-uns
-adler32_update(uns adler, const byte *buf, uns len)
+uint
+adler32_update(uint adler, const byte *buf, uint len)
{
- uns s1 = adler & 0xffff;
- uns s2 = (adler >> 16) & 0xffff;
+ uint s1 = adler & 0xffff;
+ uint s2 = (adler >> 16) & 0xffff;
int k;
if (!buf) return 1L;
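/* Classic Adler-32: s1 sums the bytes, s2 sums the running values of s1,
 * both reduced modulo BASE == 65521 (the largest prime below 2^16); the
 * NULL-buffer call above just returns the initial checksum 1. */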
{
if (!s)
return NULL;
- uns l = strlen(s) + 1;
+ uint l = strlen(s) + 1;
return memcpy(xmalloc(l), s, l);
}
#include <unistd.h>
#include <errno.h>
-static uns asio_num_users;
+static uint asio_num_users;
static struct worker_pool asio_wpool;
static void
ASSERT(r);
asio_put(r);
- for (uns i=0; i<10; i++)
+ for (uint i=0; i<10; i++)
{
r = asio_get(&q);
r->op = ASIO_WRITE_BACK;
*/
struct asio_queue {
- uns buffer_size; // How large buffers do we use [user-settable]
- uns max_writebacks; // Maximum number of writeback requests active [user-settable]
- uns allocated_requests;
- uns running_requests; // Total number of running requests
- uns running_writebacks; // How many of them are writebacks
+ uint buffer_size; // How large buffers do we use [user-settable]
+ uint max_writebacks; // Maximum number of writeback requests active [user-settable]
+ uint allocated_requests;
+ uint running_requests; // Total number of running requests
+ uint running_writebacks; // How many of them are writebacks
clist idle_list; // Recycled requests waiting for get
clist done_list; // Finished requests
struct work_queue queue;
- uns use_count; // For use by the caller
+ uint use_count; // For use by the caller
};
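/* A minimal write-back sketch against this queue. Hedged: only asio_get()
 * and asio_put() appear in the test above; the init, submit and sync
 * entry points below are assumed to be the usual ones from ucw/asio.h.
 *
 *   struct asio_queue q = { .buffer_size = 4096, .max_writebacks = 2 };
 *   asio_init_queue(&q);                  // assumed initializer
 *   struct asio_request *r = asio_get(&q);
 *   r->op = ASIO_WRITE_BACK;
 *   r->fd = 1;                            // write to stdout
 *   memcpy(r->buffer, "hi\n", 3);
 *   r->len = 3;
 *   asio_submit(r);                       // assumed submit call
 *   asio_sync(&q);                        // assumed: wait until finished
 *   asio_cleanup_queue(&q);               // assumed counterpart
 */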
enum asio_op {
byte *buffer;
int fd;
enum asio_op op;
- uns len;
+ uint len;
int status;
int returned_errno;
int submitted;
static void generate(void)
{
- uns i;
+ uint i;
for (i=0; i<N; i++)
#if 0
ASORT_ELT(i) = N-i-1;
static void check(void)
{
- uns i;
+ uint i;
for (i=0; i<N; i++)
if (ASORT_ELT(i) != i)
{
static void
encode_block(byte *w, u32 hi, u32 lo)
{
- uns x, y;
+ uint x, y;
/*
* Splitting of the 39-bit block: [a-e][0-5] are the base-32 digits, *'s are used for base-7.
}
}
-uns
-base224_encode(byte *dest, const byte *src, uns len)
+uint
+base224_encode(byte *dest, const byte *src, uint len)
{
u32 lo=0, hi=0; /* 64-bit buffer accumulating input bits */
- uns i=0; /* How many source bits do we have buffered */
+ uint i=0; /* How many source bits do we have buffered */
u32 x;
byte *w=dest;
return w - dest;
}
-uns
-base224_decode(byte *dest, const byte *src, uns len)
+uint
+base224_decode(byte *dest, const byte *src, uint len)
{
u32 hi=0, lo=0; /* 64-bit buffer accumulating output bits */
- uns i=0; /* How many bits do we have accumulated */
+ uint i=0; /* How many bits do we have accumulated */
u32 h, l; /* Decoding of the current block */
- uns x; /* base-7 part of the current block */
- uns len0;
+ uint x; /* base-7 part of the current block */
+ uint len0;
byte *start = dest;
do
* Stores them in @dest and returns the number of bytes the output
* takes.
*/
-uns base224_encode(byte *dest, const byte *src, uns len);
+uint base224_encode(byte *dest, const byte *src, uint len);
/**
* Decodes @len bytes of data pointed to by @src from base224 encoding.
* All invalid characters are ignored. The result is stored into @dest
* and length of the result is returned.
*/
-uns base224_decode(byte *dest, const byte *src, uns len);
+uint base224_decode(byte *dest, const byte *src, uint len);
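/* A round-trip sketch of the pair above, using the BASE224_ENC_LENGTH
 * sizing macro mentioned below:
 *
 *   byte out[BASE224_ENC_LENGTH(len)], back[len];
 *   uint elen = base224_encode(out, src, len);
 *   uint dlen = base224_decode(back, out, elen);
 *   ASSERT(dlen == len && !memcmp(back, src, len));
 */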
/**
* Use this macro to calculate @base224_encode() output buffer size.
};
static const byte base64_pad = '=';
-uns
-base64_encode(byte *dest, const byte *src, uns len)
+uint
+base64_encode(byte *dest, const byte *src, uint len)
{
const byte *current = src;
- uns i = 0;
+ uint i = 0;
while (len > 2) { /* keep going until we have less than 24 bits */
dest[i++] = base64_table[current[0] >> 2];
}
/* as above, but backwards. :) */
-uns
-base64_decode(byte *dest, const byte *src, uns len)
+uint
+base64_decode(byte *dest, const byte *src, uint len)
{
const byte *current = src;
- uns ch;
- uns i = 0, j = 0;
+ uint ch;
+ uint i = 0, j = 0;
static byte reverse_table[256];
- static uns table_built = 0;
+ static uint table_built = 0;
if (table_built == 0) {
ucwlib_lock();
* Stores them in @dest and returns the number of bytes the output
* takes.
*/
-uns base64_encode(byte *dest, const byte *src, uns len);
+uint base64_encode(byte *dest, const byte *src, uint len);
/**
* Decodes @len bytes of data pointed to by @src from base64 encoding.
* All invalid characters are ignored. The result is stored into @dest
* and length of the result is returned.
*/
-uns base64_decode(byte *dest, const byte *src, uns len);
+uint base64_decode(byte *dest, const byte *src, uint len);
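/* The same round trip for base64, using the BASE64_ENC_LENGTH sizing
 * macro mentioned below:
 *
 *   byte out[BASE64_ENC_LENGTH(len)], back[len];
 *   uint elen = base64_encode(out, src, len);
 *   uint dlen = base64_decode(back, out, elen);
 *   ASSERT(dlen == len && !memcmp(back, src, len));
 */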
/**
* Use this macro to calculate @base64_encode() output buffer size.
#include <stdio.h>
char *
-bb_vprintf_at(bb_t *bb, uns ofs, const char *fmt, va_list args)
+bb_vprintf_at(bb_t *bb, uint ofs, const char *fmt, va_list args)
{
bb_grow(bb, ofs + 1);
va_list args2;
}
while (cnt < 0);
}
- else if ((uns)cnt >= bb->len - ofs)
+ else if ((uint)cnt >= bb->len - ofs)
{
bb_do_grow(bb, ofs + cnt + 1);
va_copy(args2, args);
}
char *
-bb_printf_at(bb_t *bb, uns ofs, const char *fmt, ...)
+bb_printf_at(bb_t *bb, uint ofs, const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
* Returns pointer to the new string (eg. @ofs bytes after the
* beginning of buffer).
**/
-char *bb_vprintf_at(bb_t *bb, uns ofs, const char *fmt, va_list args);
+char *bb_vprintf_at(bb_t *bb, uint ofs, const char *fmt, va_list args);
/**
* Like @bb_vprintf_at(), but it takes individual arguments.
**/
-char *bb_printf_at(bb_t *bb, uns ofs, const char *fmt, ...);
+char *bb_printf_at(bb_t *bb, uint ofs, const char *fmt, ...);
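/* A usage sketch, assuming the usual bb_init()/bb_done() pair from
 * ucw/bbuf.h; the buffer may be reallocated on growth, so a returned
 * pointer must not be used after a later growing call:
 *
 *   bb_t bb;
 *   bb_init(&bb);
 *   char *s = bb_printf_at(&bb, 0, "%d-%s", 42, "tail");
 *   puts(s);
 *   bb_done(&bb);
 */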
#endif
struct item {
struct bh_node n;
- uns key;
+ uint key;
};
-static inline uns bht_key(struct bh_node *n)
+static inline uint bht_key(struct bh_node *n)
{
return ((struct item *)n)->key;
}
-static inline uns bht_less(struct bh_node *a, struct bh_node *b)
+static inline uint bht_less(struct bh_node *a, struct bh_node *b)
{
return bht_key(a) < bht_key(b);
}
static void
-bht_do_dump(struct bh_node *a, struct bh_node *expected_last, uns offset)
+bht_do_dump(struct bh_node *a, struct bh_node *expected_last, uint offset)
{
if (!a)
return;
int main(void)
{
- uns i;
+ uint i;
struct bh_heap h;
#define N 1048576
#define K(i) ((259309*i+1009)%N)
}
// bht_dump(&h);
ASSERT(bht_key(bht_findmin(&h)) == 0);
- uns cnt = 0;
+ uint cnt = 0;
BH_FOR_ALL(bht_, &h, a)
{
cnt++;
#define BH_FOR_ALL(bh_px, bh_heap, bh_var) \
do { \
struct bh_node *bh_stack[32]; \
- uns bh_sp = 0; \
+ uint bh_sp = 0; \
if (bh_stack[0] = (bh_heap)->root.first_son) \
bh_sp++; \
while (bh_sp) { \
* The time complexity is `O(log(N))`.
**/
#define BIN_SEARCH_FIRST_GE_CMP(ary,N,x,ary_lt_x) ({ \
- uns l = 0, r = (N); \
+ uint l = 0, r = (N); \
while (l < r) \
{ \
- uns m = (l+r)/2; \
+ uint m = (l+r)/2; \
if (ary_lt_x(ary,m,x)) \
l = m+1; \
else \
#include <ucw/bitops.h>
#include <ucw/bitarray.h>
-uns bit_array_count_bits(bitarray_t a, uns n)
+uint bit_array_count_bits(bitarray_t a, uint n)
{
- uns m = 0;
+ uint m = 0;
n = BIT_ARRAY_WORDS(n);
while (n--)
m += bit_count(*a++);
return m;
}
-bitarray_t bit_array_xrealloc(bitarray_t a, uns old_n, uns new_n)
+bitarray_t bit_array_xrealloc(bitarray_t a, uint old_n, uint new_n)
{
- uns old_bytes = BIT_ARRAY_BYTES(old_n);
- uns new_bytes = BIT_ARRAY_BYTES(new_n);
+ uint old_bytes = BIT_ARRAY_BYTES(old_n);
+ uint new_bytes = BIT_ARRAY_BYTES(new_n);
if (old_bytes == new_bytes)
return a;
a = xrealloc(a, new_bytes);
{
if (!fgets(buf, sizeof(buf), stdin))
return 0;
- uns n;
+ uint n;
for (n = 0; buf[n] == '0' || buf[n] == '1'; n++);
bit_array_zero(a, n);
- for (uns i = 0; i < n; i++)
+ for (uint i = 0; i < n; i++)
if (buf[i] == '1')
bit_array_set(a, i);
printf("%u\n", bit_array_count_bits(a, n));
int main(void)
{
- uns i;
+ uint i;
while (scanf("%x", &i) == 1)
printf("%d\n", bit_ffs(i));
return 0;
int
bit_fls(u32 x)
{
- uns l;
+ uint l;
if (!x)
return -1;
int main(void)
{
- uns i;
+ uint i;
while (scanf("%x", &i) == 1)
printf("%d\n", bit_fls(i));
return 0;
#define BIT_ARRAY_BYTES(n) (4*BIT_ARRAY_WORDS(n))
#define BIT_ARRAY(name,size) u32 name[BIT_ARRAY_WORDS(size)]
-static inline bitarray_t bit_array_xmalloc(uns n)
+static inline bitarray_t bit_array_xmalloc(uint n)
{
return xmalloc(BIT_ARRAY_BYTES(n));
}
-bitarray_t bit_array_xrealloc(bitarray_t a, uns old_n, uns new_n);
+bitarray_t bit_array_xrealloc(bitarray_t a, uint old_n, uint new_n);
-static inline bitarray_t bit_array_xmalloc_zero(uns n)
+static inline bitarray_t bit_array_xmalloc_zero(uint n)
{
return xmalloc_zero(BIT_ARRAY_BYTES(n));
}
-static inline void bit_array_zero(bitarray_t a, uns n)
+static inline void bit_array_zero(bitarray_t a, uint n)
{
bzero(a, BIT_ARRAY_BYTES(n));
}
-static inline void bit_array_set_all(bitarray_t a, uns n)
+static inline void bit_array_set_all(bitarray_t a, uint n)
{
- uns w = n / 32;
+ uint w = n / 32;
memset(a, 255, w * 4);
- uns m = n & 31;
+ uint m = n & 31;
if (m)
a[w] = (1U << m) - 1;
}
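/* E.g. for n == 40: w == 1 whole word is memset to ones and the
 * remaining m == 8 bits yield a[1] == (1U << 8) - 1 == 0xff, so exactly
 * the first 40 bits are set and the rest of the last word is cleared. */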
-static inline void bit_array_set(bitarray_t a, uns i)
+static inline void bit_array_set(bitarray_t a, uint i)
{
a[i/32] |= (1 << (i%32));
}
-static inline void bit_array_clear(bitarray_t a, uns i)
+static inline void bit_array_clear(bitarray_t a, uint i)
{
a[i/32] &= ~(1 << (i%32));
}
-static inline void bit_array_assign(bitarray_t a, uns i, uns x)
+static inline void bit_array_assign(bitarray_t a, uint i, uint x)
{
if (x)
bit_array_set(a, i);
bit_array_clear(a, i);
}
-static inline uns bit_array_isset(bitarray_t a, uns i)
+static inline uint bit_array_isset(bitarray_t a, uint i)
{
return a[i/32] & (1 << (i%32));
}
-static inline uns bit_array_get(bitarray_t a, uns i)
+static inline uint bit_array_get(bitarray_t a, uint i)
{
return !! bit_array_isset(a, i);
}
-static inline uns bit_array_test_and_set(bitarray_t a, uns i)
+static inline uint bit_array_test_and_set(bitarray_t a, uint i)
{
- uns t = bit_array_isset(a, i);
+ uint t = bit_array_isset(a, i);
bit_array_set(a, i);
return t;
}
-static inline uns bit_array_test_and_clear(bitarray_t a, uns i)
+static inline uint bit_array_test_and_clear(bitarray_t a, uint i)
{
- uns t = bit_array_isset(a, i);
+ uint t = bit_array_isset(a, i);
bit_array_clear(a, i);
return t;
}
-uns bit_array_count_bits(bitarray_t a, uns n);
+uint bit_array_count_bits(bitarray_t a, uint n);
/* Iterate over all set bits */
#define BIT_ARRAY_FISH_BITS_BEGIN(var,ary,size) \
- for (uns var##_hi=0; var##_hi < BIT_ARRAY_WORDS(size); var##_hi++) \
+ for (uint var##_hi=0; var##_hi < BIT_ARRAY_WORDS(size); var##_hi++) \
{ \
u32 var##_cur = ary[var##_hi]; \
- for (uns var = 32 * var##_hi; var##_cur; var++, var##_cur >>= 1) \
+ for (uint var = 32 * var##_hi; var##_cur; var++, var##_cur >>= 1) \
if (var##_cur & 1) \
do
#ifdef __pentium4 /* On other ia32 machines, the C version is faster */
-static inline uns bit_ffs(uns w)
+static inline uint bit_ffs(uint w)
{
asm("bsfl %1,%0" :"=r" (w) :"rm" (w));
return w;
#else
-static inline uns bit_ffs(uns w)
+static inline uint bit_ffs(uint w)
{
- uns b = (w & 0xffff) ? 0 : 16;
+ uint b = (w & 0xffff) ? 0 : 16;
b += ((w >> b) & 0xff) ? 0 : 8;
return b + ffs_table[(w >> b) & 0xff];
}
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
-static inline uns bit_count(uns w)
+static inline uint bit_count(uint w)
{
return __builtin_popcount(w);
}
#else
-static inline uns bit_count(uns w)
+static inline uint bit_count(uint w)
{
- uns n = 0;
+ uint n = 0;
while (w)
{
w &= w - 1;
#include <string.h>
struct bitsig {
- uns l, m, n, maxn, max_m_mult;
+ uint l, m, n, maxn, max_m_mult;
u32 hash[4];
- uns hindex;
+ uint hindex;
byte array[0];
};
struct bitsig *
-bitsig_init(uns perrlog, uns maxn)
+bitsig_init(uint perrlog, uint maxn)
{
struct bitsig *b;
u64 m;
- uns mbytes;
+ uint mbytes;
m = ((u64) maxn * perrlog * 145 + 99) / 100;
if (m >= (u64) 1 << 32)
b->hindex = 0;
}
-static inline uns
+static inline uint
bitsig_hash_bit(struct bitsig *b)
{
u32 h;
int
bitsig_member(struct bitsig *b, byte *item)
{
- uns i, bit;
+ uint i, bit;
bitsig_hash_init(b, item);
for (i=0; i<b->l; i++)
int
bitsig_insert(struct bitsig *b, byte *item)
{
- uns i, bit, was;
+ uint i, bit, was;
bitsig_hash_init(b, item);
was = 1;
struct bitsig;
-struct bitsig *bitsig_init(uns perrlog, uns maxn);
+struct bitsig *bitsig_init(uint perrlog, uint maxn);
void bitsig_free(struct bitsig *b);
int bitsig_member(struct bitsig *b, byte *item);
int bitsig_insert(struct bitsig *b, byte *item);
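A hedged sketch of how these calls combine; judging by the sizing formula above, @perrlog is the negative base-2 logarithm of the false-positive rate, and the exact item format is whatever bitsig_hash_init() expects (not shown in this excerpt):

  struct bitsig *b = bitsig_init(10, 1000000);   // ~2^-10 false positives, up to 1M items
  if (!bitsig_member(b, item))                   // may rarely report a false presence
    bitsig_insert(b, item);
  bitsig_free(b);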
* This way we bypass most possible problems with different compilation environments.
*
* All functions and macros accept any numbers and if it is necessary, they simply ignore higher bits.
* It does not matter whether a parameter is signed or unsigned. Parameters are evaluated exactly once,
* so they can have side-effects.
***/
/**
* Compute the value of a valid hexadecimal character (ie. passed the @Cxdigit() check).
**/
-static inline uns Cxvalue(byte x)
+static inline uint Cxvalue(byte x)
{
- return (x < (uns)'A') ? x - '0' : (x & 0xdf) - 'A' + 10;
+ return (x < (uint)'A') ? x - '0' : (x & 0xdf) - 'A' + 10;
}
#endif
/**
* Compute the number of nodes in @l. Beware of linear time complexity.
**/
-static inline uns clist_size(clist *l)
+static inline uint clist_size(clist *l)
{
- uns i = 0;
+ uint i = 0;
CLIST_FOR_EACH(cnode *, n, *l)
i++;
return i;
}
void *
-cf_malloc(uns size)
+cf_malloc(uint size)
{
return mp_alloc(cf_get_pool(), size);
}
void *
-cf_malloc_zero(uns size)
+cf_malloc_zero(uint size)
{
return mp_alloc_zero(cf_get_pool(), size);
}
#include <ucw/fastbuf.h>
static void
-spaces(struct fastbuf *fb, uns nr)
+spaces(struct fastbuf *fb, uint nr)
{
- for (uns i=0; i<nr; i++)
+ for (uint i=0; i<nr; i++)
bputs(fb, " ");
}
dump_basic(struct fastbuf *fb, void *ptr, enum cf_type type, union cf_union *u)
{
switch (type) {
- case CT_INT: bprintf(fb, "%d ", *(uns*)ptr); break;
+ case CT_INT: bprintf(fb, "%d ", *(uint*)ptr); break;
case CT_U64: bprintf(fb, "%llu ", (long long) *(u64*)ptr); break;
case CT_DOUBLE: bprintf(fb, "%lg ", *(double*)ptr); break;
- case CT_IP: bprintf(fb, "%08x ", *(uns*)ptr); break;
+ case CT_IP: bprintf(fb, "%08x ", *(uint*)ptr); break;
case CT_STRING:
if (*(char**)ptr)
bprintf(fb, "'%s' ", *(char**)ptr);
{
ptr += (uintptr_t) item->ptr;
enum cf_type type = item->type;
- uns size = cf_type_size(item->type, item->u.utype);
+ uint size = cf_type_size(item->type, item->u.utype);
int i;
spaces(fb, level);
bprintf(fb, "%s: C%s #", item->name, class_names[item->cls]);
if (item->cls == CC_SECTION)
dump_section(fb, item->u.sec, level+1, ptr);
else if (item->cls == CC_LIST) {
- uns idx = 0;
+ uint idx = 0;
CLIST_FOR_EACH(cnode *, n, * (clist*) ptr) {
spaces(fb, level+1);
bprintf(fb, "item %d\n", ++idx);
#include <ucw/bbuf.h>
-#define GBUF_TYPE uns
+#define GBUF_TYPE uint
#define GBUF_PREFIX(x) split_##x
#include <ucw/gbuf.h>
struct cf_parser_state {
const char *name_parse_fb;
struct fastbuf *parse_fb;
- uns line_num;
+ uint line_num;
char *line;
split_t word_buf;
- uns words;
- uns ends_by_brace; // the line is ended by "{"
+ uint words;
+ uint ends_by_brace; // the line is ended by "{"
bb_t copy_buf;
- uns copied;
+ uint copied;
char line_buf[];
};
static void
append(struct cf_parser_state *p, char *start, char *end)
{
- uns len = end - start;
+ uint len = end - start;
bb_grow(&p->copy_buf, p->copied + len + 1);
memcpy(p->copy_buf.ptr + p->copied, start, len);
p->copied += len + 1;
}
static char *
-get_word(struct cf_parser_state *p, uns is_command_name)
+get_word(struct cf_parser_state *p, uint is_command_name)
{
char *msg;
char *line = p->line;
} else if (*line == '"') {
line++;
- uns start_copy = p->copied;
+ uint start_copy = p->copied;
while (1) {
char *start = line;
- uns escape = 0;
+ uint escape = 0;
while (*line) {
if (*line == '"' && !escape)
break;
line++;
char *tmp = stk_str_unesc(p->copy_buf.ptr + start_copy);
- uns l = strlen(tmp);
+ uint l = strlen(tmp);
bb_grow(&p->copy_buf, start_copy + l + 1);
strcpy(p->copy_buf.ptr + start_copy, tmp);
p->copied = start_copy + l + 1;
}
static char *
-get_token(struct cf_parser_state *p, uns is_command_name, char **err)
+get_token(struct cf_parser_state *p, uint is_command_name, char **err)
{
*err = NULL;
while (1) {
msg(L_WARN, "The line %s:%d following a backslash is empty", p->name_parse_fb ? : "", p->line_num);
} else {
split_grow(&p->word_buf, p->words+1);
- uns start = p->copied;
+ uint start = p->copied;
p->word_buf.ptr[p->words++] = p->copied;
*err = get_word(p, is_command_name);
return *err ? NULL : p->copy_buf.ptr + start;
}
static char *
-parse_fastbuf(struct cf_context *cc, const char *name_fb, struct fastbuf *fb, uns depth)
+parse_fastbuf(struct cf_context *cc, const char *name_fb, struct fastbuf *fb, uint depth)
{
struct cf_parser_state *p = cc->parser;
if (!p)
break;
char *name = p->copy_buf.ptr + p->word_buf.ptr[0];
char *pars[p->words-1];
- for (uns i=1; i<p->words; i++)
+ for (uint i=1; i<p->words; i++)
pars[i-1] = p->copy_buf.ptr + p->word_buf.ptr[i];
int optional_include = !strcasecmp(name, "optionalinclude");
if (optional_include || !strcasecmp(name, "include"))
err = cf_printf("Cannot open file %s: %m", pars[0]);
goto error;
}
- uns ll = p->line_num;
+ uint ll = p->line_num;
err = parse_fastbuf(cc, stk_strdup(pars[0]), new_fb, depth+1);
p->line_num = ll;
bclose(new_fb);
};
static void
-cf_remember_entry(struct cf_context *cc, uns type, const char *arg)
+cf_remember_entry(struct cf_context *cc, uint type, const char *arg)
{
if (!cc->enable_journal)
return;
ASSERT(cc->enable_journal);
cf_journal_swap();
struct cf_journal_item *oldj = cf_journal_new_transaction(1);
- uns ec = cc->everything_committed;
+ uint ec = cc->everything_committed;
cc->everything_committed = 0;
clist old_entries;
int is_active;
int config_loaded; // at least one config file was loaded
struct cf_parser_state *parser;
- uns everything_committed; // did we already commit each section?
- uns postpone_commit; // counter of calls to cf_open_group()
- uns other_options; // used internally by cf_getopt()
+ uint everything_committed; // did we already commit each section?
+ uint postpone_commit; // counter of calls to cf_open_group()
+ uint other_options; // used internally by cf_getopt()
clist conf_entries; // files/strings to reload
struct cf_journal_item *journal; // journalling
int enable_journal;
struct old_pools *pools;
struct item_stack stack[MAX_STACK_SIZE]; // interpreter stack
- uns stack_level;
+ uint stack_level;
struct cf_section sections; // root section
- uns sections_initialized;
+ uint sections_initialized;
dirtsec_t dirty; // dirty sections
- uns dirties;
+ uint dirties;
};
/* conf-ctxt.c */
extern char *cf_op_names[];
extern char *cf_type_names[];
-uns cf_type_size(enum cf_type type, struct cf_user_type *utype);
+uint cf_type_size(enum cf_type type, struct cf_user_type *utype);
char *cf_interpret_line(struct cf_context *cc, char *name, enum cf_operation op, int number, char **pars);
void cf_init_stack(struct cf_context *cc);
int cf_done_stack(struct cf_context *cc);
typedef char *cf_basic_parser(char *str, void *ptr);
static struct {
- uns size;
+ uint size;
void *parser;
} parsers[] = {
{ sizeof(int), cf_parse_int },
{ 0, NULL }, // user-defined types are parsed extra
};
-inline uns
+inline uint
cf_type_size(enum cf_type type, struct cf_user_type *utype)
{
if (type < CT_USER)
cf_parse_lookup(char *str, int *ptr, const char * const *t)
{
const char * const *n = t;
- uns total_len = 0;
+ uint total_len = 0;
while (*n && strcasecmp(*n, str)) {
total_len += strlen(*n) + 2;
n++;
}
static char *
-cf_parse_ary(uns number, char **pars, void *ptr, enum cf_type type, union cf_union *u)
+cf_parse_ary(uint number, char **pars, void *ptr, enum cf_type type, union cf_union *u)
{
- for (uns i=0; i<number; i++)
+ for (uint i=0; i<number; i++)
{
char *msg;
- uns size = cf_type_size(type, u->utype);
+ uint size = cf_type_size(type, u->utype);
if (type < CT_LOOKUP)
msg = ((cf_basic_parser*) parsers[type].parser) (pars[i], ptr + i * size);
else if (type == CT_LOOKUP)
interpret_set_dynamic(struct cf_item *item, int number, char **pars, void **ptr)
{
enum cf_type type = item->type;
- uns size = cf_type_size(type, item->u.utype);
+ uint size = cf_type_size(type, item->u.utype);
cf_journal_block(ptr, sizeof(void*));
// boundary checks done by the caller
*ptr = gary_init(size, number, mp_get_allocator(cf_get_pool()));
{
enum cf_type type = item->type;
void *old_p = *ptr;
- uns size = cf_type_size(item->type, item->u.utype);
- ASSERT(size >= sizeof(uns));
+ uint size = cf_type_size(item->type, item->u.utype);
+ ASSERT(size >= sizeof(uint));
int old_nr = old_p ? GARY_SIZE(old_p) : 0;
int taken = MIN(number, ABS(item->number)-old_nr);
*processed = taken;
return cf_printf("Dynamic arrays do not support operation %s", cf_op_names[op]);
}
-static char *interpret_set_item(struct cf_item *item, int number, char **pars, int *processed, void *ptr, uns allow_dynamic);
+static char *interpret_set_item(struct cf_item *item, int number, char **pars, int *processed, void *ptr, uint allow_dynamic);
static char *
-interpret_section(struct cf_section *sec, int number, char **pars, int *processed, void *ptr, uns allow_dynamic)
+interpret_section(struct cf_section *sec, int number, char **pars, int *processed, void *ptr, uint allow_dynamic)
{
cf_add_dirty(sec, ptr);
*processed = 0;
return "Nothing to add to the list";
struct cf_section *sec = item->u.sec;
*processed = 0;
- uns index = 0;
+ uint index = 0;
while (number > 0)
{
void *node = cf_malloc(sec->size);
return cf_printf("Type %s cannot be used with bitmaps", cf_type_names[item->type]);
cf_journal_block(ptr, sizeof(u32));
for (int i=0; i<number; i++) {
- uns idx;
+ uint idx;
if (item->type == CT_INT)
TRY( cf_parse_int(pars[i], &idx) );
else
}
static char *
-interpret_set_item(struct cf_item *item, int number, char **pars, int *processed, void *ptr, uns allow_dynamic)
+interpret_set_item(struct cf_item *item, int number, char **pars, int *processed, void *ptr, uint allow_dynamic)
{
int taken;
switch (item->cls)
return "Missing value";
taken = MIN(number, item->number);
*processed = taken;
- uns size = cf_type_size(item->type, item->u.utype);
+ uint size = cf_type_size(item->type, item->u.utype);
cf_journal_block(ptr, taken * size);
return cf_parse_ary(taken, pars, ptr, item->type, &item->u);
case CC_DYNAMIC:
if (item->type == CT_INT)
* (u32*) ptr = ~0u;
else {
- uns nr = -1;
+ uint nr = -1;
while (item->u.lookup[++nr]);
* (u32*) ptr = ~0u >> (32-nr);
}
{
CLIST_FOR_EACH(cnode *, n, *list)
{
- uns found = 1;
- for (uns i=0; i<32; i++)
+ uint found = 1;
+ for (uint i=0; i<32; i++)
if (mask & (1<<i))
if (cmp_items(n, query, sec->cfg+i))
{
static char *
record_selector(struct cf_item *item, struct cf_section *sec, u32 *mask)
{
- uns nr = sec->flags & SEC_FLAG_NUMBER;
+ uint nr = sec->flags & SEC_FLAG_NUMBER;
if (item >= sec->cfg && item < sec->cfg + nr) // setting an attribute relative to this section
{
- uns i = item - sec->cfg;
+ uint i = item - sec->cfg;
if (i >= 32)
return "Cannot select list nodes by this attribute";
if (sec->cfg[i].cls != CC_STATIC)
struct cf_journal_item {
struct cf_journal_item *prev;
byte *ptr;
- uns len;
+ uint len;
byte copy[0];
};
}
void
-cf_journal_block(void *ptr, uns len)
+cf_journal_block(void *ptr, uint len)
{
struct cf_context *cc = cf_get_context();
if (!cc->enable_journal)
{
prev = curr->prev;
curr->prev = next;
- for (uns i=0; i<curr->len; i++)
+ for (uint i=0; i<curr->len; i++)
{
byte x = curr->copy[i];
curr->copy[i] = curr->ptr[i];
}
struct cf_journal_item *
-cf_journal_new_transaction(uns new_pool)
+cf_journal_new_transaction(uint new_pool)
{
struct cf_context *cc = cf_get_context();
if (new_pool)
}
void
-cf_journal_commit_transaction(uns new_pool, struct cf_journal_item *oldj)
+cf_journal_commit_transaction(uint new_pool, struct cf_journal_item *oldj)
{
struct cf_context *cc = cf_get_context();
if (new_pool)
}
void
-cf_journal_rollback_transaction(uns new_pool, struct cf_journal_item *oldj)
+cf_journal_rollback_transaction(uint new_pool, struct cf_journal_item *oldj)
{
struct cf_context *cc = cf_get_context();
if (!cc->enable_journal)
#include <errno.h>
struct unit {
- uns name; // one-letter name of the unit
- uns num, den; // fraction
+ uint name; // one-letter name of the unit
+ uint num, den; // fraction
};
static const struct unit units[] = {
const struct unit *u;
char *end;
errno = 0;
- uns x = strtoul(str, &end, 0);
+ uint x = strtoul(str, &end, 0);
if (errno == ERANGE)
msg = cf_rngerr;
else if (u = lookup_unit(str, end, &msg)) {
else {
const struct unit *u;
double x;
- uns read_chars;
+ uint read_chars;
if (sscanf(str, "%lf%n", &x, &read_chars) != 1)
msg = "Invalid number";
else if (u = lookup_unit(str, str + read_chars, &msg))
{
if (!*p)
return "Missing IP address";
- uns x = 0;
+ uint x = 0;
char *p2;
if (*p == '0' && (p[1] | 32) == 'x' && Cxdigit(p[2])) {
errno = 0;
p = p2;
}
else
- for (uns i = 0; i < 4; i++) {
+ for (uint i = 0; i < 4; i++) {
if (i) {
if (*p++ != '.')
goto error;
if (!Cdigit(*p))
goto error;
errno = 0;
- uns y = strtoul(p, &p2, 10);
+ uint y = strtoul(p, &p2, 10);
if (errno == ERANGE || p2 == (char*) p || y > 255)
goto error;
p = p2;
}
void
-cf_declare_rel_section(const char *name, struct cf_section *sec, void *ptr, uns allow_unknown)
+cf_declare_rel_section(const char *name, struct cf_section *sec, void *ptr, uint allow_unknown)
{
struct cf_context *cc = cf_obtain_context();
if (!cc->sections.cfg)
}
void
-cf_declare_section(const char *name, struct cf_section *sec, uns allow_unknown)
+cf_declare_section(const char *name, struct cf_section *sec, uint allow_unknown)
{
cf_declare_rel_section(name, sec, NULL, allow_unknown);
}
void
-cf_init_section(const char *name, struct cf_section *sec, void *ptr, uns do_bzero)
+cf_init_section(const char *name, struct cf_section *sec, void *ptr, uint do_bzero)
{
if (do_bzero) {
ASSERT(sec->size);
}
static char *
-commit_section(struct cf_section *sec, void *ptr, uns commit_all)
+commit_section(struct cf_section *sec, void *ptr, uint commit_all)
{
struct cf_context *cc = cf_get_context();
char *err;
return "commit of a subsection failed";
}
} else if (ci->cls == CC_LIST) {
- uns idx = 0;
+ uint idx = 0;
CLIST_FOR_EACH(cnode *, n, * (clist*) (ptr + (uintptr_t) ci->ptr))
if (idx++, err = commit_section(ci->u.sec, n, commit_all)) {
msg(L_ERROR, "Cannot commit node #%d of list %s: %s", idx, ci->name, err);
* hence we need to call them in a fixed order. */
#define ARY_LT_X(ary,i,x) ary[i].sec < x.sec || ary[i].sec == x.sec && ary[i].ptr < x.ptr
struct dirty_section comp = { sec, ptr };
- uns pos = BIN_SEARCH_FIRST_GE_CMP(cc->dirty.ptr, cc->dirties, comp, ARY_LT_X);
+ uint pos = BIN_SEARCH_FIRST_GE_CMP(cc->dirty.ptr, cc->dirties, comp, ARY_LT_X);
if (commit_all
|| (pos < cc->dirties && cc->dirty.ptr[pos].sec == sec && cc->dirty.ptr[pos].ptr == ptr))
}
static char *
-time_parser(uns number, char **pars, time_t *ptr)
+time_parser(uint number, char **pars, time_t *ptr)
{
*ptr = number ? atoi(pars[0]) : time(NULL);
return NULL;
#undef F
};
-static uns nr1 = 15;
+static uint nr1 = 15;
static int *nrs1;
static int nrs2[5];
static char *str1 = "no worries";
static char *
parse_u16(char *string, u16 *ptr)
{
- uns a;
+ uint a;
char *msg = cf_parse_int(string, &a);
if (msg)
return msg;
static char *
init_top(void *ptr UNUSED)
{
- for (uns i=0; i<5; i++)
+ for (uint i=0; i<5; i++)
{
struct sub_sect_1 *s = xmalloc(sizeof(struct sub_sect_1)); // XXX: cannot be cf_malloc()'ed, because it is deleted on cf_reload()
cf_init_section("slaves", &cf_sec_1, s, 1);
CF_INIT(init_top),
CF_COMMIT(commit_top),
CF_ITEMS {
- CF_UNS("nr1", &nr1),
+ CF_UINT("nr1", &nr1),
CF_INT_DYN("nrs1", &nrs1, 1000),
CF_INT_ARY("nrs2", nrs2, 5),
CF_STRING("str1", &str1),
* @cf_journal_block() on the overwritten memory block. It returns an error
* message or NULL if everything is all right.
**/
-typedef char *cf_parser(uns number, char **pars, void *ptr);
+typedef char *cf_parser(uint number, char **pars, void *ptr);
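For illustration only, a hypothetical parser with this signature (size_parser and its one-integer semantics are invented here, not part of the library):

  static char *
  size_parser(uint number, char **pars, void *ptr)
  {
    if (number != 1)
      return "Exactly one value expected";
    cf_journal_block(ptr, sizeof(int));   // journal the overwritten block, as described above
    return cf_parse_int(pars[0], ptr);
  }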
/**
* A parser function for user-defined types gets a string and a pointer to
* the destination variable. It must store the value within [ptr,ptr+size),
typedef char *cf_copier(void *dest, void *src);
struct cf_user_type { /** Structure to store information about user-defined variable type. **/
- uns size; // of the parsed attribute
+ uint size; // of the parsed attribute
char *name; // name of the type (for dumping)
cf_parser1 *parser; // how to parse it
cf_dumper1 *dumper; // how to dump the type
};
struct cf_section { /** A section. **/
- uns size; // 0 for a global block, sizeof(struct) for a section
+ uint size; // 0 for a global block, sizeof(struct) for a section
cf_hook *init; // fills in default values (no need to bzero)
cf_hook *commit; // verifies parsed data (optional)
cf_copier *copy; // copies values from another instance (optional, no need to copy basic attributes)
struct cf_item *cfg; // CC_END-terminated array of items
- uns flags; // for internal use only
+ uint flags; // for internal use only
};
/***
* struct list_node {
* cnode n; // This one is for the list itself
* char *name;
- * uns value;
+ * uint value;
* };
*
* static struct clist nodes;
* CF_TYPE(struct list_node),
* CF_ITEMS {
* CF_STRING("name", PTR_TO(struct list_node, name)),
- * CF_UNS("value", PTR_TO(struct list_node, value)),
+ * CF_UINT("value", PTR_TO(struct list_node, value)),
* CF_END
* }
* };
#define CF_INT(n,p) CF_STATIC(n,p,INT,int,1) /** Single `int` value. **/
#define CF_INT_ARY(n,p,c) CF_STATIC(n,p,INT,int,c) /** Static array of integers. **/
#define CF_INT_DYN(n,p,c) CF_DYNAMIC(n,p,INT,int,c) /** Dynamic array of integers. **/
-#define CF_UNS(n,p) CF_STATIC(n,p,INT,uns,1) /** Single `uns` (`unsigned`) value. **/
-#define CF_UNS_ARY(n,p,c) CF_STATIC(n,p,INT,uns,c) /** Static array of unsigned integers. **/
-#define CF_UNS_DYN(n,p,c) CF_DYNAMIC(n,p,INT,uns,c) /** Dynamic array of unsigned integers. **/
+#define CF_UINT(n,p) CF_STATIC(n,p,INT,uint,1) /** Single `uint` (`unsigned`) value. **/
+#define CF_UINT_ARY(n,p,c) CF_STATIC(n,p,INT,uint,c) /** Static array of unsigned integers. **/
+#define CF_UINT_DYN(n,p,c) CF_DYNAMIC(n,p,INT,uint,c) /** Dynamic array of unsigned integers. **/
#define CF_U64(n,p) CF_STATIC(n,p,U64,u64,1) /** Single unsigned 64bit integer (`u64`). **/
#define CF_U64_ARY(n,p,c) CF_STATIC(n,p,U64,u64,c) /** Static array of u64s. **/
#define CF_U64_DYN(n,p,c) CF_DYNAMIC(n,p,U64,u64,c) /** Dynamic array of u64s. **/
* cf_set(), or cf_getopt() on the particular context.
***/
struct mempool *cf_get_pool(void); /** Return a pointer to the current configuration pool. **/
-void *cf_malloc(uns size); /** Returns @size bytes of memory allocated from the current configuration pool. **/
-void *cf_malloc_zero(uns size); /** Like @cf_malloc(), but zeroes the memory. **/
+void *cf_malloc(uint size); /** Returns @size bytes of memory allocated from the current configuration pool. **/
+void *cf_malloc_zero(uint size); /** Like @cf_malloc(), but zeroes the memory. **/
char *cf_strdup(const char *s); /** Copy a string into @cf_malloc()ed memory. **/
char *cf_printf(const char *fmt, ...) FORMAT_CHECK(printf,1,2); /** printf() into @cf_malloc()ed memory. **/
* <<custom_parser,Custom parsers>> do not need to call it; it is called
* before them.
**/
-void cf_journal_block(void *ptr, uns len);
+void cf_journal_block(void *ptr, uint len);
#define CF_JOURNAL_VAR(var) cf_journal_block(&(var), sizeof(var)) // Store a single value into the journal
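For instance, a hook that overwrites a configuration variable in place journals it first (my_var and new_value are placeholders):

  CF_JOURNAL_VAR(my_var);   // save the old value so a rollback can restore it
  my_var = new_value;       // the overwrite is now covered by the journal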
struct cf_journal_item; /** Opaque identifier of the journal state. **/
* get back to it. The @new_pool parameter tells if a new memory pool
* should be created and used from now on.
**/
-struct cf_journal_item *cf_journal_new_transaction(uns new_pool);
+struct cf_journal_item *cf_journal_new_transaction(uint new_pool);
/**
* Marks current state as a complete transaction. The @new_pool
* parameter tells if the transaction was created with a new memory pool
* is the journal state returned from last
* @cf_journal_new_transaction() call.
**/
-void cf_journal_commit_transaction(uns new_pool, struct cf_journal_item *oldj);
+void cf_journal_commit_transaction(uint new_pool, struct cf_journal_item *oldj);
/**
* Returns to an old journal state, reverting anything the current
* transaction did. The @new_pool parameter must be the same as the
* is the journal state you got from @cf_journal_new_transaction() --
* it is the state to return to.
**/
-void cf_journal_rollback_transaction(uns new_pool, struct cf_journal_item *oldj);
+void cf_journal_rollback_transaction(uint new_pool, struct cf_journal_item *oldj);
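Taken together, a typical transaction follows this pattern (the failure test is a placeholder):

  struct cf_journal_item *oldj = cf_journal_new_transaction(1);
  // ... perform configuration changes, which journal their overwrites ...
  if (failed)
    cf_journal_rollback_transaction(1, oldj);   // revert to the remembered state
  else
    cf_journal_commit_transaction(1, oldj);     // keep the changes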
/***
* [[declare]]
* Please note that a single section definition cannot be used in multiple
* configuration contexts simultaneously.
**/
-void cf_declare_section(const char *name, struct cf_section *sec, uns allow_unknown);
+void cf_declare_section(const char *name, struct cf_section *sec, uint allow_unknown);
/**
* Like @cf_declare_section(), but instead of item pointers, the section
* contains offsets relative to @ptr. In other words, it does the same
* as `CF_SECTION`, but for top-level sections.
**/
-void cf_declare_rel_section(const char *name, struct cf_section *sec, void *ptr, uns allow_unknown);
+void cf_declare_rel_section(const char *name, struct cf_section *sec, void *ptr, uint allow_unknown);
/**
* If you have a section in a structure and you want to initialize it
* (eg. if you want a copy of default values outside the configuration),
*
* This is used mostly internally. You probably do not need it.
**/
-void cf_init_section(const char *name, struct cf_section *sec, void *ptr, uns do_bzero);
+void cf_init_section(const char *name, struct cf_section *sec, void *ptr, uint do_bzero);
/***
* [[bparser]]
typedef int64_t s64; /** Exactly 64 bits, signed **/
typedef unsigned int uint; /** A better pronounceable alias for `unsigned int` **/
-typedef uint uns; /** Backwards compatible alias for `uint' ***/
+// FIXME: deprecated alias, to be dropped once all users are converted to `uint'
+// typedef uint uns; /** Backwards compatible alias for `uint' **/
typedef s64 timestamp_t; /** Milliseconds since an unknown epoch **/
#ifdef CONFIG_UCW_LARGE_FILES
#include <ucw/crc.h>
#include <ucw/crc-tables.h>
-static void crc32_update_by1(crc32_context *ctx, const byte *buf, uns len)
+static void crc32_update_by1(crc32_context *ctx, const byte *buf, uint len)
{
u32 crc = ctx->state;
while (len--)
ctx->state = crc;
}
-static void crc32_update_by4(crc32_context *ctx, const byte *buf, uns len)
+static void crc32_update_by4(crc32_context *ctx, const byte *buf, uint len)
{
- uns init_bytes, words;
+ uint init_bytes, words;
u32 crc = ctx->state;
u32 term1, term2, *buf32;
ctx->state = crc;
}
-static void crc32_update_by8(crc32_context *ctx, const byte *buf, uns len)
+static void crc32_update_by8(crc32_context *ctx, const byte *buf, uint len)
{
- uns init_bytes, quads;
+ uint init_bytes, quads;
u32 crc = ctx->state;
u32 term1, term2, *buf32;
}
void
-crc32_init(crc32_context *ctx, uns crc_mode)
+crc32_init(crc32_context *ctx, uint crc_mode)
{
ctx->state = 0xffffffff;
switch (crc_mode)
}
u32
-crc32_hash_buffer(const byte *buf, uns len)
+crc32_hash_buffer(const byte *buf, uint len)
{
crc32_context ctx;
crc32_init(&ctx, CRC_MODE_DEFAULT);
{
if (argc != 5)
die("Usage: crc-t <alg> <len> <block> <iters>");
- uns alg = atoi(argv[1]);
- uns len = atoi(argv[2]);
- uns block = atoi(argv[3]);
- uns iters = atoi(argv[4]);
+ uint alg = atoi(argv[1]);
+ uint len = atoi(argv[2]);
+ uint block = atoi(argv[3]);
+ uint iters = atoi(argv[4]);
byte *buf = xmalloc(len);
- for (uns i=0; i<len; i++)
+ for (uint i=0; i<len; i++)
buf[i] = i ^ (i >> 5) ^ (i >> 11);
- for (uns i=0; i<iters; i++)
+ for (uint i=0; i<iters; i++)
{
crc32_context ctx;
- uns modes[] = { CRC_MODE_DEFAULT, CRC_MODE_SMALL, CRC_MODE_BIG };
+ uint modes[] = { CRC_MODE_DEFAULT, CRC_MODE_SMALL, CRC_MODE_BIG };
ASSERT(alg < ARRAY_SIZE(modes));
crc32_init(&ctx, modes[alg]);
- for (uns p=0; p<len;)
+ for (uint p=0; p<len;)
{
- uns l = MIN(len-p, block);
+ uint l = MIN(len-p, block);
crc32_update(&ctx, buf+p, l);
p += l;
}
- uns crc = crc32_final(&ctx);
+ uint crc = crc32_final(&ctx);
if (!i)
printf("%08x\n", crc);
}
*/
typedef struct crc32_context {
u32 state;
- void (*update_func)(struct crc32_context *ctx, const byte *buf, uns len);
+ void (*update_func)(struct crc32_context *ctx, const byte *buf, uint len);
} crc32_context;
/**
* Initialize new calculation of CRC in a given context.
* @crc_mode selects which algorithm should be used.
**/
-void crc32_init(crc32_context *ctx, uns crc_mode);
+void crc32_init(crc32_context *ctx, uint crc_mode);
/**
* Algorithm used for CRC calculation. The algorithms differ by the amount
};
/** Feed @len bytes starting at @buf to the CRC calculator. **/
-static inline void crc32_update(crc32_context *ctx, const byte *buf, uns len)
+static inline void crc32_update(crc32_context *ctx, const byte *buf, uint len)
{
ctx->update_func(ctx, buf, len);
}
* crc32_update(&ctx, buf, len);
* return crc32_final(&ctx);
*/
-u32 crc32_hash_buffer(const byte *buf, uns len);
+u32 crc32_hash_buffer(const byte *buf, uint len);
#endif
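A one-shot checksum then reduces to a single call (a sketch; the resulting value depends on the polynomial the tables implement, so it is not asserted here):

  static const byte msg[] = "123456789";
  u32 crc = crc32_hash_buffer(msg, sizeof(msg) - 1);   // checksum the 9 data bytes, not the NUL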
{
if (u[0] == '#')
{
- uns id;
- const char *err = str_to_uns(&id, u, NULL, 10 | STN_WHOLE);
+ uint id;
+ const char *err = str_to_uint(&id, u, NULL, 10 | STN_WHOLE);
if (err)
die("Cannot parse user `%s': %s", u, err);
dp->run_as_uid = id;
{
if (g[0] == '#')
{
- uns id;
- const char *err = str_to_uns(&id, g, NULL, 10 | STN_WHOLE);
+ uint id;
+ const char *err = str_to_uint(&id, g, NULL, 10 | STN_WHOLE);
if (err)
die("Cannot parse group `%s': %s", g, err);
dp->run_as_gid = id;
/** Parameters passed to the daemon helper. **/
struct daemon_params {
- uns flags; // DAEMON_FLAG_xxx
+ uint flags; // DAEMON_FLAG_xxx
const char *pid_file; // A path to PID file (optional)
const char *run_as_user; // User name or "#uid" (optional)
const char *run_as_group; // Group name or "#gid" (optional)
output buffer and feed the data to the function.
byte output[BASE64_ENC_LENGTH(input_size)];
- uns output_size = base64_encode(output, input, input_size);
+ uint output_size = base64_encode(output, input, input_size);
- Decoding can be done in a similar way. It is enough to have an output
buffer of the same size as the input one.
chunks. The input chunk size must be a multiple of `BASE64_IN_CHUNK`.
The output will be the corresponding multiple of `BASE64_OUT_CHUNK`.
- uns input_size;
+ uint input_size;
byte input[BASE64_IN_CHUNK * 10];
while(input_size = read_chunk(input, BASE64_IN_CHUNK * 10)) {
byte output[BASE64_OUT_CHUNK * 10];
- uns output_size = base64_encode(output, input, input_size);
+ uint output_size = base64_encode(output, input, input_size);
use_chunk(output, output_size);
}
+
For example, you can have a static array of five unsigned integers:
+
- static uns array[] = { 1, 2, 3, 4, 5 };
+ static uint array[] = { 1, 2, 3, 4, 5 };
+
static struct cf_section section = {
CF_ITEMS {
- CF_UNS_ARY("array", array, 5),
+ CF_UINT_ARY("array", array, 5),
CF_END
}
};
#define ARRAY_PREFIX(name) intarray_##name
#include <array.h>
- #define ARRAY_TYPE uns
- #define ARRAY_PREFIX(name) unsarray_##name
+ #define ARRAY_TYPE uint
+ #define ARRAY_PREFIX(name) uintarray_##name
#include <array.h>
This will generate the data types (presumably `intarray_t` and
-`unsarray_t`) and the index functions (`intarray_index` and
-`unsarray_index`). We can use them like anything else.
+`uintarray_t`) and the index functions (`intarray_index` and
+`uintarray_index`). We can use them like anything else.
Maybe the `ARRAY_PREFIX` deserves some attention. When the header file
wants to generate an identifier, it uses this macro with
some name. Then the macro takes the name, adds a prefix to it and
returns the new identifier, so `ARRAY_PREFIX(t)` will generate
-`intarray_t` in the first case and `unsarray_t` in the second. This
+`intarray_t` in the first case and `uintarray_t` in the second. This
allows having more than one instance of the same data structure or
algorithm, because it generates different identifiers for them.
#define ARRAY_A_TYPE ARRAY_PREFIX(t)
typedef ARRAY_TYPE *ARRAY_A_TYPE;
- static ARRAY_TYPE *ARRAY_PREFIX(index)(ARRAY_A_TYPE array, uns index)
+ static ARRAY_TYPE *ARRAY_PREFIX(index)(ARRAY_A_TYPE array, uint index)
{
return array + index;
}
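With the generated code above, usage is direct:

  int data[4] = { 1, 2, 3, 4 };
  intarray_t a = data;            // intarray_t is just int * here
  *intarray_index(a, 2) = 42;     // data[2] is now 42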
parts together.
byte buffer[MAX_BUFFER];
- uns buffer_len;
+ uint buffer_len;
md5_context c;
md5_init(&c);
while(buffer_len = get_chunk(buffer, MAX_BUFFER)) {
to customize the behaviour. The macros are:
[[give_hashfn]]
-- `HASH_GIVE_HASHFN` -- the table will use `uns
+- `HASH_GIVE_HASHFN` -- the table will use `uint
HASH_PREFIX(hash)(key)` to calculate the hash of `key`.
There is a sensible default for integers and strings.
In the case of <<key_complex,`HASH_KEY_COMPLEX`>>, it is mandatory
*)` is used to initialize the rest of the node. Useful if you use
<<fun_HASH_PREFIX_OPEN_PAREN_lookup_CLOSE_PAREN_,`HASH_PREFIX(lookup())`>>
- `HASH_GIVE_ALLOC` -- you need to provide `void
- \*HASH_PREFIX(alloc)(uns size` and `void HASH_PREFIX(free)(void \*)`
\*HASH_PREFIX(alloc)(uint size)` and `void HASH_PREFIX(free)(void \*)`
to allocate and deallocate the nodes. Default uses
<<basics:xmalloc()>> and <<basics:xfree()>>, <<mempool:mempool
routines>> or <<eltpool:eltpool routines>>, depending on
<<use_pool,`HASH_USE_POOL`>>, <<auto_pool,`HASH_AUTO_POOL`>>,
<<use_eltpool,`HASH_USE_ELTPOOL`>> and <<auto_eltpool,`HASH_AUTO_ELTPOOL`>> switches.
- <<table_alloc:`HASH_GIVE_TABLE_ALLOC`>> -- you need to provide `void
- \*HASH_PREFIX(table_alloc)(uns size` and `void HASH_PREFIX(table_free)(void \*)`
\*HASH_PREFIX(table_alloc)(uint size)` and `void HASH_PREFIX(table_free)(void \*)`
to allocate and deallocate the table itself. Default uses
<<basics:xmalloc()>> and <<basics:xfree()>> or the functions
from `HASH_GIVE_ALLOC` depending on <<table_alloc:`HASH_TABLE_ALLOC`>> switch.
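As a sketch of the `HASH_GIVE_HASHFN` switch (node type, prefix and mixing constant are illustrative; the remaining switches follow the usual pattern of this generator):

  #define HASH_NODE struct my_node      // struct my_node { uint key; ... };
  #define HASH_PREFIX(x) mytab_##x
  #define HASH_KEY_ATOMIC key
  #define HASH_GIVE_HASHFN
  static inline uint mytab_hash(uint key)
  {
    return key * 2654435761U;           // multiplicative mixing; any decent hash works
  }
  #define HASH_WANT_FIND
  #define HASH_WANT_NEW
  #include <ucw/hashtable.h>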
void *stdin_data(struct mempool *pool) {
struct fastbuf *fb = bopen_fd(0, NULL); // Read from stdin
- uns amount;
+ uint amount;
char *ptr = mp_start(pool, 1024);
while(amount = bread(fb, ptr, 1024)) { // Read a block
ptr += amount; // Move after it
- Long name: an arbitrary string. Set to NULL if the option has no long form.
- Variable, where the value of the option shall be stored, together with
its <<conf:enum_cf_type,data type>>. The type is either one of the conventional
- types (`int`, `uns`, etc.), or a user-defined type providing its own parser
+ types (`int`, `uint`, etc.), or a user-defined type providing its own parser
function via <<conf:struct_cf_user_type,`cf_user_type`>>.
- <<flags,Flags>> further specifying behavior of the option (whether it is mandatory,
whether it carries a value, whether it can be set repeatedly, etc.).
#define ASORT_KEY_TYPE int
#include <ucw/sorter/array-simple.h>
-This generates an intarr_sort(int *array, uns array_size) function that
+This generates an intarr_sort(int *array, uint array_size) function that
can be used in the obvious way.
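Assuming the matching `ASORT_PREFIX(x) intarr_##x` definition, a call then looks like:

  int a[] = { 42, 7, 19 };
  intarr_sort(a, ARRAY_SIZE(a));   // a is now { 7, 19, 42 }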
A more complicated example could be sorting a structure, where items
#define ASORT_EXTRA_ARGS , struct elem *odd_array, struct elem *even_array
#include <ucw/sorter/array-simple.h>
-Now we got a complicated_sort(uns array_size, struct elem *odd_array,
+Now we have a complicated_sort(uint array_size, struct elem *odd_array,
struct elem *even_array) function to perform our sorting.
[[array]]
- `ASORT_LT(x,y)` -- Comparing macro. Uses the `<` operator if not
provided.
- `ASORT_HASH(x)` -- A monotone hash function (or macro). Should
- return `uns`.
+ return `uint`.
- `ASORT_LONG_HASH(x)` -- Like `ASORT_HASH(x)`, but returns 64-bit
number instead of 32-bit.
- `ASORT_THRESHOLD` -- How small should a chunk of data be to be sorted
When you want to use it, define `SORT_HASH_BITS` and set it to the
number of significant bits the hashing function provides. Then provide
-a callback function `uns SORT_PREFIX(hash)(SORT_KEY *key)`.
+a callback function `uint SORT_PREFIX(hash)(SORT_KEY *key)`.
[[merge-external]]
Merging items with identical keys
and provide these functions:
- `void SORT_PREFIX(write_merged)(struct fastbuf \*dest, SORT_KEY
- \*\*keys, void \*\*data, uns n, void *buf)`
+ \*\*keys, void \*\*data, uint n, void *buf)`
-- This function takes @n records in memory and writes a single
record into the @dest <<fastbuf:,fastbuf>>. The @keys and @data are
just the records. The @buf parameter points to a workspace memory.
macro over all the keys. The function is allowed to modify all its
parameters.
- `void SORT_PREFIX(copy_merged)(SORT_KEY \*\*keys, struct fastbuf
-\*\*data, uns n, struct fastbuf \*dest)`
+\*\*data, uint n, struct fastbuf \*dest)`
-- This one is similar to the above one, but the data are still in
the <<fastbuf:,fastbufs>> @data and no workspace is provided. This
is only used when `SORT_DATA_SIZE` or `SORT_UNIFY_WORKSPACE` is
#include <ucw/eltpool.h>
struct eltpool *
-ep_new(uns elt_size, uns elts_per_chunk)
+ep_new(uint elt_size, uint elts_per_chunk)
{
struct eltpool *pool = xmalloc_zero(sizeof(*pool));
pool->elt_size = ALIGN_TO(MAX(elt_size, sizeof(struct eltpool_free)), CPU_STRUCT_ALIGN);
{
struct eltpool_chunk *ch = page_alloc(pool->chunk_size);
void *p = (void *)(ch+1);
- for (uns i=1; i<pool->elts_per_chunk; i++)
+ for (uint i=1; i<pool->elts_per_chunk; i++)
{
struct eltpool_free *f = p;
f->next = pool->first_free;
struct eltpool *ep = ep_new(sizeof(struct argh), 64);
clist l;
clist_init(&l);
- for (uns i=0; i<65536; i++)
+ for (uint i=0; i<65536; i++)
{
struct argh *a = ep_alloc(ep);
if (i % 3)
struct eltpool {
struct eltpool_chunk *first_chunk;
struct eltpool_free *first_free;
- uns elt_size;
- uns chunk_size;
- uns elts_per_chunk;
- uns num_allocated; // Just for debugging
- uns num_chunks;
+ uint elt_size;
+ uint chunk_size;
+ uint elts_per_chunk;
+ uint num_allocated; // Just for debugging
+ uint num_chunks;
};
struct eltpool_chunk {
*
* Element pools can be treated as <<trans:respools,resources>>, see <<trans:res_eltpool()>>.
**/
-struct eltpool *ep_new(uns elt_size, uns elts_per_chunk);
+struct eltpool *ep_new(uint elt_size, uint elts_per_chunk);
/**
* Release a memory pool created by @ep_new() including all
return f->bptr >= f->bstop && !brefill(f, 1);
}
-void bputc_slow(struct fastbuf *f, uns c)
+void bputc_slow(struct fastbuf *f, uint c)
{
if (f->bptr >= f->bufend)
bspout(f);
*f->bptr++ = c;
}
-uns bread_slow(struct fastbuf *f, void *b, uns l, uns check)
+uint bread_slow(struct fastbuf *f, void *b, uint l, uint check)
{
- uns total = 0;
+ uint total = 0;
while (l)
{
- uns k = f->bstop - f->bptr;
+ uint k = f->bstop - f->bptr;
if (!k)
{
return total;
}
-void bwrite_slow(struct fastbuf *f, const void *b, uns l)
+void bwrite_slow(struct fastbuf *f, const void *b, uint l)
{
while (l)
{
- uns k = f->bufend - f->bptr;
+ uint k = f->bufend - f->bptr;
if (!k)
{
}
}
-void bbcopy_slow(struct fastbuf *f, struct fastbuf *t, uns l)
+void bbcopy_slow(struct fastbuf *f, struct fastbuf *t, uint l)
{
while (l)
{
byte *fptr, *tptr;
- uns favail, tavail, n;
+ uint favail, tavail, n;
favail = bdirect_read_prepare(f, &fptr);
if (!favail)
}
}
-int bconfig(struct fastbuf *f, uns item, int value)
+int bconfig(struct fastbuf *f, uint item, int value)
{
return (f->config && !(f->flags & FB_DEAD)) ? f->config(f, item, value) : -1;
}
bsetpos(f, 0);
}
-int bskip_slow(struct fastbuf *f, uns len)
+int bskip_slow(struct fastbuf *f, uint len)
{
while (len)
{
byte *buf;
- uns l = bdirect_read_prepare(f, &buf);
+ uint l = bdirect_read_prepare(f, &buf);
if (!l)
return 0;
l = MIN(l, len);
bclose(f);
}
-static void fb_res_dump(struct resource *r, uns indent UNUSED)
+static void fb_res_dump(struct resource *r, uint indent UNUSED)
{
struct fastbuf *f = r->priv;
printf(" name=%s\n", f->name);
byte *buffer, *bufend; /* Start and end of the buffer */
char *name; /* File name (used for error messages) */
ucw_off_t pos; /* Position of bstop in the file */
- uns flags; /* See enum fb_flags */
+ uint flags; /* See enum fb_flags */
int (*refill)(struct fastbuf *); /* Get a buffer with new data, returns 0 on EOF */
void (*spout)(struct fastbuf *); /* Write buffer data to the file */
int (*seek)(struct fastbuf *, ucw_off_t, int);/* Slow path for @bseek(), buffer already flushed; returns success */
void (*close)(struct fastbuf *); /* Close the stream */
- int (*config)(struct fastbuf *, uns, int); /* Configure the stream */
+ int (*config)(struct fastbuf *, uint, int); /* Configure the stream */
int can_overwrite_buffer; /* Can the buffer be altered? 0=never, 1=temporarily, 2=permanently */
struct resource *res; /* The fastbuf can be tied to a resource pool */
};
*/
struct fb_params {
enum fb_type type; /* The chosen back-end */
- uns buffer_size; /* 0 for default size */
- uns keep_back_buf; /* FB_STD: optimize for bi-directional access */
- uns read_ahead; /* FB_DIRECT options */
- uns write_back;
+ uint buffer_size; /* 0 for default size */
+ uint keep_back_buf; /* FB_STD: optimize for bi-directional access */
+ uint read_ahead; /* FB_DIRECT options */
+ uint write_back;
struct asio_queue *asio;
};
* up any parameters, there are a couple of shortcuts.
***/
-struct fastbuf *bopen(const char *name, uns mode, uns buflen); /** Equivalent to @bopen_file() with `FB_STD` back-end. **/
-struct fastbuf *bopen_try(const char *name, uns mode, uns buflen); /** Equivalent to @bopen_file_try() with `FB_STD` back-end. **/
-struct fastbuf *bopen_tmp(uns buflen); /** Equivalent to @bopen_tmp_file() with `FB_STD` back-end. **/
-struct fastbuf *bfdopen(int fd, uns buflen); /** Equivalent to @bopen_fd() with `FB_STD` back-end. **/
-struct fastbuf *bfdopen_shared(int fd, uns buflen); /** Like @bfdopen(), but it does not close the @fd on @bclose(). **/
+struct fastbuf *bopen(const char *name, uint mode, uint buflen); /** Equivalent to @bopen_file() with `FB_STD` back-end. **/
+struct fastbuf *bopen_try(const char *name, uint mode, uint buflen); /** Equivalent to @bopen_file_try() with `FB_STD` back-end. **/
+struct fastbuf *bopen_tmp(uint buflen); /** Equivalent to @bopen_tmp_file() with `FB_STD` back-end. **/
+struct fastbuf *bfdopen(int fd, uint buflen); /** Equivalent to @bopen_fd() with `FB_STD` back-end. **/
+struct fastbuf *bfdopen_shared(int fd, uint buflen); /** Like @bfdopen(), but it does not close the @fd on @bclose(). **/
/***
* === Temporary files [[fbtemp]]
/* Internal functions of some file back-ends */
-struct fastbuf *bfdopen_internal(int fd, const char *name, uns buflen);
-struct fastbuf *bfmmopen_internal(int fd, const char *name, uns mode);
+struct fastbuf *bfdopen_internal(int fd, const char *name, uint buflen);
+struct fastbuf *bfmmopen_internal(int fd, const char *name, uint mode);
#ifdef CONFIG_UCW_FB_DIRECT
-extern uns fbdir_cheat;
+extern uint fbdir_cheat;
struct asio_queue;
-struct fastbuf *fbdir_open_fd_internal(int fd, const char *name, struct asio_queue *io_queue, uns buffer_size, uns read_ahead, uns write_back);
+struct fastbuf *fbdir_open_fd_internal(int fd, const char *name, struct asio_queue *io_queue, uint buffer_size, uint read_ahead, uint write_back);
#endif
void bclose_file_helper(struct fastbuf *f, int fd, int is_temp_file);
* number of bytes. This is frequently used for reading from sockets.
***/
-struct fastbuf *bopen_limited_fd(int fd, uns bufsize, uns limit); /** Create a fastbuf which reads at most @limit bytes from @fd. **/
+struct fastbuf *bopen_limited_fd(int fd, uint bufsize, uint limit); /** Create a fastbuf which reads at most @limit bytes from @fd. **/
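For example, to cap how much a protocol parser may consume from a socket (sock_fd being a placeholder for a connected descriptor):

  struct fastbuf *f = bopen_limited_fd(sock_fd, 4096, 256);   // at most 256 bytes readable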
/***
* === Fastbufs on in-memory streams [[fbmem]]
* an arbitrary number of fastbuf for reading from the stream.
***/
-struct fastbuf *fbmem_create(uns blocksize); /** Create stream and return its writing fastbuf. **/
+struct fastbuf *fbmem_create(uint blocksize); /** Create stream and return its writing fastbuf. **/
struct fastbuf *fbmem_clone_read(struct fastbuf *f); /** Given a writing fastbuf, create a new reading fastbuf. **/
/***
* It is not possible to close this fastbuf. This implies that no tying to
* resources takes place.
*/
-void fbbuf_init_read(struct fastbuf *f, byte *buffer, uns size, uns can_overwrite);
+void fbbuf_init_read(struct fastbuf *f, byte *buffer, uint size, uint can_overwrite);
/**
* Creates a write-only fastbuf which writes into a provided memory buffer.
* It is not possible to close this fastbuf. This implies that no tying to
* resources takes place.
*/
-void fbbuf_init_write(struct fastbuf *f, byte *buffer, uns size);
+void fbbuf_init_write(struct fastbuf *f, byte *buffer, uint size);
-static inline uns fbbuf_count_written(struct fastbuf *f) /** Calculates, how many bytes were already written into the buffer. **/
+static inline uint fbbuf_count_written(struct fastbuf *f) /** Calculates how many bytes have already been written into the buffer. **/
{
return f->bptr - f->bstop;
}
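A short sketch of the write-only variant:

  byte buf[64];
  struct fastbuf f;
  fbbuf_init_write(&f, buf, sizeof(buf));
  bputs(&f, "hello");
  ASSERT(fbbuf_count_written(&f) == 5);   // five bytes stored in buf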
struct mempool;
-struct fastbuf *fbgrow_create(unsigned basic_size); /** Create the growing buffer pre-allocated to @basic_size bytes. **/
-struct fastbuf *fbgrow_create_mp(struct mempool *mp, unsigned basic_size); /** Create the growing buffer pre-allocated to @basic_size bytes. **/
+struct fastbuf *fbgrow_create(uint basic_size); /** Create the growing buffer pre-allocated to @basic_size bytes. **/
+struct fastbuf *fbgrow_create_mp(struct mempool *mp, uint basic_size); /** Create the growing buffer pre-allocated to @basic_size bytes. **/
void fbgrow_reset(struct fastbuf *b); /** Reset stream and prepare for writing. **/
void fbgrow_rewind(struct fastbuf *b); /** Prepare for reading (of already written data). **/
* @fbgrow_rewind()) to return the pointer to internal buffer and its length in
* bytes. The returned buffer can be invalidated by further requests.
**/
-uns fbgrow_get_buf(struct fastbuf *b, byte **buf);
+uint fbgrow_get_buf(struct fastbuf *b, byte **buf);
/***
* === Fastbuf on memory pools [[fbpool]]
* Start a new continuous block and prepare for writing (see <<mempool:mp_start()>>).
* Provide the memory pool you want to use for this block as @mp.
**/
-void fbpool_start(struct fbpool *fb, struct mempool *mp, uns init_size);
+void fbpool_start(struct fbpool *fb, struct mempool *mp, uint init_size);
/**
* Close the block and return the address of its start (see <<mempool:mp_end()>>).
* The length can be determined by calling <<mempool:mp_size(mp, ptr)>>.
struct fastbuf fb;
struct fb_atomic_file *af;
byte *expected_max_bptr;
- uns slack_size;
+ uint slack_size;
};
/**
*
* The file is closed when all fastbufs using it are closed.
**/
-struct fastbuf *fbatomic_open(const char *name, struct fastbuf *master, uns bufsize, int record_len);
+struct fastbuf *fbatomic_open(const char *name, struct fastbuf *master, uint bufsize, int record_len);
void fbatomic_internal_write(struct fastbuf *b);
/**
* Creates a new "/dev/null"-like fastbuf.
* Any read attempt returns an EOF, any write attempt is silently ignored.
**/
-struct fastbuf *fbnull_open(uns bufsize);
+struct fastbuf *fbnull_open(uint bufsize);
/**
* Can be used by any back-end to switch it to the null mode.
* You need to provide a buffer at least one byte long for writing.
**/
-void fbnull_start(struct fastbuf *b, byte *buf, uns bufsize);
+void fbnull_start(struct fastbuf *b, byte *buf, uint bufsize);
/**
* Checks whether a fastbuf has been switched to the null mode.
BCONFIG_KEEP_BACK_BUF, /* Optimize for bi-directional access */
};
-int bconfig(struct fastbuf *f, uns type, int data); /** Configure a fastbuf. Returns previous value. **/
+int bconfig(struct fastbuf *f, uint type, int data); /** Configure a fastbuf. Returns previous value. **/
/*** === Universal functions working on all fastbuf's [[ffbasic]] ***/
f->bptr--;
}
-void bputc_slow(struct fastbuf *f, uns c);
-static inline void bputc(struct fastbuf *f, uns c) /** Write a single character. **/
+void bputc_slow(struct fastbuf *f, uint c);
+static inline void bputc(struct fastbuf *f, uint c) /** Write a single character. **/
{
if (f->bptr < f->bufend)
*f->bptr++ = c;
bputc_slow(f, c);
}
-static inline uns bavailr(struct fastbuf *f) /** Return the length of the cached data to be read. Do not use directly. **/
+static inline uint bavailr(struct fastbuf *f) /** Return the length of the cached data to be read. Do not use directly. **/
{
return f->bstop - f->bptr;
}
-static inline uns bavailw(struct fastbuf *f) /** Return the length of the buffer available for writing. Do not use directly. **/
+static inline uint bavailw(struct fastbuf *f) /** Return the length of the buffer available for writing. Do not use directly. **/
{
return f->bufend - f->bptr;
}
-uns bread_slow(struct fastbuf *f, void *b, uns l, uns check);
+uint bread_slow(struct fastbuf *f, void *b, uint l, uint check);
/**
* Read at most @l bytes of data into @b.
* Returns number of bytes read.
* 0 means end of file.
*/
-static inline uns bread(struct fastbuf *f, void *b, uns l)
+static inline uint bread(struct fastbuf *f, void *b, uint l)
{
if (bavailr(f) >= l)
{
* If at the end of file, it returns 0.
* If there are data, but less than @l, it raises `ucw.fb.eof`.
*/
-static inline uns breadb(struct fastbuf *f, void *b, uns l)
+static inline uint breadb(struct fastbuf *f, void *b, uint l)
{
if (bavailr(f) >= l)
{
return bread_slow(f, b, l, 1);
}
-void bwrite_slow(struct fastbuf *f, const void *b, uns l);
-static inline void bwrite(struct fastbuf *f, const void *b, uns l) /** Writes buffer @b of length @l into fastbuf. **/
+void bwrite_slow(struct fastbuf *f, const void *b, uint l);
+static inline void bwrite(struct fastbuf *f, const void *b, uint l) /** Writes buffer @b of length @l into fastbuf. **/
{
if (bavailw(f) >= l)
{
* Returns pointer to the terminating 0 or NULL on `EOF`.
* Raises `ucw.fb.toolong` if the line is longer than @l.
**/
-char *bgets(struct fastbuf *f, char *b, uns l);
-char *bgets0(struct fastbuf *f, char *b, uns l); /** The same as @bgets(), but for 0-terminated strings. **/
+char *bgets(struct fastbuf *f, char *b, uint l);
+char *bgets0(struct fastbuf *f, char *b, uint l); /** The same as @bgets(), but for 0-terminated strings. **/
/**
* Returns either the length of the string read (excluding the terminator) or -1 if it is too long.
* In such a case, exactly @l bytes are read.
*/
-int bgets_nodie(struct fastbuf *f, char *b, uns l);
+int bgets_nodie(struct fastbuf *f, char *b, uint l);
struct mempool;
struct bb_t;
* Read a string, strip the trailing `\n` and store it into growing buffer @b.
* Raises `ucw.fb.toolong` if the line is longer than @limit.
**/
-uns bgets_bb(struct fastbuf *f, struct bb_t *b, uns limit);
+uint bgets_bb(struct fastbuf *f, struct bb_t *b, uint limit);
/**
* Read a string, strip the trailing `\n` and store it into buffer allocated from a memory pool.
**/
struct bgets_stk_struct {
struct fastbuf *f;
byte *old_buf, *cur_buf, *src;
- uns old_len, cur_len, src_len;
+ uint old_len, cur_len, src_len;
};
void bgets_stk_init(struct bgets_stk_struct *s);
void bgets_stk_step(struct bgets_stk_struct *s);
bputc(f, '\n');
}
-void bbcopy_slow(struct fastbuf *f, struct fastbuf *t, uns l);
+void bbcopy_slow(struct fastbuf *f, struct fastbuf *t, uint l);
/**
* Copy @l bytes of data from fastbuf @f to fastbuf @t.
* `UINT_MAX` (`~0U`) means all data, even if more than `UINT_MAX` bytes remain.
**/
-static inline void bbcopy(struct fastbuf *f, struct fastbuf *t, uns l)
+static inline void bbcopy(struct fastbuf *f, struct fastbuf *t, uint l)
{
if (bavailr(f) >= l && bavailw(t) >= l)
{
bbcopy_slow(f, t, l);
}
-int bskip_slow(struct fastbuf *f, uns len);
-static inline int bskip(struct fastbuf *f, uns len) /** Skip @len bytes without reading them. **/
+int bskip_slow(struct fastbuf *f, uint len);
+static inline int bskip(struct fastbuf *f, uint len) /** Skip @len bytes without reading them. **/
{
if (bavailr(f) >= len)
{
* The reading must be ended by @bdirect_read_commit() or @bdirect_read_commit_modified(),
* unless the user did not read or modify anything.
**/
-static inline uns bdirect_read_prepare(struct fastbuf *f, byte **buf)
+static inline uint bdirect_read_prepare(struct fastbuf *f, byte **buf)
{
if (f->bptr == f->bstop && !f->refill(f))
{
* where we can write to. The operation must be ended by @bdirect_write_commit(),
* unless nothing is written.
**/
-static inline uns bdirect_write_prepare(struct fastbuf *f, byte **buf)
+static inline uint bdirect_write_prepare(struct fastbuf *f, byte **buf)
{
if (f->bptr == f->bufend)
f->spout(f);
#include <fcntl.h>
#include <unistd.h>
-static uns trace;
+static uint trace;
#ifndef TEST
static struct cf_section fbatomic_config = {
CF_ITEMS {
- CF_UNS("Trace", &trace),
+ CF_UINT("Trace", &trace),
CF_END
}
};
int fd;
int use_count;
int record_len;
- uns locked;
+ uint locked;
byte name[1];
};
struct fb_atomic *F = FB_ATOMIC(f);
if (F->af->locked)
{
- uns written = f->bptr - f->buffer;
- uns size = f->bufend - f->buffer + F->slack_size;
+ uint written = f->bptr - f->buffer;
+ uint size = f->bufend - f->buffer + F->slack_size;
F->slack_size *= 2;
TRACE("Reallocating buffer for atomic file %s with slack %d", f->name, F->slack_size);
f->buffer = xrealloc(f->buffer, size);
}
struct fastbuf *
-fbatomic_open(const char *name, struct fastbuf *master, uns bufsize, int record_len)
+fbatomic_open(const char *name, struct fastbuf *master, uint bufsize, int record_len)
{
struct fb_atomic *F = xmalloc_zero(sizeof(*F));
struct fastbuf *f = &F->fb;
}
void
-fbbuf_init_read(struct fastbuf *f, byte *buf, uns size, uns can_overwrite)
+fbbuf_init_read(struct fastbuf *f, byte *buf, uint size, uint can_overwrite)
{
*f = (struct fastbuf) {
.buffer = buf,
}
void
-fbbuf_init_write(struct fastbuf *f, byte *buf, uns size)
+fbbuf_init_write(struct fastbuf *f, byte *buf, uint size)
{
*f = (struct fastbuf) {
.buffer = buf,
#define FB_DIRECT(f) ((struct fb_direct *)(f))
#ifndef TEST
-uns fbdir_cheat;
+uint fbdir_cheat;
static struct cf_section fbdir_cf = {
CF_ITEMS {
- CF_UNS("Cheat", &fbdir_cheat),
+ CF_UINT("Cheat", &fbdir_cheat),
CF_END
}
};
}
static struct asio_queue *
-fbdir_get_io_queue(uns buffer_size, uns write_back)
+fbdir_get_io_queue(uint buffer_size, uint write_back)
{
struct ucwlib_context *ctx = ucwlib_thread_context();
struct asio_queue *q = ctx->io_queue;
}
static int
-fbdir_config(struct fastbuf *f, uns item, int value)
+fbdir_config(struct fastbuf *f, uint item, int value)
{
int orig;
}
struct fastbuf *
-fbdir_open_fd_internal(int fd, const char *name, struct asio_queue *q, uns buffer_size, uns read_ahead UNUSED, uns write_back)
+fbdir_open_fd_internal(int fd, const char *name, struct asio_queue *q, uint buffer_size, uint read_ahead UNUSED, uint write_back)
{
int namelen = strlen(name) + 1;
struct fb_direct *F = xmalloc(sizeof(struct fb_direct) + namelen);
int is_temp_file;
int keep_back_buf; /* Optimize for backwards reading */
ucw_off_t wpos; /* Real file position */
- uns wlen; /* Window size */
+ uint wlen; /* Window size */
};
#define FB_FILE(f) ((struct fb_file *)(f))
#define FB_BUFFER(f) (byte *)(FB_FILE(f) + 1)
{
struct fb_file *F = FB_FILE(f);
byte *read_ptr = (f->buffer = FB_BUFFER(f));
- uns blen = f->bufend - f->buffer, back = F->keep_back_buf ? blen >> 2 : 0, read_len = blen;
+ uint blen = f->bufend - f->buffer, back = F->keep_back_buf ? blen >> 2 : 0, read_len = blen;
/* Forward or no seek */
if (F->wpos <= f->pos)
{
goto seek;
}
/* Short forward seek (prefer read() to skip data) */
- else if ((uns)diff >= back)
+ else if ((uint)diff >= back)
{
- uns skip = diff - back;
+ uint skip = diff - back;
F->wpos += skip;
while (skip)
{
/* Reuse part of the previous window and append new data (also F->wpos == f->pos) */
else
{
- uns keep = back - (uns)diff;
+ uint keep = back - (uint)diff;
if (keep >= F->wlen)
back = diff + (keep = F->wlen);
else
goto long_seek;
}
/* Seek into previous window (do nothing... for example brewind) */
- else if ((uns)diff <= F->wlen)
+ else if ((uint)diff <= F->wlen)
{
f->bstop = f->buffer + F->wlen;
f->bptr = f->bstop - diff;
/* Reuse part of previous window */
if (F->wlen && read_len <= back + diff && read_len > back + diff - F->wlen)
{
- uns keep = read_len + F->wlen - back - diff;
+ uint keep = read_len + F->wlen - back - diff;
memmove(f->buffer + read_len - keep, f->buffer, keep);
}
seek:
}
static int
-bfd_config(struct fastbuf *f, uns item, int value)
+bfd_config(struct fastbuf *f, uint item, int value)
{
int orig;
}
struct fastbuf *
-bfdopen_internal(int fd, const char *name, uns buflen)
+bfdopen_internal(int fd, const char *name, uint buflen)
{
ASSERT(buflen);
int namelen = strlen(name) + 1;
struct fastbuf *f, *t;
f = bopen_tmp(16);
t = bfdopen_shared(1, 13);
- for (uns i = 0; i < 16; i++)
+ for (uint i = 0; i < 16; i++)
bwrite(f, "<hello>", 7);
bprintf(t, "%d\n", (int)btell(f));
brewind(f);
{
if (b->bptr == b->bufend)
{
- uns len = b->bufend - b->buffer;
+ uint len = b->bufend - b->buffer;
if (FB_GBUF(b)->mp)
{
byte *old = b->buffer;
xfree(b);
}
-struct fastbuf *fbgrow_create_mp(struct mempool *mp, unsigned basic_size)
+struct fastbuf *fbgrow_create_mp(struct mempool *mp, uint basic_size)
{
ASSERT(basic_size);
struct fastbuf *b;
return b;
}
-struct fastbuf *fbgrow_create(unsigned basic_size)
+struct fastbuf *fbgrow_create(uint basic_size)
{
return fbgrow_create_mp(NULL, basic_size);
}
brewind(b);
}
-uns fbgrow_get_buf(struct fastbuf *b, byte **buf)
+uint fbgrow_get_buf(struct fastbuf *b, byte **buf)
{
byte *end = FB_GBUF(b)->end;
end = MAX(end, b->bptr);
int main(void)
{
struct fastbuf *f;
- uns t;
+ uint t;
f = fbgrow_create(3);
- for (uns i=0; i<5; i++)
+ for (uint i=0; i<5; i++)
{
fbgrow_reset(f);
bwrite(f, "12345", 5);
}
struct fastbuf *
-bopen_limited_fd(int fd, uns buflen, uns limit)
+bopen_limited_fd(int fd, uint buflen, uint limit)
{
struct fb_limfd *F = xmalloc(sizeof(struct fb_limfd) + buflen);
struct fastbuf *f = &F->fb;
#include <stdio.h>
struct memstream {
- unsigned blocksize;
- unsigned uc;
+ uint blocksize;
+ uint uc;
struct msblock *first;
};
struct msblock {
struct msblock *next;
ucw_off_t pos;
- unsigned size;
+ uint size;
byte data[0];
};
}
struct fastbuf *
-fbmem_create(unsigned blocksize)
+fbmem_create(uint blocksize)
{
struct fastbuf *f = xmalloc_zero(sizeof(struct fb_mem));
struct memstream *s = xmalloc_zero(sizeof(struct memstream));
#include <unistd.h>
#include <sys/mman.h>
-static uns mmap_window_size = 16*CPU_PAGE_SIZE;
-static uns mmap_extend_size = 4*CPU_PAGE_SIZE;
+static uint mmap_window_size = 16*CPU_PAGE_SIZE;
+static uint mmap_extend_size = 4*CPU_PAGE_SIZE;
#ifndef TEST
static struct cf_section fbmm_config = {
CF_ITEMS {
- CF_UNS("WindowSize", &mmap_window_size),
- CF_UNS("ExtendSize", &mmap_extend_size),
+ CF_UINT("WindowSize", &mmap_window_size),
+ CF_UINT("ExtendSize", &mmap_extend_size),
CF_END
}
};
ucw_off_t file_size;
ucw_off_t file_extend;
ucw_off_t window_pos;
- uns window_size;
+ uint window_size;
int mode;
};
#define FB_MMAP(f) ((struct fb_mmap *)(f))
struct fb_mmap *F = FB_MMAP(f);
ucw_off_t pos0 = f->pos & ~(ucw_off_t)(CPU_PAGE_SIZE-1);
int l = MIN((ucw_off_t)mmap_window_size, F->file_extend - pos0);
- uns ll = ALIGN_TO(l, CPU_PAGE_SIZE);
+ uint ll = ALIGN_TO(l, CPU_PAGE_SIZE);
int prot = ((F->mode & O_ACCMODE) == O_RDONLY) ? PROT_READ : (PROT_READ | PROT_WRITE);
DBG(" ... Mapping %x(%x)+%x(%x) len=%x extend=%x", (int)pos0, (int)f->pos, ll, l, (int)F->file_size, (int)F->file_extend);
}
static int
-bfmm_config(struct fastbuf *f, uns item, int value)
+bfmm_config(struct fastbuf *f, uint item, int value)
{
int orig;
}
struct fastbuf *
-bfmmopen_internal(int fd, const char *name, uns mode)
+bfmmopen_internal(int fd, const char *name, uint mode)
{
int namelen = strlen(name) + 1;
struct fb_mmap *F = xmalloc(sizeof(struct fb_mmap) + namelen);
fbmulti_set_ptrs(f);
// Refill the subbuf
- uns len = FB_MULTI(f)->cur->fb->refill(FB_MULTI(f)->cur->fb);
+ uint len = FB_MULTI(f)->cur->fb->refill(FB_MULTI(f)->cur->fb);
if (len)
{
fbmulti_get_ptrs(f);
{
char *data[] = { "One\nLine", "Two\nLines", "Th\nreeLi\nnes\n" };
struct fastbuf fb[ARRAY_SIZE(data)];
- for (uns i=0;i<ARRAY_SIZE(data);i++)
+ for (uint i=0;i<ARRAY_SIZE(data);i++)
fbbuf_init_read(&fb[i], data[i], strlen(data[i]), 0);
struct fastbuf *f = fbmulti_create();
{
char *data[] = { "Mnl", "ige" };
struct fastbuf fb[ARRAY_SIZE(data)];
- for (uns i=0;i<ARRAY_SIZE(data);i++)
+ for (uint i=0;i<ARRAY_SIZE(data);i++)
fbbuf_init_read(&fb[i], data[i], strlen(data[i]), 0);
struct fastbuf *f = fbmulti_create();
int pos[] = {0, 3, 1, 4, 2, 5};
- for (uns i=0;i<ARRAY_SIZE(pos);i++)
+ for (uint i=0;i<ARRAY_SIZE(pos);i++)
{
bsetpos(f, pos[i]);
putchar(bgetc(f));
{
char *data[] = { "Nested", "Data", "As", "In", "Real", "Usage", };
struct fastbuf fb[ARRAY_SIZE(data)];
- for (uns i=0;i<ARRAY_SIZE(data);i++)
+ for (uint i=0;i<ARRAY_SIZE(data);i++)
fbbuf_init_read(&fb[i], data[i], strlen(data[i]), 0);
struct fastbuf sp;
xfree(b);
}
-struct fastbuf *fbnull_open(uns bufsize)
+struct fastbuf *fbnull_open(uint bufsize)
{
struct fastbuf *b = xmalloc(sizeof(*b) + bufsize);
bzero(b, sizeof(*b));
return 1;
}
-void fbnull_start(struct fastbuf *b, byte *buf, uns bufsize)
+void fbnull_start(struct fastbuf *b, byte *buf, uint bufsize)
{
ASSERT(buf && bufsize);
b->pos = btell(b);
int main(void)
{
struct fastbuf *b = fbnull_open(7);
- for (uns i = 0; i < 100; i++)
+ for (uint i = 0; i < 100; i++)
{
if (btell(b) != i * 10)
ASSERT(0);
CF_COMMIT(fbpar_cf_commit),
CF_ITEMS {
CF_LOOKUP("Type", (int *)F(type), ((const char * const []){"std", "direct", "mmap", NULL})),
- CF_UNS("BufSize", F(buffer_size)),
- CF_UNS("KeepBackBuf", F(keep_back_buf)),
- CF_UNS("ReadAhead", F(read_ahead)),
- CF_UNS("WriteBack", F(write_back)),
+ CF_UINT("BufSize", F(buffer_size)),
+ CF_UINT("KeepBackBuf", F(keep_back_buf)),
+ CF_UINT("ReadAhead", F(read_ahead)),
+ CF_UINT("WriteBack", F(write_back)),
CF_END
}
# undef F
}
static struct fastbuf *
-bopen_fd_internal(int fd, struct fb_params *params, uns mode, const char *name)
+bopen_fd_internal(int fd, struct fb_params *params, uint mode, const char *name)
{
char buf[32];
if (!name)
/* Compatibility wrappers */
struct fastbuf *
-bopen_try(const char *name, uns mode, uns buflen)
+bopen_try(const char *name, uint mode, uint buflen)
{
return bopen_file_try(name, mode, &(struct fb_params){ .type = FB_STD, .buffer_size = buflen });
}
struct fastbuf *
-bopen(const char *name, uns mode, uns buflen)
+bopen(const char *name, uint mode, uint buflen)
{
return bopen_file(name, mode, &(struct fb_params){ .type = FB_STD, .buffer_size = buflen });
}
struct fastbuf *
-bfdopen(int fd, uns buflen)
+bfdopen(int fd, uint buflen)
{
return bopen_fd(fd, &(struct fb_params){ .type = FB_STD, .buffer_size = buflen });
}
struct fastbuf *
-bfdopen_shared(int fd, uns buflen)
+bfdopen_shared(int fd, uint buflen)
{
struct fastbuf *f = bfdopen(fd, buflen);
bconfig(f, BCONFIG_IS_TEMP_FILE, 2);
{
if (b->bptr == b->bufend)
{
- uns len = b->bufend - b->buffer;
+ uint len = b->bufend - b->buffer;
b->bstop = b->buffer = mp_expand(FB_POOL(b)->mp);
b->bufend = b->buffer + mp_avail(FB_POOL(b)->mp);
b->bptr = b->buffer + len;
}
void
-fbpool_start(struct fbpool *b, struct mempool *mp, uns init_size)
+fbpool_start(struct fbpool *b, struct mempool *mp, uint init_size)
{
b->mp = mp;
b->fb.buffer = b->fb.bstop = b->fb.bptr = mp_start(mp, init_size);
struct mempool *mp;
struct fbpool fb;
byte *p;
- uns l;
+ uint l;
mp = mp_new(64);
fbpool_init(&fb);
fbpool_start(&fb, mp, 16);
- for (uns i = 0; i < 1024; i++)
+ for (uint i = 0; i < 1024; i++)
bprintf(&fb.fb, "<hello>");
p = fbpool_end(&fb);
l = mp_size(mp, p);
if (l != 1024 * 7)
ASSERT(0);
- for (uns i = 0; i < 1024; i++)
+ for (uint i = 0; i < 1024; i++)
if (memcmp(p + i * 7, "<hello>", 7))
ASSERT(0);
mp_delete(mp);
#include <stdlib.h>
-static void test_err(void *x UNUSED, uns flags, char *msg UNUSED)
+static void test_err(void *x UNUSED, uint flags, char *msg UNUSED)
{
if (flags & FBSOCK_READ)
printf("READ");
struct fbsock_params { /** Configuration of socket fastbuf. **/
int fd;
int fd_is_shared;
- uns bufsize;
- uns timeout_ms;
- void (*err)(void *data, uns flags, char *msg);
+ uint bufsize;
+ uint timeout_ms;
+ void (*err)(void *data, uint flags, char *msg);
void *data; // Passed to the err callback
};
}
struct fastbuf *
-bopen_tmp(uns buflen)
+bopen_tmp(uint buflen)
{
return bopen_tmp_file(&(struct fb_params){ .type = FB_STD, .buffer_size = buflen });
}
#define FF_ALL(type, name, size) GEN(type,name,size,be) GEN(type,name,size,le)
FF_ALL(int, w, 16)
-FF_ALL(uns, l, 32)
+FF_ALL(uint, l, 32)
FF_ALL(u64, q, 64)
FF_ALL(u64, 5, 40)
* where `NAME` together with `TYPE` can be:
*
* - `w` for 16-bit unsigned integers stored in sequences of 2 bytes, the `TYPE` is int
- * - `l` for 32-bit unsigned integers stored in sequences of 4 bytes, the `TYPE` is uns
+ * - `l` for 32-bit unsigned integers stored in sequences of 4 bytes, the `TYPE` is uint
* - `5` for 40-bit unsigned integers stored in sequences of 5 bytes, the `TYPE` is u64
* - `q` for 64-bit unsigned integers stored in sequences of 8 bytes, the `TYPE` is u64
*
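/* For illustration: a standalone sketch of the 5-byte big-endian layout the
 * `5' variants use, assuming nothing beyond <stdint.h>; the names are
 * illustrative and do not appear in the library. */
#include <stdint.h>

static void put_u40_be(uint8_t *p, uint64_t x)
{
  p[0] = x >> 32;
  p[1] = x >> 24;
  p[2] = x >> 16;
  p[3] = x >> 8;
  p[4] = x;
}

static uint64_t get_u40_be(const uint8_t *p)
{
  return ((uint64_t)p[0] << 32) | ((uint64_t)p[1] << 24)
       | ((uint64_t)p[2] << 16) | ((uint64_t)p[3] << 8) | p[4];
}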
#define FF_ALL(type, name, bits, defendian) FF_ALL_X(type, name, bits, defendian)
FF_ALL(int, w, 16, FF_ENDIAN)
-FF_ALL(uns, l, 32, FF_ENDIAN)
+FF_ALL(uint, l, 32, FF_ENDIAN)
FF_ALL(u64, q, 64, FF_ENDIAN)
FF_ALL(u64, 5, 40, FF_ENDIAN)
bgets_stk_step(struct bgets_stk_struct *s)
{
byte *buf = s->cur_buf;
- uns buf_len = s->cur_len;
+ uint buf_len = s->cur_len;
if (s->old_buf)
{
memcpy( s->cur_buf, s->old_buf, s->old_len);
}
do
{
- uns cnt = MIN(s->src_len, buf_len);
- for (uns i = cnt; i--;)
+ uint cnt = MIN(s->src_len, buf_len);
+ for (uint i = cnt; i--;)
{
byte v = *s->src++;
if (v == '\n')
#include <ucw/bbuf.h>
char * /* Non-standard */
-bgets(struct fastbuf *f, char *b, uns l)
+bgets(struct fastbuf *f, char *b, uint l)
{
ASSERT(l);
byte *src;
- uns src_len = bdirect_read_prepare(f, &src);
+ uint src_len = bdirect_read_prepare(f, &src);
if (!src_len)
return NULL;
do
{
- uns cnt = MIN(l, src_len);
- for (uns i = cnt; i--;)
+ uint cnt = MIN(l, src_len);
+ for (uint i = cnt; i--;)
{
byte v = *src++;
if (v == '\n')
}
int
-bgets_nodie(struct fastbuf *f, char *b, uns l)
+bgets_nodie(struct fastbuf *f, char *b, uint l)
{
ASSERT(l);
byte *src, *start = b;
- uns src_len = bdirect_read_prepare(f, &src);
+ uint src_len = bdirect_read_prepare(f, &src);
if (!src_len)
return 0;
do
{
- uns cnt = MIN(l, src_len);
- for (uns i = cnt; i--;)
+ uint cnt = MIN(l, src_len);
+ for (uint i = cnt; i--;)
{
byte v = *src++;
if (v == '\n')
return b - (char *)start;
}
-uns
-bgets_bb(struct fastbuf *f, struct bb_t *bb, uns limit)
+uint
+bgets_bb(struct fastbuf *f, struct bb_t *bb, uint limit)
{
ASSERT(limit);
byte *src;
- uns src_len = bdirect_read_prepare(f, &src);
+ uint src_len = bdirect_read_prepare(f, &src);
if (!src_len)
return 0;
bb_grow(bb, 1);
byte *buf = bb->ptr;
- uns len = 0, buf_len = MIN(bb->len, limit);
+ uint len = 0, buf_len = MIN(bb->len, limit);
do
{
- uns cnt = MIN(src_len, buf_len);
- for (uns i = cnt; i--;)
+ uint cnt = MIN(src_len, buf_len);
+ for (uint i = cnt; i--;)
{
byte v = *src++;
if (v == '\n')
bgets_mp(struct fastbuf *f, struct mempool *mp)
{
byte *src;
- uns src_len = bdirect_read_prepare(f, &src);
+ uint src_len = bdirect_read_prepare(f, &src);
if (!src_len)
return NULL;
#define BLOCK_SIZE (4096 - sizeof(void *))
struct block *prev;
byte data[BLOCK_SIZE];
} *blocks = NULL;
- uns sum = 0, buf_len = BLOCK_SIZE, cnt;
+ uint sum = 0, buf_len = BLOCK_SIZE, cnt;
struct block first_block, *new_block = &first_block;
byte *buf = new_block->data;
do
{
cnt = MIN(src_len, buf_len);
- for (uns i = cnt; i--;)
+ for (uint i = cnt; i--;)
{
byte v = *src++;
if (v == '\n')
}
while (src_len);
exit: ;
- uns len = buf - new_block->data;
+ uint len = buf - new_block->data;
byte *result = mp_alloc(mp, sum + len + 1) + sum;
result[len] = 0;
memcpy(result, new_block->data, len);
}
char *
-bgets0(struct fastbuf *f, char *b, uns l)
+bgets0(struct fastbuf *f, char *b, uint l)
{
ASSERT(l);
byte *src;
- uns src_len = bdirect_read_prepare(f, &src);
+ uint src_len = bdirect_read_prepare(f, &src);
if (!src_len)
return NULL;
do
{
- uns cnt = MIN(l, src_len);
- for (uns i = cnt; i--;)
+ uint cnt = MIN(l, src_len);
+ for (uint i = cnt; i--;)
{
*b = *src++;
if (!*b)
/*** UTF-8 ***/
int
-bget_utf8_slow(struct fastbuf *b, uns repl)
+bget_utf8_slow(struct fastbuf *b, uint repl)
{
int c = bgetc(b);
int code;
}
int
-bget_utf8_32_slow(struct fastbuf *b, uns repl)
+bget_utf8_32_slow(struct fastbuf *b, uint repl)
{
int c = bgetc(b);
int code;
}
void
-bput_utf8_slow(struct fastbuf *b, uns u)
+bput_utf8_slow(struct fastbuf *b, uint u)
{
ASSERT(u < 65536);
if (u < 0x80)
}
void
-bput_utf8_32_slow(struct fastbuf *b, uns u)
+bput_utf8_32_slow(struct fastbuf *b, uint u)
{
ASSERT(u < (1U<<31));
if (u < 0x80)
/*** UTF-16 ***/
int
-bget_utf16_be_slow(struct fastbuf *b, uns repl)
+bget_utf16_be_slow(struct fastbuf *b, uint repl)
{
if (bpeekc(b) < 0)
return -1;
- uns u = bgetw_be(b), x, y;
+ uint u = bgetw_be(b), x, y;
if ((int)u < 0)
return repl;
if ((x = u - 0xd800) >= 0x800)
}
int
-bget_utf16_le_slow(struct fastbuf *b, uns repl)
+bget_utf16_le_slow(struct fastbuf *b, uint repl)
{
if (bpeekc(b) < 0)
return -1;
- uns u = bgetw_le(b), x, y;
+ uint u = bgetw_le(b), x, y;
if ((int)u < 0)
return repl;
if ((x = u - 0xd800) >= 0x800)
}
void
-bput_utf16_be_slow(struct fastbuf *b, uns u)
+bput_utf16_be_slow(struct fastbuf *b, uint u)
{
if (u < 0xd800 || (u < 0x10000 && u >= 0xe000))
{
}
void
-bput_utf16_le_slow(struct fastbuf *b, uns u)
+bput_utf16_le_slow(struct fastbuf *b, uint u)
{
if (u < 0xd800 || (u < 0x10000 && u >= 0xe000))
{
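/* The condition above passes through BMP code points outside the surrogate
 * block; anything >= 0x10000 must be split into a surrogate pair. The
 * standard arithmetic, as a standalone sketch (illustrative helper name): */
#include <stdint.h>

static void to_surrogates(uint32_t u, uint16_t *hi, uint16_t *lo)
{
  u -= 0x10000;
  *hi = 0xd800 | (u >> 10);    /* high 10 bits */
  *lo = 0xdc00 | (u & 0x3ff);  /* low 10 bits */
}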
#undef F
};
- uns func = ~0U;
+ uint func = ~0U;
if (argc > 1)
- for (uns i = 0; i < ARRAY_SIZE(names); i++)
+ for (uint i = 0; i < ARRAY_SIZE(names); i++)
if (!strcasecmp(names[i], argv[1]))
func = i;
if (!~func)
struct fastbuf *b = fbgrow_create(8);
if (func < FUNC_BPUT_UTF8)
{
- uns u;
+ uint u;
while (scanf("%x", &u) == 1)
bputc(b, u);
fbgrow_rewind(b);
}
else
{
- uns u, i = 0;
+ uint u, i = 0;
while (scanf("%x", &u) == 1)
{
switch (func)
/* ** UTF-8 ** */
-int bget_utf8_slow(struct fastbuf *b, uns repl);
-int bget_utf8_32_slow(struct fastbuf *b, uns repl);
-void bput_utf8_slow(struct fastbuf *b, uns u);
-void bput_utf8_32_slow(struct fastbuf *b, uns u);
+int bget_utf8_slow(struct fastbuf *b, uint repl);
+int bget_utf8_32_slow(struct fastbuf *b, uint repl);
+void bput_utf8_slow(struct fastbuf *b, uint u);
+void bput_utf8_32_slow(struct fastbuf *b, uint u);
-static inline int bget_utf8_repl(struct fastbuf *b, uns repl)
+static inline int bget_utf8_repl(struct fastbuf *b, uint repl)
{
- uns u;
+ uint u;
if (bavailr(b) >= 3)
{
b->bptr = utf8_get_repl(b->bptr, &u, repl);
return bget_utf8_slow(b, repl);
}
-static inline int bget_utf8_32_repl(struct fastbuf *b, uns repl)
+static inline int bget_utf8_32_repl(struct fastbuf *b, uint repl)
{
- uns u;
+ uint u;
if (bavailr(b) >= 6)
{
b->bptr = utf8_32_get_repl(b->bptr, &u, repl);
return bget_utf8_32_repl(b, UNI_REPLACEMENT);
}
-static inline void bput_utf8(struct fastbuf *b, uns u) /** Write a single utf8 character from range [0, 0xffff]. **/
+static inline void bput_utf8(struct fastbuf *b, uint u) /** Write a single utf8 character from range [0, 0xffff]. **/
{
if (bavailw(b) >= 3)
b->bptr = utf8_put(b->bptr, u);
bput_utf8_slow(b, u);
}
-static inline void bput_utf8_32(struct fastbuf *b, uns u) /** Write a single utf8 character (from the whole unicode range). **/
+static inline void bput_utf8_32(struct fastbuf *b, uint u) /** Write a single utf8 character (from the whole unicode range). **/
{
if (bavailw(b) >= 6)
b->bptr = utf8_32_put(b->bptr, u);
/* ** UTF-16 ** */
-int bget_utf16_be_slow(struct fastbuf *b, uns repl);
-int bget_utf16_le_slow(struct fastbuf *b, uns repl);
-void bput_utf16_be_slow(struct fastbuf *b, uns u);
-void bput_utf16_le_slow(struct fastbuf *b, uns u);
+int bget_utf16_be_slow(struct fastbuf *b, uint repl);
+int bget_utf16_le_slow(struct fastbuf *b, uint repl);
+void bput_utf16_be_slow(struct fastbuf *b, uint u);
+void bput_utf16_le_slow(struct fastbuf *b, uint u);
-static inline int bget_utf16_be_repl(struct fastbuf *b, uns repl)
+static inline int bget_utf16_be_repl(struct fastbuf *b, uint repl)
{
- uns u;
+ uint u;
if (bavailr(b) >= 4)
{
b->bptr = utf16_be_get_repl(b->bptr, &u, repl);
return bget_utf16_be_slow(b, repl);
}
-static inline int bget_utf16_le_repl(struct fastbuf *b, uns repl)
+static inline int bget_utf16_le_repl(struct fastbuf *b, uint repl)
{
- uns u;
+ uint u;
if (bavailr(b) >= 4)
{
b->bptr = utf16_le_get_repl(b->bptr, &u, repl);
 * Write a UTF-16 character to a fastbuf.
* Big endian version.
**/
-static inline void bput_utf16_be(struct fastbuf *b, uns u)
+static inline void bput_utf16_be(struct fastbuf *b, uint u)
{
if (bavailw(b) >= 4)
b->bptr = utf16_be_put(b->bptr, u);
 * Write a UTF-16 character to a fastbuf.
* Little endian version.
**/
-static inline void bput_utf16_le(struct fastbuf *b, uns u)
+static inline void bput_utf16_le(struct fastbuf *b, uint u)
{
if (bavailw(b) >= 4)
b->bptr = utf16_le_put(b->bptr, u);
u64 bget_varint_slow(struct fastbuf *b, u64 repl)
{
- uns h = bgetc(b);
- uns l = varint_len(h);
+ uint h = bgetc(b);
+ uint l = varint_len(h);
byte buf[l];
buf[0] = h;
l--;
void bput_varint_slow(struct fastbuf *b, u64 u)
{
byte buf[9];
- uns l = varint_put(buf, u);
+ uint l = varint_put(buf, u);
bwrite(b, buf, l);
}
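/* For readers new to variable-length integers: a minimal LEB128-style
 * encoder as a point of comparison. This is a generic illustration only;
 * the actual wire format is defined by varint_put()/varint_len() above. */
#include <stdint.h>

static unsigned leb128_put(uint8_t *p, uint64_t x)
{
  unsigned n = 0;
  do {
    uint8_t c = x & 0x7f;        /* low 7 bits per output byte */
    x >>= 7;
    p[n++] = c | (x ? 0x80 : 0); /* MSB set while more bytes follow */
  } while (x);
  return n;
}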
#undef F
};
- uns func = ~0U;
+ uint func = ~0U;
if (argc > 1)
- for (uns i = 0; i < ARRAY_SIZE(names); i++)
+ for (uint i = 0; i < ARRAY_SIZE(names); i++)
if (!strcasecmp(names[i], argv[1]))
func = i;
if (!~func) {
struct fastbuf *b = fbgrow_create(8);
switch (func) {
- uns u;
+ uint u;
uintmax_t r;
int i;
case FUNC_BGET_VARINT:
**/
static inline u64 bget_varint_repl(struct fastbuf *b, u64 repl)
{
- uns l;
+ uint l;
if (bavailr(b) >= 1) {
l = varint_len(*b->bptr);
if (bavailr(b) >= l) {
/** Writes u64 u encoded as varint to the fastbuf b. **/
static inline void bput_varint(struct fastbuf *b, u64 u)
{
- uns l = varint_space(u);
+ uint l = varint_space(u);
if (bavailw(b) >= l)
b->bptr += varint_put(b->bptr, u);
else
* length of available memory.
**/
typedef struct BUF_T {
- uns len;
+ uint len;
GBUF_TYPE *ptr;
} BUF_T;
* Use <<fun__GENERIC_LINK_|GBUF_PREFIX|grow|,`GBUF_PREFIX(grow)()`>>
* for growing.
**/
-static void UNUSED GBUF_PREFIX(set_size)(BUF_T *b, uns len)
+static void UNUSED GBUF_PREFIX(set_size)(BUF_T *b, uint len)
{
b->len = len;
b->ptr = xrealloc(b->ptr, len * sizeof(GBUF_TYPE));
#endif
}
-static void UNUSED GBUF_PREFIX(do_grow)(BUF_T *b, uns len)
+static void UNUSED GBUF_PREFIX(do_grow)(BUF_T *b, uint len)
{
if (len < 2*b->len) // to ensure logarithmic cost
len = 2*b->len;
* any more) by
* <<fun__GENERIC_LINK_|GBUF_PREFIX|set_size|,`GBUF_PREFIX(set_size)()`>>.
**/
-static inline GBUF_TYPE *GBUF_PREFIX(grow)(BUF_T *b, uns len)
+static inline GBUF_TYPE *GBUF_PREFIX(grow)(BUF_T *b, uint len)
{
if (unlikely(len > b->len))
GBUF_PREFIX(do_grow)(b, len);
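/* The doubling in do_grow() is what makes repeated grow() calls amortized
 * O(1) per element. The same policy as a standalone sketch, with plain
 * realloc() standing in for the library allocator and illustrative names: */
#include <stdlib.h>

struct ibuf { unsigned len; int *ptr; };

static void ibuf_grow(struct ibuf *b, unsigned len)
{
  if (len <= b->len)
    return;
  if (len < 2 * b->len)          /* ensure a logarithmic number of reallocs */
    len = 2 * b->len;
  b->ptr = realloc(b->ptr, len * sizeof(*b->ptr));
  b->len = len;
}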
#define HASH_WANT_REMOVE
#define HASH_GIVE_HASHFN
-static uns test4_hash(char *host, int port)
+static uint test4_hash(char *host, int port)
{
return hash_string_nocase(host) ^ hash_u32(port);
}
}
#define HASH_GIVE_EXTRA_SIZE
-static inline uns test4_extra_size(char *host, int port UNUSED)
+static inline uint test4_extra_size(char *host, int port UNUSED)
{
return strlen(host);
}
int
main(int argc, char **argv)
{
- uns m = ~0U;
+ uint m = ~0U;
if (argc > 1)
{
m = 0;
#define TEST_TIME 1000000
/* The shift of the string according to the alignment. */
-static uns alignment = 0;
+static uint alignment = 0;
static void
random_string(byte *str, int len)
str[len] = 0;
}
-static uns
+static uint
elapsed_time(void)
{
static struct timeval last_tv, tv;
- uns elapsed;
+ uint elapsed;
gettimeofday(&tv, NULL);
elapsed = (tv.tv_sec - last_tv.tv_sec) * 1000000 + (tv.tv_usec - last_tv.tv_usec);
last_tv = tv;
printf("%d strings tested OK\n", i);
for (i=0; strings[i]; i++)
{
- uns h1, h2;
+ uint h1, h2;
h1 = hash_string(strings[i]);
h2 = hash_string_nocase(strings[i]);
if (h1 != hash_block(strings[i], str_len(strings[i])))
for (i=0; lengths[i] >= 0; i++)
{
byte str[lengths[i] + 1 + alignment];
- uns count = TEST_TIME / (lengths[i] + 10);
- uns el1 = 0, el2 = 0, elh = 0, elhn = 0;
- uns tot1 = 0, tot2 = 0, hash = 0, hashn = 0;
- uns j;
+ uint count = TEST_TIME / (lengths[i] + 10);
+ uint el1 = 0, el2 = 0, elh = 0, elhn = 0;
+ uint tot1 = 0, tot2 = 0, hash = 0, hashn = 0;
+ uint j;
for (j=0; j<count; j++)
{
random_string(str + alignment, lengths[i]);
#define SHIFT_BITS 7
/* A bit-mask which clears higher bytes than a given threshold. */
-static uns mask_higher_bits[sizeof(uns)];
+static uint mask_higher_bits[sizeof(uint)];
static void CONSTRUCTOR
hashfunc_init(void)
{
- uns i, j;
+ uint i, j;
byte *str;
- for (i=0; i<sizeof(uns); i++)
+ for (i=0; i<sizeof(uint); i++)
{
str = (byte *) (mask_higher_bits + i);
for (j=0; j<i; j++)
str[j] = -1;
- for (j=i; j<sizeof(uns); j++)
+ for (j=i; j<sizeof(uint); j++)
str[j] = 0;
}
}
-static inline uns CONST
-str_len_uns(uns x)
+static inline uint CONST
+str_len_uint(uint x)
{
- const uns sub = ~0U / 0xff;
- const uns and = sub * 0x80;
- uns a, i;
+ const uint sub = ~0U / 0xff;
+ const uint and = sub * 0x80;
+ uint a, i;
byte *bytes;
a = ~x & (x - sub) & and;
/*
 * a is zero if and only if there is
 * no zero byte in x.
 */
if (!a)
- return sizeof(uns);
+ return sizeof(uint);
bytes = (byte *) &x;
- for (i=0; i<sizeof(uns) && bytes[i]; i++);
+ for (i=0; i<sizeof(uint) && bytes[i]; i++);
return i;
}
-inline uns
+inline uint
str_len_aligned(const char *str)
{
- const uns *u = (const uns *) str;
- uns len = 0;
+ const uint *u = (const uint *) str;
+ uint len = 0;
while (1)
{
- uns l = str_len_uns(*u++);
+ uint l = str_len_uint(*u++);
len += l;
- if (l < sizeof(uns))
+ if (l < sizeof(uint))
return len;
}
}
-inline uns
+inline uint
hash_string_aligned(const char *str)
{
- const uns *u = (const uns *) str;
- uns hash = 0;
+ const uint *u = (const uint *) str;
+ uint hash = 0;
while (1)
{
- uns last_len = str_len_uns(*u);
+ uint last_len = str_len_uint(*u);
hash = ROL(hash, SHIFT_BITS);
- if (last_len < sizeof(uns))
+ if (last_len < sizeof(uint))
{
- uns tmp = *u & mask_higher_bits[last_len];
+ uint tmp = *u & mask_higher_bits[last_len];
hash ^= tmp;
return hash;
}
}
}
-inline uns
-hash_block_aligned(const byte *buf, uns len)
+inline uint
+hash_block_aligned(const byte *buf, uint len)
{
- const uns *u = (const uns *) buf;
- uns hash = 0;
- while (len >= sizeof(uns))
+ const uint *u = (const uint *) buf;
+ uint hash = 0;
+ while (len >= sizeof(uint))
{
hash = ROL(hash, SHIFT_BITS) ^ *u++;
- len -= sizeof(uns);
+ len -= sizeof(uint);
}
hash = ROL(hash, SHIFT_BITS) ^ (*u & mask_higher_bits[len]);
return hash;
}
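/* The scheme above folds one machine word per step: rotate the accumulator,
 * then XOR in the next word (the final partial word is masked to zero its
 * unused bytes). A simplified standalone rendering for whole words only;
 * it is not bit-identical to the library code for all input lengths: */
#include <stdint.h>

#define ROL32(x, b) (((x) << (b)) | ((x) >> (32 - (b))))

static uint32_t hash_words(const uint32_t *u, unsigned nwords)
{
  uint32_t hash = 0;
  while (nwords--)
    hash = ROL32(hash, 7) ^ *u++;
  return hash;
}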
#ifndef CPU_ALLOW_UNALIGNED
-uns
+uint
str_len(const char *str)
{
- uns shift = UNALIGNED_PART(str, uns);
+ uint shift = UNALIGNED_PART(str, uint);
if (!shift)
return str_len_aligned(str);
else
{
- uns i;
- shift = sizeof(uns) - shift;
+ uint i;
+ shift = sizeof(uint) - shift;
for (i=0; i<shift; i++)
if (!str[i])
return i;
}
}
-uns
+uint
hash_string(const char *str)
{
const byte *s = str;
- uns shift = UNALIGNED_PART(s, uns);
+ uint shift = UNALIGNED_PART(s, uint);
if (!shift)
return hash_string_aligned(s);
else
{
- uns hash = 0;
- uns i;
+ uint hash = 0;
+ uint i;
for (i=0; ; i++)
{
- uns modulo = i % sizeof(uns);
- uns shift;
+ uint modulo = i % sizeof(uint);
+ uint shift;
#ifdef CPU_LITTLE_ENDIAN
shift = modulo;
#else
- shift = sizeof(uns) - 1 - modulo;
+ shift = sizeof(uint) - 1 - modulo;
#endif
if (!modulo)
hash = ROL(hash, SHIFT_BITS);
}
}
-uns
-hash_block(const byte *buf, uns len)
+uint
+hash_block(const byte *buf, uint len)
{
- uns shift = UNALIGNED_PART(buf, uns);
+ uint shift = UNALIGNED_PART(buf, uint);
if (!shift)
return hash_block_aligned(buf, len);
else
{
- uns hash = 0;
- uns i;
+ uint hash = 0;
+ uint i;
for (i=0; ; i++)
{
- uns modulo = i % sizeof(uns);
- uns shift;
+ uint modulo = i % sizeof(uint);
+ uint shift;
#ifdef CPU_LITTLE_ENDIAN
shift = modulo;
#else
- shift = sizeof(uns) - 1 - modulo;
+ shift = sizeof(uint) - 1 - modulo;
#endif
if (!modulo)
hash = ROL(hash, SHIFT_BITS);
}
#endif
-uns
+uint
hash_string_nocase(const char *str)
{
const byte *s = str;
- uns hash = 0;
- uns i;
+ uint hash = 0;
+ uint i;
for (i=0; ; i++)
{
- uns modulo = i % sizeof(uns);
- uns shift;
+ uint modulo = i % sizeof(uint);
+ uint shift;
#ifdef CPU_LITTLE_ENDIAN
shift = modulo;
#else
- shift = sizeof(uns) - 1 - modulo;
+ shift = sizeof(uint) - 1 - modulo;
#endif
if (!modulo)
hash = ROL(hash, SHIFT_BITS);
/*** === String hashes [[strhash]] ***/
-/* The following functions need str to be aligned to sizeof(uns). */
-uns str_len_aligned(const char *str) PURE; /** Get the string length (not a really useful hash function, but there is no better place for it). The string must be aligned to sizeof(uns). For unaligned see @str_len(). **/
-uns hash_string_aligned(const char *str) PURE; /** Hash the string. The string must be aligned to sizeof(uns). For unaligned see @hash_string(). **/
-uns hash_block_aligned(const byte *buf, uns len) PURE; /** Hash arbitrary data. They must be aligned to sizeof(uns). For unaligned see @hash_block(). **/
+/* The following functions need str to be aligned to sizeof(uint). */
+uint str_len_aligned(const char *str) PURE; /** Get the string length (not a really useful hash function, but there is no better place for it). The string must be aligned to sizeof(uint). For unaligned see @str_len(). **/
+uint hash_string_aligned(const char *str) PURE; /** Hash the string. The string must be aligned to sizeof(uint). For unaligned see @hash_string(). **/
+uint hash_block_aligned(const byte *buf, uint len) PURE; /** Hash arbitrary data. They must be aligned to sizeof(uint). For unaligned see @hash_block(). **/
#ifdef CPU_ALLOW_UNALIGNED
#undef str_len
#define hash_string(str) hash_string_aligned(str)
#define hash_block(str, len) hash_block_aligned(str, len)
#else
-uns str_len(const char *str) PURE; /** Get the string length. If you know it is aligned to sizeof(uns), you can use faster @str_len_aligned(). **/
-uns hash_string(const char *str) PURE; /** Hash the string. If it is aligned to sizeof(uns), you can use faster @hash_string_aligned(). **/
-uns hash_block(const byte *buf, uns len) PURE; /** Hash arbitrary data. If they are aligned to sizeof(uns), use faster @hash_block_aligned(). **/
+uint str_len(const char *str) PURE; /** Get the string length. If you know it is aligned to sizeof(uint), you can use faster @str_len_aligned(). **/
+uint hash_string(const char *str) PURE; /** Hash the string. If it is aligned to sizeof(uint), you can use faster @hash_string_aligned(). **/
+uint hash_block(const byte *buf, uint len) PURE; /** Hash arbitrary data. If they are aligned to sizeof(uint), use faster @hash_block_aligned(). **/
#endif
-uns hash_string_nocase(const char *str) PURE; /** Hash the string in a case insensitive way. Works only with ASCII characters. **/
+uint hash_string_nocase(const char *str) PURE; /** Hash the string in a case insensitive way. Works only with ASCII characters. **/
/*** === Integer hashes [[inthash]] ***/
* of using shifts and adds on architectures where multiplication
* instructions are slow).
*/
-static inline uns CONST hash_u32(uns x) { return 0x01008041*x; } /** Hash a 32 bit unsigned integer. **/
-static inline uns CONST hash_u64(u64 x) { return hash_u32((uns)x ^ (uns)(x >> 32)); } /** Hash a 64 bit unsigned integer. **/
-static inline uns CONST hash_pointer(void *x) { return ((sizeof(x) <= 4) ? hash_u32((uns)(uintptr_t)x) : hash_u64((u64)(uintptr_t)x)); } /** Hash a pointer. **/
+static inline uint CONST hash_u32(uint x) { return 0x01008041*x; } /** Hash a 32 bit unsigned integer. **/
+static inline uint CONST hash_u64(u64 x) { return hash_u32((uint)x ^ (uint)(x >> 32)); } /** Hash a 64 bit unsigned integer. **/
+static inline uint CONST hash_pointer(void *x) { return ((sizeof(x) <= 4) ? hash_u32((uint)(uintptr_t)x) : hash_u64((u64)(uintptr_t)x)); } /** Hash a pointer. **/
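/* The multiplicative constant smears input bits across the whole word; the
 * generated tables then reduce the result by `% hash_size', as in the
 * hash-table code below. A quick standalone check (table size of 64 is
 * purely illustrative): */
#include <stdint.h>
#include <stdio.h>

static uint32_t mul_hash(uint32_t x) { return 0x01008041u * x; }

int main(void)
{
  for (uint32_t k = 1; k <= 4; k++)
    printf("%u -> bucket %u\n", k, mul_hash(k) % 64);
  return 0;
}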
#endif
*
* You can also supply several functions:
*
- * HASH_GIVE_HASHFN unsigned int hash(key) -- calculate hash value of key.
+ * HASH_GIVE_HASHFN uint hash(key) -- calculate hash value of key.
* We have sensible default hash functions for strings
* and integers.
* HASH_GIVE_EQ int eq(key1, key2) -- return whether keys are equal.
* and static strings, strcpy for end-allocated strings.
* HASH_GIVE_INIT_DATA void init_data(node *) -- initialize data fields in a
* newly created node. Very useful for lookup operations.
- * HASH_GIVE_ALLOC void *alloc(unsigned int size) -- allocate space for
+ * HASH_GIVE_ALLOC void *alloc(uint size) -- allocate space for
* a node. Default is xmalloc() or pooled allocation, depending
* on HASH_USE_POOL, HASH_AUTO_POOL, HASH_USE_ELTPOOL
* and HASH_AUTO_ELTPOOL switches. void free(void *) -- the converse.
- * HASH_GIVE_TABLE_ALLOC void *table_alloc(unsigned int size), void *table_free(void *)
+ * HASH_GIVE_TABLE_ALLOC void *table_alloc(uint size), void *table_free(void *)
* Allocate or free space for the table itself. Default is xmalloc()
* or the functions defined by HASH_GIVE_ALLOC if HASH_TABLE_ALLOC is set.
*
* hash table operations is struct HASH_PREFIX(table) *.
* HASH_TABLE_VARS Extra variables to be defined in table structure
* HASH_LOOKUP_DETECT_NEW
- * the prototype for lookup is changed to node *lookup(key, int *new_item)
- * new_item must not be NULL and returns 1 whether lookup
- * just created a new item in the hashtable or 0 otherwise.
+ * the prototype for lookup is changed to node *lookup(key, int *new_item)
+ * new_item must not be NULL; it is set to 1 if lookup
+ * just created a new item in the hashtable, or 0 otherwise.
*
 * You also get an iterator macro at no extra charge:
*
typedef struct P(bucket) {
struct P(bucket) *next;
#ifndef HASH_CONSERVE_SPACE
- uns hash;
+ uint hash;
#endif
P(node) n;
} P(bucket);
#ifdef HASH_TABLE_VARS
HASH_TABLE_VARS
#endif
- uns hash_size;
- uns hash_count, hash_max, hash_min, hash_hard_max;
+ uint hash_size;
+ uint hash_count, hash_max, hash_min, hash_hard_max;
P(bucket) **ht;
#ifdef HASH_AUTO_POOL
struct mempool *pool;
#ifndef HASH_GIVE_HASHFN
#define HASH_GIVE_HASHFN
- static inline uns P(hash) (TAUC char *k)
+ static inline uint P(hash) (TAUC char *k)
{
# ifdef HASH_NOCASE
return hash_string_nocase(k);
#elif defined(HASH_USE_POOL)
/* If the caller has requested to use his mempool, do so */
#include <ucw/mempool.h>
-static inline void * P(alloc) (TAUC unsigned int size) { return mp_alloc_fast(HASH_USE_POOL, size); }
+static inline void * P(alloc) (TAUC uint size) { return mp_alloc_fast(HASH_USE_POOL, size); }
static inline void P(free) (TAUC void *x UNUSED) { }
static inline void P(init_alloc) (TAU) { }
static inline void P(cleanup_alloc) (TAU) { }
#elif defined(HASH_AUTO_POOL)
/* Use our own pools */
#include <ucw/mempool.h>
-static inline void * P(alloc) (TAUC unsigned int size) { return mp_alloc_fast(T.pool, size); }
+static inline void * P(alloc) (TAUC uint size) { return mp_alloc_fast(T.pool, size); }
static inline void P(free) (TAUC void *x UNUSED) { }
static inline void P(init_alloc) (TAU) { T.pool = mp_new(HASH_AUTO_POOL); }
static inline void P(cleanup_alloc) (TAU) { mp_delete(T.pool); }
#elif defined(HASH_USE_ELTPOOL)
/* If the caller has requested to use his eltpool, do so */
#include <ucw/eltpool.h>
-static inline void * P(alloc) (TAUC unsigned int size UNUSED) { ASSERT(size <= (HASH_USE_ELTPOOL)->elt_size); return ep_alloc(HASH_USE_ELTPOOL); }
+static inline void * P(alloc) (TAUC uint size UNUSED) { ASSERT(size <= (HASH_USE_ELTPOOL)->elt_size); return ep_alloc(HASH_USE_ELTPOOL); }
static inline void P(free) (TAUC void *x) { ep_free(HASH_USE_ELTPOOL, x); }
static inline void P(init_alloc) (TAU) { }
static inline void P(cleanup_alloc) (TAU) { }
#elif defined(HASH_AUTO_ELTPOOL)
/* Use our own eltpools */
#include <ucw/eltpool.h>
-static inline void * P(alloc) (TAUC unsigned int size UNUSED) { return ep_alloc(T.eltpool); }
+static inline void * P(alloc) (TAUC uint size UNUSED) { return ep_alloc(T.eltpool); }
static inline void P(free) (TAUC void *x) { ep_free(T.eltpool, x); }
static inline void P(init_alloc) (TAU) { T.eltpool = ep_new(sizeof(P(bucket)), HASH_AUTO_ELTPOOL); }
static inline void P(cleanup_alloc) (TAU) { ep_delete(T.eltpool); }
#else
/* The default allocation method */
-static inline void * P(alloc) (TAUC unsigned int size) { return xmalloc(size); }
+static inline void * P(alloc) (TAUC uint size) { return xmalloc(size); }
static inline void P(free) (TAUC void *x) { xfree(x); }
static inline void P(init_alloc) (TAU) { }
static inline void P(cleanup_alloc) (TAU) { }
#ifdef HASH_USE_ELTPOOL
#error HASH_TABLE_ALLOC not supported in combination with eltpools
#endif
-static inline void * P(table_alloc) (TAUC unsigned int size) { return P(alloc)(TTC size); }
+static inline void * P(table_alloc) (TAUC uint size) { return P(alloc)(TTC size); }
static inline void P(table_free) (TAUC void *x) { P(free)(TTC x); }
#else
-static inline void * P(table_alloc) (TAUC unsigned int size) { return xmalloc(size); }
+static inline void * P(table_alloc) (TAUC uint size) { return xmalloc(size); }
static inline void P(table_free) (TAUC void *x) { xfree(x); }
#endif
#endif
#ifdef HASH_ZERO_FILL
-static inline void * P(new_bucket)(TAUC uns size)
+static inline void * P(new_bucket)(TAUC uint size)
{
byte *buck = P(alloc)(TTC size);
bzero(buck, size);
return buck;
}
#else
-static inline void * P(new_bucket)(TAUC uns size) { return P(alloc)(TTC size); }
+static inline void * P(new_bucket)(TAUC uint size) { return P(alloc)(TTC size); }
#endif
/* Now the operations */
static void HASH_PREFIX(cleanup)(TA)
{
#ifndef HASH_USE_POOL
- uns i;
+ uint i;
P(bucket) *b, *bb;
for (i=0; i<T.hash_size; i++)
}
#endif
-static inline uns P(bucket_hash) (TAUC P(bucket) *b)
+static inline uint P(bucket_hash) (TAUC P(bucket) *b)
{
#ifdef HASH_CONSERVE_SPACE
return P(hash)(TTC HASH_KEY(b->n.));
#endif
}
-static void P(rehash) (TAC uns size)
+static void P(rehash) (TAC uint size)
{
P(bucket) *b, *nb;
P(bucket) **oldt = T.ht, **newt;
- uns oldsize = T.hash_size;
- uns i, h;
+ uint oldsize = T.hash_size;
+ uint i, h;
DBG("Rehashing %d->%d at count %d", oldsize, size, T.hash_count);
T.hash_size = size;
**/
static HASH_NODE* HASH_PREFIX(find)(TAC HASH_KEY_DECL)
{
- uns h0 = P(hash) (TTC HASH_KEY( ));
- uns h = h0 % T.hash_size;
+ uint h0 = P(hash) (TTC HASH_KEY( ));
+ uint h = h0 % T.hash_size;
P(bucket) *b;
for (b=T.ht[h]; b; b=b->next)
static HASH_NODE* HASH_PREFIX(find_next)(TAC P(node) *start)
{
#ifndef HASH_CONSERVE_SPACE
- uns h0 = P(hash) (TTC HASH_KEY(start->));
+ uint h0 = P(hash) (TTC HASH_KEY(start->));
#endif
P(bucket) *b = SKIP_BACK(P(bucket), n, start);
**/
static HASH_NODE * HASH_PREFIX(new)(TAC HASH_KEY_DECL)
{
- uns h0, h;
+ uint h0, h;
P(bucket) *b;
h0 = P(hash) (TTC HASH_KEY( ));
static HASH_NODE* HASH_PREFIX(lookup)(TAC HASH_KEY_DECL)
#endif
{
- uns h0 = P(hash) (TTC HASH_KEY( ));
- uns h = h0 % T.hash_size;
+ uint h0 = P(hash) (TTC HASH_KEY( ));
+ uint h = h0 % T.hash_size;
P(bucket) *b;
for (b=T.ht[h]; b; b=b->next)
**/
static int HASH_PREFIX(delete)(TAC HASH_KEY_DECL)
{
- uns h0 = P(hash) (TTC HASH_KEY( ));
- uns h = h0 % T.hash_size;
+ uint h0 = P(hash) (TTC HASH_KEY( ));
+ uint h = h0 % T.hash_size;
P(bucket) *b, **bb;
for (bb=&T.ht[h]; b=*bb; bb=&b->next)
static void HASH_PREFIX(remove)(TAC HASH_NODE *n)
{
P(bucket) *x = SKIP_BACK(struct P(bucket), n, n);
- uns h0 = P(bucket_hash)(TTC x);
- uns h = h0 % T.hash_size;
+ uint h0 = P(bucket_hash)(TTC x);
+ uint h = h0 % T.hash_size;
P(bucket) *b, **bb;
for (bb=&T.ht[h]; (b=*bb) && b != x; bb=&b->next)
#define HASH_FOR_ALL_DYNAMIC(h_px, h_table, h_var) \
do { \
- uns h_slot; \
+ uint h_slot; \
struct GLUE_(h_px,bucket) *h_buck; \
for (h_slot=0; h_slot < (h_table)->hash_size; h_slot++) \
for (h_buck = (h_table)->ht[h_slot]; h_buck; h_buck = h_buck->next) \
**/
#define HEAP_INIT(type,heap,num,less,swap) \
do { \
- uns _i = num; \
- uns _j, _l; \
+ uint _i = num; \
+ uint _j, _l; \
type x; \
while (_i >= 1) \
{ \
**/
#define HEAP_DELETE_MIN(type,heap,num,less,swap) \
do { \
- uns _j, _l; \
+ uint _j, _l; \
type x; \
swap(heap,1,num,x); \
num--; \
**/
#define HEAP_INSERT(type,heap,num,less,swap,elt) \
do { \
- uns _j, _u; \
+ uint _j, _u; \
type x; \
heap[++num] = elt; \
_j = num; \
**/
#define HEAP_INCREASE(type,heap,num,less,swap,pos,elt) \
do { \
- uns _j, _l; \
+ uint _j, _l; \
type x; \
heap[pos] = elt; \
_j = pos; \
**/
#define HEAP_DECREASE(type,heap,num,less,swap,pos,elt) \
do { \
- uns _j, _u; \
+ uint _j, _u; \
type x; \
heap[pos] = elt; \
_j = pos; \
**/
#define HEAP_DELETE(type,heap,num,less,swap,pos) \
do { \
- uns _j, _l, _u; \
+ uint _j, _l, _u; \
type x; \
_j = pos; \
swap(heap,_j,num,x); \
#include <sys/mman.h>
void *
-mmap_file(const char *name, unsigned *len, int writeable)
+mmap_file(const char *name, uint *len, int writeable)
{
int fd = open(name, writeable ? O_RDWR : O_RDONLY);
struct stat st;
}
void
-munmap_file(void *start, unsigned len)
+munmap_file(void *start, uint len)
{
munmap(start, len);
}
/* io-mmap.c */
-void *mmap_file(const char *name, unsigned *len, int writeable);
-void munmap_file(void *start, unsigned len);
+void *mmap_file(const char *name, uint *len, int writeable);
+void munmap_file(void *start, uint len);
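/* A usage sketch for the pair above; "data.txt" is a placeholder, the helper
 * name is illustrative, and error handling is elided (len receives the
 * mapped size): */
static uint count_lines(void)
{
  uint len, n = 0;
  byte *map = mmap_file("data.txt", &len, 0);
  for (uint i = 0; i < len; i++)
    if (map[i] == '\n')
      n++;
  munmap_file(map, len);
  return n;
}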
/* io-careful.c */
return err;
if (p)
{
- uns len;
+ uint len;
if (!cf_parse_int(p, &len) && len <= 32)
am->mask = ~(len == 32 ? 0 : ~0U >> len);
else if (cf_parse_ip(p, &am->mask))
*
* This file defines:
*
- * struct search structure with both the internal and the user-defined variables
- * used during the search and accessible from all macros
+ * struct search structure with both the internal and the user-defined variables
+ * used during the search and accessible from all macros
*
- * void search(kmp,search,src) executes the search; search structure is allocated by the caller (possible input/output)
+ * void search(kmp,search,src) executes the search; search structure is allocated by the caller (possible input/output)
*
- * void run(kmp,src) the same, but automatically allocates search structre from the stack
+ * void run(kmp,src) the same, but automatically allocates the search structure on the stack
*
*
* Parameters to the generator (these marked with [*] are mandatory):
* [*] KMPS_KMP_PREFIX(x) prefix used for ucw/kmp.h
*
* KMPS_SOURCE user-defined text source (together with KMPS_GET_CHAR);
- * if unset, the one from ucw/kmp.h is taken
+ * if unset, the one from ucw/kmp.h is taken
* KMPS_GET_CHAR(kmp,src,search) analogy to KMP_GET_CHAR, but it must store the next character to search->c
*
* KMPS_ADD_CONTROLS add control characters (see KMP_CONTROL_CHAR in kmp.h) at both ends of the input string
- * KMPS_MERGE_CONTROLS merge adjacent control characters to a single one
+ * KMPS_MERGE_CONTROLS merge adjacent control characters to a single one
*
* KMPS_VARS user-defined variables in struct search (in .u substructure to avoid collisions)
*
* KMPS_INIT(kmp,src,search) statement executed at the beginning of search()
* KMPS_EXIT(kmp,src,search) ... at the end
* KMPS_STEP(kmp,src,search) ... after each step (read of next character + current state update)
- * of the algorithm, but before KMPS_FOUND[_CHAIN]
+ * of the algorithm, but before KMPS_FOUND[_CHAIN]
* KMPS_FOUND_CHAIN(kmp,src,search) ... for each state representing locally longest match
 * (stored in search->out - NOT necessarily search->s!);
- * all matches form a NULL-terminated link list (search->out, search->out->next, ...)
- * in order of decreasing length
+ * all matches form a NULL-terminated linked list (search->out, search->out->next, ...)
+ * in order of decreasing length
* KMPS_FOUND(kmp,src,search) ... called for every match (in search->out)
* KMPS_WANT_BEST algorithm computes globally longest match, which is available
- * in search->best in KMPS_EXIT; if there is no match, it points to the null state
+ * in search->best in KMPS_EXIT; if there is no match, it points to the null state
*/
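/* To make the parameter list concrete: the shape of a minimal instantiation,
 * modeled directly on the kmp1s2_ test quoted later in this file (a sketch,
 * not a complete program; the my_ names are illustrative). */
#define KMPS_PREFIX(x) my_search_##x
#define KMPS_KMP_PREFIX(x) my_kmp_##x  /* the ucw/kmp.h instance to search */
#define KMPS_VARS uint count;          /* becomes search->u.count */
#define KMPS_INIT(kmp,src,s) s->u.count = 0
#define KMPS_FOUND(kmp,src,s) s->u.count++
#include <ucw/kmp-search.h>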
#define P(x) KMPS_PREFIX(x)
# endif
KP(char_t) c; /* last character */
# ifdef KMPS_ADD_CONTROLS
- uns eof;
+ uint eof;
# endif
# ifdef KMPS_VARS
struct {
#include <ucw/kmp-search.h>
#define KMPS_PREFIX(x) kmp1s2_##x
#define KMPS_KMP_PREFIX(x) kmp1_##x
-#define KMPS_VARS uns count;
+#define KMPS_VARS uint count;
#define KMPS_INIT(kmp,src,s) s->u.count = 0
#define KMPS_FOUND(kmp,src,s) s->u.count++
#include <ucw/kmp-search.h>
#define KMP_USE_UTF8
#define KMP_TOLOWER
#define KMP_ONLYALPHA
-#define KMP_STATE_VARS char *str; uns id;
-#define KMP_ADD_EXTRA_ARGS uns id
+#define KMP_STATE_VARS char *str; uint id;
+#define KMP_ADD_EXTRA_ARGS uint id
#define KMP_VARS char *start;
#define KMP_ADD_INIT(kmp,src) kmp->u.start = src
#define KMP_ADD_NEW(kmp,src,s) do{ TRACE("Inserting string %s with id %d", kmp->u.start, id); \
/* TEST3 - random tests */
#define KMP_PREFIX(x) kmp3_##x
-#define KMP_STATE_VARS uns index;
-#define KMP_ADD_EXTRA_ARGS uns index
+#define KMP_STATE_VARS uint index;
+#define KMP_ADD_EXTRA_ARGS uint index
#define KMP_VARS char *start;
#define KMP_ADD_INIT(kmp,src) kmp->u.start = src
#define KMP_ADD_NEW(kmp,src,s) s->u.index = index
#define KMP_ADD_DUP(kmp,src,s) *(kmp->u.start) = 0
#define KMP_WANT_CLEANUP
#define KMP_WANT_SEARCH
-#define KMPS_VARS uns sum, *cnt;
+#define KMPS_VARS uint sum, *cnt;
#define KMPS_FOUND(kmp,src,s) do{ ASSERT(s->u.cnt[s->out->u.index]); s->u.cnt[s->out->u.index]--; s->u.sum--; }while(0)
#include <ucw/kmp.h>
{
TRACE("Running test3");
struct mempool *pool = mp_new(1024);
- for (uns testn = 0; testn < 100; testn++)
+ for (uint testn = 0; testn < 100; testn++)
{
mp_flush(pool);
- uns n = random_max(100);
+ uint n = random_max(100);
char *s[n];
struct kmp3_struct kmp;
kmp3_init(&kmp);
- for (uns i = 0; i < n; i++)
+ for (uint i = 0; i < n; i++)
{
- uns m = random_max(10);
+ uint m = random_max(10);
s[i] = mp_alloc(pool, m + 1);
- for (uns j = 0; j < m; j++)
+ for (uint j = 0; j < m; j++)
s[i][j] = 'a' + random_max(3);
s[i][m] = 0;
kmp3_add(&kmp, s[i], i);
}
kmp3_build(&kmp);
- for (uns i = 0; i < 10; i++)
+ for (uint i = 0; i < 10; i++)
{
- uns m = random_max(100);
+ uint m = random_max(100);
byte b[m + 1];
- for (uns j = 0; j < m; j++)
+ for (uint j = 0; j < m; j++)
b[j] = 'a' + random_max(4);
b[m] = 0;
- uns cnt[n];
+ uint cnt[n];
struct kmp3_search search;
search.u.sum = 0;
search.u.cnt = cnt;
- for (uns j = 0; j < n; j++)
+ for (uint j = 0; j < n; j++)
{
cnt[j] = 0;
if (*s[j])
- for (uns k = 0; k < m; k++)
+ for (uint k = 0; k < m; k++)
if (!strncmp(b + k, s[j], strlen(s[j])))
cnt[j]++, search.u.sum++;
}
return (a == b) || (a && b && *a == *b);
}
-static inline uns
+static inline uint
kmp4_hash(struct kmp4_struct *kmp UNUSED, struct kmp4_state *s, byte *c)
{
- return (c ? (*c << 16) : 0) + (uns)(uintptr_t)s;
+ return (c ? (*c << 16) : 0) + (uint)(uintptr_t)s;
}
#define KMP_PREFIX(x) kmp4_##x
* Brief description of all parameters:
*
* Basic parameters:
- * KMP_PREFIX(x) macro to add a name prefix (used on all global names
+ * KMP_PREFIX(x) macro to add a name prefix (used on all global names
* defined by the KMP generator); mandatory;
* we abbreviate this to P(x) below
*
* KMP_ONLYALPHA converts non-alphas to KMP_CONTROL_CHAR (see below)
*
* Parameters controlling add(kmp, src):
- * KMP_ADD_EXTRA_ARGS extra arguments, should be used carefully because of possible collisions
- * KMP_ADD_INIT(kmp,src) called in the beginning of add(), src is the first
+ * KMP_ADD_EXTRA_ARGS extra arguments, should be used carefully because of possible collisions
+ * KMP_ADD_INIT(kmp,src) called at the beginning of add(), src is the first
* KMP_INIT_STATE(kmp,s) initialization of a new state s (called before KMP_ADD_{NEW,DUP});
- * null state is not included and should be handled after init() if necessary;
- * all user-defined data are filled by zeros before call to KMP_INIT_STATE
- * KMP_ADD_NEW(kmp,src,s) initialize last state of every new key string (called after KMP_INIT_STATE);
- * the string must be parsed before so src is after the last string's character
- * KMP_ADD_DUP(kmp,src,s) analogy of KMP_ADD_NEW called for duplicates
+ * null state is not included and should be handled after init() if necessary;
+ * all user-defined data are filled with zeros before the call to KMP_INIT_STATE
+ * KMP_ADD_NEW(kmp,src,s) initialize the last state of every new key string (called after KMP_INIT_STATE);
+ * the string has already been parsed, so src points just past its last character
+ * KMP_ADD_DUP(kmp,src,s) the analogue of KMP_ADD_NEW, called for duplicates
*
* Parameters to build():
* KMP_BUILD_STATE(kmp,s) called for all states (including null) in order of non-decreasing tree depth
#define HASH_GIVE_HASHFN
#ifdef KMP_GIVE_HASHFN
-static inline uns
+static inline uint
P(hash_hash) (struct P(hash_table) *t, struct P(state) *f, P(char_t) c)
{
return P(hash) ((struct P(struct) *) t, f, c);
}
#else
-static inline uns
+static inline uint
P(hash_hash) (struct P(hash_table) *t UNUSED, struct P(state) *f, P(char_t) c)
{
- return (((uns)c) << 16) + (uns)(uintptr_t)f;
+ return (((uint)c) << 16) + (uint)(uintptr_t)f;
}
#endif
#ifdef KMP_GIVE_ALLOC
#define HASH_GIVE_ALLOC
static inline void *
-P(hash_alloc) (struct P(hash_table) *t, uns size)
+P(hash_alloc) (struct P(hash_table) *t, uint size)
{
return P(alloc) ((struct P(struct) *) t, size);
}
P(get_char) (struct P(struct) *kmp UNUSED, P(source_t) *src, P(char_t) *c)
{
# ifdef KMP_USE_UTF8
- uns cc;
+ uint cc;
*src = utf8_get(*src, &cc);
# ifdef KMP_ONLYALPHA
if (!cc) {}
# endif
}
# else
- uns cc = *(*src)++;
+ uint cc = *(*src)++;
# ifdef KMP_ONLYALPHA
if (!cc) {}
else if (!Calpha(cc))
if (!P(get_char)(kmp, &src, &c))
return NULL;
struct P(state) *p = &kmp->null, *s;
- uns len = 0;
+ uint len = 0;
do
{
s = P(hash_find)(&kmp->hash, p, c);
{
if (P(empty)(kmp))
return;
- uns read = 0, write = 0;
+ uint read = 0, write = 0;
struct P(state) *fifo[kmp->hash.hash_count], *null = &kmp->null;
for (struct P(state) *s = null->back; s; s = s->next)
fifo[write++] = s;
#define CHECK_PTR_TYPE(x, type) ((x)-(type)(x) + (type)(x)) /** Check that a pointer @x is of type @type. Fail compilation if not. **/
#define PTR_TO(s, i) &((s*)0)->i /** Return OFFSETOF() in form of a pointer. **/
-#define OFFSETOF(s, i) ((uns)offsetof(s, i)) /** Offset of item @i from the start of structure @s **/
+#define OFFSETOF(s, i) ((uint)offsetof(s, i)) /** Offset of item @i from the start of structure @s **/
#define SKIP_BACK(s, i, p) ((s *)((char *)p - OFFSETOF(s, i))) /** Given a pointer @p to item @i of structure @s, return a pointer to the start of the struct. **/
/** Align an integer @s to the nearest higher multiple of @a (which should be a power of two) **/
#define COMPARE_LT(x,y) do { if ((x)<(y)) return 1; if ((x)>(y)) return 0; } while(0)
#define COMPARE_GT(x,y) COMPARE_LT(y,x)
-#define ROL(x, bits) (((x) << (bits)) | ((uns)(x) >> (sizeof(uns)*8 - (bits)))) /** Bitwise rotation of an unsigned int to the left **/
-#define ROR(x, bits) (((uns)(x) >> (bits)) | ((x) << (sizeof(uns)*8 - (bits)))) /** Bitwise rotation of an unsigned int to the right **/
+#define ROL(x, bits) (((x) << (bits)) | ((uint)(x) >> (sizeof(uint)*8 - (bits)))) /** Bitwise rotation of an unsigned int to the left **/
+#define ROR(x, bits) (((uint)(x) >> (bits)) | ((x) << (sizeof(uint)*8 - (bits)))) /** Bitwise rotation of an unsigned int to the right **/
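/* A small sanity check of the rotation macros, assuming a 32-bit unsigned
 * int; note they are only well-defined for 0 < bits < word width: */
#include <stdio.h>

int main(void)
{
  unsigned x = 0x80000001u;
  unsigned r = (x << 1) | (x >> 31);  /* ROL(x, 1) expanded by hand */
  printf("%08x\n", r);                /* prints 00000003 */
  return 0;
}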
/*** === Shortcuts for GCC Extensions ***/
* This is the basic printf-like function for logging a message.
* The @flags contain the log level and possibly other flag bits (like `L_SIGHANDLER`).
**/
-void msg(uns flags, const char *fmt, ...) FORMAT_CHECK(printf,2,3);
-void vmsg(uns flags, const char *fmt, va_list args); /** A vararg version of msg(). **/
+void msg(uint flags, const char *fmt, ...) FORMAT_CHECK(printf,2,3);
+void vmsg(uint flags, const char *fmt, va_list args); /** A vararg version of msg(). **/
void die(const char *, ...) NONRET FORMAT_CHECK(printf,1,2); /** Log a fatal error message and exit the program. **/
void vdie(const char *fmt, va_list args) NONRET; /** va_list version of die() **/
/*** === Random numbers (random.c) ***/
-uns random_u32(void); /** Return a pseudorandom 32-bit number. **/
-uns random_max(uns max); /** Return a pseudorandom 32-bit number in range [0,@max). **/
+uint random_u32(void); /** Return a pseudorandom 32-bit number. **/
+uint random_max(uint max); /** Return a pseudorandom 32-bit number in range [0,@max). **/
u64 random_u64(void); /** Return a pseudorandom 64-bit number. **/
u64 random_max_u64(u64 max); /** Return a pseudorandom 64-bit number in range [0,@max). **/
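/* For reference, the usual way a random_max()-style function avoids modulo
 * bias is rejection sampling. A standalone sketch using the C library's
 * rand() as the source (slightly conservative, but uniform; the helper name
 * is illustrative): */
#include <stdlib.h>

static unsigned random_below(unsigned max)    /* requires 0 < max <= RAND_MAX */
{
  unsigned limit = RAND_MAX - RAND_MAX % max; /* a multiple of max */
  unsigned r;
  do
    r = rand();
  while (r >= limit);
  return r % max;
}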
#include <errno.h>
struct lizard_buffer {
- uns len;
+ uint len;
void *ptr;
};
}
static void
-lizard_realloc(struct lizard_buffer *buf, uns max_len)
+lizard_realloc(struct lizard_buffer *buf, uint max_len)
/* max_len needs to be aligned to CPU_PAGE_SIZE */
{
if (max_len <= buf->len)
buf->len = max_len;
buf->ptr = mmap(NULL, buf->len + CPU_PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
if (buf->ptr == MAP_FAILED)
- die("mmap(anonymous, %d bytes): %m", (uns)(buf->len + CPU_PAGE_SIZE));
+ die("mmap(anonymous, %d bytes): %m", (uint)(buf->len + CPU_PAGE_SIZE));
if (mprotect(buf->ptr + buf->len, CPU_PAGE_SIZE, PROT_NONE) < 0)
die("mprotect: %m");
}
}
byte *
-lizard_decompress_safe(const byte *in, struct lizard_buffer *buf, uns expected_length)
+lizard_decompress_safe(const byte *in, struct lizard_buffer *buf, uint expected_length)
{
- uns lock_offset = ALIGN_TO(expected_length + 3, CPU_PAGE_SIZE); // +3 due to the unaligned access
+ uint lock_offset = ALIGN_TO(expected_length + 3, CPU_PAGE_SIZE); // +3 due to the unaligned access
if (lock_offset > buf->len)
lizard_realloc(buf, lock_offset);
volatile ucw_sighandler_t old_handler = set_signal_handler(SIGSEGV, sigsegv_handler);
main(int argc, char **argv)
{
int opt;
- uns action = 't';
- uns crash = 0;
+ uint action = 't';
+ uint crash = 0;
log_init(argv[0]);
while ((opt = cf_getopt(argc, argv, options, CF_NO_LONG_OPTS, NULL)) >= 0)
switch (opt)
void *mi, *mo;
int li, lo;
- uns adler = 0;
+ uint adler = 0;
struct stat st;
stat(argv[optind], &st);
#define CHAIN_MAX_TESTS 8 // crop longer collision chains
#define CHAIN_GOOD_MATCH 32 // we already have a good match => end
-static inline uns
+static inline uint
hashf(const byte *string)
/* 0..HASH_SIZE-1 */
{
return (byte *)string;
}
-static inline uns
-find_match(uns record_id, struct hash_record *hash_rec, const byte *string, const byte *string_end, byte **best_ptr, uns head)
+static inline uint
+find_match(uint record_id, struct hash_record *hash_rec, const byte *string, const byte *string_end, byte **best_ptr, uint head)
/* hash_tab[hash] == record_id points to the head of the doubly-linked
 * list of strings with the same hash. The records are statically
 * stored in a circular array hash_rec (with the 1st entry unused), and the
 * pointers are just 16-bit indices. The strings in every collision chain
 * are ordered by age. */
{
- uns count = CHAIN_MAX_TESTS;
- uns best_len = 0;
+ uint count = CHAIN_MAX_TESTS;
+ uint best_len = 0;
while (record_id && count-- > 0)
{
byte *record_string = locate_string(string, record_id, head);
}
else
cmp += 4;
- uns len = cmp - record_string - 1; /* cmp points 2 characters after the last match */
+ uint len = cmp - record_string - 1; /* cmp points 2 characters after the last match */
if (len > best_len)
{
best_len = len;
return best_len;
}
-static uns
-hash_string(hash_ptr_t *hash_tab, uns hash, struct hash_record *hash_rec, /*byte *string,*/ uns head, uns *to_delete)
+static uint
+hash_string(hash_ptr_t *hash_tab, uint hash, struct hash_record *hash_rec, /*byte *string,*/ uint head, uint *to_delete)
/* We reuse hash-records stored in a circular array. First, delete the old
 * one and then add the new one at the front of the linked list. */
{
struct hash_record *rec = hash_rec + head;
if (*to_delete) /* unlink the original record */
{
- uns prev_id = rec->prev & ((1<<15)-1);
+ uint prev_id = rec->prev & ((1<<15)-1);
if (rec->prev & (1<<15)) /* was a head */
hash_tab[prev_id] = 0;
else /* thanks to the ordering, this was a tail */
}
static inline byte *
-dump_unary_value(byte *out, uns l)
+dump_unary_value(byte *out, uint l)
{
while (l > 255)
{
}
static byte *
-flush_copy_command(uns bof, byte *out, const byte *start, uns len)
+flush_copy_command(uint bof, byte *out, const byte *start, uint len)
{
if (bof && len <= 238)
*out++ = len + 17;
}
int
-lizard_compress(const byte *in, uns in_len, byte *out)
+lizard_compress(const byte *in, uint in_len, byte *out)
/* Requires out to be allocated with at least in_len * LIZARD_MAX_MULTIPLY +
 * LIZARD_MAX_ADD bytes. There must be at least LIZARD_NEEDS_CHARS characters
 * allocated after in. Returns the actual compressed length. */
const byte *in_end = in + in_len;
byte *out_start = out;
const byte *copy_start = in;
- uns head = 1; /* 0 in unused */
- uns to_delete = 0, bof = 1;
+ uint head = 1; /* 0 in unused */
+ uint to_delete = 0, bof = 1;
bzero(hash_tab, sizeof(hash_tab)); /* init the hash-table */
while (in < in_end)
{
- uns hash = hashf(in);
+ uint hash = hashf(in);
byte *best = NULL;
- uns len = find_match(hash_tab[hash], hash_rec, in, in_end, &best, head);
+ uint len = find_match(hash_tab[hash], hash_rec, in, in_end, &best, head);
if (len < 3)
#if 0 // TODO: now, our routine does not detect matches of length 2
if (len == 2 && (in - best->string - 1) < (1<<10))
goto literal;
}
/* Record the match. */
- uns copy_len = in - copy_start;
- uns is_in_copy_mode = bof || copy_len >= 4;
- uns shift = in - best - 1;
+ uint copy_len = in - copy_start;
+ uint is_in_copy_mode = bof || copy_len >= 4;
+ uint shift = in - best - 1;
/* Try to use a 2-byte sequence. */
#if 0
if (len == 2)
}
/* Update the hash-table. */
head = hash_string(hash_tab, hash, hash_rec, head, &to_delete);
- for (uns i=1; i<len; i++)
+ for (uint i=1; i<len; i++)
head = hash_string(hash_tab, hashf(in+i), hash_rec, head, &to_delete);
in += len;
copy_start = in;
bof = 0;
}
- uns copy_len = in - copy_start;
+ uint copy_len = in - copy_start;
if (copy_len)
out = flush_copy_command(bof, out, copy_start, copy_len);
*out++ = 17; /* add EOF */
}
static inline byte *
-read_unary_value(const byte *in, uns *val)
+read_unary_value(const byte *in, uint *val)
{
- uns l = 0;
+ uint l = 0;
while (!*in++)
l += 255;
l += in[-1];
* decompressed length or a negative number when an error has occurred. */
{
byte *out_start = out;
- uns expect_copy_command = 1;
- uns len;
+ uint expect_copy_command = 1;
+ uint len;
if (*in > 17) /* short copy command at BOF */
{
len = *in++ - 17;
}
while (1)
{
- uns c = *in++;
- uns pos;
+ uint c = *in++;
+ uint pos;
if (c < 0x10)
if (expect_copy_command == 1)
{
*
* Use @lizard_decompress() to get the original data.
**/
-int lizard_compress(const byte *in, uns in_len, byte *out);
+int lizard_compress(const byte *in, uint in_len, byte *out);
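/* A hedged usage sketch: the caller sizes the output buffer for the worst
 * case, using the constants referenced in the lizard.c comment above, and
 * the input must keep LIZARD_NEEDS_CHARS of slack after it. Error handling
 * is elided and the helper name is illustrative. */
static byte *compress_blob(const byte *in, uint in_len, int *out_len)
{
  byte *out = xmalloc(in_len * LIZARD_MAX_MULTIPLY + LIZARD_MAX_ADD);
  *out_len = lizard_compress(in, in_len, out);
  return out;
}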
/**
* Decompress data previously compressed by @lizard_compress().
* Beware this function is not thread-safe and is not even reentrant
* (because of internal segfault handling).
**/
-byte *lizard_decompress_safe(const byte *in, struct lizard_buffer *buf, uns expected_length);
+byte *lizard_decompress_safe(const byte *in, struct lizard_buffer *buf, uint expected_length);
/* adler32.c */
* @adler is the old value, @byte points to @len bytes of data to update with.
* Result is returned.
**/
-uns adler32_update(uns adler, const byte *ptr, uns len);
+uint adler32_update(uint adler, const byte *ptr, uint len);
/**
* Compute the Adler-32 checksum of a block of data.
**/
-static inline uns adler32(const byte *buf, uns len)
+static inline uint adler32(const byte *buf, uint len)
{
return adler32_update(1, buf, len);
}
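/* For reference, Adler-32 itself is simple: two running sums modulo 65521
 * (the largest prime below 2^16). A naive standalone version showing what
 * the functions above compute, not how they compute it: */
#include <stdint.h>
#include <stddef.h>

static uint32_t adler32_ref(const uint8_t *p, size_t len)
{
  uint32_t a = 1, b = 0;
  while (len--) {
    a = (a + *p++) % 65521;
    b = (b + a) % 65521;
  }
  return (b << 16) | a;
}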
cnode n;
clist types; // simple_list of names
double rate;
- uns burst;
+ uint burst;
};
static char *
#define P(x) PTR_TO(struct limit_config, x)
CF_LIST("Types", P(types), &cf_string_list_config),
CF_DOUBLE("Rate", P(rate)),
- CF_UNS("Burst", P(burst)),
+ CF_UINT("Burst", P(burst)),
#undef P
CF_END
}
/*** Type sets ***/
-static uns
+static uint
log_type_mask(clist *l)
{
if (clist_empty(l))
return ~0U;
- uns types = 0;
+ uint types = 0;
CLIST_FOR_EACH(simp_node *, s, *l)
if (!strcmp(s->s, "all"))
return ~0U;
static void
log_apply_limits(struct log_stream *ls, struct limit_config *lim)
{
- uns mask = log_type_mask(&lim->types);
+ uint mask = log_type_mask(&lim->types);
if (!mask)
return;
tbf->burst = lim->burst;
tbf_init(tbf);
- for (uns i=0; i < LS_NUM_TYPES; i++)
+ for (uint i=0; i < LS_NUM_TYPES; i++)
if (mask & (1 << i))
limits[i] = tbf;
}
int type = log_register_type("foo");
struct log_stream *ls = log_new_configured("combined");
- for (uns i=0; i<10; i++)
+ for (uint i=0; i<10; i++)
{
msg(L_INFO | ls->regnum | type, "Hello, universe!");
usleep(200000);
struct file_stream {
struct log_stream ls; // ls.name is the current name of the log file
int fd;
- uns flags; // FF_xxx
+ uint flags; // FF_xxx
char *orig_name; // Original name with strftime escapes
};
}
struct log_stream *
-log_new_fd(int fd, uns flags)
+log_new_fd(int fd, uint flags)
{
struct log_stream *ls = log_new_stream(sizeof(struct file_stream));
struct file_stream *fs = (struct file_stream *) ls;
}
struct log_stream *
-log_new_file(const char *path, uns flags)
+log_new_file(const char *path, uint flags)
{
struct log_stream *ls = log_new_stream(sizeof(struct file_stream));
struct file_stream *fs = (struct file_stream *) ls;
/* The head of the list of freed log_streams indexes in log_streams.ptr (~0U if none free).
* Freed positions in log_streams.ptr are connected into a linked list in the following way:
* log_streams.ptr[log_streams_free].levels is the index of next freed position (or ~0U) */
-static uns log_streams_free = ~0U;
+static uint log_streams_free = ~0U;
/* Initialize the logstream module.
 * It is not necessary to call this explicitly as it is called by
}
void
-log_set_format(struct log_stream *ls, uns mask, uns data)
+log_set_format(struct log_stream *ls, uint mask, uint data)
{
ls->msgfmt = (ls->msgfmt & mask) | data;
CLIST_FOR_EACH(simp_node *, i, ls->substreams)
log_type_names = xmalloc_zero(LS_NUM_TYPES * sizeof(char *));
log_type_names[0] = "default";
}
- uns id;
+ uint id;
for (id=0; id < LS_NUM_TYPES && log_type_names[id]; id++)
if (!strcmp(log_type_names[id], name))
return LS_SET_TYPE(id);
if (!log_type_names)
return -1;
- for (uns id=0; id < LS_NUM_TYPES && log_type_names[id]; id++)
+ for (uint id=0; id < LS_NUM_TYPES && log_type_names[id]; id++)
if (!strcmp(log_type_names[id], name))
return LS_SET_TYPE(id);
return -1;
{ "local7", LOG_LOCAL7 },
};
- for (uns i=0; i < ARRAY_SIZE(facilities); i++)
+ for (uint i=0; i < ARRAY_SIZE(facilities); i++)
if (!strcmp(facilities[i].name, name))
return facilities[i].id;
return -1;
*/
struct log_stream *
-log_stream_by_flags(uns flags)
+log_stream_by_flags(uint flags)
{
int n = LS_GET_STRNUM(flags);
if (n < 0 || n >= log_streams_after || log_streams.ptr[n]->regnum == -1)
char **log_type_names;
char *
-log_type_name(uns flags)
+log_type_name(uint flags)
{
- uns type = LS_GET_TYPE(flags);
+ uint type = LS_GET_TYPE(flags);
if (!log_type_names || !log_type_names[type])
return "default";
/*** Logging ***/
void
-vmsg(uns cat, const char *fmt, va_list args)
+vmsg(uint cat, const char *fmt, va_list args)
{
struct timeval tv;
struct tm tm;
char msgbuf[256];
char *p;
int len;
- uns sighandler = cat & L_SIGHANDLER;
+ uint sighandler = cat & L_SIGHANDLER;
struct log_stream *ls;
struct log_msg m = { .flags = cat };
/*** Utility functions ***/
void
-msg(unsigned int cat, const char *fmt, ...)
+msg(uint cat, const char *fmt, ...)
{
va_list args;
int m_len; // Length without the \0
struct tm *tm; // Current time
struct timeval *tv;
- uns flags; // Category and other flags as passed to msg()
+ uint flags; // Category and other flags as passed to msg()
char *raw_msg; // Unformatted parts
char *stime;
char *sutime;
- uns depth; // Recursion depth
+ uint depth; // Recursion depth
bool error; // An error has occurred (e.g., an infinite loop in sub-streams)
};
struct log_stream {
char *name; // Optional name, allocated by the user (or constructor)
int regnum; // Stream number, already encoded by LS_SET_STRNUM(); -1 if closed
- uns levels; // Bitmask of accepted severity levels (default: all)
- uns types; // Bitmask of accepted message types (default: all)
- uns msgfmt; // Formatting flags (LSFMT_xxx)
- uns use_count; // Number of references to the stream
- uns stream_flags; // Various other flags (LSFLAG_xxx)
+ uint levels; // Bitmask of accepted severity levels (default: all)
+ uint types; // Bitmask of accepted message types (default: all)
+ uint msgfmt; // Formatting flags (LSFMT_xxx)
+ uint use_count; // Number of references to the stream
+ uint stream_flags; // Various other flags (LSFLAG_xxx)
int (*filter)(struct log_stream* ls, struct log_msg *m); // Filter function, return non-zero to discard the message
clist substreams; // Pass the message to these streams (simple_list of pointers)
int (*handler)(struct log_stream *ls, struct log_msg *m); // Called to commit the message, return 0 for success, errno on error
int log_find_type(const char *name);
/** Given a flag set, extract the message type ID and return its name. **/
-char *log_type_name(uns flags);
+char *log_type_name(uint flags);
/*** === Operations on streams ***/
* Set formatting flags of a given stream and all its substreams. The flags are
* AND'ed with @mask and OR'ed with @data.
**/
-void log_set_format(struct log_stream *ls, uns mask, uns data);
+void log_set_format(struct log_stream *ls, uint mask, uint data);
/**
* Find a stream by its registration number (in the format of logging flags).
* Returns NULL if there is no such stream.
**/
-struct log_stream *log_stream_by_flags(uns flags);
+struct log_stream *log_stream_by_flags(uint flags);
/** Return a pointer to the default stream (stream #0). **/
static inline struct log_stream *log_default_stream(void)
* even in multi-threaded programs.
***/
-struct log_stream *log_new_file(const char *path, uns flags); /** Create a stream bound to a log file. See `FF_xxx` for @flags. **/
-struct log_stream *log_new_fd(int fd, uns flags); /** Create a stream bound to a file descriptor. See `FF_xxx` for @flags. **/
+struct log_stream *log_new_file(const char *path, uint flags); /** Create a stream bound to a log file. See `FF_xxx` for @flags. **/
+struct log_stream *log_new_fd(int fd, uint flags); /** Create a stream bound to a file descriptor. See `FF_xxx` for @flags. **/
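A minimal sketch of the file-stream API above. It assumes the default FF_xxx flags (0) and that a message is routed to a particular stream by OR-ing its regnum into the category, as the regnum comment in struct log_stream (already encoded by LS_SET_STRNUM()) suggests:

  struct log_stream *ls = log_new_file("/var/log/myapp.log", 0);  /* 0 = default FF_xxx flags (assumed) */
  msg(L_INFO | ls->regnum, "logging to a dedicated stream");      /* regnum is pre-encoded, see above */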
enum log_file_flag { /** Flags used for file-based logging **/
FF_FORMAT_NAME = 1, // Internal: Name contains strftime escapes
}
void
-block_io_read(struct main_block_io *bio, void *buf, uns len)
+block_io_read(struct main_block_io *bio, void *buf, uint len)
{
ASSERT(bio->file.n.next);
if (len)
}
void
-block_io_write(struct main_block_io *bio, void *buf, uns len)
+block_io_write(struct main_block_io *bio, void *buf, uint len)
{
ASSERT(bio->file.n.next);
if (len)
struct rio_buffer {
cnode n;
- uns full;
- uns written;
+ uint full;
+ uint written;
byte buf[];
};
static int
rec_io_process_read_buf(struct main_rec_io *rio)
{
- uns got;
+ uint got;
while (rio->read_running && (got = rio->read_handler(rio)))
{
DBG("RIO READ: Ate %u bytes", got);
}
restart: ;
- uns rec_start_pos = rio->read_rec_start - rio->read_buf;
- uns rec_end_pos = rec_start_pos + rio->read_avail;
- uns free_space = rio->read_buf_size - rec_end_pos;
+ uint rec_start_pos = rio->read_rec_start - rio->read_buf;
+ uint rec_end_pos = rec_start_pos + rio->read_avail;
+ uint free_space = rio->read_buf_size - rec_end_pos;
DBG("RIO READ: rec_start=%u avail=%u prev_avail=%u free=%u/%u",
rec_start_pos, rio->read_avail, rio->read_prev_avail,
free_space, rio->read_buf_size);
static void
rec_io_recalc_read(struct main_rec_io *rio)
{
- uns flow = !rio->write_throttle_read || rio->write_watermark < rio->write_throttle_read;
- uns run = rio->read_started && flow;
+ uint flow = !rio->write_throttle_read || rio->write_watermark < rio->write_throttle_read;
+ uint run = rio->read_started && flow;
DBG("RIO: Recalc read (flow=%u, start=%u) -> %u", flow, rio->read_started, run);
if (run != rio->read_running)
{
}
void
-rec_io_write(struct main_rec_io *rio, void *data, uns len)
+rec_io_write(struct main_rec_io *rio, void *data, uint len)
{
byte *bdata = data;
ASSERT(rec_io_is_active(rio));
b = rec_io_get_buffer(rio);
clist_add_tail(&rio->busy_write_buffers, &b->n);
}
- uns l = MIN(len, rio->write_buf_size - b->full);
+ uint l = MIN(len, rio->write_buf_size - b->full);
memcpy(b->buf + b->full, bdata, l);
b->full += l;
bdata += l;
void
rec_io_set_timeout(struct main_rec_io *rio, timestamp_t expires_delta)
{
- DBG("RIO: Setting timeout %u", (uns) expires_delta);
+ DBG("RIO: Setting timeout %u", (uint) expires_delta);
if (!expires_delta)
timer_del(&rio->timer);
else
timer_add_rel(&rio->timer, expires_delta);
}
-uns
+uint
rec_io_parse_line(struct main_rec_io *rio)
{
- for (uns i = rio->read_prev_avail; i < rio->read_avail; i++)
+ for (uint i = rio->read_prev_avail; i < rio->read_avail; i++)
if (rio->read_rec_start[i] == '\n')
return i+1;
return 0;
#ifdef TEST
-static uns rhand(struct main_rec_io *rio)
+static uint rhand(struct main_rec_io *rio)
{
- uns r = rec_io_parse_line(rio);
+ uint r = rec_io_parse_line(rio);
if (r)
{
rio->read_rec_start[r-1] = 0;
return (m == main_current_nocheck());
}
-static inline uns
+static inline uint
count_timers(struct main_context *m)
{
if (m->timer_table)
DBG("MAIN: Setting timer %p (expire at now+%lld)", tm, (long long)(expires - m->now));
else
DBG("MAIN: Clearing timer %p", tm);
- uns num_timers = count_timers(m);
+ uint num_timers = count_timers(m);
if (tm->expires < expires)
{
if (!tm->expires)
timer_add(tm, 0);
}
-static uns
+static uint
file_want_events(struct main_file *fi)
{
- uns events = 0;
+ uint events = 0;
if (fi->read_handler)
events |= POLLIN;
if (fi->write_handler)
{
msg(L_DEBUG, "### Main loop status on %lld", (long long) m->now);
msg(L_DEBUG, "\tActive timers:");
- uns num_timers = count_timers(m);
- for (uns i = 1; i <= num_timers; i++)
+ uint num_timers = count_timers(m);
+ for (uint i = 1; i <= num_timers; i++)
timer_debug(m->timer_table[i]);
msg(L_DEBUG, "\tActive files:");
CLIST_FOR_EACH(struct main_file *, fi, m->file_list)
#else
struct pollfd *p = m->poll_table;
struct main_file **pf = m->poll_file_table;
- for (uns i=0; i < m->file_cnt; i++)
+ for (uint i=0; i < m->file_cnt; i++)
if (p[i].revents)
{
struct main_file *fi = pf[i];
struct main_context {
timestamp_t now; /* [*] Current time in milliseconds since an unknown epoch. See main_get_time(). */
timestamp_t idle_time; /* [*] Total time in milliseconds spent by waiting for events. */
- uns shutdown; /* [*] Setting this to nonzero forces the main_loop() function to terminate. */
+ uint shutdown; /* [*] Setting this to nonzero forces the main_loop() function to terminate. */
clist file_list;
clist file_active_list;
clist hook_list;
clist hook_done_list;
clist process_list;
clist signal_list;
- uns file_cnt;
- uns single_step;
+ uint file_cnt;
+ uint single_step;
#ifdef CONFIG_UCW_EPOLL
int epoll_fd; /* File descriptor used for epoll */
struct epoll_event *epoll_events;
clist file_recalc_list;
#else
- uns poll_table_obsolete;
+ uint poll_table_obsolete;
struct pollfd *poll_table;
struct main_file **poll_file_table;
#endif
struct main_timer {
cnode n;
timestamp_t expires;
- uns index;
+ uint index;
void (*handler)(struct main_timer *tm); /* [*] Function to be called when the timer expires. */
void *data; /* [*] Data for use by the handler */
};
int (*read_handler)(struct main_file *fi); /* [*] To be called when ready for reading/writing; must call file_chg() afterwards */
int (*write_handler)(struct main_file *fi);
void *data; /* [*] Data for use by the handlers */
- uns events;
+ uint events;
#ifdef CONFIG_UCW_EPOLL
- uns last_want_events;
+ uint last_want_events;
#else
struct pollfd *pollfd;
#endif
struct main_block_io {
struct main_file file;
byte *rbuf; /* Read/write pointers for use by file_read/write */
- uns rpos, rlen;
+ uint rpos, rlen;
byte *wbuf;
- uns wpos, wlen;
+ uint wpos, wlen;
void (*read_done)(struct main_block_io *bio); /* [*] Called when file_read is finished; rpos < rlen if EOF */
void (*write_done)(struct main_block_io *bio); /* [*] Called when file_write is finished */
void (*error_handler)(struct main_block_io *bio, int cause); /* [*] Handler to call on errors */
* You can use a call with zero @len to cancel the current read, but all read data
* will be thrown away.
**/
-void block_io_read(struct main_block_io *bio, void *buf, uns len);
+void block_io_read(struct main_block_io *bio, void *buf, uint len);
/**
* Request that the main loop writes @len bytes of data from @buf to @bio.
* If you call it with zero @len, it will cancel the previous write, but note
* that some data may already be written.
**/
-void block_io_write(struct main_block_io *bio, void *buf, uns len);
+void block_io_write(struct main_block_io *bio, void *buf, uint len);
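A hedged sketch of the intended call pattern for the two functions above, assuming @bio is a struct main_block_io already attached to the main loop (the attach call itself is outside these hunks); handler names are illustrative:

  static byte xfer[16];

  static void xfer_read_done(struct main_block_io *bio)
  {
    if (bio->rpos == bio->rlen)                 /* rpos < rlen would mean EOF, per the struct comment */
      block_io_write(bio, xfer, sizeof(xfer));  /* echo the 16 bytes back */
  }

  static void xfer_start(struct main_block_io *bio)
  {
    bio->read_done = xfer_read_done;            /* callback field from struct main_block_io above */
    block_io_read(bio, xfer, sizeof(xfer));     /* request exactly 16 bytes */
  }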
/**
* Sets a timer for a file @bio. If the timer is not overwritten or disabled
struct main_file file;
byte *read_buf;
byte *read_rec_start; /* [*] Start of current record */
- uns read_avail; /* [*] How much data is available */
- uns read_prev_avail; /* [*] How much data was available in previous read_handler */
- uns read_buf_size; /* [*] Read buffer size allocated (can be set before rec_io_add()) */
- uns read_started; /* Reading requested by user */
- uns read_running; /* Reading really runs (read_started && not stopped by write_throttle_read) */
- uns read_rec_max; /* [*] Maximum record size (0=unlimited) */
+ uint read_avail; /* [*] How much data is available */
+ uint read_prev_avail; /* [*] How much data was available in previous read_handler */
+ uint read_buf_size; /* [*] Read buffer size allocated (can be set before rec_io_add()) */
+ uint read_started; /* Reading requested by user */
+ uint read_running; /* Reading really runs (read_started && not stopped by write_throttle_read) */
+ uint read_rec_max; /* [*] Maximum record size (0=unlimited) */
clist busy_write_buffers;
clist idle_write_buffers;
- uns write_buf_size; /* [*] Write buffer size allocated (can be set before rec_io_add()) */
- uns write_watermark; /* [*] How much data are waiting to be written */
- uns write_throttle_read; /* [*] If more than write_throttle_read bytes are buffered, stop reading; 0=no stopping */
- uns (*read_handler)(struct main_rec_io *rio); /* [*] Called whenever more bytes are read; returns 0 (want more) or number of bytes eaten */
+ uint write_buf_size; /* [*] Write buffer size allocated (can be set before rec_io_add()) */
+ uint write_watermark; /* [*] How much data are waiting to be written */
+ uint write_throttle_read; /* [*] If more than write_throttle_read bytes are buffered, stop reading; 0=no stopping */
+ uint (*read_handler)(struct main_rec_io *rio); /* [*] Called whenever more bytes are read; returns 0 (want more) or number of bytes eaten */
int (*notify_handler)(struct main_rec_io *rio, int status); /* [*] Called to notify about errors and other events */
/* Returns either HOOK_RETRY or HOOK_IDLE. */
struct main_timer timer;
/** Analogous to @block_io_set_timeout(). **/
void rec_io_set_timeout(struct main_rec_io *rio, timestamp_t expires_delta);
-void rec_io_write(struct main_rec_io *rio, void *data, uns len);
+void rec_io_write(struct main_rec_io *rio, void *data, uint len);
/**
* An auxiliary function used for parsing of lines. When called in the @read_handler,
* it searches for the end of line character. When a complete line is found, the length
* of the line (including the end of line character) is returned. Otherwise, it returns zero.
**/
-uns rec_io_parse_line(struct main_rec_io *rio);
+uint rec_io_parse_line(struct main_rec_io *rio);
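Combining rec_io_parse_line() with rec_io_write() gives the usual line-echo handler; a hedged sketch in the spirit of rhand() from the test program above (the handler name is illustrative):

  static uint echo_line(struct main_rec_io *rio)
  {
    uint len = rec_io_parse_line(rio);
    if (!len)
      return 0;                                   /* no complete line buffered yet */
    rec_io_write(rio, rio->read_rec_start, len);  /* echo, including the '\n' */
    return len;                                   /* bytes eaten from the buffer */
  }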
/**
* Specifies what kind of error or other event happened, when the @notify_handler
#ifdef CPU_LITTLE_ENDIAN
#define byteReverse(buf, len) /* Nothing */
#else
-void byteReverse(byte *buf, uns longs);
+void byteReverse(byte *buf, uint longs);
/*
* Note: this code is harmless on little-endian machines.
*/
-void byteReverse(byte *buf, uns longs)
+void byteReverse(byte *buf, uint longs)
{
u32 t;
do {
- t = (u32) ((uns) buf[3] << 8 | buf[2]) << 16 |
- ((uns) buf[1] << 8 | buf[0]);
+ t = (u32) ((uint) buf[3] << 8 | buf[2]) << 16 |
+ ((uint) buf[1] << 8 | buf[0]);
*(u32 *) buf = t;
buf += 4;
} while (--longs);
* Update context to reflect the concatenation of another buffer full
* of bytes.
*/
-void md5_update(md5_context *ctx, const byte *buf, uns len)
+void md5_update(md5_context *ctx, const byte *buf, uint len)
{
u32 t;
*/
byte *md5_final(md5_context *ctx)
{
- uns count;
+ uint count;
byte *p;
/* Compute number of bytes mod 64 */
buf[3] += d;
}
-void md5_hash_buffer(byte *outbuf, const byte *buffer, uns length)
+void md5_hash_buffer(byte *outbuf, const byte *buffer, uint length)
{
md5_context c;
md5_init(&c);
* as if you concatenated all the data together and fed them here all at
* once.
*/
-void md5_update(md5_context *context, const byte *buf, uns len);
+void md5_update(md5_context *context, const byte *buf, uint len);
/**
* Call this after the last @md5_update(). It will terminate the
* algorithm and return a pointer to the result.
* md5_update(&c, buffer, length);
* memcpy(outbuf, md5_final(&c), MD5_SIZE);
*/
-void md5_hash_buffer(byte *outbuf, const byte *buffer, uns length);
+void md5_hash_buffer(byte *outbuf, const byte *buffer, uint length);
#define MD5_HEX_SIZE 33 /** How many bytes a string buffer for MD5 in hexadecimal format should have. **/
#define MD5_SIZE 16 /** Number of bytes the MD5 hash takes in the binary form. **/
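A one-shot example of the interface above; the cast is needed because byte is unsigned char, and mem_to_hex() from <ucw/string.h> is used the same way in the SHA1 test elsewhere in this patch:

  byte digest[MD5_SIZE];
  char hex[MD5_HEX_SIZE];
  md5_hash_buffer(digest, (const byte *)"abc", 3);
  mem_to_hex(hex, digest, MD5_SIZE, 0);
  puts(hex);                                      /* 32 hex digits */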
#include <string.h>
static char *
-mp_vprintf_at(struct mempool *mp, uns ofs, const char *fmt, va_list args)
+mp_vprintf_at(struct mempool *mp, uint ofs, const char *fmt, va_list args)
{
char *ret = mp_grow(mp, ofs + 1) + ofs;
va_list args2;
}
while (cnt < 0);
}
- else if ((uns)cnt >= mp_avail(mp) - ofs)
+ else if ((uint)cnt >= mp_avail(mp) - ofs)
{
ret = mp_grow(mp, ofs + cnt + 1) + ofs;
va_copy(args2, args);
char *
mp_append_vprintf(struct mempool *mp, char *ptr, const char *fmt, va_list args)
{
- uns ofs = mp_open(mp, ptr);
+ uint ofs = mp_open(mp, ptr);
ASSERT(ofs && !ptr[ofs - 1]);
return mp_vprintf_at(mp, ofs - 1, fmt, args);
}
{
if (!s)
return NULL;
- uns l = strlen(s) + 1;
+ uint l = strlen(s) + 1;
char *t = mp_alloc_fast_noalign(p, l);
memcpy(t, s, l);
return t;
}
void *
-mp_memdup(struct mempool *p, const void *s, uns len)
+mp_memdup(struct mempool *p, const void *s, uint len)
{
void *t = mp_alloc_fast(p, len);
memcpy(t, s, len);
va_list args, a;
va_start(args, p);
char *x, *y;
- uns cnt = 0;
+ uint cnt = 0;
va_copy(a, args);
while (x = va_arg(a, char *))
cnt++;
- uns *sizes = alloca(cnt * sizeof(uns));
- uns len = 1;
+ uint *sizes = alloca(cnt * sizeof(uint));
+ uint len = 1;
cnt = 0;
va_end(a);
va_copy(a, args);
}
char *
-mp_strjoin(struct mempool *p, char **a, uns n, uns sep)
+mp_strjoin(struct mempool *p, char **a, uint n, uint sep)
{
- uns sizes[n];
- uns len = 1;
- for (uns i=0; i<n; i++)
+ uint sizes[n];
+ uint len = 1;
+ for (uint i=0; i<n; i++)
len += sizes[i] = strlen(a[i]);
if (sep && n)
len += n-1;
char *dest = mp_alloc_fast_noalign(p, len);
char *d = dest;
- for (uns i=0; i<n; i++)
+ for (uint i=0; i<n; i++)
{
if (sep && i)
*d++ = sep;
}
char *
-mp_str_from_mem(struct mempool *a, const void *mem, uns len)
+mp_str_from_mem(struct mempool *a, const void *mem, uint len)
{
char *str = mp_alloc_noalign(a, len+1);
memcpy(str, mem, len);
struct mempool *pool; // Can be useful when analysing coredump for memory leaks
#endif
struct mempool_chunk *next;
- uns size;
+ uint size;
};
-static uns
-mp_align_size(uns size)
+static uint
+mp_align_size(uint size)
{
#ifdef CONFIG_UCW_POOL_IS_MMAP
return ALIGN_TO(size + MP_CHUNK_TAIL, CPU_PAGE_SIZE) - MP_CHUNK_TAIL;
}
void
-mp_init(struct mempool *pool, uns chunk_size)
+mp_init(struct mempool *pool, uint chunk_size)
{
chunk_size = mp_align_size(MAX(sizeof(struct mempool), chunk_size));
*pool = (struct mempool) {
}
static void *
-mp_new_big_chunk(struct mempool *pool, uns size)
+mp_new_big_chunk(struct mempool *pool, uint size)
{
struct mempool_chunk *chunk;
chunk = xmalloc(size + MP_CHUNK_TAIL) + size;
}
static void *
-mp_new_chunk(struct mempool *pool, uns size)
+mp_new_chunk(struct mempool *pool, uint size)
{
#ifdef CONFIG_UCW_POOL_IS_MMAP
struct mempool_chunk *chunk;
}
struct mempool *
-mp_new(uns chunk_size)
+mp_new(uint chunk_size)
{
chunk_size = mp_align_size(MAX(sizeof(struct mempool), chunk_size));
struct mempool_chunk *chunk = mp_new_chunk(NULL, chunk_size);
}
static void
-mp_stats_chain(struct mempool *pool, struct mempool_chunk *chunk, struct mempool_stats *stats, uns idx)
+mp_stats_chain(struct mempool *pool, struct mempool_chunk *chunk, struct mempool_stats *stats, uint idx)
{
while (chunk)
{
}
void *
-mp_alloc_internal(struct mempool *pool, uns size)
+mp_alloc_internal(struct mempool *pool, uint size)
{
struct mempool_chunk *chunk;
if (size <= pool->threshold)
else if (likely(size <= MP_SIZE_MAX))
{
pool->idx = 1;
- uns aligned = ALIGN_TO(size, CPU_STRUCT_ALIGN);
+ uint aligned = ALIGN_TO(size, CPU_STRUCT_ALIGN);
chunk = mp_new_big_chunk(pool, aligned);
chunk->next = pool->state.last[1];
#ifdef CONFIG_DEBUG
}
void *
-mp_alloc(struct mempool *pool, uns size)
+mp_alloc(struct mempool *pool, uint size)
{
return mp_alloc_fast(pool, size);
}
void *
-mp_alloc_noalign(struct mempool *pool, uns size)
+mp_alloc_noalign(struct mempool *pool, uint size)
{
return mp_alloc_fast_noalign(pool, size);
}
void *
-mp_alloc_zero(struct mempool *pool, uns size)
+mp_alloc_zero(struct mempool *pool, uint size)
{
void *ptr = mp_alloc_fast(pool, size);
bzero(ptr, size);
}
void *
-mp_start_internal(struct mempool *pool, uns size)
+mp_start_internal(struct mempool *pool, uint size)
{
void *ptr = mp_alloc_internal(pool, size);
pool->state.free[pool->idx] += size;
}
void *
-mp_start(struct mempool *pool, uns size)
+mp_start(struct mempool *pool, uint size)
{
return mp_start_fast(pool, size);
}
void *
-mp_start_noalign(struct mempool *pool, uns size)
+mp_start_noalign(struct mempool *pool, uint size)
{
return mp_start_fast_noalign(pool, size);
}
void *
-mp_grow_internal(struct mempool *pool, uns size)
+mp_grow_internal(struct mempool *pool, uint size)
{
if (unlikely(size > MP_SIZE_MAX))
die("Cannot allocate %u bytes of memory", size);
- uns avail = mp_avail(pool);
+ uint avail = mp_avail(pool);
void *ptr = mp_ptr(pool);
if (pool->idx)
{
- uns amortized = likely(avail <= MP_SIZE_MAX / 2) ? avail * 2 : MP_SIZE_MAX;
+ uint amortized = likely(avail <= MP_SIZE_MAX / 2) ? avail * 2 : MP_SIZE_MAX;
amortized = MAX(amortized, size);
amortized = ALIGN_TO(amortized, CPU_STRUCT_ALIGN);
struct mempool_chunk *chunk = pool->state.last[1], *next = chunk->next;
}
}
-uns
+uint
mp_open(struct mempool *pool, void *ptr)
{
return mp_open_fast(pool, ptr);
}
void *
-mp_realloc(struct mempool *pool, void *ptr, uns size)
+mp_realloc(struct mempool *pool, void *ptr, uint size)
{
return mp_realloc_fast(pool, ptr, size);
}
void *
-mp_realloc_zero(struct mempool *pool, void *ptr, uns size)
+mp_realloc_zero(struct mempool *pool, void *ptr, uint size)
{
- uns old_size = mp_open_fast(pool, ptr);
+ uint old_size = mp_open_fast(pool, ptr);
ptr = mp_grow(pool, size);
if (size > old_size)
bzero(ptr + old_size, size - old_size);
}
void *
-mp_spread_internal(struct mempool *pool, void *p, uns size)
+mp_spread_internal(struct mempool *pool, void *p, uint size)
{
void *old = mp_ptr(pool);
void *new = mp_grow_internal(pool, p-old+size);
#include <time.h>
static void
-fill(byte *ptr, uns len, uns magic)
+fill(byte *ptr, uint len, uint magic)
{
while (len--)
*ptr++ = (magic++ & 255);
}
static void
-check(byte *ptr, uns len, uns magic, uns align)
+check(byte *ptr, uint len, uint magic, uint align)
{
ASSERT(!((uintptr_t)ptr & (align - 1)));
while (len--)
if (cf_getopt(argc, argv, CF_SHORT_OPTS, CF_NO_LONG_OPTS, NULL) >= 0 || argc != optind)
die("Invalid usage");
- uns max = 1000, n = 0, m = 0, can_realloc = 0;
+ uint max = 1000, n = 0, m = 0, can_realloc = 0;
void *ptr[max];
struct mempool_state *state[max];
- uns len[max], num[max], align[max];
+ uint len[max], num[max], align[max];
struct mempool *mp = mp_new(128), mp_static;
- for (uns i = 0; i < 5000; i++)
+ for (uint i = 0; i < 5000; i++)
{
- for (uns j = 0; j < n; j++)
+ for (uint j = 0; j < n; j++)
check(ptr[j], len[j], j, align[j]);
#if 0
DBG("free_small=%u free_big=%u idx=%u chunk_size=%u last_big=%p", mp->state.free[0], mp->state.free[1], mp->idx, mp->chunk_size, mp->last_big);
ASSERT(0);
grow:
{
- uns k = n - 1;
- for (uns i = random_max(4); i--; )
+ uint k = n - 1;
+ for (uint i = random_max(4); i--; )
{
- uns l = len[k];
+ uint l = len[k];
len[k] = random_max(0x2000);
DBG("grow(%u)", len[k]);
ptr[k] = mp_grow(mp, len[k]);
}
else if (can_realloc && n && (r -= 20) < 0)
{
- uns i = n - 1, l = len[i];
+ uint i = n - 1, l = len[i];
DBG("realloc(%p, %u)", ptr[i], len[i]);
ptr[i] = mp_realloc(mp, ptr[i], len[i] = random_max(0x2000));
DBG(" -> (%p, %u)", ptr[i], len[i]);
}
else if (m && (r -= 1) < 0)
{
- uns i = random_max(m);
+ uint i = random_max(m);
DBG("restore(%u)", i);
mp_restore(mp, state[i]);
n = num[m = i];
* You should use this one as an opaque handle only, the insides are internal.
**/
struct mempool_state {
- uns free[2];
+ uint free[2];
void *last[2];
struct mempool_state *next;
};
struct ucw_allocator allocator;
struct mempool_state state;
void *unused, *last_big;
- uns chunk_size, threshold, idx;
+ uint chunk_size, threshold, idx;
u64 total_size;
};
struct mempool_stats { /** Mempool statistics. See @mp_stats(). **/
u64 total_size; /* Real allocated size in bytes */
u64 used_size; /* Estimated size allocated from mempool to application */
- uns chain_count[3]; /* Number of allocated chunks in small/big/unused chains */
+ uint chain_count[3]; /* Number of allocated chunks in small/big/unused chains */
u64 chain_size[3]; /* Size of allocated chunks in small/big/unused chains */
};
*
* Memory pools can be treated as <<trans:respools,resources>>, see <<trans:res_mempool()>>.
**/
-void mp_init(struct mempool *pool, uns chunk_size);
+void mp_init(struct mempool *pool, uint chunk_size);
/**
* Allocate and initialize a new memory pool.
*
* Memory pools can be treated as <<trans:respools,resources>>, see <<trans:res_mempool()>>.
**/
-struct mempool *mp_new(uns chunk_size);
+struct mempool *mp_new(uint chunk_size);
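The two initialization styles side by side; the mp_delete() cleanup call is an assumption based on the truncated comment below ("Cleanup mempool initialized by mp_init or mp_new"):

  struct mempool static_pool;
  mp_init(&static_pool, 4096);                /* caller-owned structure */
  struct mempool *heap_pool = mp_new(4096);   /* pool lives inside its own first chunk */
  /* ... allocations ... */
  mp_delete(heap_pool);                       /* assumed name of the cleanup function */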
/**
* Cleanup mempool initialized by mp_init or mp_new.
***/
/* For internal use only, do not call directly */
-void *mp_alloc_internal(struct mempool *pool, uns size) LIKE_MALLOC;
+void *mp_alloc_internal(struct mempool *pool, uint size) LIKE_MALLOC;
/**
* The function allocates new @size bytes on a given memory pool.
* `CPU_STRUCT_ALIGN` bytes and this condition remains true also
* after future reallocations.
**/
-void *mp_alloc(struct mempool *pool, uns size);
+void *mp_alloc(struct mempool *pool, uint size);
/**
* The same as @mp_alloc(), but the result may be unaligned.
**/
-void *mp_alloc_noalign(struct mempool *pool, uns size);
+void *mp_alloc_noalign(struct mempool *pool, uint size);
/**
* The same as @mp_alloc(), but fills the newly allocated memory with zeroes.
**/
-void *mp_alloc_zero(struct mempool *pool, uns size);
+void *mp_alloc_zero(struct mempool *pool, uint size);
/**
* Inlined version of @mp_alloc().
**/
-static inline void *mp_alloc_fast(struct mempool *pool, uns size)
+static inline void *mp_alloc_fast(struct mempool *pool, uint size)
{
- uns avail = pool->state.free[0] & ~(CPU_STRUCT_ALIGN - 1);
+ uint avail = pool->state.free[0] & ~(CPU_STRUCT_ALIGN - 1);
if (size <= avail)
{
pool->state.free[0] = avail - size;
/**
* Inlined version of @mp_alloc_noalign().
**/
-static inline void *mp_alloc_fast_noalign(struct mempool *pool, uns size)
+static inline void *mp_alloc_fast_noalign(struct mempool *pool, uint size)
{
if (size <= pool->state.free[0])
{
***/
/* For internal use only, do not call directly */
-void *mp_start_internal(struct mempool *pool, uns size) LIKE_MALLOC;
-void *mp_grow_internal(struct mempool *pool, uns size);
-void *mp_spread_internal(struct mempool *pool, void *p, uns size);
+void *mp_start_internal(struct mempool *pool, uint size) LIKE_MALLOC;
+void *mp_grow_internal(struct mempool *pool, uint size);
+void *mp_spread_internal(struct mempool *pool, void *p, uint size);
-static inline uns mp_idx(struct mempool *pool, void *ptr)
+static inline uint mp_idx(struct mempool *pool, void *ptr)
{
return ptr == pool->last_big;
}
* Keep in mind that you can't make any other pool allocations
* before you "close" the growing buffer with @mp_end().
*/
-void *mp_start(struct mempool *pool, uns size);
-void *mp_start_noalign(struct mempool *pool, uns size);
+void *mp_start(struct mempool *pool, uint size);
+void *mp_start_noalign(struct mempool *pool, uint size);
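A hedged sketch of the growing-buffer protocol around mp_start(); it assumes mp_end(pool, end) is the closing call referenced above and that it returns the start of the finished block:

  char *p = mp_start_noalign(mp, 4);    /* open a growing buffer, reserving a few bytes */
  p = mp_append_char(mp, p, 'h');       /* p always points just past the last byte */
  p = mp_append_char(mp, p, 'i');
  p = mp_append_char(mp, p, 0);         /* NUL-terminate */
  char *str = mp_end(mp, p);            /* close; assumed to return the block start, here "hi" */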
/**
* Inlined version of @mp_start().
**/
-static inline void *mp_start_fast(struct mempool *pool, uns size)
+static inline void *mp_start_fast(struct mempool *pool, uint size)
{
- uns avail = pool->state.free[0] & ~(CPU_STRUCT_ALIGN - 1);
+ uint avail = pool->state.free[0] & ~(CPU_STRUCT_ALIGN - 1);
if (size <= avail)
{
pool->idx = 0;
/**
* Inlined version of @mp_start_noalign().
**/
-static inline void *mp_start_fast_noalign(struct mempool *pool, uns size)
+static inline void *mp_start_fast_noalign(struct mempool *pool, uint size)
{
if (size <= pool->state.free[0])
{
* Return the number of bytes available for extending the growing buffer
* (before a reallocation becomes necessary).
**/
-static inline uns mp_avail(struct mempool *pool)
+static inline uint mp_avail(struct mempool *pool)
{
return pool->state.free[pool->idx];
}
* change its starting position. The content will be unchanged to the minimum
* of the old and new sizes; newly allocated memory will be uninitialized.
* Multiple calls to mp_grow() have amortized linear cost wrt. the maximum value of @size. */
-static inline void *mp_grow(struct mempool *pool, uns size)
+static inline void *mp_grow(struct mempool *pool, uint size)
{
return (size <= mp_avail(pool)) ? mp_ptr(pool) : mp_grow_internal(pool, size);
}
* Ensure that there is at least @size bytes free after @p,
* if not, reallocate and adjust @p.
**/
-static inline void *mp_spread(struct mempool *pool, void *p, uns size)
+static inline void *mp_spread(struct mempool *pool, void *p, uint size)
{
- return (((uns)((byte *)pool->state.last[pool->idx] - (byte *)p) >= size) ? p : mp_spread_internal(pool, p, size));
+ return (((uint)((byte *)pool->state.last[pool->idx] - (byte *)p) >= size) ? p : mp_spread_internal(pool, p, size));
}
/**
* the last byte in the buffer, returns a pointer after the last byte
* of the new (possibly reallocated) buffer.
**/
-static inline char *mp_append_char(struct mempool *pool, char *p, uns c)
+static inline char *mp_append_char(struct mempool *pool, char *p, uint c)
{
p = mp_spread(pool, p, 1);
*p++ = c;
* the last byte in the buffer, returns a pointer after the last byte
* of the new (possibly reallocated) buffer.
**/
-static inline void *mp_append_block(struct mempool *pool, void *p, const void *block, uns size)
+static inline void *mp_append_block(struct mempool *pool, void *p, const void *block, uint size)
{
char *q = mp_spread(pool, p, size);
memcpy(q, block, size);
/**
* Return size in bytes of the last allocated memory block (with @mp_alloc() or @mp_end()).
**/
-static inline uns mp_size(struct mempool *pool, void *ptr)
+static inline uint mp_size(struct mempool *pool, void *ptr)
{
- uns idx = mp_idx(pool, ptr);
+ uint idx = mp_idx(pool, ptr);
return ((byte *)pool->state.last[idx] - (byte *)ptr) - pool->state.free[idx];
}
* for growing and return its size in bytes. The contents and the start pointer
* remain unchanged. Do not forget to call @mp_end() to close it.
**/
-uns mp_open(struct mempool *pool, void *ptr);
+uint mp_open(struct mempool *pool, void *ptr);
/**
* Inlined version of @mp_open().
**/
-static inline uns mp_open_fast(struct mempool *pool, void *ptr)
+static inline uint mp_open_fast(struct mempool *pool, void *ptr)
{
pool->idx = mp_idx(pool, ptr);
- uns size = ((byte *)pool->state.last[pool->idx] - (byte *)ptr) - pool->state.free[pool->idx];
+ uint size = ((byte *)pool->state.last[pool->idx] - (byte *)ptr) - pool->state.free[pool->idx];
pool->state.free[pool->idx] += size;
return size;
}
* to the new @size. Behavior is similar to @mp_grow(), but the resulting
* block is closed.
**/
-void *mp_realloc(struct mempool *pool, void *ptr, uns size);
+void *mp_realloc(struct mempool *pool, void *ptr, uint size);
/**
* The same as @mp_realloc(), but fills the additional bytes (if any) with zeroes.
**/
-void *mp_realloc_zero(struct mempool *pool, void *ptr, uns size);
+void *mp_realloc_zero(struct mempool *pool, void *ptr, uint size);
/**
* Inlined version of @mp_realloc().
**/
-static inline void *mp_realloc_fast(struct mempool *pool, void *ptr, uns size)
+static inline void *mp_realloc_fast(struct mempool *pool, void *ptr, uint size)
{
mp_open_fast(pool, ptr);
ptr = mp_grow(pool, size);
***/
char *mp_strdup(struct mempool *, const char *) LIKE_MALLOC; /** Makes a copy of a string on a mempool. Returns NULL for NULL string. **/
-void *mp_memdup(struct mempool *, const void *, uns) LIKE_MALLOC; /** Makes a copy of a memory block on a mempool. **/
+void *mp_memdup(struct mempool *, const void *, uint) LIKE_MALLOC; /** Makes a copy of a memory block on a mempool. **/
/**
* Concatenates all passed strings. The last parameter must be NULL.
* This will concatenate two strings:
* @p is the mempool to provide memory, @a is an array of strings and @n
* tells how many of them there are.
**/
-char *mp_strjoin(struct mempool *p, char **a, uns n, uns sep) LIKE_MALLOC;
+char *mp_strjoin(struct mempool *p, char **a, uint n, uint sep) LIKE_MALLOC;
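Two illustrative calls, assuming @mp is an initialized mempool; a zero @sep suppresses the separator entirely, since the n-1 separators are only added when @sep is non-zero:

  char *parts[] = { "usr", "local", "bin" };
  char *path = mp_strjoin(mp, parts, 3, '/');   /* "usr/local/bin" */
  char *cat  = mp_strjoin(mp, parts, 3, 0);     /* "usrlocalbin" */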
/**
* Convert memory block to a string. Makes a copy of the given memory block
* in the mempool @p, adding an extra terminating zero byte at the end.
**/
-char *mp_str_from_mem(struct mempool *p, const void *mem, uns len) LIKE_MALLOC;
+char *mp_str_from_mem(struct mempool *p, const void *mem, uint len) LIKE_MALLOC;
/***
exit(0);
}
-void opt_conf_hook_internal(struct opt_item * opt, uns event, const char * value UNUSED, void * data) {
+void opt_conf_hook_internal(struct opt_item * opt, uint event, const char * value UNUSED, void * data) {
struct opt_context *oc = data;
struct cf_context *cc = cf_get_context();
opt_help_scan(&h, sec);
// Calculate natural width of each column
- uns n = GARY_SIZE(h.lines);
- uns widths[3] = { 0, 0, 0 };
- for (uns i=0; i<n; i++) {
+ uint n = GARY_SIZE(h.lines);
+ uint widths[3] = { 0, 0, 0 };
+ for (uint i=0; i<n; i++) {
struct help_line *l = &h.lines[i];
- for (uns f=0; f<3; f++) {
+ for (uint f=0; f<3; f++) {
if (!l->fields[f])
l->fields[f] = "";
- uns w = strlen(l->fields[f]);
+ uint w = strlen(l->fields[f]);
widths[f] = MAX(widths[f], w);
}
}
widths[1] += 4;
// Print columns
- for (uns i=0; i<n; i++) {
+ for (uint i=0; i<n; i++) {
struct help_line *l = &h.lines[i];
if (l->extra)
puts(l->extra);
else {
int t = 0;
- for (uns f=0; f<3; f++) {
+ for (uint f=0; f<3; f++) {
t += widths[f];
t -= printf("%s", l->fields[f]);
while (t > 0) {
.dumper = (cf_dumper1*) teapot_temperature_dumper
};
-static void opt_test_hook(struct opt_item * opt, uns event UNUSED, const char * value, void * data) {
+static void opt_test_hook(struct opt_item * opt, uint event UNUSED, const char * value, void * data) {
if (!show_hooks)
return;
if (opt)
printf("Chosen teapot: %s|", teapot_type_str[set]);
printf("Temperature: %d%s|", temperature.value, temp_scale_str[temperature.scale]);
printf("Verbosity: %d|", verbose);
- uns magick = GARY_SIZE(black_magic);
- for (uns i=0; i<magick; i++)
+ uint magick = GARY_SIZE(black_magic);
+ for (uint i=0; i<magick; i++)
printf("Black magic: %d|", black_magic[i]);
printf("Prayer: %s|", pray ? "yes" : "no");
printf("Clean: %s|", clean_pot ? "yes" : "no");
#include <alloca.h>
#include <math.h>
-static uns opt_default_value_flags[] = {
+static uint opt_default_value_flags[] = {
[OPT_CL_BOOL] = OPT_NO_VALUE,
[OPT_CL_STATIC] = OPT_MAYBE_VALUE,
[OPT_CL_MULTIPLE] = OPT_REQUIRED_VALUE,
opt->item = item;
opt->count = 0;
opt->name = item->name;
- uns flags = item->flags;
+ uint flags = item->flags;
if (item->letter >= OPT_POSITIONAL_TAIL) {
flags &= ~OPT_VALUE_FLAGS;
opt->flags = flags;
}
-static void opt_invoke_hooks(struct opt_context *oc, uns event, struct opt_item *item, char *value)
+static void opt_invoke_hooks(struct opt_context *oc, uint event, struct opt_item *item, char *value)
{
for (int i = 0; i < oc->hook_count; i++) {
struct opt_item *hook = oc->hooks[i];
}
static struct opt_precomputed * opt_find_item_longopt(struct opt_context * oc, char * str) {
- uns len = strlen(str);
+ uint len = strlen(str);
struct opt_precomputed * candidate = NULL;
for (int i = 0; i < oc->opt_count; i++) {
static int opt_longopt(struct opt_context * oc, char ** argv, int index) {
int eaten = 0;
char * name_in = argv[index] + 2; // skipping the -- on the beginning
- uns pos = strchrnul(name_in, '=') - name_in;
+ uint pos = strchrnul(name_in, '=') - name_in;
struct opt_precomputed * opt = opt_find_item_longopt(oc, strndupa(name_in, pos));
char * value = NULL;
static void opt_positional(struct opt_context * oc, char * value) {
oc->positional_count++;
- uns id = oc->positional_count > oc->positional_max ? OPT_POSITIONAL_TAIL : OPT_POSITIONAL(oc->positional_count);
+ uint id = oc->positional_count > oc->positional_max ? OPT_POSITIONAL_TAIL : OPT_POSITIONAL(oc->positional_count);
struct opt_precomputed * opt = oc->shortopt[id];
if (!opt)
opt_failure("Too many positional arguments.");
struct opt_section * section; // subsection for OPT_CL_SECTION
int value; // value for OPT_CL_SWITCH
void (* call)(struct opt_item * opt, const char * value, void * data); // function to call for OPT_CL_CALL
- void (* hook)(struct opt_item * opt, uns event, const char * value, void * data); // function to call for OPT_CL_HOOK
+ void (* hook)(struct opt_item * opt, uint event, const char * value, void * data); // function to call for OPT_CL_HOOK
struct cf_user_type * utype; // specification of the user-defined type for CT_USER
} u;
u16 flags; // as defined below (for hooks, event mask is stored instead)
void opt_handle_config(struct opt_item * opt, const char * value, void * data);
void opt_handle_set(struct opt_item * opt, const char * value, void * data);
void opt_handle_dumpconfig(struct opt_item * opt, const char * value, void * data);
-void opt_conf_hook_internal(struct opt_item * opt, uns event, const char * value, void * data);
+void opt_conf_hook_internal(struct opt_item * opt, uint event, const char * value, void * data);
// XXX: This is duplicated with <ucw/getopt.h>, but that one will hopefully go away one day.
/**
}
void
-partmap_load(struct partmap *p, ucw_off_t start, uns size)
+partmap_load(struct partmap *p, ucw_off_t start, uint size)
{
if (p->start_map)
munmap(p->start_map, p->end_off - p->start_off);
int main(int argc, char **argv)
{
struct partmap *p = partmap_open(argv[1], 0);
- uns l = partmap_size(p);
- uns i;
+ uint l = partmap_size(p);
+ uint i;
for (i=0; i<l; i++)
putchar(*(char *)partmap_map(p, i, 1));
partmap_close(p);
struct partmap *partmap_open(char *name, int writeable);
void partmap_close(struct partmap *p);
ucw_off_t partmap_size(struct partmap *p);
-void partmap_load(struct partmap *p, ucw_off_t start, uns size);
+void partmap_load(struct partmap *p, ucw_off_t start, uint size);
-static inline void *partmap_map(struct partmap *p, ucw_off_t start, uns size UNUSED)
+static inline void *partmap_map(struct partmap *p, ucw_off_t start, uint size UNUSED)
{
#ifndef CONFIG_UCW_PARTMAP_IS_MMAP
if (unlikely(!p->start_map || start < p->start_off || (ucw_off_t) (start+size) > p->end_off))
return p->start_map + (start - p->start_off);
}
-static inline void *partmap_map_forward(struct partmap *p, ucw_off_t start, uns size UNUSED)
+static inline void *partmap_map_forward(struct partmap *p, ucw_off_t start, uint size UNUSED)
{
#ifndef CONFIG_UCW_PARTMAP_IS_MMAP
if (unlikely((ucw_off_t) (start+size) > p->end_off))
#include <ucw/prime.h>
static int /* Sequential search */
-__isprime(uns x) /* We know x != 2 && x != 3 */
+__isprime(uint x) /* We know x != 2 && x != 3 */
{
- uns test = 5;
+ uint test = 5;
if (x == 5)
return 1;
}
int
-isprime(uns x)
+isprime(uint x)
{
if (x < 5)
return (x == 2 || x == 3);
}
}
-uns
-nextprime(uns x) /* Returns some prime greater than x */
+uint
+nextprime(uint x) /* Returns some prime greater than x */
{
x += 5 - (x % 6); /* x is 6k-1 */
for(;;)
int
main(int argc, char **argv)
{
- uns k = atol(argv[1]);
+ uint k = atol(argv[1]);
printf("%d is%s prime\n", k, isprime(k) ? "" : "n't");
printf("Next prime is %d\n", nextprime(k));
return 0;
* Return a non-zero value iff @x is a prime number.
* The time complexity is `O(sqrt(x))`.
**/
-int isprime(uns x);
+int isprime(uint x);
/**
* Return some prime greater than @x. The function does not checks overflows, but it should
* be safe at least for @x lower than `1U << 31`.
* If the Cramer's conjecture is true, it should have complexity `O(sqrt(x) * log(x)^2)`.
**/
-uns nextprime(uns x);
+uint nextprime(uint x);
/* primetable.c */
* Returns zero if there is no such prime (we guarantee the existence of at
* least one prime greater than `1U << 31` in the table).
**/
-uns next_table_prime(uns x);
+uint next_table_prime(uint x);
/**
* Quickly lookup a precomputed table to return a prime number smaller than @x.
* Returns zero if @x is smaller than `7`.
**/
-uns prev_table_prime(uns x);
+uint prev_table_prime(uint x);
#endif // _UCW_PRIME_H
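A typical use of both prime lookups, e.g. when growing a hash table; this assumes the requested size stays well below 1U << 31, as the overflow note above requires:

  uint n = 1000;
  uint exact  = nextprime(2 * n);           /* some prime > 2*n, found by trial division */
  uint approx = next_table_prime(2 * n);    /* nearest larger prime from the ~1.2x table */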
#include <ucw/binsearch.h>
/* A table of odd primes, each is about 1.2 times the previous one */
-static uns prime_table[] = {
+static uint prime_table[] = {
3,
7,
13,
#define NPRIMES ARRAY_SIZE(prime_table)
-uns
-next_table_prime(uns x)
+uint
+next_table_prime(uint x)
{
if (x >= prime_table[NPRIMES-1])
return 0;
return prime_table[BIN_SEARCH_FIRST_GE(prime_table, NPRIMES, x+1)];
}
-uns
-prev_table_prime(uns x)
+uint
+prev_table_prime(uint x)
{
int i = BIN_SEARCH_FIRST_GE(prime_table, NPRIMES, x);
return i ? prime_table[i-1] : 0;
int main(void)
{
#if 0 /* Generate the table */
- uns x = 3, xx;
+ uint x = 3, xx;
do
{
printf(" %u,\n", x);
#else
for (int i=1; i<=100; i++)
printf("%d\t%d\t%d\n", i, next_table_prime(i), prev_table_prime(i));
- for (uns i=0xfffffff0; i; i++)
+ for (uint i=0xfffffff0; i; i++)
printf("%u\t%u\t%u\n", i, next_table_prime(i), prev_table_prime(i));
return 0;
#endif
/* We expect the random generator in libc to give at least 30 bits of randomness */
COMPILE_ASSERT(RAND_MAX_RANGE_TEST, RAND_MAX >= (1 << 30)-1);
-uns
+uint
random_u32(void)
{
return (random() & 0xffff) | ((random() & 0xffff) << 16);
}
-uns
-random_max(uns max)
+uint
+random_max(uint max)
{
- uns r, l;
+ uint r, l;
ASSERT(max <= (1 << 30));
l = (RAND_MAX + 1U) - ((RAND_MAX + 1U) % max);
#define TREE_CONSERVE_SPACE
#include "redblack.h"
-static void random_string(char *txt, uns max_len)
+static void random_string(char *txt, uint max_len)
{
- uns len = random() % max_len;
- uns j;
+ uint len = random() % max_len;
+ uint j;
for (j=0; j<len; j++)
txt[j] = random() % 96 + 32;
txt[len] = 0;
* TREE_WANT_SEARCH_DOWN node *search_down(key) -- find either the node with
* specified value, or if it does not exist, the node
* with nearest smaller value.
- * TREE_WANT_BOUNDARY node *boundary(uns direction) -- finds smallest
+ * TREE_WANT_BOUNDARY node *boundary(uint direction) -- finds smallest
* (direction==0) or largest (direction==1) node.
- * TREE_WANT_ADJACENT node *adjacent(node *, uns direction) -- finds next
+ * TREE_WANT_ADJACENT node *adjacent(node *, uint direction) -- finds next
* (direction==1) or previous (direction==0) node.
* TREE_WANT_NEW node *new(key) -- create new node with given key.
* If a node with the same key already exists, the new node is added after it (as the last of the equal keys).
* and static strings, strcpy for end-allocated strings.
* TREE_GIVE_INIT_DATA void init_data(node *) -- initialize data fields in a
* newly created node. Very useful for lookup operations.
- * TREE_GIVE_ALLOC void *alloc(unsigned int size) -- allocate space for
+ * TREE_GIVE_ALLOC void *alloc(uint size) -- allocate space for
* a node. Default is either normal or pooled allocation
* depending on whether we want deletions.
* void free(void *) -- the converse.
struct P(bucket) *parent;
#endif
#if !defined(TREE_CONSERVE_SPACE) && (defined(TREE_GIVE_EXTRA_SIZE) || defined(TREE_KEY_ENDSTRING))
- uns red_flag:1;
+ uint red_flag:1;
#endif
P(node) n;
#if !defined(TREE_CONSERVE_SPACE) && !defined(TREE_GIVE_EXTRA_SIZE) && !defined(TREE_KEY_ENDSTRING)
- uns red_flag:1;
+ uint red_flag:1;
#endif
} P(bucket);
struct P(tree) {
- uns count;
- uns height; /* of black nodes */
+ uint count;
+ uint height; /* of black nodes */
P(bucket) *root;
};
typedef struct P(stack_entry) {
P(bucket) *buck;
- uns son;
+ uint son;
} P(stack_entry);
#define T struct P(tree)
#endif
#ifndef TREE_CONSERVE_SPACE
- static inline uns P(red_flag) (P(bucket) *node)
+ static inline uint P(red_flag) (P(bucket) *node)
{ return node->red_flag; }
- static inline void P(set_red_flag) (P(bucket) *node, uns flag)
+ static inline void P(set_red_flag) (P(bucket) *node, uint flag)
{ node->red_flag = flag; }
- static inline P(bucket) * P(tree_son) (P(bucket) *node, uns id)
+ static inline P(bucket) * P(tree_son) (P(bucket) *node, uint id)
{ return node->son[id]; }
- static inline void P(set_tree_son) (P(bucket) *node, uns id, P(bucket) *son)
+ static inline void P(set_tree_son) (P(bucket) *node, uint id, P(bucket) *son)
{ node->son[id] = son; }
#else
/* Pointers are aligned, hence we can use lower bits. */
- static inline uns P(red_flag) (P(bucket) *node)
+ static inline uint P(red_flag) (P(bucket) *node)
{ return ((uintptr_t) node->son[0]) & 1L; }
- static inline void P(set_red_flag) (P(bucket) *node, uns flag)
+ static inline void P(set_red_flag) (P(bucket) *node, uint flag)
{ node->son[0] = (void*) ( (((uintptr_t) node->son[0]) & ~1L) | (flag & 1L) ); }
- static inline P(bucket) * P(tree_son) (P(bucket) *node, uns id)
+ static inline P(bucket) * P(tree_son) (P(bucket) *node, uint id)
{ return (void *) (((uintptr_t) node->son[id]) & ~1L); }
- static inline void P(set_tree_son) (P(bucket) *node, uns id, P(bucket) *son)
+ static inline void P(set_tree_son) (P(bucket) *node, uint id, P(bucket) *son)
{ node->son[id] = (void *) ((uintptr_t) son | (((uintptr_t) node->son[id]) & 1L) ); }
#endif
#ifndef TREE_GIVE_ALLOC
# ifdef TREE_USE_POOL
- static inline void * P(alloc) (T *t UNUSED, unsigned int size)
+ static inline void * P(alloc) (T *t UNUSED, uint size)
{ return mp_alloc_fast(TREE_USE_POOL, size); }
# define TREE_SAFE_FREE(t, x)
# else
- static inline void * P(alloc) (T *t UNUSED, unsigned int size)
+ static inline void * P(alloc) (T *t UNUSED, uint size)
{ return xmalloc(size); }
static inline void P(free) (T *t UNUSED, void *x)
}
#endif
-static uns P(fill_stack) (P(stack_entry) *stack, uns max_depth, P(bucket) *node, TREE_KEY_DECL, uns son_id UNUSED)
+static uint P(fill_stack) (P(stack_entry) *stack, uint max_depth, P(bucket) *node, TREE_KEY_DECL, uint son_id UNUSED)
{
- uns i;
+ uint i;
stack[0].buck = node;
for (i=0; stack[i].buck; i++)
{
#ifdef TREE_DUPLICATES
if (stack[i].buck)
{
- uns idx;
+ uint idx;
/* Find first/last of equal keys according to son_id. */
idx = P(fill_stack) (stack+i+1, max_depth-i-1,
P(tree_son) (stack[i].buck, son_id), TREE_KEY(), son_id);
STATIC P(node) * P(find) (T *t, TREE_KEY_DECL)
{
P(stack_entry) stack[TREE_MAX_DEPTH];
- uns depth;
+ uint depth;
depth = P(fill_stack) (stack, TREE_MAX_DEPTH, t->root, TREE_KEY(), 0);
return stack[depth].buck ? &stack[depth].buck->n : NULL;
}
#endif
#ifdef TREE_WANT_BOUNDARY
-STATIC P(node) * P(boundary) (T *t, uns direction)
+STATIC P(node) * P(boundary) (T *t, uint direction)
{
P(bucket) *n = t->root, *ns;
if (!n)
return NULL;
else
{
- uns son = !!direction;
+ uint son = !!direction;
while ((ns = P(tree_son) (n, son)))
n = ns;
return &n->n;
#endif
#ifdef TREE_STORE_PARENT
-STATIC P(node) * P(adjacent) (P(node) *start, uns direction)
+STATIC P(node) * P(adjacent) (P(node) *start, uint direction)
{
P(bucket) *node = SKIP_BACK(P(bucket), n, start);
P(bucket) *next = P(tree_son) (node, direction);
#endif
#if defined(TREE_DUPLICATES) || defined(TREE_WANT_DELETE) || defined(TREE_WANT_REMOVE)
-static int P(find_next_node) (P(stack_entry) *stack, uns max_depth, uns direction)
+static int P(find_next_node) (P(stack_entry) *stack, uint max_depth, uint direction)
{
- uns depth = 0;
+ uint depth = 0;
if (stack[0].buck)
{
ASSERT(depth+1 < max_depth);
STATIC P(node) * P(search) (T *t, TREE_KEY_DECL)
{
P(stack_entry) stack[TREE_MAX_DEPTH];
- uns depth;
+ uint depth;
depth = P(fill_stack) (stack, TREE_MAX_DEPTH, t->root, TREE_KEY(), 0);
if (!stack[depth].buck)
{
#define TREE_TRACE(txt...)
#endif
-static inline P(bucket) * P(rotation) (P(bucket) *node, uns son_id)
+static inline P(bucket) * P(rotation) (P(bucket) *node, uint son_id)
{
/* Destroys red_flag's in node, son. Returns new root. */
P(bucket) *son = P(tree_son) (node, son_id);
return son;
}
-static void P(rotate_after_insert) (T *t, P(stack_entry) *stack, uns depth)
+static void P(rotate_after_insert) (T *t, P(stack_entry) *stack, uint depth)
{
P(bucket) *node;
P(bucket) *parent, *grand, *uncle;
{
P(stack_entry) stack[TREE_MAX_DEPTH];
P(bucket) *added;
- uns depth;
+ uint depth;
depth = P(fill_stack) (stack, TREE_MAX_DEPTH, t->root, TREE_KEY(), 1);
#ifdef TREE_DUPLICATES
/* It is the last found value, hence everything in the right subtree is
#if defined(TREE_WANT_REMOVE) || defined(TREE_WANT_DELETE)
static void P(rotate_after_delete) (T *t, P(stack_entry) *stack, int depth)
{
- uns iteration = 0;
+ uint iteration = 0;
P(bucket) *parent, *sibling, *instead;
- uns parent_red, del_son, sibl_red;
+ uint parent_red, del_son, sibl_red;
missing_black:
if (depth < 0)
{
if (!sibl_red)
{
P(bucket) *son[2];
- uns red[2];
+ uint red[2];
son[0] = P(tree_son) (sibling, 0);
son[1] = P(tree_son) (sibling, 1);
red[0] = son[0] ? P(red_flag) (son[0]) : 0;
} else /* sibl_red */
{
P(bucket) *grand[2], *son;
- uns red[2];
+ uint red[2];
ASSERT(!parent_red);
son = P(tree_son) (sibling, del_son);
ASSERT(son && !P(red_flag) (son));
t->root = instead;
}
-static void P(remove_by_stack) (T *t, P(stack_entry) *stack, uns depth)
+static void P(remove_by_stack) (T *t, P(stack_entry) *stack, uint depth)
{
P(bucket) *node = stack[depth].buck;
P(bucket) *son;
- uns i;
+ uint i;
for (i=0; i<depth; i++)
ASSERT(P(tree_son) (stack[i].buck, stack[i].son) == stack[i+1].buck);
if (P(tree_son) (node, 0) && P(tree_son) (node, 1))
{
P(bucket) *xchg;
- uns flag_node, flag_xchg;
- uns d = P(find_next_node) (stack+depth, TREE_MAX_DEPTH-depth, 1);
+ uint flag_node, flag_xchg;
+ uint d = P(find_next_node) (stack+depth, TREE_MAX_DEPTH-depth, 1);
ASSERT(d >= 2);
d--;
{
P(stack_entry) stack[TREE_MAX_DEPTH];
P(bucket) *node = SKIP_BACK(P(bucket), n, Node);
- uns depth = 0, i;
+ uint depth = 0, i;
stack[0].buck = node;
stack[0].son = 10;
while (node->parent)
STATIC int P(delete) (T *t, TREE_KEY_DECL)
{
P(stack_entry) stack[TREE_MAX_DEPTH];
- uns depth;
+ uint depth;
depth = P(fill_stack) (stack, TREE_MAX_DEPTH, t->root, TREE_KEY(), 1);
if (stack[depth].buck)
{
#endif
#ifdef TREE_WANT_DUMP
-static void P(dump_subtree) (struct fastbuf *fb, T *t, P(bucket) *node, P(bucket) *parent, int cmp_res, int level, uns black)
+static void P(dump_subtree) (struct fastbuf *fb, T *t, P(bucket) *node, P(bucket) *parent, int cmp_res, int level, uint black)
{
- uns flag;
+ uint flag;
int i;
if (!node)
{
/* And the iterator */
#ifdef TREE_WANT_ITERATOR
-static P(node) * P(first_node) (T *t, uns direction)
+static P(node) * P(first_node) (T *t, uint direction)
{
P(bucket) *node = t->root, *prev = NULL;
while (node)
}
int
-rx_subst(regex *r, const char *by, const char *src, char *dest, uns destlen)
+rx_subst(regex *r, const char *by, const char *src, char *dest, uint destlen)
{
char *end = dest + destlen - 1;
by++;
if (*by >= '0' && *by <= '9') /* \0 gets replaced by entire pattern */
{
- uns j = *by++ - '0';
+ uint j = *by++ - '0';
if (j <= r->rx.re_nsub && r->matches[j].rm_so >= 0)
{
const char *s = src + r->matches[j].rm_so;
- uns i = r->matches[j].rm_eo - r->matches[j].rm_so;
+ uint i = r->matches[j].rm_eo - r->matches[j].rm_so;
if (dest + i >= end)
return -1;
memcpy(dest, s, i);
struct regex {
pcre *rx;
pcre_extra *extra;
- uns match_array_size;
- uns real_matches;
+ uint match_array_size;
+ uint real_matches;
int matches[0]; /* (max_matches+1) pairs (pos,len) plus some workspace */
};
}
int
-rx_subst(regex *r, const char *by, const char *src, char *dest, uns destlen)
+rx_subst(regex *r, const char *by, const char *src, char *dest, uint destlen)
{
char *end = dest + destlen - 1;
by++;
if (*by >= '0' && *by <= '9') /* \0 gets replaced by entire pattern */
{
- uns j = *by++ - '0';
+ uint j = *by++ - '0';
if (j < r->real_matches && r->matches[2*j] >= 0)
{
const char *s = src + r->matches[2*j];
- uns i = r->matches[2*j+1] - r->matches[2*j];
+ uint i = r->matches[2*j+1] - r->matches[2*j];
if (dest + i >= end)
return -1;
memcpy(dest, s, i);
r->buf.allocated = INITIAL_MEM;
if (icase)
{
- unsigned i;
+ uint i;
r->buf.translate = xmalloc (CHAR_SET_SIZE);
/* Map uppercase characters to corresponding lowercase ones. */
for (i = 0; i < CHAR_SET_SIZE; i++)
}
int
-rx_subst(regex *r, const char *by, const char *src, char *dest, uns destlen)
+rx_subst(regex *r, const char *by, const char *src, char *dest, uint destlen)
{
char *end = dest + destlen - 1;
by++;
if (*by >= '0' && *by <= '9') /* \0 gets replaced by entire pattern */
{
- uns j = *by++ - '0';
+ uint j = *by++ - '0';
if (j < r->regs.num_regs)
{
const char *s = src + r->regs.start[j];
- uns i = r->regs.end[j] - r->regs.start[j];
+ uint i = r->regs.end[j] - r->regs.start[j];
if (r->regs.start[j] > r->len_cache || r->regs.end[j] > r->len_cache)
return -1;
if (dest + i >= end)
regex *rx_compile(const char *r, int icase);
void rx_free(regex *r);
int rx_match(regex *r, const char *s);
-int rx_subst(regex *r, const char *by, const char *src, char *dest, uns destlen);
+int rx_subst(regex *r, const char *by, const char *src, char *dest, uint destlen);
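A hedged sketch of the API above; the template semantics of rx_subst() (backslash-digit group references expanded into @dest, -1 returned when the buffer is too small) are inferred from the hunks shown, and rx_match() is assumed to fill the match registers that rx_subst() reads:

  regex *r = rx_compile("(a+)b", 0);      /* icase = 0: case-sensitive */
  char out[64];
  if (rx_match(r, "aaab") && rx_subst(r, "\\1", "aaab", out, sizeof(out)) >= 0)
    puts(out);                            /* prints the first group: "aaa" */
  rx_free(r);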
#endif
}
static void
-ep_res_dump(struct resource *r, uns indent UNUSED)
+ep_res_dump(struct resource *r, uint indent UNUSED)
{
printf(" pool=%p\n", r->priv);
}
}
static void
-fd_res_dump(struct resource *r, uns indent UNUSED)
+fd_res_dump(struct resource *r, uint indent UNUSED)
{
printf(" fd=%d\n", (int)(intptr_t) r->priv);
}
}
static void
-mem_res_dump(struct resource *r, uns indent UNUSED)
+mem_res_dump(struct resource *r, uint indent UNUSED)
{
struct res_mem *rm = (struct res_mem *) r;
printf(" size=%zu, ptr=%p\n", rm->size, r->priv);
}
static void
-mp_res_dump(struct resource *r, uns indent UNUSED)
+mp_res_dump(struct resource *r, uint indent UNUSED)
{
printf(" pool=%p\n", r->priv);
}
}
static void
-subpool_res_dump(struct resource *r, uns indent)
+subpool_res_dump(struct resource *r, uint indent)
{
printf(":\n");
rp_dump(r->priv, indent);
}
void
-rp_dump(struct respool *rp, uns indent)
+rp_dump(struct respool *rp, uint indent)
{
printf("%*sResource pool %s at %p (%s)%s:\n",
indent, "",
{
struct respool *rp = rp_current();
ASSERT(rp);
- uns size = (rc->res_size ? : sizeof(struct resource));
+ uint size = (rc->res_size ? : sizeof(struct resource));
struct resource *r;
if (rp->mpool)
{
}
void
-res_dump(struct resource *r, uns indent)
+res_dump(struct resource *r, uint indent)
{
printf("%*s%p %s %s", indent, "", r, ((r->flags & RES_FLAG_TEMP) ? "TEMP" : "PERM"), r->rclass->name);
if (r->rclass->dump)
const char *name;
struct mempool *mpool; // If set, resources are allocated from the mempool, otherwise by xmalloc()
struct resource *subpool_of;
- uns default_res_flags; // RES_FLAG_xxx for newly allocated resources
+ uint default_res_flags; // RES_FLAG_xxx for newly allocated resources
};
/**
struct resource {
cnode n;
struct respool *rpool;
- uns flags; // RES_FLAG_xxx
+ uint flags; // RES_FLAG_xxx
const struct res_class *rclass;
void *priv; // Private to the class
// More data specific for the particular class can follow
void rp_delete(struct respool *rp); /** Deletes a resource pool, freeing all resources. **/
void rp_detach(struct respool *rp); /** Deletes a resource pool, detaching all resources. **/
void rp_commit(struct respool *rp); /** Deletes a resource pool. Temporary resources are freed, stable resources are detached. **/
-void rp_dump(struct respool *rp, uns indent); /** Prints out a debugging dump of a pool to stdout. **/
+void rp_dump(struct respool *rp, uint indent); /** Prints out a debugging dump of a pool to stdout. **/
/** Returns a pointer to the currently active resource pool or NULL, if none exists. **/
static inline struct respool *rp_current(void)
struct resource *res_alloc(const struct res_class *rc) LIKE_MALLOC; // Dies if there is no pool active
-void res_dump(struct resource *r, uns indent); /** Prints out a debugging dump of the resource to stdout. **/
+void res_dump(struct resource *r, uint indent); /** Prints out a debugging dump of the resource to stdout. **/
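A hedged sketch of a pool's life cycle; rp_new() and rp_switch() are assumed constructor/selector calls not shown in this patch, while rp_dump() and rp_delete() are the declarations above:

  struct respool *rp = rp_new("worker", NULL);   /* assumed: name + optional mempool */
  struct respool *old = rp_switch(rp);           /* assumed: make it the current pool */
  /* ... allocate file descriptors, memory, ... as resources ... */
  rp_dump(rp, 0);                                /* debugging dump, no indentation */
  rp_switch(old);
  rp_delete(rp);                                 /* frees everything left inside */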
/**
* Frees a resource, unlinking it from its pool.
const char *name; // The name of the class (included in debugging dumps)
void (*detach)(struct resource *r); // The callbacks
void (*free)(struct resource *r);
- void (*dump)(struct resource *r, uns indent);
- uns res_size; // Size of the resource structure (0=default)
+ void (*dump)(struct resource *r, uint indent);
+ uint res_size; // Size of the resource structure (0=default)
};
/**
#include <string.h>
void
-sha1_hmac_init(sha1_hmac_context *hd, const byte *key, uns keylen)
+sha1_hmac_init(sha1_hmac_context *hd, const byte *key, uint keylen)
{
byte keybuf[SHA1_BLOCK_SIZE], buf[SHA1_BLOCK_SIZE];
}
void
-sha1_hmac_update(sha1_hmac_context *hd, const byte *data, uns datalen)
+sha1_hmac_update(sha1_hmac_context *hd, const byte *data, uint datalen)
{
// Just update the inner digest
sha1_update(&hd->ictx, data, datalen);
}
void
-sha1_hmac(byte *outbuf, const byte *key, uns keylen, const byte *data, uns datalen)
+sha1_hmac(byte *outbuf, const byte *key, uint keylen, const byte *data, uint datalen)
{
sha1_hmac_context hd;
sha1_hmac_init(&hd, key, keylen);
#include <stdio.h>
#include <ucw/string.h>
-static uns rd(char *dest)
+static uint rd(char *dest)
{
char buf[1024];
if (!fgets(buf, sizeof(buf), stdin))
{
char key[1024], data[1024];
byte hmac[SHA1_SIZE];
- uns kl = rd(key);
- uns dl = rd(data);
+ uint kl = rd(key);
+ uint dl = rd(data);
sha1_hmac(hmac, key, kl, data, dl);
mem_to_hex(data, hmac, SHA1_SIZE, 0);
puts(data);
* of INBUF with length INLEN.
*/
void
-sha1_update(sha1_context *hd, const byte *inbuf, uns inlen)
+sha1_update(sha1_context *hd, const byte *inbuf, uint inlen)
{
if( hd->count == 64 ) /* flush the buffer */
{
* into outbuf which must have a size of 20 bytes.
*/
void
-sha1_hash_buffer(byte *outbuf, const byte *buffer, uns length)
+sha1_hash_buffer(byte *outbuf, const byte *buffer, uint length)
{
sha1_context hd;
* @sha1_init()). It has the same effect as concatenating all the data
* together and passing them at once.
*/
-void sha1_update(sha1_context *hd, const byte *inbuf, uns inlen);
+void sha1_update(sha1_context *hd, const byte *inbuf, uint inlen);
/**
* No more @sha1_update() calls will be done. This terminates the hash
* and returns a pointer to it.
* sha1_update(&hd, buffer, length);
* memcpy(outbuf, sha1_final(&hd), SHA1_SIZE);
*/
-void sha1_hash_buffer(byte *outbuf, const byte *buffer, uns length);
+void sha1_hash_buffer(byte *outbuf, const byte *buffer, uint length);
/**
* SHA1 HMAC message authentication. If you provide @key and @data,
* the result will be stored in @outbuf.
*/
-void sha1_hmac(byte *outbuf, const byte *key, uns keylen, const byte *data, uns datalen);
+void sha1_hmac(byte *outbuf, const byte *key, uint keylen, const byte *data, uint datalen);
/**
* The HMAC also exists in a stream version in a way analogous to the
sha1_context octx;
} sha1_hmac_context;
-void sha1_hmac_init(sha1_hmac_context *hd, const byte *key, uns keylen); /** Initialize HMAC with context @hd and the given key. See sha1_init(). */
-void sha1_hmac_update(sha1_hmac_context *hd, const byte *data, uns datalen); /** Hash another @datalen bytes of data. See sha1_update(). */
+void sha1_hmac_init(sha1_hmac_context *hd, const byte *key, uint keylen); /** Initialize HMAC with context @hd and the given key. See sha1_init(). */
+void sha1_hmac_update(sha1_hmac_context *hd, const byte *data, uint datalen); /** Hash another @datalen bytes of data. See sha1_update(). */
byte *sha1_hmac_final(sha1_hmac_context *hd); /** Terminate the HMAC and return a pointer to the allocated hash. See sha1_final(). */
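The streaming variant is equivalent to calling sha1_hmac() on the concatenation of all the chunks; key/keylen and part1/len1, part2/len2 below stand for caller-supplied buffers:

  sha1_hmac_context hd;
  byte mac[SHA1_SIZE];
  sha1_hmac_init(&hd, key, keylen);
  sha1_hmac_update(&hd, part1, len1);
  sha1_hmac_update(&hd, part2, len2);
  memcpy(mac, sha1_hmac_final(&hd), SHA1_SIZE);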
#define SHA1_SIZE 20 /** Size of the SHA1 hash in its binary representation **/
struct item {
cnode node;
- uns flags;
+ uint flags;
struct cf_item cf;
union value value;
- uns index;
+ uint index;
};
struct section {
struct item item;
clist list;
- uns count;
- uns size;
+ uint count;
+ uint size;
};
static struct mempool *pool;
byte *name = pos;
while (Cword(*pos))
pos++;
- uns len = pos - name;
+ uint len = pos - name;
if (!len)
die("Expected item/section name");
byte *buf = mp_alloc(pool, len + 1);
parse_section(struct section *section)
{
#define TRY(x) do{ byte *_err=(x); if (_err) die("%s", _err); }while(0)
- for (uns sep = 0; ; sep = 1)
+ for (uint sep = 0; ; sep = 1)
{
parse_white();
if (!*pos || *pos == '}')
{
pos++;
byte *start = d;
- uns esc = 0;
+ uint esc = 0;
while (*pos != '"' || esc)
{
if (!*pos)
else
*d++ = *pos++;
}
- uns len = d - def;
+ uint len = d - def;
byte *buf = mp_alloc(pool, len + 1);
memcpy(buf, def, len);
buf[len] = 0;
static void
parse_outer(void)
{
- for (uns sep = 0; ; sep = 1)
+ for (uint sep = 0; ; sep = 1)
{
parse_white();
if (!*pos)
static bb_t path;
static void
-dump_value(uns array, struct item *item, void *v)
+dump_value(uint array, struct item *item, void *v)
{
byte buf[128], *value = buf;
if (!array)
}
static void
-dump_item(struct item *item, void *ptr, uns path_len)
+dump_item(struct item *item, void *ptr, uint path_len)
{
if (item->flags & FLAG_HIDE)
return;
byte *val = (byte *)((uintptr_t)ptr + (uintptr_t)item->cf.ptr);
if (item->cf.cls == CC_LIST)
{
- uns len = strlen(item->cf.name);
+ uint len = strlen(item->cf.name);
bb_grow(&path, path_len + len + 1);
path.ptr[path_len] = '_';
memcpy(path.ptr + path_len + 1, item->cf.name, len);
else
{
val = *(void **)val;
- uns len = DARY_LEN(val);
- uns size = cf_type_size(item->cf.type, NULL);
- for (uns i = 0; i < len; i++, val += size)
+ uint len = DARY_LEN(val);
+ uint size = cf_type_size(item->cf.type, NULL);
+ for (uint i = 0; i < len; i++, val += size)
dump_value(1, item, val);
}
}
bb_init(&path);
CLIST_FOR_EACH(struct section *, section, sections)
{
- uns len = strlen(section->item.cf.name);
+ uint len = strlen(section->item.cf.name);
memcpy(bb_grow(&path, len), section->item.cf.name, len);
CLIST_FOR_EACH(struct item *, item, section->list)
dump_item(item, NULL, len);
else
log_init(argv[1]);
- uns level = 0;
+ uint level = 0;
while (level < L_MAX && LS_LEVEL_LETTER(level) != argv[2][0])
level++;
if (level >= L_MAX)
#include <sys/wait.h>
#include <errno.h>
-static uns max_line = 1024;
+static uint max_line = 1024;
static int launch_finish_messages = 1;
static int nonzero_status_message = 1;
static struct cf_section cfsec_logoutput = {
CF_ITEMS {
- CF_UNS("LineMax", &max_line),
+ CF_UINT("LineMax", &max_line),
CF_END
}
};
cnode node;
int pipe[2];
int fdnum;
- uns level;
+ uint level;
int long_continue;
struct main_rec_io rio;
};
fd->long_continue = long_continue;
}
-static uns
+static uint
handle_read(struct main_rec_io *r)
{
char buf[max_line + 5];
}
*eol = 0;
byte *b = r->read_rec_start;
- while ((uns)(eol - b) > max_line) {
+ while ((uint)(eol - b) > max_line) {
char cc = b[max_line];
b[max_line] = 0;
do_msg(r->data, b, 1);
parseerror = 1;
if (parseerror) die("Bad argument `%s' to -l, expects number:letter.", optarg);
- uns level = 0;
+ uint level = 0;
while (level < L_MAX && LS_LEVEL_LETTER(level) != c[0])
level++;
if (level >= L_MAX)
int
sig_name_to_number(const char *name)
{
- for (uns i=0; i < ARRAY_SIZE(sig_names); i++)
+ for (uint i=0; i < ARRAY_SIZE(sig_names); i++)
if (!strcmp(sig_names[i].name, name))
return sig_names[i].number;
return -1;
const char *
sig_number_to_name(int number)
{
- for (uns i=0; i < ARRAY_SIZE(sig_names); i++)
+ for (uint i=0; i < ARRAY_SIZE(sig_names); i++)
if (sig_names[i].number == number)
return sig_names[i].name;
return NULL;
char *s;
void *p;
int i;
- uns u;
+ uint u;
};
} simp_node;
char *s1;
void *p1;
int i1;
- uns u1;
+ uint u1;
};
union {
char *s2;
void *p2;
int i2;
- uns u2;
+ uint u2;
};
} simp2_node;
/**
* Compute the number of nodes in @l. Beware linear time complexity.
**/
-static inline uns slist_size(slist *l)
+static inline uint slist_size(slist *l)
{
- uns i = 0;
+ uint i = 0;
SLIST_FOR_EACH(snode *, n, *l)
i++;
return i;
* ASORT_EXTRA_ARGS extra arguments for the sort function (they are always
* visible in all the macros supplied above); the list must start with a comma
*
- * After including this file, a function ASORT_PREFIX(sort)(uns array_size)
- * or ASORT_PREFIX(sort)(ASORT_KEY_TYPE *array, uns array_size) [if ASORT_ELT
+ * After including this file, a function ASORT_PREFIX(sort)(uint array_size)
+ * or ASORT_PREFIX(sort)(ASORT_KEY_TYPE *array, uint array_size) [if ASORT_ELT
* is not defined] is declared and all parameter macros are automatically
* undef'd.
*/
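/* An illustrative instantiation (a sketch only; the prefix is made up and
 * the default `<' comparison is this example's assumption), in the spirit
 * of the test code elsewhere in this patch:
 *
 *   #define ASORT_PREFIX(x) intarr_##x
 *   #define ASORT_KEY_TYPE int
 *   #include <ucw/sorter/array-simple.h>
 *
 * This declares static void intarr_sort(int *array, uint array_size). */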
* sorted. If the macro is provided, this parameter is omitted. In that case,
* you can sort global variables or pass your structure by @ASORT_EXTRA_ARGS.
**/
-static void ASORT_PREFIX(sort)(ASORT_ARRAY_ARG uns array_size ASORT_EXTRA_ARGS)
+static void ASORT_PREFIX(sort)(ASORT_ARRAY_ARG uint array_size ASORT_EXTRA_ARGS)
{
- struct stk { int l, r; } stack[8*sizeof(uns)];
+ struct stk { int l, r; } stack[8*sizeof(uint)];
int l, r, left, right, m;
- uns sp = 0;
+ uint sp = 0;
ASORT_KEY_TYPE pivot;
if (array_size <= 1)
#define ASORT_XTRACE(level, x...) do { if (sorter_trace_array >= level) msg(L_DEBUG, x); } while(0)
static void
-asort_radix(struct asort_context *ctx, void *array, void *buffer, uns num_elts, uns hash_bits, uns swapped_output)
+asort_radix(struct asort_context *ctx, void *array, void *buffer, uint num_elts, uint hash_bits, uint swapped_output)
{
// swapped_output == 0 if the result should be returned in `array', otherwise in `buffer'
- uns buckets = (1 << ctx->radix_bits);
- uns shift = (hash_bits > ctx->radix_bits) ? (hash_bits - ctx->radix_bits) : 0;
- uns cnt[buckets];
+ uint buckets = (1 << ctx->radix_bits);
+ uint shift = (hash_bits > ctx->radix_bits) ? (hash_bits - ctx->radix_bits) : 0;
+ uint cnt[buckets];
#if 0
static int reported[64];
bzero(cnt, sizeof(cnt));
ctx->radix_count(array, num_elts, cnt, shift);
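// Exclusive prefix sum: turn the per-bucket counts into starting offsets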
- uns pos = 0;
- for (uns i=0; i<buckets; i++)
+ uint pos = 0;
+ for (uint i=0; i<buckets; i++)
{
- uns j = cnt[i];
+ uint j = cnt[i];
cnt[i] = pos;
pos += j;
}
ctx->radix_split(array, buffer, num_elts, cnt, shift);
pos = 0;
- for (uns i=0; i<buckets; i++)
+ for (uint i=0; i<buckets; i++)
{
- uns n = cnt[i] - pos;
+ uint n = cnt[i] - pos;
if (n < ctx->radix_threshold || shift < ASORT_MIN_SHIFT)
{
ctx->quicksort(buffer, n);
#include <ucw/workqueue.h>
#include <ucw/eltpool.h>
-static uns asort_threads_use_count;
-static uns asort_threads_ready;
+static uint asort_threads_use_count;
+static uint asort_threads_ready;
static struct worker_pool asort_thread_pool;
-static uns
+static uint
rs_estimate_stack(void)
{
// Stack space needed by the recursive radix-sorter
- uns ctrsize = sizeof(uns) * (1 << CONFIG_UCW_RADIX_SORTER_BITS);
- uns maxdepth = (64 / CONFIG_UCW_RADIX_SORTER_BITS) + 1;
+ uint ctrsize = sizeof(uint) * (1 << CONFIG_UCW_RADIX_SORTER_BITS);
+ uint maxdepth = (64 / CONFIG_UCW_RADIX_SORTER_BITS) + 1;
return ctrsize * maxdepth;
}
void
-asort_start_threads(uns run)
+asort_start_threads(uint run)
{
ucwlib_lock();
asort_threads_use_count++;
struct work w;
struct asort_context *ctx;
void *array;
- uns num_elts;
+ uint num_elts;
int left, right;
#define LR_UNDEF -100
};
struct work w;
struct asort_context *ctx;
void *array, *buffer; // Like asort_radix().
- uns num_elts;
- uns shift;
- uns swap_output;
- uns cnt[0];
+ uint num_elts;
+ uint shift;
+ uint swap_output;
+ uint cnt[0];
};
static void
}
static void
-rs_radix(struct asort_context *ctx, void *array, void *buffer, uns num_elts, uns hash_bits, uns swapped_output)
+rs_radix(struct asort_context *ctx, void *array, void *buffer, uint num_elts, uint hash_bits, uint swapped_output)
{
- uns buckets = (1 << ctx->radix_bits);
- uns shift = (hash_bits > ctx->radix_bits) ? (hash_bits - ctx->radix_bits) : 0;
- uns cnt[buckets];
- uns blksize = num_elts / sorter_threads;
+ uint buckets = (1 << ctx->radix_bits);
+ uint shift = (hash_bits > ctx->radix_bits) ? (hash_bits - ctx->radix_bits) : 0;
+ uint cnt[buckets];
+ uint blksize = num_elts / sorter_threads;
DBG(">>> n=%u h=%d s=%d blk=%u sw=%d", num_elts, hash_bits, shift, blksize, swapped_output);
// If there are any small chunks in progress, wait for them to finish
// Start parallel counting
void *iptr = array;
- for (uns i=0; i<sorter_threads; i++)
+ for (uint i=0; i<sorter_threads; i++)
{
struct rs_work *w = ctx->rs_works[i];
w->w.priority = 0;
w->num_elts += num_elts % sorter_threads;
w->shift = shift;
iptr += w->num_elts * ctx->elt_size;
- bzero(w->cnt, sizeof(uns) * buckets);
+ bzero(w->cnt, sizeof(uint) * buckets);
work_submit(ctx->rs_work_queue, &w->w);
}
// Get bucket sizes from the counts
bzero(cnt, sizeof(cnt));
- for (uns i=0; i<sorter_threads; i++)
+ for (uint i=0; i<sorter_threads; i++)
{
struct rs_work *w = (struct rs_work *) work_wait(ctx->rs_work_queue);
ASSERT(w);
- for (uns j=0; j<buckets; j++)
+ for (uint j=0; j<buckets; j++)
cnt[j] += w->cnt[j];
}
// Calculate bucket starts
- uns pos = 0;
- for (uns i=0; i<buckets; i++)
+ uint pos = 0;
+ for (uint i=0; i<buckets; i++)
{
- uns j = cnt[i];
+ uint j = cnt[i];
cnt[i] = pos;
pos += j;
}
ASSERT(pos == num_elts);
// Start parallel splitting
- for (uns i=0; i<sorter_threads; i++)
+ for (uint i=0; i<sorter_threads; i++)
{
struct rs_work *w = ctx->rs_works[i];
w->w.go = rs_split;
- for (uns j=0; j<buckets; j++)
+ for (uint j=0; j<buckets; j++)
{
- uns k = w->cnt[j];
+ uint k = w->cnt[j];
w->cnt[j] = cnt[j];
cnt[j] += k;
}
// Recurse on buckets
pos = 0;
- for (uns i=0; i<buckets; i++)
+ for (uint i=0; i<buckets; i++)
{
- uns n = cnt[i] - pos;
+ uint n = cnt[i] - pos;
if (!n)
continue;
if (n < ctx->thread_threshold || shift < ASORT_MIN_SHIFT)
}
static void
-threaded_radixsort(struct asort_context *ctx, uns swap)
+threaded_radixsort(struct asort_context *ctx, uint swap)
{
struct work_queue q;
// We use big_alloc(), because we want to avoid cacheline aliasing between threads.
ctx->rs_work_queue = &q;
ctx->rs_works = alloca(sizeof(struct rs_work *) * sorter_threads);
- for (uns i=0; i<sorter_threads; i++)
- ctx->rs_works[i] = big_alloc(sizeof(struct rs_work) + sizeof(uns) * (1 << ctx->radix_bits));
+ for (uint i=0; i<sorter_threads; i++)
+ ctx->rs_works[i] = big_alloc(sizeof(struct rs_work) + sizeof(uint) * (1 << ctx->radix_bits));
// Prepare a pool for all remaining small bits, which will be sorted in the background.
ctx->eltpool = ep_new(sizeof(struct rs_work), 1000);
// Do the big splitting
rs_radix(ctx, ctx->array, ctx->buffer, ctx->num_elts, ctx->hash_bits, swap);
- for (uns i=0; i<sorter_threads; i++)
- big_free(ctx->rs_works[i], sizeof(struct rs_work) + sizeof(uns) * (1 << ctx->radix_bits));
+ for (uint i=0; i<sorter_threads; i++)
+ big_free(ctx->rs_works[i], sizeof(struct rs_work) + sizeof(uint) * (1 << ctx->radix_bits));
// Finish the small blocks
rs_wait_small(ctx);
#else
-void asort_start_threads(uns run UNUSED) { }
+void asort_start_threads(uint run UNUSED) { }
void asort_stop_threads(void) { }
#endif
-static uns
+static uint
predict_swap(struct asort_context *ctx)
{
- uns bits = ctx->radix_bits;
- uns elts = ctx->num_elts;
- uns swap = 0;
+ uint bits = ctx->radix_bits;
+ uint elts = ctx->num_elts;
+ uint swap = 0;
while (elts >= ctx->radix_threshold && bits >= ASORT_MIN_SHIFT)
{
ASORT_TRACE("Array-sorting %u items per %u bytes, hash_bits=%d", ctx->num_elts, ctx->elt_size, ctx->hash_bits);
ASORT_XTRACE(2, "Limits: thread_threshold=%u, thread_chunk=%u, radix_threshold=%u",
- ctx->thread_threshold, ctx->thread_chunk, ctx->radix_threshold);
- uns allow_threads UNUSED = (sorter_threads > 1 &&
+ ctx->thread_threshold, ctx->thread_chunk, ctx->radix_threshold);
+ uint allow_threads UNUSED = (sorter_threads > 1 &&
ctx->num_elts >= ctx->thread_threshold &&
!(sorter_debug & SORT_DEBUG_ASORT_NO_THREADS));
}
else
{
- uns swap = predict_swap(ctx);
+ uint swap = predict_swap(ctx);
#ifdef CONFIG_UCW_THREADS
if (allow_threads)
{
* radix-sorting.
*
* After including this file, a function
- * ASORT_KEY_TYPE *ASORT_PREFIX(sort)(ASORT_KEY_TYPE *array, uns num_elts [, ASORT_KEY_TYPE *buf, uns hash_bits])
+ * ASORT_KEY_TYPE *ASORT_PREFIX(sort)(ASORT_KEY_TYPE *array, uint num_elts [, ASORT_KEY_TYPE *buf, uint hash_bits])
* is declared and all parameter macros are automatically undef'd. Here `buf' is an
* auxiliary buffer of the same size as the input array, required whenever radix
* sorting should be used, and `hash_bits' is the number of significant bits returned
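*
* For illustration (a sketch, not part of this patch; the prefix and the
* identity hash are assumptions of this example), a radix-capable
* instantiation for u32 keys:
*
*   #define ASORT_PREFIX(x) u32r_##x
*   #define ASORT_KEY_TYPE u32
*   #define ASORT_HASH(x) (x)
*   #include <ucw/sorter/array.h>
*
*   u32 *sorted = u32r_sort(array, n, buf, 32);  // may end up in `buf', not `array'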
/* QuickSort with optimizations à la Sedgewick, inspired by qsort() from GNU libc. */
-static void Q(quicksort)(void *array_ptr, uns num_elts)
+static void Q(quicksort)(void *array_ptr, uint num_elts)
{
Q(key) *array = array_ptr;
- struct stk { int l, r; } stack[8*sizeof(uns)];
+ struct stk { int l, r; } stack[8*sizeof(uint)];
int l, r, left, right, m;
- uns sp = 0;
+ uint sp = 0;
Q(key) pivot;
if (num_elts <= 1)
/* Just the splitting part of QuickSort */
-static void Q(quicksplit)(void *array_ptr, uns num_elts, int *leftp, int *rightp)
+static void Q(quicksplit)(void *array_ptr, uint num_elts, int *leftp, int *rightp)
{
Q(key) *array = array_ptr;
int l, r, m;
#ifdef ASORT_HASH
-static void Q(radix_count)(void *src_ptr, uns num_elts, uns *cnt, uns shift)
+static void Q(radix_count)(void *src_ptr, uint num_elts, uint *cnt, uint shift)
{
Q(key) *src = src_ptr;
- uns i;
+ uint i;
switch (shift)
{
#undef RC
}
-static void Q(radix_split)(void *src_ptr, void *dest_ptr, uns num_elts, uns *ptrs, uns shift)
+static void Q(radix_split)(void *src_ptr, void *dest_ptr, uint num_elts, uint *ptrs, uint shift)
{
Q(key) *src = src_ptr, *dest = dest_ptr;
- uns i;
+ uint i;
switch (shift)
{
#endif
#ifdef ASORT_HASH
-#define ASORT_HASH_ARGS , Q(key) *buffer, uns hash_bits
+#define ASORT_HASH_ARGS , Q(key) *buffer, uint hash_bits
#else
#define ASORT_HASH_ARGS
#endif
* the `ASORT_HASH_ARGS` is empty (there are only the two parameters in that
* case). When you provide it, the function gains two more parameters in the
* `ASORT_HASH_ARGS` macro. They are `ASORT_KEY_TYPE *@buffer`, which must be a
- * memory buffer of the same size as the input array, and `uns @hash_bits`,
+ * memory buffer of the same size as the input array, and `uint @hash_bits`,
* specifying how many significant bits the hash function returns.
*
* The function returns pointer to the sorted data, either the @array or the
* @buffer argument.
**/
-static ASORT_KEY_TYPE *ASORT_PREFIX(sort)(ASORT_KEY_TYPE *array, uns num_elts ASORT_HASH_ARGS)
+static ASORT_KEY_TYPE *ASORT_PREFIX(sort)(ASORT_KEY_TYPE *array, uint num_elts ASORT_HASH_ARGS)
{
struct asort_context ctx = {
.array = array,
#endif
/* Configuration variables */
-extern uns sorter_trace, sorter_trace_array, sorter_stream_bufsize;
-extern uns sorter_debug, sorter_min_radix_bits, sorter_max_radix_bits, sorter_add_radix_bits;
-extern uns sorter_min_multiway_bits, sorter_max_multiway_bits;
-extern uns sorter_threads;
+extern uint sorter_trace, sorter_trace_array, sorter_stream_bufsize;
+extern uint sorter_debug, sorter_min_radix_bits, sorter_max_radix_bits, sorter_add_radix_bits;
+extern uint sorter_min_multiway_bits, sorter_max_multiway_bits;
+extern uint sorter_threads;
extern u64 sorter_bufsize, sorter_small_input;
extern u64 sorter_thread_threshold, sorter_thread_chunk, sorter_radix_threshold;
extern struct fb_params sorter_fb_params, sorter_small_fb_params;
struct sort_context {
struct fastbuf *in_fb;
struct fastbuf *out_fb;
- uns hash_bits;
+ uint hash_bits;
u64 in_size;
struct fb_params *fb_params;
void (*multiway_merge)(struct sort_context *ctx, struct sort_bucket **ins, struct sort_bucket *out);
// Radix split according to hash function
- void (*radix_split)(struct sort_context *ctx, struct sort_bucket *in, struct sort_bucket **outs, uns bitpos, uns numbits);
+ void (*radix_split)(struct sort_context *ctx, struct sort_bucket *in, struct sort_bucket **outs, uint bitpos, uint numbits);
// State variables of internal_sort
void *key_buf;
// Timing
timestamp_t start_time;
- uns last_pass_time;
- uns total_int_time, total_pre_time, total_ext_time;
+ uint last_pass_time;
+ uint total_int_time, total_pre_time, total_ext_time;
};
void sorter_run(struct sort_context *ctx);
/* Buffers */
-void *sorter_alloc(struct sort_context *ctx, uns size);
+void *sorter_alloc(struct sort_context *ctx, uint size);
void sorter_prepare_buf(struct sort_context *ctx);
void sorter_alloc_buf(struct sort_context *ctx);
void sorter_free_buf(struct sort_context *ctx);
struct sort_bucket {
cnode n;
struct sort_context *ctx;
- uns flags;
+ uint flags;
struct fastbuf *fb;
byte *filename;
u64 size; // Size in bytes (not valid when writing)
- uns runs; // Number of runs, 0 if not sorted
- uns hash_bits; // Remaining bits of the hash function
+ uint runs; // Number of runs, 0 if not sorted
+ uint hash_bits; // Remaining bits of the hash function
byte *ident; // Identifier used in debug messages
};
// Interface between generic code in array.c and functions generated by array.h
void *array; // Array to sort
void *buffer; // Auxiliary buffer (required when radix-sorting)
- uns num_elts; // Number of elements in the array
- uns elt_size; // Bytes per element
- uns hash_bits; // Remaining bits of the hash function
- uns radix_bits; // How many bits to process in a single radix-sort pass
- void (*quicksort)(void *array_ptr, uns num_elts);
- void (*quicksplit)(void *array_ptr, uns num_elts, int *leftp, int *rightp);
- void (*radix_count)(void *src_ptr, uns num_elts, uns *cnt, uns shift);
- void (*radix_split)(void *src_ptr, void *dest_ptr, uns num_elts, uns *ptrs, uns shift);
+ uint num_elts; // Number of elements in the array
+ uint elt_size; // Bytes per element
+ uint hash_bits; // Remaining bits of the hash function
+ uint radix_bits; // How many bits to process in a single radix-sort pass
+ void (*quicksort)(void *array_ptr, uint num_elts);
+ void (*quicksplit)(void *array_ptr, uint num_elts, int *leftp, int *rightp);
+ void (*radix_count)(void *src_ptr, uint num_elts, uint *cnt, uint shift);
+ void (*radix_split)(void *src_ptr, void *dest_ptr, uint num_elts, uint *ptrs, uint shift);
// Used internally by array.c
struct rs_work **rs_works;
struct eltpool *eltpool;
// Configured limits translated from bytes to elements
- uns thread_threshold;
- uns thread_chunk;
- uns radix_threshold;
+ uint thread_threshold;
+ uint thread_chunk;
+ uint radix_threshold;
};
void asort_run(struct asort_context *ctx);
-void asort_start_threads(uns run);
+void asort_start_threads(uint run);
void asort_stop_threads(void);
#endif
#include <ucw/fastbuf.h>
#include <ucw/sorter/common.h>
-uns sorter_trace;
-uns sorter_trace_array;
+uint sorter_trace;
+uint sorter_trace_array;
u64 sorter_bufsize = 65536;
-uns sorter_debug;
-uns sorter_min_radix_bits;
-uns sorter_max_radix_bits;
-uns sorter_add_radix_bits;
-uns sorter_min_multiway_bits;
-uns sorter_max_multiway_bits;
-uns sorter_threads;
+uint sorter_debug;
+uint sorter_min_radix_bits;
+uint sorter_max_radix_bits;
+uint sorter_add_radix_bits;
+uint sorter_min_multiway_bits;
+uint sorter_max_multiway_bits;
+uint sorter_threads;
u64 sorter_thread_threshold = 1048576;
u64 sorter_thread_chunk = 4096;
u64 sorter_radix_threshold = 4096;
static struct cf_section sorter_config = {
CF_ITEMS {
- CF_UNS("Trace", &sorter_trace),
- CF_UNS("TraceArray", &sorter_trace_array),
+ CF_UINT("Trace", &sorter_trace),
+ CF_UINT("TraceArray", &sorter_trace_array),
CF_SECTION("FileAccess", &sorter_fb_params, &fbpar_cf),
CF_SECTION("SmallFileAccess", &sorter_fb_params, &fbpar_cf),
CF_U64("SmallInput", &sorter_small_input),
CF_U64("SortBuffer", &sorter_bufsize),
- CF_UNS("Debug", &sorter_debug),
- CF_UNS("MinRadixBits", &sorter_min_radix_bits),
- CF_UNS("MaxRadixBits", &sorter_max_radix_bits),
- CF_UNS("AddRadixBits", &sorter_add_radix_bits),
- CF_UNS("MinMultiwayBits", &sorter_min_multiway_bits),
- CF_UNS("MaxMultiwayBits", &sorter_max_multiway_bits),
- CF_UNS("Threads", &sorter_threads),
+ CF_UINT("Debug", &sorter_debug),
+ CF_UINT("MinRadixBits", &sorter_min_radix_bits),
+ CF_UINT("MaxRadixBits", &sorter_max_radix_bits),
+ CF_UINT("AddRadixBits", &sorter_add_radix_bits),
+ CF_UINT("MinMultiwayBits", &sorter_min_multiway_bits),
+ CF_UINT("MaxMultiwayBits", &sorter_max_multiway_bits),
+ CF_UINT("Threads", &sorter_threads),
CF_U64("ThreadThreshold", &sorter_thread_threshold),
CF_U64("ThreadChunk", &sorter_thread_chunk),
CF_U64("RadixThreshold", &sorter_radix_threshold),
int main(int argc, char **argv)
{
- uns files, bufsize;
+ uint files, bufsize;
u64 total_size;
if (argc != 4 ||
cf_parse_int(argv[1], (int*) &files) ||
return 1;
}
u64 cnt, cnt_rep;
- uns cnt_ms;
+ uint cnt_ms;
int fd[files];
byte name[files][16];
struct asio_request *req[files];
ASSERT(in_fd >= 0);
ASSERT(!(total_size % bufsize));
P_INIT;
- for (uns i=0; i<total_size/bufsize; i++)
+ for (uint i=0; i<total_size/bufsize; i++)
{
struct asio_request *r = asio_get(&io_queue);
r->op = ASIO_WRITE_BACK;
r->fd = in_fd;
r->len = bufsize;
byte *xbuf = r->buffer;
- for (uns j=0; j<bufsize; j++)
+ for (uint j=0; j<bufsize; j++)
xbuf[j] = i+j;
asio_submit(r);
P_UPDATE(bufsize);
#endif
msg(L_INFO, "Initializing output files");
- for (uns i=0; i<files; i++)
+ for (uint i=0; i<files; i++)
{
sprintf(name[i], "tmp/ft-%d", i);
fd[i] = ucw_open(name[i], O_RDWR | O_CREAT | O_TRUNC | DIRECT, 0666);
msg(L_INFO, "Writing %d MB to %d files in parallel with %d byte buffers", (int)(total_size >> 20), files, bufsize);
P_INIT;
- for (uns i=0; i<files; i++)
+ for (uint i=0; i<files; i++)
req[i] = asio_get(&io_queue);
- for (uns round=0; round<total_size/bufsize/files; round++)
+ for (uint round=0; round<total_size/bufsize/files; round++)
{
- for (uns i=0; i<files; i++)
+ for (uint i=0; i<files; i++)
{
struct asio_request *r = req[i];
#ifdef COPY
memcpy(r->buffer, rd->buffer, bufsize);
asio_put(rr);
#else
- for (uns j=0; j<bufsize; j++)
+ for (uint j=0; j<bufsize; j++)
r->buffer[j] = round+i+j;
#endif
r->op = ASIO_WRITE_BACK;
req[i] = asio_get(&io_queue);
}
}
- for (uns i=0; i<files; i++)
+ for (uint i=0; i<files; i++)
asio_put(req[i]);
asio_sync(&io_queue);
#ifdef COPY
msg(L_INFO, "Reading the files sequentially");
P_INIT;
- for (uns i=0; i<files; i++)
+ for (uint i=0; i<files; i++)
{
lseek(fd[i], 0, SEEK_SET);
- for (uns round=0; round<total_size/bufsize/files; round++)
+ for (uint round=0; round<total_size/bufsize/files; round++)
{
struct asio_request *rr, *r = asio_get(&io_queue);
r->op = ASIO_READ;
}
P_FINAL;
- for (uns i=0; i<files; i++)
+ for (uint i=0; i<files; i++)
unlink(name[i]);
#ifdef COPY
unlink("tmp/ft-in");
int main(int argc, char **argv)
{
- uns files, bufsize;
+ uint files, bufsize;
u64 total_size;
if (argc != 4 ||
cf_parse_int(argv[1], (int*) &files) ||
return 1;
}
u64 cnt, cnt_rep;
- uns cnt_ms;
+ uint cnt_ms;
int fd[files];
byte *buf[files], name[files][16];
- uns xbufsize = bufsize; // Used for single-file I/O
+ uint xbufsize = bufsize; // Used for single-file I/O
byte *xbuf = big_alloc(xbufsize);
init_timer(&timer);
ASSERT(in_fd >= 0);
ASSERT(!(total_size % xbufsize));
P_INIT;
- for (uns i=0; i<total_size/xbufsize; i++)
+ for (uint i=0; i<total_size/xbufsize; i++)
{
- for (uns j=0; j<xbufsize; j++)
+ for (uint j=0; j<xbufsize; j++)
xbuf[j] = i+j;
- uns c = write(in_fd, xbuf, xbufsize);
+ uint c = write(in_fd, xbuf, xbufsize);
ASSERT(c == xbufsize);
P_UPDATE(c);
}
#endif
msg(L_INFO, "Initializing output files");
- for (uns i=0; i<files; i++)
+ for (uint i=0; i<files; i++)
{
sprintf(name[i], "tmp/ft-%d", i);
fd[i] = ucw_open(name[i], O_RDWR | O_CREAT | O_TRUNC | DIRECT, 0666);
msg(L_INFO, "Writing %d MB to %d files in parallel with %d byte buffers", (int)(total_size >> 20), files, bufsize);
P_INIT;
- for (uns r=0; r<total_size/bufsize/files; r++)
+ for (uint r=0; r<total_size/bufsize/files; r++)
{
- for (uns i=0; i<files; i++)
+ for (uint i=0; i<files; i++)
{
#ifdef COPY
- uns ci = read(in_fd, buf[i], bufsize);
+ uint ci = read(in_fd, buf[i], bufsize);
ASSERT(ci == bufsize);
#else
- for (uns j=0; j<bufsize; j++)
+ for (uint j=0; j<bufsize; j++)
buf[i][j] = r+i+j;
#endif
- uns c = write(fd[i], buf[i], bufsize);
+ uint c = write(fd[i], buf[i], bufsize);
ASSERT(c == bufsize);
P_UPDATE(c);
}
msg(L_INFO, "Reading the files sequentially");
P_INIT;
- for (uns i=0; i<files; i++)
+ for (uint i=0; i<files; i++)
{
lseek(fd[i], 0, SEEK_SET);
- for (uns r=0; r<total_size/xbufsize/files; r++)
+ for (uint r=0; r<total_size/xbufsize/files; r++)
{
- uns c = read(fd[i], xbuf, xbufsize);
+ uint c = read(fd[i], xbuf, xbufsize);
ASSERT(c == xbufsize);
P_UPDATE(c);
}
}
P_FINAL;
- for (uns i=0; i<files; i++)
+ for (uint i=0; i<files; i++)
unlink(name[i]);
#ifdef COPY
unlink("tmp/ft-in");
};
static struct elt *ary, *alt, **ind, *array0, *array1;
-static uns n = 10000000;
+static uint n = 10000000;
static u32 sum;
-static struct elt *alloc_elts(uns n)
+static struct elt *alloc_elts(uint n)
{
return big_alloc(n * sizeof(struct elt));
}
-static void free_elts(struct elt *a, uns n)
+static void free_elts(struct elt *a, uint n)
{
big_free(a, n * sizeof(struct elt));
}
{
struct elt *from = ary, *to = alt, *tmp;
#define BITS 8
- uns cnt[1 << BITS];
- for (uns sh=0; sh<32; sh+=BITS)
+ uint cnt[1 << BITS];
+ for (uint sh=0; sh<32; sh+=BITS)
{
bzero(cnt, sizeof(cnt));
- for (uns i=0; i<n; i++)
+ for (uint i=0; i<n; i++)
cnt[(from[i].key >> sh) & ((1 << BITS) - 1)]++;
- uns pos = 0;
- for (uns i=0; i<(1<<BITS); i++)
+ uint pos = 0;
+ for (uint i=0; i<(1<<BITS); i++)
{
- uns c = cnt[i];
+ uint c = cnt[i];
cnt[i] = pos;
pos += c;
}
ASSERT(pos == n);
- for (uns i=0; i<n; i++)
+ for (uint i=0; i<n; i++)
to[cnt[(from[i].key >> sh) & ((1 << BITS) - 1)]++] = from[i];
ASSERT(cnt[(1 << BITS)-1] == n);
tmp=from, from=to, to=tmp;
{
struct elt *from = ary, *to = alt, *tmp;
#define BITS 8
- uns cnt[1 << BITS], cnt2[1 << BITS];
- for (uns sh=0; sh<32; sh+=BITS)
+ uint cnt[1 << BITS], cnt2[1 << BITS];
+ for (uint sh=0; sh<32; sh+=BITS)
{
if (sh)
memcpy(cnt, cnt2, sizeof(cnt));
else
{
bzero(cnt, sizeof(cnt));
- for (uns i=0; i<n; i++)
+ for (uint i=0; i<n; i++)
cnt[(from[i].key >> sh) & ((1 << BITS) - 1)]++;
}
- uns pos = 0;
- for (uns i=0; i<(1<<BITS); i++)
+ uint pos = 0;
+ for (uint i=0; i<(1<<BITS); i++)
{
- uns c = cnt[i];
+ uint c = cnt[i];
cnt[i] = pos;
pos += c;
}
ASSERT(pos == n);
bzero(cnt2, sizeof(cnt2));
- for (uns i=0; i<n; i++)
+ for (uint i=0; i<n; i++)
{
cnt2[(from[i].key >> (sh + BITS)) & ((1 << BITS) - 1)]++;
to[cnt[(from[i].key >> sh) & ((1 << BITS) - 1)]++] = from[i];
static void r1c_sort(void)
{
- uns cnt[256];
+ uint cnt[256];
struct elt *ptrs[256], *x, *lim;
x = ary; lim = ary + n;
while (x < lim)
cnt[x++->key & 255]++;
-#define PTRS(start) x=start; for (uns i=0; i<256; i++) { ptrs[i]=x; x+=cnt[i]; }
+#define PTRS(start) x=start; for (uint i=0; i<256; i++) { ptrs[i]=x; x+=cnt[i]; }
PTRS(alt);
x = ary; lim = ary + n;
static void r1c_sse_sort(void)
{
- uns cnt[256];
+ uint cnt[256];
struct elt *ptrs[256], *x, *lim;
ASSERT(sizeof(struct elt) == 16);
while (x < lim)
cnt[x++->key & 255]++;
-#define PTRS(start) x=start; for (uns i=0; i<256; i++) { ptrs[i]=x; x+=cnt[i]; }
+#define PTRS(start) x=start; for (uint i=0; i<256; i++) { ptrs[i]=x; x+=cnt[i]; }
PTRS(alt);
x = ary; lim = ary + n;
static void r1d_sort(void)
{
- uns cnt[256];
+ uint cnt[256];
struct elt *ptrs[256], *x, *y, *lim;
ASSERT(!(n % 4));
cnt[x++->key & 255]++;
}
-#define PTRS(start) x=start; for (uns i=0; i<256; i++) { ptrs[i]=x; x+=cnt[i]; }
+#define PTRS(start) x=start; for (uint i=0; i<256; i++) { ptrs[i]=x; x+=cnt[i]; }
PTRS(alt);
x = ary; y = ary+n/2; lim = ary + n/2;
{
struct elt *from = ary, *to = alt;
#define BITS 14
- uns cnt[1 << BITS];
+ uint cnt[1 << BITS];
bzero(cnt, sizeof(cnt));
- for (uns i=0; i<n; i++)
+ for (uint i=0; i<n; i++)
cnt[(from[i].key >> (32 - BITS)) & ((1 << BITS) - 1)]++;
- uns pos = 0;
- for (uns i=0; i<(1<<BITS); i++)
+ uint pos = 0;
+ for (uint i=0; i<(1<<BITS); i++)
{
- uns c = cnt[i];
+ uint c = cnt[i];
cnt[i] = pos;
pos += c;
}
ASSERT(pos == n);
- for (uns i=0; i<n; i++)
+ for (uint i=0; i<n; i++)
to[cnt[(from[i].key >> (32 - BITS)) & ((1 << BITS) - 1)]++] = from[i];
ASSERT(cnt[(1 << BITS)-1] == n);
pos = 0;
- for (uns i=0; i<(1 << BITS); i++)
+ for (uint i=0; i<(1 << BITS); i++)
{
as_sort(cnt[i] - pos, alt+pos);
pos = cnt[i];
#define THRESHOLD 5000
#define ODDEVEN 0
- auto void r3(struct elt *from, struct elt *to, uns n, uns lev);
- void r3(struct elt *from, struct elt *to, uns n, uns lev)
+ auto void r3(struct elt *from, struct elt *to, uint n, uint lev);
+ void r3(struct elt *from, struct elt *to, uint n, uint lev)
{
- uns sh = 32 - lev*BITS;
- uns cnt[BUCKS];
+ uint sh = 32 - lev*BITS;
+ uint cnt[BUCKS];
bzero(cnt, sizeof(cnt));
- for (uns i=0; i<n; i++)
+ for (uint i=0; i<n; i++)
cnt[(from[i].key >> sh) & (BUCKS - 1)]++;
- uns pos = 0;
- for (uns i=0; i<BUCKS; i++)
+ uint pos = 0;
+ for (uint i=0; i<BUCKS; i++)
{
- uns c = cnt[i];
+ uint c = cnt[i];
cnt[i] = pos;
pos += c;
}
ASSERT(pos == n);
- for (uns i=0; i<n; i++)
+ for (uint i=0; i<n; i++)
#if 1
to[cnt[(from[i].key >> sh) & (BUCKS - 1)]++] = from[i];
#else
sse_copy_elt(&to[cnt[(from[i].key >> sh) & (BUCKS - 1)]++], &from[i]);
#endif
pos = 0;
- for (uns i=0; i<BUCKS; i++)
+ for (uint i=0; i<BUCKS; i++)
{
- uns l = cnt[i]-pos;
+ uint l = cnt[i]-pos;
if (lev >= LEVELS || l <= THRESHOLD)
{
as_sort(l, to+pos);
static void mergesort(void)
{
struct elt *from, *to;
- uns lev = 0;
+ uint lev = 0;
if (1)
{
struct elt *x = ary, *z = alt, *last = ary + (n & ~1U);
x = from;
z = to;
last = from + n;
- uns step = 1 << lev;
+ uint step = 1 << lev;
while (x + 2*step <= last)
{
z = mrg(x, x+step, x+step, x+2*step, z);
ary = alt;
}
-static void sampsort(uns n, struct elt *ar, struct elt *al, struct elt *dest, byte *wbuf)
+static void sampsort(uint n, struct elt *ar, struct elt *al, struct elt *dest, byte *wbuf)
{
#define WAYS 256
struct elt k[WAYS];
- uns cnt[WAYS];
+ uint cnt[WAYS];
bzero(cnt, sizeof(cnt));
- for (uns i=0; i<WAYS; i++)
+ for (uint i=0; i<WAYS; i++)
k[i] = ar[random() % n];
as_sort(WAYS, k);
- for (uns i=0; i<n; i++)
+ for (uint i=0; i<n; i++)
{
- uns w = 0;
+ uint w = 0;
#define FW(delta) if (ar[i].key > k[w+delta].key) w += delta
FW(128);
FW(64);
cnt[w]++;
}
struct elt *y = al, *way[WAYS], *z;
- for (uns i=0; i<WAYS; i++)
+ for (uint i=0; i<WAYS; i++)
{
way[i] = y;
y += cnt[i];
}
ASSERT(y == al+n);
- for (uns i=0; i<n; i++)
+ for (uint i=0; i<n; i++)
{
- uns w = wbuf[i];
+ uint w = wbuf[i];
*way[w]++ = ar[i];
}
y = al;
z = ar;
- for (uns i=0; i<WAYS; i++)
+ for (uint i=0; i<WAYS; i++)
{
if (cnt[i] >= 1000)
sampsort(cnt[i], y, z, dest, wbuf);
xfree(aux);
}
-static void sampsort2(uns n, struct elt *ar, struct elt *al, struct elt *dest, byte *wbuf)
+static void sampsort2(uint n, struct elt *ar, struct elt *al, struct elt *dest, byte *wbuf)
{
#define WAYS 256
struct elt k[WAYS];
- uns cnt[WAYS];
+ uint cnt[WAYS];
bzero(cnt, sizeof(cnt));
- for (uns i=0; i<WAYS; i++)
+ for (uint i=0; i<WAYS; i++)
k[i] = ar[random() % n];
as_sort(WAYS, k);
struct elt *k1 = ar, *k2 = ar+1, *kend = ar+n;
byte *ww = wbuf;
while (k2 < kend)
{
- uns w1 = 0, w2 = 0;
+ uint w1 = 0, w2 = 0;
#define FW1(delta) if (k1->key > k[w1+delta].key) w1 += delta
#define FW2(delta) if (k2->key > k[w2+delta].key) w2 += delta
FW1(128); FW2(128);
}
if (k1 < kend)
{
- uns w1 = 0;
+ uint w1 = 0;
FW1(128); FW1(64); FW1(32); FW1(16);
FW1(8); FW1(4); FW1(2); FW1(1);
*ww++ = w1;
cnt[w1]++;
}
struct elt *y = al, *way[WAYS], *z;
- for (uns i=0; i<WAYS; i++)
+ for (uint i=0; i<WAYS; i++)
{
way[i] = y;
y += cnt[i];
}
ASSERT(y == al+n);
- for (uns i=0; i<n; i++)
+ for (uint i=0; i<n; i++)
{
- uns w = wbuf[i];
+ uint w = wbuf[i];
*way[w]++ = ar[i];
}
y = al;
z = ar;
- for (uns i=0; i<WAYS; i++)
+ for (uint i=0; i<WAYS; i++)
{
if (cnt[i] >= 1000)
sampsort2(cnt[i], y, z, dest, wbuf);
#define H_LESS(_a,_b) ((_a).key > (_b).key)
struct elt *heap = ary-1;
HEAP_INIT(struct elt, heap, n, H_LESS, HEAP_SWAP);
- uns nn = n;
+ uint nn = n;
while (nn)
HEAP_DELETE_MIN(struct elt, heap, nn, H_LESS, HEAP_SWAP);
#undef H_LESS
#define H_LESS(_a,_b) ((_a)->key > (_b)->key)
struct elt **heap = ind-1;
HEAP_INIT(struct elt *, heap, n, H_LESS, HEAP_SWAP);
- uns nn = n;
+ uint nn = n;
while (nn)
HEAP_DELETE_MIN(struct elt *, heap, nn, H_LESS, HEAP_SWAP);
#undef H_LESS
bzero(block, sizeof(block));
sum = 0;
- for (uns i=0; i<n; i++)
+ for (uint i=0; i<n; i++)
{
#if 1
if (!(i % 4))
#else
ary[i].key = i*(~0U/(n-1));
#endif
- for (uns j=1; j<sizeof(struct elt)/4; j++)
+ for (uint j=1; j<sizeof(struct elt)/4; j++)
((u32*)&ary[i])[j] = ROL(ary[i].key, 3*j);
sum ^= ary[i].key;
}
static void chk_ary(void)
{
u32 s = ary[0].key;
- for (uns i=1; i<n; i++)
+ for (uint i=1; i<n; i++)
if (ary[i].key < ary[i-1].key)
die("Missorted at %d", i);
else
{
mk_ary();
ind = xmalloc(sizeof(struct elt *) * n);
- for (uns i=0; i<n; i++)
+ for (uint i=0; i<n; i++)
ind[i] = &ary[i];
}
static void chk_ind(void)
{
u32 s = ind[0]->key;
- for (uns i=1; i<n; i++)
+ for (uint i=1; i<n; i++)
if (ind[i]->key < ind[i-1]->key)
die("Missorted at %d", i);
else
log_init(argv[0]);
int opt;
- uns op = 0;
+ uint op = 0;
while ((opt = cf_getopt(argc, argv, CF_SHORT_OPTS "1", CF_NO_LONG_OPTS, NULL)) >= 0)
switch (opt)
{
array0 = alloc_elts(n);
array1 = alloc_elts(n);
- for (uns i=0; i<n; i++)
+ for (uint i=0; i<n; i++)
array0[i] = array1[i] = (struct elt) { 0 };
msg(L_INFO, "Testing with %u elements", n);
mk_ary();
timestamp_t timer;
init_timer(&timer);
- for (uns i=0; i<5; i++)
+ for (uint i=0; i<5; i++)
{
#if 1
memcpy(alt, ary, sizeof(struct elt) * n);
memcpy(ary, alt, sizeof(struct elt) * n);
#else
- for (uns j=0; j<n; j++)
+ for (uint j=0; j<n; j++)
alt[j] = ary[j];
- for (uns j=0; j<n; j++)
+ for (uint j=0; j<n; j++)
ary[j] = alt[j];
#endif
}
}
static void
-sorter_stop_timer(struct sort_context *ctx, uns *account_to)
+sorter_stop_timer(struct sort_context *ctx, uint *account_to)
{
ctx->last_pass_time = get_timer(&ctx->start_time);
*account_to += ctx->last_pass_time;
}
-static uns
+static uint
sorter_speed(struct sort_context *ctx, u64 size)
{
if (!size)
return 0;
if (!ctx->last_pass_time)
return 0;
- return (uns)((double)size / (1<<20) * 1000 / ctx->last_pass_time);
+ return (uint)((double)size / (1<<20) * 1000 / ctx->last_pass_time);
}
static int
}
SORT_XTRACE(3, "Main sorting");
- uns pass = 0;
+ uint pass = 0;
do {
++pass;
sorter_start_timer(ctx);
cnode *list_pos = b->n.prev;
ucw_off_t join_size;
struct sort_bucket *join = sbuck_join_to(b, &join_size);
- uns trace_level = (b->flags & SBF_SOURCE) ? 1 : 3;
+ uint trace_level = (b->flags & SBF_SOURCE) ? 1 : 3;
clist_init(&parts);
ASSERT(!(sorter_debug & SORT_DEBUG_NO_PRESORT));
SORT_XTRACE(3, "%s", ((b->flags & SBF_CUSTOM_PRESORT) ? "Custom presorting" : "Presorting"));
- uns cont;
- uns part_cnt = 0;
+ uint cont;
+ uint part_cnt = 0;
u64 total_size = 0;
sorter_start_timer(ctx);
do
SORT_TRACE("Multi-way presorting pass (%d parts, %s, %dMB/s)", part_cnt, stk_fsize(total_size), sorter_speed(ctx, total_size));
- uns max_ways = 1 << sorter_max_multiway_bits;
+ uint max_ways = 1 << sorter_max_multiway_bits;
struct sort_bucket *ways[max_ways+1];
SORT_XTRACE(3, "Starting up to %d-way merge", max_ways);
for (;;)
{
- uns n = 0;
+ uint n = 0;
struct sort_bucket *p;
while (n < max_ways && (p = clist_head(&parts)))
{
ctx->multiway_merge(ctx, ways, out);
sorter_stop_timer(ctx, &ctx->total_ext_time);
- for (uns i=0; i<n; i++)
+ for (uint i=0; i<n; i++)
sbuck_drop(ways[i]);
if (clist_empty(&parts))
}
static void
-sorter_radix(struct sort_context *ctx, struct sort_bucket *b, uns bits)
+sorter_radix(struct sort_context *ctx, struct sort_bucket *b, uint bits)
{
// Add more bits if requested and allowed.
bits = MIN(bits + sorter_add_radix_bits, sorter_max_radix_bits);
- uns nbuck = 1 << bits;
+ uint nbuck = 1 << bits;
SORT_XTRACE(3, "Running radix split on %s with hash %d bits of %d (expecting %s buckets)",
F_BSIZE(b), bits, b->hash_bits, stk_fsize(sbuck_size(b) / nbuck));
sorter_free_buf(ctx);
sorter_start_timer(ctx);
struct sort_bucket **outs = alloca(nbuck * sizeof(struct sort_bucket *));
- for (uns i=nbuck; i--; )
+ for (uint i=nbuck; i--; )
{
outs[i] = sbuck_new(ctx);
outs[i]->hash_bits = b->hash_bits - bits;
ctx->radix_split(ctx, b, outs, b->hash_bits - bits, bits);
u64 min = ~(u64)0, max = 0, sum = 0;
- for (uns i=0; i<nbuck; i++)
+ for (uint i=0; i<nbuck; i++)
{
u64 s = sbuck_size(outs[i]);
min = MIN(min, s);
// (this is insanely large if the input size is unknown, but it serves our purpose)
u64 insize = sbuck_size(b);
u64 mem = ctx->internal_estimate(ctx, b) * 0.8; // Magical factor accounting for various non-uniformities
- uns bits = 0;
+ uint bits = 0;
while ((insize >> bits) > mem)
bits++;
// Calculate the possibilities of radix splits
- uns radix_bits;
+ uint radix_bits;
if (!ctx->radix_split ||
(b->flags & SBF_CUSTOM_PRESORT) ||
(sorter_debug & SORT_DEBUG_NO_RADIX))
}
// The same for multi-way merges
- uns multiway_bits;
+ uint multiway_bits;
if (!ctx->multiway_merge ||
(sorter_debug & SORT_DEBUG_NO_MULTIWAY) ||
(sorter_debug & SORT_DEBUG_NO_PRESORT))
return workspace;
}
-static uns P(internal_num_keys)(struct sort_context *ctx)
+static uint P(internal_num_keys)(struct sort_context *ctx)
{
size_t bufsize = ctx->big_buf_size;
size_t workspace = P(internal_workspace)();
if (workspace)
bufsize -= CPU_PAGE_SIZE;
u64 maxkeys = bufsize / (sizeof(P(key)) + workspace);
- return MIN(maxkeys, ~0U); // The number of records must fit in uns
+ return MIN(maxkeys, ~0U); // The number of records must fit in a uint
}
static int P(internal)(struct sort_context *ctx, struct sort_bucket *bin, struct sort_bucket *bout, struct sort_bucket *bout_only)
sorter_alloc_buf(ctx);
struct fastbuf *in = sbuck_read(bin);
P(key) *buf = ctx->big_buf;
- uns maxkeys = P(internal_num_keys)(ctx);
+ uint maxkeys = P(internal_num_keys)(ctx);
SORT_XTRACE(5, "s-fixint: Reading (maxkeys=%u, hash_bits=%d)", maxkeys, bin->hash_bits);
- uns n = 0;
+ uint n = 0;
while (n < maxkeys && P(read_key)(in, &buf[n]))
n++;
if (!n)
bout = bout_only;
struct fastbuf *out = sbuck_write(bout);
bout->runs++;
- uns merged UNUSED = 0;
- for (uns i=0; i<n; i++)
+ uint merged UNUSED = 0;
+ for (uint i=0; i<n; i++)
{
#ifdef SORT_UNIFY
if (i < n-1 && !P(compare)(&buf[i], &buf[i+1]))
{
P(key) **keys = workspace;
- uns n = 2;
+ uint n = 2;
keys[0] = &buf[i];
keys[1] = &buf[i+1];
while (!P(compare)(&buf[i], &buf[i+n]))
static inline void *P(internal_get_data)(P(key) *key)
{
- uns ksize = SORT_KEY_SIZE(*key);
+ uint ksize = SORT_KEY_SIZE(*key);
#ifdef SORT_UNIFY
ksize = ALIGN_TO(ksize, CPU_STRUCT_ALIGN);
#endif
size_t remains = bufsize - CPU_PAGE_SIZE;
do
{
- uns ksize = SORT_KEY_SIZE(key);
+ uint ksize = SORT_KEY_SIZE(key);
#ifdef SORT_UNIFY
- uns ksize_aligned = ALIGN_TO(ksize, CPU_STRUCT_ALIGN);
+ uint ksize_aligned = ALIGN_TO(ksize, CPU_STRUCT_ALIGN);
#else
- uns ksize_aligned = ksize;
+ uint ksize_aligned = ksize;
#endif
- uns dsize = SORT_DATA_SIZE(key);
- uns recsize = ALIGN_TO(ksize_aligned + dsize, CPU_STRUCT_ALIGN);
+ uint dsize = SORT_DATA_SIZE(key);
+ uint recsize = ALIGN_TO(ksize_aligned + dsize, CPU_STRUCT_ALIGN);
size_t totalsize = recsize + sizeof(P(internal_item_t)) + P(internal_workspace)(&key);
if (unlikely(totalsize > remains
#ifdef CPU_64BIT_POINTERS
- || item >= item_array + ~0U // The number of items must fit in an uns
+ || item >= item_array + ~0U // The number of items must fit in a uint
#endif
))
{
while (P(read_key)(in, &key));
last_item = item;
- uns count = last_item - item_array;
+ uint count = last_item - item_array;
void *workspace UNUSED = ALIGN_PTR(last_item, CPU_PAGE_SIZE);
SORT_XTRACE(4, "s-internal: Read %u items (%s items, %s workspace, %s data)",
count,
bout = bout_only;
struct fastbuf *out = sbuck_write(bout);
bout->runs++;
- uns merged UNUSED = 0;
+ uint merged UNUSED = 0;
for (item = item_array; item < last_item; item++)
{
#ifdef SORT_UNIFY
void **data_array = workspace;
key_array[0] = item[0].key;
data_array[0] = P(internal_get_data)(key_array[0]);
- uns cnt;
+ uint cnt;
for (cnt=1; item+cnt < last_item && !P(compare)(key_array[0], item[cnt].key); cnt++)
{
key_array[cnt] = item[cnt].key;
{
// Most of this is just wild guesses
#ifdef SORT_VAR_KEY
- uns avg = ALIGN_TO(sizeof(P(key))/4, CPU_STRUCT_ALIGN);
+ uint avg = ALIGN_TO(sizeof(P(key))/4, CPU_STRUCT_ALIGN);
#else
- uns avg = ALIGN_TO(sizeof(P(key)), CPU_STRUCT_ALIGN);
+ uint avg = ALIGN_TO(sizeof(P(key)), CPU_STRUCT_ALIGN);
#endif
- uns ws = 0;
+ uint ws = 0;
#ifdef SORT_UNIFY
ws += sizeof(void *);
#endif
#endif
} P(mwt);
-static inline void P(update_tree)(P(key) *keys, P(mwt) *tree, uns i)
+static inline void P(update_tree)(P(key) *keys, P(mwt) *tree, uint i)
{
while (i /= 2)
{
asm volatile ("" : : : "memory");
}
-static inline void P(set_tree)(P(key) *keys, P(mwt) *tree, uns i, int val)
+static inline void P(set_tree)(P(key) *keys, P(mwt) *tree, uint i, int val)
{
tree[i].i = val;
P(update_tree)(keys, tree, i);
static void P(multiway_merge)(struct sort_context *ctx UNUSED, struct sort_bucket **ins, struct sort_bucket *out)
{
- uns num_ins = 0;
+ uint num_ins = 0;
while (ins[num_ins])
num_ins++;
- uns n2 = 1;
+ uint n2 = 1;
while (n2 < num_ins)
n2 *= 2;
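// n2 is the smallest power of two >= num_ins; the tournament tree below
// has 2*n2 nodes and tree[1] always holds the index of the smallest live key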
struct fastbuf *fins[num_ins];
P(key) keys[num_ins];
P(mwt) tree[2*n2];
- for (uns i=1; i<2*n2; i++)
+ for (uint i=1; i<2*n2; i++)
tree[i] = (P(mwt)) { .i = -1 };
- for (uns i=0; i<num_ins; i++)
+ for (uint i=0; i<num_ins; i++)
{
fins[i] = sbuck_read(ins[i]);
if (P(read_key)(fins[i], &keys[i]))
#ifdef SORT_UNIFY
- uns hits[num_ins];
+ uint hits[num_ins];
P(key) *mkeys[num_ins], *key;
struct fastbuf *mfb[num_ins];
continue;
}
- uns m = 0;
+ uint m = 0;
key = &keys[i];
do
{
P(copy_merged)(mkeys, mfb, m, fout);
- for (uns j=0; j<m; j++)
+ for (uint j=0; j<m; j++)
{
i = hits[j];
if (likely(P(read_key)(fins[i], &keys[i])))
/* Simplified version which does not support any unification */
while (likely(tree[1].i >= 0))
{
- uns i = tree[1].i;
+ uint i = tree[1].i;
P(key) UNUSED key = keys[i];
P(copy_data)(&keys[i], fins[i], fout);
if (unlikely(!P(read_key)(fins[i], &keys[i])))
#include <string.h>
-static void P(radix_split)(struct sort_context *ctx UNUSED, struct sort_bucket *bin, struct sort_bucket **bouts, uns bitpos, uns numbits)
+static void P(radix_split)(struct sort_context *ctx UNUSED, struct sort_bucket *bin, struct sort_bucket **bouts, uint bitpos, uint numbits)
{
- uns nbucks = 1 << numbits;
- uns mask = nbucks - 1;
+ uint nbucks = 1 << numbits;
+ uint mask = nbucks - 1;
struct fastbuf *in = sbuck_read(bin);
P(key) k;
while (P(read_key)(in, &k))
{
P(hash_t) h = P(hash)(&k);
- uns i = (h >> bitpos) & mask;
+ uint i = (h >> bitpos) & mask;
if (unlikely(!outs[i]))
outs[i] = sbuck_write(bouts[i]);
P(copy_data)(&k, in, outs[i]);
P(key) *kout = NULL, *ktmp;
int next1, next2, run1, run2;
int comp;
- uns run_count = 0;
+ uint run_count = 0;
fin1 = sbuck_read(ins[0]);
next1 = P(read_key)(fin1, kin1);
#include <fcntl.h>
void *
-sorter_alloc(struct sort_context *ctx, uns size)
+sorter_alloc(struct sort_context *ctx, uint size)
{
return mp_alloc_zero(ctx->pool, size);
}
/*** Time measurement ***/
static timestamp_t timer;
-static uns test_id;
+static uint test_id;
static void
start(void)
static void
test_int(int mode, u64 size)
{
- uns N = size ? nextprime(MIN(size/4, 0xffff0000)) : 0;
- uns K = N/4*3;
+ uint N = size ? nextprime(MIN(size/4, 0xffff0000)) : 0;
+ uint K = N/4*3;
msg(L_INFO, ">>> Integers (%s, N=%u)", ((char *[]) { "increasing", "decreasing", "random" })[mode], N);
struct fastbuf *f = bopen_tmp(65536);
- for (uns i=0; i<N; i++)
+ for (uint i=0; i<N; i++)
bputl(f, (mode==0) ? i : (mode==1) ? N-1-i : ((u64)i * K + 17) % N);
brewind(f);
stop();
SORT_XTRACE(2, "Verifying");
- for (uns i=0; i<N; i++)
+ for (uint i=0; i<N; i++)
{
- uns j = bgetl(f);
+ uint j = bgetl(f);
if (i != j)
die("Discrepancy: %u instead of %u", j, i);
}
u32 cnt;
};
-static inline void s2_write_merged(struct fastbuf *f, struct key2 **k, void **d UNUSED, uns n, void *buf UNUSED)
+static inline void s2_write_merged(struct fastbuf *f, struct key2 **k, void **d UNUSED, uint n, void *buf UNUSED)
{
- for (uns i=1; i<n; i++)
+ for (uint i=1; i<n; i++)
k[0]->cnt += k[i]->cnt;
bwrite(f, k[0], sizeof(struct key2));
}
test_counted(int mode, u64 size)
{
u64 items = size / sizeof(struct key2);
- uns mult = 2;
+ uint mult = 2;
while (items/(2*mult) > 0xffff0000)
mult++;
- uns N = items ? nextprime(items/(2*mult)) : 0;
- uns K = N/4*3;
+ uint N = items ? nextprime(items/(2*mult)) : 0;
+ uint K = N/4*3;
msg(L_INFO, ">>> Counted integers (%s, N=%u, mult=%u)", ((char *[]) { "increasing", "decreasing", "random" })[mode], N, mult);
struct fastbuf *f = bopen_tmp(65536);
- for (uns m=0; m<mult; m++)
- for (uns i=0; i<N; i++)
- for (uns j=0; j<2; j++)
+ for (uint m=0; m<mult; m++)
+ for (uint i=0; i<N; i++)
+ for (uint j=0; j<2; j++)
{
bputl(f, (mode==0) ? (i%N) : (mode==1) ? N-1-(i%N) : ((u64)i * K + 17) % N);
bputl(f, 1);
stop();
SORT_XTRACE(2, "Verifying");
- for (uns i=0; i<N; i++)
+ for (uint i=0; i<N; i++)
{
- uns j = bgetl(f);
+ uint j = bgetl(f);
if (i != j)
die("Discrepancy: %u instead of %u", j, i);
- uns k = bgetl(f);
+ uint k = bgetl(f);
if (k != 2*mult)
die("Discrepancy: %u has count %u instead of %u", j, k, 2*mult);
}
return 0;
}
-static inline uns s3_hash(struct key3 *x)
+static inline uint s3_hash(struct key3 *x)
{
return x->hash[0];
}
#include <ucw/sorter/sorter.h>
static void
-gen_hash_key(int mode, struct key3 *k, uns i)
+gen_hash_key(int mode, struct key3 *k, uint i)
{
k->i = i;
k->payload[0] = 7*i + 13;
static void
test_hashes(int mode, u64 size)
{
- uns N = MIN(size / sizeof(struct key3), 0xffffffff);
+ uint N = MIN(size / sizeof(struct key3), 0xffffffff);
msg(L_INFO, ">>> Hashes (%s, N=%u)", ((char *[]) { "increasing", "decreasing", "random" })[mode], N);
struct key3 k, lastk;
struct fastbuf *f = bopen_tmp(65536);
- uns hash_sum = 0;
- for (uns i=0; i<N; i++)
+ uint hash_sum = 0;
+ for (uint i=0; i<N; i++)
{
gen_hash_key(mode, &k, i);
hash_sum += k.hash[3];
stop();
SORT_XTRACE(2, "Verifying");
- for (uns i=0; i<N; i++)
+ for (uint i=0; i<N; i++)
{
int ok = breadb(f, &k, sizeof(k));
ASSERT(ok);
#define KEY4_MAX 256
struct key4 {
- uns len;
+ uint len;
byte s[KEY4_MAX];
};
static inline int s4_compare(struct key4 *x, struct key4 *y)
{
- uns l = MIN(x->len, y->len);
+ uint l = MIN(x->len, y->len);
int c = memcmp(x->s, y->s, l);
if (c)
return c;
#define s4b_read_key s4_read_key
#define s4b_write_key s4_write_key
-static inline uns s4_data_size(struct key4 *x)
+static inline uint s4_data_size(struct key4 *x)
{
return x->len ? (x->s[0] ^ 0xad) : 0;
}
gen_key4(struct key4 *k)
{
k->len = random_max(KEY4_MAX);
- for (uns i=0; i<k->len; i++)
+ for (uint i=0; i<k->len; i++)
k->s[i] = random();
}
static void
-gen_data4(byte *buf, uns len, uns h)
+gen_data4(byte *buf, uint len, uint h)
{
while (len--)
{
}
static void
-test_strings(uns mode, u64 size)
+test_strings(uint mode, u64 size)
{
- uns avg_item_size = KEY4_MAX/2 + 4 + (mode ? 128 : 0);
- uns N = MIN(size / avg_item_size, 0xffffffff);
+ uint avg_item_size = KEY4_MAX/2 + 4 + (mode ? 128 : 0);
+ uint N = MIN(size / avg_item_size, 0xffffffff);
msg(L_INFO, ">>> Strings %s(N=%u)", (mode ? "with data " : ""), N);
srand(1);
struct key4 k, lastk;
byte buf[256], buf2[256];
- uns sum = 0;
+ uint sum = 0;
struct fastbuf *f = bopen_tmp(65536);
- for (uns i=0; i<N; i++)
+ for (uint i=0; i<N; i++)
{
gen_key4(&k);
s4_write_key(f, &k);
- uns h = hash_block(k.s, k.len);
+ uint h = hash_block(k.s, k.len);
sum += h;
if (mode)
{
stop();
SORT_XTRACE(2, "Verifying");
- for (uns i=0; i<N; i++)
+ for (uint i=0; i<N; i++)
{
int ok = s4_read_key(f, &k);
ASSERT(ok);
- uns h = hash_block(k.s, k.len);
+ uint h = hash_block(k.s, k.len);
if (mode && s4_data_size(&k))
{
ok = breadb(f, buf, s4_data_size(&k));
u32 cnt;
};
-static uns s5_N, s5_K, s5_L, s5_i, s5_j;
+static uint s5_N, s5_K, s5_L, s5_i, s5_j;
struct s5_pair {
- uns x, y;
+ uint x, y;
};
static int s5_gen(struct s5_pair *p)
#define ASORT_KEY_TYPE u32
#include <ucw/sorter/array-simple.h>
-static void s5_write_merged(struct fastbuf *f, struct key5 **keys, void **data, uns n, void *buf)
+static void s5_write_merged(struct fastbuf *f, struct key5 **keys, void **data, uint n, void *buf)
{
u32 *a = buf;
- uns m = 0;
- for (uns i=0; i<n; i++)
+ uint m = 0;
+ for (uint i=0; i<n; i++)
{
memcpy(&a[m], data[i], 4*keys[i]->cnt);
m += keys[i]->cnt;
bwrite(f, a, 4*m);
}
-static void s5_copy_merged(struct key5 **keys, struct fastbuf **data, uns n, struct fastbuf *dest)
+static void s5_copy_merged(struct key5 **keys, struct fastbuf **data, uint n, struct fastbuf *dest)
{
u32 k[n];
- uns m = 0;
- for (uns i=0; i<n; i++)
+ uint m = 0;
+ for (uint i=0; i<n; i++)
{
k[i] = bgetl(data[i]);
m += keys[i]->cnt;
bwrite(dest, &key, sizeof(key));
while (key.cnt--)
{
- uns b = 0;
- for (uns i=1; i<n; i++)
+ uint b = 0;
+ for (uint i=1; i<n; i++)
if (k[i] < k[b])
b = i;
bputl(dest, k[b]);
static int s5_presort(struct fastbuf *dest, void *buf, size_t bufsize)
{
- uns max = MIN(bufsize/sizeof(struct s5_pair), 0xffffffff);
+ uint max = MIN(bufsize/sizeof(struct s5_pair), 0xffffffff);
struct s5_pair *a = buf;
- uns n = 0;
+ uint n = 0;
while (n<max && s5_gen(&a[n]))
n++;
if (!n)
return 0;
s5p_sort(a, n);
- uns i = 0;
+ uint i = 0;
while (i < n)
{
- uns j = i;
+ uint j = i;
while (i < n && a[i].x == a[j].x)
i++;
struct key5 k = { .x = a[j].x, .cnt = i-j };
#include <ucw/sorter/sorter.h>
static void
-test_graph(uns mode, u64 size)
+test_graph(uint mode, u64 size)
{
- uns N = 3;
+ uint N = 3;
while ((u64)N*(N+2)*4 < size)
N = nextprime(N);
if (!size)
stop();
SORT_XTRACE(2, "Verifying");
- uns c = bgetl(f);
+ uint c = bgetl(f);
ASSERT(c == 0xfeedcafe);
- for (uns i=0; i<N; i++)
+ for (uint i=0; i<N; i++)
{
struct key5 k;
int ok = breadb(f, &k, sizeof(k));
ASSERT(ok);
ASSERT(k.x == i);
ASSERT(k.cnt == N);
- for (uns j=0; j<N; j++)
+ for (uint j=0; j<N; j++)
{
- uns y = bgetl(f);
+ uint y = bgetl(f);
ASSERT(y == j);
}
}
/*** Main ***/
static void
-run_test(uns i, u64 size)
+run_test(uint i, u64 size)
{
test_id = i;
switch (i)
log_init(NULL);
int c;
u64 size = 10000000;
- uns t = ~0;
+ uint t = ~0;
while ((c = cf_getopt(argc, argv, CF_SHORT_OPTS "d:s:t:v", CF_NO_LONG_OPTS, NULL)) >= 0)
switch (c)
if (optind != argc)
goto usage;
- for (uns i=0; i<TMAX; i++)
+ for (uint i=0; i<TMAX; i++)
if (t & (1 << i))
run_test(i, size);
* bits is available. A monotone hash is a function f from keys to integers
* such that f(x) < f(y) implies x < y and whose values are approximately
* uniformly distributed. It should be declared as:
- * uns PREFIX_hash(SORT_KEY *a)
+ * uint PREFIX_hash(SORT_KEY *a)
*
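* For instance (an illustrative sketch, not part of this patch, assuming
* the key struct carries a plain u32 member `u'), the identity already
* qualifies as a monotone hash:
*
*   static inline uint PREFIX_hash(SORT_KEY *a) { return a->u; }
*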
* Unification:
*
* SORT_UNIFY merge items with identical keys. It requires the following functions:
- * void PREFIX_write_merged(struct fastbuf *f, SORT_KEY **keys, void **data, uns n, void *buf)
+ * void PREFIX_write_merged(struct fastbuf *f, SORT_KEY **keys, void **data, uint n, void *buf)
* takes n records in memory with keys which compare equal and writes
* a single record to the given fastbuf. `buf' points to a buffer which
* is guaranteed to hold the sum of workspace requirements (see below)
* over all given records. The function is allowed to modify all its inputs.
- * void PREFIX_copy_merged(SORT_KEY **keys, struct fastbuf **data, uns n, struct fastbuf *dest)
+ * void PREFIX_copy_merged(SORT_KEY **keys, struct fastbuf **data, uint n, struct fastbuf *dest)
* takes n records with keys in memory and data in fastbufs and writes
* a single record. Used only if SORT_DATA_SIZE or SORT_UNIFY_WORKSPACE
* is defined.
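*
* As an illustration (a sketch only, modelled on the counted-record test
* elsewhere in this patch, assuming the key carries a u32 counter `cnt'),
* write_merged() can sum the counters of the n equal-keyed records into
* the first one and emit a single record:
*
*   static void PREFIX_write_merged(struct fastbuf *f, SORT_KEY **keys, void **data UNUSED, uint n, void *buf UNUSED)
*   {
*     for (uint i=1; i<n; i++)
*       keys[0]->cnt += keys[i]->cnt;
*     bwrite(f, keys[0], sizeof(**keys));
*   }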
#define SORT_INT SORT_INT64
#define SORT_LONG_HASH
#else
-typedef uns P(hash_t);
+typedef uint P(hash_t);
#endif
#ifdef SORT_INT
}
#if defined(SORT_UNIFY) && !defined(SORT_VAR_DATA) && !defined(SORT_UNIFY_WORKSPACE)
-static inline void P(copy_merged)(P(key) **keys, struct fastbuf **data UNUSED, uns n, struct fastbuf *dest)
+static inline void P(copy_merged)(P(key) **keys, struct fastbuf **data UNUSED, uint n, struct fastbuf *dest)
{
P(write_merged)(dest, keys, NULL, n, NULL);
}
#include <stdio.h>
-uns
-stk_array_len(char **s, uns cnt)
+uint
+stk_array_len(char **s, uint cnt)
{
- uns l = 1;
+ uint l = 1;
while (cnt--)
l += strlen(*s++);
return l;
}
void
-stk_array_join(char *x, char **s, uns cnt, uns sep)
+stk_array_join(char *x, char **s, uint cnt, uint sep)
{
while (cnt--)
{
- uns l = strlen(*s);
+ uint l = strlen(*s);
memcpy(x, *s, l);
x += l;
s++;
*x = 0;
}
-uns
+uint
stk_printf_internal(const char *fmt, ...)
{
- uns len = 256;
+ uint len = 256;
char *buf = alloca(len);
va_list args, args2;
va_start(args, fmt);
}
}
-uns
+uint
stk_vprintf_internal(const char *fmt, va_list args)
{
- uns len = 256;
+ uint len = 256;
char *buf = alloca(len);
va_list args2;
for (;;)
}
void
-stk_hexdump_internal(char *dst, const byte *src, uns n)
+stk_hexdump_internal(char *dst, const byte *src, uint n)
{
mem_to_hex(dst, src, n, ' ');
}
#define stk_vprintf_internal ucw_stk_vprintf_internal
#endif
-#define stk_strdup(s) ({ const char *_s=(s); uns _l=strlen(_s)+1; char *_x=alloca(_l); memcpy(_x, _s, _l); _x; })
-#define stk_strndup(s,n) ({ const char *_s=(s); uns _l=strnlen(_s,(n)); char *_x=alloca(_l+1); memcpy(_x, _s, _l); _x[_l]=0; _x; })
-#define stk_strcat(s1,s2) ({ const char *_s1=(s1); const char *_s2=(s2); uns _l1=strlen(_s1); uns _l2=strlen(_s2); char *_x=alloca(_l1+_l2+1); memcpy(_x,_s1,_l1); memcpy(_x+_l1,_s2,_l2+1); _x; })
+#define stk_strdup(s) ({ const char *_s=(s); uint _l=strlen(_s)+1; char *_x=alloca(_l); memcpy(_x, _s, _l); _x; })
+#define stk_strndup(s,n) ({ const char *_s=(s); uint _l=strnlen(_s,(n)); char *_x=alloca(_l+1); memcpy(_x, _s, _l); _x[_l]=0; _x; })
+#define stk_strcat(s1,s2) ({ const char *_s1=(s1); const char *_s2=(s2); uint _l1=strlen(_s1); uint _l2=strlen(_s2); char *_x=alloca(_l1+_l2+1); memcpy(_x,_s1,_l1); memcpy(_x+_l1,_s2,_l2+1); _x; })
#define stk_strmulticat(s...) ({ char *_s[]={s}; char *_x=alloca(stk_array_len(_s, ARRAY_SIZE(_s)-1)); stk_array_join(_x, _s, ARRAY_SIZE(_s)-1, 0); _x; })
#define stk_strarraycat(s,n) ({ char **_s=(s); int _n=(n); char *_x=alloca(stk_array_len(_s,_n)); stk_array_join(_x, _s, _n, 0); _x; })
#define stk_strjoin(s,n,sep) ({ char **_s=(s); int _n=(n); char *_x=alloca(stk_array_len(_s,_n)+_n-1); stk_array_join(_x, _s, _n, (sep)); _x; })
-#define stk_printf(f...) ({ uns _l=stk_printf_internal(f); char *_x=alloca(_l); sprintf(_x, f); _x; })
-#define stk_vprintf(f, args) ({ uns _l=stk_vprintf_internal(f, args); char *_x=alloca(_l); vsprintf(_x, f, args); _x; })
-#define stk_hexdump(s,n) ({ uns _n=(n); char *_x=alloca(3*_n+1); stk_hexdump_internal(_x,(char*)(s),_n); _x; })
+#define stk_printf(f...) ({ uint _l=stk_printf_internal(f); char *_x=alloca(_l); sprintf(_x, f); _x; })
+#define stk_vprintf(f, args) ({ uint _l=stk_vprintf_internal(f, args); char *_x=alloca(_l); vsprintf(_x, f, args); _x; })
+#define stk_hexdump(s,n) ({ uint _n=(n); char *_x=alloca(3*_n+1); stk_hexdump_internal(_x,(char*)(s),_n); _x; })
#define stk_str_unesc(s) ({ const char *_s=(s); char *_d=alloca(strlen(_s)+1); str_unesc(_d, _s); _d; })
#define stk_fsize(n) ({ char *_s=alloca(16); stk_fsize_internal(_s, n); _s; })
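/* Illustrative usage (a sketch, not part of this patch; `dir' and `count'
 * are made-up variables): the results are alloca()'d in the caller's frame,
 * so they stay valid only until the calling function returns.
 *
 *   char *path = stk_strcat(dir, "/config");
 *   char *note = stk_printf("%u items", count);
 */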
-uns stk_array_len(char **s, uns cnt);
-void stk_array_join(char *x, char **s, uns cnt, uns sep);
-uns stk_printf_internal(const char *x, ...) FORMAT_CHECK(printf,1,2);
-uns stk_vprintf_internal(const char *x, va_list args);
-void stk_hexdump_internal(char *dst, const byte *src, uns n);
+uint stk_array_len(char **s, uint cnt);
+void stk_array_join(char *x, char **s, uint cnt, uint sep);
+uint stk_printf_internal(const char *x, ...) FORMAT_CHECK(printf,1,2);
+uint stk_vprintf_internal(const char *x, va_list args);
+void stk_hexdump_internal(char *dst, const byte *src, uint n);
void stk_fsize_internal(char *dst, u64 size);
#endif
else
{
char *p;
- uns v = strtoul(s + 2, &p, 16);
+ uint v = strtoul(s + 2, &p, 16);
if (v <= 255)
*d++ = v;
else
default:
if (s[1] >= '0' && s[1] <= '7')
{
- uns v = s[1] - '0';
+ uint v = s[1] - '0';
s += 2;
- for (uns i = 0; i < 2 && *s >= '0' && *s <= '7'; s++, i++)
+ for (uint i = 0; i < 2 && *s >= '0' && *s <= '7'; s++, i++)
v = (v << 3) + *s - '0';
if (v <= 255)
*d++ = v;
}
int
-str_hier_prefix(const char *str, const char *prefix, uns sep)
+str_hier_prefix(const char *str, const char *prefix, uint sep)
{
while (*str && *prefix)
{
size_t sl=0, pl=0;
- while (str[sl] && (uns) str[sl] != sep)
+ while (str[sl] && (uint) str[sl] != sep)
sl++;
- while (prefix[pl] && (uns) prefix[pl] != sep)
+ while (prefix[pl] && (uint) prefix[pl] != sep)
pl++;
if (sl != pl || memcmp(str, prefix, sl))
return 0;
}
int
-str_hier_suffix(const char *str, const char *suffix, uns sep)
+str_hier_suffix(const char *str, const char *suffix, uint sep)
{
const char *st = str + strlen(str);
const char *sx = suffix + strlen(suffix);
while (st > str && sx > suffix)
{
size_t sl=0, pl=0;
- while (st-sl > str && (uns) st[-sl-1] != sep)
+ while (st-sl > str && (uint) st[-sl-1] != sep)
sl++;
- while (sx-pl > suffix && (uns) sx[-pl-1] != sep)
+ while (sx-pl > suffix && (uint) sx[-pl-1] != sep)
pl++;
if (sl != pl || memcmp(st-sl, sx-pl, sl))
return 0;
#include <ucw/string.h>
#include <ucw/chartype.h>
-static uns
-hex_make(uns x)
+static uint
+hex_make(uint x)
{
return (x < 10) ? (x + '0') : (x - 10 + 'a');
}
void
-mem_to_hex(char *dest, const byte *src, uns bytes, uns flags)
+mem_to_hex(char *dest, const byte *src, uint bytes, uint flags)
{
- uns sep = flags & 0xff;
+ uint sep = flags & 0xff;
while (bytes--)
{
*dest = 0;
}
-static uns
-hex_parse(uns c)
+static uint
+hex_parse(uint c)
{
c = Cupcase(c);
c -= '0';
}
const char *
-hex_to_mem(byte *dest, const char *src, uns max_bytes, uns flags)
+hex_to_mem(byte *dest, const char *src, uint max_bytes, uint flags)
{
- uns sep = flags & 0xff;
+ uint sep = flags & 0xff;
while (max_bytes-- && Cxdigit(src[0]) && Cxdigit(src[1]))
{
*dest++ = (hex_parse(src[0]) << 4) | hex_parse(src[1]);
#include <string.h>
int
-str_sepsplit(char *str, uns sep, char **rec, uns max)
+str_sepsplit(char *str, uint sep, char **rec, uint max)
{
- uns cnt = 0;
+ uint cnt = 0;
while (1)
{
rec[cnt++] = str;
}
int
-str_wordsplit(char *src, char **dst, uns max)
+str_wordsplit(char *src, char **dst, uint max)
{
- uns cnt = 0;
+ uint cnt = 0;
for(;;)
{
#include <ucw/string.h>
#ifdef CONFIG_DARWIN
-uns
-strnlen(const char *str, uns n)
+uint
+strnlen(const char *str, uint n)
{
const char *end = str + n;
const char *c;
#endif
char *
-str_format_flags(char *dest, const char *fmt, uns flags)
+str_format_flags(char *dest, const char *fmt, uint flags)
{
char *start = dest;
- for (uns i=0; fmt[i]; i++)
+ for (uint i=0; fmt[i]; i++)
{
if (flags & (1 << i))
*dest++ = fmt[i];
return start;
}
-uns
-str_count_char(const char *str, uns chr)
+uint
+str_count_char(const char *str, uint chr)
{
const byte *s = str;
- uns i = 0;
+ uint i = 0;
while (*s)
if (*s++ == chr)
i++;
/* string.c */
#ifdef CONFIG_DARWIN
-uns strnlen(const char *str, uns n); // NOAPI
+uint strnlen(const char *str, uint n); // NOAPI
#endif
/**
* Format a set of flag bits. When the i-th bit of @flags is 1,
* set the i-th character of @dest to @fmt[i], otherwise to '-'.
**/
-char *str_format_flags(char *dest, const char *fmt, uns flags);
+char *str_format_flags(char *dest, const char *fmt, uint flags);
/** Counts occurrences of @chr in @str. **/
-uns str_count_char(const char *str, uns chr);
+uint str_count_char(const char *str, uint chr);
/* str-esc.c */
* When there are more than @max fields in @str, the first @max fields
* are processed and -1 is returned.
**/
-int str_sepsplit(char *str, uns sep, char **rec, uns max);
+int str_sepsplit(char *str, uint sep, char **rec, uint max);
/**
 * Split @str into words separated by white-space characters. The spaces
* Fields surrounded by double quotes are also recognized. They can contain
* spaces, but no mechanism for escaping embedded quotes is defined.
**/
-int str_wordsplit(char *str, char **rec, uns max);
+int str_wordsplit(char *str, char **rec, uint max);
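A minimal sketch of the two splitters; note that both modify the buffer in place (separators are overwritten by NULs), so the input must be writable:

#include <ucw/string.h>

static void split_demo(void)
{
  char path[] = "usr:local:bin";
  char *fields[4];
  int n = str_sepsplit(path, ':', fields, 4);
  /* n == 3; fields[] = { "usr", "local", "bin" } */

  char line[] = "alpha \"two words\" beta";
  char *words[4];
  int w = str_wordsplit(line, words, 4);
  /* w == 3; words[1] == "two words" thanks to the quoting rule above */
}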
/* str-(i)match.c: Matching of shell patterns */
* not be separated), possibly OR-ed with `MEM_TO_HEX_UPCASE` when upper-case
* characters should be used.
**/
-void mem_to_hex(char *dest, const byte *src, uns bytes, uns flags);
+void mem_to_hex(char *dest, const byte *src, uint bytes, uint flags);
/**
* An inverse function to @mem_to_hex(). Takes a hexdump of at most @max_bytes
* bytes and stores the bytes to a buffer starting at @dest. Returns a pointer
* at the first character after the dump.
**/
-const char *hex_to_mem(byte *dest, const char *src, uns max_bytes, uns flags);
+const char *hex_to_mem(byte *dest, const char *src, uint max_bytes, uint flags);
// Bottom 8 bits of flags are an optional separator of bytes, the rest is:
#define MEM_TO_HEX_UPCASE 0x100
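A round-trip sketch; flags 0 yields a compact dump, while `' ' | MEM_TO_HEX_UPCASE` would produce space-separated upper-case pairs:

#include <ucw/string.h>

static void hex_demo(void)
{
  const byte src[3] = { 0x0a, 0xff, 0x42 };
  char hex[2*3 + 1];                        /* two digits per byte + NUL */
  mem_to_hex(hex, src, 3, 0);               /* hex == "0aff42" */

  byte back[3];
  const char *end = hex_to_mem(back, hex, 3, 0);
  /* back[] == src[], end points at the terminating NUL */
}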
* - "/" is a prefix,
* - "" is a prefix.
**/
-int str_hier_prefix(const char *str, const char *prefix, uns sep);
-int str_hier_suffix(const char *str, const char *suffix, uns sep); /** Like @str_hier_prefix(), but for suffixes. **/
+int str_hier_prefix(const char *str, const char *prefix, uint sep);
+int str_hier_suffix(const char *str, const char *suffix, uint sep); /** Like @str_hier_prefix(), but for suffixes. **/
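A quick sketch of the component-wise semantics with '/' and '.' as separators:

str_hier_prefix("/usr/local/bin", "/usr/local", '/');    /* 1: whole components match  */
str_hier_prefix("/usr/localhost", "/usr/local", '/');    /* 0: "localhost" != "local"  */
str_hier_suffix("www.example.com", "example.com", '.');  /* 1: useful for domain names */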
#endif
#define STN_MAX ((STN_TYPE)(-1))
static const STN_TYPE S(tops)[STN_DBASES_MASK+1] = { [2] = STN_MAX/2, [8] = STN_MAX/8, [10] = STN_MAX/10, [16] = STN_MAX/16 };
-static const char *S(parse_string)(const char **pp, const uns flags, const uns sign, const uns base, STN_TYPE *num)
+static const char *S(parse_string)(const char **pp, const uint flags, const uint sign, const uint base, STN_TYPE *num)
{
const STN_TYPE max = STN_MAX;
const STN_TYPE top = S(tops)[base];
const STN_TYPE sign_max = ((flags & STN_SIGNED) || sign) ? max/2 + sign : max;
STN_TYPE val = 0;
- uns digits = 0;
+ uint digits = 0;
int overflow = 0;
for (;; (*pp)++)
{
- const uns c = (byte)**pp;
+ const uint c = (byte)**pp;
if (c == '_')
{
break;
}
- const uns d = get_digit(c);
+ const uint d = get_digit(c);
if (d >= base)
break;
{
const char *err = NULL;
- uns sign, base;
+ uint sign, base;
err = str_to_num_init(&str, flags, &sign, &base);
if (!err)
#include <stdio.h>
-static uns str_to_flags(const char *str)
+static uint str_to_flags(const char *str)
{
- uns flags = 0;
+ uint flags = 0;
for(const char *p = str; *p; ++p)
{
switch(*p)
static void convert(const char *str_flags, const char *str_num)
{
- const uns flags = str_to_flags(str_flags);
-
+ const uint flags = str_to_flags(str_flags);
+
const char *next1, *next2;
- uns ux = 1234567890;
+ uint ux = 1234567890;
uintmax_t um = 1234567890;
- const char *err1 = str_to_uns(&ux, str_num, &next1, flags);
+ const char *err1 = str_to_uint(&ux, str_num, &next1, flags);
const char *err2 = str_to_uintmax(&um, str_num, &next2, flags);
if (flags & STN_SIGNED)
# Test for the strtonum module
-## Testing str_to_uns(.) (It is supoposed that uns is 4 bytes integer)
+## Testing str_to_uint(.) (It is supposed that uint is a 4-byte integer)
# 1
Run: ../obj/ucw/strtonum-test '0o+-_' '0o100_000xc' | grep '^i'
#include <ucw/chartype.h>
#include <ucw/strtonum.h>
-static uns detect_base(const char **pp, const uns flags)
+static uint detect_base(const char **pp, const uint flags)
{
if ((flags & STN_BASES0) && **pp == '0')
{
return 0;
}
-static const char *str_to_num_init(const char **pp, const uns flags, uns *sign, uns *base)
+static const char *str_to_num_init(const char **pp, const uint flags, uint *sign, uint *base)
{
ASSERT(*pp);
return err;
}
-static inline uns get_digit(const uns c)
+static inline uint get_digit(const uint c)
{
if (c <= '9')
return c - '0';
else
{
const int a = c & 0xDF;
- unsigned d = a - 'A';
+ uint d = a - 'A';
d &= 0xFF;
d += 10;
return d;
}
}
-#define STN_TYPE uns
-#define STN_SUFFIX uns
+#define STN_TYPE uint
+#define STN_SUFFIX uint
#include <ucw/strtonum-gen.h>
#define STN_TYPE uintmax_t
#ifdef CONFIG_UCW_CLEAN_ABI
#define str_to_uintmax ucw_str_to_uintmax
-#define str_to_uns ucw_str_to_uns
+#define str_to_uint ucw_str_to_uint
#endif
// Set (flags & 0x1f) in the range 1 to 31 to denote the default base of the number
#define STN_USFLAGS (STN_SFLAGS | STN_UNDERSCORE)
#define STN_DECLARE_CONVERTOR(type, suffix) \
-const char *str_to_##suffix(type *num, const char *str, const char **next, const uns flags)
+const char *str_to_##suffix(type *num, const char *str, const char **next, const uint flags)
#define STN_SIGNED_CONVERTOR(type, suffix, usuffix) \
-static inline const char *str_to_##suffix(type *num, const char *str, const char **next, const uns flags) \
+static inline const char *str_to_##suffix(type *num, const char *str, const char **next, const uint flags) \
{ \
return str_to_##usuffix((void *) num, str, next, flags | STN_SIGNED | STN_PLUS | STN_MINUS); \
}
-STN_DECLARE_CONVERTOR(uns, uns);
-STN_SIGNED_CONVERTOR(int, int, uns)
+STN_DECLARE_CONVERTOR(uint, uint);
+STN_SIGNED_CONVERTOR(int, int, uint)
STN_DECLARE_CONVERTOR(uintmax_t, uintmax);
STN_SIGNED_CONVERTOR(intmax_t, intmax, uintmax)
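A usage sketch of the generated converter; per the flag comment above, the low bits of @flags select the default base:

uint val;
const char *next;
const char *err = str_to_uint(&val, "1234km", &next, 10);  /* default base 10 */
/* On success err == NULL, val == 1234 and next points at "km". */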
b = MIN(b, f->burst);
if (b >= 1)
{
- uns dropped = f->drop_count;
+ uint dropped = f->drop_count;
f->bucket = b - 1;
f->drop_count = 0;
return dropped;
for (timestamp_t now = 0; now < 3000; now += 77)
{
int res = tbf_limit(&t, now);
- msg(L_DEBUG, "t=%u result=%d bucket=%f", (uns) now, res, t.bucket);
+ msg(L_DEBUG, "t=%u result=%d bucket=%f", (uint) now, res, t.bucket);
}
return 0;
}
/** A data structure describing a single TBF. **/
struct token_bucket_filter {
double rate; // Number of tokens received per second
- uns burst; // Capacity of the bucket
+ uint burst; // Capacity of the bucket
timestamp_t last_hit; // Internal state...
double bucket;
- uns drop_count;
+ uint drop_count;
};
/** Initialize the bucket. **/
struct timeval tv;
if (gettimeofday(&tv, NULL))
die("gettimeofday() failed: %m");
- len = snprintf(name_buf, TEMP_FILE_NAME_LEN, "%s/%s%u", dir, temp_prefix, (uns) tv.tv_usec);
+ len = snprintf(name_buf, TEMP_FILE_NAME_LEN, "%s/%s%u", dir, temp_prefix, (uint) tv.tv_usec);
if (open_flags)
*open_flags = O_EXCL;
}
#include <ucw/threads.h>
#include <ucw/conf.h>
-uns ucwlib_thread_stack_size = 65556;
+uint ucwlib_thread_stack_size = 65556;
static struct cf_section threads_config = {
CF_ITEMS {
- CF_UNS("DefaultStackSize", &ucwlib_thread_stack_size),
+ CF_UINT("DefaultStackSize", &ucwlib_thread_stack_size),
CF_END
}
};
void ucwlib_lock(void);
void ucwlib_unlock(void);
-extern uns ucwlib_thread_stack_size;
+extern uint ucwlib_thread_stack_size;
#else
*timer = get_timestamp();
}
-uns
+uint
get_timer(timestamp_t *timer)
{
timestamp_t t = *timer;
return MIN(*timer-t, ~0U);
}
-uns
+uint
switch_timer(timestamp_t *oldt, timestamp_t *newt)
{
*newt = get_timestamp();
/* time-timer.c */
void init_timer(timestamp_t *timer); /** Initialize a timer. **/
-uns get_timer(timestamp_t *timer); /** Get the number of milliseconds since last init/get of a timer. **/
-uns switch_timer(timestamp_t *oldt, timestamp_t *newt); /** Stop ticking of one timer and resume another. **/
+uint get_timer(timestamp_t *timer); /** Get the number of milliseconds since last init/get of a timer. **/
+uint switch_timer(timestamp_t *oldt, timestamp_t *newt); /** Stop ticking of one timer and resume another. **/
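A minimal timing sketch (do_work() is a hypothetical placeholder for the measured workload):

timestamp_t timer;
init_timer(&timer);
do_work();                          /* hypothetical workload */
uint elapsed = get_timer(&timer);   /* milliseconds; also restarts the timer */
msg(L_INFO, "Took %u ms", elapsed);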
#endif
#define MAX_STRINGS 200
-static uns count;
+static uint count;
static char *str[MAX_STRINGS];
static char *
gen_string(void)
{
- uns l = random_max(11);
+ uint l = random_max(11);
char *s = xmalloc(l + 1);
- for (uns i = 0; i < l; i++)
+ for (uint i = 0; i < l; i++)
s[i] = random_max('z' - 'a') + 'a';
s[l] = 0;
return s;
char *s;
again:
s = gen_string();
- for (uns i = 0; i < count; i++)
+ for (uint i = 0; i < count; i++)
if (!strcmp(s, str[i]))
{
xfree(s);
{
if (!count)
return;
- uns i = random_max(count);
+ uint i = random_max(count);
DBG("remove '%s'", str[i]);
random_remove(str[i]);
random_audit();
}
else
{
- uns i = random_max(count);
+ uint i = random_max(count);
DBG("positive find '%s'", str[i]);
if (random_find(str[i]) != str[i])
ASSERT(0);
{
DBG("reset");
random_cleanup();
- for (uns i = 0; i < count; i++)
+ for (uint i = 0; i < count; i++)
xfree(str[i]);
count = 0;
random_init();
random_test(void)
{
random_init();
- for (uns i = 0; i < 10000; i++)
+ for (uint i = 0; i < 10000; i++)
{
int r = random_max(1000);
if ((r -= 300) < 0)
* TRIE_WANT_CLEANUP cleanup()
*
* TRIE_WANT_FIND node *find(char *str)
- * TRIE_WANT_FIND_BUF node *find_buf(byte *ptr, uns len)
+ * TRIE_WANT_FIND_BUF node *find_buf(byte *ptr, uint len)
* TRIE_WANT_ADD add(*node)
* TRIE_WANT_REPLACE node *replace(*node)
* TRIE_WANT_DELETE delete(char *str)
- * TRIE_WANT_DELETE_BUF delete_buf(byte *ptr, uns len)
+ * TRIE_WANT_DELETE_BUF delete_buf(byte *ptr, uint len)
* TRIE_WANT_REMOVE remove(*node)
*
* TRIE_WANT_AUDIT audit()
enum { P(bucket_rank) = TRIE_BUCKET_RANK };
#define TRIE_COMPILE_ASSERT(x, y) typedef char TRIE_PREFIX(x##_compile_assert)[!!(y)-1]
-TRIE_COMPILE_ASSERT(len_type, sizeof(P(len_t)) <= sizeof(uns));
+TRIE_COMPILE_ASSERT(len_type, sizeof(P(len_t)) <= sizeof(uint));
TRIE_COMPILE_ASSERT(hash_threshold, TRIE_HASH_THRESHOLD >= 2);
TRIE_COMPILE_ASSERT(bucket_size, TRIE_BUCKET_RANK >= 1 && TRIE_BUCKET_MASK < sizeof(void *));
{
TRIE_DBG("Initializing");
bzero(&T, sizeof(T));
- for (uns i = 0; i < ARRAY_SIZE(T.epool); i++)
+ for (uint i = 0; i < ARRAY_SIZE(T.epool); i++)
{
- uns size = sizeof(struct P(edge)) + i * sizeof(void *);
+ uint size = sizeof(struct P(edge)) + i * sizeof(void *);
T.epool[i] = ep_new(size, MAX(TRIE_ELTPOOL_SIZE / size, 1));
}
- for (uns i = 0; i < ARRAY_SIZE(T.hpool); i++)
+ for (uint i = 0; i < ARRAY_SIZE(T.hpool); i++)
{
- uns size = sizeof(struct P(edge)) + ((sizeof(void *) << TRIE_BUCKET_RANK) << i);
+ uint size = sizeof(struct P(edge)) + ((sizeof(void *) << TRIE_BUCKET_RANK) << i);
T.hpool[i] = ep_new(size, MAX(TRIE_ELTPOOL_SIZE / size, 1));
}
}
P(cleanup)(TA)
{
TRIE_DBG("Cleaning up");
- for (uns i = 0; i < ARRAY_SIZE(T.epool); i++)
+ for (uint i = 0; i < ARRAY_SIZE(T.epool); i++)
ep_delete(T.epool[i]);
- for (uns i = 0; i < ARRAY_SIZE(T.hpool); i++)
+ for (uint i = 0; i < ARRAY_SIZE(T.hpool); i++)
ep_delete(T.hpool[i]);
}
#endif
static struct P(edge) *
-P(edge_alloc)(TAC uns flags)
+P(edge_alloc)(TAC uint flags)
{
struct P(edge) *edge;
if (flags & TRIE_FLAG_HASH)
{
- uns rank = 0, deg = flags & TRIE_FLAG_DEG;
+ uint rank = 0, deg = flags & TRIE_FLAG_DEG;
while ((TRIE_BUCKET_MASK << rank) < deg * 2) // 25-50% density
rank++;
ASSERT(rank < ARRAY_SIZE(T.hpool));
return TRIE_NODE_KEY((*node));
}
-static inline uns
+static inline uint
P(str_len)(P(node_t) *node)
{
return TRIE_NODE_LEN((*node));
}
-static inline uns
-P(str_char)(byte *ptr, uns len UNUSED, uns pos)
+static inline uint
+P(str_char)(byte *ptr, uint len UNUSED, uint pos)
{
#ifndef TRIE_REV
return ptr[pos];
}
static inline byte *
-P(str_prefix)(byte *ptr, uns len UNUSED, uns prefix UNUSED)
+P(str_prefix)(byte *ptr, uint len UNUSED, uint prefix UNUSED)
{
#ifndef TRIE_REV
return ptr;
}
static inline byte *
-P(str_suffix)(byte *ptr, uns len UNUSED, uns suffix UNUSED)
+P(str_suffix)(byte *ptr, uint len UNUSED, uint suffix UNUSED)
{
#ifndef TRIE_REV
return ptr + len - suffix;
#endif
}
-static inline uns
-P(common_prefix)(byte *ptr1, uns len1, byte *ptr2, uns len2)
+static inline uint
+P(common_prefix)(byte *ptr1, uint len1, byte *ptr2, uint len2)
{
- uns l = MIN(len1, len2), i;
+ uint l = MIN(len1, len2), i;
for (i = 0; i < l; i++)
if (P(str_char)(ptr1, len1, i) != P(str_char)(ptr2, len2, i))
break;
/*** Sons ***/
-static inline uns
-P(hash_func)(uns c)
+static inline uint
+P(hash_func)(uint c)
{
return hash_u32(c) >> 16;
}
static inline struct P(edge) **
-P(hash_find)(struct P(edge) *edge, uns c)
+P(hash_find)(struct P(edge) *edge, uint c)
{
- uns mask = (TRIE_BUCKET_SIZE << edge->hash_rank) - 1;
- for (uns i = P(hash_func)(c); ; i++)
+ uint mask = (TRIE_BUCKET_SIZE << edge->hash_rank) - 1;
+ for (uint i = P(hash_func)(c); ; i++)
if (((i &= mask) & TRIE_BUCKET_MASK) && (uintptr_t)edge->son[i] != 1)
if (!edge->son[i])
return NULL;
}
static inline struct P(edge) **
-P(hash_insert)(struct P(edge) *edge, uns c)
+P(hash_insert)(struct P(edge) *edge, uint c)
{
- uns mask = (TRIE_BUCKET_SIZE << edge->hash_rank) - 1;
- for (uns i = P(hash_func)(c); ; i++)
+ uint mask = (TRIE_BUCKET_SIZE << edge->hash_rank) - 1;
+ for (uint i = P(hash_func)(c); ; i++)
if (((i &= mask) & TRIE_BUCKET_MASK) && (uintptr_t)edge->son[i] <= 1)
{
edge->hash_deleted -= (uintptr_t)edge->son[i];
#ifdef TRIE_WANT_DO_DELETE
static inline void
-P(hash_delete)(struct P(edge) *edge, uns c)
+P(hash_delete)(struct P(edge) *edge, uint c)
{
- uns mask = (TRIE_BUCKET_SIZE << edge->hash_rank) - 1;
- for (uns i = P(hash_func)(c); ; i++)
+ uint mask = (TRIE_BUCKET_SIZE << edge->hash_rank) - 1;
+ for (uint i = P(hash_func)(c); ; i++)
if (((i &= mask) & TRIE_BUCKET_MASK) && (uintptr_t)edge->son[i] > 1 &&
((byte *)&edge->son[i & ~TRIE_BUCKET_MASK])[i & TRIE_BUCKET_MASK] == c)
{
#define TRIE_HASH_FOR_ALL(xedge, xtrans, xson) do { \
struct P(edge) *_edge = (xedge); \
- for (uns _i = (TRIE_BUCKET_SIZE << _edge->hash_rank); _i--; ) \
+ for (uint _i = (TRIE_BUCKET_SIZE << _edge->hash_rank); _i--; ) \
if ((_i & TRIE_BUCKET_MASK) && (uintptr_t)_edge->son[_i] > 1) { \
- UNUSED uns xtrans = ((byte *)&_edge->son[_i & ~TRIE_BUCKET_MASK])[_i & TRIE_BUCKET_MASK]; \
+ UNUSED uint xtrans = ((byte *)&_edge->son[_i & ~TRIE_BUCKET_MASK])[_i & TRIE_BUCKET_MASK]; \
UNUSED struct P(edge) *xson = _edge->son[_i]; \
do {
#define TRIE_HASH_END_FOR }while(0);}}while(0)
/*** Finding/inserting/deleting sons ***/
static struct P(edge) **
-P(son_find)(struct P(edge) *edge, uns c)
+P(son_find)(struct P(edge) *edge, uint c)
{
if (edge->flags & TRIE_FLAG_HASH)
return P(hash_find)(edge, c);
else
- for (uns i = edge->flags & TRIE_FLAG_DEG; i--; )
+ for (uint i = edge->flags & TRIE_FLAG_DEG; i--; )
if (edge->trans[i] == c)
return &edge->son[i];
return NULL;
}
static struct P(edge) **
-P(son_insert)(TAC struct P(edge) **ref, uns c)
+P(son_insert)(TAC struct P(edge) **ref, uint c)
{
struct P(edge) *old = *ref, *edge;
- uns deg = old->flags & TRIE_FLAG_DEG;
+ uint deg = old->flags & TRIE_FLAG_DEG;
if (old->flags & TRIE_FLAG_HASH)
{
old->flags++;
edge = P(edge_alloc)(TTC (old->flags + 1) | TRIE_FLAG_HASH);
edge->node = old->node;
edge->len = old->len;
- for (uns i = 0; i < deg; i++)
+ for (uint i = 0; i < deg; i++)
*P(hash_insert)(edge, old->trans[i]) = old->son[i];
P(edge_free)(TTC old);
}
#ifdef TRIE_WANT_DO_DELETE
static void
-P(son_delete)(TAC struct P(edge) **ref, uns c)
+P(son_delete)(TAC struct P(edge) **ref, uint c)
{
struct P(edge) *old = *ref, *edge;
- uns deg = old->flags & TRIE_FLAG_DEG;
+ uint deg = old->flags & TRIE_FLAG_DEG;
ASSERT(deg);
if (old->flags & TRIE_FLAG_HASH)
{
{
TRIE_DBG("Reducing hash table to array");
edge = P(edge_alloc)(TTC old->flags & ~TRIE_FLAG_HASH);
- uns k = 0;
+ uint k = 0;
TRIE_HASH_FOR_ALL(old, trans, son)
edge->trans[k] = trans;
edge->son[k] = son;
{
TRIE_DBG("Reducing array");
edge = P(edge_alloc)(TTC old->flags - 1);
- uns j = 0;
- for (uns i = 0; i < deg; i++)
+ uint j = 0;
+ for (uint i = 0; i < deg; i++)
if (old->trans[i] != c)
{
edge->trans[j] = old->trans[i];
if (!(edge->flags & TRIE_FLAG_HASH))
return edge->son[0];
else
- for (uns i = 0; ; i++)
+ for (uint i = 0; ; i++)
if ((i & TRIE_BUCKET_MASK) && (uintptr_t)edge->son[i] > 1)
return edge->son[i];
}
#ifdef TRIE_WANT_DO_FIND
static struct P(edge) *
-P(do_find)(TAC byte *ptr, uns len)
+P(do_find)(TAC byte *ptr, uint len)
{
TRIE_DBG("do_find('%.*s')", len, ptr);
struct P(edge) **ref = &T.root, *edge;
#endif
static struct P(edge) *
-P(do_lookup)(TAC byte *ptr, uns len)
+P(do_lookup)(TAC byte *ptr, uint len)
{
TRIE_DBG("do_lookup('%.*s')", len, ptr);
struct P(edge) **ref, *edge, *leaf, *newleaf;
- uns prefix, elen, trans, pos;
+ uint prefix, elen, trans, pos;
byte *eptr;
if (!(edge = T.root))
#ifdef TRIE_WANT_DO_DELETE
static P(node_t) *
-P(do_delete)(TAC byte *ptr, uns len)
+P(do_delete)(TAC byte *ptr, uint len)
{
TRIE_DBG("do_delete('%.*s')", len, ptr);
struct P(edge) **ref = &T.root, **pref = NULL, *edge, *parent, *leaf, *pold = NULL;
}
P(node_t) *node = edge->node;
- uns deg = edge->flags & TRIE_FLAG_DEG;
+ uint deg = edge->flags & TRIE_FLAG_DEG;
if (!deg)
{
#ifdef TRIE_WANT_FIND_BUF
static inline P(node_t) *
-P(find_buf)(TAC byte *ptr, uns len)
+P(find_buf)(TAC byte *ptr, uint len)
{
struct P(edge) *edge = P(do_find)(TTC ptr, len);
return edge ? edge->node : NULL;
#ifdef TRIE_WANT_DELETE_BUF
static inline P(node_t) *
-P(delete_buf)(TAC byte *ptr, uns len)
+P(delete_buf)(TAC byte *ptr, uint len)
{
return P(do_delete)(TTC ptr, len);
}
do \
{ \
byte *_ptr = (xptr); \
- uns _len = (xlen); \
+ uint _len = (xlen); \
struct px##trie *_trie = (xtrie); \
struct px##edge *xedge, **_ref; \
if (!(xedge = _trie->root)) \
xedge = *_ref; \
if (!(xedge->flags & TRIE_FLAG_NODE)) \
xedge = xedge->leaf; \
- uns _prefix = px##common_prefix(_ptr, _len, px##str_get(xedge->node), xedge->len); \
+ uint _prefix = px##common_prefix(_ptr, _len, px##str_get(xedge->node), xedge->len); \
for (_ref = &_trie->root; _ref && ((xedge = *_ref)->len <= _prefix || _prefix == _len); \
_ref = (xedge->len < _prefix) ? px##son_find(xedge, px##str_char(_ptr, _len, xedge->len)) : NULL) \
{
#define TRIE_FOR_SUBTREE_EDGES(px, xstart, xedge) \
do \
{ \
- struct { struct px##edge *edge; uns pos; } \
+ struct { struct px##edge *edge; uint pos; } \
*_sbuf = alloca(sizeof(*_sbuf) * 16), \
*_sptr = _sbuf, *_send = _sbuf + 16; \
struct px##edge *_next = (xstart), *xedge; \
{ \
if (_sptr == _send) \
{ \
- uns stack_size = _sptr - _sbuf; \
+ uint stack_size = _sptr - _sbuf; \
_sptr = alloca(sizeof(*_sptr) * (stack_size * 2)); \
memcpy(_sptr, _sbuf, sizeof(*_sptr) * stack_size); \
_sbuf = _sptr; \
break; \
} \
_next = (--_sptr)->edge; \
- uns pos = --(_sptr->pos); \
- uns flags = _next->flags; \
+ uint pos = --(_sptr->pos); \
+ uint flags = _next->flags; \
_next = _next->son[pos]; \
if (pos) \
_sptr++; \
static void
P(audit)(TA)
{
- uns count = 0;
+ uint count = 0;
TRIE_FOR_ALL_EDGES(TRIE_PREFIX(), &T, edge)
{
ASSERT(edge);
- uns deg = edge->flags & TRIE_FLAG_DEG;
+ uint deg = edge->flags & TRIE_FLAG_DEG;
ASSERT(edge->node);
struct P(edge) * leaf = (edge->flags & TRIE_FLAG_NODE) ? edge : edge->leaf;
if (leaf != edge)
if (edge->flags & TRIE_FLAG_HASH)
{
ASSERT(deg > 1 && deg <= 256);
- uns count = 0, deleted = 0;
- for (uns i = TRIE_BUCKET_SIZE << edge->hash_rank; i--; )
+ uint count = 0, deleted = 0;
+ for (uint i = TRIE_BUCKET_SIZE << edge->hash_rank; i--; )
if (i & TRIE_BUCKET_MASK)
if ((uintptr_t)edge->son[i] == 1)
deleted++;
else
{
ASSERT(deg <= TRIE_HASH_THRESHOLD);
- for (uns i = 0; i < deg; i++)
+ for (uint i = 0; i < deg; i++)
ASSERT(edge->son[i]->len > edge->len);
}
count++;
P(stats)(TAC struct P(stats) *stats)
{
bzero(stats, sizeof(*stats));
- for (uns i = 0; i < ARRAY_SIZE(T.epool); i++)
+ for (uint i = 0; i < ARRAY_SIZE(T.epool); i++)
stats->small_size += ep_total_size(T.epool[i]);
- for (uns i = 0; i < ARRAY_SIZE(T.hpool); i++)
+ for (uint i = 0; i < ARRAY_SIZE(T.hpool); i++)
stats->hash_size += ep_total_size(T.hpool[i]);
stats->total_size = stats->small_size + stats->hash_size + sizeof(T);
}
/* Big-endian format */
#if defined(CPU_ALLOW_UNALIGNED) && defined(CPU_BIG_ENDIAN)
-static inline uns get_u16_be(const void *p) { return *(u16 *)p; } /** Read 16-bit integer value from an unaligned sequence of 2 bytes (big-endian version). **/
-static inline u32 get_u32_be(const void *p) { return *(u32 *)p; } /** Read 32-bit integer value from an unaligned sequence of 4 bytes (big-endian version). **/
-static inline u64 get_u64_be(const void *p) { return *(u64 *)p; } /** Read 64-bit integer value from an unaligned sequence of 8 bytes (big-endian version). **/
-static inline void put_u16_be(void *p, uns x) { *(u16 *)p = x; } /** Write 16-bit integer value to an unaligned sequence of 2 bytes (big-endian version). **/
-static inline void put_u32_be(void *p, u32 x) { *(u32 *)p = x; } /** Write 32-bit integer value to an unaligned sequence of 4 bytes (big-endian version). **/
-static inline void put_u64_be(void *p, u64 x) { *(u64 *)p = x; } /** Write 64-bit integer value to an unaligned sequence of 8 bytes (big-endian version). **/
+static inline uint get_u16_be(const void *p) { return *(u16 *)p; } /** Read 16-bit integer value from an unaligned sequence of 2 bytes (big-endian version). **/
+static inline u32 get_u32_be(const void *p) { return *(u32 *)p; } /** Read 32-bit integer value from an unaligned sequence of 4 bytes (big-endian version). **/
+static inline u64 get_u64_be(const void *p) { return *(u64 *)p; } /** Read 64-bit integer value from an unaligned sequence of 8 bytes (big-endian version). **/
+static inline void put_u16_be(void *p, uint x) { *(u16 *)p = x; } /** Write 16-bit integer value to an unaligned sequence of 2 bytes (big-endian version). **/
+static inline void put_u32_be(void *p, u32 x) { *(u32 *)p = x; } /** Write 32-bit integer value to an unaligned sequence of 4 bytes (big-endian version). **/
+static inline void put_u64_be(void *p, u64 x) { *(u64 *)p = x; } /** Write 64-bit integer value to an unaligned sequence of 8 bytes (big-endian version). **/
#else
-static inline uns get_u16_be(const void *p)
+static inline uint get_u16_be(const void *p)
{
const byte *c = (const byte *)p;
return (c[0] << 8) | c[1];
{
return ((u64) get_u32_be(p) << 32) | get_u32_be((const byte *)p+4);
}
-static inline void put_u16_be(void *p, uns x)
+static inline void put_u16_be(void *p, uint x)
{
byte *c = (byte *)p;
c[0] = x >> 8;
/* Little-endian format */
#if defined(CPU_ALLOW_UNALIGNED) && !defined(CPU_BIG_ENDIAN)
-static inline uns get_u16_le(const void *p) { return *(u16 *)p; } /** Read 16-bit integer value from an unaligned sequence of 2 bytes (little-endian version). **/
-static inline u32 get_u32_le(const void *p) { return *(u32 *)p; } /** Read 32-bit integer value from an unaligned sequence of 4 bytes (little-endian version). **/
-static inline u64 get_u64_le(const void *p) { return *(u64 *)p; } /** Read 64-bit integer value from an unaligned sequence of 8 bytes (little-endian version). **/
-static inline void put_u16_le(void *p, uns x) { *(u16 *)p = x; } /** Write 16-bit integer value to an unaligned sequence of 2 bytes (little-endian version). **/
-static inline void put_u32_le(void *p, u32 x) { *(u32 *)p = x; } /** Write 32-bit integer value to an unaligned sequence of 4 bytes (little-endian version). **/
-static inline void put_u64_le(void *p, u64 x) { *(u64 *)p = x; } /** Write 64-bit integer value to an unaligned sequence of 8 bytes (little-endian version). **/
+static inline uint get_u16_le(const void *p) { return *(u16 *)p; } /** Read 16-bit integer value from an unaligned sequence of 2 bytes (little-endian version). **/
+static inline u32 get_u32_le(const void *p) { return *(u32 *)p; } /** Read 32-bit integer value from an unaligned sequence of 4 bytes (little-endian version). **/
+static inline u64 get_u64_le(const void *p) { return *(u64 *)p; } /** Read 64-bit integer value from an unaligned sequence of 8 bytes (little-endian version). **/
+static inline void put_u16_le(void *p, uint x) { *(u16 *)p = x; } /** Write 16-bit integer value to an unaligned sequence of 2 bytes (little-endian version). **/
+static inline void put_u32_le(void *p, u32 x) { *(u32 *)p = x; } /** Write 32-bit integer value to an unaligned sequence of 4 bytes (little-endian version). **/
+static inline void put_u64_le(void *p, u64 x) { *(u64 *)p = x; } /** Write 64-bit integer value to an unaligned sequence of 8 bytes (little-endian version). **/
#else
-static inline uns get_u16_le(const void *p)
+static inline uint get_u16_le(const void *p)
{
const byte *c = p;
return c[0] | (c[1] << 8);
{
return get_u32_le(p) | ((u64) get_u32_le((const byte *)p+4) << 32);
}
-static inline void put_u16_le(void *p, uns x)
+static inline void put_u16_le(void *p, uint x)
{
byte *c = p;
c[0] = x;
#ifdef CPU_BIG_ENDIAN
-static inline uns get_u16(const void *p) { return get_u16_be(p); } /** Read 16-bit integer value from an unaligned sequence of 2 bytes (native byte-order). **/
-static inline u32 get_u32(const void *p) { return get_u32_be(p); } /** Read 32-bit integer value from an unaligned sequence of 4 bytes (native byte-order). **/
-static inline u64 get_u64(const void *p) { return get_u64_be(p); } /** Read 64-bit integer value from an unaligned sequence of 8 bytes (native byte-order). **/
-static inline u64 get_u40(const void *p) { return get_u40_be(p); } /** Read 40-bit integer value from an unaligned sequence of 5 bytes (native byte-order). **/
-static inline void put_u16(void *p, uns x) { return put_u16_be(p, x); } /** Write 16-bit integer value to an unaligned sequence of 2 bytes (native byte-order). **/
-static inline void put_u32(void *p, u32 x) { return put_u32_be(p, x); } /** Write 32-bit integer value to an unaligned sequence of 4 bytes (native byte-order). **/
-static inline void put_u64(void *p, u64 x) { return put_u64_be(p, x); } /** Write 64-bit integer value to an unaligned sequence of 8 bytes (native byte-order). **/
-static inline void put_u40(void *p, u64 x) { return put_u40_be(p, x); } /** Write 40-bit integer value to an unaligned sequence of 5 bytes (native byte-order). **/
+static inline uint get_u16(const void *p) { return get_u16_be(p); } /** Read 16-bit integer value from an unaligned sequence of 2 bytes (native byte-order). **/
+static inline u32 get_u32(const void *p) { return get_u32_be(p); } /** Read 32-bit integer value from an unaligned sequence of 4 bytes (native byte-order). **/
+static inline u64 get_u64(const void *p) { return get_u64_be(p); } /** Read 64-bit integer value from an unaligned sequence of 8 bytes (native byte-order). **/
+static inline u64 get_u40(const void *p) { return get_u40_be(p); } /** Read 40-bit integer value from an unaligned sequence of 5 bytes (native byte-order). **/
+static inline void put_u16(void *p, uint x) { return put_u16_be(p, x); } /** Write 16-bit integer value to an unaligned sequence of 2 bytes (native byte-order). **/
+static inline void put_u32(void *p, u32 x) { return put_u32_be(p, x); } /** Write 32-bit integer value to an unaligned sequence of 4 bytes (native byte-order). **/
+static inline void put_u64(void *p, u64 x) { return put_u64_be(p, x); } /** Write 64-bit integer value to an unaligned sequence of 8 bytes (native byte-order). **/
+static inline void put_u40(void *p, u64 x) { return put_u40_be(p, x); } /** Write 40-bit integer value to an unaligned sequence of 5 bytes (native byte-order). **/
#else
-static inline uns get_u16(const void *p) { return get_u16_le(p); }
+static inline uint get_u16(const void *p) { return get_u16_le(p); }
static inline u32 get_u32(const void *p) { return get_u32_le(p); }
static inline u64 get_u64(const void *p) { return get_u64_le(p); }
static inline u64 get_u40(const void *p) { return get_u40_le(p); }
-static inline void put_u16(void *p, uns x) { return put_u16_le(p, x); }
+static inline void put_u16(void *p, uint x) { return put_u16_le(p, x); }
static inline void put_u32(void *p, u32 x) { return put_u32_le(p, x); }
static inline void put_u64(void *p, u64 x) { return put_u64_le(p, x); }
static inline void put_u40(void *p, u64 x) { return put_u40_le(p, x); }
/* Just for completeness */
-static inline uns get_u8(const void *p) { return *(const byte *)p; } /** Read 8-bit integer value. **/
-static inline void put_u8(void *p, uns x) { *(byte *)p = x; } /** Write 8-bit integer value. **/
+static inline uint get_u8(const void *p) { return *(const byte *)p; } /** Read 8-bit integer value. **/
+static inline void put_u8(void *p, uint x) { *(byte *)p = x; } /** Write 8-bit integer value. **/
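A short sketch of the byte-order helpers above:

byte buf[4];
put_u32_be(buf, 0xdeadbeef);   /* buf = { 0xde, 0xad, 0xbe, 0xef } */
u32 le = get_u32_le(buf);      /* 0xefbeadde: same bytes, opposite order */
u32 na = get_u32(buf);         /* whichever of the two the host uses */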
/* Backward compatibility macros */
#include <ucw/lib.h>
#include <ucw/unicode.h>
-uns
+uint
utf8_strlen(const byte *str)
{
- uns len = 0;
+ uint len = 0;
while (*str)
{
UTF8_SKIP(str);
return len;
}
-uns
-utf8_strnlen(const byte *str, uns n)
+uint
+utf8_strnlen(const byte *str, uint n)
{
- uns len = 0;
+ uint len = 0;
const byte *end = str + n;
while (str < end)
{
#undef F
};
- uns func = ~0U;
+ uint func = ~0U;
if (argc > 1)
- for (uns i = 0; i < ARRAY_SIZE(names); i++)
+ for (uint i = 0; i < ARRAY_SIZE(names); i++)
if (!strcasecmp(names[i], argv[1]))
func = i;
if (!~func)
if (func < FUNC_UTF8_PUT)
{
byte *p = buf, *q = buf, *last;
- uns u;
+ uint u;
bzero(buf, sizeof(buf));
while (scanf("%x", &u) == 1)
*q++ = u;
}
else
{
- uns u, i=0;
+ uint u, i=0;
while (scanf("%x", &u) == 1)
{
byte *p = buf, *q = buf;
* Encode a value from the range `[0, 0xFFFF]`
* (basic multilingual plane); up to 3 bytes needed (RFC2279).
**/
-static inline byte *utf8_put(byte *p, uns u)
+static inline byte *utf8_put(byte *p, uint u)
{
if (u < 0x80)
*p++ = u;
 * Encode a value from the range `[0, 0x7FFFFFFF]`
 * (superset of Unicode 4.0); up to 6 bytes needed (RFC2279).
**/
-static inline byte *utf8_32_put(byte *p, uns u)
+static inline byte *utf8_32_put(byte *p, uint u)
{
if (u < 0x80)
*p++ = u;
* Decode a value from the range `[0, 0xFFFF]` (basic multilingual plane)
* or return @repl if the encoding has been corrupted.
**/
-static inline byte *utf8_get_repl(const byte *p, uns *uu, uns repl)
+static inline byte *utf8_get_repl(const byte *p, uint *uu, uint repl)
{
- uns u = *p++;
+ uint u = *p++;
if (u < 0x80)
;
else if (unlikely(u < 0xc0))
* Decode a value from the range `[0, 0x7FFFFFFF]`
* or return @repl if the encoding has been corrupted.
**/
-static inline byte *utf8_32_get_repl(const byte *p, uns *uu, uns repl)
+static inline byte *utf8_32_get_repl(const byte *p, uint *uu, uint repl)
{
- uns u = *p++;
+ uint u = *p++;
if (u < 0x80)
;
else if (unlikely(u < 0xc0))
* Decode a value from the range `[0, 0xFFFF]` (basic multilingual plane)
* or return `UNI_REPLACEMENT` if the encoding has been corrupted.
**/
-static inline byte *utf8_get(const byte *p, uns *uu)
+static inline byte *utf8_get(const byte *p, uint *uu)
{
return utf8_get_repl(p, uu, UNI_REPLACEMENT);
}
* Decode a value from the range `[0, 0x7FFFFFFF]`
* or return `UNI_REPLACEMENT` if the encoding has been corrupted.
**/
-static inline byte *utf8_32_get(const byte *p, uns *uu)
+static inline byte *utf8_32_get(const byte *p, uint *uu)
{
return utf8_32_get_repl(p, uu, UNI_REPLACEMENT);
}
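An encode/decode round trip as a sketch; U+010D needs two bytes:

byte buf[8], *p = buf;
uint u;
p = utf8_put(buf, 0x10D);      /* writes 0xC4 0x8D, returns buf + 2 */
*p = 0;
utf8_get(buf, &u);             /* u == 0x10D again */
ASSERT(utf8_space(0x10D) == 2);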
#define UTF8_SKIP(p) do { \
- uns c = *p++; \
+ uint c = *p++; \
if (c >= 0xc0) \
while (c & 0x40 && *p >= 0x80 && *p < 0xc0) \
p++, c <<= 1; \
/**
* Return the number of bytes needed to encode a given value from the range `[0, 0x7FFFFFFF]` to UTF-8.
**/
-static inline uns utf8_space(uns u)
+static inline uint utf8_space(uint u)
{
if (u < 0x80)
return 1;
/**
* Compute the length of a single UTF-8 character from its first byte. The encoding must be valid.
**/
-static inline uns utf8_encoding_len(uns c)
+static inline uint utf8_encoding_len(uint c)
{
if (c < 0x80)
return 1;
 * Encode a UTF-16LE character from the range `[0, 0xD7FF]` or `[0xE000,0x11FFFF]`;
* up to 4 bytes needed.
**/
-static inline void *utf16_le_put(void *p, uns u)
+static inline void *utf16_le_put(void *p, uint u)
{
if (u < 0xd800 || (u < 0x10000 && u >= 0xe000))
{
* Encode a UTF-16BE character from the range `[0, 0xD7FF]` or `[0xE000,0x11FFFF]`;
* up to 4 bytes needed.
**/
-static inline void *utf16_be_put(void *p, uns u)
+static inline void *utf16_be_put(void *p, uint u)
{
if (u < 0xd800 || (u < 0x10000 && u >= 0xe000))
{
 * Decode a UTF-16LE character from the range `[0, 0xD7FF]` or `[0xE000,0x11FFFF]`
* or return @repl if the encoding has been corrupted.
**/
-static inline void *utf16_le_get_repl(const void *p, uns *uu, uns repl)
+static inline void *utf16_le_get_repl(const void *p, uint *uu, uint repl)
{
- uns u = get_u16_le(p), x, y;
+ uint u = get_u16_le(p), x, y;
x = u - 0xd800;
if (x < 0x800)
if (x < 0x400 && (y = get_u16_le(p + 2) - 0xdc00) < 0x400)
 * Decode a UTF-16BE character from the range `[0, 0xD7FF]` or `[0xE000,0x11FFFF]`
* or return @repl if the encoding has been corrupted.
**/
-static inline void *utf16_be_get_repl(const void *p, uns *uu, uns repl)
+static inline void *utf16_be_get_repl(const void *p, uint *uu, uint repl)
{
- uns u = get_u16_be(p), x, y;
+ uint u = get_u16_be(p), x, y;
x = u - 0xd800;
if (x < 0x800)
if (x < 0x400 && (y = get_u16_be(p + 2) - 0xdc00) < 0x400)
 * Decode a UTF-16LE character from the range `[0, 0xD7FF]` or `[0xE000,0x11FFFF]`
* or return `UNI_REPLACEMENT` if the encoding has been corrupted.
**/
-static inline void *utf16_le_get(const void *p, uns *uu)
+static inline void *utf16_le_get(const void *p, uint *uu)
{
return utf16_le_get_repl(p, uu, UNI_REPLACEMENT);
}
 * Decode a UTF-16BE character from the range `[0, 0xD7FF]` or `[0xE000,0x11FFFF]`
* or return `UNI_REPLACEMENT` if the encoding has been corrupted.
**/
-static inline void *utf16_be_get(const void *p, uns *uu)
+static inline void *utf16_be_get(const void *p, uint *uu)
{
return utf16_be_get_repl(p, uu, UNI_REPLACEMENT);
}
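A round trip through a surrogate pair as a sketch; U+1F600 lies above the basic plane, so it takes 4 bytes:

byte buf[4];
uint u;
utf16_le_put(buf, 0x1F600);    /* surrogate pair 0xD83D 0xDE00 -> bytes 3D D8 00 DE */
utf16_le_get(buf, &u);         /* u == 0x1F600 again */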
 * character is a surrogate, an ASCII or Latin-1 control character other than tab,
 * or if it lies outside the basic plane. In all other cases, it acts as the identity.
**/
-static inline uns unicode_sanitize_char(uns u)
+static inline uint unicode_sanitize_char(uint u)
{
if (u >= 0x10000 || // We don't accept anything outside the basic plane
u >= 0xd800 && u < 0xf900 || // nor do we accept surrogates
* Count the number of Unicode characters in a zero-terminated UTF-8 string.
* Returned value for corrupted encoding is undefined, but is never greater than strlen().
**/
-uns utf8_strlen(const byte *str);
+uint utf8_strlen(const byte *str);
/**
* Same as @utf8_strlen(), but returns at most @n characters.
**/
-uns utf8_strnlen(const byte *str, uns n);
+uint utf8_strnlen(const byte *str, uint n);
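For instance, a 4-byte UTF-8 buffer holding "čaj" counts as 3 characters:

const byte name[] = "\xc4\x8d" "aj";    /* "čaj", 4 bytes of UTF-8 */
ASSERT(utf8_strlen(name) == 3);
ASSERT(utf8_strnlen(name, 4) == 3);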
#endif
/* Configuration */
-static uns url_ignore_spaces;
-static uns url_ignore_underflow;
+static uint url_ignore_spaces;
+static uint url_ignore_underflow;
static char *url_component_separators = "";
-static uns url_min_repeat_count = 0x7fffffff;
-static uns url_max_repeat_length = 0;
-static uns url_max_occurences = ~0U;
+static uint url_min_repeat_count = 0x7fffffff;
+static uint url_max_repeat_length = 0;
+static uint url_max_occurences = ~0U;
#ifndef TEST
static struct cf_section url_config = {
CF_ITEMS {
- CF_UNS("IgnoreSpaces", &url_ignore_spaces),
- CF_UNS("IgnoreUnderflow", &url_ignore_underflow),
+ CF_UINT("IgnoreSpaces", &url_ignore_spaces),
+ CF_UINT("IgnoreUnderflow", &url_ignore_underflow),
CF_STRING("ComponentSeparators", &url_component_separators),
- CF_UNS("MinRepeatCount", &url_min_repeat_count),
- CF_UNS("MaxRepeatLength", &url_max_repeat_length),
- CF_UNS("MaxOccurences", &url_max_occurences),
+ CF_UINT("MinRepeatCount", &url_min_repeat_count),
+ CF_UINT("MaxRepeatLength", &url_max_repeat_length),
+ CF_UINT("MaxOccurences", &url_max_occurences),
CF_END
}
};
/* Escaping and de-escaping */
-static uns
-enhex(uns x)
+static uint
+enhex(uint x)
{
return (x<10) ? (x + '0') : (x - 10 + 'A');
}
return URL_ERR_TOO_LONG;
if (*s == '%')
{
- unsigned int val;
+ uint val;
if (!Cxdigit(s[1]) || !Cxdigit(s[2]))
return URL_ERR_INVALID_ESCAPE;
val = Cxvalue(s[1])*16 + Cxvalue(s[2]);
url_enescape(const char *s, char *d)
{
char *end = d + MAX_URL_SIZE - 10;
- unsigned int c;
+ uint c;
while (c = *s)
{
*d++ = *s++;
else
{
- uns val = (byte)(((byte)*s < NCC_MAX) ? NCC_CHARS[(byte)*s] : *s);
+ uint val = (byte)(((byte)*s < NCC_MAX) ? NCC_CHARS[(byte)*s] : *s);
*d++ = '%';
*d++ = enhex(val >> 4);
*d++ = enhex(val & 0x0f);
char *url_proto_names[URL_PROTO_MAX] = URL_PNAMES;
static int url_proto_path_flags[URL_PROTO_MAX] = URL_PATH_FLAGS;
-uns
+uint
url_identify_protocol(const char *p)
{
- uns i;
+ uint i;
for(i=1; i<URL_PROTO_MAX; i++)
if (!strcasecmp(p, url_proto_names[i]))
e = strchr(at, ':');
if (e) /* host:port present */
{
- uns p;
+ uint p;
*e++ = 0;
p = strtoul(e, &ep, 10);
if (ep && *ep || p > 65535)
/* Normalization according to given base URL */
-static uns std_ports[] = URL_DEFPORTS; /* Default port numbers */
+static uint std_ports[] = URL_DEFPORTS; /* Default port numbers */
static int
relpath_merge(struct url *u, struct url *b)
};
char *
-url_error(uns err)
+url_error(uint err)
{
if (err >= sizeof(errmsg) / sizeof(char *))
err = 0;
struct component {
const char *start;
int length;
- uns count;
+ uint count;
u32 hash;
};
return hf;
}
-static inline uns
-repeat_count(struct component *comp, uns count, uns len)
+static inline uint
+repeat_count(struct component *comp, uint count, uint len)
{
struct component *orig_comp = comp;
- uns found = 0;
+ uint found = 0;
while (1)
{
- uns i;
+ uint i;
comp += len;
count -= len;
found++;
url_has_repeated_component(const char *url)
{
struct component *comp;
- uns comps, comp_len, rep_prefix, hash_size, *hash, *next;
+ uint comps, comp_len, rep_prefix, hash_size, *hash, *next;
const char *c;
- uns i, j, k;
+ uint i, j, k;
for (comps=0, c=url; c; comps++)
{
struct url {
char *protocol;
- uns protoid;
+ uint protoid;
char *user;
char *pass;
char *host;
- uns port; /* ~0 if unspec */
+ uint port; /* ~0 if unspec */
char *rest;
char *buf, *bufend;
};
int url_pack(struct url *u, char *d);
int url_canon_split_rel(const char *url, char *buf1, char *buf2, struct url *u, struct url *base);
int url_auto_canonicalize_rel(const char *src, char *dst, struct url *base);
-uns url_identify_protocol(const char *p);
+uint url_identify_protocol(const char *p);
int url_has_repeated_component(const char *url);
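The detector is driven by the Url configuration section; with the defaults above (MinRepeatCount = 0x7fffffff, MaxRepeatLength = 0) it is effectively disabled, so a caller sketch looks like:

/* Reject crawler-trap URLs such as "http://example.com/a/b/a/b/a/b/..."
 * once MinRepeatCount and MaxRepeatLength are configured to sane values. */
if (url_has_repeated_component(url))
  skip_document(url);    /* skip_document() is a hypothetical caller action */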
static inline int url_canon_split(const char *url, char *buf1, char *buf2, struct url *u)
/* Error codes */
-char *url_error(uns);
+char *url_error(uint);
#define URL_ERR_TOO_LONG 1
#define URL_ERR_INVALID_CHAR 2
};
static const struct {
- uns (*function)(byte *, const byte *, uns);
- uns in_block, out_block, num_blocks;
- uns add_prefix;
+ uint (*function)(byte *, const byte *, uint);
+ uint in_block, out_block, num_blocks;
+ uint add_prefix;
} actions[] = {
{
base64_encode,
// Choose mode
int mode = -1;
char *prefix = NULL;
- uns blocks = 0;
+ uint blocks = 0;
int opt;
while ((opt = getopt_long(argc, argv, "edEDp:b:", opts, NULL)) >= 0)
switch (opt)
struct fastbuf *in = bfdopen_shared(0, 4096);
struct fastbuf *out = bfdopen_shared(1, 4096);
int has_offset = !actions[mode].add_prefix && prefix;
- uns offset = has_offset ? strlen(prefix) : 0;
- uns read_size = actions[mode].in_block * blocks + offset + has_offset;
- uns write_size = actions[mode].out_block * blocks;
+ uint offset = has_offset ? strlen(prefix) : 0;
+ uint read_size = actions[mode].in_block * blocks + offset + has_offset;
+ uint write_size = actions[mode].out_block * blocks;
byte in_buff[read_size], out_buff[write_size];
- uns isize;
+ uint isize;
// Recode it
while (isize = bread(in, in_buff, read_size))
|| (strncmp(prefix, in_buff, offset)))
die("Invalid line syntax");
}
- uns osize = actions[mode].function(out_buff, in_buff + offset, isize - offset - has_offset);
+ uint osize = actions[mode].function(out_buff, in_buff + offset, isize - offset - has_offset);
bwrite(out, out_buff, osize);
if (actions[mode].add_prefix && prefix)
bputc(out, '\n');
static byte *base_url;
static struct url base;
-static uns opt_split = 0, opt_normalize = 0, opt_forgive = 0;
+static uint opt_split = 0, opt_normalize = 0, opt_forgive = 0;
static struct fastbuf *fout;
-static uns err_count;
+static uint err_count;
static void
process_url(byte *url)
#define PUTB(j,i) p[j] = (byte)((u >> (8*(i))));
#define PUTB4(b) PUTB(0,b-1) PUTB(1,b-2) PUTB(2,b-3) PUTB(3,b-4)
-uns varint_put_big(byte *p, u64 u)
+uint varint_put_big(byte *p, u64 u)
{
ASSERT(u >= VARINT_SHIFT_L4);
#define PUTB4(b) PUTB(0,b-1) PUTB(1,b-2) PUTB(2,b-3) PUTB(3,b-4)
/* for internal use only, needs the length > 4 */
-uns varint_put_big(byte *p, u64 u);
+uint varint_put_big(byte *p, u64 u);
const byte *varint_get_big(const byte *p, u64 *r);
/**
* Encode u64 value u into byte sequence p.
* Returns the number of bytes used (at least 1 and at most 9).
**/
-static inline uns varint_put(byte *p, u64 u)
+static inline uint varint_put(byte *p, u64 u)
{
if (u < VARINT_SHIFT_L1) {
p[0] = (byte)u;
* Store the invalid sequence.
 * Always returns 2 (2 bytes are used, to be consistent with varint_put).
**/
-static inline uns varint_put_invalid(byte *p)
+static inline uint varint_put_invalid(byte *p)
{
p[0] = p[1] = 0xff;
return 2;
}
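A self-consistency sketch of the helpers; the exact lengths depend on the VARINT_SHIFT_* thresholds, but the three functions always agree:

byte buf[9];
uint len = varint_put(buf, 300);
ASSERT(len == varint_space(300));   /* predicted length matches */
ASSERT(len == varint_len(buf[0]));  /* and is recoverable from the header byte */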
/** Compute the length of encoding in bytes from the first byte hdr of the encoding. **/
-static inline uns varint_len(const byte hdr)
+static inline uint varint_len(const byte hdr)
{
byte b = ~hdr;
- uns l = 0;
+ uint l = 0;
if (!b)
l = -1;
else {
}
/** Compute the number of bytes needed to store the value u. **/
-static inline uns varint_space(u64 u)
+static inline uint varint_space(u64 u)
{
if (u < VARINT_SHIFT_L1)
return 1;
struct nfa_state nfa[MAX_STATES];
struct dfa_state *hash[HASH_SIZE];
struct dfa_state *dfa_start;
- uns nfa_states;
- uns dfa_cache_counter;
+ uint nfa_states;
+ uint dfa_cache_counter;
struct mempool *pool;
struct dfa_state *free_states;
};
-static inline unsigned
+static inline uint
wp_hash(u32 set)
{
set ^= set >> 16;
static struct dfa_state *
wp_new_state(struct wildpatt *w, u32 set)
{
- unsigned h = wp_hash(set);
+ uint h = wp_hash(set);
struct dfa_state *d;
- unsigned bit;
+ uint bit;
u32 def_set;
while (d = w->hash[h])
}
if (def_set)
{
- unsigned i;
+ uint i;
def_set |= 1;
for(i=0; i<256; i++)
d->edge[i] |= def_set;
wp_compile(const char *p, struct mempool *pool)
{
struct wildpatt *w;
- uns i;
+ uint i;
if (strlen(p) >= MAX_STATES) /* Too long */
return NULL;
pthread_attr_setstacksize(&attr, p->stack_size ? : ucwlib_thread_stack_size) < 0)
ASSERT(0);
- for (uns i=0; i < p->num_threads; i++)
+ for (uint i=0; i < p->num_threads; i++)
{
struct worker_thread *t = (p->new_thread ? p->new_thread() : xmalloc(sizeof(*t)));
t->pool = p;
void
worker_pool_cleanup(struct worker_pool *p)
{
- for (uns i=0; i < p->num_threads; i++)
+ for (uint i=0; i < p->num_threads; i++)
{
struct work w = {
.go = worker_thread_signal_finish
struct w {
struct work w;
- uns id;
+ uint id;
};
static void go(struct worker_thread *t, struct work *w)
struct work_queue q;
work_queue_init(&pool, &q);
- for (uns i=0; i<500; i++)
+ for (uint i=0; i<500; i++)
{
struct w *w = xmalloc_zero(sizeof(*w));
w->w.go = go;
pthread_mutex_t queue_mutex;
clist pri0_queue; // Ordinary queue for requests with priority=0
struct work **pri_heap; // A heap for requests with priority>0
- uns heap_cnt, heap_max;
+ uint heap_cnt, heap_max;
sem_t *queue_sem; // Number of requests queued
};
struct worker_pool {
struct raw_queue requests;
- uns num_threads;
- uns stack_size; // 0 for default
+ uint num_threads;
+ uint stack_size; // 0 for default
struct worker_thread *(*new_thread)(void); // default: xmalloc the struct
void (*free_thread)(struct worker_thread *t); // default: xfree
void (*init_thread)(struct worker_thread *t); // default: empty
struct work_queue {
struct worker_pool *pool;
- uns nr_running; // Number of requests in service
+ uint nr_running; // Number of requests in service
struct raw_queue finished; // Finished requests queue up here
};
struct work { // A single request
cnode n;
- uns priority;
+ uint priority;
struct work_queue *reply_to; // Where to queue the request when it's finished
void (*go)(struct worker_thread *t, struct work *w); // Called inside the worker thread
};
/*** Memory management ***/
void *
-xml_hash_new(struct mempool *pool, uns size)
+xml_hash_new(struct mempool *pool, uint size)
{
void *tab = mp_alloc_zero(pool, size + XML_HASH_HDR_SIZE);
*(void **)tab = pool;
struct xml_dtd_enodes_table;
-static inline uns
+static inline uint
xml_dtd_enodes_hash(struct xml_dtd_enodes_table *tab UNUSED, struct xml_dtd_elem_node *parent, struct xml_dtd_elem *elem)
{
return hash_pointer(parent) ^ hash_pointer(elem);
struct xml_dtd_attrs_table;
-static inline uns
+static inline uint
xml_dtd_attrs_hash(struct xml_dtd_attrs_table *tab UNUSED, struct xml_dtd_elem *elem, char *name)
{
return hash_pointer(elem) ^ hash_string(name);
struct xml_dtd_evals_table;
-static inline uns
+static inline uint
xml_dtd_evals_hash(struct xml_dtd_evals_table *tab UNUSED, struct xml_dtd_attr *attr, char *val)
{
return hash_pointer(attr) ^ hash_string(val);
struct xml_dtd_enotns_table;
-static inline uns
+static inline uint
xml_dtd_enotns_hash(struct xml_dtd_enotns_table *tab UNUSED, struct xml_dtd_attr *attr, struct xml_dtd_notn *notn)
{
return hash_pointer(attr) ^ hash_pointer(notn);
xml_dec(ctx);
}
-static uns
-xml_parse_dtd_pe(struct xml_context *ctx, uns entity_decl)
+static uint
+xml_parse_dtd_pe(struct xml_context *ctx, uint entity_decl)
{
/* Already parsed: '%' */
do
return 1;
}
-static inline uns
-xml_parse_dtd_white(struct xml_context *ctx, uns mandatory)
+static inline uint
+xml_parse_dtd_white(struct xml_context *ctx, uint mandatory)
{
/* Whitespace or parameter entity,
* mandatory==~0U has the special meaning of the whitespace before the '%' character in a parameter entity declaration */
- uns cnt = 0;
+ uint cnt = 0;
while (xml_peek_cat(ctx) & XML_CHAR_WHITE)
{
xml_skip_char(ctx);
}
static void
-xml_dtd_parse_external_id(struct xml_context *ctx, char **system_id, char **public_id, uns allow_public)
+xml_dtd_parse_external_id(struct xml_context *ctx, char **system_id, char **public_id, uint allow_public)
{
struct xml_dtd *dtd = ctx->dtd;
- uns c = xml_peek_char(ctx);
+ uint c = xml_peek_char(ctx);
if (c == 'S')
{
xml_parse_seq(ctx, "SYSTEM");
/* Already parsed: '<!ENTITY' */
TRACE(ctx, "parse_entity_decl");
struct xml_dtd *dtd = ctx->dtd;
- uns flags = ~xml_parse_dtd_white(ctx, ~0U) ? 0 : XML_DTD_ENTITY_PARAMETER;
+ uint flags = ~xml_parse_dtd_white(ctx, ~0U) ? 0 : XML_DTD_ENTITY_PARAMETER;
if (flags)
xml_parse_dtd_white(ctx, 1);
struct xml_dtd_entity *ent = xml_dtd_ents_lookup(flags ? dtd->tab_pents : dtd->tab_ents, xml_parse_name(ctx, dtd->pool));
xml_fatal(ctx, "Entity &%s; already declared, skipping not implemented", ent->name);
// FIXME: should be only a warning
}
- uns c, sep = xml_get_char(ctx);
+ uint c, sep = xml_get_char(ctx);
if (sep == '\'' || sep == '"')
{
/* Internal entity:
char *n = xml_parse_name(ctx, ctx->stack);
xml_parse_char(ctx, ';');
xml_dec(ctx);
- uns l = strlen(n);
+ uint l = strlen(n);
p = mp_spread(dtd->pool, p, 3 + l);
*p++ = '&';
memcpy(p, n, l);
xml_fatal(ctx, "Element <%s> already declared", name);
/* contentspec ::= 'EMPTY' | 'ANY' | Mixed | children */
- uns c = xml_peek_char(ctx);
+ uint c = xml_peek_char(ctx);
if (c == 'E')
{
xml_parse_seq(ctx, "EMPTY");
elem->type = XML_DTD_ELEM_CHILDREN;
parent->type = XML_DTD_ELEM_PCDATA;
- uns c;
+ uint c;
goto first;
while (1)
{
char *name = xml_parse_name(ctx, dtd->pool);
struct xml_dtd_attr *attr = xml_dtd_attrs_find(dtd->tab_attrs, elem, name);
- uns ignored = 0;
+ uint ignored = 0;
if (attr)
{
xml_warn(ctx, "Duplicate attribute definition");
{
TRACE(ctx, "skip_internal_subset");
/* AlreadyParsed: '[' */
- uns c;
+ uint c;
while ((c = xml_get_char(ctx)) != ']')
{
if (c != '<')
/*** Validation of attribute values ***/
-static uns
-xml_check_tokens(char *value, uns first_cat, uns next_cat, uns seq)
+static uint
+xml_check_tokens(char *value, uint first_cat, uint next_cat, uint seq)
{
char *p = value;
- uns u;
+ uint u;
while (1)
{
p = utf8_32_get(p, &u);
}
}
-static uns
+static uint
xml_is_name(struct xml_context *ctx, char *value)
{
/* Name ::= NameStartChar (NameChar)* */
return xml_check_tokens(value, ctx->cat_sname, ctx->cat_name, 0);
}
-static uns
+static uint
xml_is_names(struct xml_context *ctx, char *value)
{
/* Names ::= Name (#x20 Name)* */
return xml_check_tokens(value, ctx->cat_sname, ctx->cat_name, 1);
}
-static uns
+static uint
xml_is_nmtoken(struct xml_context *ctx, char *value)
{
/* Nmtoken ::= (NameChar)+ */
return xml_check_tokens(value, ctx->cat_name, ctx->cat_name, 0);
}
-static uns
+static uint
xml_is_nmtokens(struct xml_context *ctx, char *value)
{
/* Nmtokens ::= Nmtoken (#x20 Nmtoken)* */
struct xml_dtd_notn {
snode n; /* Node in xml_dtd.notns */
- uns flags; /* XML_DTD_NOTN_x */
+ uint flags; /* XML_DTD_NOTN_x */
char *name; /* Notation name */
char *system_id; /* External ID */
char *public_id;
struct xml_dtd_entity {
snode n; /* Node in xml_dtd.[gp]ents */
- uns flags; /* XML_DTD_ENT_x */
+ uint flags; /* XML_DTD_ENT_x */
char *name; /* Entity name */
char *text; /* Replacement text / expanded replacement text (XML_DTD_ENT_TRIVIAL) */
- uns len; /* Text length */
+ uint len; /* Text length */
char *system_id; /* External ID */
char *public_id;
struct xml_dtd_notn *notn; /* Notation (XML_DTD_ENT_UNPARSED only) */
struct xml_dtd_elem {
snode n;
- uns flags;
- uns type;
+ uint flags;
+ uint type;
char *name;
struct xml_dtd_elem_node *node;
slist attrs;
struct xml_dtd_elem_node *parent;
struct xml_dtd_elem *elem;
slist sons;
- uns type;
- uns occur;
+ uint type;
+ uint occur;
void *user; /* User-defined */
};
snode n;
char *name; /* Attribute name */
struct xml_dtd_elem *elem; /* Owner element */
- uns type; /* See enum xml_dtd_attr_type */
- uns default_mode; /* See enum xml_dtd_attr_default */
+ uint type; /* See enum xml_dtd_attr_type */
+ uint default_mode; /* See enum xml_dtd_attr_default */
char *default_value; /* The default value defined in DTD (or NULL) */
};
struct xml_stack {
struct xml_stack *next;
struct mempool_state state;
- uns flags;
+ uint flags;
};
static inline void *
-xml_do_push(struct xml_context *ctx, uns size)
+xml_do_push(struct xml_context *ctx, uint size)
{
/* Saves ctx->stack and ctx->flags state */
struct mempool_state state;
}
static inline void
-xml_pop_dom(struct xml_context *ctx, uns free)
+xml_pop_dom(struct xml_context *ctx, uint free)
{
/* Leave DOM subtree */
TRACE(ctx, "pop_dom");
#define XML_HASH_HDR_SIZE ALIGN_TO(sizeof(void *), CPU_STRUCT_ALIGN)
#define XML_HASH_GIVE_ALLOC struct HASH_PREFIX(table); \
- static inline void *HASH_PREFIX(alloc)(struct HASH_PREFIX(table) *t, uns size) \
+ static inline void *HASH_PREFIX(alloc)(struct HASH_PREFIX(table) *t, uint size) \
{ return mp_alloc(*(void **)((void *)t - XML_HASH_HDR_SIZE), size); } \
static inline void HASH_PREFIX(free)(struct HASH_PREFIX(table) *t UNUSED, void *p UNUSED) {}
-void *xml_hash_new(struct mempool *pool, uns size);
+void *xml_hash_new(struct mempool *pool, uint size);
void xml_spout_chars(struct fastbuf *fb);
#include "obj/xml/unicat.h"
-static inline uns
-xml_char_cat(uns c)
+static inline uint
+xml_char_cat(uint c)
{
if (c < 0x10000)
return 1U << xml_char_tab1[(c & 0xff) + xml_char_tab2[c >> 8]];
return 1;
}
-static inline uns
-xml_ascii_cat(uns c)
+static inline uint
+xml_ascii_cat(uint c)
{
return xml_char_tab1[c];
}
void xml_refill(struct xml_context *ctx);
-static inline uns
+static inline uint
xml_peek_char(struct xml_context *ctx)
{
if (ctx->bptr == ctx->bstop)
return ctx->bptr[0];
}
-static inline uns
+static inline uint
xml_peek_cat(struct xml_context *ctx)
{
if (ctx->bptr == ctx->bstop)
return ctx->bptr[1];
}
-static inline uns
+static inline uint
xml_get_char(struct xml_context *ctx)
{
- uns c = xml_peek_char(ctx);
+ uint c = xml_peek_char(ctx);
ctx->bptr += 2;
return c;
}
-static inline uns
+static inline uint
xml_get_cat(struct xml_context *ctx)
{
- uns c = xml_peek_cat(ctx);
+ uint c = xml_peek_cat(ctx);
ctx->bptr += 2;
return c;
}
-static inline uns
+static inline uint
xml_last_char(struct xml_context *ctx)
{
return ctx->bptr[-2];
}
-static inline uns
+static inline uint
xml_last_cat(struct xml_context *ctx)
{
return ctx->bptr[-1];
}
-static inline uns
+static inline uint
xml_skip_char(struct xml_context *ctx)
{
- uns c = ctx->bptr[0];
+ uint c = ctx->bptr[0];
ctx->bptr += 2;
return c;
}
-static inline uns
+static inline uint
xml_unget_char(struct xml_context *ctx)
{
return *(ctx->bptr -= 2);
/*** Parsing ***/
-void NONRET xml_fatal_expected(struct xml_context *ctx, uns c);
+void NONRET xml_fatal_expected(struct xml_context *ctx, uint c);
void NONRET xml_fatal_expected_white(struct xml_context *ctx);
void NONRET xml_fatal_expected_quot(struct xml_context *ctx);
-static inline uns
-xml_parse_white(struct xml_context *ctx, uns mandatory)
+static inline uint
+xml_parse_white(struct xml_context *ctx, uint mandatory)
{
/* mandatory=1 -> S ::= (#x20 | #x9 | #xD | #xA)+
* mandatory=0 -> S? */
- uns cnt = 0;
+ uint cnt = 0;
while (xml_peek_cat(ctx) & XML_CHAR_WHITE)
{
xml_skip_char(ctx);
}
static inline void
-xml_parse_char(struct xml_context *ctx, uns c)
+xml_parse_char(struct xml_context *ctx, uint c)
{
/* Consumes a given Unicode character */
if (unlikely(c != xml_get_char(ctx)))
void xml_parse_eq(struct xml_context *ctx);
-static inline uns
+static inline uint
xml_parse_quote(struct xml_context *ctx)
{
/* "'" | '"' */
- uns c = xml_get_char(ctx);
+ uint c = xml_get_char(ctx);
if (unlikely(c != '\'' && c != '\"'))
xml_fatal_expected_quot(ctx);
return c;
char *xml_parse_system_literal(struct xml_context *ctx, struct mempool *pool);
char *xml_parse_pubid_literal(struct xml_context *ctx, struct mempool *pool);
-uns xml_parse_char_ref(struct xml_context *ctx);
+uint xml_parse_char_ref(struct xml_context *ctx);
void xml_parse_pe_ref(struct xml_context *ctx);
char *xml_parse_attr_value(struct xml_context *ctx, struct xml_dtd_attr *attr);
/*** Basic parsing ***/
void NONRET
-xml_fatal_expected(struct xml_context *ctx, uns c)
+xml_fatal_expected(struct xml_context *ctx, uint c)
{
if (c >= 32 && c < 128)
xml_fatal(ctx, "Expected '%c'", c);
/*** Names and nmtokens ***/
static char *
-xml_parse_string(struct xml_context *ctx, struct mempool *pool, uns first_cat, uns next_cat, char *err)
+xml_parse_string(struct xml_context *ctx, struct mempool *pool, uint first_cat, uint next_cat, char *err)
{
char *p = mp_start_noalign(pool, 1);
if (unlikely(!(xml_peek_cat(ctx) & first_cat)))
}
static void
-xml_skip_string(struct xml_context *ctx, uns first_cat, uns next_cat, char *err)
+xml_skip_string(struct xml_context *ctx, uint first_cat, uint next_cat, char *err)
{
if (unlikely(!(xml_get_cat(ctx) & first_cat)))
xml_fatal(ctx, "%s", err);
{
/* SystemLiteral ::= ('"' [^"]* '"') | ("'" [^']* "'") */
char *p = mp_start_noalign(pool, 1);
- uns q = xml_parse_quote(ctx), c;
+ uint q = xml_parse_quote(ctx), c;
while ((c = xml_get_char(ctx)) != q)
{
p = mp_spread(pool, p, 5);
{
/* PubidLiteral ::= '"' PubidChar* '"' | "'" (PubidChar - "'")* "'" */
char *p = mp_start_noalign(pool, 1);
- uns q = xml_parse_quote(ctx), c;
+ uint q = xml_parse_quote(ctx), c;
while ((c = xml_get_char(ctx)) != q)
{
if (unlikely(!(xml_last_cat(ctx) & XML_CHAR_PUBID)))
/*** Character references ***/
-uns
+uint
xml_parse_char_ref(struct xml_context *ctx)
{
TRACE(ctx, "parse_char_ref");
/* CharRef ::= '&#' [0-9]+ ';' | '&#x' [0-9a-fA-F]+ ';'
* Already parsed: '&#' */
- uns v = 0;
+ uint v = 0;
if (xml_get_char(ctx) == 'x')
{
if (!(xml_get_cat(ctx) & XML_CHAR_XDIGIT))
}
while (v < 0x110000 && (xml_get_cat(ctx) & XML_CHAR_DIGIT));
}
- uns cat = xml_char_cat(v);
+ uint cat = xml_char_cat(v);
if (!(cat & ctx->cat_unrestricted))
{
xml_error(ctx, "Character reference out of range");
if (fb->bufend != fb->buffer)
{
TRACE(ctx, "growing chars");
- uns len = fb->bufend - fb->buffer;
- uns reported = fb->bstop - fb->buffer;
+ uint len = fb->bufend - fb->buffer;
+ uint reported = fb->bstop - fb->buffer;
fb->buffer = mp_expand(pool);
fb->bufend = fb->buffer + mp_avail(pool);
fb->bptr = fb->buffer + len;
}
}
-static inline uns
+static inline uint
xml_end_chars(struct xml_context *ctx, char **out)
{
struct fastbuf *fb = &ctx->chars;
- uns len = fb->bptr - fb->buffer;
+ uint len = fb->bptr - fb->buffer;
if (len)
{
TRACE(ctx, "ending chars");
return len;
}
-static inline uns
+static inline uint
xml_report_chars(struct xml_context *ctx, char **out)
{
struct fastbuf *fb = &ctx->chars;
- uns len = fb->bptr - fb->buffer;
+ uint len = fb->bptr - fb->buffer;
if (len)
{
*fb->bptr = 0;
return len;
}
-static inline uns
+static inline uint
xml_flush_chars(struct xml_context *ctx)
{
char *text, *rtext;
- uns len = xml_end_chars(ctx, &text), rlen;
+ uint len = xml_end_chars(ctx, &text), rlen;
if (len)
{
if (ctx->flags & XML_NO_CHARS)
}
xml_parse_seq(ctx, "CDATA[");
struct fastbuf *out = &ctx->chars;
- uns rlen;
+ uint rlen;
char *rtext;
if ((ctx->flags & XML_REPORT_CHARS) && ctx->h_block && (rlen = xml_report_chars(ctx, &rtext)))
ctx->h_block(ctx, rtext, rlen);
/* AttValue ::= '"' ([^<&"] | Reference)* '"' | "'" ([^<&'] | Reference)* "'" */
/* FIXME: -- check value constraints / normalize leading/trailing WS and repeated WS */
struct mempool_state state;
- uns quote = xml_parse_quote(ctx);
+ uint quote = xml_parse_quote(ctx);
mp_save(ctx->stack, &state);
struct fastbuf *out = &ctx->chars;
struct xml_source *src = ctx->src;
while (1)
{
- uns c = xml_get_char(ctx);
+ uint c = xml_get_char(ctx);
if (c == '&')
{
xml_inc(ctx);
return xml_end_chars(ctx, &text) ? text : "";
}
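To make the AttValue production concrete, a worked illustration (hypothetical input; standard XML 1.0 semantics):

  /* Raw attribute as written:    attr='a &amp; b'
   * After reference expansion:   a & b
   * A literal '<', or an '&' that does not start a valid reference,
   * is a fatal error, per the [^<&] part of the production. */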
-uns
+uint
xml_normalize_white(struct xml_context *ctx UNUSED, char *text)
{
char *s = text, *d = text;
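The rest of the body is elided by the diff, but the declared semantics (see the header comment near the end of this patch) are simple to sketch stand-alone. Assumptions: references were expanded earlier and all whitespace has already been mapped to 0x20; uint is libucw's typedef for unsigned int:

typedef unsigned int uint;   /* as in ucw/config.h */

/* Sketch of non-CDATA attribute-value normalization: drop leading and
 * trailing spaces, collapse each run of spaces to a single one.
 * Rewrites the string in place and returns the new length. */
static uint
normalize_white(char *text)
{
  char *s = text, *d = text;
  while (*s == ' ')
    s++;
  while (*s)
    {
      if (*s == ' ')
        {
          while (*s == ' ')
            s++;
          if (*s)              /* interior run -> one space */
            *d++ = ' ';
        }
      else
        *d++ = *s++;
    }
  *d = 0;
  return d - text;
}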
struct xml_attrs_table;
-static inline uns
+static inline uint
xml_attrs_hash(struct xml_attrs_table *t UNUSED, struct xml_node *e, char *n)
{
return hash_pointer(e) ^ hash_string(n);
/*** Elements ***/
-static uns
+static uint
xml_validate_element(struct xml_dtd_elem_node *root, struct xml_dtd_elem *elem)
{
if (root->elem)
}
while (1)
{
- uns white = xml_parse_white(ctx, 0);
- uns c = xml_get_char(ctx);
+ uint white = xml_parse_white(ctx, 0);
+ uint c = xml_get_char(ctx);
if (c == '/')
{
xml_parse_char(ctx, '>');
if ((ctx->flags & XML_REPORT_TAGS) && ctx->h_etag)
ctx->h_etag(ctx);
struct xml_node *e = ctx->node;
- uns free = !(ctx->flags & XML_ALLOC_TAGS);
+ uint free = !(ctx->flags & XML_ALLOC_TAGS);
if (free)
{
if (!e->parent)
char *n = e->name;
while (*n)
{
- uns c;
+ uint c;
n = utf8_32_get(n, &c);
if (xml_get_char(ctx) != c)
goto recover;
xml_parse_white(ctx, 1);
ctx->doctype = xml_parse_name(ctx, ctx->pool);
TRACE(ctx, "doctype=%s", ctx->doctype);
- uns c;
+ uint c;
if (xml_parse_white(ctx, 0) && ((c = xml_peek_char(ctx)) == 'S' || c == 'P'))
{
if (c == 'S')
/* DTD: Internal subset */
static void
-xml_parse_subset(struct xml_context *ctx, uns external)
+xml_parse_subset(struct xml_context *ctx, uint external)
{
// FIXME:
// -- comments/pi have no parent
while (1)
{
xml_parse_white(ctx, 0);
- uns c = xml_get_char(ctx);
+ uint c = xml_get_char(ctx);
xml_inc(ctx);
if (c == '<')
if ((c = xml_get_char(ctx)) == '!')
/*** The State Machine ***/
-uns
+uint
xml_next(struct xml_context *ctx)
{
/* A nasty state machine */
TRACE(ctx, "raised fatal error");
return ctx->state = XML_STATE_EOF;
}
- uns c;
+ uint c;
switch (ctx->state)
{
case XML_STATE_START:
ASSERT(0);
}
-uns
-xml_next_state(struct xml_context *ctx, uns pull)
+uint
+xml_next_state(struct xml_context *ctx, uint pull)
{
- uns saved = ctx->pull;
+ uint saved = ctx->pull;
ctx->pull = pull;
- uns res = xml_next(ctx);
+ uint res = xml_next(ctx);
ctx->pull = saved;
return res;
}
-uns
+uint
xml_skip_element(struct xml_context *ctx)
{
ASSERT(ctx->state == XML_STATE_STAG);
struct xml_node *node = ctx->node;
- uns saved = ctx->pull, res;
+ uint saved = ctx->pull, res;
ctx->pull = XML_PULL_ETAG;
while ((res = xml_next(ctx)) && ctx->node != node);
ctx->pull = saved;
return res;
}
-uns
+uint
xml_parse(struct xml_context *ctx)
{
/* This cycle should run only once unless the user overrides the value of ctx->pull in a SAX handler */
}
static inline void
-xml_add_char(u32 **bstop, uns c)
+xml_add_char(u32 **bstop, uint c)
{
*(*bstop)++ = c;
*(*bstop)++ = xml_char_cat(c);
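xml_add_char() is the write side of the buffer convention: each character occupies two consecutive u32 slots, the code point followed by its category bitmask. The matching read side, as a sketch (the real xml_get_char()/xml_get_cat() also handle refilling):

#include <stdint.h>
typedef uint32_t u32;

/* Consume one (code point, category) pair. This pairing is why buffer
 * distances are divided by 2 to obtain character counts. */
static inline u32
buf_get_char(u32 **bptr, u32 *cat)
{
  u32 c = *(*bptr)++;
  *cat = *(*bptr)++;
  return c;
}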
src->refill_cat2 = ctx->cat_new_line;
}
-static uns
-xml_error_restricted(struct xml_context *ctx, uns c)
+static uint
+xml_error_restricted(struct xml_context *ctx, uint c)
{
if (c == ~1U)
xml_error(ctx, "Corrupted encoding");
struct fastbuf *fb = src->fb; \
if (ctx->bptr == ctx->bstop) \
ctx->bptr = ctx->bstop = src->buf; \
- uns c, t1 = src->refill_cat1, t2 = src->refill_cat2, row = src->row; \
+ uint c, t1 = src->refill_cat1, t2 = src->refill_cat2, row = src->row; \
u32 *bend = src->buf + ARRAY_SIZE(src->buf), *bstop = ctx->bstop, \
*last_0xd = src->pending_0xd ? bstop : NULL; \
do \
{ \
c = func(fb, ##params); \
- uns t = xml_char_cat(c); \
+ uint t = xml_char_cat(c); \
if (t & t1) \
/* Typical branch */ \
*bstop++ = c, *bstop++ = t; \
else
{
ctx->src->refill(ctx);
- TRACE(ctx, "refilled %u characters", (uns)((ctx->bstop - ctx->bptr) / 2));
+ TRACE(ctx, "refilled %u characters", (uint)((ctx->bstop - ctx->bptr) / 2));
}
}
while (ctx->bptr == ctx->bstop);
}
-static uns
+static uint
xml_source_row(struct xml_context *ctx, struct xml_source *src)
{
- uns row = src->row;
+ uint row = src->row;
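  /* src->row counts every newline already decoded into the buffer; walk back
   * over the (code point, category) pairs not yet consumed and subtract the
   * newlines among them, so the result matches the reader's position. */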
for (u32 *p = ctx->bstop; p != ctx->bptr; p -= 2)
if (p[-1] & src->refill_cat2)
row--;
return row + 1;
}
-uns
+uint
xml_row(struct xml_context *ctx)
{
return ctx->src ? xml_source_row(ctx, ctx->src) : 0;
{
/* EncName ::= '"' [A-Za-z] ([A-Za-z0-9._] | '-')* '"' | "'" [A-Za-z] ([A-Za-z0-9._] | '-')* "'" */
char *p = mp_start_noalign(ctx->pool, 1);
- uns q = xml_parse_quote(ctx);
+ uint q = xml_parse_quote(ctx);
if (unlikely(!(xml_get_cat(ctx) & XML_CHAR_ENC_SNAME)))
xml_fatal(ctx, "Invalid character in the encoding name");
while (1)
TRACE(ctx, "xml_parse_decl");
struct xml_source *src = ctx->src;
ctx->flags &= ~XML_SRC_EXPECTED_DECL;
- uns doc = ctx->flags & XML_SRC_DOCUMENT;
+ uint doc = ctx->flags & XML_SRC_DOCUMENT;
/* Set up valid Unicode ranges and force the reader to abort refill() after each '>', where we can switch the encoding or XML version */
if (doc)
expected_encoding = NULL;
}
}
- uns utf16 = src->refill == xml_refill_utf16_le || src->refill == xml_refill_utf16_be;
+ uint utf16 = src->refill == xml_refill_utf16_le || src->refill == xml_refill_utf16_be;
if (utf16)
src->fb_encoding = (src->refill == xml_refill_utf16_be) ? "UTF-16BE" : "UTF-16LE";
if (!expected_encoding)
if (!(ctx->flags & XML_SRC_EOF) && ctx->bstop != src->buf + ARRAY_SIZE(src->buf))
xml_refill(ctx);
u32 *bptr = ctx->bptr;
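  /* The buffer holds (code point, category) pairs, so bptr[0], bptr[2], ...
   * are the characters; '& 0xdf' upcases ASCII letters, and bptr[11] is the
   * category of the sixth character, which must be whitespace for a
   * "<?xml " declaration to be present. */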
- uns have_decl = (12 <= ctx->bstop - ctx->bptr && (bptr[11] & XML_CHAR_WHITE) &&
+ uint have_decl = (12 <= ctx->bstop - ctx->bptr && (bptr[11] & XML_CHAR_WHITE) &&
bptr[0] == '<' && bptr[2] == '?' && (bptr[4] & 0xdf) == 'X' && (bptr[6] & 0xdf) == 'M' && (bptr[8] & 0xdf) == 'L');
if (!have_decl)
{
xml_parse_eq(ctx);
char *version = xml_parse_pubid_literal(ctx, ctx->pool);
TRACE(ctx, "version=%s", version);
- uns v = 0;
+ uint v = 0;
if (!strcmp(version, "1.1"))
v = XML_VERSION_1_1;
else if (strcmp(version, "1.0"))
{
xml_parse_seq(ctx, "standalone");
xml_parse_eq(ctx);
- uns c = xml_parse_quote(ctx);
+ uint c = xml_parse_quote(ctx);
if (ctx->standalone = (xml_peek_char(ctx) == 'y'))
xml_parse_seq(ctx, "yes");
else
my %hash = ();
print H "extern const byte xml_char_tab1[];\n";
- print H "extern const uns xml_char_tab2[];\n";
+ print H "extern const uint xml_char_tab2[];\n";
print H "extern const byte xml_char_tab3[];\n";
- print C "const uns xml_char_tab2[] = {\n ";
+ print C "const uint xml_char_tab2[] = {\n ";
for (my $t=0; $t<256; $t++) {
my $i = $t * 256;
my @x = ();
exit(1);
}
-static uns want_sax;
-static uns want_pull;
-static uns want_dom;
-static uns want_parse_dtd;
-static uns want_hide_errors;
-static uns want_ignore_comments;
-static uns want_ignore_pis;
-static uns want_report_blocks;
-static uns want_report_ignorable;
-static uns want_file_entities;
+static uint want_sax;
+static uint want_pull;
+static uint want_dom;
+static uint want_parse_dtd;
+static uint want_hide_errors;
+static uint want_ignore_comments;
+static uint want_ignore_pis;
+static uint want_report_blocks;
+static uint want_report_ignorable;
+static uint want_file_entities;
static struct fastbuf *out;
}
static void
-show_tree(struct xml_node *node, uns level)
+show_tree(struct xml_node *node, uint level)
{
if (!node)
return;
bputs(out, "DOM: ");
- for (uns i = 0; i < level; i++)
+ for (uint i = 0; i < level; i++)
bputs(out, " ");
bputs(out, node_type(node));
show_node(node);
}
static void
-h_block(struct xml_context *ctx UNUSED, char *text, uns len UNUSED)
+h_block(struct xml_context *ctx UNUSED, char *text, uint len UNUSED)
{
bprintf(out, "SAX: block text='%s'\n", text);
}
static void
-h_cdata(struct xml_context *ctx UNUSED, char *text, uns len UNUSED)
+h_cdata(struct xml_context *ctx UNUSED, char *text, uint len UNUSED)
{
bprintf(out, "SAX: cdata text='%s'\n", text);
}
static void
-h_ignorable(struct xml_context *ctx UNUSED, char *text, uns len UNUSED)
+h_ignorable(struct xml_context *ctx UNUSED, char *text, uint len UNUSED)
{
bprintf(out, "SAX: ignorable text='%s'\n", text);
}
if (want_pull)
{
ctx.pull = XML_PULL_CHARS | XML_PULL_STAG | XML_PULL_ETAG | XML_PULL_COMMENT | XML_PULL_PI;
- uns state;
+ uint state;
while (state = xml_next(&ctx))
switch (state)
{
struct xml_node {
cnode n; /* Node for list of parent's sons */
- uns type; /* XML_NODE_x */
+ uint type; /* XML_NODE_x */
struct xml_node *parent; /* Parent node */
char *name; /* Element name / PI target */
clist sons; /* Children nodes */
union {
struct {
char *text; /* PI text / Comment / CDATA */
- uns len; /* Text length in bytes */
+ uint len; /* Text length in bytes */
};
struct {
struct xml_dtd_elem *dtd; /* Element DTD */
struct fastbuf wrap_fb; /* Fbmem wrapper */
u32 buf[2 * XML_BUF_SIZE]; /* Read buffer with Unicode values and categories */
u32 *bptr, *bstop; /* Current state of the buffer */
- uns row; /* File position */
+ uint row; /* Current row (line) number in the source */
char *expected_encoding; /* Initial encoding before any transformation has been made (expected in XMLDecl/TextDecl) */
char *fb_encoding; /* Encoding of the source fastbuf */
char *decl_encoding; /* Encoding read from the XMLDecl/TextDecl */
- uns refill_cat1; /* Character categories, which should be directly passed to the buffer */
- uns refill_cat2; /* Character categories, which should be processed as newlines (possibly in some built-in
+ uint refill_cat1; /* Character categories that should be passed directly to the buffer */
+ uint refill_cat2; /* Character categories that should be processed as newlines (possibly in some built-in
sequences) */
void (*refill)(struct xml_context *ctx); /* Callback to decode source characters to the buffer */
unsigned short *refill_in_to_x; /* Libucw-charset input table */
- uns saved_depth; /* Saved ctx->depth */
- uns pending_0xd; /* The last read character is 0xD */
+ uint saved_depth; /* Saved ctx->depth */
+ uint pending_0xd; /* The last character read was 0xD */
};
struct xml_context {
struct mempool *pool; /* DOM pool */
struct mempool *stack; /* Stack pool (freed as soon as possible) */
struct xml_stack *stack_list; /* See xml_push(), xml_pop() */
- uns flags; /* XML_FLAG_x (restored on xml_pop()) */
- uns depth; /* Nesting level (for checking of valid source nesting -> valid pushes/pops on memory pools) */
+ uint flags; /* XML_FLAG_x (restored on xml_pop()) */
+ uint depth; /* Nesting level (for checking of valid source nesting -> valid pushes/pops on memory pools) */
struct fastbuf chars; /* Character data / attribute value */
struct mempool_state chars_state; /* Mempool state before the current character block has started */
char *chars_trivial; /* If not empty, it will be appended to chars */
/* Input */
struct xml_source *src; /* Current source */
u32 *bptr, *bstop; /* Buffer with preprocessed characters (validated UCS-4 + category flags) */
- uns cat_chars; /* Unicode range of supported characters (cdata, attribute values, ...) */
- uns cat_unrestricted; /* Unrestricted characters (may appear in document/external entities) */
- uns cat_new_line; /* New line characters */
- uns cat_name; /* Characters that may appear in names */
- uns cat_sname; /* Characters that may begin a name */
+ uint cat_chars; /* Unicode range of supported characters (cdata, attribute values, ...) */
+ uint cat_unrestricted; /* Unrestricted characters (may appear in document/external entities) */
+ uint cat_new_line; /* New line characters */
+ uint cat_name; /* Characters that may appear in names */
+ uint cat_sname; /* Characters that may begin a name */
/* SAX-like interface */
void (*h_document_start)(struct xml_context *ctx); /* Called before entering prolog */
void (*h_stag)(struct xml_context *ctx); /* Called after STag or EmptyElemTag (only with XML_REPORT_TAGS) */
void (*h_etag)(struct xml_context *ctx); /* Called before ETag or after EmptyElemTag (only with XML_REPORT_TAGS) */
void (*h_chars)(struct xml_context *ctx); /* Called after some characters (only with XML_REPORT_CHARS) */
- void (*h_block)(struct xml_context *ctx, char *text, uns len); /* Called for each continuous block of characters not reported by h_cdata() (only with XML_REPORT_CHARS) */
- void (*h_cdata)(struct xml_context *ctx, char *text, uns len); /* Called for each CDATA section (only with XML_REPORT_CHARS) */
- void (*h_ignorable)(struct xml_context *ctx, char *text, uns len); /* Called for ignorable whitespace (content in tags without #PCDATA) */
+ void (*h_block)(struct xml_context *ctx, char *text, uint len); /* Called for each continuous block of characters not reported by h_cdata() (only with XML_REPORT_CHARS) */
+ void (*h_cdata)(struct xml_context *ctx, char *text, uint len); /* Called for each CDATA section (only with XML_REPORT_CHARS) */
+ void (*h_ignorable)(struct xml_context *ctx, char *text, uint len); /* Called for ignorable whitespace (content in tags without #PCDATA) */
void (*h_dtd_start)(struct xml_context *ctx); /* Called just after the DTD structure is initialized */
void (*h_dtd_end)(struct xml_context *ctx); /* Called after the DTD subsets have been parsed */
struct xml_dtd_entity *(*h_find_entity)(struct xml_context *ctx, char *name); /* Called when needed to resolve a general entity */
struct xml_node *node; /* Current DOM node */
char *version_str;
- uns standalone;
+ uint standalone;
char *doctype; /* The document type (or NULL if unknown) */
char *system_id; /* DTD external id */
char *public_id; /* DTD public id */
struct xml_dtd *dtd; /* The DTD structure (or NULL) */
- uns state; /* Current state for the PULL interface (XML_STATE_x) */
- uns pull; /* Parameters for the PULL interface (XML_PULL_x) */
+ uint state; /* Current state for the PULL interface (XML_STATE_x) */
+ uint pull; /* Parameters for the PULL interface (XML_PULL_x) */
};
/* Initialize XML context */
struct xml_source *xml_push_fastbuf(struct xml_context *ctx, struct fastbuf *fb);
/* Parse without the PULL interface, return XML_ERR_x code (zero on success) */
-uns xml_parse(struct xml_context *ctx);
+uint xml_parse(struct xml_context *ctx);
/* Parse with the PULL interface, return XML_STATE_x (zero on EOF or fatal error) */
-uns xml_next(struct xml_context *ctx);
+uint xml_next(struct xml_context *ctx);
/* Equivalent to xml_next(), but with a temporarily changed ctx->pull value */
-uns xml_next_state(struct xml_context *ctx, uns pull);
+uint xml_next_state(struct xml_context *ctx, uint pull);
/* May be called in state XML_STATE_STAG to skip the element's content; returns XML_STATE_ETAG, or XML_STATE_EOF on a fatal error */
-uns xml_skip_element(struct xml_context *ctx);
+uint xml_skip_element(struct xml_context *ctx);
/* Returns the current row number in the document entity */
-uns xml_row(struct xml_context *ctx);
+uint xml_row(struct xml_context *ctx);
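Taken together, these entry points support a compact SAX-style driver. A minimal sketch; xml_init() and xml_cleanup() are assumed to be the context setup/teardown pair belonging to this header, everything else is declared above:

#include <stdio.h>

/* Print every start tag with its row (XML_REPORT_TAGS enables h_stag). */
static void
print_stag(struct xml_context *ctx)
{
  printf("<%s> at row %u\n", ctx->node->name, xml_row(ctx));
}

static void
parse_whole_file(struct fastbuf *fb)
{
  struct xml_context ctx;
  xml_init(&ctx);                       /* assumed init routine */
  ctx.flags |= XML_REPORT_TAGS;
  ctx.h_stag = print_stag;
  xml_push_fastbuf(&ctx, fb);
  if (xml_parse(&ctx))                  /* non-zero XML_ERR_x on failure */
    printf("parse failed at row %u\n", xml_row(&ctx));
  xml_cleanup(&ctx);                    /* assumed cleanup routine */
}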
/* Finds a given attribute value in an XML_NODE_ELEM node */
struct xml_attr *xml_attr_find(struct xml_context *ctx, struct xml_node *node, char *name);
void xml_def_resolve_entity(struct xml_context *ctx, struct xml_dtd_entity *ent);
/* Removes leading/trailing spaces and replaces sequences of spaces with a single space character (non-CDATA attribute normalization) */
-uns xml_normalize_white(struct xml_context *ctx, char *value);
+uint xml_normalize_white(struct xml_context *ctx, char *value);
/* Merges the character contents of a given element into a single string (not recursive) */
char *xml_merge_chars(struct xml_context *ctx, struct xml_node *node, struct mempool *pool);