/*
 *  UCW Library -- URL Functions
 *
 *  (c) 1997--2004 Martin Mares <mj@ucw.cz>
 *  (c) 2001--2005 Robert Spalek <robert@ucw.cz>
 *
 *  This software may be freely distributed and used according to the terms
 *  of the GNU Lesser General Public License.
 *
 *  The URL syntax corresponds to RFC 2396 with several exceptions:
 *
 *    o  Escaping of special characters still follows RFC 1738.
 *    o  Interpretation of path parameters follows RFC 1808.
 *
 *  XXX: The buffer handling in this module is really horrible, but it works.
 */
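/*
 *  A note on the escaping convention (informal, inferred from the code below):
 *  url_deescape() translates escaped reserved characters into the NCC_*
 *  control codes (e.g., "%2F" becomes NCC_SLASH rather than a literal '/'),
 *  so an escaped slash never gets confused with a real path separator, and
 *  url_enescape() later turns these codes back into their "%XX" form:
 *
 *      "http://x/a%2Fb"  --url_deescape-->  "http://x/a<NCC_SLASH>b"
 *                        --url_enescape-->  "http://x/a%2Fb"
 */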
#include "lib/chartype.h"

static uns url_ignore_spaces;
static uns url_ignore_underflow;
static byte *url_component_separators = "";
static uns url_min_repeat_count = 0x7fffffff;
static uns url_max_repeat_length = 0;

static struct cf_section url_config = {
  CF_UNS("IgnoreSpaces", &url_ignore_spaces),
  CF_UNS("IgnoreUnderflow", &url_ignore_underflow),
  CF_STRING("ComponentSeparators", &url_component_separators),
  CF_UNS("MinRepeatCount", &url_min_repeat_count),
  CF_UNS("MaxRepeatLength", &url_max_repeat_length),
static void CONSTRUCTOR url_init_config(void)
{
  cf_declare_section("URL", &url_config, 0);
}
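/*
 *  For illustration only (not part of this file): the items above would
 *  typically be set from a configuration file in the usual section syntax,
 *  roughly as sketched below -- the values are made-up examples:
 *
 *      URL {
 *        IgnoreSpaces          1
 *        IgnoreUnderflow       1
 *        ComponentSeparators   "/"
 *        MinRepeatCount        8
 *        MaxRepeatLength       4
 *      }
 */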
/* Escaping and de-escaping */

static uns
enhex(uns x)                            /* Convert a nibble (0..15) to its hex digit */
{
  return (x < 10) ? (x + '0') : (x - 10 + 'A');
}
url_deescape(const byte *s, byte *d)
  byte *end = d + MAX_URL_SIZE - 10;
      return URL_ERR_TOO_LONG;
          if (!Cxdigit(s[1]) || !Cxdigit(s[2]))
            return URL_ERR_INVALID_ESCAPE;
          val = Cxvalue(s[1])*16 + Cxvalue(s[2]);
            return URL_ERR_INVALID_ESCAPED_CHAR;
          /* Escaped reserved characters are translated to the NCC_* codes,
           * so they stay distinguishable from their literal counterparts: */
              val = NCC_SEMICOLON; break;
              val = NCC_SLASH; break;
              val = NCC_QUEST; break;
              val = NCC_COLON; break;
              val = NCC_EQUAL; break;
              val = NCC_HASH; break;
          /* Drop a run of spaces only if IgnoreSpaces is set and the run is
           * leading or trailing; otherwise copy it verbatim: */
          if (!url_ignore_spaces || !(!*s || d == dstart))
              return URL_ERR_TOO_LONG;
      return URL_ERR_INVALID_CHAR;
url_enescape(const byte *s, byte *d)
  byte *end = d + MAX_URL_SIZE - 10;
      return URL_ERR_TOO_LONG;
      if (Calnum(c) ||                                                 /* RFC 1738(2.2): Only alphanumerics ... */
          c == '$' || c == '-' || c == '_' || c == '.' || c == '+' ||  /* ... and several other exceptions ... */
          c == '!' || c == '*' || c == '\'' || c == '(' || c == ')' ||
          c == '/' || c == '?' || c == ':' || c == '@' ||              /* ... and reserved chars used for their reserved purpose */
          c == '=' || c == '&' || c == '#' || c == ';')
          /* Translate NCC_* codes back to the characters they stand for,
           * then escape: */
          uns val = (*s < NCC_MAX) ? NCC_CHARS[*s] : *s;
          *d++ = enhex(val >> 4);
          *d++ = enhex(val & 0x0f);
url_enescape_friendly(const byte *src, byte *dest)
  byte *end = dest + MAX_URL_SIZE - 10;
      return URL_ERR_TOO_LONG;
        *dest++ = NCC_CHARS[*src++];
      else if (*src >= 0x20 && *src < 0x7f)
          *dest++ = enhex(*src >> 4);
          *dest++ = enhex(*src++ & 0x0f);
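/*
 *  Usage sketch (illustrative only; both directions expect destination
 *  buffers of at least MAX_URL_SIZE bytes and error handling is up to the
 *  caller -- the sample URL is made up):
 *
 *      byte raw[] = "http://example.com/a%20b%2Fc";
 *      byte clean[MAX_URL_SIZE], escaped[MAX_URL_SIZE];
 *      int err;
 *
 *      if ((err = url_deescape(raw, clean)) ||
 *          (err = url_enescape(clean, escaped)))
 *        printf("error %d\n", err);
 *      else
 *        printf("%s\n", escaped);
 */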
/* Split an URL (several parts may be copied to the destination buffer) */

byte *url_proto_names[URL_PROTO_MAX] = URL_PNAMES;
static int url_proto_path_flags[URL_PROTO_MAX] = URL_PATH_FLAGS;

identify_protocol(const byte *p)
  for (i=1; i<URL_PROTO_MAX; i++)
    if (!strcasecmp(p, url_proto_names[i]))
      return i;
  return URL_PROTO_UNKNOWN;
url_split(byte *s, struct url *u, byte *d)
  bzero(u, sizeof(struct url));
  u->bufend = d + MAX_URL_SIZE - 10;

  if (s[0] != '/')                      /* Seek for "protocol:" */
      while (*p && Calnum(*p))
      if (p != s && *p == ':')
          u->protoid = identify_protocol(u->protocol);
          if (url_proto_path_flags[u->protoid] && (s[0] != '/' || s[1] != '/'))
              /* The protocol requires a complete host spec, but it's missing -> treat it as a relative path instead */
              int len = d - u->protocol;

  if (s[0] == '/')                      /* Host spec or absolute path */
      if (s[1] == '/')                  /* Host spec */
          while (*s && *s != '/' && *s != '?')  /* Copy user:passwd@host:port */
          else                          /* This shouldn't happen with sane URLs, but we need to be sure */
          if (at)                       /* user:passwd present */
              if (e = strchr(q, ':'))
          if (e)                        /* host:port present */
              p = strtoul(e, &ep, 10);
              if ((ep && *ep) || p > 65535)
                return URL_ERR_INVALID_PORT;
              else if (p)               /* Port 0 (e.g. in ":/") is treated as the default port */
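/*
 *  What url_split() produces, roughly (an illustrative sketch; the sample
 *  URL is made up and error handling is omitted):
 *
 *      byte line[] = "http://user:pw@www.example.com:8080/dir/file?q=1";
 *      byte buf[MAX_URL_SIZE];
 *      struct url u;
 *
 *      if (!url_split(line, &u, buf))
 *        ...
 *
 *  the function fills in u.protocol = "http", u.user = "user", u.pass = "pw",
 *  u.host = "www.example.com", u.port = 8080 and u.rest = "/dir/file?q=1".
 *  In the full pipeline the input normally goes through url_deescape() first
 *  (see url_canon_split_rel() below).
 */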
/* Normalization according to the given base URL */

static uns std_ports[] = URL_DEFPORTS;  /* Default port numbers */
relpath_merge(struct url *u, struct url *b)
  if (a[0] == '/')                      /* Absolute path => OK */
  if (o[0] != '/' && o[0] != '?')
    return URL_PATH_UNDERFLOW;

  if (!a[0])                            /* Empty URL -> inherit everything */

  u->rest = d;                          /* We know we'll need to copy the path somewhere else */

  if (a[0] == '#')                      /* Another fragment */
      for (p=o; *p && *p != '#'; p++)
  if (a[0] == '?')                      /* New query */
      for (p=o; *p && *p != '#' && *p != '?'; p++)
  if (a[0] == ';')                      /* Change parameters */
      for (p=o; *p && *p != ';' && *p != '?' && *p != '#'; p++)

  p = NULL;                             /* Copy original path and find the last slash */
  while (*o && *o != ';' && *o != '?' && *o != '#')
        return URL_ERR_TOO_LONG;
      if ((*d++ = *o++) == '/')
    return URL_ERR_REL_NOTHING;

      if (a[1] == '/' || !a[1])         /* Skip "./" and ".$" */
      else if (a[1] == '.' && (a[2] == '/' || !a[2]))   /* "../" */
              /*
               * RFC 1808 says we should leave ".." as a path segment, but
               * we intentionally break the rule and refuse the URL.
               */
              if (!url_ignore_underflow)
                return URL_PATH_UNDERFLOW;
          d--;                          /* Discard trailing slash */
      while (a[0] && a[0] != '/')
          return URL_ERR_TOO_LONG;

copy:                                   /* Combine part of old URL with the new one */
      return URL_ERR_TOO_LONG;
      return URL_ERR_TOO_LONG;
url_normalize(struct url *u, struct url *b)
  if ((url_proto_path_flags[u->protoid] && (!u->host || !*u->host)) ||
      (!u->host && u->user) ||
      (!u->user && u->pass) ||
    return URL_SYNTAX_ERROR;

  /* Now we know it's a relative URL. Do we have any base? */
  if (!b || !url_proto_path_flags[b->protoid])
    return URL_ERR_REL_NOTHING;
  u->protocol = b->protocol;
  u->protoid = b->protoid;

  /* Reference to the same host */
      if (err = relpath_merge(u, b))

  /* If the path starts with "?", turn it into "/?" -- that's what it really means */
  if (u->rest[0] == '?')
      int l = strlen(u->rest);
      if (u->bufend - u->buf < l+1)
        return URL_ERR_TOO_LONG;
      memcpy(u->buf+1, u->rest, l+1);

  /* Fill in missing info */
    u->port = std_ports[u->protoid];
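/*
 *  Relative resolution in pictures (an illustrative sketch; the URLs are
 *  made up):
 *
 *      base:  "http://www.example.com/a/b/c.html"
 *
 *      "../d.html"   ->  "http://www.example.com/a/d.html"
 *      "?q=1"        ->  "http://www.example.com/a/b/c.html?q=1"
 *      "#frag"       ->  the same path and query, a new fragment
 *      "../../../x"  ->  URL_PATH_UNDERFLOW, unless URL.IgnoreUnderflow is set
 */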
/* Name canonicalization */

lowercase(byte *b)
  if (*b >= 'A' && *b <= 'Z')

kill_end_dot(byte *b)
      k = b + strlen(b) - 1;
      while (k > b && *k == '.')

url_canonicalize(struct url *u)
  lowercase(u->protocol);
  kill_end_dot(u->host);
  if ((!u->rest || !*u->rest) && url_proto_path_flags[u->protoid])
  if (u->rest && (c = strchr(u->rest, '#')))    /* Kill the fragment reference */
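/*
 *  Canonicalization in a nutshell (a sketch; the host name is a placeholder):
 *
 *      "HTTP://www.example.com./x#top"
 *        ->  protocol "http", host "www.example.com" (trailing dot stripped),
 *            rest "/x" (the "#top" fragment reference is dropped);
 *            an empty path is filled in for path-based protocols (normally "/").
 */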
/* Pack a broken-down URL */

append(byte *d, const byte *s, byte *e)

url_pack(struct url *u, byte *d)
  byte *e = d + MAX_URL_SIZE - 10;

      d = append(d, u->protocol, e);
      d = append(d, ":", e);
      u->protoid = identify_protocol(u->protocol);

      d = append(d, "//", e);
          d = append(d, u->user, e);
              d = append(d, ":", e);
              d = append(d, u->pass, e);
          d = append(d, "@", e);
      d = append(d, u->host, e);
      if (u->port != std_ports[u->protoid] && u->port != ~0U)
          sprintf(z, "%u", u->port);    /* the port is unsigned */
          d = append(d, ":", e);
    d = append(d, u->rest, e);
    return URL_ERR_TOO_LONG;
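/*
 *  url_pack() is the inverse of url_split(): it reassembles the components
 *  into a single (still de-escaped) string, and url_enescape() then produces
 *  the final escaped form. A sketch (u is assumed to have been filled in by
 *  url_split()/url_normalize()):
 *
 *      byte packed[MAX_URL_SIZE], out[MAX_URL_SIZE];
 *      if (!url_pack(&u, packed) && !url_enescape(packed, out))
 *        puts(out);
 */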
static char *errmsg[] = {
  "Something is wrong",
  "Invalid escaped character",
  "Invalid port number",
  "Relative URL not allowed",

  if (err >= sizeof(errmsg) / sizeof(char *))
/* Standard cookbook recipes */

url_canon_split_rel(const byte *u, byte *buf1, byte *buf2, struct url *url, struct url *base)
  if (err = url_deescape(u, buf1))
    return err;
  if (err = url_split(buf1, url, buf2))
    return err;
  if (err = url_normalize(url, base))
    return err;
  return url_canonicalize(url);

url_auto_canonicalize_rel(const byte *src, byte *dst, struct url *base)
  byte buf1[MAX_URL_SIZE], buf2[MAX_URL_SIZE], buf3[MAX_URL_SIZE];

  (void)((err = url_canon_split_rel(src, buf1, buf2, &ur, base)) ||
         (err = url_pack(&ur, buf3)) ||
         (err = url_enescape(buf3, dst)));
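/*
 *  Putting it together (a sketch; the URLs are placeholders):
 *
 *      struct url base;
 *      byte b1[MAX_URL_SIZE], b2[MAX_URL_SIZE], out[MAX_URL_SIZE];
 *      byte base_url[] = "http://www.example.com/a/b.html";
 *
 *      if (!url_canon_split_rel(base_url, b1, b2, &base, NULL)
 *          && !url_auto_canonicalize_rel("../c.html", out, &base))
 *        puts(out);
 *
 *  which should print "http://www.example.com/c.html".
 */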
int main(int argc, char **argv)
  char buf1[MAX_URL_SIZE], buf2[MAX_URL_SIZE], buf3[MAX_URL_SIZE], buf4[MAX_URL_SIZE];
  struct url url, url0;
  char *base = "http://mj@www.hell.org/123/sub_dir/index.html;param?query&zzz/subquery#fragment";

  if (argc != 2 && argc != 3)

  if (err = url_deescape(argv[1], buf1))
      printf("deesc: error %d\n", err);
  printf("deesc: %s\n", buf1);

  if (err = url_split(buf1, &url, buf2))
      printf("split: error %d\n", err);
  printf("split: @%s@%s@%s@%s@%d@%s\n", url.protocol, url.user, url.pass, url.host, url.port, url.rest);

  if (err = url_split(base, &url0, buf3))
      printf("split base: error %d\n", err);

  if (err = url_normalize(&url0, NULL))
      printf("normalize base: error %d\n", err);
  printf("base: @%s@%s@%s@%s@%d@%s\n", url0.protocol, url0.user, url0.pass, url0.host, url0.port, url0.rest);

  if (err = url_normalize(&url, &url0))
      printf("normalize: error %d\n", err);
  printf("normalize: @%s@%s@%s@%s@%d@%s\n", url.protocol, url.user, url.pass, url.host, url.port, url.rest);

  if (err = url_canonicalize(&url))
      printf("canonicalize: error %d\n", err);
  printf("canonicalize: @%s@%s@%s@%s@%d@%s\n", url.protocol, url.user, url.pass, url.host, url.port, url.rest);

  if (err = url_pack(&url, buf4))
      printf("pack: error %d\n", err);
  printf("pack: %s\n", buf4);

  if (err = url_enescape(buf4, buf2))
      printf("enesc: error %d\n", err);
  printf("enesc: %s\n", buf2);
/* A simple rotate-and-XOR hash of one component */
hashf(const byte *start, int length)
    hf = (hf << 8 | hf >> 24) ^ *start++;

/* How many times the group of `len' components starting at `comp' repeats
 * consecutively within the following `count' components */
repeat_count(struct component *comp, uns count, uns len)
  struct component *orig_comp = comp;
    for (i=0; i<len; i++)
      if (comp[i].hash != orig_comp[i].hash
          || comp[i].length != orig_comp[i].length
          || memcmp(comp[i].start, orig_comp[i].start, comp[i].length))
url_has_repeated_component(const byte *url)
  struct component *comp;
  uns comps, comp_len, rep_prefix;

  /* Count the components */
  for (comps=0, c=url; c; comps++)
      c = strpbrk(c, url_component_separators);
  if (comps < url_min_repeat_count)

  /* Split the URL into components */
  comp = alloca(comps * sizeof(struct component));
  for (i=0, c=url; c; i++)
      c = strpbrk(c, url_component_separators);
          comp[i].length = c - comp[i].start;
        comp[i].length = strlen(comp[i].start);

  /* Hash the components and look for a repeated group */
  for (i=0; i<comps; i++)
    comp[i].hash = hashf(comp[i].start, comp[i].length);
  for (comp_len = 1; comp_len <= url_max_repeat_length && comp_len <= comps; comp_len++)
    for (rep_prefix = 0; rep_prefix <= comps - comp_len; rep_prefix++)
      if (repeat_count(comp + rep_prefix, comps - rep_prefix, comp_len) >= url_min_repeat_count)
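/*
 *  Example of what the repeated-component check is for (a sketch; separators
 *  and limits come from the URL.* configuration above, e.g.
 *  ComponentSeparators "/" with suitably low MinRepeatCount and
 *  MaxRepeatLength limits):
 *
 *      url_has_repeated_component("http://x/a/b/a/b/a/b/c")
 *
 *  finds the two-component group "a/b" occurring three times in a row, which
 *  is handy for recognizing looping URLs.
 */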