/*
 *  UCW Library -- URL Functions
 *
 *  (c) 1997--2004 Martin Mares <mj@ucw.cz>
 *  (c) 2001--2005 Robert Spalek <robert@ucw.cz>
 *
 *  This software may be freely distributed and used according to the terms
 *  of the GNU Lesser General Public License.
 *
 *  The URL syntax corresponds to RFC 2396 with several exceptions:
 *
 *    o  Escaping of special characters still follows RFC 1738.
 *    o  Interpretation of path parameters follows RFC 1808.
 *
 *  XXX: The buffer handling in this module is really horrible, but it works.
 */
#include "lib/chartype.h"

static uns url_ignore_spaces;
static uns url_ignore_underflow;
static char *url_component_separators = "";
static uns url_min_repeat_count = 0x7fffffff;
static uns url_max_repeat_length = 0;
static uns url_max_occurences = ~0U;
static struct cf_section url_config = {
  CF_UNS("IgnoreSpaces", &url_ignore_spaces),
  CF_UNS("IgnoreUnderflow", &url_ignore_underflow),
  CF_STRING("ComponentSeparators", &url_component_separators),
  CF_UNS("MinRepeatCount", &url_min_repeat_count),
  CF_UNS("MaxRepeatLength", &url_max_repeat_length),
  CF_UNS("MaxOccurences", &url_max_occurences),
static void CONSTRUCTOR url_init_config(void)
{
  cf_declare_section("URL", &url_config, 0);
}
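/*
 * Added note (not part of the original file): the options above are read
 * from the "URL" configuration section.  IgnoreSpaces and IgnoreUnderflow
 * relax the parser (leading/trailing spaces in URLs, ".." climbing above
 * the root path); ComponentSeparators, MinRepeatCount, MaxRepeatLength and
 * MaxOccurences drive url_has_repeated_component() below.  With the
 * compiled-in defaults the repeated-component detector never fires.
 */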
/* Escaping and de-escaping */

  /* enhex(): map a value 0..15 to the corresponding hexadecimal digit */
  return (x < 10) ? (x + '0') : (x - 10 + 'A');
url_deescape(const byte *s, byte *d)
  byte *end = d + MAX_URL_SIZE - 10;
      return URL_ERR_TOO_LONG;
      if (!Cxdigit(s[1]) || !Cxdigit(s[2]))
        return URL_ERR_INVALID_ESCAPE;
      val = Cxvalue(s[1])*16 + Cxvalue(s[2]);
        return URL_ERR_INVALID_ESCAPED_CHAR;
          val = NCC_SEMICOLON; break;
          val = NCC_SLASH; break;
          val = NCC_QUEST; break;
          val = NCC_COLON; break;
          val = NCC_EQUAL; break;
          val = NCC_HASH; break;
      if (!url_ignore_spaces || !(!*s || d == dstart))
            return URL_ERR_TOO_LONG;
      return URL_ERR_INVALID_CHAR;
url_enescape(const byte *s, byte *d)
  byte *end = d + MAX_URL_SIZE - 10;
      return URL_ERR_TOO_LONG;
      if (Calnum(c) ||                                                  /* RFC 1738(2.2): Only alphanumerics ... */
          c == '$' || c == '-' || c == '_' || c == '.' || c == '+' ||   /* ... and several other exceptions ... */
          c == '!' || c == '*' || c == '\'' || c == '(' || c == ')' ||
          c == '/' || c == '?' || c == ':' || c == '@' ||               /* ... and reserved characters used for their reserved purpose */
          c == '=' || c == '&' || c == '#' || c == ';')
          uns val = (*s < NCC_MAX) ? NCC_CHARS[*s] : *s;                /* map NCC_* codes back to the real character */
          *d++ = enhex(val >> 4);
          *d++ = enhex(val & 0x0f);
url_enescape_friendly(const byte *src, byte *dest)
  byte *end = dest + MAX_URL_SIZE - 10;
      return URL_ERR_TOO_LONG;
        *dest++ = NCC_CHARS[*src++];
      else if (*src >= 0x20 && *src < 0x7f)
          *dest++ = enhex(*src >> 4);
          *dest++ = enhex(*src++ & 0x0f);
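
/*
 * Illustrative usage sketch (added by the editor, not part of the original
 * module).  It assumes the usual convention of these functions: 0 on
 * success, a URL_ERR_* code otherwise, with the caller providing
 * MAX_URL_SIZE buffers.
 */
static void escape_example(void)
{
  byte plain[MAX_URL_SIZE], escaped[MAX_URL_SIZE];

  /* "%7E" decodes to '~'; reserved characters that were escaped would come out as NCC_* codes */
  if (url_deescape("http://example.com/%7Euser/index.html", plain))
    return;                             /* a URL_ERR_* code */
  /* Re-escaping keeps reserved characters and %-encodes unsafe ones again */
  if (url_enescape(plain, escaped))
    return;
}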
/* Split a URL (several parts may be copied to the destination buffer) */

byte *url_proto_names[URL_PROTO_MAX] = URL_PNAMES;
static int url_proto_path_flags[URL_PROTO_MAX] = URL_PATH_FLAGS;

identify_protocol(const byte *p)
  for (i=1; i<URL_PROTO_MAX; i++)
    if (!strcasecmp(p, url_proto_names[i]))
  return URL_PROTO_UNKNOWN;
url_split(byte *s, struct url *u, byte *d)
  bzero(u, sizeof(struct url));
  u->bufend = d + MAX_URL_SIZE - 10;

  if (s[0] != '/')                      /* Look for "protocol:" */
      while (*p && Calnum(*p))
      if (p != s && *p == ':')
          u->protoid = identify_protocol(u->protocol);
          if (url_proto_path_flags[u->protoid] && (s[0] != '/' || s[1] != '/'))
              /* The protocol requires a complete host spec, but it is missing -> treat it as a relative path instead */
              int len = d - u->protocol;

  if (s[0] == '/')                      /* Host spec or absolute path */
      if (s[1] == '/')                  /* Host spec */
          while (*s && *s != '/' && *s != '?')  /* Copy user:passwd@host:port */
          else                          /* This shouldn't happen with sane URLs, but we need to be sure */
          if (at)                       /* user:passwd present */
              if (e = strchr(q, ':'))
          if (e)                        /* host:port present */
              p = strtoul(e, &ep, 10);
              if ((ep && *ep) || p > 65535)
                return URL_ERR_INVALID_PORT;
              else if (p)               /* Port 0 (e.g. in ":/") is treated as the default port */
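
/*
 * Illustrative usage sketch (added by the editor, not part of the original
 * module).  The example URL and the expected field values are assumptions
 * based on the parsing rules above; the destination buffer keeps the copied
 * parts, so it must stay alive as long as the struct url is used.
 */
static void split_example(void)
{
  byte deesc[MAX_URL_SIZE], buf[MAX_URL_SIZE];
  struct url u;

  if (url_deescape("http://user:pw@www.example.com:8080/docs/x?q=1", deesc) ||
      url_split(deesc, &u, buf))
    return;                             /* a URL_ERR_* code */
  /* Expected result: u.protocol="http", u.user="user", u.pass="pw",
     u.host="www.example.com", u.port=8080, u.rest="/docs/x?q=1" */
}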
/* Normalization according to a given base URL */

static uns std_ports[] = URL_DEFPORTS;  /* Default port numbers */

relpath_merge(struct url *u, struct url *b)
  if (a[0] == '/')                      /* Absolute path => OK */
  if (o[0] != '/' && o[0] != '?')
    return URL_PATH_UNDERFLOW;

  if (!a[0])                            /* Empty URL -> inherit everything */

  u->rest = d;                          /* We know we'll need to copy the path somewhere else */

  if (a[0] == '#')                      /* Another fragment */
      for (p=o; *p && *p != '#'; p++)
  if (a[0] == '?')                      /* New query */
      for (p=o; *p && *p != '#' && *p != '?'; p++)
  if (a[0] == ';')                      /* Change parameters */
      for (p=o; *p && *p != ';' && *p != '?' && *p != '#'; p++)

  p = NULL;                             /* Copy the original path and find the last slash */
  while (*o && *o != ';' && *o != '?' && *o != '#')
        return URL_ERR_TOO_LONG;
      if ((*d++ = *o++) == '/')
    return URL_ERR_REL_NOTHING;

      if (a[1] == '/' || !a[1])         /* Skip "./" and ".$" */
      else if (a[1] == '.' && (a[2] == '/' || !a[2]))   /* "../" */
              /*
               * RFC 1808 says we should leave ".." as a path segment, but
               * we intentionally break the rule and refuse the URL.
               */
              if (!url_ignore_underflow)
                return URL_PATH_UNDERFLOW;
          d--;                          /* Discard the trailing slash */
          while (a[0] && a[0] != '/')
              return URL_ERR_TOO_LONG;

copy:                                   /* Combine part of the old URL with the new one */
    return URL_ERR_TOO_LONG;
    return URL_ERR_TOO_LONG;
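
/*
 * Worked example (added comment): resolving the relative reference
 * "../x?q#f" against a base whose rest is "/a/b/c;p?old":
 *
 *   o  the base path is copied up to and including its last slash: "/a/b/"
 *   o  the "../" segment discards one more component: "/a/"
 *   o  the remaining "x?q#f" is appended, giving "/a/x?q#f"
 *
 * A ".." that would climb above the root is a path underflow and is
 * refused unless the IgnoreUnderflow option is set.
 */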
url_normalize(struct url *u, struct url *b)
  if ((url_proto_path_flags[u->protoid] && (!u->host || !*u->host)) ||
      (!u->host && u->user) ||
      (!u->user && u->pass) ||
    return URL_SYNTAX_ERROR;

      /* Now we know it's a relative URL. Do we have any base? */
      if (!b || !url_proto_path_flags[b->protoid])
        return URL_ERR_REL_NOTHING;
      u->protocol = b->protocol;
      u->protoid = b->protoid;

  /* Reference to the same host */
      if (err = relpath_merge(u, b))

  /* A path beginning with "?" really means "/?" -> insert the missing slash */
  if (u->rest[0] == '?')
      int l = strlen(u->rest);
      if (u->bufend - u->buf < l+1)
        return URL_ERR_TOO_LONG;
      memcpy(u->buf+1, u->rest, l+1);

  /* Fill in missing info */
    u->port = std_ports[u->protoid];
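
/*
 * Illustrative usage sketch (added by the editor, not part of the original
 * module): resolving a relative reference against an already split and
 * normalized base URL.  The URLs and the expected result are assumptions
 * used only for illustration.
 */
static void normalize_example(void)
{
  byte b1[MAX_URL_SIZE], b2[MAX_URL_SIZE], b3[MAX_URL_SIZE], b4[MAX_URL_SIZE];
  struct url base, u;

  if (url_deescape("http://www.example.com/a/b/c.html", b1) ||
      url_split(b1, &base, b2) ||
      url_normalize(&base, NULL))
    return;
  if (url_deescape("../d.html#frag", b3) ||
      url_split(b3, &u, b4) ||
      url_normalize(&u, &base))
    return;
  /* u should now describe http://www.example.com/a/d.html#frag,
     with the host and the default http port inherited from the base */
}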
/* Name canonicalization */

      /* lowercase(): convert ASCII upper-case letters in place */
      if (*b >= 'A' && *b <= 'Z')

/* kill_end_dot(): strip trailing dots from a host name */
kill_end_dot(byte *b)
      k = b + strlen(b) - 1;
      while (k > b && *k == '.')

url_canonicalize(struct url *u)
  lowercase(u->protocol);
  kill_end_dot(u->host);
  if ((!u->rest || !*u->rest) && url_proto_path_flags[u->protoid])
  if (u->rest && (c = strchr(u->rest, '#')))    /* Kill the fragment reference */
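
/*
 * Added note (not part of the original file): canonicalization lower-cases
 * the protocol and the host name, strips trailing dots from the host,
 * supplies the default "/" path for protocols that require one and removes
 * the "#" fragment reference.
 */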
/* Pack a broken-down URL */

/* append(): copy the string s to d, returning the new end pointer or NULL
 * when the output buffer (bounded by e) would overflow */
append(byte *d, const byte *s, byte *e)

url_pack(struct url *u, byte *d)
  byte *e = d + MAX_URL_SIZE - 10;

      d = append(d, u->protocol, e);
      d = append(d, ":", e);
      u->protoid = identify_protocol(u->protocol);
      d = append(d, "//", e);
          d = append(d, u->user, e);
              d = append(d, ":", e);
              d = append(d, u->pass, e);
          d = append(d, "@", e);
      d = append(d, u->host, e);
      if (u->port != std_ports[u->protoid] && u->port != ~0U)
          sprintf(z, "%d", u->port);
          d = append(d, ":", e);
    d = append(d, u->rest, e);
    return URL_ERR_TOO_LONG;
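
/*
 * Illustrative usage sketch (added by the editor, not part of the original
 * module): url_pack() is handy for rewriting a URL -- split it, edit the
 * fields, pack it back and re-escape it.  The example URL and the idea of
 * stripping the user:password part are assumptions used only for
 * illustration.
 */
static int pack_example(byte *dst)
{
  byte b1[MAX_URL_SIZE], b2[MAX_URL_SIZE], packed[MAX_URL_SIZE];
  struct url u;
  int err;

  if ((err = url_deescape("http://user:secret@www.example.com/x", b1)) ||
      (err = url_split(b1, &u, b2)) ||
      (err = url_normalize(&u, NULL)) ||
      (err = url_canonicalize(&u)))
    return err;
  u.user = u.pass = NULL;               /* drop the credentials */
  if (err = url_pack(&u, packed))
    return err;
  return url_enescape(packed, dst);
}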
static char *errmsg[] = {
  "Something is wrong",
  "Invalid escaped character",
  "Invalid port number",
  "Relative URL not allowed",

  if (err >= sizeof(errmsg) / sizeof(char *))
/* Standard cookbook recipes */

url_canon_split_rel(const byte *u, byte *buf1, byte *buf2, struct url *url, struct url *base)
  if (err = url_deescape(u, buf1))
  if (err = url_split(buf1, url, buf2))
  if (err = url_normalize(url, base))
  return url_canonicalize(url);

url_auto_canonicalize_rel(const byte *src, byte *dst, struct url *base)
  byte buf1[MAX_URL_SIZE], buf2[MAX_URL_SIZE], buf3[MAX_URL_SIZE];

  (void)((err = url_canon_split_rel(src, buf1, buf2, &ur, base)) ||
         (err = url_pack(&ur, buf3)) ||
         (err = url_enescape(buf3, dst)));
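
/*
 * Illustrative usage sketch (added by the editor, not part of the original
 * module): the one-call recipe most callers want.  The URLs are examples
 * only; url_auto_canonicalize_rel() runs the whole
 * deescape -> split -> normalize -> canonicalize -> pack -> enescape chain
 * into a caller-supplied buffer of MAX_URL_SIZE bytes.
 */
static int cookbook_example(byte *dst)
{
  struct url base;
  byte b1[MAX_URL_SIZE], b2[MAX_URL_SIZE];
  int err;

  /* Prepare the base document's URL (no base of its own, hence NULL) */
  if (err = url_canon_split_rel("http://www.example.com/docs/", b1, b2, &base, NULL))
    return err;
  /* Resolve and canonicalize a link found in that document */
  return url_auto_canonicalize_rel("../images/logo.png", dst, &base);
}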
int main(int argc, char **argv)
  char buf1[MAX_URL_SIZE], buf2[MAX_URL_SIZE], buf3[MAX_URL_SIZE], buf4[MAX_URL_SIZE];
  struct url url, url0;
  char *base = "http://mj@www.hell.org/123/sub_dir/index.html;param?query&zzz/subquery#fragment";

  if (argc != 2 && argc != 3)
  if (err = url_deescape(argv[1], buf1))
      printf("deesc: error %d\n", err);
  printf("deesc: %s\n", buf1);
  if (err = url_split(buf1, &url, buf2))
      printf("split: error %d\n", err);
  printf("split: @%s@%s@%s@%s@%d@%s\n", url.protocol, url.user, url.pass, url.host, url.port, url.rest);
  if (err = url_split(base, &url0, buf3))
      printf("split base: error %d\n", err);
  if (err = url_normalize(&url0, NULL))
      printf("normalize base: error %d\n", err);
  printf("base: @%s@%s@%s@%s@%d@%s\n", url0.protocol, url0.user, url0.pass, url0.host, url0.port, url0.rest);
  if (err = url_normalize(&url, &url0))
      printf("normalize: error %d\n", err);
  printf("normalize: @%s@%s@%s@%s@%d@%s\n", url.protocol, url.user, url.pass, url.host, url.port, url.rest);
  if (err = url_canonicalize(&url))
      printf("canonicalize: error %d\n", err);
  printf("canonicalize: @%s@%s@%s@%s@%d@%s\n", url.protocol, url.user, url.pass, url.host, url.port, url.rest);
  if (err = url_pack(&url, buf4))
      printf("pack: error %d\n", err);
  printf("pack: %s\n", buf4);
  if (err = url_enescape(buf4, buf2))
      printf("enesc: error %d\n", err);
  printf("enesc: %s\n", buf2);
/* hashf(): a simple rotating hash of one URL component */
hashf(const byte *start, int length)
    hf = (hf << 8 | hf >> 24) ^ *start++;

/* repeat_count(): how many times the leading group of len components repeats consecutively */
repeat_count(struct component *comp, uns count, uns len)
  struct component *orig_comp = comp;
      for (i=0; i<len; i++)
        if (comp[i].hash != orig_comp[i].hash
            || comp[i].length != orig_comp[i].length
            || memcmp(comp[i].start, orig_comp[i].start, comp[i].length))
url_has_repeated_component(const byte *url)
  struct component *comp;
  uns comps, comp_len, rep_prefix, hash_size, *hash, *next;

  /* Count the components separated by url_component_separators */
  for (comps=0, c=url; c; comps++)
      c = strpbrk(c, url_component_separators);
  if (comps < url_min_repeat_count && comps <= url_max_occurences)

  /* Split the URL into components */
  comp = alloca(comps * sizeof(*comp));
  for (i=0, c=url; c; i++)
      c = strpbrk(c, url_component_separators);
        comp[i].length = c - comp[i].start;
        comp[i].length = strlen(comp[i].start);

  /* Hash the components */
  for (i=0; i<comps; i++)
    comp[i].hash = hashf(comp[i].start, comp[i].length);

  /* Check whether some component occurs too many times (chained hash table keyed by the component hash) */
  if (comps > url_max_occurences)
      hash_size = next_table_prime(comps);
      hash = alloca(hash_size * sizeof(*hash));
      next = alloca(comps * sizeof(*next));
      memset(hash, 255, hash_size * sizeof(*hash));
      for (i=0; i<comps; i++)
          j = comp[i].hash % hash_size;
          for (k = hash[j]; ~k && (comp[i].hash != comp[k].hash || comp[i].length != comp[k].length ||
               memcmp(comp[k].start, comp[i].start, comp[i].length)); k = next[k]);
          if (comp[k].count++ >= url_max_occurences)

  /* Look for a repeated run of comp_len consecutive components */
  for (comp_len = 1; comp_len <= url_max_repeat_length && comp_len <= comps; comp_len++)
    for (rep_prefix = 0; rep_prefix <= comps - comp_len; rep_prefix++)
      if (repeat_count(comp + rep_prefix, comps - rep_prefix, comp_len) >= url_min_repeat_count)
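
/*
 * Illustrative usage sketch (added by the editor, not part of the original
 * module): the repeated-component check is typically used to spot looping
 * URLs such as ".../a/b/a/b/a/b/...".  Whether anything is reported depends
 * entirely on the URL configuration section (ComponentSeparators,
 * MinRepeatCount, MaxRepeatLength, MaxOccurences); with the compiled-in
 * defaults the check never fires.
 */
static void repeat_example(void)
{
  if (url_has_repeated_component("http://www.example.com/a/b/a/b/a/b/a/b/"))
    {
      /* looks like a crawler trap -- skip or down-prioritize this URL */
    }
}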