/*
 *  UCW Library -- URL Functions
 *
 *  (c) 1997--2004 Martin Mares <mj@ucw.cz>
 *  (c) 2001--2005 Robert Spalek <robert@ucw.cz>
 *
 *  This software may be freely distributed and used according to the terms
 *  of the GNU Lesser General Public License.
 *
 *  The URL syntax corresponds to RFC 2396 with several exceptions:
 *
 *    o  Escaping of special characters still follows RFC 1738.
 *    o  Interpretation of path parameters follows RFC 1808.
 *
 *  XXX: The buffer handling in this module is really horrible, but it works.
 */
#include "lib/lib.h"
#include "lib/url.h"
#include "lib/chartype.h"
#include "lib/conf.h"

#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <alloca.h>
static uns url_ignore_spaces;
static uns url_ignore_underflow;
static byte *url_component_separators = "";
static uns url_min_repeat_count = 0x7fffffff;
static uns url_max_repeat_length = 0;
static struct cfitem url_config[] = {
  { "URL",                  CT_SECTION, NULL },
  { "IgnoreSpaces",         CT_INT,     &url_ignore_spaces },
  { "IgnoreUnderflow",      CT_INT,     &url_ignore_underflow },
  { "ComponentSeparators",  CT_STRING,  &url_component_separators },
  { "MinRepeatCount",       CT_INT,     &url_min_repeat_count },
  { "MaxRepeatLength",      CT_INT,     &url_max_repeat_length },
  { NULL,                   CT_STOP,    NULL }
};
static void CONSTRUCTOR url_init_config(void)
{
  cf_register(url_config);
}
/* Escaping and de-escaping */

  return (x<10) ? (x + '0') : (x - 10 + 'A');
url_deescape(byte *s, byte *d)
  byte *end = d + MAX_URL_SIZE - 10;
        return URL_ERR_TOO_LONG;
      if (!Cxdigit(s[1]) || !Cxdigit(s[2]))
        return URL_ERR_INVALID_ESCAPE;
      val = Cxvalue(s[1])*16 + Cxvalue(s[2]);
        return URL_ERR_INVALID_ESCAPED_CHAR;
          val = NCC_SEMICOLON; break;
          val = NCC_SLASH; break;
          val = NCC_QUEST; break;
          val = NCC_COLON; break;
          val = NCC_EQUAL; break;
          val = NCC_HASH; break;
      if (!url_ignore_spaces || !(!*s || d == dstart))
        return URL_ERR_TOO_LONG;
      return URL_ERR_INVALID_CHAR;
url_enescape(byte *s, byte *d)
  byte *end = d + MAX_URL_SIZE - 10;
        return URL_ERR_TOO_LONG;
      if (Calnum(c) ||   /* RFC 1738(2.2): Only alphanumerics ... */
          c == '$' || c == '-' || c == '_' || c == '.' || c == '+' ||   /* ... and several other exceptions ... */
          c == '!' || c == '*' || c == '\'' || c == '(' || c == ')' ||
          c == '/' || c == '?' || c == ':' || c == '@' ||   /* ... and reserved chars used for reserved purpose */
          c == '=' || c == '&' || c == '#' || c == ';')
          uns val = (*s < NCC_MAX) ? NCC_CHARS[*s] : *s;
          *d++ = enhex(val >> 4);
          *d++ = enhex(val & 0x0f);
url_enescape_friendly(byte *src, byte *dest)
  byte *end = dest + MAX_URL_SIZE - 10;
        return URL_ERR_TOO_LONG;
      *dest++ = NCC_CHARS[*src++];
    else if (*src >= 0x20 && *src < 0x7f)
      *dest++ = enhex(*src >> 4);
      *dest++ = enhex(*src++ & 0x0f);
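
/*
 * Usage sketch (illustrative, not part of the original module): a round trip
 * through url_deescape() and url_enescape(). Variable names are examples
 * only; both buffers must be MAX_URL_SIZE bytes long and the functions
 * return 0 on success or one of the URL_ERR_* codes.
 *
 *   byte plain[MAX_URL_SIZE], escaped[MAX_URL_SIZE];
 *   if (!url_deescape("http://www.ucw.cz/a%20b", plain) &&   // "%20" becomes a space
 *       !url_enescape(plain, escaped))                        // ... and is escaped back to "%20"
 *     puts(escaped);
 */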
/* Split a URL (several parts may be copied to the destination buffer) */

byte *url_proto_names[URL_PROTO_MAX] = URL_PNAMES;
static int url_proto_path_flags[URL_PROTO_MAX] = URL_PATH_FLAGS;
identify_protocol(byte *p)
  for(i=1; i<URL_PROTO_MAX; i++)
    if (!strcasecmp(p, url_proto_names[i]))
  return URL_PROTO_UNKNOWN;
url_split(byte *s, struct url *u, byte *d)
  bzero(u, sizeof(struct url));
  u->bufend = d + MAX_URL_SIZE - 10;
  if (s[0] != '/')   /* Seek for "protocol:" */
      while (*p && Calnum(*p))
      if (p != s && *p == ':')
          u->protoid = identify_protocol(u->protocol);
          if (url_proto_path_flags[u->protoid] && (s[0] != '/' || s[1] != '/'))
              /* The protocol requires a complete host spec, but it's missing -> treat as a relative path instead */
              int len = d - u->protocol;
  if (s[0] == '/')   /* Host spec or absolute path */
      if (s[1] == '/')   /* Host spec */
          while (*s && *s != '/' && *s != '?')   /* Copy user:passwd@host:port */
          else   /* This shouldn't happen with sane URLs, but we need to be sure */
          if (at)   /* user:passwd present */
              if (e = strchr(q, ':'))
          if (e)   /* host:port present */
              p = strtoul(e, &ep, 10);
              if ((ep && *ep) || p > 65535)
                return URL_ERR_INVALID_PORT;
              else if (p)   /* Port 0 (e.g. in :/) is treated as the default port */
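
/*
 * Usage sketch (illustrative): splitting an already de-escaped URL into its
 * components. The struct url fields point into the destination buffer, so
 * that buffer has to live as long as the struct is in use.
 *
 *   struct url u;
 *   byte in[MAX_URL_SIZE], buf[MAX_URL_SIZE];
 *   if (!url_deescape("http://mj:secret@www.ucw.cz:8080/doc?x=1", in) &&
 *       !url_split(in, &u, buf))
 *     printf("%s %s %u %s\n", u.protocol, u.host, u.port, u.rest);
 *     // prints "http www.ucw.cz 8080 /doc?x=1"
 */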
/* Normalization according to a given base URL */

static uns std_ports[] = URL_DEFPORTS;   /* Default port numbers */
relpath_merge(struct url *u, struct url *b)
  if (a[0] == '/')   /* Absolute path => OK */
  if (o[0] != '/' && o[0] != '?')
    return URL_PATH_UNDERFLOW;

  if (!a[0])   /* Empty URL -> inherit everything */

  u->rest = d;   /* We know we'll need to copy the path somewhere else */

  if (a[0] == '#')   /* Another fragment */
      for(p=o; *p && *p != '#'; p++)

  if (a[0] == '?')   /* New query */
      for(p=o; *p && *p != '#' && *p != '?'; p++)

  if (a[0] == ';')   /* Change parameters */
      for(p=o; *p && *p != ';' && *p != '?' && *p != '#'; p++)

  p = NULL;   /* Copy original path and find the last slash */
  while (*o && *o != ';' && *o != '?' && *o != '#')
        return URL_ERR_TOO_LONG;
      if ((*d++ = *o++) == '/')
    return URL_ERR_REL_NOTHING;

          if (a[1] == '/' || !a[1])   /* Skip "./" and ".$" */
          else if (a[1] == '.' && (a[2] == '/' || !a[2]))   /* "../" */
              /*
               * RFC 1808 says we should leave ".." as a path segment, but
               * we intentionally break the rule and refuse the URL.
               */
              if (!url_ignore_underflow)
                return URL_PATH_UNDERFLOW;
              d--;   /* Discard trailing slash */

      while (a[0] && a[0] != '/')
          return URL_ERR_TOO_LONG;

copy:   /* Combine part of old URL with the new one */
      return URL_ERR_TOO_LONG;
    return URL_ERR_TOO_LONG;
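
/*
 * Worked example of the merging above (illustrative): resolving the
 * reference "../img/logo.png" against the base path "/123/sub_dir/index.html"
 * first copies the base path up to its last slash ("/123/sub_dir/"), then the
 * ".." segment discards "sub_dir/", and the rest is appended, giving
 * "/123/img/logo.png". References starting with "#", "?" or ";" instead
 * replace only the fragment, the query or the parameters of the base URL.
 */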
url_normalize(struct url *u, struct url *b)
  if ((url_proto_path_flags[u->protoid] && (!u->host || !*u->host)) ||
      (!u->host && u->user) ||
      (!u->user && u->pass) ||
    return URL_SYNTAX_ERROR;

  /* Now we know it's a relative URL. Do we have any base? */
  if (!b || !url_proto_path_flags[b->protoid])
    return URL_ERR_REL_NOTHING;
  u->protocol = b->protocol;
  u->protoid = b->protoid;

  /* Reference to the same host */
      if (err = relpath_merge(u, b))

  /* Change a leading "?" in the path to "/?", since that's what it really means */
  if (u->rest[0] == '?')
      int l = strlen(u->rest);
      if (u->bufend - u->buf < l+1)
        return URL_ERR_TOO_LONG;
      memcpy(u->buf+1, u->rest, l+1);

  /* Fill in missing info */
    u->port = std_ports[u->protoid];
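
/*
 * Usage sketch (illustrative): completing a relative reference against a base
 * URL. Both URLs come from url_split(); error checking is omitted for
 * brevity.
 *
 *   struct url base, u;
 *   byte b1[MAX_URL_SIZE], b2[MAX_URL_SIZE], b3[MAX_URL_SIZE], b4[MAX_URL_SIZE];
 *   url_deescape("http://www.ucw.cz/docs/url.html", b1);
 *   url_split(b1, &base, b2);
 *   url_normalize(&base, NULL);
 *   url_deescape("../download/", b3);
 *   url_split(b3, &u, b4);
 *   url_normalize(&u, &base);   // u now describes http://www.ucw.cz/download/
 */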
/* Name canonicalization */

    if (*b >= 'A' && *b <= 'Z')

kill_end_dot(byte *b)
      k = b + strlen(b) - 1;
      while (k > b && *k == '.')

url_canonicalize(struct url *u)
  lowercase(u->protocol);
  kill_end_dot(u->host);
  if ((!u->rest || !*u->rest) && url_proto_path_flags[u->protoid])
  if (u->rest && (c = strchr(u->rest, '#')))   /* Kill fragment reference */
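
/*
 * Effect of url_canonicalize() on an example (illustrative): a URL split from
 * "HTTP://WWW.UCW.CZ./Docs/#top" gets its protocol and host names lowercased,
 * the trailing dot of the host removed and the "#top" fragment reference
 * dropped, while the case of the path is preserved; packing it again yields
 * "http://www.ucw.cz/Docs/". An empty path is replaced by "/" for protocols
 * which require a path.
 */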
/* Pack a broken-down URL */

append(byte *d, byte *s, byte *e)

url_pack(struct url *u, byte *d)
  byte *e = d + MAX_URL_SIZE - 10;
      d = append(d, u->protocol, e);
      d = append(d, ":", e);
      u->protoid = identify_protocol(u->protocol);
      d = append(d, "//", e);
          d = append(d, u->user, e);
              d = append(d, ":", e);
              d = append(d, u->pass, e);
          d = append(d, "@", e);
      d = append(d, u->host, e);
      if (u->port != std_ports[u->protoid] && u->port != ~0U)
          sprintf(z, "%d", u->port);
          d = append(d, ":", e);
    d = append(d, u->rest, e);
    return URL_ERR_TOO_LONG;
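
/*
 * Usage sketch (illustrative): packing a broken-down URL back into a string,
 * for a struct url u filled in by url_split() and friends. The default port
 * of the protocol and an unset port (~0U) are omitted from the output; the
 * packed form typically still goes through url_enescape() before it is used
 * externally.
 *
 *   byte packed[MAX_URL_SIZE], out[MAX_URL_SIZE];
 *   if (!url_pack(&u, packed) && !url_enescape(packed, out))
 *     puts(out);
 */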
static char *errmsg[] = {
  "Something is wrong",
  "Invalid escaped character",
  "Invalid port number",
  "Relative URL not allowed",

  if (err >= sizeof(errmsg) / sizeof(char *))
/* Standard cookbook recipes */

url_canon_split_rel(byte *u, byte *buf1, byte *buf2, struct url *url, struct url *base)
  if (err = url_deescape(u, buf1))
  if (err = url_split(buf1, url, buf2))
  if (err = url_normalize(url, base))
  return url_canonicalize(url);

url_auto_canonicalize_rel(byte *src, byte *dst, struct url *base)
  byte buf1[MAX_URL_SIZE], buf2[MAX_URL_SIZE], buf3[MAX_URL_SIZE];
  (void)((err = url_canon_split_rel(src, buf1, buf2, &ur, base)) ||
         (err = url_pack(&ur, buf3)) ||
         (err = url_enescape(buf3, dst)));
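
/*
 * Usage sketch (illustrative): the usual way to canonicalize a link found in
 * a document relative to that document's URL. Error handling is omitted; all
 * buffers are MAX_URL_SIZE bytes.
 *
 *   struct url base;
 *   byte b1[MAX_URL_SIZE], b2[MAX_URL_SIZE], out[MAX_URL_SIZE];
 *   url_canon_split_rel("http://www.ucw.cz/a/b.html", b1, b2, &base, NULL);
 *   url_auto_canonicalize_rel("../c.html#x", out, &base);
 *   // out now holds "http://www.ucw.cz/c.html"
 */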
int main(int argc, char **argv)
  char buf1[MAX_URL_SIZE], buf2[MAX_URL_SIZE], buf3[MAX_URL_SIZE], buf4[MAX_URL_SIZE];
  struct url url, url0;
  char *base = "http://mj@www.hell.org/123/sub_dir/index.html;param?query&zzz/subquery#fragment";

  if (argc != 2 && argc != 3)
  if (err = url_deescape(argv[1], buf1))
      printf("deesc: error %d\n", err);
  printf("deesc: %s\n", buf1);
  if (err = url_split(buf1, &url, buf2))
      printf("split: error %d\n", err);
  printf("split: @%s@%s@%s@%s@%d@%s\n", url.protocol, url.user, url.pass, url.host, url.port, url.rest);
  if (err = url_split(base, &url0, buf3))
      printf("split base: error %d\n", err);
  if (err = url_normalize(&url0, NULL))
      printf("normalize base: error %d\n", err);
  printf("base: @%s@%s@%s@%s@%d@%s\n", url0.protocol, url0.user, url0.pass, url0.host, url0.port, url0.rest);
  if (err = url_normalize(&url, &url0))
      printf("normalize: error %d\n", err);
  printf("normalize: @%s@%s@%s@%s@%d@%s\n", url.protocol, url.user, url.pass, url.host, url.port, url.rest);
  if (err = url_canonicalize(&url))
      printf("canonicalize: error %d\n", err);
  printf("canonicalize: @%s@%s@%s@%s@%d@%s\n", url.protocol, url.user, url.pass, url.host, url.port, url.rest);
  if (err = url_pack(&url, buf4))
      printf("pack: error %d\n", err);
  printf("pack: %s\n", buf4);
  if (err = url_enescape(buf4, buf2))
      printf("enesc: error %d\n", err);
  printf("enesc: %s\n", buf2);
hashf(byte *start, int length)
    hf = (hf << 8 | hf >> 24) ^ *start++;

repeat_count(struct component *comp, uns count, uns len)
  struct component *orig_comp = comp;
      for (i=0; i<len; i++)
        if (comp[i].hash != orig_comp[i].hash
            || comp[i].length != orig_comp[i].length
            || memcmp(comp[i].start, orig_comp[i].start, comp[i].length))
url_has_repeated_component(byte *url)
  struct component *comp;
  uns comps, comp_len, rep_prefix;

  for (comps=0, c=url; c; comps++)
      c = strpbrk(c, url_component_separators);
  if (comps < url_min_repeat_count)
  comp = alloca(comps * sizeof(struct component));
  for (i=0, c=url; c; i++)
      c = strpbrk(c, url_component_separators);
          comp[i].length = c - comp[i].start;
          comp[i].length = strlen(comp[i].start);
  for (i=0; i<comps; i++)
    comp[i].hash = hashf(comp[i].start, comp[i].length);
  for (comp_len = 1; comp_len <= url_max_repeat_length && comp_len <= comps; comp_len++)
    for (rep_prefix = 0; rep_prefix <= comps - comp_len; rep_prefix++)
      if (repeat_count(comp + rep_prefix, comps - rep_prefix, comp_len) >= url_min_repeat_count)
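
/*
 * Illustrative behaviour of url_has_repeated_component(): with the
 * configuration ComponentSeparators "/", MinRepeatCount 3 and
 * MaxRepeatLength 2, the URL "http://x/a/b/a/b/a/b" contains the
 * two-component sequence "a/b" three times in a row and is reported as
 * repetitive. With the compiled-in defaults above (MinRepeatCount
 * 0x7fffffff, MaxRepeatLength 0) the check is effectively disabled and the
 * function returns 0 for every URL.
 */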