/*
 * Copyright (c) 1982, 1988 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms are permitted
 * provided that the above copyright notice and this paragraph are
 * duplicated in all such forms and that any documentation,
 * advertising materials, and other materials related to such
 * distribution and use acknowledge that the software was developed
 * by the University of California, Berkeley. The name of the
 * University may not be used to endorse or promote products derived
 * from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * @(#)radix.c 7.2 (Berkeley) %G%
 */

/*
 * Routines to build and maintain radix trees for routing lookups.
 */
#ifndef RNF_NORMAL
#include "param.h"
#include "radix.h"
#endif
struct radix_node_head *radix_node_head;
struct radix_mask *rn_mkfreelist;
#define rn_maskhead radix_node_head->rnh_treetop
/*
 * The data structure for the keys is a radix tree with one way
 * branching removed. The index rn_b at an internal node n represents a bit
 * position to be tested. The tree is arranged so that all descendants
 * of a node n have keys whose bits all agree up to position rn_b - 1.
 * (We say the index of n is rn_b.)
 *
 * There is at least one descendant which has a one bit at position rn_b,
 * and at least one with a zero there.
 *
 * A route is determined by a pair of key and mask. We require that the
 * bit-wise logical and of the key and mask be the key.
 * We define the index of a route associated with the mask to be
 * the first bit number in the mask where 0 occurs (with bit number 0
 * representing the highest order bit).
 *
 * We say a mask is normal if every bit past the index of the mask is 0.
 * If a node n has a descendant (k, m) with index(m) == index(n) == rn_b,
 * and m is a normal mask, then the route applies to every descendant of n.
 * If index(m) < rn_b, this implies that the last few bits of k
 * before bit rn_b are all 0 (and hence the same is true of every descendant
 * of n), so the route applies to all descendants of the node as well.
 *
 * The present version of the code makes no use of normal routes,
 * but similar logic shows that a non-normal mask m such that
 * index(m) <= index(n) could potentially apply to many children of n.
 * Thus, for each non-host route, we attach its mask to a list at an internal
 * node as high in the tree as we can go.
 */
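/*
 * For example, a mask consisting of 24 one bits followed by zero bits
 * is normal and has index 24: the first zero occurs at bit number 24,
 * with bit 0 being the highest order bit.  For such a mask, key bits at
 * and past position 24 are masked away, so they play no part when the
 * route is compared against a search key.
 */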
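/*
 * rn_search follows the bit tests stored in the internal nodes,
 * starting at head and branching right when the addressed bit of v is
 * one and left when it is zero, and returns the leaf it reaches.  No
 * comparison against the stored key is made here; callers examine the
 * returned leaf themselves.
 */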
struct radix_node *
rn_search(v, head)
	struct radix_node *head;
	register caddr_t v;
{
	register struct radix_node *x;

	for (x = head; x->rn_b >= 0;) {
		if (x->rn_bmask & v[x->rn_off])
			x = x->rn_r;
		else
			x = x->rn_l;
	}
	return x;
}

static int gotOddMasks;
static char maskedKey[MAXKEYLEN];
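/*
 * rn_match returns the best route for the key v.  It descends the tree
 * as rn_search does and first tries for an exact (host) match against
 * the leaf it reaches; failing that, it tries the masks on that leaf's
 * rn_dupedkey chain, and finally backtracks toward the root, applying
 * each mask remembered on an ancestor's rn_mklist to v (building the
 * masked copy in maskedKey) and re-searching with the result.  Returns
 * 0 if nothing matches.
 */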
struct radix_node *
rn_match(v, head)
	struct radix_node *head;
	caddr_t v;
{
	register struct radix_node *t = head, *x;
	register caddr_t cp = v, cp2, cp3;
	caddr_t cplim, mstart;
	struct radix_node *saved_t;
	int off = t->rn_off, vlen = *(u_char *)cp, matched_off;

	/*
	 * Open code rn_search(v, head) to avoid overhead of extra
	 * subroutine call.
	 */
	for (; t->rn_b >= 0; ) {
		if (t->rn_bmask & cp[t->rn_off])
			t = t->rn_r;
		else
			t = t->rn_l;
	}
	/*
	 * See if we match exactly as a host destination
	 */
	cp += off; cp2 = t->rn_key + off; cplim = v + vlen;
	for (; cp < cplim; cp++, cp2++)
		if (*cp != *cp2)
			goto on1;
	return t;
on1:
	matched_off = cp - v;
	saved_t = t;
	do {
		if (t->rn_mask) {
			/*
			 * Even if we don't match exactly as a host,
			 * we may match if the leaf we wound up at is
			 * a route to a net.
			 */
			cp3 = matched_off + t->rn_mask;
			for (; cp < cplim; cp++)
				if ((*cp2++ ^ *cp) & *cp3++)
					break;
			if (cp == cplim)
				return t;
		}
	} while (t = t->rn_dupedkey);
	t = saved_t;
	/* start searching up the tree */
	do {
		register struct radix_mask *m;
		t = t->rn_p;
		if (m = t->rn_mklist) {
			off = min(t->rn_off, matched_off);
			mstart = maskedKey + off;
			do {
				cp2 = mstart;
				cp3 = m->rm_mask + off;
				for (cp = v + off; cp < cplim;)
					*cp2++ = *cp++ & *cp3++;
				x = rn_search(maskedKey, t);
				while (x && x->rn_mask != m->rm_mask)
					x = x->rn_dupedkey;
				if (x &&
				    (Bcmp(mstart, x->rn_key + off,
					vlen - off) == 0))
					return x;
			} while (m = m->rm_mklist);
		}
	} while (t != head);
	return 0;
}

#ifdef RN_DEBUG
int rn_nodenum;
struct radix_node *rn_clist;
int rn_saveinfo;
#endif
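/*
 * rn_newpair initializes the caller-supplied pair of nodes as a leaf
 * (nodes[0], holding the key v) and an interior node (nodes[1], testing
 * bit b), with the leaf installed as the interior node's left child.
 * The interior node is returned; the caller is responsible for filling
 * in the right child and the parent linkage.
 */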
struct radix_node *
rn_newpair(v, b, nodes)
	caddr_t v;
	int b;
	struct radix_node nodes[2];
{
	register struct radix_node *tt = nodes, *t = tt + 1;
	t->rn_b = b; t->rn_bmask = 0x80 >> (b & 7);
	t->rn_l = tt; t->rn_off = b >> 3;
	tt->rn_b = -1; tt->rn_key = v; tt->rn_p = t;
	tt->rn_flags = t->rn_flags = RNF_ACTIVE;
#ifdef RN_DEBUG
	tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++;
	tt->rn_twin = t; tt->rn_ybro = rn_clist; rn_clist = tt;
#endif
	return t;
}

int rn_debug = 1;
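/*
 * rn_insert adds the key v to the tree rooted at head, using the two
 * caller-supplied nodes.  If the key is already present, *dupentry is
 * set and the existing leaf is returned; otherwise the first bit at
 * which v differs from the key found by the search determines where the
 * new interior node is spliced in, *dupentry is cleared, and the new
 * leaf is returned.
 */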
struct radix_node *
rn_insert(v, head, dupentry, nodes)
	caddr_t v;
	struct radix_node *head;
	int *dupentry;
	struct radix_node nodes[2];
{
	int head_off = head->rn_off, vlen = (int)*((u_char *)v);
	register struct radix_node *t = rn_search(v, head);
	register caddr_t cp = v + head_off;
	register int b;
	struct radix_node *tt;
	/*
	 * Find the first bit at which v and t->rn_key differ.
	 */
	{
		register caddr_t cp2 = t->rn_key + head_off;
		register int cmp_res;
		caddr_t cplim = v + vlen;

		while (cp < cplim)
			if (*cp2++ != *cp++)
				goto on1;
		*dupentry = 1;
		return t;
on1:
		*dupentry = 0;
		cmp_res = (cp[-1] ^ cp2[-1]) & 0xff;
		for (b = (cp - v) << 3; cmp_res; b--)
			cmp_res >>= 1;
	}
	{
		register struct radix_node *p, *x = head;
		cp = v;
		do {
			p = x;
			if (cp[x->rn_off] & x->rn_bmask)
				x = x->rn_r;
			else x = x->rn_l;
		} while (b > (unsigned) x->rn_b); /* x->rn_b < b && x->rn_b >= 0 */
#ifdef RN_DEBUG
		if (rn_debug)
			printf("Going In:\n"), traverse(p);
#endif
		t = rn_newpair(v, b, nodes); tt = t->rn_l;
		if ((cp[p->rn_off] & p->rn_bmask) == 0)
			p->rn_l = t;
		else
			p->rn_r = t;
		x->rn_p = t; t->rn_p = p; /* frees x, p as temp vars below */
		if ((cp[t->rn_off] & t->rn_bmask) == 0) {
			t->rn_r = x;
		} else {
			t->rn_r = tt; t->rn_l = x;
		}
#ifdef RN_DEBUG
		if (rn_debug)
			printf("Coming out:\n"), traverse(p);
#endif
	}
	return (tt);
}
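/*
 * rn_addroute enters the route described by the key v and netmask.
 * Masks are themselves kept in a radix tree (rooted at rn_maskhead) so
 * that identical masks share storage and can be compared by pointer.
 * Once the key is in place, the mask is recorded on the rn_mklist of
 * the highest ancestor to which it can apply, which is what the
 * backtracking pass of rn_match consults.  Returns the new leaf, or 0
 * if the same key and mask are already present or no memory is
 * available.
 */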
struct radix_node *
rn_addroute(v, netmask, head, treenodes)
	struct radix_node *head;
	caddr_t netmask, v;
	struct radix_node treenodes[2];
{
	register int j;
	register caddr_t cp;
	register struct radix_node *t, *x, *tt;
	short b = 0, b_leaf;
	int vlen = *(u_char *)v, maskduplicated = 0, mlen, keyduplicated;
	caddr_t cplim; unsigned char *maskp;
	struct radix_mask *m, **mp;
	struct radix_node *saved_tt;

	/*
	 * In dealing with non-contiguous masks, there may be
	 * many different routes which have the same mask.
	 * We will find it useful to have a unique pointer to
	 * the mask to speed avoiding duplicate references at
	 * nodes and possibly save time in calculating indices.
	 */
	if (netmask) {
		x = rn_search(netmask, rn_maskhead);
		mlen = *(u_char *)netmask;
		if (Bcmp(netmask, x->rn_key, mlen) == 0) {
			maskduplicated = 1;
			netmask = x->rn_key;
			b = -1 - x->rn_b;
		}
	}
	/*
	 * Deal with duplicated keys: attach node to previous instance
	 */
	saved_tt = tt = rn_insert(v, head, &keyduplicated, treenodes);
	if (keyduplicated) {
		if (tt->rn_mask == netmask)
			return (0);
		for (; tt->rn_dupedkey; tt = tt->rn_dupedkey)
			if (tt->rn_mask == netmask)
				return (0);
		/*
		 * If the mask is not duplicated, we wouldn't
		 * find it among possible duplicate key entries
		 * anyway, so the above test doesn't hurt.
		 *
		 * XXX: we really ought to sort the masks
		 * for a duplicated key the same way as in a masklist.
		 * It is an unfortunate pain having to relocate
		 * the head of the list.
		 */
		tt->rn_dupedkey = treenodes;
		tt = treenodes;
#ifdef RN_DEBUG
		t = tt + 1; tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++;
		tt->rn_twin = t; tt->rn_ybro = rn_clist; rn_clist = tt;
#endif
		t = saved_tt;
		tt->rn_key = t->rn_key;
		tt->rn_b = t->rn_b;
		tt->rn_flags = t->rn_flags & ~RNF_ROOT;
	}
	/*
	 * Put mask in tree.
	 */
	if (netmask == 0)
		goto on1;
	if (maskduplicated == 0) {
		R_Malloc(x, struct radix_node *, MAXKEYLEN + 2 * sizeof (*x));
		if (x == 0)
			return (0);
		Bzero(x, MAXKEYLEN + 2 * sizeof (*x));
		cp = (caddr_t)(x + 2);
		bcopy(netmask, cp, mlen);
		netmask = cp;
		x = rn_insert(netmask, rn_maskhead, &maskduplicated, x);
		/*
		 * Calculate index of mask.
		 */
		cplim = netmask + vlen;
		for (cp = netmask + head->rn_off; cp < cplim; cp++)
			if (*(u_char *)cp != 0xff)
				break;
		b = (cp - netmask) << 3;
		if (cp != cplim) {
			if (*cp != 0) {
				gotOddMasks = 1;
				for (j = 0x80; j; b++, j >>= 1)
					if ((j & *cp) == 0)
						break;
			}
		}
		x->rn_b = -1 - b;
	}
	/*
	 * Set up usual parameters
	 */
	tt->rn_mask = netmask;
	tt->rn_b = x->rn_b;
on1:
	t = saved_tt->rn_p;
	b_leaf = -1 - t->rn_b;
	if (t->rn_r == saved_tt) x = t->rn_l; else x = t->rn_r;
	/* Promote general routes from below */
	if (x->rn_b < 0) {
		if (x->rn_mask && (x->rn_b >= b_leaf)) {
			MKGet(m);
			if (m) {
				Bzero(m, sizeof *m);
				m->rm_b = x->rn_b;
				m->rm_mask = x->rn_mask;
				t->rn_mklist = m;
			}
		}
	} else if (x->rn_mklist) {
		/*
		 * Skip over masks whose index is > that of new node
		 */
		for (mp = &x->rn_mklist; m = *mp; mp = &m->rm_mklist)
			if (m->rm_b >= b_leaf)
				break;
		t->rn_mklist = m; *mp = 0;
	}
	/* Add new route to highest possible ancestor's list */
	if ((netmask == 0) || (b > t->rn_b))
		return tt; /* can't lift at all */
	b_leaf = tt->rn_b;
	do {
		x = t;
		t = t->rn_p;
	} while (b <= t->rn_b && x != head);
	/*
	 * Search through routes associated with node to
	 * insert new route according to index.
	 * For nodes of equal index, place more specific
	 * masks first.
	 */
	cplim = netmask + mlen;
	for (mp = &x->rn_mklist; m = *mp; mp = &m->rm_mklist) {
		if (m->rm_b < b_leaf)
			continue;
		if (m->rm_b > b_leaf)
			break;
		if (m->rm_mask == netmask) {
			m->rm_refs++;
			tt->rn_mklist = m;
			return tt;
		}
		maskp = (u_char *)m->rm_mask;
		for (cp = netmask; cp < cplim; cp++)
			if (*(u_char *)cp > *maskp++)
				goto on2;
	}
on2:
	MKGet(m);
	if (m == 0) {
		printf("Mask for route not entered\n");
		return (tt);
	}
	Bzero(m, sizeof *m);
	m->rm_b = b_leaf;
	m->rm_mask = netmask;
	m->rm_mklist = *mp;
	*mp = m;
	tt->rn_mklist = m;
	return tt;
}
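/*
 * rn_delete removes the route with the given key and netmask.  It
 * locates the leaf, detaches the mask from the ancestor's rn_mklist
 * (freeing the radix_mask once its reference count is exhausted), and
 * then unlinks the leaf and its paired interior node from the tree,
 * handling the cases where the key is shared with other routes
 * (rn_dupedkey) or where the paired interior node is still in use
 * elsewhere.  Returns the deleted node pair, or 0 if no such route
 * exists.
 */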
struct radix_node *
rn_delete(v, netmask, head)
	caddr_t v, netmask;
	struct radix_node *head;
{
	register struct radix_node *t, *p, *x = head;
	register struct radix_node *tt = rn_search(v, x);
	int b, head_off = x->rn_off, vlen = * (u_char *) v;
	struct radix_mask *m, **mp;
	struct radix_node *dupedkey, *saved_tt = tt;

	if (tt == 0 ||
	    Bcmp(v + head_off, tt->rn_key + head_off, vlen - head_off))
		return (0);
	/*
	 * Delete our route from mask lists.
	 */
	if (dupedkey = tt->rn_dupedkey) {
		if (netmask)
			netmask = rn_search(netmask, rn_maskhead)->rn_key;
		for (; tt->rn_mask != netmask; tt = tt->rn_dupedkey)
			if (tt == 0)
				return (0);
	}
	if (tt->rn_mask == 0)
		goto on1;
	if ((m = tt->rn_mklist) && --m->rm_refs >= 0)
		goto on1;
	b = -1 - tt->rn_b;
	t = saved_tt->rn_p;
	if (b > t->rn_b)
		goto on1; /* Wasn't lifted at all */
	do {
		x = t;
		t = t->rn_p;
	} while (b <= t->rn_b && x != head);
	for (mp = &x->rn_mklist; m = *mp; mp = &m->rm_mklist)
		if (m->rm_mask == tt->rn_mask)
			break;
	if (m) {
		if (m->rm_refs < 0) {
			*mp = m->rm_mklist;
			MKFree(m);
		}
	} else
		printf("rn_delete: couldn't find our mask\n");
on1:
	/*
	 * Eliminate us from tree
	 */
	if (tt->rn_flags & RNF_ROOT)
		return (0);
#ifdef RN_DEBUG
	/* Get us out of the creation list */
	for (t = rn_clist; t && t->rn_ybro != tt; t = t->rn_ybro) {}
	if (t) t->rn_ybro = tt->rn_ybro;
#endif /* RN_DEBUG */
	t = tt->rn_p;
	if (dupedkey) {
		if (tt == saved_tt) {
			x = dupedkey; x->rn_p = t;
			if (t->rn_l == tt) t->rn_l = x; else t->rn_r = x;
#ifndef RN_DEBUG
			x++; t = tt + 1; *x = *t; p = t->rn_p;
#else
			x++; b = x->rn_info; t = tt + 1; *x = *t; p = t->rn_p;
			x->rn_info = b;
#endif
			if (p->rn_l == t) p->rn_l = x; else p->rn_r = x;
			x->rn_l->rn_p = x; x->rn_r->rn_p = x;
		} else {
			for (p = saved_tt; p && p->rn_dupedkey != tt;)
				p = p->rn_dupedkey;
			if (p) p->rn_dupedkey = tt->rn_dupedkey;
			else printf("rn_delete: couldn't find us\n");
		}
		goto out;
	}
	if (t->rn_l == tt) x = t->rn_r; else x = t->rn_l;
	p = t->rn_p;
	if (p->rn_r == t) p->rn_r = x; else p->rn_l = x;
	x->rn_p = p;
	/*
	 * Demote routes attached to us.
	 */
	if (t->rn_mklist) {
		if (x->rn_b >= 0) {
			if (m = x->rn_mklist) {
				while (m->rm_mklist)
					m = m->rm_mklist;
				m->rm_mklist = t->rn_mklist;
			} else
				x->rn_mklist = t->rn_mklist;
		} else {
			for (m = t->rn_mklist; m;) {
				struct radix_mask *mm = m->rm_mklist;
				MKFree(m);
				m = mm;
			}
		}
	}
	/*
	 * We may be holding an active internal node in the tree.
	 */
	x = tt + 1;
	if (t != x) {
#ifndef RN_DEBUG
		*t = *x;
#else
		b = t->rn_info; *t = *x; t->rn_info = b;
#endif
		t->rn_l->rn_p = t; t->rn_r->rn_p = t;
		p = x->rn_p;
		if (p->rn_l == x) p->rn_l = t; else p->rn_r = t;
	}
out:
	tt->rn_flags &= ~RNF_ACTIVE;
	tt[1].rn_flags &= ~RNF_ACTIVE;
	return (tt);
}
char rn_zeros[MAXKEYLEN], rn_ones[MAXKEYLEN];
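/*
 * rn_inithead allocates and initializes a radix_node_head for address
 * family af; the root of the new tree tests bit off of each key.  The
 * first call also builds the shared mask tree (pointed to by
 * radix_node_head) and fills rn_ones with all-one bytes.  A sketch of
 * the expected use, with the caller-side names purely illustrative:
 *
 *	struct radix_node_head *my_rnhead;	(hypothetical)
 *
 *	if (rn_inithead(&my_rnhead, off, af))
 *		(void) rn_addroute(key, mask, my_rnhead->rnh_treetop,
 *		    treenodes);
 *
 * Returns 1 on success (or if *head was already set up), 0 if the
 * allocation fails.
 */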
rn_inithead(head, off, af)
	struct radix_node_head **head;
	int off, af;
{
	register struct radix_node_head *rnh;
	register struct radix_node *t, *tt, *ttt;
	if (*head)
		return (1);
	R_Malloc(rnh, struct radix_node_head *, sizeof (*rnh));
	if (rnh == 0)
		return (0);
	Bzero(rnh, sizeof (*rnh));
	*head = rnh;
	t = rn_newpair(rn_zeros, off, rnh->rnh_nodes);
	ttt = rnh->rnh_nodes + 2;
	t->rn_r = ttt;
	t->rn_p = t;
	tt = t->rn_l;
	tt->rn_flags = t->rn_flags = RNF_ROOT | RNF_ACTIVE;
	tt->rn_b = -1 - off;
	*ttt = *tt;
	ttt->rn_key = rn_ones;
	rnh->rnh_af = af;
	rnh->rnh_treetop = t;
	if (radix_node_head == 0) {
		caddr_t cp = rn_ones, cplim = rn_ones + MAXKEYLEN;
		while (cp < cplim)
			*cp++ = -1;
		if (rn_inithead(&radix_node_head, 0, 0) == 0) {
			Free(rnh);
			*head = 0;
			return (0);
		}
	}
	rnh->rnh_next = radix_node_head->rnh_next;
	if (radix_node_head != rnh)
		radix_node_head->rnh_next = rnh;
	return (1);
}