[unix-history] / usr / src / sys / net / radix.c
/*
 * Copyright (c) 1988, 1989 Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)radix.c	7.17 (Berkeley) %G%
 */

/*
 * Routines to build and maintain radix trees for routing lookups.
 */
#ifndef RNF_NORMAL
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#define M_DONTWAIT M_NOWAIT
#ifdef KERNEL
#include <sys/domain.h>
#endif
#endif

#include <net/radix.h>

int max_keylen;
struct radix_mask *rn_mkfreelist;
struct radix_node_head *mask_rnhead;
static int gotOddMasks;
static char *maskedKey;
static char *rn_zeros, *rn_ones;

#define rn_maskhead (mask_rnhead->rnh_treetop)
#undef Bcmp
#define Bcmp(a, b, l) (l == 0 ? 0 : bcmp((caddr_t)(a), (caddr_t)(b), (u_long)l))
/*
 * The data structure for the keys is a radix tree with one-way
 * branching removed.  The index rn_b at an internal node n represents a bit
 * position to be tested.  The tree is arranged so that all descendants
 * of a node n have keys whose bits all agree up to position rn_b - 1.
 * (We say the index of n is rn_b.)
 *
 * There is at least one descendant which has a one bit at position rn_b,
 * and at least one with a zero there.
 *
 * A route is determined by a pair of key and mask.  We require that the
 * bitwise logical and of the key and mask be the key.
 * We define the index of a route associated with the mask to be
 * the first bit number in the mask where 0 occurs (with bit number 0
 * representing the highest order bit).
 *
 * We say a mask is normal if every bit is 0 past the index of the mask.
 * If a node n has a descendant (k, m) with index(m) == index(n) == rn_b,
 * and m is a normal mask, then the route applies to every descendant of n.
 * If index(m) < rn_b, this implies that the last few bits of k
 * before bit rn_b are all 0 (and hence the same is true of every descendant
 * of n), so the route applies to all descendants of the node as well.
 *
 * The present version of the code makes no use of normal routes,
 * but similar logic shows that a non-normal mask m such that
 * index(m) <= index(n) could potentially apply to many children of n.
 * Thus, for each non-host route, we attach its mask to a list at an internal
 * node as high in the tree as we can go.
 */
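/*
 * For example, if the bits of a mask m run 0xff 0xff 0xe0 00 ..., then
 * bits 0 through 18 are ones and bit 19 is the first zero, so
 * index(m) == 19; a leaf carrying that mask stores it as
 * rn_b == -1 - index(m) == -20 (see rn_addmask() below).
 */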

struct radix_node *
rn_search(v, head)
	struct radix_node *head;
	register caddr_t v;
{
	register struct radix_node *x;

	for (x = head; x->rn_b >= 0;) {
		if (x->rn_bmask & v[x->rn_off])
			x = x->rn_r;
		else
			x = x->rn_l;
	}
	return x;
};

struct radix_node *
rn_search_m(v, head, m)
	struct radix_node *head;
	register caddr_t v, m;
{
	register struct radix_node *x;

	for (x = head; x->rn_b >= 0;) {
		if ((x->rn_bmask & m[x->rn_off]) &&
		    (x->rn_bmask & v[x->rn_off]))
			x = x->rn_r;
		else
			x = x->rn_l;
	}
	return x;
};

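/*
 * rn_refines(m, n) returns nonzero iff mask m refines mask n, i.e. every
 * bit set in n is also set in m and m is strictly more specific.
 * The leading byte of each mask is its length.
 */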
rn_refines(m, n)
	register caddr_t m, n;
{
	register caddr_t lim, lim2 = lim = n + *(u_char *)n;
	int longer = (*(u_char *)n++) - (int)(*(u_char *)m++);
	int masks_are_equal = 1;

	if (longer > 0)
		lim -= longer;
	while (n < lim) {
		if (*n & ~(*m))
			return 0;
		if (*n++ != *m++)
			masks_are_equal = 0;
	}
	while (n < lim2)
		if (*n++)
			return 0;
	if (masks_are_equal && (longer < 0))
		for (lim2 = m - longer; m < lim2; )
			if (*m++)
				return 1;
	return (!masks_are_equal);
}
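/*
 * rn_match(v, head) returns the leaf for the most specific (key, mask)
 * pair in the tree that covers the search key v, or 0 if nothing matches.
 */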
struct radix_node *
rn_match(v, head)
	struct radix_node *head;
	caddr_t v;
{
	register struct radix_node *t = head, *x;
	register caddr_t cp = v, cp2, cp3;
	caddr_t cplim, mstart;
	struct radix_node *saved_t;
	int off = t->rn_off, vlen = *(u_char *)cp, matched_off;

	/*
	 * Open code rn_search(v, head) to avoid overhead of extra
	 * subroutine call.
	 */
	for (; t->rn_b >= 0; ) {
		if (t->rn_bmask & cp[t->rn_off])
			t = t->rn_r;
		else
			t = t->rn_l;
	}
	/*
	 * See if we match exactly as a host destination
	 */
	cp += off; cp2 = t->rn_key + off; cplim = v + vlen;
	for (; cp < cplim; cp++, cp2++)
		if (*cp != *cp2)
			goto on1;
	/*
	 * This extra grot is in case we are explicitly asked
	 * to look up the default.  Ugh!
	 */
	if ((t->rn_flags & RNF_ROOT) && t->rn_dupedkey)
		t = t->rn_dupedkey;
	return t;
on1:
	matched_off = cp - v;
	saved_t = t;
	do {
		if (t->rn_mask) {
			/*
			 * Even if we don't match exactly as a host,
			 * we may match if the leaf we wound up at is
			 * a route to a net.
			 */
			cp3 = matched_off + t->rn_mask;
			cp2 = matched_off + t->rn_key;
			for (; cp < cplim; cp++)
				if ((*cp2++ ^ *cp) & *cp3++)
					break;
			if (cp == cplim)
				return t;
			cp = matched_off + v;
		}
	} while (t = t->rn_dupedkey);
	t = saved_t;
	/* start searching up the tree */
	do {
		register struct radix_mask *m;
		t = t->rn_p;
		if (m = t->rn_mklist) {
			/*
			 * After doing measurements here, it may
			 * turn out to be faster to open code
			 * rn_search_m here instead of always
			 * copying and masking.
			 */
			off = min(t->rn_off, matched_off);
			mstart = maskedKey + off;
			do {
				cp2 = mstart;
				cp3 = m->rm_mask + off;
				for (cp = v + off; cp < cplim;)
					*cp2++ = *cp++ & *cp3++;
				x = rn_search(maskedKey, t);
				while (x && x->rn_mask != m->rm_mask)
					x = x->rn_dupedkey;
				if (x &&
				    (Bcmp(mstart, x->rn_key + off,
					vlen - off) == 0))
					return x;
			} while (m = m->rm_mklist);
		}
	} while (t != head);
	return 0;
};

#ifdef RN_DEBUG
int rn_nodenum;
struct radix_node *rn_clist;
int rn_saveinfo;
#endif

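/*
 * rn_newpair(v, b, nodes) initializes the caller-supplied pair of nodes as
 * a leaf (nodes[0]) carrying key v and an interior node (nodes[1]) testing
 * bit b, and returns the interior node.
 */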
struct radix_node *
rn_newpair(v, b, nodes)
	caddr_t v;
	int b;
	struct radix_node nodes[2];
{
	register struct radix_node *tt = nodes, *t = tt + 1;
	t->rn_b = b; t->rn_bmask = 0x80 >> (b & 7);
	t->rn_l = tt; t->rn_off = b >> 3;
	tt->rn_b = -1; tt->rn_key = v; tt->rn_p = t;
	tt->rn_flags = t->rn_flags = RNF_ACTIVE;
#ifdef RN_DEBUG
	tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++;
	tt->rn_twin = t; tt->rn_ybro = rn_clist; rn_clist = tt;
#endif
	return t;
}

int rn_debug = 1;
struct radix_node *
rn_insert(v, head, dupentry, nodes)
	caddr_t v;
	struct radix_node *head;
	int *dupentry;
	struct radix_node nodes[2];
{
	int head_off = head->rn_off, vlen = (int)*((u_char *)v);
	register struct radix_node *t = rn_search(v, head);
	register caddr_t cp = v + head_off;
	register int b;
	struct radix_node *tt;
	/*
	 * find first bit at which v and t->rn_key differ
	 */
    {
	register caddr_t cp2 = t->rn_key + head_off;
	register int cmp_res;
	caddr_t cplim = v + vlen;

	while (cp < cplim)
		if (*cp2++ != *cp++)
			goto on1;
	*dupentry = 1;
	return t;
on1:
	*dupentry = 0;
	cmp_res = (cp[-1] ^ cp2[-1]) & 0xff;
	for (b = (cp - v) << 3; cmp_res; b--)
		cmp_res >>= 1;
    }
    {
	register struct radix_node *p, *x = head;
	cp = v;
	do {
		p = x;
		if (cp[x->rn_off] & x->rn_bmask)
			x = x->rn_r;
		else x = x->rn_l;
	} while (b > (unsigned) x->rn_b); /* x->rn_b < b && x->rn_b >= 0 */
#ifdef RN_DEBUG
	if (rn_debug)
		printf("Going In:\n"), traverse(p);
#endif
	t = rn_newpair(v, b, nodes); tt = t->rn_l;
	if ((cp[p->rn_off] & p->rn_bmask) == 0)
		p->rn_l = t;
	else
		p->rn_r = t;
	x->rn_p = t; t->rn_p = p; /* frees x, p as temp vars below */
	if ((cp[t->rn_off] & t->rn_bmask) == 0) {
		t->rn_r = x;
	} else {
		t->rn_r = tt; t->rn_l = x;
	}
#ifdef RN_DEBUG
	if (rn_debug)
		printf("Coming out:\n"), traverse(p);
#endif
    }
	return (tt);
}
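/*
 * rn_addmask(netmask, search, skip) returns the mask tree entry for
 * netmask, either an existing one (when search is set and a match is
 * found) or a freshly copied and inserted one whose rn_b records
 * -1 - index(mask).
 */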
struct radix_node *
rn_addmask(netmask, search, skip)
	caddr_t netmask;
	int search, skip;
{
	register struct radix_node *x;
	register caddr_t cp, cplim;
	register int b, mlen, j;
	int maskduplicated;

	mlen = *(u_char *)netmask;
	if (search) {
		x = rn_search(netmask, rn_maskhead);
		mlen = *(u_char *)netmask;
		if (Bcmp(netmask, x->rn_key, mlen) == 0)
			return (x);
	}
	R_Malloc(x, struct radix_node *, max_keylen + 2 * sizeof (*x));
	if (x == 0)
		return (0);
	Bzero(x, max_keylen + 2 * sizeof (*x));
	cp = (caddr_t)(x + 2);
	Bcopy(netmask, cp, mlen);
	netmask = cp;
	x = rn_insert(netmask, rn_maskhead, &maskduplicated, x);
	/*
	 * Calculate index of mask.
	 */
	cplim = netmask + mlen;
	for (cp = netmask + skip; cp < cplim; cp++)
		if (*(u_char *)cp != 0xff)
			break;
	b = (cp - netmask) << 3;
	if (cp != cplim) {
		if (*cp != 0) {
			gotOddMasks = 1;
			for (j = 0x80; j; b++, j >>= 1)
				if ((j & *cp) == 0)
					break;
		}
	}
	x->rn_b = -1 - b;
	return (x);
}
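/*
 * rn_addroute(v, netmask, head, treenodes) enters the route (v, netmask)
 * into the tree using the caller-supplied treenodes, keeping duplicated
 * keys ordered most specific first and attaching the mask to the list of
 * the highest ancestor it can apply to; returns the new leaf, or 0 on
 * failure (for instance, when the identical route is already present).
 */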
struct radix_node *
rn_addroute(v, netmask, head, treenodes)
	caddr_t v, netmask;
	struct radix_node *head;
	struct radix_node treenodes[2];
{
	register int j;
	register caddr_t cp;
	register struct radix_node *t, *x, *tt;
	short b = 0, b_leaf;
	int vlen = *(u_char *)v, mlen, keyduplicated;
	caddr_t cplim; unsigned char *maskp;
	struct radix_mask *m, **mp;
	struct radix_node *saved_tt;

	/*
	 * In dealing with non-contiguous masks, there may be
	 * many different routes which have the same mask.
	 * We will find it useful to have a unique pointer to
	 * the mask to speed avoiding duplicate references at
	 * nodes and possibly save time in calculating indices.
	 */
	if (netmask) {
		x = rn_search(netmask, rn_maskhead);
		mlen = *(u_char *)netmask;
		if (Bcmp(netmask, x->rn_key, mlen) != 0) {
			x = rn_addmask(netmask, 0, head->rn_off);
			if (x == 0)
				return (0);
		}
		netmask = x->rn_key;
		b = -1 - x->rn_b;
	}
	/*
	 * Deal with duplicated keys: attach node to previous instance
	 */
	saved_tt = tt = rn_insert(v, head, &keyduplicated, treenodes);
	if (keyduplicated) {
		do {
			if (tt->rn_mask == netmask)
				return (0);
			t = tt;
			if (netmask == 0 ||
			    (tt->rn_mask && rn_refines(netmask, tt->rn_mask)))
				break;
		} while (tt = tt->rn_dupedkey);
		/*
		 * If the mask is not duplicated, we wouldn't
		 * find it among possible duplicate key entries
		 * anyway, so the above test doesn't hurt.
		 *
		 * We sort the masks for a duplicated key the same way as
		 * in a masklist -- most specific to least specific.
		 * This may require the unfortunate nuisance of relocating
		 * the head of the list.
		 */
		if (tt && t == saved_tt) {
			struct radix_node *xx = x;
			/* link in at head of list */
			(tt = treenodes)->rn_dupedkey = t;
			tt->rn_flags = t->rn_flags;
			tt->rn_p = x = t->rn_p;
			if (x->rn_l == t) x->rn_l = tt; else x->rn_r = tt;
			saved_tt = tt; x = xx;
		} else {
			(tt = treenodes)->rn_dupedkey = t->rn_dupedkey;
			t->rn_dupedkey = tt;
		}
#ifdef RN_DEBUG
		t = tt + 1; tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++;
		tt->rn_twin = t; tt->rn_ybro = rn_clist; rn_clist = tt;
#endif
		t = saved_tt;
		tt->rn_key = (caddr_t) v;
		tt->rn_b = -1;
		tt->rn_flags = t->rn_flags & ~RNF_ROOT;
	}
	/*
	 * Put mask in tree.
	 */
	if (netmask) {
		tt->rn_mask = netmask;
		tt->rn_b = x->rn_b;
	}
	t = saved_tt->rn_p;
	b_leaf = -1 - t->rn_b;
	if (t->rn_r == saved_tt) x = t->rn_l; else x = t->rn_r;
	/* Promote general routes from below */
	if (x->rn_b < 0) {
		if (x->rn_mask && (x->rn_b >= b_leaf) && x->rn_mklist == 0) {
			MKGet(m);
			if (m) {
				Bzero(m, sizeof *m);
				m->rm_b = x->rn_b;
				m->rm_mask = x->rn_mask;
				x->rn_mklist = t->rn_mklist = m;
			}
		}
	} else if (x->rn_mklist) {
		/*
		 * Skip over masks whose index is > that of new node
		 */
		for (mp = &x->rn_mklist; m = *mp; mp = &m->rm_mklist)
			if (m->rm_b >= b_leaf)
				break;
		t->rn_mklist = m; *mp = 0;
	}
	/* Add new route to highest possible ancestor's list */
	if ((netmask == 0) || (b > t->rn_b))
		return tt; /* can't lift at all */
	b_leaf = tt->rn_b;
	do {
		x = t;
		t = t->rn_p;
	} while (b <= t->rn_b && x != head);
	/*
	 * Search through routes associated with node to
	 * insert new route according to index.
	 * For nodes of equal index, place more specific
	 * masks first.
	 */
	cplim = netmask + mlen;
	for (mp = &x->rn_mklist; m = *mp; mp = &m->rm_mklist) {
		if (m->rm_b < b_leaf)
			continue;
		if (m->rm_b > b_leaf)
			break;
		if (m->rm_mask == netmask) {
			m->rm_refs++;
			tt->rn_mklist = m;
			return tt;
		}
		if (rn_refines(netmask, m->rm_mask))
			break;
	}
	MKGet(m);
	if (m == 0) {
		printf("Mask for route not entered\n");
		return (tt);
	}
	Bzero(m, sizeof *m);
	m->rm_b = b_leaf;
	m->rm_mask = netmask;
	m->rm_mklist = *mp;
	*mp = m;
	tt->rn_mklist = m;
	return tt;
}

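/*
 * rn_delete(v, netmask, head) removes the route for (v, netmask) from the
 * tree, detaching its mask from any ancestor mask lists, and returns the
 * deleted pair of nodes, or 0 if the route cannot be found.
 */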
struct radix_node *
rn_delete(v, netmask, head)
	caddr_t v, netmask;
	struct radix_node *head;
{
	register struct radix_node *t, *p, *x = head;
	register struct radix_node *tt = rn_search(v, x);
	int b, head_off = x->rn_off, vlen = *(u_char *)v;
	struct radix_mask *m, *saved_m, **mp;
	struct radix_node *dupedkey, *saved_tt = tt;

	if (tt == 0 ||
	    Bcmp(v + head_off, tt->rn_key + head_off, vlen - head_off))
		return (0);
	/*
	 * Delete our route from mask lists.
	 */
	if (dupedkey = tt->rn_dupedkey) {
		if (netmask)
			netmask = rn_search(netmask, rn_maskhead)->rn_key;
		while (tt->rn_mask != netmask)
			if ((tt = tt->rn_dupedkey) == 0)
				return (0);
	}
	if (tt->rn_mask == 0 || (saved_m = m = tt->rn_mklist) == 0)
		goto on1;
	if (m->rm_mask != tt->rn_mask) {
		printf("rn_delete: inconsistent annotation\n");
		goto on1;
	}
	if (--m->rm_refs >= 0)
		goto on1;
	b = -1 - tt->rn_b;
	t = saved_tt->rn_p;
	if (b > t->rn_b)
		goto on1; /* Wasn't lifted at all */
	do {
		x = t;
		t = t->rn_p;
	} while (b <= t->rn_b && x != head);
	for (mp = &x->rn_mklist; m = *mp; mp = &m->rm_mklist)
		if (m == saved_m) {
			*mp = m->rm_mklist;
			MKFree(m);
			break;
		}
	if (m == 0)
		printf("rn_delete: couldn't find our annotation\n");
on1:
	/*
	 * Eliminate us from tree
	 */
	if (tt->rn_flags & RNF_ROOT)
		return (0);
#ifdef RN_DEBUG
	/* Get us out of the creation list */
	for (t = rn_clist; t && t->rn_ybro != tt; t = t->rn_ybro) {}
	if (t) t->rn_ybro = tt->rn_ybro;
#endif /* RN_DEBUG */
	t = tt->rn_p;
	if (dupedkey) {
		if (tt == saved_tt) {
			x = dupedkey; x->rn_p = t;
			if (t->rn_l == tt) t->rn_l = x; else t->rn_r = x;
		} else {
			for (x = p = saved_tt; p && p->rn_dupedkey != tt;)
				p = p->rn_dupedkey;
			if (p) p->rn_dupedkey = tt->rn_dupedkey;
			else printf("rn_delete: couldn't find us\n");
		}
		t = tt + 1;
		if (t->rn_flags & RNF_ACTIVE) {
#ifndef RN_DEBUG
			*++x = *t; p = t->rn_p;
#else
			b = t->rn_info; *++x = *t; t->rn_info = b; p = t->rn_p;
#endif
			if (p->rn_l == t) p->rn_l = x; else p->rn_r = x;
			x->rn_l->rn_p = x; x->rn_r->rn_p = x;
		}
		goto out;
	}
	if (t->rn_l == tt) x = t->rn_r; else x = t->rn_l;
	p = t->rn_p;
	if (p->rn_r == t) p->rn_r = x; else p->rn_l = x;
	x->rn_p = p;
	/*
	 * Demote routes attached to us.
	 */
	if (t->rn_mklist) {
		if (x->rn_b >= 0) {
			for (mp = &x->rn_mklist; m = *mp;)
				mp = &m->rm_mklist;
			*mp = t->rn_mklist;
		} else {
			for (m = t->rn_mklist; m;) {
				struct radix_mask *mm = m->rm_mklist;
				if (m == x->rn_mklist && (--(m->rm_refs) < 0)) {
					x->rn_mklist = 0;
					MKFree(m);
				} else
					printf("%s %x at %x\n",
					    "rn_delete: Orphaned Mask", m, x);
				m = mm;
			}
		}
	}
	/*
	 * We may be holding an active internal node in the tree.
	 */
	x = tt + 1;
	if (t != x) {
#ifndef RN_DEBUG
		*t = *x;
#else
		b = t->rn_info; *t = *x; t->rn_info = b;
#endif
		t->rn_l->rn_p = t; t->rn_r->rn_p = t;
		p = x->rn_p;
		if (p->rn_l == x) p->rn_l = t; else p->rn_r = t;
	}
out:
	tt->rn_flags &= ~RNF_ACTIVE;
	tt[1].rn_flags &= ~RNF_ACTIVE;
	return (tt);
}

rn_walk(rn, f, w)
	register struct radix_node *rn;
	register int (*f)();
	caddr_t w;
{
	int error;
	struct radix_node *base, *next;
	/*
	 * This gets complicated because we may delete the node
	 * while applying the function f to it, so we need to calculate
	 * the successor node in advance.
	 */
	/* First time through node, go left */
	while (rn->rn_b >= 0)
		rn = rn->rn_l;
	for (;;) {
		base = rn;
		/* If at right child go back up, otherwise, go right */
		while (rn->rn_p->rn_r == rn && (rn->rn_flags & RNF_ROOT) == 0)
			rn = rn->rn_p;
		/* Find the next *leaf* since next node might vanish, too */
		for (rn = rn->rn_p->rn_r; rn->rn_b >= 0;)
			rn = rn->rn_l;
		next = rn;
		/* Process leaves */
		while (rn = base) {
			base = rn->rn_dupedkey;
			if (!(rn->rn_flags & RNF_ROOT) && (error = (*f)(rn, w)))
				return (error);
		}
		rn = next;
		if (rn->rn_flags & RNF_ROOT)
			return (0);
	}
}
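/*
 * Illustrative sketch, not part of the original source: a caller can visit
 * every leaf in a tree by handing rn_walk a callback of the form used
 * above; the callback returns 0 to continue or a nonzero (errno-style)
 * value to abort the traversal.  rn_countleaf and rn_countnodes below are
 * hypothetical helpers shown only as a usage example.
 */
#ifdef notdef
static int
rn_countleaf(rn, w)
	struct radix_node *rn;
	caddr_t w;
{
	(*(int *)w)++;		/* count each leaf handed to us */
	return (0);
}

static int
rn_countnodes(rnh)
	struct radix_node_head *rnh;
{
	int n = 0;

	(void) rn_walk(rnh->rnh_treetop, rn_countleaf, (caddr_t)&n);
	return (n);
}
#endif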

rn_inithead(head, off)
	void **head;
	int off;
{
	register struct radix_node_head *rnh;
	register struct radix_node *t, *tt, *ttt;
	if (*head)
		return (1);
	R_Malloc(rnh, struct radix_node_head *, sizeof (*rnh));
	if (rnh == 0)
		return (0);
	Bzero(rnh, sizeof (*rnh));
	*head = rnh;
	t = rn_newpair(rn_zeros, off, rnh->rnh_nodes);
	ttt = rnh->rnh_nodes + 2;
	t->rn_r = ttt;
	t->rn_p = t;
	tt = t->rn_l;
	tt->rn_flags = t->rn_flags = RNF_ROOT | RNF_ACTIVE;
	tt->rn_b = -1 - off;
	*ttt = *tt;
	ttt->rn_key = rn_ones;
	rnh->rnh_add = rn_addroute;
	rnh->rnh_delete = rn_delete;
	rnh->rnh_match = rn_match;
	rnh->rnh_walk = rn_walk;
	rnh->rnh_treetop = t;
	return (1);
}

rn_init()
{
	char *cp, *cplim;
#ifdef KERNEL
	struct domain *dom;

	for (dom = domains; dom; dom = dom->dom_next)
		if (dom->dom_maxrtkey > max_keylen)
			max_keylen = dom->dom_maxrtkey;
#endif
	if (max_keylen == 0) {
		printf("rn_init: radix functions require max_keylen be set\n");
		return;
	}
	R_Malloc(rn_zeros, char *, 3 * max_keylen);
	if (rn_zeros == NULL)
		panic("rn_init");
	Bzero(rn_zeros, 3 * max_keylen);
	rn_ones = cp = rn_zeros + max_keylen;
	maskedKey = cplim = rn_ones + max_keylen;
	while (cp < cplim)
		*cp++ = -1;
	if (rn_inithead((void **)&mask_rnhead, 0) == 0)
		panic("rn_init 2");
}
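/*
 * Illustrative sketch, not part of the original source: rn_init() is run
 * once, after the domains list is set up, to size the scratch buffers and
 * build the mask tree; a protocol family then builds its own tree by
 * calling rn_inithead() with the bit offset of the address portion of its
 * sockaddr.  myfamily_rnhead and MYFAMILY_RTOFFSET are hypothetical names
 * used only for this example.
 */
#ifdef notdef
void *myfamily_rnhead;
#define MYFAMILY_RTOFFSET 32	/* e.g. bits preceding the address in the key */

myfamily_rtattach()
{
	if (rn_inithead(&myfamily_rnhead, MYFAMILY_RTOFFSET) == 0)
		panic("myfamily_rtattach");
}
#endif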