Commit | Line | Data |
---|---|---|
35e117b3 | 1 | /* |
ad787160 C |
2 | * Copyright (c) 1988, 1989, 1993 |
3 | * The Regents of the University of California. All rights reserved. | |
35e117b3 | 4 | * |
ad787160 C |
5 | * Redistribution and use in source and binary forms, with or without |
6 | * modification, are permitted provided that the following conditions | |
7 | * are met: | |
8 | * 1. Redistributions of source code must retain the above copyright | |
9 | * notice, this list of conditions and the following disclaimer. | |
10 | * 2. Redistributions in binary form must reproduce the above copyright | |
11 | * notice, this list of conditions and the following disclaimer in the | |
12 | * documentation and/or other materials provided with the distribution. | |
13 | * 3. All advertising materials mentioning features or use of this software | |
14 | * must display the following acknowledgement: | |
15 | * This product includes software developed by the University of | |
16 | * California, Berkeley and its contributors. | |
17 | * 4. Neither the name of the University nor the names of its contributors | |
18 | * may be used to endorse or promote products derived from this software | |
19 | * without specific prior written permission. | |
35e117b3 | 20 | * |
ad787160 C |
21 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
22 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
23 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
24 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | |
25 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |
26 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | |
27 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | |
28 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |
29 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | |
30 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |
31 | * SUCH DAMAGE. | |
32 | * | |
33 | * @(#)radix.c 8.1 (Berkeley) 6/10/93 | |
35e117b3 KS |
34 | */ |
35 | ||
36 | /* | |
37 | * Routines to build and maintain radix trees for routing lookups. | |
38 | */ | |
39 | #ifndef RNF_NORMAL | |
5548a02f KB |
40 | #include <sys/param.h> |
41 | #include <sys/systm.h> | |
42 | #include <sys/malloc.h> | |
6b75995b | 43 | #define M_DONTWAIT M_NOWAIT |
5c48f39f | 44 | #ifdef KERNEL |
5548a02f | 45 | #include <sys/domain.h> |
35e117b3 | 46 | #endif |
5c48f39f | 47 | #endif |
5548a02f KB |
48 | |
49 | #include <net/radix.h> | |
50 | ||
5c48f39f | 51 | int max_keylen; |
35e117b3 | 52 | struct radix_mask *rn_mkfreelist; |
7444940e | 53 | struct radix_node_head *mask_rnhead; |
5c48f39f KS |
54 | static int gotOddMasks; |
55 | static char *maskedKey; | |
56 | static char *rn_zeros, *rn_ones; | |
57 | ||
d2ca79ee | 58 | #define rn_masktop (mask_rnhead->rnh_treetop) |
5ba4cdd6 KS |
59 | #undef Bcmp |
60 | #define Bcmp(a, b, l) (l == 0 ? 0 : bcmp((caddr_t)(a), (caddr_t)(b), (u_long)l)) | |
35e117b3 KS |
61 | /* |
62 | * The data structure for the keys is a radix tree with one way | |
63 | * branching removed. The index rn_b at an internal node n represents a bit | |
64 | * position to be tested. The tree is arranged so that all descendants | |
65 | * of a node n have keys whose bits all agree up to position rn_b - 1. | |
66 | * (We say the index of n is rn_b.) | |
67 | * | |
68 | * There is at least one descendant which has a one bit at position rn_b, | |
69 | * and at least one with a zero there. | |
70 | * | |
71 | * A route is determined by a pair of key and mask. We require that the | |
72 | * bit-wise logical and of the key and mask to be the key. | |
73 | * We define the index of a route associated with the mask to be |
74 | * the first bit number in the mask where 0 occurs (with bit number 0 | |
75 | * representing the highest order bit). | |
76 | * | |
77 | * We say a mask is normal if every bit is 0, past the index of the mask. | |
78 | * If a node n has a descendant (k, m) with index(m) == index(n) == rn_b, | |
79 | * and m is a normal mask, then the route applies to every descendant of n. | |
80 | * If the index(m) < rn_b, this implies the trailing last few bits of k | |
81 | * before bit b are all 0, (and hence consequently true of every descendant | |
82 | * of n), so the route applies to all descendants of the node as well. | |
83 | * | |
84 | * The present version of the code makes no use of normal routes, | |
85 | * but similar logic shows that a non-normal mask m such that | |
86 | * index(m) <= index(n) could potentially apply to many children of n. | |
87 | * Thus, for each non-host route, we attach its mask to a list at an internal | |
88 | * node as high in the tree as we can go. | |
89 | */ | |
90 | ||
91 | struct radix_node * | |
12d5780a KB |
92 | rn_search(v_arg, head) |
93 | void *v_arg; | |
35e117b3 | 94 | struct radix_node *head; |
35e117b3 KS |
95 | { |
96 | register struct radix_node *x; | |
12d5780a | 97 | register caddr_t v; |
35e117b3 | 98 | |
12d5780a | 99 | for (x = head, v = v_arg; x->rn_b >= 0;) { |
35e117b3 KS |
100 | if (x->rn_bmask & v[x->rn_off]) |
101 | x = x->rn_r; | |
102 | else | |
103 | x = x->rn_l; | |
104 | } | |
12d5780a | 105 | return (x); |
35e117b3 KS |
106 | }; |
107 | ||
b72a6efb | 108 | struct radix_node * |
12d5780a | 109 | rn_search_m(v_arg, head, m_arg) |
b72a6efb | 110 | struct radix_node *head; |
12d5780a | 111 | void *v_arg, *m_arg; |
b72a6efb KS |
112 | { |
113 | register struct radix_node *x; | |
12d5780a | 114 | register caddr_t v = v_arg, m = m_arg; |
b72a6efb KS |
115 | |
116 | for (x = head; x->rn_b >= 0;) { | |
117 | if ((x->rn_bmask & m[x->rn_off]) && | |
118 | (x->rn_bmask & v[x->rn_off])) | |
119 | x = x->rn_r; | |
120 | else | |
121 | x = x->rn_l; | |
122 | } | |
123 | return x; | |
124 | }; | |
b72a6efb | 125 | |
/*
 * Return non-zero iff mask m_arg "refines" mask n_arg, i.e. m is
 * strictly more specific than n: every bit set in n is also set in m,
 * and the two are not identical.
 *
 * Both arguments are length-prefixed byte strings: the first byte is
 * the total length (including the length byte itself), as shown by the
 * `n + *(u_char *)n' limit computation below.
 */
int
rn_refines(m_arg, n_arg)
	void *m_arg, *n_arg;
{
	register caddr_t m = m_arg, n = n_arg;
	/* lim2 = true end of n; lim = end of the overlap of m and n */
	register caddr_t lim, lim2 = lim = n + *(u_char *)n;
	/* longer > 0 means n is longer than m; note n, m step past length byte */
	int longer = (*(u_char *)n++) - (int)(*(u_char *)m++);
	int masks_are_equal = 1;

	if (longer > 0)
		lim -= longer;
	/* Over the common prefix: any bit in n missing from m disqualifies m. */
	while (n < lim) {
		if (*n & ~(*m))
			return 0;
		if (*n++ != *m++)
			masks_are_equal = 0;

	}
	/* If n extends past m, its tail must be all zero bits. */
	while (n < lim2)
		if (*n++)
			return 0;
	/* Equal prefixes: a longer m refines n only if its tail is non-zero. */
	if (masks_are_equal && (longer < 0))
		for (lim2 = m - longer; m < lim2; )
			if (*m++)
				return 1;
	return (!masks_are_equal);
}
153 | ||
35e117b3 | 154 | |
/*
 * Look up the best route for destination key `v_arg' in the tree rooted
 * at head->rnh_treetop.  First tries an exact (host) match at the leaf
 * the key's bits lead to, then the masked routes chained on that leaf's
 * rn_dupedkey list, and finally backtracks toward the root trying each
 * ancestor's rn_mklist masks via a masked re-search.
 *
 * Returns the matching leaf node, or 0 if nothing matches.
 * NOTE(review): uses the file-global scratch buffer `maskedKey', so
 * this is not reentrant — presumably callers serialize; confirm.
 */
struct radix_node *
rn_match(v_arg, head)
	void *v_arg;
	struct radix_node_head *head;
{
	caddr_t v = v_arg;
	register struct radix_node *t = head->rnh_treetop, *x;
	register caddr_t cp = v, cp2, cp3;
	caddr_t cplim, mstart;
	struct radix_node *saved_t, *top = t;
	/* Keys are length-prefixed: first byte of v is its total length. */
	int off = t->rn_off, vlen = *(u_char *)cp, matched_off;

	/*
	 * Open code rn_search(v, top) to avoid overhead of extra
	 * subroutine call.
	 */
	for (; t->rn_b >= 0; ) {
		if (t->rn_bmask & cp[t->rn_off])
			t = t->rn_r;
		else
			t = t->rn_l;
	}
	/*
	 * See if we match exactly as a host destination
	 */
	cp += off; cp2 = t->rn_key + off; cplim = v + vlen;
	for (; cp < cplim; cp++, cp2++)
		if (*cp != *cp2)
			goto on1;
	/*
	 * This extra grot is in case we are explicitly asked
	 * to look up the default.  Ugh!
	 */
	if ((t->rn_flags & RNF_ROOT) && t->rn_dupedkey)
		t = t->rn_dupedkey;
	return t;
on1:
	matched_off = cp - v;
	saved_t = t;
	do {
		if (t->rn_mask) {
			/*
			 * Even if we don't match exactly as a host,
			 * we may match if the leaf we wound up at is
			 * a route to a net: compare under the leaf's mask.
			 */
			cp3 = matched_off + t->rn_mask;
			cp2 = matched_off + t->rn_key;
			for (; cp < cplim; cp++)
				if ((*cp2++ ^ *cp) & *cp3++)
					break;
			if (cp == cplim)
				return t;
			cp = matched_off + v;	/* rewind for next dupedkey */
		}
	} while (t = t->rn_dupedkey);
	t = saved_t;
	/* start searching up the tree */
	do {
		register struct radix_mask *m;
		t = t->rn_p;
		if (m = t->rn_mklist) {
			/*
			 * After doing measurements here, it may
			 * turn out to be faster to open code
			 * rn_search_m here instead of always
			 * copying and masking.
			 */
			off = min(t->rn_off, matched_off);
			mstart = maskedKey + off;
			do {
				/* Build (v & mask) in maskedKey, then look it up. */
				cp2 = mstart;
				cp3 = m->rm_mask + off;
				for (cp = v + off; cp < cplim;)
					*cp2++ = *cp++ & *cp3++;
				x = rn_search(maskedKey, t);
				/* Only leaves carrying exactly this mask qualify. */
				while (x && x->rn_mask != m->rm_mask)
					x = x->rn_dupedkey;
				if (x &&
				    (Bcmp(mstart, x->rn_key + off,
					vlen - off) == 0))
					return x;
			} while (m = m->rm_mklist);
		}
	} while (t != top);
	return 0;
};
242 | ||
#ifdef RN_DEBUG
int	rn_nodenum;		/* monotonic id assigned to each node created */
struct	radix_node *rn_clist;	/* list of all nodes, in creation order */
int	rn_saveinfo;		/* preserve rn_info across node copies */
int	rn_debug =  1;		/* non-zero: trace tree in rn_insert() */
#endif
249 | ||
250 | struct radix_node * | |
251 | rn_newpair(v, b, nodes) | |
12d5780a | 252 | void *v; |
14625339 | 253 | int b; |
35e117b3 KS |
254 | struct radix_node nodes[2]; |
255 | { | |
256 | register struct radix_node *tt = nodes, *t = tt + 1; | |
257 | t->rn_b = b; t->rn_bmask = 0x80 >> (b & 7); | |
258 | t->rn_l = tt; t->rn_off = b >> 3; | |
12d5780a | 259 | tt->rn_b = -1; tt->rn_key = (caddr_t)v; tt->rn_p = t; |
35e117b3 KS |
260 | tt->rn_flags = t->rn_flags = RNF_ACTIVE; |
261 | #ifdef RN_DEBUG | |
262 | tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++; | |
263 | tt->rn_twin = t; tt->rn_ybro = rn_clist; rn_clist = tt; | |
264 | #endif | |
265 | return t; | |
266 | } | |
267 | ||
/*
 * Insert key `v_arg' into the tree under `head', using the two
 * caller-supplied nodes[] for the new leaf/interior pair.
 *
 * If the key is already present, sets *dupentry = 1 and returns the
 * existing leaf without consuming `nodes'.  Otherwise sets *dupentry
 * to 0, splices the new pair in at the first differing bit, and
 * returns the new leaf.
 */
struct radix_node *
rn_insert(v_arg, head, dupentry, nodes)
	void *v_arg;
	struct radix_node_head *head;
	int *dupentry;
	struct radix_node nodes[2];
{
	caddr_t v = v_arg;
	struct radix_node *top = head->rnh_treetop;
	/* First byte of v is the key's total length. */
	int head_off = top->rn_off, vlen = (int)*((u_char *)v);
	register struct radix_node *t = rn_search(v_arg, top);
	register caddr_t cp = v + head_off;
	register int b;
	struct radix_node *tt;
	/*
	 * find first bit at which v and t->rn_key differ
	 */
	{
		register caddr_t cp2 = t->rn_key + head_off;
		register int cmp_res;
		caddr_t cplim = v + vlen;

		while (cp < cplim)
			if (*cp2++ != *cp++)
				goto on1;
		*dupentry = 1;
		return t;
	on1:
		*dupentry = 0;
		/* b = absolute bit number of the first mismatching bit. */
		cmp_res = (cp[-1] ^ cp2[-1]) & 0xff;
		for (b = (cp - v) << 3; cmp_res; b--)
			cmp_res >>= 1;
	}
	{
		register struct radix_node *p, *x = top;
		/* Re-descend until we pass index b; splice the new pair there. */
		cp = v;
		do {
			p = x;
			if (cp[x->rn_off] & x->rn_bmask)
				x = x->rn_r;
			else x = x->rn_l;
		} while (b > (unsigned) x->rn_b); /* x->rn_b < b && x->rn_b >= 0 */
#ifdef RN_DEBUG
		if (rn_debug)
			printf("Going In:\n"), traverse(p);
#endif
		t = rn_newpair(v_arg, b, nodes); tt = t->rn_l;
		/* Hook the new interior node t in as p's child on v's side. */
		if ((cp[p->rn_off] & p->rn_bmask) == 0)
			p->rn_l = t;
		else
			p->rn_r = t;
		x->rn_p = t; t->rn_p = p; /* frees x, p as temp vars below */
		/* Put the old subtree x and the new leaf tt on the proper sides. */
		if ((cp[t->rn_off] & t->rn_bmask) == 0) {
			t->rn_r = x;
		} else {
			t->rn_r = tt; t->rn_l = x;
		}
#ifdef RN_DEBUG
		if (rn_debug)
			printf("Coming out:\n"), traverse(p);
#endif
	}
	return (tt);
}
332 | ||
d6bc9b28 | 333 | struct radix_node * |
12d5780a | 334 | rn_addmask(n_arg, search, skip) |
14625339 | 335 | int search, skip; |
12d5780a | 336 | void *n_arg; |
d6bc9b28 | 337 | { |
12d5780a | 338 | caddr_t netmask = (caddr_t)n_arg; |
d6bc9b28 KS |
339 | register struct radix_node *x; |
340 | register caddr_t cp, cplim; | |
341 | register int b, mlen, j; | |
342 | int maskduplicated; | |
343 | ||
344 | mlen = *(u_char *)netmask; | |
345 | if (search) { | |
d2ca79ee | 346 | x = rn_search(netmask, rn_masktop); |
d6bc9b28 KS |
347 | mlen = *(u_char *)netmask; |
348 | if (Bcmp(netmask, x->rn_key, mlen) == 0) | |
349 | return (x); | |
350 | } | |
5c48f39f | 351 | R_Malloc(x, struct radix_node *, max_keylen + 2 * sizeof (*x)); |
d6bc9b28 KS |
352 | if (x == 0) |
353 | return (0); | |
5c48f39f | 354 | Bzero(x, max_keylen + 2 * sizeof (*x)); |
d6bc9b28 KS |
355 | cp = (caddr_t)(x + 2); |
356 | Bcopy(netmask, cp, mlen); | |
357 | netmask = cp; | |
d2ca79ee | 358 | x = rn_insert(netmask, mask_rnhead, &maskduplicated, x); |
d6bc9b28 KS |
359 | /* |
360 | * Calculate index of mask. | |
361 | */ | |
362 | cplim = netmask + mlen; | |
363 | for (cp = netmask + skip; cp < cplim; cp++) | |
364 | if (*(u_char *)cp != 0xff) | |
365 | break; | |
366 | b = (cp - netmask) << 3; | |
367 | if (cp != cplim) { | |
368 | if (*cp != 0) { | |
369 | gotOddMasks = 1; | |
370 | for (j = 0x80; j; b++, j >>= 1) | |
371 | if ((j & *cp) == 0) | |
372 | break; | |
373 | } | |
374 | } | |
375 | x->rn_b = -1 - b; | |
376 | return (x); | |
377 | } | |
378 | ||
12d5780a KB |
379 | struct radix_node * |
380 | rn_addroute(v_arg, n_arg, head, treenodes) | |
381 | void *v_arg, *n_arg; | |
d2ca79ee | 382 | struct radix_node_head *head; |
35e117b3 KS |
383 | struct radix_node treenodes[2]; |
384 | { | |
12d5780a | 385 | caddr_t v = (caddr_t)v_arg, netmask = (caddr_t)n_arg; |
35e117b3 | 386 | register int j; |
573d82c4 | 387 | register caddr_t cp; |
35e117b3 | 388 | register struct radix_node *t, *x, *tt; |
d2ca79ee | 389 | struct radix_node *saved_tt, *top = head->rnh_treetop; |
35e117b3 | 390 | short b = 0, b_leaf; |
d6bc9b28 | 391 | int vlen = *(u_char *)v, mlen, keyduplicated; |
573d82c4 | 392 | caddr_t cplim; unsigned char *maskp; |
35e117b3 | 393 | struct radix_mask *m, **mp; |
35e117b3 KS |
394 | |
395 | /* | |
396 | * In dealing with non-contiguous masks, there may be | |
397 | * many different routes which have the same mask. | |
398 | * We will find it useful to have a unique pointer to | |
399 | * the mask to speed avoiding duplicate references at | |
400 | * nodes and possibly save time in calculating indices. | |
401 | */ | |
402 | if (netmask) { | |
d2ca79ee | 403 | x = rn_search(netmask, rn_masktop); |
35e117b3 | 404 | mlen = *(u_char *)netmask; |
d6bc9b28 | 405 | if (Bcmp(netmask, x->rn_key, mlen) != 0) { |
d2ca79ee | 406 | x = rn_addmask(netmask, 0, top->rn_off); |
5ba4cdd6 KS |
407 | if (x == 0) |
408 | return (0); | |
35e117b3 | 409 | } |
d6bc9b28 KS |
410 | netmask = x->rn_key; |
411 | b = -1 - x->rn_b; | |
35e117b3 KS |
412 | } |
413 | /* | |
414 | * Deal with duplicated keys: attach node to previous instance | |
415 | */ | |
416 | saved_tt = tt = rn_insert(v, head, &keyduplicated, treenodes); | |
417 | if (keyduplicated) { | |
5ba4cdd6 | 418 | do { |
35e117b3 KS |
419 | if (tt->rn_mask == netmask) |
420 | return (0); | |
5ba4cdd6 | 421 | t = tt; |
d370f493 KS |
422 | if (netmask == 0 || |
423 | (tt->rn_mask && rn_refines(netmask, tt->rn_mask))) | |
424 | break; | |
5ba4cdd6 | 425 | } while (tt = tt->rn_dupedkey); |
35e117b3 KS |
426 | /* |
427 | * If the mask is not duplicated, we wouldn't | |
428 | * find it among possible duplicate key entries | |
429 | * anyway, so the above test doesn't hurt. | |
430 | * | |
f21ae3e6 KS |
431 | * We sort the masks for a duplicated key the same way as |
432 | * in a masklist -- most specific to least specific. | |
433 | * This may require the unfortunate nuisance of relocating | |
35e117b3 KS |
434 | * the head of the list. |
435 | */ | |
d370f493 KS |
436 | if (tt && t == saved_tt) { |
437 | struct radix_node *xx = x; | |
438 | /* link in at head of list */ | |
439 | (tt = treenodes)->rn_dupedkey = t; | |
440 | tt->rn_flags = t->rn_flags; | |
441 | tt->rn_p = x = t->rn_p; | |
442 | if (x->rn_l == t) x->rn_l = tt; else x->rn_r = tt; | |
443 | saved_tt = tt; x = xx; | |
444 | } else { | |
445 | (tt = treenodes)->rn_dupedkey = t->rn_dupedkey; | |
446 | t->rn_dupedkey = tt; | |
447 | } | |
35e117b3 KS |
448 | #ifdef RN_DEBUG |
449 | t=tt+1; tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++; | |
450 | tt->rn_twin = t; tt->rn_ybro = rn_clist; rn_clist = tt; | |
451 | #endif | |
452 | t = saved_tt; | |
5ba4cdd6 KS |
453 | tt->rn_key = (caddr_t) v; |
454 | tt->rn_b = -1; | |
35e117b3 KS |
455 | tt->rn_flags = t->rn_flags & ~RNF_ROOT; |
456 | } | |
457 | /* | |
458 | * Put mask in tree. | |
459 | */ | |
5ba4cdd6 KS |
460 | if (netmask) { |
461 | tt->rn_mask = netmask; | |
462 | tt->rn_b = x->rn_b; | |
35e117b3 | 463 | } |
35e117b3 KS |
464 | t = saved_tt->rn_p; |
465 | b_leaf = -1 - t->rn_b; | |
466 | if (t->rn_r == saved_tt) x = t->rn_l; else x = t->rn_r; | |
467 | /* Promote general routes from below */ | |
468 | if (x->rn_b < 0) { | |
1f14def8 | 469 | if (x->rn_mask && (x->rn_b >= b_leaf) && x->rn_mklist == 0) { |
35e117b3 KS |
470 | MKGet(m); |
471 | if (m) { | |
472 | Bzero(m, sizeof *m); | |
473 | m->rm_b = x->rn_b; | |
474 | m->rm_mask = x->rn_mask; | |
5ba4cdd6 | 475 | x->rn_mklist = t->rn_mklist = m; |
35e117b3 KS |
476 | } |
477 | } | |
478 | } else if (x->rn_mklist) { | |
479 | /* | |
480 | * Skip over masks whose index is > that of new node | |
481 | */ | |
482 | for (mp = &x->rn_mklist; m = *mp; mp = &m->rm_mklist) | |
483 | if (m->rm_b >= b_leaf) | |
484 | break; | |
485 | t->rn_mklist = m; *mp = 0; | |
486 | } | |
487 | /* Add new route to highest possible ancestor's list */ | |
488 | if ((netmask == 0) || (b > t->rn_b )) | |
489 | return tt; /* can't lift at all */ | |
490 | b_leaf = tt->rn_b; | |
491 | do { | |
492 | x = t; | |
493 | t = t->rn_p; | |
d2ca79ee | 494 | } while (b <= t->rn_b && x != top); |
35e117b3 KS |
495 | /* |
496 | * Search through routes associated with node to | |
497 | * insert new route according to index. | |
498 | * For nodes of equal index, place more specific | |
499 | * masks first. | |
500 | */ | |
501 | cplim = netmask + mlen; | |
502 | for (mp = &x->rn_mklist; m = *mp; mp = &m->rm_mklist) { | |
503 | if (m->rm_b < b_leaf) | |
504 | continue; | |
505 | if (m->rm_b > b_leaf) | |
506 | break; | |
507 | if (m->rm_mask == netmask) { | |
508 | m->rm_refs++; | |
509 | tt->rn_mklist = m; | |
510 | return tt; | |
511 | } | |
d370f493 KS |
512 | if (rn_refines(netmask, m->rm_mask)) |
513 | break; | |
35e117b3 | 514 | } |
35e117b3 KS |
515 | MKGet(m); |
516 | if (m == 0) { | |
517 | printf("Mask for route not entered\n"); | |
518 | return (tt); | |
519 | } | |
520 | Bzero(m, sizeof *m); | |
521 | m->rm_b = b_leaf; | |
522 | m->rm_mask = netmask; | |
523 | m->rm_mklist = *mp; | |
524 | *mp = m; | |
525 | tt->rn_mklist = m; | |
526 | return tt; | |
527 | } | |
528 | ||
/*
 * Delete the route (key v_arg, mask netmask_arg) from the tree under
 * `head'.  Undoes everything rn_addroute did: drops the route's
 * rn_mklist annotation (freeing it when its refcount is exhausted),
 * unlinks the leaf from its rn_dupedkey chain or splices its parent
 * out of the tree, and — because the leaf/interior pair live in
 * caller-owned storage — relocates any still-active twin node so the
 * pair can be returned to the caller.
 *
 * Returns the deleted pair (tt) with RNF_ACTIVE cleared on both
 * halves, or 0 if the route was not found or is the root.
 */
struct radix_node *
rn_delete(v_arg, netmask_arg, head)
	void *v_arg, *netmask_arg;
	struct radix_node_head *head;
{
	register struct radix_node *t, *p, *x, *tt;
	struct radix_mask *m, *saved_m, **mp;
	struct radix_node *dupedkey, *saved_tt, *top;
	caddr_t v, netmask;
	int b, head_off, vlen;

	v = v_arg;
	netmask = netmask_arg;
	x = head->rnh_treetop;
	tt = rn_search(v, x);
	head_off = x->rn_off;
	vlen = *(u_char *)v;	/* keys are length-prefixed */
	saved_tt = tt;
	top = x;
	if (tt == 0 ||
	    Bcmp(v + head_off, tt->rn_key + head_off, vlen - head_off))
		return (0);
	/*
	 * Delete our route from mask lists.
	 */
	if (dupedkey = tt->rn_dupedkey) {
		/* Canonicalize the mask pointer so == comparison works. */
		if (netmask)
			netmask = rn_search(netmask, rn_masktop)->rn_key;
		while (tt->rn_mask != netmask)
			if ((tt = tt->rn_dupedkey) == 0)
				return (0);
	}
	if (tt->rn_mask == 0 || (saved_m = m = tt->rn_mklist) == 0)
		goto on1;
	if (m->rm_mask != tt->rn_mask) {
		printf("rn_delete: inconsistent annotation\n");
		goto on1;
	}
	if (--m->rm_refs >= 0)
		goto on1;	/* annotation still shared by other routes */
	/* Climb to the ancestor holding our annotation, mirroring rn_addroute. */
	b = -1 - tt->rn_b;
	t = saved_tt->rn_p;
	if (b > t->rn_b)
		goto on1; /* Wasn't lifted at all */
	do {
		x = t;
		t = t->rn_p;
	} while (b <= t->rn_b && x != top);
	for (mp = &x->rn_mklist; m = *mp; mp = &m->rm_mklist)
		if (m == saved_m) {
			*mp = m->rm_mklist;
			MKFree(m);
			break;
		}
	if (m == 0)
		printf("rn_delete: couldn't find our annotation\n");
on1:
	/*
	 * Eliminate us from tree
	 */
	if (tt->rn_flags & RNF_ROOT)
		return (0);
#ifdef RN_DEBUG
	/* Get us out of the creation list */
	for (t = rn_clist; t && t->rn_ybro != tt; t = t->rn_ybro) {}
	if (t) t->rn_ybro = tt->rn_ybro;
#endif
	t = tt->rn_p;
	if (dupedkey) {
		/* Key survives via its duplicates; just unchain this leaf. */
		if (tt == saved_tt) {
			x = dupedkey; x->rn_p = t;
			if (t->rn_l == tt) t->rn_l = x; else t->rn_r = x;
		} else {
			for (x = p = saved_tt; p && p->rn_dupedkey != tt;)
				p = p->rn_dupedkey;
			if (p) p->rn_dupedkey = tt->rn_dupedkey;
			else printf("rn_delete: couldn't find us\n");
		}
		/*
		 * Our twin interior node may still be in use elsewhere in
		 * the tree; move its contents into the replacement pair x.
		 */
		t = tt + 1;
		if (t->rn_flags & RNF_ACTIVE) {
#ifndef RN_DEBUG
			*++x = *t; p = t->rn_p;
#else
			b = t->rn_info; *++x = *t; t->rn_info = b; p = t->rn_p;
#endif
			if (p->rn_l == t) p->rn_l = x; else p->rn_r = x;
			x->rn_l->rn_p = x; x->rn_r->rn_p = x;
		}
		goto out;
	}
	/* No duplicates: splice our parent t out, lifting sibling x. */
	if (t->rn_l == tt) x = t->rn_r; else x = t->rn_l;
	p = t->rn_p;
	if (p->rn_r == t) p->rn_r = x; else p->rn_l = x;
	x->rn_p = p;
	/*
	 * Demote routes attached to us.
	 */
	if (t->rn_mklist) {
		if (x->rn_b >= 0) {
			/* Sibling is interior: append our list to its list. */
			for (mp = &x->rn_mklist; m = *mp;)
				mp = &m->rm_mklist;
			*mp = t->rn_mklist;
		} else {
			/* Sibling is a leaf: only its own annotation can remain. */
			for (m = t->rn_mklist; m;) {
				struct radix_mask *mm = m->rm_mklist;
				if (m == x->rn_mklist && (--(m->rm_refs) < 0)) {
					x->rn_mklist = 0;
					MKFree(m);
				} else
					printf("%s %x at %x\n",
					    "rn_delete: Orphaned Mask", m, x);
				m = mm;
			}
		}
	}
	/*
	 * We may be holding an active internal node in the tree.
	 */
	x = tt + 1;
	if (t != x) {
#ifndef RN_DEBUG
		*t = *x;
#else
		b = t->rn_info; *t = *x; t->rn_info = b;
#endif
		t->rn_l->rn_p = t; t->rn_r->rn_p = t;
		p = x->rn_p;
		if (p->rn_l == x) p->rn_l = t; else p->rn_r = t;
	}
out:
	tt->rn_flags &= ~RNF_ACTIVE;
	tt[1].rn_flags &= ~RNF_ACTIVE;
	return (tt);
}
7444940e | 663 | |
/*
 * Apply `f' to every leaf in the tree under `h' (including each
 * rn_dupedkey duplicate, excluding the RNF_ROOT sentinels), passing
 * through the opaque argument `w'.  Stops and returns the first
 * non-zero value `f' returns; returns 0 after a full traversal.
 *
 * `f' may delete the node it is given: the successor leaf is computed
 * before each batch of leaves is processed.
 */
int
rn_walktree(h, f, w)
	struct radix_node_head *h;
	register int (*f)();
	void *w;
{
	int error;
	struct radix_node *base, *next;
	register struct radix_node *rn = h->rnh_treetop;
	/*
	 * This gets complicated because we may delete the node
	 * while applying the function f to it, so we need to calculate
	 * the successor node in advance.
	 */
	/* First time through node, go left */
	while (rn->rn_b >= 0)
		rn = rn->rn_l;
	for (;;) {
		base = rn;
		/* If at right child go back up, otherwise, go right */
		while (rn->rn_p->rn_r == rn && (rn->rn_flags & RNF_ROOT) == 0)
			rn = rn->rn_p;
		/* Find the next *leaf* since next node might vanish, too */
		for (rn = rn->rn_p->rn_r; rn->rn_b >= 0;)
			rn = rn->rn_l;
		next = rn;
		/* Process leaves: the saved leaf plus all its dupedkey chain */
		while (rn = base) {
			base = rn->rn_dupedkey;
			if (!(rn->rn_flags & RNF_ROOT) && (error = (*f)(rn, w)))
				return (error);
		}
		rn = next;
		if (rn->rn_flags & RNF_ROOT)
			return (0);
	}
	/* NOTREACHED */
}
35e117b3 | 702 | |
/*
 * Allocate and initialize a radix_node_head at *head, whose keys start
 * `off' bytes in.  The three rnh_nodes form the sentinel skeleton:
 * nodes[1] is the top interior node (its own parent), nodes[0] the
 * left RNF_ROOT leaf keyed on rn_zeros, nodes[2] a copy of it on the
 * right keyed on rn_ones.
 *
 * Returns 1 on success (or if *head is already set), 0 if the
 * allocation fails.
 */
int
rn_inithead(head, off)
	void **head;
	int off;
{
	register struct radix_node_head *rnh;
	register struct radix_node *t, *tt, *ttt;
	if (*head)
		return (1);	/* already initialized */
	R_Malloc(rnh, struct radix_node_head *, sizeof (*rnh));
	if (rnh == 0)
		return (0);
	Bzero(rnh, sizeof (*rnh));
	*head = rnh;
	t = rn_newpair(rn_zeros, off, rnh->rnh_nodes);
	ttt = rnh->rnh_nodes + 2;
	t->rn_r = ttt;
	t->rn_p = t;	/* the top node is its own parent */
	tt = t->rn_l;
	tt->rn_flags = t->rn_flags = RNF_ROOT | RNF_ACTIVE;
	tt->rn_b = -1 - off;
	*ttt = *tt;	/* right sentinel mirrors the left one... */
	ttt->rn_key = rn_ones;	/* ...but carries the all-ones key */
	rnh->rnh_addaddr = rn_addroute;
	rnh->rnh_deladdr = rn_delete;
	rnh->rnh_matchaddr = rn_match;
	rnh->rnh_walktree = rn_walktree;
	rnh->rnh_treetop = t;
	return (1);
}
5c48f39f | 733 | |
12d5780a | 734 | void |
5c48f39f KS |
735 | rn_init() |
736 | { | |
737 | char *cp, *cplim; | |
738 | #ifdef KERNEL | |
739 | struct domain *dom; | |
740 | ||
741 | for (dom = domains; dom; dom = dom->dom_next) | |
742 | if (dom->dom_maxrtkey > max_keylen) | |
743 | max_keylen = dom->dom_maxrtkey; | |
744 | #endif | |
745 | if (max_keylen == 0) { | |
746 | printf("rn_init: radix functions require max_keylen be set\n"); | |
747 | return; | |
748 | } | |
749 | R_Malloc(rn_zeros, char *, 3 * max_keylen); | |
750 | if (rn_zeros == NULL) | |
751 | panic("rn_init"); | |
752 | Bzero(rn_zeros, 3 * max_keylen); | |
753 | rn_ones = cp = rn_zeros + max_keylen; | |
754 | maskedKey = cplim = rn_ones + max_keylen; | |
755 | while (cp < cplim) | |
756 | *cp++ = -1; | |
757 | if (rn_inithead((void **)&mask_rnhead, 0) == 0) | |
758 | panic("rn_init 2"); | |
759 | } |