typo
[unix-history] / usr / src / sys / kern / vfs_subr.c
CommitLineData
3c4390e8 1/*
ec54f0cc
KB
2 * Copyright (c) 1989, 1993
3 * The Regents of the University of California. All rights reserved.
3c4390e8 4 *
dbf0c423 5 * %sccs.include.redist.c%
3c4390e8 6 *
2b5ada11 7 * @(#)vfs_subr.c 8.8 (Berkeley) %G%
3c4390e8
KM
8 */
9
10/*
11 * External virtual filesystem routines
12 */
13
cb796a23 14#include <sys/param.h>
917dc539 15#include <sys/systm.h>
cb796a23
KB
16#include <sys/proc.h>
17#include <sys/mount.h>
18#include <sys/time.h>
19#include <sys/vnode.h>
807cc430 20#include <sys/stat.h>
cb796a23
KB
21#include <sys/namei.h>
22#include <sys/ucred.h>
23#include <sys/buf.h>
24#include <sys/errno.h>
25#include <sys/malloc.h>
8981e258
MH
26#include <sys/domain.h>
27#include <sys/mbuf.h>
3c4390e8 28
bb4964fd
KM
29#include <vm/vm.h>
30#include <sys/sysctl.h>
31
021de758
JSP
32#include <miscfs/specfs/specdev.h>
33
807cc430
KM
34enum vtype iftovt_tab[16] = {
35 VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
36 VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
37};
38int vttoif_tab[9] = {
39 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
40 S_IFSOCK, S_IFIFO, S_IFMT,
41};
42
e3249ec0
KM
43/*
44 * Insq/Remq for the vnode usage lists.
45 */
3fc2ac18
KM
46#define bufinsvn(bp, dp) LIST_INSERT_HEAD(dp, bp, b_vnbufs)
47#define bufremvn(bp) { \
48 LIST_REMOVE(bp, b_vnbufs); \
49 (bp)->b_vnbufs.le_next = NOLIST; \
50}
51
52TAILQ_HEAD(freelst, vnode) vnode_free_list; /* vnode free list */
53struct mntlist mountlist; /* mounted filesystem list */
e3249ec0 54
3c4390e8 55/*
3fc2ac18 56 * Initialize the vnode management data structures.
3c4390e8 57 */
3fc2ac18 58vntblinit()
3c4390e8
KM
59{
60
3fc2ac18
KM
61 TAILQ_INIT(&vnode_free_list);
62 TAILQ_INIT(&mountlist);
3c4390e8
KM
63}
64
65/*
66 * Lock a filesystem.
67 * Used to prevent access to it while mounting and unmounting.
68 */
69vfs_lock(mp)
70 register struct mount *mp;
71{
72
54fb9dc2
KM
73 while(mp->mnt_flag & MNT_MLOCK) {
74 mp->mnt_flag |= MNT_MWAIT;
594501df
KM
75 sleep((caddr_t)mp, PVFS);
76 }
54fb9dc2 77 mp->mnt_flag |= MNT_MLOCK;
3c4390e8
KM
78 return (0);
79}
80
81/*
82 * Unlock a locked filesystem.
83 * Panic if filesystem is not locked.
84 */
85void
86vfs_unlock(mp)
87 register struct mount *mp;
88{
89
54fb9dc2 90 if ((mp->mnt_flag & MNT_MLOCK) == 0)
36ef03ec 91 panic("vfs_unlock: not locked");
54fb9dc2
KM
92 mp->mnt_flag &= ~MNT_MLOCK;
93 if (mp->mnt_flag & MNT_MWAIT) {
94 mp->mnt_flag &= ~MNT_MWAIT;
3c4390e8
KM
95 wakeup((caddr_t)mp);
96 }
97}
98
36ef03ec
KM
99/*
100 * Mark a mount point as busy.
101 * Used to synchronize access and to delay unmounting.
102 */
103vfs_busy(mp)
104 register struct mount *mp;
105{
106
54fb9dc2
KM
107 while(mp->mnt_flag & MNT_MPBUSY) {
108 mp->mnt_flag |= MNT_MPWANT;
109 sleep((caddr_t)&mp->mnt_flag, PVFS);
36ef03ec 110 }
d8b63609
KM
111 if (mp->mnt_flag & MNT_UNMOUNT)
112 return (1);
54fb9dc2 113 mp->mnt_flag |= MNT_MPBUSY;
36ef03ec
KM
114 return (0);
115}
116
117/*
118 * Free a busy filesystem.
119 * Panic if filesystem is not busy.
120 */
36ef03ec
KM
121vfs_unbusy(mp)
122 register struct mount *mp;
123{
124
54fb9dc2 125 if ((mp->mnt_flag & MNT_MPBUSY) == 0)
36ef03ec 126 panic("vfs_unbusy: not busy");
54fb9dc2
KM
127 mp->mnt_flag &= ~MNT_MPBUSY;
128 if (mp->mnt_flag & MNT_MPWANT) {
129 mp->mnt_flag &= ~MNT_MPWANT;
130 wakeup((caddr_t)&mp->mnt_flag);
36ef03ec
KM
131 }
132}
133
3c4390e8
KM
134/*
135 * Lookup a mount point by filesystem identifier.
136 */
137struct mount *
138getvfs(fsid)
139 fsid_t *fsid;
140{
141 register struct mount *mp;
142
3fc2ac18 143 for (mp = mountlist.tqh_first; mp != NULL; mp = mp->mnt_list.tqe_next) {
54fb9dc2 144 if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
3fc2ac18 145 mp->mnt_stat.f_fsid.val[1] == fsid->val[1])
d713f801 146 return (mp);
3fc2ac18 147 }
d713f801 148 return ((struct mount *)0);
3c4390e8
KM
149}
150
917dc539
JSP
151/*
152 * Get a new unique fsid
153 */
154void
155getnewfsid(mp, mtype)
156 struct mount *mp;
157 int mtype;
158{
159static u_short xxxfs_mntid;
160
161 fsid_t tfsid;
162
1209b9a4 163 mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
917dc539
JSP
164 mp->mnt_stat.f_fsid.val[1] = mtype;
165 if (xxxfs_mntid == 0)
166 ++xxxfs_mntid;
1209b9a4 167 tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
917dc539 168 tfsid.val[1] = mtype;
3fc2ac18 169 if (mountlist.tqh_first != NULL) {
17fd1cc7
JSP
170 while (getvfs(&tfsid)) {
171 tfsid.val[0]++;
172 xxxfs_mntid++;
173 }
917dc539
JSP
174 }
175 mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
176}
177
3c4390e8
KM
178/*
179 * Set vnode attributes to VNOVAL
180 */
181void vattr_null(vap)
182 register struct vattr *vap;
183{
184
185 vap->va_type = VNON;
83504fd5 186 vap->va_size = vap->va_bytes = VNOVAL;
3c4390e8 187 vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
83504fd5
KM
188 vap->va_fsid = vap->va_fileid =
189 vap->va_blocksize = vap->va_rdev =
ecf75a7d
KM
190 vap->va_atime.ts_sec = vap->va_atime.ts_nsec =
191 vap->va_mtime.ts_sec = vap->va_mtime.ts_nsec =
192 vap->va_ctime.ts_sec = vap->va_ctime.ts_nsec =
8cf4d4fb 193 vap->va_flags = vap->va_gen = VNOVAL;
fcba749b 194 vap->va_vaflags = 0;
3c4390e8 195}
c60798ca 196
/*
 * Routines having to do with the management of the vnode table.
 */
extern int (**dead_vnodeop_p)();	/* ops vector for dead vnodes */
extern void vclean();
long numvnodes;				/* vnodes currently allocated */
extern struct vattr va_null;
int newnodes = 0;			/* debug: getnewvnode call count */
int printcnt = 0;			/* debug: trace next N vnode ops */
207/*
208 * Return the next vnode from the free list.
209 */
210getnewvnode(tag, mp, vops, vpp)
211 enum vtagtype tag;
212 struct mount *mp;
cf74dd57 213 int (**vops)();
36d09cb1
KM
214 struct vnode **vpp;
215{
c768e50f 216 register struct vnode *vp;
1f9d2249 217 int s;
36d09cb1 218
3e787e54 219newnodes++;
3fc2ac18
KM
220 if ((vnode_free_list.tqh_first == NULL &&
221 numvnodes < 2 * desiredvnodes) ||
ecf75a7d 222 numvnodes < desiredvnodes) {
aacc1bff
KM
223 vp = (struct vnode *)malloc((u_long)sizeof *vp,
224 M_VNODE, M_WAITOK);
1a80f56e 225 bzero((char *)vp, sizeof *vp);
3e787e54
KM
226 vp->v_freelist.tqe_next = (struct vnode *)0xdeadf;
227 vp->v_freelist.tqe_prev = (struct vnode **)0xdeadb;
228 vp->v_mntvnodes.le_next = (struct vnode *)0xdeadf;
229 vp->v_mntvnodes.le_prev = (struct vnode **)0xdeadb;
1a80f56e 230 numvnodes++;
3e787e54 231 vp->v_spare[0] = numvnodes;
1a80f56e 232 } else {
3fc2ac18 233 if ((vp = vnode_free_list.tqh_first) == NULL) {
1a80f56e
KM
234 tablefull("vnode");
235 *vpp = 0;
236 return (ENFILE);
237 }
238 if (vp->v_usecount)
239 panic("free vnode isn't");
3e787e54
KM
240 if (vp->v_freelist.tqe_next == (struct vnode *)0xdeadf ||
241 vp->v_freelist.tqe_prev == (struct vnode **)0xdeadb)
242 panic("getnewvnode: not on queue");
3fc2ac18 243 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
3e787e54 244 vp->v_freelist.tqe_next = (struct vnode *)0xdeadf;
0bf9bb76
KM
245 /* see comment on why 0xdeadb is set at end of vgone (below) */
246 vp->v_freelist.tqe_prev = (struct vnode **)0xdeadb;
39b99eb6 247 vp->v_lease = NULL;
1a80f56e
KM
248 if (vp->v_type != VBAD)
249 vgone(vp);
1f9d2249 250#ifdef DIAGNOSTIC
2345b093
KM
251 if (vp->v_data)
252 panic("cleaned vnode isn't");
1f9d2249
MS
253 s = splbio();
254 if (vp->v_numoutput)
255 panic("Clean vnode has pending I/O's");
256 splx(s);
257#endif
1a80f56e 258 vp->v_flag = 0;
1a80f56e 259 vp->v_lastr = 0;
2b5ada11
MH
260 vp->v_ralen = 0;
261 vp->v_maxra = 0;
1f9d2249
MS
262 vp->v_lastw = 0;
263 vp->v_lasta = 0;
264 vp->v_cstart = 0;
265 vp->v_clen = 0;
1a80f56e 266 vp->v_socket = 0;
36d09cb1 267 }
b027498b 268 vp->v_type = VNON;
36d09cb1
KM
269 cache_purge(vp);
270 vp->v_tag = tag;
ef24f6dd 271 vp->v_op = vops;
36d09cb1 272 insmntque(vp, mp);
36d09cb1 273 *vpp = vp;
0bf9bb76 274 vp->v_usecount = 1;
3fc2ac18 275 vp->v_data = 0;
3e787e54 276 if (printcnt-- > 0) vprint("getnewvnode got", vp);
36d09cb1
KM
277 return (0);
278}
8981e258 279
36d09cb1
KM
280/*
281 * Move a vnode from one mount queue to another.
282 */
283insmntque(vp, mp)
284 register struct vnode *vp;
285 register struct mount *mp;
286{
36d09cb1
KM
287
288 /*
289 * Delete from old mount point vnode list, if on one.
290 */
3e787e54
KM
291 if (vp->v_mount != NULL) {
292 if (vp->v_mntvnodes.le_next == (struct vnode *)0xdeadf ||
293 vp->v_mntvnodes.le_prev == (struct vnode **)0xdeadb)
294 panic("insmntque: not on queue");
3fc2ac18 295 LIST_REMOVE(vp, v_mntvnodes);
3e787e54
KM
296 vp->v_mntvnodes.le_next = (struct vnode *)0xdeadf;
297 vp->v_mntvnodes.le_prev = (struct vnode **)0xdeadb;
298 }
36d09cb1
KM
299 /*
300 * Insert into list of vnodes for the new mount point, if available.
301 */
3fc2ac18 302 if ((vp->v_mount = mp) == NULL)
36d09cb1 303 return;
3e787e54
KM
304 if (vp->v_mntvnodes.le_next != (struct vnode *)0xdeadf ||
305 vp->v_mntvnodes.le_prev != (struct vnode **)0xdeadb)
306 panic("insmntque: already on queue");
3fc2ac18 307 LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
36d09cb1
KM
308}
309
76429560
KM
310/*
311 * Update outstanding I/O count and do wakeup if requested.
312 */
313vwakeup(bp)
314 register struct buf *bp;
315{
316 register struct vnode *vp;
317
a9338fad 318 bp->b_flags &= ~B_WRITEINPROG;
76429560
KM
319 if (vp = bp->b_vp) {
320 vp->v_numoutput--;
1f9d2249
MS
321 if (vp->v_numoutput < 0)
322 panic("vwakeup: neg numoutput");
76429560
KM
323 if ((vp->v_flag & VBWAIT) && vp->v_numoutput <= 0) {
324 if (vp->v_numoutput < 0)
325 panic("vwakeup: neg numoutput");
326 vp->v_flag &= ~VBWAIT;
327 wakeup((caddr_t)&vp->v_numoutput);
328 }
329 }
330}
331
76429560
KM
332/*
333 * Flush out and invalidate all buffers associated with a vnode.
334 * Called with the underlying object locked.
335 */
d024c2ce 336int
c33e9e8b 337vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
76429560 338 register struct vnode *vp;
12079a9d 339 int flags;
d024c2ce
KM
340 struct ucred *cred;
341 struct proc *p;
c33e9e8b 342 int slpflag, slptimeo;
76429560
KM
343{
344 register struct buf *bp;
345 struct buf *nbp, *blist;
d024c2ce 346 int s, error;
76429560 347
12079a9d 348 if (flags & V_SAVE) {
d024c2ce
KM
349 if (error = VOP_FSYNC(vp, cred, MNT_WAIT, p))
350 return (error);
3fc2ac18 351 if (vp->v_dirtyblkhd.lh_first != NULL)
d024c2ce
KM
352 panic("vinvalbuf: dirty bufs");
353 }
76429560 354 for (;;) {
3fc2ac18 355 if ((blist = vp->v_cleanblkhd.lh_first) && flags & V_SAVEMETA)
12079a9d 356 while (blist && blist->b_lblkno < 0)
3fc2ac18
KM
357 blist = blist->b_vnbufs.le_next;
358 if (!blist && (blist = vp->v_dirtyblkhd.lh_first) &&
e3249ec0 359 (flags & V_SAVEMETA))
12079a9d 360 while (blist && blist->b_lblkno < 0)
3fc2ac18 361 blist = blist->b_vnbufs.le_next;
12079a9d 362 if (!blist)
76429560 363 break;
12079a9d 364
76429560 365 for (bp = blist; bp; bp = nbp) {
3fc2ac18 366 nbp = bp->b_vnbufs.le_next;
12079a9d
MS
367 if (flags & V_SAVEMETA && bp->b_lblkno < 0)
368 continue;
76429560
KM
369 s = splbio();
370 if (bp->b_flags & B_BUSY) {
371 bp->b_flags |= B_WANTED;
c33e9e8b
KM
372 error = tsleep((caddr_t)bp,
373 slpflag | (PRIBIO + 1), "vinvalbuf",
374 slptimeo);
76429560 375 splx(s);
c33e9e8b
KM
376 if (error)
377 return (error);
76429560
KM
378 break;
379 }
380 bremfree(bp);
381 bp->b_flags |= B_BUSY;
382 splx(s);
c33e9e8b
KM
383 /*
384 * XXX Since there are no node locks for NFS, I believe
385 * there is a slight chance that a delayed write will
386 * occur while sleeping just above, so check for it.
387 */
388 if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
389 (void) VOP_BWRITE(bp);
390 break;
391 }
12079a9d 392 bp->b_flags |= B_INVAL;
76429560
KM
393 brelse(bp);
394 }
395 }
e3249ec0 396 if (!(flags & V_SAVEMETA) &&
3fc2ac18 397 (vp->v_dirtyblkhd.lh_first || vp->v_cleanblkhd.lh_first))
76429560 398 panic("vinvalbuf: flush failed");
d024c2ce 399 return (0);
76429560
KM
400}
401
402/*
403 * Associate a buffer with a vnode.
404 */
405bgetvp(vp, bp)
406 register struct vnode *vp;
407 register struct buf *bp;
408{
409
410 if (bp->b_vp)
411 panic("bgetvp: not free");
412 VHOLD(vp);
413 bp->b_vp = vp;
414 if (vp->v_type == VBLK || vp->v_type == VCHR)
415 bp->b_dev = vp->v_rdev;
416 else
417 bp->b_dev = NODEV;
418 /*
419 * Insert onto list for new vnode.
420 */
e3249ec0 421 bufinsvn(bp, &vp->v_cleanblkhd);
76429560
KM
422}
423
424/*
425 * Disassociate a buffer from a vnode.
426 */
427brelvp(bp)
428 register struct buf *bp;
429{
76429560
KM
430 struct vnode *vp;
431
432 if (bp->b_vp == (struct vnode *) 0)
433 panic("brelvp: NULL");
434 /*
435 * Delete from old vnode list, if on one.
436 */
3fc2ac18 437 if (bp->b_vnbufs.le_next != NOLIST)
e3249ec0 438 bufremvn(bp);
76429560
KM
439 vp = bp->b_vp;
440 bp->b_vp = (struct vnode *) 0;
441 HOLDRELE(vp);
442}
443
444/*
445 * Reassign a buffer from one vnode to another.
446 * Used to assign file specific control information
447 * (indirect blocks) to the vnode to which they belong.
448 */
449reassignbuf(bp, newvp)
450 register struct buf *bp;
451 register struct vnode *newvp;
452{
3fc2ac18 453 register struct buflists *listheadp;
76429560 454
e5c3f16e
KM
455 if (newvp == NULL) {
456 printf("reassignbuf: NULL");
457 return;
458 }
76429560
KM
459 /*
460 * Delete from old vnode list, if on one.
461 */
3fc2ac18 462 if (bp->b_vnbufs.le_next != NOLIST)
e3249ec0 463 bufremvn(bp);
76429560
KM
464 /*
465 * If dirty, put on list of dirty buffers;
466 * otherwise insert onto list of clean buffers.
467 */
468 if (bp->b_flags & B_DELWRI)
469 listheadp = &newvp->v_dirtyblkhd;
470 else
471 listheadp = &newvp->v_cleanblkhd;
e3249ec0 472 bufinsvn(bp, listheadp);
76429560
KM
473}
474
36d09cb1 475/*
ef24f6dd
KM
476 * Create a vnode for a block device.
477 * Used for root filesystem, argdev, and swap areas.
478 * Also used for memory file system special devices.
479 */
480bdevvp(dev, vpp)
481 dev_t dev;
482 struct vnode **vpp;
483{
ef24f6dd
KM
484 register struct vnode *vp;
485 struct vnode *nvp;
486 int error;
487
1c89915d
KM
488 if (dev == NODEV)
489 return (0);
9342689a 490 error = getnewvnode(VT_NON, (struct mount *)0, spec_vnodeop_p, &nvp);
ef24f6dd
KM
491 if (error) {
492 *vpp = 0;
493 return (error);
494 }
495 vp = nvp;
496 vp->v_type = VBLK;
c0de8792 497 if (nvp = checkalias(vp, dev, (struct mount *)0)) {
ef24f6dd
KM
498 vput(vp);
499 vp = nvp;
500 }
501 *vpp = vp;
502 return (0);
503}
504
505/*
506 * Check to see if the new vnode represents a special device
507 * for which we already have a vnode (either because of
508 * bdevvp() or because of a different vnode representing
509 * the same block device). If such an alias exists, deallocate
f0556f86 510 * the existing contents and return the aliased vnode. The
ef24f6dd
KM
511 * caller is responsible for filling it with its new contents.
512 */
513struct vnode *
c0de8792 514checkalias(nvp, nvp_rdev, mp)
ef24f6dd 515 register struct vnode *nvp;
c0de8792 516 dev_t nvp_rdev;
ef24f6dd
KM
517 struct mount *mp;
518{
519 register struct vnode *vp;
c0de8792 520 struct vnode **vpp;
ef24f6dd
KM
521
522 if (nvp->v_type != VBLK && nvp->v_type != VCHR)
54fb9dc2 523 return (NULLVP);
c0de8792
KM
524
525 vpp = &speclisth[SPECHASH(nvp_rdev)];
ef24f6dd 526loop:
c0de8792
KM
527 for (vp = *vpp; vp; vp = vp->v_specnext) {
528 if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type)
ef24f6dd 529 continue;
c0de8792
KM
530 /*
531 * Alias, but not in use, so flush it out.
532 */
7f7b7d89 533 if (vp->v_usecount == 0) {
c0de8792
KM
534 vgone(vp);
535 goto loop;
536 }
3fc2ac18 537 if (vget(vp, 1))
ef62830d 538 goto loop;
ef24f6dd
KM
539 break;
540 }
c0de8792 541 if (vp == NULL || vp->v_tag != VT_NON) {
c0de8792
KM
542 MALLOC(nvp->v_specinfo, struct specinfo *,
543 sizeof(struct specinfo), M_VNODE, M_WAITOK);
544 nvp->v_rdev = nvp_rdev;
7f7b7d89 545 nvp->v_hashchain = vpp;
c0de8792 546 nvp->v_specnext = *vpp;
2c957a90 547 nvp->v_specflags = 0;
c0de8792 548 *vpp = nvp;
40452d5e
KM
549 if (vp != NULL) {
550 nvp->v_flag |= VALIASED;
551 vp->v_flag |= VALIASED;
552 vput(vp);
553 }
54fb9dc2 554 return (NULLVP);
ef24f6dd 555 }
2bae1875
KM
556 VOP_UNLOCK(vp);
557 vclean(vp, 0);
ef24f6dd
KM
558 vp->v_op = nvp->v_op;
559 vp->v_tag = nvp->v_tag;
560 nvp->v_type = VNON;
561 insmntque(vp, mp);
562 return (vp);
563}
564
565/*
566 * Grab a particular vnode from the free list, increment its
567 * reference count and lock it. The vnode lock bit is set the
568 * vnode is being eliminated in vgone. The process is awakened
569 * when the transition is completed, and an error returned to
570 * indicate that the vnode is no longer usable (possibly having
571 * been changed to a new file system type).
36d09cb1 572 */
3fc2ac18 573vget(vp, lockflag)
36d09cb1 574 register struct vnode *vp;
3fc2ac18 575 int lockflag;
36d09cb1 576{
36d09cb1 577
ef24f6dd
KM
578 if (vp->v_flag & VXLOCK) {
579 vp->v_flag |= VXWANT;
580 sleep((caddr_t)vp, PINOD);
581 return (1);
582 }
3e787e54
KM
583 if (vp->v_usecount == 0) {
584 if (vp->v_freelist.tqe_next == (struct vnode *)0xdeadf ||
585 vp->v_freelist.tqe_prev == (struct vnode **)0xdeadb)
586 panic("vget: not on queue");
3fc2ac18 587 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
3e787e54
KM
588 vp->v_freelist.tqe_next = (struct vnode *)0xdeadf;
589 vp->v_freelist.tqe_prev = (struct vnode **)0xdeadb;
590 }
ec04fc59 591 vp->v_usecount++;
3fc2ac18
KM
592 if (lockflag)
593 VOP_LOCK(vp);
3e787e54 594 if (printcnt-- > 0) vprint("vget got", vp);
ef24f6dd 595 return (0);
36d09cb1
KM
596}
597
d32390ea
KM
598int bug_refs = 0;
599
36d09cb1
KM
600/*
601 * Vnode reference, just increment the count
602 */
603void vref(vp)
604 struct vnode *vp;
605{
606
ec04fc59
KM
607 if (vp->v_usecount <= 0)
608 panic("vref used where vget required");
3e787e54
KM
609 if (vp->v_freelist.tqe_next != (struct vnode *)0xdeadf ||
610 vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb)
611 panic("vref: not free");
7f7b7d89 612 vp->v_usecount++;
3e787e54 613 if (printcnt-- > 0) vprint("vref get", vp);
d32390ea
KM
614 if (vp->v_type != VBLK && curproc)
615 curproc->p_spare[0]++;
616 if (bug_refs)
617 vprint("vref: ");
36d09cb1
KM
618}
619
/*
 * vput(), just unlock and vrele()
 */
void vput(vp)
	register struct vnode *vp;
{

	VOP_UNLOCK(vp);
	vrele(vp);
}
630
631/*
632 * Vnode release.
633 * If count drops to zero, call inactive routine and return to freelist.
634 */
635void vrele(vp)
636 register struct vnode *vp;
637{
638
65c3b3a8 639#ifdef DIAGNOSTIC
36d09cb1 640 if (vp == NULL)
ef24f6dd 641 panic("vrele: null vp");
65c3b3a8 642#endif
7f7b7d89 643 vp->v_usecount--;
3e787e54 644 if (printcnt-- > 0) vprint("vrele put", vp);
d32390ea
KM
645 if (vp->v_type != VBLK && curproc)
646 curproc->p_spare[0]--;
647 if (bug_refs)
648 vprint("vref: ");
7f7b7d89 649 if (vp->v_usecount > 0)
36d09cb1 650 return;
65c3b3a8
KM
651#ifdef DIAGNOSTIC
652 if (vp->v_usecount != 0 || vp->v_writecount != 0) {
653 vprint("vrele: bad ref count", vp);
654 panic("vrele: ref cnt");
655 }
656#endif
dc998e72
KM
657 /*
658 * insert at tail of LRU list
659 */
3e787e54
KM
660 if (vp->v_freelist.tqe_next != (struct vnode *)0xdeadf ||
661 vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb)
662 panic("vrele: not free");
3fc2ac18 663 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
d024c2ce 664 VOP_INACTIVE(vp);
ef24f6dd
KM
665}
666
7f7b7d89
KM
667/*
668 * Page or buffer structure gets a reference.
669 */
451df175 670void vhold(vp)
7f7b7d89
KM
671 register struct vnode *vp;
672{
673
674 vp->v_holdcnt++;
675}
676
677/*
678 * Page or buffer structure frees a reference.
679 */
451df175 680void holdrele(vp)
7f7b7d89
KM
681 register struct vnode *vp;
682{
683
684 if (vp->v_holdcnt <= 0)
685 panic("holdrele: holdcnt");
686 vp->v_holdcnt--;
687}
688
f0556f86
KM
689/*
690 * Remove any vnodes in the vnode table belonging to mount point mp.
691 *
692 * If MNT_NOFORCE is specified, there should not be any active ones,
693 * return error if any are found (nb: this is a user error, not a
694 * system error). If MNT_FORCE is specified, detach any active vnodes
695 * that are found.
696 */
8981e258 697#ifdef DIAGNOSTIC
bb4964fd
KM
698int busyprt = 0; /* print out busy vnodes */
699struct ctldebug debug1 = { "busyprt", &busyprt };
8981e258 700#endif
f0556f86
KM
701
702vflush(mp, skipvp, flags)
703 struct mount *mp;
704 struct vnode *skipvp;
705 int flags;
706{
707 register struct vnode *vp, *nvp;
708 int busy = 0;
709
54fb9dc2 710 if ((mp->mnt_flag & MNT_MPBUSY) == 0)
36ef03ec 711 panic("vflush: not busy");
4597dd33 712loop:
3fc2ac18 713 for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) {
4597dd33
KM
714 if (vp->v_mount != mp)
715 goto loop;
3fc2ac18 716 nvp = vp->v_mntvnodes.le_next;
f0556f86
KM
717 /*
718 * Skip over a selected vnode.
f0556f86
KM
719 */
720 if (vp == skipvp)
721 continue;
36ef03ec
KM
722 /*
723 * Skip over a vnodes marked VSYSTEM.
724 */
725 if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM))
726 continue;
da374605
KM
727 /*
728 * If WRITECLOSE is set, only flush out regular file
729 * vnodes open for writing.
730 */
731 if ((flags & WRITECLOSE) &&
732 (vp->v_writecount == 0 || vp->v_type != VREG))
733 continue;
f0556f86 734 /*
7f7b7d89 735 * With v_usecount == 0, all we need to do is clear
f0556f86
KM
736 * out the vnode data structures and we are done.
737 */
7f7b7d89 738 if (vp->v_usecount == 0) {
f0556f86
KM
739 vgone(vp);
740 continue;
741 }
742 /*
da374605 743 * If FORCECLOSE is set, forcibly close the vnode.
f0556f86
KM
744 * For block or character devices, revert to an
745 * anonymous device. For all other files, just kill them.
746 */
36ef03ec 747 if (flags & FORCECLOSE) {
f0556f86
KM
748 if (vp->v_type != VBLK && vp->v_type != VCHR) {
749 vgone(vp);
750 } else {
751 vclean(vp, 0);
9342689a 752 vp->v_op = spec_vnodeop_p;
f0556f86
KM
753 insmntque(vp, (struct mount *)0);
754 }
755 continue;
756 }
8981e258 757#ifdef DIAGNOSTIC
f0556f86 758 if (busyprt)
0bf84b18 759 vprint("vflush: busy vnode", vp);
8981e258 760#endif
f0556f86
KM
761 busy++;
762 }
763 if (busy)
764 return (EBUSY);
765 return (0);
766}
767
ef24f6dd
KM
768/*
769 * Disassociate the underlying file system from a vnode.
ef24f6dd 770 */
ecf75a7d
KM
771void
772vclean(vp, flags)
ef24f6dd 773 register struct vnode *vp;
aacc1bff 774 int flags;
ef24f6dd 775{
2bae1875 776 int active;
ef24f6dd 777
2bae1875
KM
778 /*
779 * Check to see if the vnode is in use.
0bf84b18
KM
780 * If so we have to reference it before we clean it out
781 * so that its count cannot fall to zero and generate a
782 * race against ourselves to recycle it.
2bae1875 783 */
7f7b7d89 784 if (active = vp->v_usecount)
2bae1875 785 VREF(vp);
669df1aa
KM
786 /*
787 * Even if the count is zero, the VOP_INACTIVE routine may still
788 * have the object locked while it cleans it out. The VOP_LOCK
789 * ensures that the VOP_INACTIVE routine is done with its work.
790 * For active vnodes, it ensures that no other activity can
791 * occur while the underlying object is being cleaned out.
792 */
793 VOP_LOCK(vp);
2bae1875
KM
794 /*
795 * Prevent the vnode from being recycled or
796 * brought into use while we clean it out.
797 */
0bf84b18
KM
798 if (vp->v_flag & VXLOCK)
799 panic("vclean: deadlock");
ef24f6dd 800 vp->v_flag |= VXLOCK;
0bf84b18 801 /*
669df1aa 802 * Clean out any buffers associated with the vnode.
0bf84b18 803 */
36ef03ec 804 if (flags & DOCLOSE)
c33e9e8b 805 vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
ef24f6dd 806 /*
669df1aa
KM
807 * Any other processes trying to obtain this lock must first
808 * wait for VXLOCK to clear, then call the new lock operation.
ef24f6dd 809 */
669df1aa 810 VOP_UNLOCK(vp);
ef24f6dd 811 /*
669df1aa
KM
812 * If purging an active vnode, it must be closed and
813 * deactivated before being reclaimed.
ef24f6dd 814 */
2bae1875 815 if (active) {
669df1aa
KM
816 if (flags & DOCLOSE)
817 VOP_CLOSE(vp, IO_NDELAY, NOCRED, NULL);
818 VOP_INACTIVE(vp);
ef24f6dd
KM
819 }
820 /*
821 * Reclaim the vnode.
822 */
669df1aa 823 if (VOP_RECLAIM(vp))
ef24f6dd 824 panic("vclean: cannot reclaim");
2bae1875
KM
825 if (active)
826 vrele(vp);
38c46eee 827
ef24f6dd 828 /*
669df1aa 829 * Done with purge, notify sleepers of the grim news.
ef24f6dd 830 */
669df1aa
KM
831 vp->v_op = dead_vnodeop_p;
832 vp->v_tag = VT_NON;
ef24f6dd
KM
833 vp->v_flag &= ~VXLOCK;
834 if (vp->v_flag & VXWANT) {
835 vp->v_flag &= ~VXWANT;
836 wakeup((caddr_t)vp);
837 }
838}
839
ef62830d
KM
840/*
841 * Eliminate all activity associated with the requested vnode
842 * and with all vnodes aliased to the requested vnode.
843 */
844void vgoneall(vp)
845 register struct vnode *vp;
846{
7f7b7d89 847 register struct vnode *vq;
ef62830d 848
7a7b3a95
KM
849 if (vp->v_flag & VALIASED) {
850 /*
851 * If a vgone (or vclean) is already in progress,
852 * wait until it is done and return.
853 */
854 if (vp->v_flag & VXLOCK) {
855 vp->v_flag |= VXWANT;
856 sleep((caddr_t)vp, PINOD);
857 return;
858 }
859 /*
860 * Ensure that vp will not be vgone'd while we
861 * are eliminating its aliases.
862 */
863 vp->v_flag |= VXLOCK;
864 while (vp->v_flag & VALIASED) {
865 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
866 if (vq->v_rdev != vp->v_rdev ||
867 vq->v_type != vp->v_type || vp == vq)
868 continue;
869 vgone(vq);
870 break;
871 }
ef62830d 872 }
7a7b3a95
KM
873 /*
874 * Remove the lock so that vgone below will
875 * really eliminate the vnode after which time
876 * vgone will awaken any sleepers.
877 */
878 vp->v_flag &= ~VXLOCK;
ef62830d
KM
879 }
880 vgone(vp);
881}
882
ef24f6dd
KM
883/*
884 * Eliminate all activity associated with a vnode
885 * in preparation for reuse.
886 */
887void vgone(vp)
888 register struct vnode *vp;
889{
7f7b7d89 890 register struct vnode *vq;
c0de8792 891 struct vnode *vx;
ef24f6dd 892
4f55e3ec
KM
893 /*
894 * If a vgone (or vclean) is already in progress,
895 * wait until it is done and return.
896 */
897 if (vp->v_flag & VXLOCK) {
898 vp->v_flag |= VXWANT;
899 sleep((caddr_t)vp, PINOD);
900 return;
901 }
ef24f6dd
KM
902 /*
903 * Clean out the filesystem specific data.
904 */
36ef03ec 905 vclean(vp, DOCLOSE);
ef24f6dd
KM
906 /*
907 * Delete from old mount point vnode list, if on one.
908 */
3fc2ac18 909 if (vp->v_mount != NULL) {
3e787e54
KM
910 if (vp->v_mntvnodes.le_next == (struct vnode *)0xdeadf ||
911 vp->v_mntvnodes.le_prev == (struct vnode **)0xdeadb)
912 panic("vgone: not on queue");
3fc2ac18 913 LIST_REMOVE(vp, v_mntvnodes);
3e787e54
KM
914 vp->v_mntvnodes.le_next = (struct vnode *)0xdeadf;
915 vp->v_mntvnodes.le_prev = (struct vnode **)0xdeadb;
d10e9258 916 vp->v_mount = NULL;
ef24f6dd
KM
917 }
918 /*
919 * If special device, remove it from special device alias list.
920 */
921 if (vp->v_type == VBLK || vp->v_type == VCHR) {
7f7b7d89
KM
922 if (*vp->v_hashchain == vp) {
923 *vp->v_hashchain = vp->v_specnext;
ef24f6dd 924 } else {
7f7b7d89 925 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
c0de8792 926 if (vq->v_specnext != vp)
ef24f6dd 927 continue;
c0de8792 928 vq->v_specnext = vp->v_specnext;
ef24f6dd
KM
929 break;
930 }
c0de8792 931 if (vq == NULL)
ef24f6dd
KM
932 panic("missing bdev");
933 }
c0de8792 934 if (vp->v_flag & VALIASED) {
4d1ee2eb 935 vx = NULL;
7f7b7d89 936 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
de81e10c
KM
937 if (vq->v_rdev != vp->v_rdev ||
938 vq->v_type != vp->v_type)
c0de8792 939 continue;
4d1ee2eb
CT
940 if (vx)
941 break;
c0de8792
KM
942 vx = vq;
943 }
4d1ee2eb 944 if (vx == NULL)
c0de8792 945 panic("missing alias");
4d1ee2eb 946 if (vq == NULL)
c0de8792
KM
947 vx->v_flag &= ~VALIASED;
948 vp->v_flag &= ~VALIASED;
949 }
950 FREE(vp->v_specinfo, M_VNODE);
951 vp->v_specinfo = NULL;
ef24f6dd
KM
952 }
953 /*
3387ef89 954 * If it is on the freelist and not already at the head,
0bf9bb76
KM
955 * move it to the head of the list. The test of the back
956 * pointer and the reference count of zero is because
957 * it will be removed from the free list by getnewvnode,
958 * but will not have its reference count incremented until
959 * after calling vgone. If the reference count were
960 * incremented first, vgone would (incorrectly) try to
961 * close the previous instance of the underlying object.
962 * So, the back pointer is explicitly set to `0xdeadb' in
963 * getnewvnode after removing it from the freelist to ensure
964 * that we do not try to move it here.
ef24f6dd 965 */
0bf9bb76
KM
966 if (vp->v_usecount == 0 &&
967 vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb &&
968 vnode_free_list.tqh_first != vp) {
3e787e54
KM
969 if (vp->v_freelist.tqe_next == (struct vnode *)0xdeadf)
970 panic("vgone: use 0, not free");
3fc2ac18
KM
971 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
972 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
ef24f6dd 973 }
2bae1875 974 vp->v_type = VBAD;
36d09cb1 975}
ef62830d 976
2bcd6066
KM
977/*
978 * Lookup a vnode by device number.
979 */
980vfinddev(dev, type, vpp)
981 dev_t dev;
982 enum vtype type;
983 struct vnode **vpp;
984{
985 register struct vnode *vp;
986
987 for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
988 if (dev != vp->v_rdev || type != vp->v_type)
989 continue;
990 *vpp = vp;
05378ee4 991 return (1);
2bcd6066 992 }
05378ee4 993 return (0);
2bcd6066
KM
994}
995
ef62830d
KM
996/*
997 * Calculate the total number of references to a special device.
998 */
999vcount(vp)
1000 register struct vnode *vp;
1001{
7f7b7d89 1002 register struct vnode *vq;
ef62830d
KM
1003 int count;
1004
1005 if ((vp->v_flag & VALIASED) == 0)
7f7b7d89 1006 return (vp->v_usecount);
ef62830d 1007loop:
7f7b7d89 1008 for (count = 0, vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
de81e10c 1009 if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
ef62830d
KM
1010 continue;
1011 /*
1012 * Alias, but not in use, so flush it out.
1013 */
7f7b7d89 1014 if (vq->v_usecount == 0) {
ef62830d
KM
1015 vgone(vq);
1016 goto loop;
1017 }
7f7b7d89 1018 count += vq->v_usecount;
ef62830d
KM
1019 }
1020 return (count);
1021}
0bf84b18
KM
1022
1023/*
1024 * Print out a description of a vnode.
1025 */
1026static char *typename[] =
61f846a8 1027 { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };
0bf84b18
KM
1028
1029vprint(label, vp)
1030 char *label;
1031 register struct vnode *vp;
1032{
f2f730c6 1033 char buf[64];
0bf84b18
KM
1034
1035 if (label != NULL)
1036 printf("%s: ", label);
3e787e54 1037 printf("num %d ", vp->v_spare[0]);
65c3b3a8
KM
1038 printf("type %s, usecount %d, writecount %d, refcount %d,",
1039 typename[vp->v_type], vp->v_usecount, vp->v_writecount,
1040 vp->v_holdcnt);
f2f730c6
KM
1041 buf[0] = '\0';
1042 if (vp->v_flag & VROOT)
1043 strcat(buf, "|VROOT");
1044 if (vp->v_flag & VTEXT)
1045 strcat(buf, "|VTEXT");
36ef03ec
KM
1046 if (vp->v_flag & VSYSTEM)
1047 strcat(buf, "|VSYSTEM");
36ef03ec
KM
1048 if (vp->v_flag & VXLOCK)
1049 strcat(buf, "|VXLOCK");
1050 if (vp->v_flag & VXWANT)
1051 strcat(buf, "|VXWANT");
f2f730c6
KM
1052 if (vp->v_flag & VBWAIT)
1053 strcat(buf, "|VBWAIT");
36ef03ec
KM
1054 if (vp->v_flag & VALIASED)
1055 strcat(buf, "|VALIASED");
f2f730c6
KM
1056 if (buf[0] != '\0')
1057 printf(" flags (%s)", &buf[1]);
3fc2ac18
KM
1058 if (vp->v_data == NULL) {
1059 printf("\n");
1060 } else {
1061 printf("\n\t");
1062 VOP_PRINT(vp);
1063 }
0bf84b18 1064}
985cbdd5 1065
#ifdef DEBUG
/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
printlockedvnodes()
{
	register struct mount *mp;
	register struct vnode *vp;

	printf("Locked vnodes\n");
	for (mp = mountlist.tqh_first; mp != NULL; mp = mp->mnt_list.tqe_next) {
		for (vp = mp->mnt_vnodelist.lh_first;
		    vp != NULL;
		    vp = vp->v_mntvnodes.le_next)
			if (VOP_ISLOCKED(vp))
				vprint((char *)0, vp);
	}
}
#endif
1086
985cbdd5
MT
1087int kinfo_vdebug = 1;
1088int kinfo_vgetfailed;
1089#define KINFO_VNODESLOP 10
1090/*
786fb484 1091 * Dump vnode list (via sysctl).
985cbdd5
MT
1092 * Copyout address of vnode followed by vnode.
1093 */
aacc1bff 1094/* ARGSUSED */
786fb484 1095sysctl_vnode(where, sizep)
985cbdd5 1096 char *where;
c1909da4 1097 size_t *sizep;
985cbdd5 1098{
3fc2ac18 1099 register struct mount *mp, *nmp;
985cbdd5 1100 struct vnode *vp;
985cbdd5 1101 register char *bp = where, *savebp;
5bf57294 1102 char *ewhere;
985cbdd5
MT
1103 int error;
1104
1105#define VPTRSZ sizeof (struct vnode *)
1106#define VNODESZ sizeof (struct vnode)
1107 if (where == NULL) {
786fb484 1108 *sizep = (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ);
985cbdd5
MT
1109 return (0);
1110 }
786fb484 1111 ewhere = where + *sizep;
985cbdd5 1112
3fc2ac18
KM
1113 for (mp = mountlist.tqh_first; mp != NULL; mp = nmp) {
1114 nmp = mp->mnt_list.tqe_next;
1115 if (vfs_busy(mp))
36ef03ec 1116 continue;
985cbdd5
MT
1117 savebp = bp;
1118again:
3fc2ac18
KM
1119 for (vp = mp->mnt_vnodelist.lh_first;
1120 vp != NULL;
1121 vp = vp->v_mntvnodes.le_next) {
41185b3b
KM
1122 /*
1123 * Check that the vp is still associated with
1124 * this filesystem. RACE: could have been
1125 * recycled onto the same filesystem.
1126 */
4597dd33
KM
1127 if (vp->v_mount != mp) {
1128 if (kinfo_vdebug)
1129 printf("kinfo: vp changed\n");
1130 bp = savebp;
1131 goto again;
1132 }
786fb484
KM
1133 if (bp + VPTRSZ + VNODESZ > ewhere) {
1134 *sizep = bp - where;
1135 return (ENOMEM);
1136 }
1137 if ((error = copyout((caddr_t)&vp, bp, VPTRSZ)) ||
1138 (error = copyout((caddr_t)vp, bp + VPTRSZ, VNODESZ)))
985cbdd5 1139 return (error);
985cbdd5 1140 bp += VPTRSZ + VNODESZ;
985cbdd5 1141 }
3fc2ac18
KM
1142 vfs_unbusy(mp);
1143 }
985cbdd5 1144
786fb484 1145 *sizep = bp - where;
985cbdd5
MT
1146 return (0);
1147}
8981e258
MH
1148
1149/*
1150 * Check to see if a filesystem is mounted on a block device.
1151 */
1152int
1153vfs_mountedon(vp)
1154 register struct vnode *vp;
1155{
1156 register struct vnode *vq;
1157
1158 if (vp->v_specflags & SI_MOUNTEDON)
1159 return (EBUSY);
1160 if (vp->v_flag & VALIASED) {
1161 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1162 if (vq->v_rdev != vp->v_rdev ||
1163 vq->v_type != vp->v_type)
1164 continue;
1165 if (vq->v_specflags & SI_MOUNTEDON)
1166 return (EBUSY);
1167 }
1168 }
1169 return (0);
1170}
1171
1172/*
1173 * Build hash lists of net addresses and hang them off the mount point.
1174 * Called by ufs_mount() to set up the lists of export addresses.
1175 */
1176static int
1177vfs_hang_addrlist(mp, nep, argp)
1178 struct mount *mp;
1179 struct netexport *nep;
1180 struct export_args *argp;
1181{
1182 register struct netcred *np;
1183 register struct radix_node_head *rnh;
1184 register int i;
1185 struct radix_node *rn;
1186 struct sockaddr *saddr, *smask = 0;
1187 struct domain *dom;
1188 int error;
1189
1190 if (argp->ex_addrlen == 0) {
1191 if (mp->mnt_flag & MNT_DEFEXPORTED)
1192 return (EPERM);
1193 np = &nep->ne_defexported;
1194 np->netc_exflags = argp->ex_flags;
1195 np->netc_anon = argp->ex_anon;
1196 np->netc_anon.cr_ref = 1;
1197 mp->mnt_flag |= MNT_DEFEXPORTED;
1198 return (0);
1199 }
1200 i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
1201 np = (struct netcred *)malloc(i, M_NETADDR, M_WAITOK);
1202 bzero((caddr_t)np, i);
1203 saddr = (struct sockaddr *)(np + 1);
1204 if (error = copyin(argp->ex_addr, (caddr_t)saddr, argp->ex_addrlen))
1205 goto out;
1206 if (saddr->sa_len > argp->ex_addrlen)
1207 saddr->sa_len = argp->ex_addrlen;
1208 if (argp->ex_masklen) {
1209 smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
1210 error = copyin(argp->ex_addr, (caddr_t)smask, argp->ex_masklen);
1211 if (error)
1212 goto out;
1213 if (smask->sa_len > argp->ex_masklen)
1214 smask->sa_len = argp->ex_masklen;
1215 }
1216 i = saddr->sa_family;
1217 if ((rnh = nep->ne_rtable[i]) == 0) {
1218 /*
1219 * Seems silly to initialize every AF when most are not
1220 * used, do so on demand here
1221 */
1222 for (dom = domains; dom; dom = dom->dom_next)
1223 if (dom->dom_family == i && dom->dom_rtattach) {
1224 dom->dom_rtattach((void **)&nep->ne_rtable[i],
1225 dom->dom_rtoffset);
1226 break;
1227 }
1228 if ((rnh = nep->ne_rtable[i]) == 0) {
1229 error = ENOBUFS;
1230 goto out;
1231 }
1232 }
1233 rn = (*rnh->rnh_addaddr)((caddr_t)saddr, (caddr_t)smask, rnh,
1234 np->netc_rnodes);
1235 if (rn == 0 || np != (struct netcred *)rn) { /* already exists */
1236 error = EPERM;
1237 goto out;
1238 }
1239 np->netc_exflags = argp->ex_flags;
1240 np->netc_anon = argp->ex_anon;
1241 np->netc_anon.cr_ref = 1;
1242 return (0);
1243out:
1244 free(np, M_NETADDR);
1245 return (error);
1246}
1247
1248/* ARGSUSED */
1249static int
1250vfs_free_netcred(rn, w)
1251 struct radix_node *rn;
1252 caddr_t w;
1253{
1254 register struct radix_node_head *rnh = (struct radix_node_head *)w;
1255
1256 (*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh);
1257 free((caddr_t)rn, M_NETADDR);
1258 return (0);
1259}
1260
1261/*
1262 * Free the net address hash lists that are hanging off the mount points.
1263 */
1264static void
1265vfs_free_addrlist(nep)
1266 struct netexport *nep;
1267{
1268 register int i;
1269 register struct radix_node_head *rnh;
1270
1271 for (i = 0; i <= AF_MAX; i++)
1272 if (rnh = nep->ne_rtable[i]) {
1273 (*rnh->rnh_walktree)(rnh, vfs_free_netcred,
1274 (caddr_t)rnh);
1275 free((caddr_t)rnh, M_RTABLE);
1276 nep->ne_rtable[i] = 0;
1277 }
1278}
1279
1280int
1281vfs_export(mp, nep, argp)
1282 struct mount *mp;
1283 struct netexport *nep;
1284 struct export_args *argp;
1285{
1286 int error;
1287
1288 if (argp->ex_flags & MNT_DELEXPORT) {
1289 vfs_free_addrlist(nep);
1290 mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
1291 }
1292 if (argp->ex_flags & MNT_EXPORTED) {
1293 if (error = vfs_hang_addrlist(mp, nep, argp))
1294 return (error);
1295 mp->mnt_flag |= MNT_EXPORTED;
1296 }
1297 return (0);
1298}
1299
1300struct netcred *
1301vfs_export_lookup(mp, nep, nam)
1302 register struct mount *mp;
1303 struct netexport *nep;
1304 struct mbuf *nam;
1305{
1306 register struct netcred *np;
1307 register struct radix_node_head *rnh;
1308 struct sockaddr *saddr;
1309
1310 np = NULL;
1311 if (mp->mnt_flag & MNT_EXPORTED) {
1312 /*
1313 * Lookup in the export list first.
1314 */
1315 if (nam != NULL) {
1316 saddr = mtod(nam, struct sockaddr *);
1317 rnh = nep->ne_rtable[saddr->sa_family];
1318 if (rnh != NULL) {
1319 np = (struct netcred *)
1320 (*rnh->rnh_matchaddr)((caddr_t)saddr,
1321 rnh);
1322 if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
1323 np = NULL;
1324 }
1325 }
1326 /*
1327 * If no address match, use the default if it exists.
1328 */
1329 if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
1330 np = &nep->ne_defexported;
1331 }
1332 return (np);
1333}