/*
 * Copyright (c) 1989 The Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vfs_subr.c	7.81 (Berkeley) %G%
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/specdev.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/malloc.h>

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int	vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};

/*
 * Remove a mount point from the list of mounted filesystems.
 * Unmount of the root is illegal.
 */
void
vfs_remove(mp)
	register struct mount *mp;
{

	if (mp == rootfs)
		panic("vfs_remove: unmounting root");
	mp->mnt_prev->mnt_next = mp->mnt_next;
	mp->mnt_next->mnt_prev = mp->mnt_prev;
	mp->mnt_vnodecovered->v_mountedhere = (struct mount *)0;
	vfs_unlock(mp);
}

/*
 * Lock a filesystem.
 * Used to prevent access to it while mounting and unmounting.
 */
vfs_lock(mp)
	register struct mount *mp;
{

	while (mp->mnt_flag & MNT_MLOCK) {
		mp->mnt_flag |= MNT_MWAIT;
		sleep((caddr_t)mp, PVFS);
	}
	mp->mnt_flag |= MNT_MLOCK;
	return (0);
}

/*
 * Unlock a locked filesystem.
 * Panic if filesystem is not locked.
 */
void
vfs_unlock(mp)
	register struct mount *mp;
{

	if ((mp->mnt_flag & MNT_MLOCK) == 0)
		panic("vfs_unlock: not locked");
	mp->mnt_flag &= ~MNT_MLOCK;
	if (mp->mnt_flag & MNT_MWAIT) {
		mp->mnt_flag &= ~MNT_MWAIT;
		wakeup((caddr_t)mp);
	}
}

/*
 * Mark a mount point as busy.
 * Used to synchronize access and to delay unmounting.
 */
vfs_busy(mp)
	register struct mount *mp;
{

	while (mp->mnt_flag & MNT_MPBUSY) {
		mp->mnt_flag |= MNT_MPWANT;
		sleep((caddr_t)&mp->mnt_flag, PVFS);
	}
	if (mp->mnt_flag & MNT_UNMOUNT)
		return (1);
	mp->mnt_flag |= MNT_MPBUSY;
	return (0);
}

/*
 * Free a busy filesystem.
 * Panic if filesystem is not busy.
 */
vfs_unbusy(mp)
	register struct mount *mp;
{

	if ((mp->mnt_flag & MNT_MPBUSY) == 0)
		panic("vfs_unbusy: not busy");
	mp->mnt_flag &= ~MNT_MPBUSY;
	if (mp->mnt_flag & MNT_MPWANT) {
		mp->mnt_flag &= ~MNT_MPWANT;
		wakeup((caddr_t)&mp->mnt_flag);
	}
}
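
/*
 * Usage sketch (assuming a mount-list walk like the one in
 * kinfo_vnode() below): code that traverses a mount point's vnode
 * list while unmounts may be in progress brackets the walk with
 * vfs_busy()/vfs_unbusy(), skipping the filesystem when vfs_busy()
 * returns 1 because an unmount is already under way:
 *
 *	if (vfs_busy(mp)) {
 *		mp = mp->mnt_next;
 *		continue;
 *	}
 *	for (vp = mp->mnt_mounth; vp; vp = vp->v_mountf)
 *		...;
 *	vfs_unbusy(mp);
 */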

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
getvfs(fsid)
	fsid_t *fsid;
{
	register struct mount *mp;

	mp = rootfs;
	do {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			return (mp);
		}
		mp = mp->mnt_next;
	} while (mp != rootfs);
	return ((struct mount *)0);
}
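
/*
 * Usage sketch: a caller holding only an fsid (for example one taken
 * out of a file handle; the name fh_fsid below is illustrative, not
 * from this file) can map it back to the mount point, treating a null
 * return as a filesystem that is no longer mounted:
 *
 *	struct mount *mp;
 *
 *	if ((mp = getvfs(&fh_fsid)) == (struct mount *)0)
 *		return (ESTALE);
 */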

/*
 * Get a new unique fsid
 */
void
getnewfsid(mp, mtype)
	struct mount *mp;
	int mtype;
{
	static u_short xxxfs_mntid;
	fsid_t tfsid;

	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + 11, 0);	/* XXX */
	mp->mnt_stat.f_fsid.val[1] = mtype;
	if (xxxfs_mntid == 0)
		++xxxfs_mntid;
	tfsid.val[0] = makedev(nblkdev, xxxfs_mntid);
	tfsid.val[1] = mtype;
	if (rootfs) {
		while (getvfs(&tfsid)) {
			tfsid.val[0]++;
			xxxfs_mntid++;
		}
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
}

/*
 * Set vnode attributes to VNOVAL
 */
void vattr_null(vap)
	register struct vattr *vap;
{

	vap->va_type = VNON;
	vap->va_size = vap->va_bytes = VNOVAL;
	vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
		vap->va_fsid = vap->va_fileid =
		vap->va_blocksize = vap->va_rdev =
		vap->va_atime.ts_sec = vap->va_atime.ts_nsec =
		vap->va_mtime.ts_sec = vap->va_mtime.ts_nsec =
		vap->va_ctime.ts_sec = vap->va_ctime.ts_nsec =
		vap->va_flags = vap->va_gen = VNOVAL;
}
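
/*
 * Usage sketch: a caller building a VOP_SETATTR request starts from a
 * vattr with every field set to VNOVAL and fills in only the fields it
 * wants changed, so the filesystem can tell "unspecified" from "set to
 * zero".  For example, to truncate a file (locals are illustrative):
 *
 *	struct vattr vattr;
 *
 *	vattr_null(&vattr);
 *	vattr.va_size = 0;
 *	error = VOP_SETATTR(vp, &vattr, cred, p);
 */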

/*
 * Routines having to do with the management of the vnode table.
 */
extern struct vnode *vfreeh, **vfreet;
extern int (**dead_vnodeop_p)();
extern int (**spec_vnodeop_p)();
extern void vclean();
long numvnodes;
extern struct vattr va_null;

/*
 * Return the next vnode from the free list.
 */
getnewvnode(tag, mp, vops, vpp)
	enum vtagtype tag;
	struct mount *mp;
	int (**vops)();
	struct vnode **vpp;
{
	register struct vnode *vp, *vq;

	if ((vfreeh == NULL && numvnodes < 2 * desiredvnodes) ||
	    numvnodes < desiredvnodes) {
		vp = (struct vnode *)malloc((u_long)sizeof *vp,
		    M_VNODE, M_WAITOK);
		bzero((char *)vp, sizeof *vp);
		numvnodes++;
	} else {
		if ((vp = vfreeh) == NULL) {
			tablefull("vnode");
			*vpp = 0;
			return (ENFILE);
		}
		if (vp->v_usecount)
			panic("free vnode isn't");
		if (vq = vp->v_freef)
			vq->v_freeb = &vfreeh;
		else
			vfreet = &vfreeh;
		vfreeh = vq;
		vp->v_freef = NULL;
		vp->v_freeb = NULL;
		vp->v_lease = NULL;
		if (vp->v_type != VBAD)
			vgone(vp);
		if (vp->v_data)
			panic("cleaned vnode isn't");
		vp->v_flag = 0;
		vp->v_lastr = 0;
		vp->v_socket = 0;
	}
	vp->v_type = VNON;
	cache_purge(vp);
	vp->v_tag = tag;
	vp->v_op = vops;
	insmntque(vp, mp);
	VREF(vp);
	*vpp = vp;
	return (0);
}
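
/*
 * Usage sketch: a filesystem's inode-allocation path is the normal
 * caller.  It hands getnewvnode() its tag, mount point, and ops
 * vector, then hangs its private data off v_data (the ufs names here
 * are only illustrative):
 *
 *	struct vnode *vp;
 *	int error;
 *
 *	if (error = getnewvnode(VT_UFS, mp, ufs_vnodeop_p, &vp))
 *		return (error);
 *	... initialize vp->v_data with the filesystem's inode ...
 */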

/*
 * Move a vnode from one mount queue to another.
 */
insmntque(vp, mp)
	register struct vnode *vp;
	register struct mount *mp;
{
	register struct vnode *vq;

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mountb) {
		if (vq = vp->v_mountf)
			vq->v_mountb = vp->v_mountb;
		*vp->v_mountb = vq;
	}
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	vp->v_mount = mp;
	if (mp == NULL) {
		vp->v_mountf = NULL;
		vp->v_mountb = NULL;
		return;
	}
	if (vq = mp->mnt_mounth)
		vq->v_mountb = &vp->v_mountf;
	vp->v_mountf = vq;
	vp->v_mountb = &mp->mnt_mounth;
	mp->mnt_mounth = vp;
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
vwakeup(bp)
	register struct buf *bp;
{
	register struct vnode *vp;

	bp->b_dirtyoff = bp->b_dirtyend = 0;
	if (vp = bp->b_vp) {
		vp->v_numoutput--;
		if ((vp->v_flag & VBWAIT) && vp->v_numoutput <= 0) {
			if (vp->v_numoutput < 0)
				panic("vwakeup: neg numoutput");
			vp->v_flag &= ~VBWAIT;
			wakeup((caddr_t)&vp->v_numoutput);
		}
	}
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
vinvalbuf(vp, save, cred, p)
	register struct vnode *vp;
	int save;
	struct ucred *cred;
	struct proc *p;
{
	USES_VOP_BWRITE;
	USES_VOP_FSYNC;
	register struct buf *bp;
	struct buf *nbp, *blist;
	int s, error;

	if (save) {
		if (error = VOP_FSYNC(vp, cred, MNT_WAIT, p))
			return (error);
		if (vp->v_dirtyblkhd != NULL)
			panic("vinvalbuf: dirty bufs");
	}
	for (;;) {
		if (blist = vp->v_cleanblkhd)
			/* void */;
		else if (blist = vp->v_dirtyblkhd)
			/* void */;
		else
			break;
		for (bp = blist; bp; bp = nbp) {
			nbp = bp->b_blockf;
			s = splbio();
			if (bp->b_flags & B_BUSY) {
				bp->b_flags |= B_WANTED;
				sleep((caddr_t)bp, PRIBIO + 1);
				splx(s);
				break;
			}
			bremfree(bp);
			bp->b_flags |= B_BUSY;
			splx(s);
			if (bp->b_vp != vp)
				reassignbuf(bp, bp->b_vp);
			else
				bp->b_flags |= B_INVAL;
			brelse(bp);
		}
	}
	if (vp->v_dirtyblkhd || vp->v_cleanblkhd)
		panic("vinvalbuf: flush failed");
	return (0);
}
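
/*
 * Usage sketch: the "save" argument selects between writing dirty
 * buffers back before invalidating them and simply discarding
 * everything.  vclean() below uses the former for DOCLOSE:
 *
 *	if (error = vinvalbuf(vp, 1, cred, p))
 *		return (error);
 *
 * while a caller that wants the cached blocks thrown away unwritten
 * would pass save == 0:
 *
 *	(void) vinvalbuf(vp, 0, NOCRED, NULL);
 */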

/*
 * Associate a buffer with a vnode.
 */
bgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{
	register struct vnode *vq;
	register struct buf *bq;

	if (bp->b_vp)
		panic("bgetvp: not free");
	VHOLD(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	if (bq = vp->v_cleanblkhd)
		bq->b_blockb = &bp->b_blockf;
	bp->b_blockf = bq;
	bp->b_blockb = &vp->v_cleanblkhd;
	vp->v_cleanblkhd = bp;
}

/*
 * Disassociate a buffer from a vnode.
 */
brelvp(bp)
	register struct buf *bp;
{
	struct buf *bq;
	struct vnode *vp;

	if (bp->b_vp == (struct vnode *) 0)
		panic("brelvp: NULL");
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_blockb) {
		if (bq = bp->b_blockf)
			bq->b_blockb = bp->b_blockb;
		*bp->b_blockb = bq;
		bp->b_blockf = NULL;
		bp->b_blockb = NULL;
	}
	vp = bp->b_vp;
	bp->b_vp = (struct vnode *) 0;
	HOLDRELE(vp);
}

/*
 * Reassign a buffer from one vnode to another.
 * Used to assign file specific control information
 * (indirect blocks) to the vnode to which they belong.
 */
reassignbuf(bp, newvp)
	register struct buf *bp;
	register struct vnode *newvp;
{
	register struct buf *bq, **listheadp;

	if (newvp == NULL) {
		printf("reassignbuf: NULL");
		return;
	}
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_blockb) {
		if (bq = bp->b_blockf)
			bq->b_blockb = bp->b_blockb;
		*bp->b_blockb = bq;
	}
	/*
	 * If dirty, put on list of dirty buffers;
	 * otherwise insert onto list of clean buffers.
	 */
	if (bp->b_flags & B_DELWRI)
		listheadp = &newvp->v_dirtyblkhd;
	else
		listheadp = &newvp->v_cleanblkhd;
	if (bq = *listheadp)
		bq->b_blockb = &bp->b_blockf;
	bp->b_blockf = bq;
	bp->b_blockb = listheadp;
	*listheadp = bp;
}

/*
 * Create a vnode for a block device.
 * Used for root filesystem, argdev, and swap areas.
 * Also used for memory file system special devices.
 */
bdevvp(dev, vpp)
	dev_t dev;
	struct vnode **vpp;
{
	register struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NODEV)
		return (0);
	error = getnewvnode(VT_NON, (struct mount *)0, spec_vnodeop_p, &nvp);
	if (error) {
		*vpp = 0;
		return (error);
	}
	vp = nvp;
	vp->v_type = VBLK;
	if (nvp = checkalias(vp, dev, (struct mount *)0)) {
		vput(vp);
		vp = nvp;
	}
	*vpp = vp;
	return (0);
}
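
/*
 * Usage sketch: machine-dependent and filesystem startup code
 * typically uses bdevvp() to manufacture the vnodes for the root,
 * argument, and swap devices, roughly:
 *
 *	if (bdevvp(rootdev, &rootvp))
 *		panic("cannot get root vnode");
 *
 * rootdev and rootvp are the conventional kernel globals; the actual
 * call sites are outside this file.
 */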

/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device). If such an alias exists, deallocate
 * the existing contents and return the aliased vnode. The
 * caller is responsible for filling it with its new contents.
 */
struct vnode *
checkalias(nvp, nvp_rdev, mp)
	register struct vnode *nvp;
	dev_t nvp_rdev;
	struct mount *mp;
{
	USES_VOP_UNLOCK;
	register struct vnode *vp;
	struct vnode **vpp;

	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		return (NULLVP);

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vp->v_usecount == 0) {
			vgone(vp);
			goto loop;
		}
		if (vget(vp))
			goto loop;
		break;
	}
	if (vp == NULL || vp->v_tag != VT_NON) {
		MALLOC(nvp->v_specinfo, struct specinfo *,
		    sizeof(struct specinfo), M_VNODE, M_WAITOK);
		nvp->v_rdev = nvp_rdev;
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		nvp->v_specflags = 0;
		*vpp = nvp;
		if (vp != NULL) {
			nvp->v_flag |= VALIASED;
			vp->v_flag |= VALIASED;
			vput(vp);
		}
		return (NULLVP);
	}
	VOP_UNLOCK(vp);
	vclean(vp, 0);
	vp->v_op = nvp->v_op;
	vp->v_tag = nvp->v_tag;
	nvp->v_type = VNON;
	insmntque(vp, mp);
	return (vp);
}

/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it. The vnode lock bit is set if the
 * vnode is being eliminated in vgone. The process is awakened
 * when the transition is completed, and an error returned to
 * indicate that the vnode is no longer usable (possibly having
 * been changed to a new file system type).
 */
vget(vp)
	register struct vnode *vp;
{
	USES_VOP_LOCK;
	register struct vnode *vq;

	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		sleep((caddr_t)vp, PINOD);
		return (1);
	}
	if (vp->v_usecount == 0) {
		if (vq = vp->v_freef)
			vq->v_freeb = vp->v_freeb;
		else
			vfreet = vp->v_freeb;
		*vp->v_freeb = vq;
		vp->v_freef = NULL;
		vp->v_freeb = NULL;
	}
	VREF(vp);
	VOP_LOCK(vp);
	return (0);
}

int bug_refs = 0;

/*
 * Vnode reference, just increment the count
 */
void vref(vp)
	struct vnode *vp;
{

	vp->v_usecount++;
	if (vp->v_type != VBLK && curproc)
		curproc->p_spare[0]++;
	if (bug_refs)
		vprint("vref: ", vp);
}

/*
 * vput(), just unlock and vrele()
 */
void vput(vp)
	register struct vnode *vp;
{
	USES_VOP_UNLOCK;

	VOP_UNLOCK(vp);
	vrele(vp);
}

/*
 * Vnode release.
 * If count drops to zero, call inactive routine and return to freelist.
 */
void vrele(vp)
	register struct vnode *vp;
{
	USES_VOP_INACTIVE;

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vrele: null vp");
#endif
	vp->v_usecount--;
	if (vp->v_type != VBLK && curproc)
		curproc->p_spare[0]--;
	if (bug_refs)
		vprint("vrele: ", vp);
	if (vp->v_usecount > 0)
		return;
#ifdef DIAGNOSTIC
	if (vp->v_usecount != 0 || vp->v_writecount != 0) {
		vprint("vrele: bad ref count", vp);
		panic("vrele: ref cnt");
	}
#endif
	if (vfreeh == NULLVP) {
		/*
		 * insert into empty list
		 */
		vfreeh = vp;
		vp->v_freeb = &vfreeh;
	} else {
		/*
		 * insert at tail of list
		 */
		*vfreet = vp;
		vp->v_freeb = vfreet;
	}
	vp->v_freef = NULL;
	vfreet = &vp->v_freef;
	VOP_INACTIVE(vp);
}

/*
 * Page or buffer structure gets a reference.
 */
void vhold(vp)
	register struct vnode *vp;
{

	vp->v_holdcnt++;
}

/*
 * Page or buffer structure frees a reference.
 */
void holdrele(vp)
	register struct vnode *vp;
{

	if (vp->v_holdcnt <= 0)
		panic("holdrele: holdcnt");
	vp->v_holdcnt--;
}

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If MNT_FORCE is specified, detach any active vnodes
 * that are found.
 */
int busyprt = 0;	/* patch to print out busy vnodes */

vflush(mp, skipvp, flags)
	struct mount *mp;
	struct vnode *skipvp;
	int flags;
{
	register struct vnode *vp, *nvp;
	int busy = 0;

	if ((mp->mnt_flag & MNT_MPBUSY) == 0)
		panic("vflush: not busy");
loop:
	for (vp = mp->mnt_mounth; vp; vp = nvp) {
		if (vp->v_mount != mp)
			goto loop;
		nvp = vp->v_mountf;
		/*
		 * Skip over a selected vnode.
		 */
		if (vp == skipvp)
			continue;
		/*
		 * Skip over vnodes marked VSYSTEM.
		 */
		if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM))
			continue;
		/*
		 * With v_usecount == 0, all we need to do is clear
		 * out the vnode data structures and we are done.
		 */
		if (vp->v_usecount == 0) {
			vgone(vp);
			continue;
		}
		/*
		 * For block or character devices, revert to an
		 * anonymous device. For all other files, just kill them.
		 */
		if (flags & FORCECLOSE) {
			if (vp->v_type != VBLK && vp->v_type != VCHR) {
				vgone(vp);
			} else {
				vclean(vp, 0);
				vp->v_op = spec_vnodeop_p;
				insmntque(vp, (struct mount *)0);
			}
			continue;
		}
		if (busyprt)
			vprint("vflush: busy vnode", vp);
		busy++;
	}
	if (busy)
		return (EBUSY);
	return (0);
}
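
/*
 * Usage sketch: a filesystem's unmount routine calls vflush() after
 * marking the mount busy, keeping back any vnode it still needs (a
 * quota or block-device vnode, say) via skipvp, and passing FORCECLOSE
 * only when the user asked for a forced unmount:
 *
 *	flags = (mntflags & MNT_FORCE) ? FORCECLOSE : 0;
 *	if (error = vflush(mp, skipvp, flags))
 *		return (error);
 */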

/*
 * Disassociate the underlying file system from a vnode.
 */
void
vclean(vp, flags)
	register struct vnode *vp;
	int flags;
{
	USES_VOP_LOCK;
	USES_VOP_UNLOCK;
	USES_VOP_CLOSE;
	USES_VOP_INACTIVE;
	USES_VOP_RECLAIM;
	int (**origops)();
	int active;

	/*
	 * Check to see if the vnode is in use.
	 * If so we have to reference it before we clean it out
	 * so that its count cannot fall to zero and generate a
	 * race against ourselves to recycle it.
	 */
	if (active = vp->v_usecount)
		VREF(vp);
	/*
	 * Prevent the vnode from being recycled or
	 * brought into use while we clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock");
	vp->v_flag |= VXLOCK;
	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still
	 * have the object locked while it cleans it out. The VOP_LOCK
	 * ensures that the VOP_INACTIVE routine is done with its work.
	 * For active vnodes, it ensures that no other activity can
	 * occur while the buffer list is being cleaned out.
	 */
	VOP_LOCK(vp);
	if (flags & DOCLOSE)
		vinvalbuf(vp, 1, NOCRED, NULL);
	/*
	 * Prevent any further operations on the vnode from
	 * being passed through to the old file system.
	 */
	origops = vp->v_op;
	vp->v_op = dead_vnodeop_p;
	vp->v_tag = VT_NON;
	/*
	 * If purging an active vnode, it must be unlocked, closed,
	 * and deactivated before being reclaimed.
	 */
	vop_unlock_a.a_desc = VDESC(vop_unlock);
	vop_unlock_a.a_vp = vp;
	VOCALL(origops, VOFFSET(vop_unlock), &vop_unlock_a);
	if (active) {
		/*
		 * Note: these next two calls imply
		 * that vop_close and vop_inactive implementations
		 * cannot count on the ops vector being correctly
		 * set.
		 */
		if (flags & DOCLOSE) {
			vop_close_a.a_desc = VDESC(vop_close);
			vop_close_a.a_vp = vp;
			vop_close_a.a_fflag = IO_NDELAY;
			vop_close_a.a_p = NULL;
			VOCALL(origops, VOFFSET(vop_close), &vop_close_a);
		}
		vop_inactive_a.a_desc = VDESC(vop_inactive);
		vop_inactive_a.a_vp = vp;
		VOCALL(origops, VOFFSET(vop_inactive), &vop_inactive_a);
	}
	/*
	 * Reclaim the vnode; emulate VOP_RECLAIM through the old
	 * ops vector.
	 */
	vop_reclaim_a.a_desc = VDESC(vop_reclaim);
	vop_reclaim_a.a_vp = vp;
	if (VOCALL(origops, VOFFSET(vop_reclaim), &vop_reclaim_a))
		panic("vclean: cannot reclaim");
	if (active)
		vrele(vp);

	/*
	 * Done with purge, notify sleepers in vget of the grim news.
	 */
	vp->v_flag &= ~VXLOCK;
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		wakeup((caddr_t)vp);
	}
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
void vgoneall(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;

	if (vp->v_flag & VALIASED) {
		/*
		 * If a vgone (or vclean) is already in progress,
		 * wait until it is done and return.
		 */
		if (vp->v_flag & VXLOCK) {
			vp->v_flag |= VXWANT;
			sleep((caddr_t)vp, PINOD);
			return;
		}
		/*
		 * Ensure that vp will not be vgone'd while we
		 * are eliminating its aliases.
		 */
		vp->v_flag |= VXLOCK;
		while (vp->v_flag & VALIASED) {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type || vp == vq)
					continue;
				vgone(vq);
				break;
			}
		}
		/*
		 * Remove the lock so that vgone below will
		 * really eliminate the vnode after which time
		 * vgone will awaken any sleepers.
		 */
		vp->v_flag &= ~VXLOCK;
	}
	vgone(vp);
}

/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.
 */
void vgone(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;
	struct vnode *vx;

	/*
	 * If a vgone (or vclean) is already in progress,
	 * wait until it is done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		sleep((caddr_t)vp, PINOD);
		return;
	}
	/*
	 * Clean out the filesystem specific data.
	 */
	vclean(vp, DOCLOSE);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mountb) {
		if (vq = vp->v_mountf)
			vq->v_mountb = vp->v_mountb;
		*vp->v_mountb = vq;
		vp->v_mountf = NULL;
		vp->v_mountb = NULL;
		vp->v_mount = NULL;
	}
	/*
	 * If special device, remove it from special device alias list.
	 */
	if (vp->v_type == VBLK || vp->v_type == VCHR) {
		if (*vp->v_hashchain == vp) {
			*vp->v_hashchain = vp->v_specnext;
		} else {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_specnext != vp)
					continue;
				vq->v_specnext = vp->v_specnext;
				break;
			}
			if (vq == NULL)
				panic("missing bdev");
		}
		if (vp->v_flag & VALIASED) {
			vx = NULL;
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type)
					continue;
				if (vx)
					break;
				vx = vq;
			}
			if (vx == NULL)
				panic("missing alias");
			if (vq == NULL)
				vx->v_flag &= ~VALIASED;
			vp->v_flag &= ~VALIASED;
		}
		FREE(vp->v_specinfo, M_VNODE);
		vp->v_specinfo = NULL;
	}
	/*
	 * If it is on the freelist, move it to the head of the list.
	 */
	if (vp->v_freeb) {
		if (vq = vp->v_freef)
			vq->v_freeb = vp->v_freeb;
		else
			vfreet = vp->v_freeb;
		*vp->v_freeb = vq;
		vp->v_freef = vfreeh;
		vp->v_freeb = &vfreeh;
		vfreeh->v_freeb = &vp->v_freef;
		vfreeh = vp;
	}
	vp->v_type = VBAD;
}

/*
 * Lookup a vnode by device number.
 */
vfinddev(dev, type, vpp)
	dev_t dev;
	enum vtype type;
	struct vnode **vpp;
{
	register struct vnode *vp;

	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
		if (dev != vp->v_rdev || type != vp->v_type)
			continue;
		*vpp = vp;
		return (0);
	}
	return (1);
}

/*
 * Calculate the total number of references to a special device.
 */
vcount(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;
	int count;

	if ((vp->v_flag & VALIASED) == 0)
		return (vp->v_usecount);
loop:
	for (count = 0, vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vq->v_usecount == 0) {
			vgone(vq);
			goto loop;
		}
		count += vq->v_usecount;
	}
	return (count);
}
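
/*
 * Usage sketch: device close routines can use vcount() to decide
 * whether this is the last close of the device across all of its
 * aliased vnodes, and only then call down into the driver:
 *
 *	if (vcount(vp) > 1)
 *		return (0);
 *	... this was the last reference, invoke the driver's close ...
 */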

/*
 * Print out a description of a vnode.
 */
static char *typename[] =
   { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };

vprint(label, vp)
	char *label;
	register struct vnode *vp;
{
	USES_VOP_PRINT;
	char buf[64];

	if (label != NULL)
		printf("%s: ", label);
	printf("type %s, usecount %d, writecount %d, refcount %d,",
	    typename[vp->v_type], vp->v_usecount, vp->v_writecount,
	    vp->v_holdcnt);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strcat(buf, "|VROOT");
	if (vp->v_flag & VTEXT)
		strcat(buf, "|VTEXT");
	if (vp->v_flag & VSYSTEM)
		strcat(buf, "|VSYSTEM");
	if (vp->v_flag & VXLOCK)
		strcat(buf, "|VXLOCK");
	if (vp->v_flag & VXWANT)
		strcat(buf, "|VXWANT");
	if (vp->v_flag & VBWAIT)
		strcat(buf, "|VBWAIT");
	if (vp->v_flag & VALIASED)
		strcat(buf, "|VALIASED");
	if (buf[0] != '\0')
		printf(" flags (%s)", &buf[1]);
	printf("\n\t");
	VOP_PRINT(vp);
}

#ifdef DEBUG
/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
printlockedvnodes()
{
	USES_VOP_ISLOCKED;
	register struct mount *mp;
	register struct vnode *vp;

	printf("Locked vnodes\n");
	mp = rootfs;
	do {
		for (vp = mp->mnt_mounth; vp; vp = vp->v_mountf)
			if (VOP_ISLOCKED(vp))
				vprint((char *)0, vp);
		mp = mp->mnt_next;
	} while (mp != rootfs);
}
#endif

int kinfo_vdebug = 1;
int kinfo_vgetfailed;
#define KINFO_VNODESLOP	10
/*
 * Dump vnode list (via kinfo).
 * Copyout address of vnode followed by vnode.
 */
/* ARGSUSED */
kinfo_vnode(op, where, acopysize, arg, aneeded)
	int op;
	char *where;
	int *acopysize, arg, *aneeded;
{
	register struct mount *mp = rootfs;
	struct mount *omp;
	struct vnode *vp;
	register char *bp = where, *savebp;
	char *ewhere;
	int error;

#define VPTRSZ	sizeof (struct vnode *)
#define VNODESZ	sizeof (struct vnode)
	if (where == NULL) {
		*aneeded = (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ);
		return (0);
	}
	ewhere = where + *acopysize;

	do {
		if (vfs_busy(mp)) {
			mp = mp->mnt_next;
			continue;
		}
		savebp = bp;
again:
		for (vp = mp->mnt_mounth; vp; vp = vp->v_mountf) {
			/*
			 * Check that the vp is still associated with
			 * this filesystem.  RACE: could have been
			 * recycled onto the same filesystem.
			 */
			if (vp->v_mount != mp) {
				if (kinfo_vdebug)
					printf("kinfo: vp changed\n");
				bp = savebp;
				goto again;
			}
			if ((bp + VPTRSZ + VNODESZ <= ewhere) &&
			    ((error = copyout((caddr_t)&vp, bp, VPTRSZ)) ||
			    (error = copyout((caddr_t)vp, bp + VPTRSZ,
			    VNODESZ))))
				return (error);
			bp += VPTRSZ + VNODESZ;
		}
		omp = mp;
		mp = mp->mnt_next;
		vfs_unbusy(omp);
	} while (mp != rootfs);

	*aneeded = bp - where;
	if (bp > ewhere)
		*acopysize = ewhere - where;
	else
		*acopysize = bp - where;
	return (0);
}