/*
 * vfs_subr.c — usr/src/sys/kern (unix-history archive copy).
 */
1/*
2 * Copyright (c) 1989 The Regents of the University of California.
3 * All rights reserved.
4 *
dbf0c423 5 * %sccs.include.redist.c%
3c4390e8 6 *
c9df57ae 7 * @(#)vfs_subr.c 7.75 (Berkeley) %G%
3c4390e8
KM
8 */
9
10/*
11 * External virtual filesystem routines
12 */
13
cb796a23
KB
14#include <sys/param.h>
15#include <sys/proc.h>
16#include <sys/mount.h>
17#include <sys/time.h>
18#include <sys/vnode.h>
807cc430 19#include <sys/stat.h>
cb796a23
KB
20#include <sys/specdev.h>
21#include <sys/namei.h>
22#include <sys/ucred.h>
23#include <sys/buf.h>
24#include <sys/errno.h>
25#include <sys/malloc.h>
3c4390e8 26
807cc430
KM
27enum vtype iftovt_tab[16] = {
28 VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
29 VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
30};
31int vttoif_tab[9] = {
32 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
33 S_IFSOCK, S_IFIFO, S_IFMT,
34};
35
3c4390e8
KM
36/*
37 * Remove a mount point from the list of mounted filesystems.
38 * Unmount of the root is illegal.
39 */
40void
41vfs_remove(mp)
42 register struct mount *mp;
43{
44
45 if (mp == rootfs)
46 panic("vfs_remove: unmounting root");
54fb9dc2
KM
47 mp->mnt_prev->mnt_next = mp->mnt_next;
48 mp->mnt_next->mnt_prev = mp->mnt_prev;
49 mp->mnt_vnodecovered->v_mountedhere = (struct mount *)0;
3c4390e8
KM
50 vfs_unlock(mp);
51}
52
53/*
54 * Lock a filesystem.
55 * Used to prevent access to it while mounting and unmounting.
56 */
57vfs_lock(mp)
58 register struct mount *mp;
59{
60
54fb9dc2
KM
61 while(mp->mnt_flag & MNT_MLOCK) {
62 mp->mnt_flag |= MNT_MWAIT;
594501df
KM
63 sleep((caddr_t)mp, PVFS);
64 }
54fb9dc2 65 mp->mnt_flag |= MNT_MLOCK;
3c4390e8
KM
66 return (0);
67}
68
69/*
70 * Unlock a locked filesystem.
71 * Panic if filesystem is not locked.
72 */
73void
74vfs_unlock(mp)
75 register struct mount *mp;
76{
77
54fb9dc2 78 if ((mp->mnt_flag & MNT_MLOCK) == 0)
36ef03ec 79 panic("vfs_unlock: not locked");
54fb9dc2
KM
80 mp->mnt_flag &= ~MNT_MLOCK;
81 if (mp->mnt_flag & MNT_MWAIT) {
82 mp->mnt_flag &= ~MNT_MWAIT;
3c4390e8
KM
83 wakeup((caddr_t)mp);
84 }
85}
86
36ef03ec
KM
87/*
88 * Mark a mount point as busy.
89 * Used to synchronize access and to delay unmounting.
90 */
91vfs_busy(mp)
92 register struct mount *mp;
93{
94
54fb9dc2
KM
95 while(mp->mnt_flag & MNT_MPBUSY) {
96 mp->mnt_flag |= MNT_MPWANT;
97 sleep((caddr_t)&mp->mnt_flag, PVFS);
36ef03ec 98 }
d8b63609
KM
99 if (mp->mnt_flag & MNT_UNMOUNT)
100 return (1);
54fb9dc2 101 mp->mnt_flag |= MNT_MPBUSY;
36ef03ec
KM
102 return (0);
103}
104
105/*
106 * Free a busy filesystem.
107 * Panic if filesystem is not busy.
108 */
36ef03ec
KM
109vfs_unbusy(mp)
110 register struct mount *mp;
111{
112
54fb9dc2 113 if ((mp->mnt_flag & MNT_MPBUSY) == 0)
36ef03ec 114 panic("vfs_unbusy: not busy");
54fb9dc2
KM
115 mp->mnt_flag &= ~MNT_MPBUSY;
116 if (mp->mnt_flag & MNT_MPWANT) {
117 mp->mnt_flag &= ~MNT_MPWANT;
118 wakeup((caddr_t)&mp->mnt_flag);
36ef03ec
KM
119 }
120}
121
3c4390e8
KM
122/*
123 * Lookup a mount point by filesystem identifier.
124 */
125struct mount *
126getvfs(fsid)
127 fsid_t *fsid;
128{
129 register struct mount *mp;
130
d713f801
KM
131 mp = rootfs;
132 do {
54fb9dc2
KM
133 if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
134 mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
d713f801 135 return (mp);
3c4390e8 136 }
54fb9dc2 137 mp = mp->mnt_next;
d713f801
KM
138 } while (mp != rootfs);
139 return ((struct mount *)0);
3c4390e8
KM
140}
141
142/*
143 * Set vnode attributes to VNOVAL
144 */
145void vattr_null(vap)
146 register struct vattr *vap;
147{
148
149 vap->va_type = VNON;
83504fd5
KM
150 vap->va_size = vap->va_bytes = VNOVAL;
151#ifdef _NOQUAD
152 vap->va_size_rsv = vap->va_bytes_rsv = VNOVAL;
153#endif
3c4390e8 154 vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
83504fd5
KM
155 vap->va_fsid = vap->va_fileid =
156 vap->va_blocksize = vap->va_rdev =
3c4390e8
KM
157 vap->va_atime.tv_sec = vap->va_atime.tv_usec =
158 vap->va_mtime.tv_sec = vap->va_mtime.tv_usec =
8cf4d4fb
KM
159 vap->va_ctime.tv_sec = vap->va_ctime.tv_usec =
160 vap->va_flags = vap->va_gen = VNOVAL;
3c4390e8 161}
c60798ca 162
36d09cb1
KM
/*
 * Routines having to do with the management of the vnode table.
 */
extern struct vnode *vfreeh, **vfreet;	/* free list head / tail pointer */
extern struct vnodeops dead_vnodeops;
extern struct vnodeops spec_vnodeops;
extern void vclean();
long numvnodes;				/* count of allocated vnodes */
extern struct vattr va_null;
173/*
174 * Return the next vnode from the free list.
175 */
176getnewvnode(tag, mp, vops, vpp)
177 enum vtagtype tag;
178 struct mount *mp;
cf74dd57 179 int (**vops)();
36d09cb1
KM
180 struct vnode **vpp;
181{
182 register struct vnode *vp, *vq;
183
1a80f56e 184 if (numvnodes < desiredvnodes) {
aacc1bff
KM
185 vp = (struct vnode *)malloc((u_long)sizeof *vp,
186 M_VNODE, M_WAITOK);
1a80f56e
KM
187 bzero((char *)vp, sizeof *vp);
188 numvnodes++;
189 } else {
190 if ((vp = vfreeh) == NULL) {
191 tablefull("vnode");
192 *vpp = 0;
193 return (ENFILE);
194 }
195 if (vp->v_usecount)
196 panic("free vnode isn't");
197 if (vq = vp->v_freef)
198 vq->v_freeb = &vfreeh;
199 else
200 vfreet = &vfreeh;
201 vfreeh = vq;
202 vp->v_freef = NULL;
203 vp->v_freeb = NULL;
39b99eb6 204 vp->v_lease = NULL;
1a80f56e
KM
205 if (vp->v_type != VBAD)
206 vgone(vp);
2345b093
KM
207 if (vp->v_data)
208 panic("cleaned vnode isn't");
1a80f56e 209 vp->v_flag = 0;
1a80f56e
KM
210 vp->v_lastr = 0;
211 vp->v_socket = 0;
36d09cb1 212 }
b027498b 213 vp->v_type = VNON;
36d09cb1
KM
214 cache_purge(vp);
215 vp->v_tag = tag;
ef24f6dd 216 vp->v_op = vops;
36d09cb1
KM
217 insmntque(vp, mp);
218 VREF(vp);
219 *vpp = vp;
220 return (0);
221}
222
223/*
224 * Move a vnode from one mount queue to another.
225 */
226insmntque(vp, mp)
227 register struct vnode *vp;
228 register struct mount *mp;
229{
8136adfc 230 register struct vnode *vq;
36d09cb1
KM
231
232 /*
233 * Delete from old mount point vnode list, if on one.
234 */
235 if (vp->v_mountb) {
236 if (vq = vp->v_mountf)
237 vq->v_mountb = vp->v_mountb;
238 *vp->v_mountb = vq;
239 }
240 /*
241 * Insert into list of vnodes for the new mount point, if available.
242 */
a45ff315 243 vp->v_mount = mp;
36d09cb1
KM
244 if (mp == NULL) {
245 vp->v_mountf = NULL;
246 vp->v_mountb = NULL;
247 return;
248 }
8136adfc
KM
249 if (vq = mp->mnt_mounth)
250 vq->v_mountb = &vp->v_mountf;
251 vp->v_mountf = vq;
252 vp->v_mountb = &mp->mnt_mounth;
253 mp->mnt_mounth = vp;
36d09cb1
KM
254}
255
76429560
KM
256/*
257 * Make sure all write-behind blocks associated
258 * with mount point are flushed out (from sync).
259 */
260mntflushbuf(mountp, flags)
261 struct mount *mountp;
262 int flags;
263{
264 register struct vnode *vp;
265
266 if ((mountp->mnt_flag & MNT_MPBUSY) == 0)
267 panic("mntflushbuf: not busy");
268loop:
269 for (vp = mountp->mnt_mounth; vp; vp = vp->v_mountf) {
270 if (VOP_ISLOCKED(vp))
271 continue;
272 if (vget(vp))
273 goto loop;
274 vflushbuf(vp, flags);
275 vput(vp);
276 if (vp->v_mount != mountp)
277 goto loop;
278 }
279}
280
281/*
282 * Flush all dirty buffers associated with a vnode.
283 */
284vflushbuf(vp, flags)
285 register struct vnode *vp;
286 int flags;
287{
288 register struct buf *bp;
289 struct buf *nbp;
290 int s;
291
292loop:
293 s = splbio();
294 for (bp = vp->v_dirtyblkhd; bp; bp = nbp) {
295 nbp = bp->b_blockf;
296 if ((bp->b_flags & B_BUSY))
297 continue;
298 if ((bp->b_flags & B_DELWRI) == 0)
299 panic("vflushbuf: not dirty");
300 bremfree(bp);
301 bp->b_flags |= B_BUSY;
302 splx(s);
303 /*
304 * Wait for I/O associated with indirect blocks to complete,
305 * since there is no way to quickly wait for them below.
306 * NB: This is really specific to ufs, but is done here
307 * as it is easier and quicker.
308 */
77dc8a8c 309 if (bp->b_vp == vp || (flags & B_SYNC) == 0)
76429560 310 (void) bawrite(bp);
77dc8a8c 311 else
76429560 312 (void) bwrite(bp);
77dc8a8c 313 goto loop;
76429560
KM
314 }
315 splx(s);
316 if ((flags & B_SYNC) == 0)
317 return;
318 s = splbio();
319 while (vp->v_numoutput) {
320 vp->v_flag |= VBWAIT;
321 sleep((caddr_t)&vp->v_numoutput, PRIBIO + 1);
322 }
323 splx(s);
324 if (vp->v_dirtyblkhd) {
325 vprint("vflushbuf: dirty", vp);
326 goto loop;
327 }
328}
329
330/*
331 * Update outstanding I/O count and do wakeup if requested.
332 */
333vwakeup(bp)
334 register struct buf *bp;
335{
336 register struct vnode *vp;
337
338 bp->b_dirtyoff = bp->b_dirtyend = 0;
339 if (vp = bp->b_vp) {
340 vp->v_numoutput--;
341 if ((vp->v_flag & VBWAIT) && vp->v_numoutput <= 0) {
342 if (vp->v_numoutput < 0)
343 panic("vwakeup: neg numoutput");
344 vp->v_flag &= ~VBWAIT;
345 wakeup((caddr_t)&vp->v_numoutput);
346 }
347 }
348}
349
350/*
351 * Invalidate in core blocks belonging to closed or umounted filesystem
352 *
353 * Go through the list of vnodes associated with the file system;
354 * for each vnode invalidate any buffers that it holds. Normally
355 * this routine is preceeded by a bflush call, so that on a quiescent
356 * filesystem there will be no dirty buffers when we are done. Binval
357 * returns the count of dirty buffers when it is finished.
358 */
359mntinvalbuf(mountp)
360 struct mount *mountp;
361{
362 register struct vnode *vp;
363 int dirty = 0;
364
365 if ((mountp->mnt_flag & MNT_MPBUSY) == 0)
366 panic("mntinvalbuf: not busy");
367loop:
368 for (vp = mountp->mnt_mounth; vp; vp = vp->v_mountf) {
369 if (vget(vp))
370 goto loop;
371 dirty += vinvalbuf(vp, 1);
372 vput(vp);
373 if (vp->v_mount != mountp)
374 goto loop;
375 }
376 return (dirty);
377}
378
379/*
380 * Flush out and invalidate all buffers associated with a vnode.
381 * Called with the underlying object locked.
382 */
383vinvalbuf(vp, save)
384 register struct vnode *vp;
385 int save;
386{
387 register struct buf *bp;
388 struct buf *nbp, *blist;
389 int s, dirty = 0;
390
391 for (;;) {
392 if (blist = vp->v_dirtyblkhd)
393 /* void */;
394 else if (blist = vp->v_cleanblkhd)
395 /* void */;
396 else
397 break;
398 for (bp = blist; bp; bp = nbp) {
399 nbp = bp->b_blockf;
400 s = splbio();
401 if (bp->b_flags & B_BUSY) {
402 bp->b_flags |= B_WANTED;
403 sleep((caddr_t)bp, PRIBIO + 1);
404 splx(s);
405 break;
406 }
407 bremfree(bp);
408 bp->b_flags |= B_BUSY;
409 splx(s);
410 if (save && (bp->b_flags & B_DELWRI)) {
24e9a81d
KM
411 dirty++;
412 (void) VOP_BWRITE(bp);
76429560
KM
413 break;
414 }
415 if (bp->b_vp != vp)
416 reassignbuf(bp, bp->b_vp);
417 else
418 bp->b_flags |= B_INVAL;
419 brelse(bp);
420 }
421 }
422 if (vp->v_dirtyblkhd || vp->v_cleanblkhd)
423 panic("vinvalbuf: flush failed");
424 return (dirty);
425}
426
427/*
428 * Associate a buffer with a vnode.
429 */
430bgetvp(vp, bp)
431 register struct vnode *vp;
432 register struct buf *bp;
433{
8136adfc
KM
434 register struct vnode *vq;
435 register struct buf *bq;
76429560
KM
436
437 if (bp->b_vp)
438 panic("bgetvp: not free");
439 VHOLD(vp);
440 bp->b_vp = vp;
441 if (vp->v_type == VBLK || vp->v_type == VCHR)
442 bp->b_dev = vp->v_rdev;
443 else
444 bp->b_dev = NODEV;
445 /*
446 * Insert onto list for new vnode.
447 */
8136adfc
KM
448 if (bq = vp->v_cleanblkhd)
449 bq->b_blockb = &bp->b_blockf;
450 bp->b_blockf = bq;
451 bp->b_blockb = &vp->v_cleanblkhd;
452 vp->v_cleanblkhd = bp;
76429560
KM
453}
454
455/*
456 * Disassociate a buffer from a vnode.
457 */
458brelvp(bp)
459 register struct buf *bp;
460{
461 struct buf *bq;
462 struct vnode *vp;
463
464 if (bp->b_vp == (struct vnode *) 0)
465 panic("brelvp: NULL");
466 /*
467 * Delete from old vnode list, if on one.
468 */
469 if (bp->b_blockb) {
470 if (bq = bp->b_blockf)
471 bq->b_blockb = bp->b_blockb;
472 *bp->b_blockb = bq;
473 bp->b_blockf = NULL;
474 bp->b_blockb = NULL;
475 }
476 vp = bp->b_vp;
477 bp->b_vp = (struct vnode *) 0;
478 HOLDRELE(vp);
479}
480
481/*
482 * Reassign a buffer from one vnode to another.
483 * Used to assign file specific control information
484 * (indirect blocks) to the vnode to which they belong.
485 */
486reassignbuf(bp, newvp)
487 register struct buf *bp;
488 register struct vnode *newvp;
489{
490 register struct buf *bq, **listheadp;
491
e5c3f16e
KM
492 if (newvp == NULL) {
493 printf("reassignbuf: NULL");
494 return;
495 }
76429560
KM
496 /*
497 * Delete from old vnode list, if on one.
498 */
499 if (bp->b_blockb) {
500 if (bq = bp->b_blockf)
501 bq->b_blockb = bp->b_blockb;
502 *bp->b_blockb = bq;
503 }
504 /*
505 * If dirty, put on list of dirty buffers;
506 * otherwise insert onto list of clean buffers.
507 */
508 if (bp->b_flags & B_DELWRI)
509 listheadp = &newvp->v_dirtyblkhd;
510 else
511 listheadp = &newvp->v_cleanblkhd;
8136adfc
KM
512 if (bq = *listheadp)
513 bq->b_blockb = &bp->b_blockf;
514 bp->b_blockf = bq;
515 bp->b_blockb = listheadp;
516 *listheadp = bp;
76429560
KM
517}
518
36d09cb1 519/*
ef24f6dd
KM
520 * Create a vnode for a block device.
521 * Used for root filesystem, argdev, and swap areas.
522 * Also used for memory file system special devices.
523 */
524bdevvp(dev, vpp)
525 dev_t dev;
526 struct vnode **vpp;
527{
ef24f6dd
KM
528 register struct vnode *vp;
529 struct vnode *nvp;
530 int error;
531
1c89915d
KM
532 if (dev == NODEV)
533 return (0);
4ef5d036 534 error = getnewvnode(VT_NON, (struct mount *)0, &spec_vnodeops, &nvp);
ef24f6dd
KM
535 if (error) {
536 *vpp = 0;
537 return (error);
538 }
539 vp = nvp;
540 vp->v_type = VBLK;
c0de8792 541 if (nvp = checkalias(vp, dev, (struct mount *)0)) {
ef24f6dd
KM
542 vput(vp);
543 vp = nvp;
544 }
545 *vpp = vp;
546 return (0);
547}
548
549/*
550 * Check to see if the new vnode represents a special device
551 * for which we already have a vnode (either because of
552 * bdevvp() or because of a different vnode representing
553 * the same block device). If such an alias exists, deallocate
f0556f86 554 * the existing contents and return the aliased vnode. The
ef24f6dd
KM
555 * caller is responsible for filling it with its new contents.
556 */
557struct vnode *
c0de8792 558checkalias(nvp, nvp_rdev, mp)
ef24f6dd 559 register struct vnode *nvp;
c0de8792 560 dev_t nvp_rdev;
ef24f6dd
KM
561 struct mount *mp;
562{
563 register struct vnode *vp;
c0de8792 564 struct vnode **vpp;
ef24f6dd
KM
565
566 if (nvp->v_type != VBLK && nvp->v_type != VCHR)
54fb9dc2 567 return (NULLVP);
c0de8792
KM
568
569 vpp = &speclisth[SPECHASH(nvp_rdev)];
ef24f6dd 570loop:
c0de8792
KM
571 for (vp = *vpp; vp; vp = vp->v_specnext) {
572 if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type)
ef24f6dd 573 continue;
c0de8792
KM
574 /*
575 * Alias, but not in use, so flush it out.
576 */
7f7b7d89 577 if (vp->v_usecount == 0) {
c0de8792
KM
578 vgone(vp);
579 goto loop;
580 }
ef62830d
KM
581 if (vget(vp))
582 goto loop;
ef24f6dd
KM
583 break;
584 }
c0de8792 585 if (vp == NULL || vp->v_tag != VT_NON) {
c0de8792
KM
586 MALLOC(nvp->v_specinfo, struct specinfo *,
587 sizeof(struct specinfo), M_VNODE, M_WAITOK);
588 nvp->v_rdev = nvp_rdev;
7f7b7d89 589 nvp->v_hashchain = vpp;
c0de8792 590 nvp->v_specnext = *vpp;
2c957a90 591 nvp->v_specflags = 0;
c0de8792 592 *vpp = nvp;
40452d5e
KM
593 if (vp != NULL) {
594 nvp->v_flag |= VALIASED;
595 vp->v_flag |= VALIASED;
596 vput(vp);
597 }
54fb9dc2 598 return (NULLVP);
ef24f6dd 599 }
2bae1875
KM
600 VOP_UNLOCK(vp);
601 vclean(vp, 0);
ef24f6dd
KM
602 vp->v_op = nvp->v_op;
603 vp->v_tag = nvp->v_tag;
604 nvp->v_type = VNON;
605 insmntque(vp, mp);
606 return (vp);
607}
608
609/*
610 * Grab a particular vnode from the free list, increment its
611 * reference count and lock it. The vnode lock bit is set the
612 * vnode is being eliminated in vgone. The process is awakened
613 * when the transition is completed, and an error returned to
614 * indicate that the vnode is no longer usable (possibly having
615 * been changed to a new file system type).
36d09cb1
KM
616 */
617vget(vp)
618 register struct vnode *vp;
619{
620 register struct vnode *vq;
621
ef24f6dd
KM
622 if (vp->v_flag & VXLOCK) {
623 vp->v_flag |= VXWANT;
624 sleep((caddr_t)vp, PINOD);
625 return (1);
626 }
7f7b7d89 627 if (vp->v_usecount == 0) {
ef24f6dd
KM
628 if (vq = vp->v_freef)
629 vq->v_freeb = vp->v_freeb;
630 else
631 vfreet = vp->v_freeb;
632 *vp->v_freeb = vq;
633 vp->v_freef = NULL;
634 vp->v_freeb = NULL;
635 }
36d09cb1 636 VREF(vp);
ef24f6dd
KM
637 VOP_LOCK(vp);
638 return (0);
36d09cb1
KM
639}
640
d32390ea
KM
641int bug_refs = 0;
642
36d09cb1
KM
643/*
644 * Vnode reference, just increment the count
645 */
646void vref(vp)
647 struct vnode *vp;
648{
649
7f7b7d89 650 vp->v_usecount++;
d32390ea
KM
651 if (vp->v_type != VBLK && curproc)
652 curproc->p_spare[0]++;
653 if (bug_refs)
654 vprint("vref: ");
36d09cb1
KM
655}
656
657/*
658 * vput(), just unlock and vrele()
659 */
660void vput(vp)
661 register struct vnode *vp;
662{
4d1ee2eb 663
36d09cb1
KM
664 VOP_UNLOCK(vp);
665 vrele(vp);
666}
667
668/*
669 * Vnode release.
670 * If count drops to zero, call inactive routine and return to freelist.
671 */
672void vrele(vp)
673 register struct vnode *vp;
674{
c099667a 675 struct proc *p = curproc; /* XXX */
36d09cb1 676
65c3b3a8 677#ifdef DIAGNOSTIC
36d09cb1 678 if (vp == NULL)
ef24f6dd 679 panic("vrele: null vp");
65c3b3a8 680#endif
7f7b7d89 681 vp->v_usecount--;
d32390ea
KM
682 if (vp->v_type != VBLK && curproc)
683 curproc->p_spare[0]--;
684 if (bug_refs)
685 vprint("vref: ");
7f7b7d89 686 if (vp->v_usecount > 0)
36d09cb1 687 return;
65c3b3a8
KM
688#ifdef DIAGNOSTIC
689 if (vp->v_usecount != 0 || vp->v_writecount != 0) {
690 vprint("vrele: bad ref count", vp);
691 panic("vrele: ref cnt");
692 }
693#endif
54fb9dc2 694 if (vfreeh == NULLVP) {
36d09cb1
KM
695 /*
696 * insert into empty list
697 */
698 vfreeh = vp;
699 vp->v_freeb = &vfreeh;
36d09cb1
KM
700 } else {
701 /*
702 * insert at tail of list
703 */
704 *vfreet = vp;
705 vp->v_freeb = vfreet;
36d09cb1 706 }
ef24f6dd
KM
707 vp->v_freef = NULL;
708 vfreet = &vp->v_freef;
c099667a 709 VOP_INACTIVE(vp, p);
ef24f6dd
KM
710}
711
7f7b7d89
KM
712/*
713 * Page or buffer structure gets a reference.
714 */
451df175 715void vhold(vp)
7f7b7d89
KM
716 register struct vnode *vp;
717{
718
719 vp->v_holdcnt++;
720}
721
722/*
723 * Page or buffer structure frees a reference.
724 */
451df175 725void holdrele(vp)
7f7b7d89
KM
726 register struct vnode *vp;
727{
728
729 if (vp->v_holdcnt <= 0)
730 panic("holdrele: holdcnt");
731 vp->v_holdcnt--;
732}
733
f0556f86
KM
734/*
735 * Remove any vnodes in the vnode table belonging to mount point mp.
736 *
737 * If MNT_NOFORCE is specified, there should not be any active ones,
738 * return error if any are found (nb: this is a user error, not a
739 * system error). If MNT_FORCE is specified, detach any active vnodes
740 * that are found.
741 */
742int busyprt = 0; /* patch to print out busy vnodes */
743
744vflush(mp, skipvp, flags)
745 struct mount *mp;
746 struct vnode *skipvp;
747 int flags;
748{
749 register struct vnode *vp, *nvp;
750 int busy = 0;
751
54fb9dc2 752 if ((mp->mnt_flag & MNT_MPBUSY) == 0)
36ef03ec 753 panic("vflush: not busy");
4597dd33 754loop:
54fb9dc2 755 for (vp = mp->mnt_mounth; vp; vp = nvp) {
4597dd33
KM
756 if (vp->v_mount != mp)
757 goto loop;
f0556f86
KM
758 nvp = vp->v_mountf;
759 /*
760 * Skip over a selected vnode.
f0556f86
KM
761 */
762 if (vp == skipvp)
763 continue;
36ef03ec
KM
764 /*
765 * Skip over a vnodes marked VSYSTEM.
766 */
767 if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM))
768 continue;
f0556f86 769 /*
7f7b7d89 770 * With v_usecount == 0, all we need to do is clear
f0556f86
KM
771 * out the vnode data structures and we are done.
772 */
7f7b7d89 773 if (vp->v_usecount == 0) {
f0556f86
KM
774 vgone(vp);
775 continue;
776 }
777 /*
778 * For block or character devices, revert to an
779 * anonymous device. For all other files, just kill them.
780 */
36ef03ec 781 if (flags & FORCECLOSE) {
f0556f86
KM
782 if (vp->v_type != VBLK && vp->v_type != VCHR) {
783 vgone(vp);
784 } else {
785 vclean(vp, 0);
786 vp->v_op = &spec_vnodeops;
787 insmntque(vp, (struct mount *)0);
788 }
789 continue;
790 }
791 if (busyprt)
0bf84b18 792 vprint("vflush: busy vnode", vp);
f0556f86
KM
793 busy++;
794 }
795 if (busy)
796 return (EBUSY);
797 return (0);
798}
799
ef24f6dd
KM
800/*
801 * Disassociate the underlying file system from a vnode.
ef24f6dd 802 */
36ef03ec 803void vclean(vp, flags)
ef24f6dd 804 register struct vnode *vp;
aacc1bff 805 int flags;
ef24f6dd
KM
806{
807 struct vnodeops *origops;
2bae1875 808 int active;
c099667a 809 struct proc *p = curproc; /* XXX */
ef24f6dd 810
2bae1875
KM
811 /*
812 * Check to see if the vnode is in use.
0bf84b18
KM
813 * If so we have to reference it before we clean it out
814 * so that its count cannot fall to zero and generate a
815 * race against ourselves to recycle it.
2bae1875 816 */
7f7b7d89 817 if (active = vp->v_usecount)
2bae1875 818 VREF(vp);
2bae1875
KM
819 /*
820 * Prevent the vnode from being recycled or
821 * brought into use while we clean it out.
822 */
0bf84b18
KM
823 if (vp->v_flag & VXLOCK)
824 panic("vclean: deadlock");
ef24f6dd 825 vp->v_flag |= VXLOCK;
0bf84b18
KM
826 /*
827 * Even if the count is zero, the VOP_INACTIVE routine may still
828 * have the object locked while it cleans it out. The VOP_LOCK
829 * ensures that the VOP_INACTIVE routine is done with its work.
830 * For active vnodes, it ensures that no other activity can
831 * occur while the buffer list is being cleaned out.
832 */
833 VOP_LOCK(vp);
36ef03ec 834 if (flags & DOCLOSE)
0bf84b18 835 vinvalbuf(vp, 1);
ef24f6dd
KM
836 /*
837 * Prevent any further operations on the vnode from
838 * being passed through to the old file system.
839 */
840 origops = vp->v_op;
841 vp->v_op = &dead_vnodeops;
842 vp->v_tag = VT_NON;
843 /*
2bae1875
KM
844 * If purging an active vnode, it must be unlocked, closed,
845 * and deactivated before being reclaimed.
ef24f6dd 846 */
20454d5a 847 (*(origops->vop_unlock))(vp);
2bae1875 848 if (active) {
36ef03ec 849 if (flags & DOCLOSE)
20454d5a
KM
850 (*(origops->vop_close))(vp, IO_NDELAY, NOCRED, p);
851 (*(origops->vop_inactive))(vp, p);
ef24f6dd
KM
852 }
853 /*
854 * Reclaim the vnode.
855 */
20454d5a 856 if ((*(origops->vop_reclaim))(vp))
ef24f6dd 857 panic("vclean: cannot reclaim");
2bae1875
KM
858 if (active)
859 vrele(vp);
ef24f6dd
KM
860 /*
861 * Done with purge, notify sleepers in vget of the grim news.
862 */
863 vp->v_flag &= ~VXLOCK;
864 if (vp->v_flag & VXWANT) {
865 vp->v_flag &= ~VXWANT;
866 wakeup((caddr_t)vp);
867 }
868}
869
ef62830d
KM
870/*
871 * Eliminate all activity associated with the requested vnode
872 * and with all vnodes aliased to the requested vnode.
873 */
874void vgoneall(vp)
875 register struct vnode *vp;
876{
7f7b7d89 877 register struct vnode *vq;
ef62830d 878
7a7b3a95
KM
879 if (vp->v_flag & VALIASED) {
880 /*
881 * If a vgone (or vclean) is already in progress,
882 * wait until it is done and return.
883 */
884 if (vp->v_flag & VXLOCK) {
885 vp->v_flag |= VXWANT;
886 sleep((caddr_t)vp, PINOD);
887 return;
888 }
889 /*
890 * Ensure that vp will not be vgone'd while we
891 * are eliminating its aliases.
892 */
893 vp->v_flag |= VXLOCK;
894 while (vp->v_flag & VALIASED) {
895 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
896 if (vq->v_rdev != vp->v_rdev ||
897 vq->v_type != vp->v_type || vp == vq)
898 continue;
899 vgone(vq);
900 break;
901 }
ef62830d 902 }
7a7b3a95
KM
903 /*
904 * Remove the lock so that vgone below will
905 * really eliminate the vnode after which time
906 * vgone will awaken any sleepers.
907 */
908 vp->v_flag &= ~VXLOCK;
ef62830d
KM
909 }
910 vgone(vp);
911}
912
ef24f6dd
KM
913/*
914 * Eliminate all activity associated with a vnode
915 * in preparation for reuse.
916 */
917void vgone(vp)
918 register struct vnode *vp;
919{
7f7b7d89 920 register struct vnode *vq;
c0de8792 921 struct vnode *vx;
ef24f6dd 922
4f55e3ec
KM
923 /*
924 * If a vgone (or vclean) is already in progress,
925 * wait until it is done and return.
926 */
927 if (vp->v_flag & VXLOCK) {
928 vp->v_flag |= VXWANT;
929 sleep((caddr_t)vp, PINOD);
930 return;
931 }
ef24f6dd
KM
932 /*
933 * Clean out the filesystem specific data.
934 */
36ef03ec 935 vclean(vp, DOCLOSE);
ef24f6dd
KM
936 /*
937 * Delete from old mount point vnode list, if on one.
938 */
939 if (vp->v_mountb) {
940 if (vq = vp->v_mountf)
941 vq->v_mountb = vp->v_mountb;
942 *vp->v_mountb = vq;
943 vp->v_mountf = NULL;
944 vp->v_mountb = NULL;
d10e9258 945 vp->v_mount = NULL;
ef24f6dd
KM
946 }
947 /*
948 * If special device, remove it from special device alias list.
949 */
950 if (vp->v_type == VBLK || vp->v_type == VCHR) {
7f7b7d89
KM
951 if (*vp->v_hashchain == vp) {
952 *vp->v_hashchain = vp->v_specnext;
ef24f6dd 953 } else {
7f7b7d89 954 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
c0de8792 955 if (vq->v_specnext != vp)
ef24f6dd 956 continue;
c0de8792 957 vq->v_specnext = vp->v_specnext;
ef24f6dd
KM
958 break;
959 }
c0de8792 960 if (vq == NULL)
ef24f6dd
KM
961 panic("missing bdev");
962 }
c0de8792 963 if (vp->v_flag & VALIASED) {
4d1ee2eb 964 vx = NULL;
7f7b7d89 965 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
de81e10c
KM
966 if (vq->v_rdev != vp->v_rdev ||
967 vq->v_type != vp->v_type)
c0de8792 968 continue;
4d1ee2eb
CT
969 if (vx)
970 break;
c0de8792
KM
971 vx = vq;
972 }
4d1ee2eb 973 if (vx == NULL)
c0de8792 974 panic("missing alias");
4d1ee2eb 975 if (vq == NULL)
c0de8792
KM
976 vx->v_flag &= ~VALIASED;
977 vp->v_flag &= ~VALIASED;
978 }
979 FREE(vp->v_specinfo, M_VNODE);
980 vp->v_specinfo = NULL;
ef24f6dd
KM
981 }
982 /*
983 * If it is on the freelist, move it to the head of the list.
984 */
985 if (vp->v_freeb) {
986 if (vq = vp->v_freef)
987 vq->v_freeb = vp->v_freeb;
988 else
989 vfreet = vp->v_freeb;
990 *vp->v_freeb = vq;
991 vp->v_freef = vfreeh;
992 vp->v_freeb = &vfreeh;
993 vfreeh->v_freeb = &vp->v_freef;
994 vfreeh = vp;
995 }
2bae1875 996 vp->v_type = VBAD;
36d09cb1 997}
ef62830d 998
2bcd6066
KM
999/*
1000 * Lookup a vnode by device number.
1001 */
1002vfinddev(dev, type, vpp)
1003 dev_t dev;
1004 enum vtype type;
1005 struct vnode **vpp;
1006{
1007 register struct vnode *vp;
1008
1009 for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
1010 if (dev != vp->v_rdev || type != vp->v_type)
1011 continue;
1012 *vpp = vp;
1013 return (0);
1014 }
1015 return (1);
1016}
1017
ef62830d
KM
1018/*
1019 * Calculate the total number of references to a special device.
1020 */
1021vcount(vp)
1022 register struct vnode *vp;
1023{
7f7b7d89 1024 register struct vnode *vq;
ef62830d
KM
1025 int count;
1026
1027 if ((vp->v_flag & VALIASED) == 0)
7f7b7d89 1028 return (vp->v_usecount);
ef62830d 1029loop:
7f7b7d89 1030 for (count = 0, vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
de81e10c 1031 if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
ef62830d
KM
1032 continue;
1033 /*
1034 * Alias, but not in use, so flush it out.
1035 */
7f7b7d89 1036 if (vq->v_usecount == 0) {
ef62830d
KM
1037 vgone(vq);
1038 goto loop;
1039 }
7f7b7d89 1040 count += vq->v_usecount;
ef62830d
KM
1041 }
1042 return (count);
1043}
0bf84b18
KM
1044
1045/*
1046 * Print out a description of a vnode.
1047 */
1048static char *typename[] =
61f846a8 1049 { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };
0bf84b18
KM
1050
1051vprint(label, vp)
1052 char *label;
1053 register struct vnode *vp;
1054{
f2f730c6 1055 char buf[64];
0bf84b18
KM
1056
1057 if (label != NULL)
1058 printf("%s: ", label);
65c3b3a8
KM
1059 printf("type %s, usecount %d, writecount %d, refcount %d,",
1060 typename[vp->v_type], vp->v_usecount, vp->v_writecount,
1061 vp->v_holdcnt);
f2f730c6
KM
1062 buf[0] = '\0';
1063 if (vp->v_flag & VROOT)
1064 strcat(buf, "|VROOT");
1065 if (vp->v_flag & VTEXT)
1066 strcat(buf, "|VTEXT");
36ef03ec
KM
1067 if (vp->v_flag & VSYSTEM)
1068 strcat(buf, "|VSYSTEM");
36ef03ec
KM
1069 if (vp->v_flag & VXLOCK)
1070 strcat(buf, "|VXLOCK");
1071 if (vp->v_flag & VXWANT)
1072 strcat(buf, "|VXWANT");
f2f730c6
KM
1073 if (vp->v_flag & VBWAIT)
1074 strcat(buf, "|VBWAIT");
36ef03ec
KM
1075 if (vp->v_flag & VALIASED)
1076 strcat(buf, "|VALIASED");
f2f730c6
KM
1077 if (buf[0] != '\0')
1078 printf(" flags (%s)", &buf[1]);
1079 printf("\n\t");
0bf84b18
KM
1080 VOP_PRINT(vp);
1081}
985cbdd5 1082
34c62e18
KM
#ifdef DEBUG
/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
printlockedvnodes()
{
	register struct mount *mp;
	register struct vnode *vp;

	printf("Locked vnodes\n");
	mp = rootfs;
	do {
		for (vp = mp->mnt_mounth; vp; vp = vp->v_mountf)
			if (VOP_ISLOCKED(vp))
				vprint((char *)0, vp);
		mp = mp->mnt_next;
	} while (mp != rootfs);
}
#endif
1103
985cbdd5
MT
1104int kinfo_vdebug = 1;
1105int kinfo_vgetfailed;
1106#define KINFO_VNODESLOP 10
1107/*
1108 * Dump vnode list (via kinfo).
1109 * Copyout address of vnode followed by vnode.
1110 */
aacc1bff 1111/* ARGSUSED */
985cbdd5 1112kinfo_vnode(op, where, acopysize, arg, aneeded)
aacc1bff 1113 int op;
985cbdd5 1114 char *where;
aacc1bff 1115 int *acopysize, arg, *aneeded;
985cbdd5
MT
1116{
1117 register struct mount *mp = rootfs;
36ef03ec 1118 struct mount *omp;
985cbdd5 1119 struct vnode *vp;
985cbdd5
MT
1120 register char *bp = where, *savebp;
1121 char *ewhere = where + *acopysize;
1122 int error;
1123
1124#define VPTRSZ sizeof (struct vnode *)
1125#define VNODESZ sizeof (struct vnode)
1126 if (where == NULL) {
1127 *aneeded = (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ);
1128 return (0);
1129 }
1130
985cbdd5 1131 do {
36ef03ec 1132 if (vfs_busy(mp)) {
54fb9dc2 1133 mp = mp->mnt_next;
36ef03ec
KM
1134 continue;
1135 }
985cbdd5
MT
1136 savebp = bp;
1137again:
4597dd33 1138 for (vp = mp->mnt_mounth; vp; vp = vp->v_mountf) {
41185b3b
KM
1139 /*
1140 * Check that the vp is still associated with
1141 * this filesystem. RACE: could have been
1142 * recycled onto the same filesystem.
1143 */
4597dd33
KM
1144 if (vp->v_mount != mp) {
1145 if (kinfo_vdebug)
1146 printf("kinfo: vp changed\n");
1147 bp = savebp;
1148 goto again;
1149 }
985cbdd5
MT
1150 if ((bp + VPTRSZ + VNODESZ <= ewhere) &&
1151 ((error = copyout((caddr_t)&vp, bp, VPTRSZ)) ||
1152 (error = copyout((caddr_t)vp, bp + VPTRSZ,
41185b3b 1153 VNODESZ))))
985cbdd5 1154 return (error);
985cbdd5 1155 bp += VPTRSZ + VNODESZ;
985cbdd5 1156 }
36ef03ec 1157 omp = mp;
54fb9dc2 1158 mp = mp->mnt_next;
36ef03ec 1159 vfs_unbusy(omp);
985cbdd5
MT
1160 } while (mp != rootfs);
1161
1162 *aneeded = bp - where;
1163 if (bp > ewhere)
1164 *acopysize = ewhere - where;
1165 else
1166 *acopysize = bp - where;
1167 return (0);
1168}