expand vput inline so that locking is managed properly
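Previously vput() was a thin wrapper that released the vnode lock and then called vrele(), so the use count was manipulated only after the lock was already gone. Expanding it inline (as in the diff below) lets vput() take the vnode interlock first, drop the count under it, and either unlock the vnode or, when the count reaches zero, requeue it and call VOP_INACTIVE() while still holding the lock (VOP_INACTIVE drops it). A rough sketch of the wrapper form being replaced (paraphrased, not the exact prior source):

	void
	vput(vp)
		struct vnode *vp;
	{
		VOP_UNLOCK(vp, 0, curproc);	/* lock dropped before the count */
		vrele(vp);			/* interlock taken only here */
	}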
[unix-history] usr/src/sys/kern/vfs_subr.c
index eeef374..cc599a5 100644
 /*
- * Copyright (c) 1989 The Regents of the University of California.
- * All rights reserved.
+ * Copyright (c) 1989, 1993
+ *     The Regents of the University of California.  All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
  *
- * Redistribution and use in source and binary forms are permitted
- * provided that the above copyright notice and this paragraph are
- * duplicated in all such forms and that any documentation,
- * advertising materials, and other materials related to such
- * distribution and use acknowledge that the software was developed
- * by the University of California, Berkeley.  The name of the
- * University may not be used to endorse or promote products derived
- * from this software without specific prior written permission.
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ * %sccs.include.redist.c%
  *
- *     @(#)vfs_subr.c  7.2 (Berkeley) %G%
+ *     @(#)vfs_subr.c  8.25 (Berkeley) %G%
  */
 
 /*
  * External virtual filesystem routines
  */
 
-#include "param.h"
-#include "mount.h"
-#include "time.h"
-#include "vnode.h"
-#include "errno.h"
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/mount.h>
+#include <sys/time.h>
+#include <sys/vnode.h>
+#include <sys/stat.h>
+#include <sys/namei.h>
+#include <sys/ucred.h>
+#include <sys/buf.h>
+#include <sys/errno.h>
+#include <sys/malloc.h>
+#include <sys/domain.h>
+#include <sys/mbuf.h>
+
+#include <vm/vm.h>
+#include <sys/sysctl.h>
+
+#include <miscfs/specfs/specdev.h>
+
+enum vtype iftovt_tab[16] = {
+       VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
+       VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
+};
+int    vttoif_tab[9] = {
+       0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
+       S_IFSOCK, S_IFIFO, S_IFMT,
+};
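These two tables back the IFTOVT() and VTTOIF() macros in <sys/vnode.h>, translating between the S_IFMT bits of an on-disk mode and the in-core vnode type. Roughly (the macro bodies are quoted from the header of this era; ip and i_mode stand in for a filesystem's inode):

	#define IFTOVT(mode)	(iftovt_tab[((mode) & S_IFMT) >> 12])
	#define VTTOIF(indx)	(vttoif_tab[(int)(indx)])

	vp->v_type = IFTOVT(ip->i_mode);	/* typical use when loading an inode */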
 
 /*
- * Add a new mount point to the list of mounted filesystems.
- * Lock the filesystem so that namei will not cross into the
- * the tree below the covered vnode.
+ * Insq/Remq for the vnode usage lists.
  */
-vfs_add(mountedvp, mp, flags)
-       register struct vnode *mountedvp;
-       register struct mount *mp;
-       int flags;
-{
-       register int error;
-
-       error = vfs_lock(mp);
-       if (error)
-               return (error);
-       if (mountedvp == (struct vnode *)0) {
-               /*
-                * We are mounting the root filesystem.
-                */
-               rootfs = mp;
-               mp->m_next = mp;
-               mp->m_prev = mp;
-       } else {
-               if (mountedvp->v_mountedhere != (struct mount *)0) {
-                       vfs_unlock(mp);
-                       return(EBUSY);
-               }
-               /*
-                * Put the new filesystem on the mount list after root.
-                */
-               mp->m_next = rootfs->m_next;
-               mp->m_prev = rootfs;
-               rootfs->m_next = mp;
-               mp->m_next->m_prev = mp;
-               mountedvp->v_mountedhere = mp;
-       }
-       mp->m_vnodecovered = mountedvp;
-       if (flags & M_RDONLY) {
-               mp->m_flag |= M_RDONLY;
-       } else {
-               mp->m_flag &= ~M_RDONLY;
-       }
-       if (flags & M_NOSUID) {
-               mp->m_flag |= M_NOSUID;
-       } else {
-               mp->m_flag &= ~M_NOSUID;
-       }
-       return (0);
+#define        bufinsvn(bp, dp)        LIST_INSERT_HEAD(dp, bp, b_vnbufs)
+#define        bufremvn(bp) {                                                  \
+       LIST_REMOVE(bp, b_vnbufs);                                      \
+       (bp)->b_vnbufs.le_next = NOLIST;                                \
 }
+TAILQ_HEAD(freelst, vnode) vnode_free_list;    /* vnode free list */
+struct mntlist mountlist;                      /* mounted filesystem list */
+static struct simplelock mntid_slock;
+struct simplelock mntvnode_slock;
+static struct simplelock spechash_slock;
+static struct simplelock vnode_free_list_slock;
 
 /*
- * Remove a mount point from the list of mounted filesystems.
- * Unmount of the root is illegal.
+ * Initialize the vnode management data structures.
  */
 void
-vfs_remove(mp)
-       register struct mount *mp;
+vntblinit()
 {
 
-       if (mp == rootfs)
-               panic("vfs_remove: unmounting root");
-       mp->m_prev->m_next = mp->m_next;
-       mp->m_next->m_prev = mp->m_prev;
-       mp->m_vnodecovered->v_mountedhere = (struct mount *)0;
-       vfs_unlock(mp);
+       simple_lock_init(&mntvnode_slock);
+       simple_lock_init(&mntid_slock);
+       simple_lock_init(&spechash_slock);
+       TAILQ_INIT(&vnode_free_list);
+       simple_lock_init(&vnode_free_list_slock);
+       CIRCLEQ_INIT(&mountlist);
 }
 
 /*
  * Lock a filesystem.
  * Used to prevent access to it while mounting and unmounting.
  */
+int
 vfs_lock(mp)
        register struct mount *mp;
 {
 
-       if (mp->m_flag & M_MLOCK)
-               return (EBUSY);
-       mp->m_flag |= M_MLOCK;
+       while (mp->mnt_flag & MNT_MLOCK) {
+               mp->mnt_flag |= MNT_MWAIT;
+               tsleep((caddr_t)mp, PVFS, "vfslock", 0);
+       }
+       mp->mnt_flag |= MNT_MLOCK;
        return (0);
 }
 
@@ -117,47 +101,1639 @@ vfs_unlock(mp)
        register struct mount *mp;
 {
 
-       if ((mp->m_flag & M_MLOCK) == 0)
-               panic("vfs_unlock: locked fs");
-       mp->m_flag &= ~M_MLOCK;
-       if (mp->m_flag & M_MWAIT) {
-               mp->m_flag &= ~M_MWAIT;
+       if ((mp->mnt_flag & MNT_MLOCK) == 0)
+               panic("vfs_unlock: not locked");
+       mp->mnt_flag &= ~MNT_MLOCK;
+       if (mp->mnt_flag & MNT_MWAIT) {
+               mp->mnt_flag &= ~MNT_MWAIT;
                wakeup((caddr_t)mp);
        }
 }
 
+/*
+ * Mark a mount point as busy.
+ * Used to synchronize access and to delay unmounting.
+ */
+int
+vfs_busy(mp)
+       register struct mount *mp;
+{
+
+       while (mp->mnt_flag & MNT_MPBUSY) {
+               mp->mnt_flag |= MNT_MPWANT;
+               tsleep((caddr_t)&mp->mnt_flag, PVFS, "vfsbusy", 0);
+       }
+       if (mp->mnt_flag & MNT_UNMOUNT)
+               return (1);
+       mp->mnt_flag |= MNT_MPBUSY;
+       return (0);
+}
+
+/*
+ * Free a busy filesystem.
+ * Panic if filesystem is not busy.
+ */
+void
+vfs_unbusy(mp)
+       register struct mount *mp;
+{
+
+       if ((mp->mnt_flag & MNT_MPBUSY) == 0)
+               panic("vfs_unbusy: not busy");
+       mp->mnt_flag &= ~MNT_MPBUSY;
+       if (mp->mnt_flag & MNT_MPWANT) {
+               mp->mnt_flag &= ~MNT_MPWANT;
+               wakeup((caddr_t)&mp->mnt_flag);
+       }
+}
+
+/*
+ * Lookup a filesystem type, and if found allocate and initialize
+ * a mount structure for it.
+ *
+ * Devname is usually updated by mount(8) after booting.
+ */
+int
+vfs_rootmountalloc(fstypename, devname, mpp)
+       char *fstypename;
+       char *devname;
+       struct mount **mpp;
+{
+       struct vfsconf *vfsp;
+       struct mount *mp;
+
+       for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
+               if (!strcmp(vfsp->vfc_name, fstypename))
+                       break;
+       if (vfsp == NULL)
+               return (ENODEV);
+       mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK);
+       bzero((char *)mp, (u_long)sizeof(struct mount));
+       LIST_INIT(&mp->mnt_vnodelist);
+       mp->mnt_vfc = vfsp;
+       mp->mnt_op = vfsp->vfc_vfsops;
+       mp->mnt_flag = MNT_RDONLY;
+       mp->mnt_vnodecovered = NULLVP;
+       vfsp->vfc_refcount++;
+       mp->mnt_stat.f_type = vfsp->vfc_typenum;
+       mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
+       strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
+       mp->mnt_stat.f_mntonname[0] = '/';
+       (void) copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
+       *mpp = mp;
+       return (0);
+}
+
+/*
+ * Find an appropriate filesystem to use for the root. If a filesystem
+ * has not been preselected, walk through the list of known filesystems
+ * trying those that have mountroot routines, and try them until one
+ * works or we have tried them all.
+ */
+int
+vfs_mountroot()
+{
+       struct vfsconf *vfsp;
+       extern int (*mountroot)(void);
+       int error;
+
+       if (mountroot != NULL)
+               return ((*mountroot)());
+       for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
+               if (vfsp->vfc_mountroot == NULL)
+                       continue;
+               if ((error = (*vfsp->vfc_mountroot)()) == 0)
+                       return (0);
+               printf("%s_mountroot failed: %d\n", vfsp->vfc_name, error);
+       }
+       return (ENODEV);
+}
+
 /*
  * Lookup a mount point by filesystem identifier.
  */
 struct mount *
-getvfs(fsid)
+vfs_getvfs(fsid)
        fsid_t *fsid;
 {
        register struct mount *mp;
 
-       for (mp = rootfs; mp; mp = mp->m_next) {
-               if (mp->m_fsid.val[0] == fsid->val[0] &&
-                   mp->m_fsid.val[1] == fsid->val[1]) {
-                       break;
+       for (mp = mountlist.cqh_first; mp != (void *)&mountlist;
+            mp = mp->mnt_list.cqe_next) {
+               if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
+                   mp->mnt_stat.f_fsid.val[1] == fsid->val[1])
+                       return (mp);
+       }
+       return ((struct mount *)0);
+}
+
+/*
+ * Get a new unique fsid
+ */
+void
+vfs_getnewfsid(mp)
+       struct mount *mp;
+{
+static u_short xxxfs_mntid;
+
+       fsid_t tfsid;
+       int mtype;
+
+       simple_lock(&mntid_slock);
+       mtype = mp->mnt_vfc->vfc_typenum;
+       mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
+       mp->mnt_stat.f_fsid.val[1] = mtype;
+       if (xxxfs_mntid == 0)
+               ++xxxfs_mntid;
+       tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
+       tfsid.val[1] = mtype;
+       if (mountlist.cqh_first != (void *)&mountlist) {
+               while (vfs_getvfs(&tfsid)) {
+                       tfsid.val[0]++;
+                       xxxfs_mntid++;
                }
        }
-       return (mp);
+       mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
+       simple_unlock(&mntid_slock);
 }
 
 /*
  * Set vnode attributes to VNOVAL
  */
-void vattr_null(vap)
+void
+vattr_null(vap)
        register struct vattr *vap;
 {
 
        vap->va_type = VNON;
+       vap->va_size = vap->va_bytes = VNOVAL;
        vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
-               vap->va_fsid = vap->va_fileid = vap->va_size =
-               vap->va_size1 = vap->va_blocksize = vap->va_rdev =
-               vap->va_bytes = vap->va_bytes1 =
-               vap->va_atime.tv_sec = vap->va_atime.tv_usec =
-               vap->va_mtime.tv_sec = vap->va_mtime.tv_usec =
-               vap->va_ctime.tv_sec = vap->va_ctime.tv_usec =
+               vap->va_fsid = vap->va_fileid =
+               vap->va_blocksize = vap->va_rdev =
+               vap->va_atime.ts_sec = vap->va_atime.ts_nsec =
+               vap->va_mtime.ts_sec = vap->va_mtime.ts_nsec =
+               vap->va_ctime.ts_sec = vap->va_ctime.ts_nsec =
                vap->va_flags = vap->va_gen = VNOVAL;
+       vap->va_vaflags = 0;
+}
+
+/*
+ * Routines having to do with the management of the vnode table.
+ */
+extern int (**dead_vnodeop_p)();
+static void vclean __P((struct vnode *vp, int flag, struct proc *p));
+extern void vgonel __P((struct vnode *vp, struct proc *p));
+long numvnodes;
+extern struct vattr va_null;
+int newnodes = 0;
+int printcnt = 0;
+
+/*
+ * Return the next vnode from the free list.
+ */
+int
+getnewvnode(tag, mp, vops, vpp)
+       enum vtagtype tag;
+       struct mount *mp;
+       int (**vops)();
+       struct vnode **vpp;
+{
+       struct proc *p = curproc;       /* XXX */
+       struct vnode *vp;
+       int s;
+       int cnt;
+
+top:
+       simple_lock(&vnode_free_list_slock);
+newnodes++;
+       if ((vnode_free_list.tqh_first == NULL &&
+            numvnodes < 2 * desiredvnodes) ||
+           numvnodes < desiredvnodes) {
+               simple_unlock(&vnode_free_list_slock);
+               vp = (struct vnode *)malloc((u_long)sizeof *vp,
+                   M_VNODE, M_WAITOK);
+               bzero((char *)vp, sizeof *vp);
+               vp->v_freelist.tqe_next = (struct vnode *)0xdeadf;
+               vp->v_freelist.tqe_prev = (struct vnode **)0xdeadb;
+               vp->v_mntvnodes.le_next = (struct vnode *)0xdeadf;
+               vp->v_mntvnodes.le_prev = (struct vnode **)0xdeadb;
+               numvnodes++;
+               vp->v_spare[0] = numvnodes;
+       } else {
+               for (vp = vnode_free_list.tqh_first;
+                               vp != NULLVP; vp = vp->v_freelist.tqe_next) {
+                       if (simple_lock_try(&vp->v_interlock))
+                               break;
+               }
+               /*
+                * Unless this is a bad time of the month, at most
+                * the first NCPUS items on the free list are
+                * locked, so this is close enough to being empty.
+                */
+               if (vp == NULLVP) {
+                       simple_unlock(&vnode_free_list_slock);
+                       tablefull("vnode");
+                       *vpp = 0;
+                       return (ENFILE);
+               }
+               if (vp->v_usecount)
+                       panic("free vnode isn't");
+               if (vp->v_freelist.tqe_next == (struct vnode *)0xdeadf ||
+                   vp->v_freelist.tqe_prev == (struct vnode **)0xdeadb)
+                       panic("getnewvnode: not on queue");
+               TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
+               vp->v_freelist.tqe_next = (struct vnode *)0xdeadf;
+               /* see comment on why 0xdeadb is set at end of vgone (below) */
+               vp->v_freelist.tqe_prev = (struct vnode **)0xdeadb;
+               simple_unlock(&vnode_free_list_slock);
+               vp->v_lease = NULL;
+               if (vp->v_type != VBAD)
+                       vgonel(vp, p);
+               else
+                       simple_unlock(&vp->v_interlock);
+#ifdef DIAGNOSTIC
+               if (vp->v_data)
+                       panic("cleaned vnode isn't");
+               s = splbio();
+               if (vp->v_numoutput)
+                       panic("Clean vnode has pending I/O's");
+               splx(s);
+#endif
+               vp->v_flag = 0;
+               vp->v_lastr = 0;
+               vp->v_ralen = 0;
+               vp->v_maxra = 0;
+               vp->v_lastw = 0;
+               vp->v_lasta = 0;
+               vp->v_cstart = 0;
+               vp->v_clen = 0;
+               vp->v_socket = 0;
+       }
+       vp->v_type = VNON;
+       cache_purge(vp);
+       vp->v_tag = tag;
+       vp->v_op = vops;
+       insmntque(vp, mp);
+       *vpp = vp;
+       vp->v_usecount = 1;
+       vp->v_data = 0;
+       if (printcnt-- > 0) vprint("getnewvnode got", vp);
+       return (0);
+}
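The usual consumer is a filesystem's own vget routine; a trimmed sketch of the UFS-style pattern (VT_UFS, ffs_vnodeop_p, ip and vpp are borrowed from the contemporaneous FFS code; error handling and the inode-hash bookkeeping are omitted):

	struct vnode *vp;
	int error;

	if (error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) {
		*vpp = NULL;
		return (error);
	}
	vp->v_data = ip;	/* attach the private inode before anyone can find it */
	*vpp = vp;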
+
+/*
+ * Move a vnode from one mount queue to another.
+ */
+void
+insmntque(vp, mp)
+       struct vnode *vp;
+       struct mount *mp;
+{
+
+       simple_lock(&mntvnode_slock);
+       /*
+        * Delete from old mount point vnode list, if on one.
+        */
+       if (vp->v_mount != NULL) {
+               if (vp->v_mntvnodes.le_next == (struct vnode *)0xdeadf ||
+                   vp->v_mntvnodes.le_prev == (struct vnode **)0xdeadb)
+                       panic("insmntque: not on queue");
+               LIST_REMOVE(vp, v_mntvnodes);
+               vp->v_mntvnodes.le_next = (struct vnode *)0xdeadf;
+               vp->v_mntvnodes.le_prev = (struct vnode **)0xdeadb;
+       }
+       /*
+        * Insert into list of vnodes for the new mount point, if available.
+        */
+       if ((vp->v_mount = mp) != NULL)
+               LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
+       simple_unlock(&mntvnode_slock);
+}
+
+/*
+ * Update outstanding I/O count and do wakeup if requested.
+ */
+void
+vwakeup(bp)
+       register struct buf *bp;
+{
+       register struct vnode *vp;
+
+       bp->b_flags &= ~B_WRITEINPROG;
+       if (vp = bp->b_vp) {
+               if (--vp->v_numoutput < 0)
+                       panic("vwakeup: neg numoutput");
+               if ((vp->v_flag & VBWAIT) && vp->v_numoutput <= 0) {
+                       if (vp->v_numoutput < 0)
+                               panic("vwakeup: neg numoutput 2");
+                       vp->v_flag &= ~VBWAIT;
+                       wakeup((caddr_t)&vp->v_numoutput);
+               }
+       }
+}
+
+/*
+ * Flush out and invalidate all buffers associated with a vnode.
+ * Called with the underlying object locked.
+ */
+int
+vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
+       register struct vnode *vp;
+       int flags;
+       struct ucred *cred;
+       struct proc *p;
+       int slpflag, slptimeo;
+{
+       register struct buf *bp;
+       struct buf *nbp, *blist;
+       int s, error;
+
+       if (flags & V_SAVE) {
+               if (error = VOP_FSYNC(vp, cred, MNT_WAIT, p))
+                       return (error);
+               if (vp->v_dirtyblkhd.lh_first != NULL)
+                       panic("vinvalbuf: dirty bufs");
+       }
+       for (;;) {
+               if ((blist = vp->v_cleanblkhd.lh_first) && flags & V_SAVEMETA)
+                       while (blist && blist->b_lblkno < 0)
+                               blist = blist->b_vnbufs.le_next;
+               if (!blist && (blist = vp->v_dirtyblkhd.lh_first) &&
+                   (flags & V_SAVEMETA))
+                       while (blist && blist->b_lblkno < 0)
+                               blist = blist->b_vnbufs.le_next;
+               if (!blist)
+                       break;
+
+               for (bp = blist; bp; bp = nbp) {
+                       nbp = bp->b_vnbufs.le_next;
+                       if (flags & V_SAVEMETA && bp->b_lblkno < 0)
+                               continue;
+                       s = splbio();
+                       if (bp->b_flags & B_BUSY) {
+                               bp->b_flags |= B_WANTED;
+                               error = tsleep((caddr_t)bp,
+                                       slpflag | (PRIBIO + 1), "vinvalbuf",
+                                       slptimeo);
+                               splx(s);
+                               if (error)
+                                       return (error);
+                               break;
+                       }
+                       bremfree(bp);
+                       bp->b_flags |= B_BUSY;
+                       splx(s);
+                       /*
+                        * XXX Since there are no node locks for NFS, I believe
+                        * there is a slight chance that a delayed write will
+                        * occur while sleeping just above, so check for it.
+                        */
+                       if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
+                               (void) VOP_BWRITE(bp);
+                               break;
+                       }
+                       bp->b_flags |= B_INVAL;
+                       brelse(bp);
+               }
+       }
+       if (!(flags & V_SAVEMETA) &&
+           (vp->v_dirtyblkhd.lh_first || vp->v_cleanblkhd.lh_first))
+               panic("vinvalbuf: flush failed");
+       return (0);
+}
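Callers pass V_SAVE when delayed writes must reach the disk before the buffers go away (last close, truncation to a smaller size) and omit it when the contents are simply being discarded. A typical call from a close path of this era looks roughly like:

	if (error = vinvalbuf(vp, V_SAVE, cred, p, 0, 0))
		return (error);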
+
+/*
+ * Associate a buffer with a vnode.
+ */
+void
+bgetvp(vp, bp)
+       register struct vnode *vp;
+       register struct buf *bp;
+{
+
+       if (bp->b_vp)
+               panic("bgetvp: not free");
+       VHOLD(vp);
+       bp->b_vp = vp;
+       if (vp->v_type == VBLK || vp->v_type == VCHR)
+               bp->b_dev = vp->v_rdev;
+       else
+               bp->b_dev = NODEV;
+       /*
+        * Insert onto list for new vnode.
+        */
+       bufinsvn(bp, &vp->v_cleanblkhd);
+}
+
+/*
+ * Disassociate a buffer from a vnode.
+ */
+void
+brelvp(bp)
+       register struct buf *bp;
+{
+       struct vnode *vp;
+
+       if (bp->b_vp == (struct vnode *) 0)
+               panic("brelvp: NULL");
+       /*
+        * Delete from old vnode list, if on one.
+        */
+       if (bp->b_vnbufs.le_next != NOLIST)
+               bufremvn(bp);
+       vp = bp->b_vp;
+       bp->b_vp = (struct vnode *) 0;
+       HOLDRELE(vp);
+}
+
+/*
+ * Reassign a buffer from one vnode to another.
+ * Used to assign file specific control information
+ * (indirect blocks) to the vnode to which they belong.
+ */
+void
+reassignbuf(bp, newvp)
+       register struct buf *bp;
+       register struct vnode *newvp;
+{
+       register struct buflists *listheadp;
+
+       if (newvp == NULL) {
+               printf("reassignbuf: NULL");
+               return;
+       }
+       /*
+        * Delete from old vnode list, if on one.
+        */
+       if (bp->b_vnbufs.le_next != NOLIST)
+               bufremvn(bp);
+       /*
+        * If dirty, put on list of dirty buffers;
+        * otherwise insert onto list of clean buffers.
+        */
+       if (bp->b_flags & B_DELWRI)
+               listheadp = &newvp->v_dirtyblkhd;
+       else
+               listheadp = &newvp->v_cleanblkhd;
+       bufinsvn(bp, listheadp);
+}
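The delayed-write path is the main caller: the first time a buffer is marked B_DELWRI it has to move from the clean list to the dirty list so that fsync() and the update daemon can find it on v_dirtyblkhd. The relevant piece of bdwrite() is roughly:

	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DELWRI;
		reassignbuf(bp, bp->b_vp);
	}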
+
+/*
+ * Create a vnode for a block device.
+ * Used for root filesystem, argdev, and swap areas.
+ * Also used for memory file system special devices.
+ */
+int
+bdevvp(dev, vpp)
+       dev_t dev;
+       struct vnode **vpp;
+{
+       register struct vnode *vp;
+       struct vnode *nvp;
+       int error;
+
+       if (dev == NODEV) {
+               *vpp = NULLVP;
+               return (ENODEV);
+       }
+       error = getnewvnode(VT_NON, (struct mount *)0, spec_vnodeop_p, &nvp);
+       if (error) {
+               *vpp = NULLVP;
+               return (error);
+       }
+       vp = nvp;
+       vp->v_type = VBLK;
+       if (nvp = checkalias(vp, dev, (struct mount *)0)) {
+               vput(vp);
+               vp = nvp;
+       }
+       *vpp = vp;
+       return (0);
+}
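Boot-time configuration is the expected caller, building vnodes for devices that must be opened before any filesystem is mounted; the root and swap setup does roughly this (rootdev and rootvp being the usual globals):

	if (bdevvp(rootdev, &rootvp))
		panic("bdevvp: cannot get root vnode");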
+
+/*
+ * Check to see if the new vnode represents a special device
+ * for which we already have a vnode (either because of
+ * bdevvp() or because of a different vnode representing
+ * the same block device). If such an alias exists, deallocate
+ * the existing contents and return the aliased vnode. The
+ * caller is responsible for filling it with its new contents.
+ */
+struct vnode *
+checkalias(nvp, nvp_rdev, mp)
+       register struct vnode *nvp;
+       dev_t nvp_rdev;
+       struct mount *mp;
+{
+       struct proc *p = curproc;       /* XXX */
+       struct vnode *vp;
+       struct vnode **vpp;
+
+       if (nvp->v_type != VBLK && nvp->v_type != VCHR)
+               return (NULLVP);
+
+       vpp = &speclisth[SPECHASH(nvp_rdev)];
+loop:
+       simple_lock(&spechash_slock);
+       for (vp = *vpp; vp; vp = vp->v_specnext) {
+               if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type)
+                       continue;
+               /*
+                * Alias, but not in use, so flush it out.
+                */
+               simple_lock(&vp->v_interlock);
+               if (vp->v_usecount == 0) {
+                       simple_unlock(&spechash_slock);
+                       vgonel(vp, p);
+                       goto loop;
+               }
+               if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) {
+                       simple_unlock(&spechash_slock);
+                       goto loop;
+               }
+               break;
+       }
+       if (vp == NULL || vp->v_tag != VT_NON) {
+               MALLOC(nvp->v_specinfo, struct specinfo *,
+                       sizeof(struct specinfo), M_VNODE, M_WAITOK);
+               nvp->v_rdev = nvp_rdev;
+               nvp->v_hashchain = vpp;
+               nvp->v_specnext = *vpp;
+               nvp->v_specflags = 0;
+               simple_unlock(&spechash_slock);
+               *vpp = nvp;
+               if (vp != NULLVP) {
+                       nvp->v_flag |= VALIASED;
+                       vp->v_flag |= VALIASED;
+                       vput(vp);
+               }
+               return (NULLVP);
+       }
+       simple_unlock(&spechash_slock);
+       VOP_UNLOCK(vp, 0, p);
+       simple_lock(&vp->v_interlock);
+       vclean(vp, 0, p);
+       vp->v_op = nvp->v_op;
+       vp->v_tag = nvp->v_tag;
+       nvp->v_type = VNON;
+       insmntque(vp, mp);
+       return (vp);
+}
+
+/*
+ * Grab a particular vnode from the free list, increment its
+ * reference count and lock it. The vnode lock bit is set the
+ * vnode is being eliminated in vgone. The process is awakened
+ * when the transition is completed, and an error returned to
+ * indicate that the vnode is no longer usable (possibly having
+ * been changed to a new file system type).
+ */
+int
+vget(vp, flags, p)
+       struct vnode *vp;
+       int flags;
+       struct proc *p;
+{
+
+       /*
+        * If the vnode is in the process of being cleaned out for
+        * another use, we wait for the cleaning to finish and then
+        * return failure. Cleaning is determined by checking that
+        * the VXLOCK flag is set.
+        */
+       if ((flags & LK_INTERLOCK) == 0)
+               simple_lock(&vp->v_interlock);
+       if (vp->v_flag & VXLOCK) {
+               vp->v_flag |= VXWANT;
+               simple_unlock(&vp->v_interlock);
+               tsleep((caddr_t)vp, PINOD, "vget", 0);
+               return (ENOENT);
+       }
+       if (vp->v_usecount == 0) {
+               simple_lock(&vnode_free_list_slock);
+               if (vp->v_freelist.tqe_next == (struct vnode *)0xdeadf ||
+                   vp->v_freelist.tqe_prev == (struct vnode **)0xdeadb)
+                       panic("vget: not on queue");
+               TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
+               vp->v_freelist.tqe_next = (struct vnode *)0xdeadf;
+               vp->v_freelist.tqe_prev = (struct vnode **)0xdeadb;
+               simple_unlock(&vnode_free_list_slock);
+       }
+       vp->v_usecount++;
+       if (flags & LK_TYPE_MASK)
+               return (vn_lock(vp, flags | LK_INTERLOCK, p));
+       simple_unlock(&vp->v_interlock);
+       if (printcnt-- > 0) vprint("vget got", vp);
+       return (0);
+}
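A name-cache hit is the typical caller: the cache returns a bare vnode pointer, and vget() both revalidates it (waiting out a VXLOCK'ed reclaim) and acquires the requested lock, so lookup code follows a retry pattern roughly like this (the restart label is only illustrative):

	if (error = vget(vp, LK_EXCLUSIVE, p))
		goto restart;	/* vnode was reclaimed underneath us; redo the lookup */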
+
+int bug_refs = 0;
+
+/*
+ * Stubs to use when there is no locking to be done on the underlying object.
+ *
+ * Getting a lock just clears the interlock if necessary.
+ */
+int
+vop_nolock(ap)
+       struct vop_lock_args /* {
+               struct vnode *a_vp;
+               int a_flags;
+               struct proc *a_p;
+       } */ *ap;
+{
+       struct vnode *vp = ap->a_vp;
+
+       /*
+        * Since we are not using the lock manager, we must clear
+        * the interlock here.
+        */
+       if (ap->a_flags & LK_INTERLOCK)
+               simple_unlock(&vp->v_interlock);
+       return (0);
+}
+
+/*
+ * Unlock has nothing to do.
+ */
+int
+vop_nounlock(ap)
+       struct vop_unlock_args /* {
+               struct vnode *a_vp;
+               int a_flags;
+               struct proc *a_p;
+       } */ *ap;
+{
+
+       return (0);
+}
+
+/*
+ * Nothing is ever locked.
+ */
+int
+vop_noislocked(ap)
+       struct vop_islocked_args /* {
+               struct vnode *a_vp;
+       } */ *ap;
+{
+
+       return (0);
+}
+
+/*
+ * Vnode reference.
+ */
+void
+vref(vp)
+       struct vnode *vp;
+{
+
+       simple_lock(&vp->v_interlock);
+       if (vp->v_usecount <= 0)
+               panic("vref used where vget required");
+       if (vp->v_freelist.tqe_next != (struct vnode *)0xdeadf ||
+           vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb)
+               panic("vref: not free");
+       vp->v_usecount++;
+       simple_unlock(&vp->v_interlock);
+       if (printcnt-- > 0) vprint("vref get", vp);
+       if (vp->v_type != VBLK && curproc)
+               curproc->p_spare[0]++;
+       if (bug_refs)
+               vprint("vref: ");
+}
+
+/*
+ * vput(), just unlock and vrele()
+ */
+void
+vput(vp)
+       struct vnode *vp;
+{
+       struct proc *p = curproc;       /* XXX */
+
+#ifdef DIAGNOSTIC
+       if (vp == NULL)
+               panic("vput: null vp");
+#endif
+       simple_lock(&vp->v_interlock);
+       vp->v_usecount--;
+       if (vp->v_usecount > 0) {
+               simple_unlock(&vp->v_interlock);
+               VOP_UNLOCK(vp, 0, p);
+               return;
+       }
+#ifdef DIAGNOSTIC
+       if (vp->v_usecount < 0 || vp->v_writecount != 0) {
+               vprint("vput: bad ref count", vp);
+               panic("vput: ref cnt");
+       }
+#endif
+       /*
+        * insert at tail of LRU list
+        */
+       simple_lock(&vnode_free_list_slock);
+       TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
+       simple_unlock(&vnode_free_list_slock);
+       simple_unlock(&vp->v_interlock);
+       VOP_INACTIVE(vp, p);
+}
+
+/*
+ * Vnode release.
+ * If count drops to zero, call inactive routine and return to freelist.
+ */
+void
+vrele(vp)
+       struct vnode *vp;
+{
+       struct proc *p = curproc;       /* XXX */
+
+#ifdef DIAGNOSTIC
+       if (vp == NULL)
+               panic("vrele: null vp");
+#endif
+       simple_lock(&vp->v_interlock);
+       vp->v_usecount--;
+       if (printcnt-- > 0) vprint("vrele put", vp);
+       if (vp->v_type != VBLK && curproc)
+               curproc->p_spare[0]--;
+       if (bug_refs)
+               vprint("vref: ");
+       if (vp->v_usecount > 0) {
+               simple_unlock(&vp->v_interlock);
+               return;
+       }
+#ifdef DIAGNOSTIC
+       if (vp->v_usecount < 0 || vp->v_writecount != 0) {
+               vprint("vrele: bad ref count", vp);
+               panic("vrele: ref cnt");
+       }
+#endif
+       /*
+        * insert at tail of LRU list
+        */
+       simple_lock(&vnode_free_list_slock);
+       if (vp->v_freelist.tqe_next != (struct vnode *)0xdeadf ||
+           vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb)
+               panic("vrele: not free");
+       TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
+       simple_unlock(&vnode_free_list_slock);
+       if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, p) == 0)
+               VOP_INACTIVE(vp, p);
+}
+
+#ifdef DIAGNOSTIC
+/*
+ * Page or buffer structure gets a reference.
+ */
+void
+vhold(vp)
+       register struct vnode *vp;
+{
+
+       simple_lock(&vp->v_interlock);
+       vp->v_holdcnt++;
+       simple_unlock(&vp->v_interlock);
+}
+
+/*
+ * Page or buffer structure frees a reference.
+ */
+void
+holdrele(vp)
+       register struct vnode *vp;
+{
+
+       simple_lock(&vp->v_interlock);
+       if (vp->v_holdcnt <= 0)
+               panic("holdrele: holdcnt");
+       vp->v_holdcnt--;
+       simple_unlock(&vp->v_interlock);
+}
+#endif /* DIAGNOSTIC */
+
+/*
+ * Remove any vnodes in the vnode table belonging to mount point mp.
+ *
+ * If MNT_NOFORCE is specified, there should not be any active ones,
+ * return error if any are found (nb: this is a user error, not a
+ * system error). If MNT_FORCE is specified, detach any active vnodes
+ * that are found.
+ */
+#ifdef DIAGNOSTIC
+int busyprt = 0;       /* print out busy vnodes */
+struct ctldebug debug1 = { "busyprt", &busyprt };
+#endif
+
+int
+vflush(mp, skipvp, flags)
+       struct mount *mp;
+       struct vnode *skipvp;
+       int flags;
+{
+       struct proc *p = curproc;       /* XXX */
+       struct vnode *vp, *nvp;
+       int busy = 0;
+
+#ifdef DIAGNOSTIC
+       if ((mp->mnt_flag & MNT_MPBUSY) == 0)
+               panic("vflush: not busy");
+#endif
+
+       simple_lock(&mntvnode_slock);
+loop:
+       for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) {
+               if (vp->v_mount != mp)
+                       goto loop;
+               nvp = vp->v_mntvnodes.le_next;
+               /*
+                * Skip over a selected vnode.
+                */
+               if (vp == skipvp)
+                       continue;
+
+               simple_lock(&vp->v_interlock);
+               /*
+                * Skip over vnodes marked VSYSTEM.
+                */
+               if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
+                       simple_unlock(&vp->v_interlock);
+                       continue;
+               }
+               /*
+                * If WRITECLOSE is set, only flush out regular file
+                * vnodes open for writing.
+                */
+               if ((flags & WRITECLOSE) &&
+                   (vp->v_writecount == 0 || vp->v_type != VREG)) {
+                       simple_unlock(&vp->v_interlock);
+                       continue;
+               }
+               /*
+                * With v_usecount == 0, all we need to do is clear
+                * out the vnode data structures and we are done.
+                */
+               if (vp->v_usecount == 0) {
+                       simple_unlock(&mntvnode_slock);
+                       vgonel(vp, p);
+                       simple_lock(&mntvnode_slock);
+                       continue;
+               }
+               /*
+                * If FORCECLOSE is set, forcibly close the vnode.
+                * For block or character devices, revert to an
+                * anonymous device. For all other files, just kill them.
+                */
+               if (flags & FORCECLOSE) {
+                       simple_unlock(&mntvnode_slock);
+                       if (vp->v_type != VBLK && vp->v_type != VCHR) {
+                               vgonel(vp, p);
+                       } else {
+                               vclean(vp, 0, p);
+                               vp->v_op = spec_vnodeop_p;
+                               insmntque(vp, (struct mount *)0);
+                       }
+                       simple_lock(&mntvnode_slock);
+                       continue;
+               }
+#ifdef DIAGNOSTIC
+               if (busyprt)
+                       vprint("vflush: busy vnode", vp);
+#endif
+               simple_unlock(&vp->v_interlock);
+               busy++;
+       }
+       simple_unlock(&mntvnode_slock);
+       if (busy)
+               return (EBUSY);
+       return (0);
+}
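Unmount is the main consumer; a filesystem's unmount or flushfiles routine clears every vnode on the mount (optionally sparing one it still needs, passed as skipvp) before tearing down its private state, roughly:

	if (error = vflush(mp, NULLVP, flags))
		return (error);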
+
+/*
+ * Disassociate the underlying file system from a vnode.
+ * The vnode interlock is held on entry.
+ */
+static void
+vclean(vp, flags, p)
+       struct vnode *vp;
+       int flags;
+       struct proc *p;
+{
+       int active;
+
+       /*
+        * Check to see if the vnode is in use.
+        * If so we have to reference it before we clean it out
+        * so that its count cannot fall to zero and generate a
+        * race against ourselves to recycle it.
+        */
+       if (active = vp->v_usecount)
+               vp->v_usecount++;
+       /*
+        * Prevent the vnode from being recycled or
+        * brought into use while we clean it out.
+        */
+       if (vp->v_flag & VXLOCK)
+               panic("vclean: deadlock");
+       vp->v_flag |= VXLOCK;
+       /*
+        * Even if the count is zero, the VOP_INACTIVE routine may still
+        * have the object locked while it cleans it out. The VOP_LOCK
+        * ensures that the VOP_INACTIVE routine is done with its work.
+        * For active vnodes, it ensures that no other activity can
+        * occur while the underlying object is being cleaned out.
+        */
+       VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, p);
+       /*
+        * Clean out any buffers associated with the vnode.
+        */
+       if (flags & DOCLOSE)
+               vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
+       /*
+        * If purging an active vnode, it must be closed and
+        * deactivated before being reclaimed. Note that the
+        * VOP_INACTIVE will unlock the vnode.
+        */
+       if (active) {
+               if (flags & DOCLOSE)
+                       VOP_CLOSE(vp, IO_NDELAY, NOCRED, p);
+               VOP_INACTIVE(vp, p);
+       } else {
+               /*
+                * Any other processes trying to obtain this lock must first
+                * wait for VXLOCK to clear, then call the new lock operation.
+                */
+               VOP_UNLOCK(vp, 0, p);
+       }
+       /*
+        * Reclaim the vnode.
+        */
+       if (VOP_RECLAIM(vp, p))
+               panic("vclean: cannot reclaim");
+       if (active)
+               vrele(vp);
+       cache_purge(vp);
+
+       /*
+        * Done with purge, notify sleepers of the grim news.
+        */
+       vp->v_op = dead_vnodeop_p;
+       vp->v_tag = VT_NON;
+       vp->v_flag &= ~VXLOCK;
+       if (vp->v_flag & VXWANT) {
+               vp->v_flag &= ~VXWANT;
+               wakeup((caddr_t)vp);
+       }
+}
+
+/*
+ * Eliminate all activity associated with  the requested vnode
+ * and with all vnodes aliased to the requested vnode.
+ */
+int
+vop_revoke(ap)
+       struct vop_revoke_args /* {
+               struct vnode *a_vp;
+               int a_flags;
+       } */ *ap;
+{
+       struct vnode *vp, *vq;
+       struct proc *p = curproc;       /* XXX */
+
+#ifdef DIAGNOSTIC
+       if ((ap->a_flags & REVOKEALL) == 0)
+               panic("vop_revoke");
+#endif
+
+       vp = ap->a_vp;
+       simple_lock(&vp->v_interlock);
+
+       if (vp->v_flag & VALIASED) {
+               /*
+                * If a vgone (or vclean) is already in progress,
+                * wait until it is done and return.
+                */
+               if (vp->v_flag & VXLOCK) {
+                       vp->v_flag |= VXWANT;
+                       simple_unlock(&vp->v_interlock);
+                       tsleep((caddr_t)vp, PINOD, "vop_revokeall", 0);
+                       return (0);
+               }
+               /*
+                * Ensure that vp will not be vgone'd while we
+                * are eliminating its aliases.
+                */
+               vp->v_flag |= VXLOCK;
+               simple_unlock(&vp->v_interlock);
+               while (vp->v_flag & VALIASED) {
+                       simple_lock(&spechash_slock);
+                       for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
+                               if (vq->v_rdev != vp->v_rdev ||
+                                   vq->v_type != vp->v_type || vp == vq)
+                                       continue;
+                               simple_unlock(&spechash_slock);
+                               vgone(vq);
+                               break;
+                       }
+                       if (vq == NULLVP)
+                               simple_unlock(&spechash_slock);
+               }
+               /*
+                * Remove the lock so that vgone below will
+                * really eliminate the vnode after which time
+                * vgone will awaken any sleepers.
+                */
+               simple_lock(&vp->v_interlock);
+               vp->v_flag &= ~VXLOCK;
+       }
+       vgonel(vp, p);
+       return (0);
+}
+
+/*
+ * Recycle an unused vnode to the front of the free list.
+ * Release the passed interlock if the vnode will be recycled.
+ */
+int
+vrecycle(vp, inter_lkp, p)
+       struct vnode *vp;
+       struct simplelock *inter_lkp;
+       struct proc *p;
+{
+
+       simple_lock(&vp->v_interlock);
+       if (vp->v_usecount == 0) {
+               if (inter_lkp)
+                       simple_unlock(inter_lkp);
+               vgonel(vp, p);
+               return (1);
+       }
+       simple_unlock(&vp->v_interlock);
+       return (0);
+}
+
+/*
+ * Eliminate all activity associated with a vnode
+ * in preparation for reuse.
+ */
+void
+vgone(vp)
+       struct vnode *vp;
+{
+       struct proc *p = curproc;       /* XXX */
+
+       simple_lock(&vp->v_interlock);
+       vgonel(vp, p);
+}
+
+/*
+ * vgone, with the vp interlock held.
+ */
+void
+vgonel(vp, p)
+       struct vnode *vp;
+       struct proc *p;
+{
+       struct vnode *vq;
+       struct vnode *vx;
+
+       /*
+        * If a vgone (or vclean) is already in progress,
+        * wait until it is done and return.
+        */
+       if (vp->v_flag & VXLOCK) {
+               vp->v_flag |= VXWANT;
+               simple_unlock(&vp->v_interlock);
+               tsleep((caddr_t)vp, PINOD, "vgone", 0);
+               return;
+       }
+       /*
+        * Clean out the filesystem specific data.
+        */
+       vclean(vp, DOCLOSE, p);
+       /*
+        * Delete from old mount point vnode list, if on one.
+        */
+       if (vp->v_mount != NULL)
+               insmntque(vp, (struct mount *)0);
+       /*
+        * If special device, remove it from special device alias list
+        * if it is on one.
+        */
+       if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
+               simple_lock(&spechash_slock);
+               if (*vp->v_hashchain == vp) {
+                       *vp->v_hashchain = vp->v_specnext;
+               } else {
+                       for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
+                               if (vq->v_specnext != vp)
+                                       continue;
+                               vq->v_specnext = vp->v_specnext;
+                               break;
+                       }
+                       if (vq == NULL)
+                               panic("missing bdev");
+               }
+               if (vp->v_flag & VALIASED) {
+                       vx = NULL;
+                       for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
+                               if (vq->v_rdev != vp->v_rdev ||
+                                   vq->v_type != vp->v_type)
+                                       continue;
+                               if (vx)
+                                       break;
+                               vx = vq;
+                       }
+                       if (vx == NULL)
+                               panic("missing alias");
+                       if (vq == NULL)
+                               vx->v_flag &= ~VALIASED;
+                       vp->v_flag &= ~VALIASED;
+               }
+               simple_unlock(&spechash_slock);
+               FREE(vp->v_specinfo, M_VNODE);
+               vp->v_specinfo = NULL;
+       }
+       /*
+        * If it is on the freelist and not already at the head,
+        * move it to the head of the list. The test of the back
+        * pointer and the reference count of zero is because
+        * it will be removed from the free list by getnewvnode,
+        * but will not have its reference count incremented until
+        * after calling vgone. If the reference count were
+        * incremented first, vgone would (incorrectly) try to
+        * close the previous instance of the underlying object.
+        * So, the back pointer is explicitly set to `0xdeadb' in
+        * getnewvnode after removing it from the freelist to ensure
+        * that we do not try to move it here.
+        */
+       if (vp->v_usecount == 0) {
+               simple_lock(&vnode_free_list_slock);
+               if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb) &&
+                   vnode_free_list.tqh_first != vp) {
+                       TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
+                       TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
+               }
+               simple_unlock(&vnode_free_list_slock);
+       }
+       vp->v_type = VBAD;
+}
+
+/*
+ * Lookup a vnode by device number.
+ */
+int
+vfinddev(dev, type, vpp)
+       dev_t dev;
+       enum vtype type;
+       struct vnode **vpp;
+{
+       struct vnode *vp;
+       int rc = 0;
+
+       simple_lock(&spechash_slock);
+       for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
+               if (dev != vp->v_rdev || type != vp->v_type)
+                       continue;
+               *vpp = vp;
+               rc = 1;
+               break;
+       }
+       simple_unlock(&spechash_slock);
+       return (rc);
+}
+
+/*
+ * Calculate the total number of references to a special device.
+ */
+int
+vcount(vp)
+       struct vnode *vp;
+{
+       struct vnode *vq, *vnext;
+       int count;
+
+loop:
+       if ((vp->v_flag & VALIASED) == 0)
+               return (vp->v_usecount);
+       simple_lock(&spechash_slock);
+       for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
+               vnext = vq->v_specnext;
+               if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
+                       continue;
+               /*
+                * Alias, but not in use, so flush it out.
+                */
+               if (vq->v_usecount == 0 && vq != vp) {
+                       simple_unlock(&spechash_slock);
+                       vgone(vq);
+                       goto loop;
+               }
+               count += vq->v_usecount;
+       }
+       simple_unlock(&spechash_slock);
+       return (count);
+}
+
+/*
+ * Print out a description of a vnode.
+ */
+static char *typename[] =
+   { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };
+
+void
+vprint(label, vp)
+       char *label;
+       register struct vnode *vp;
+{
+       char buf[64];
+
+       if (label != NULL)
+               printf("%s: ", label);
+       printf("num %d ", vp->v_spare[0]);
+       printf("type %s, usecount %d, writecount %d, refcount %d,",
+               typename[vp->v_type], vp->v_usecount, vp->v_writecount,
+               vp->v_holdcnt);
+       buf[0] = '\0';
+       if (vp->v_flag & VROOT)
+               strcat(buf, "|VROOT");
+       if (vp->v_flag & VTEXT)
+               strcat(buf, "|VTEXT");
+       if (vp->v_flag & VSYSTEM)
+               strcat(buf, "|VSYSTEM");
+       if (vp->v_flag & VXLOCK)
+               strcat(buf, "|VXLOCK");
+       if (vp->v_flag & VXWANT)
+               strcat(buf, "|VXWANT");
+       if (vp->v_flag & VBWAIT)
+               strcat(buf, "|VBWAIT");
+       if (vp->v_flag & VALIASED)
+               strcat(buf, "|VALIASED");
+       if (buf[0] != '\0')
+               printf(" flags (%s)", &buf[1]);
+       if (vp->v_data == NULL) {
+               printf("\n");
+       } else {
+               printf("\n\t");
+               VOP_PRINT(vp);
+       }
+}
+
+#ifdef DEBUG
+/*
+ * List all of the locked vnodes in the system.
+ * Called when debugging the kernel.
+ */
+void
+printlockedvnodes()
+{
+       register struct mount *mp;
+       register struct vnode *vp;
+
+       printf("Locked vnodes\n");
+       for (mp = mountlist.cqh_first; mp != (void *)&mountlist;
+            mp = mp->mnt_list.cqe_next) {
+               for (vp = mp->mnt_vnodelist.lh_first;
+                    vp != NULL;
+                    vp = vp->v_mntvnodes.le_next) {
+                       if (VOP_ISLOCKED(vp))
+                               vprint((char *)0, vp);
+               }
+       }
+}
+#endif
+
+/*
+ * Top level filesystem related information gathering.
+ */
+int
+vfs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
+       int *name;
+       u_int namelen;
+       void *oldp;
+       size_t *oldlenp;
+       void *newp;
+       size_t newlen;
+       struct proc *p;
+{
+       struct ctldebug *cdp;
+       struct vfsconf *vfsp;
+
+       /* all sysctl names at this level are at least name and field */
+       if (namelen < 2)
+               return (ENOTDIR);               /* overloaded */
+       if (name[0] != VFS_GENERIC) {
+               for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
+                       if (vfsp->vfc_typenum == name[0])
+                               break;
+               if (vfsp == NULL)
+                       return (EOPNOTSUPP);
+               return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
+                   oldp, oldlenp, newp, newlen, p));
+       }
+       switch (name[1]) {
+       case VFS_MAXTYPENUM:
+               return (sysctl_rdint(oldp, oldlenp, newp, maxvfsconf));
+       case VFS_CONF:
+               if (namelen < 3)
+                       return (ENOTDIR);       /* overloaded */
+               for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
+                       if (vfsp->vfc_typenum == name[2])
+                               break;
+               if (vfsp == NULL)
+                       return (EOPNOTSUPP);
+               return (sysctl_rdstruct(oldp, oldlenp, newp, vfsp,
+                   sizeof(struct vfsconf)));
+       }
+       return (EOPNOTSUPP);
+}
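From user level these names sit under the CTL_VFS tree, so fetching the vfsconf record for one filesystem type looks roughly like this (typenum stands for the vfc_typenum being queried; error checking omitted):

	int mib[4] = { CTL_VFS, VFS_GENERIC, VFS_CONF, typenum };
	struct vfsconf vfc;
	size_t len = sizeof(vfc);

	sysctl(mib, 4, &vfc, &len, NULL, 0);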
+
+int kinfo_vdebug = 1;
+int kinfo_vgetfailed;
+#define KINFO_VNODESLOP        10
+/*
+ * Dump vnode list (via sysctl).
+ * Copyout address of vnode followed by vnode.
+ */
+/* ARGSUSED */
+int
+sysctl_vnode(where, sizep)
+       char *where;
+       size_t *sizep;
+{
+       register struct mount *mp, *nmp;
+       struct vnode *nvp, *vp;
+       register char *bp = where, *savebp;
+       char *ewhere;
+       int error;
+
+#define VPTRSZ sizeof (struct vnode *)
+#define VNODESZ        sizeof (struct vnode)
+       if (where == NULL) {
+               *sizep = (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ);
+               return (0);
+       }
+       ewhere = where + *sizep;
+               
+       for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
+               nmp = mp->mnt_list.cqe_next;
+               if (vfs_busy(mp))
+                       continue;
+               savebp = bp;
+again:
+               simple_lock(&mntvnode_slock);
+               for (vp = mp->mnt_vnodelist.lh_first;
+                    vp != NULL;
+                    vp = nvp) {
+                       /*
+                        * Check that the vp is still associated with
+                        * this filesystem.  RACE: could have been
+                        * recycled onto the same filesystem.
+                        */
+                       if (vp->v_mount != mp) {
+                               simple_unlock(&mntvnode_slock);
+                               if (kinfo_vdebug)
+                                       printf("kinfo: vp changed\n");
+                               bp = savebp;
+                               goto again;
+                       }
+                       nvp = vp->v_mntvnodes.le_next;
+                       if (bp + VPTRSZ + VNODESZ > ewhere) {
+                               simple_unlock(&mntvnode_slock);
+                               *sizep = bp - where;
+                               return (ENOMEM);
+                       }
+                       simple_unlock(&mntvnode_slock);
+                       if ((error = copyout((caddr_t)&vp, bp, VPTRSZ)) ||
+                          (error = copyout((caddr_t)vp, bp + VPTRSZ, VNODESZ)))
+                               return (error);
+                       bp += VPTRSZ + VNODESZ;
+                       simple_lock(&mntvnode_slock);
+               }
+               simple_unlock(&mntvnode_slock);
+               vfs_unbusy(mp);
+       }
+
+       *sizep = bp - where;
+       return (0);
+}
+
+/*
+ * Check to see if a filesystem is mounted on a block device.
+ */
+int
+vfs_mountedon(vp)
+       struct vnode *vp;
+{
+       struct vnode *vq;
+       int error = 0;
+
+       if (vp->v_specflags & SI_MOUNTEDON)
+               return (EBUSY);
+       if (vp->v_flag & VALIASED) {
+               simple_lock(&spechash_slock);
+               for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
+                       if (vq->v_rdev != vp->v_rdev ||
+                           vq->v_type != vp->v_type)
+                               continue;
+                       if (vq->v_specflags & SI_MOUNTEDON) {
+                               error = EBUSY;
+                               break;
+                       }
+               }
+               simple_unlock(&spechash_slock);
+       }
+       return (error);
+}
+
+/*
+ * Unmount all filesystems. The list is traversed in reverse order
+ * of mounting to avoid dependencies.
+ */
+void
+vfs_unmountall()
+{
+       struct mount *mp, *nmp;
+
+       for (mp = mountlist.cqh_last; mp != (void *)&mountlist; mp = nmp) {
+               nmp = mp->mnt_list.cqe_prev;
+               (void) dounmount(mp, MNT_FORCE, &proc0);
+       }
+}
+
+/*
+ * Build hash lists of net addresses and hang them off the mount point.
+ * Called by ufs_mount() to set up the lists of export addresses.
+ */
+static int
+vfs_hang_addrlist(mp, nep, argp)
+       struct mount *mp;
+       struct netexport *nep;
+       struct export_args *argp;
+{
+       register struct netcred *np;
+       register struct radix_node_head *rnh;
+       register int i;
+       struct radix_node *rn;
+       struct sockaddr *saddr, *smask = 0;
+       struct domain *dom;
+       int error;
+
+       if (argp->ex_addrlen == 0) {
+               if (mp->mnt_flag & MNT_DEFEXPORTED)
+                       return (EPERM);
+               np = &nep->ne_defexported;
+               np->netc_exflags = argp->ex_flags;
+               np->netc_anon = argp->ex_anon;
+               np->netc_anon.cr_ref = 1;
+               mp->mnt_flag |= MNT_DEFEXPORTED;
+               return (0);
+       }
+       i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
+       np = (struct netcred *)malloc(i, M_NETADDR, M_WAITOK);
+       bzero((caddr_t)np, i);
+       saddr = (struct sockaddr *)(np + 1);
+       if (error = copyin(argp->ex_addr, (caddr_t)saddr, argp->ex_addrlen))
+               goto out;
+       if (saddr->sa_len > argp->ex_addrlen)
+               saddr->sa_len = argp->ex_addrlen;
+       if (argp->ex_masklen) {
+               smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
+               error = copyin(argp->ex_mask, (caddr_t)smask, argp->ex_masklen);
+               if (error)
+                       goto out;
+               if (smask->sa_len > argp->ex_masklen)
+                       smask->sa_len = argp->ex_masklen;
+       }
+       i = saddr->sa_family;
+       if ((rnh = nep->ne_rtable[i]) == 0) {
+               /*
+                * Seems silly to initialize every AF when most are not
+                * used, do so on demand here
+                */
+               for (dom = domains; dom; dom = dom->dom_next)
+                       if (dom->dom_family == i && dom->dom_rtattach) {
+                               dom->dom_rtattach((void **)&nep->ne_rtable[i],
+                                       dom->dom_rtoffset);
+                               break;
+                       }
+               if ((rnh = nep->ne_rtable[i]) == 0) {
+                       error = ENOBUFS;
+                       goto out;
+               }
+       }
+       rn = (*rnh->rnh_addaddr)((caddr_t)saddr, (caddr_t)smask, rnh,
+               np->netc_rnodes);
+       if (rn == 0) {
+               /*
+                * One of the reasons that rnh_addaddr may fail is that
+                * the entry already exists. To check for this case, we
+                * look up the entry to see if it is there. If so, we
+                * do not need to make a new entry but do return success.
+                */
+               free(np, M_NETADDR);
+               rn = (*rnh->rnh_matchaddr)((caddr_t)saddr, rnh);
+               if (rn != 0 && (rn->rn_flags & RNF_ROOT) == 0 &&
+                   ((struct netcred *)rn)->netc_exflags == argp->ex_flags &&
+                   !bcmp((caddr_t)&((struct netcred *)rn)->netc_anon,
+                           (caddr_t)&argp->ex_anon, sizeof(struct ucred)))
+                       return (0);
+               return (EPERM);
+       }
+       np->netc_exflags = argp->ex_flags;
+       np->netc_anon = argp->ex_anon;
+       np->netc_anon.cr_ref = 1;
+       return (0);
+out:
+       free(np, M_NETADDR);
+       return (error);
+}
+
+/* ARGSUSED */
+static int
+vfs_free_netcred(rn, w)
+       struct radix_node *rn;
+       caddr_t w;
+{
+       register struct radix_node_head *rnh = (struct radix_node_head *)w;
+
+       (*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh);
+       free((caddr_t)rn, M_NETADDR);
+       return (0);
+}
+
+/*
+ * Free the net address hash lists that are hanging off the mount points.
+ */
+static void
+vfs_free_addrlist(nep)
+       struct netexport *nep;
+{
+       register int i;
+       register struct radix_node_head *rnh;
+
+       for (i = 0; i <= AF_MAX; i++)
+               if (rnh = nep->ne_rtable[i]) {
+                       (*rnh->rnh_walktree)(rnh, vfs_free_netcred,
+                           (caddr_t)rnh);
+                       free((caddr_t)rnh, M_RTABLE);
+                       nep->ne_rtable[i] = 0;
+               }
+}
+
+int
+vfs_export(mp, nep, argp)
+       struct mount *mp;
+       struct netexport *nep;
+       struct export_args *argp;
+{
+       int error;
+
+       if (argp->ex_flags & MNT_DELEXPORT) {
+               vfs_free_addrlist(nep);
+               mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
+       }
+       if (argp->ex_flags & MNT_EXPORTED) {
+               if (error = vfs_hang_addrlist(mp, nep, argp))
+                       return (error);
+               mp->mnt_flag |= MNT_EXPORTED;
+       }
+       return (0);
+}
+
+struct netcred *
+vfs_export_lookup(mp, nep, nam)
+       register struct mount *mp;
+       struct netexport *nep;
+       struct mbuf *nam;
+{
+       register struct netcred *np;
+       register struct radix_node_head *rnh;
+       struct sockaddr *saddr;
+
+       np = NULL;
+       if (mp->mnt_flag & MNT_EXPORTED) {
+               /*
+                * Lookup in the export list first.
+                */
+               if (nam != NULL) {
+                       saddr = mtod(nam, struct sockaddr *);
+                       rnh = nep->ne_rtable[saddr->sa_family];
+                       if (rnh != NULL) {
+                               np = (struct netcred *)
+                                       (*rnh->rnh_matchaddr)((caddr_t)saddr,
+                                                             rnh);
+                               if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
+                                       np = NULL;
+                       }
+               }
+               /*
+                * If no address match, use the default if it exists.
+                */
+               if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
+                       np = &nep->ne_defexported;
+       }
+       return (np);
 }
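The NFS file handle path is the intended caller: a filesystem's fhtovp routine hands the client address (nam) to this lookup and refuses the handle unless a matching entry, or the default export, comes back. Roughly, as in the UFS code of this period (ump->um_export is the per-mount netexport; exflagsp and credanonp are the fhtovp output parameters):

	np = vfs_export_lookup(mp, &ump->um_export, nam);
	if (np == NULL)
		return (EACCES);
	*exflagsp = np->netc_exflags;
	*credanonp = &np->netc_anon;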