/*
- * Copyright (c) 1989 The Regents of the University of California.
- * All rights reserved.
+ * Copyright (c) 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
*
* %sccs.include.redist.c%
*
- * @(#)vfs_subr.c 7.67 (Berkeley) %G%
+ * @(#)vfs_subr.c 8.24 (Berkeley) %G%
*/
/*
*/
#include <sys/param.h>
+#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/time.h>
#include <sys/vnode.h>
-#include <sys/specdev.h>
+#include <sys/stat.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/malloc.h>
+#include <sys/domain.h>
+#include <sys/mbuf.h>
+
+#include <vm/vm.h>
+#include <sys/sysctl.h>
+
+#include <miscfs/specfs/specdev.h>
+
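+/*
+ * Conversion tables behind the IFTOVT()/VTTOIF() macros: iftovt_tab maps
+ * the IFMT bits of an inode mode (shifted down 12 bits) to a vnode type,
+ * and vttoif_tab maps a vnode type back to the corresponding S_IF* bits.
+ */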
+enum vtype iftovt_tab[16] = {
+ VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
+ VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
+};
+int vttoif_tab[9] = {
+ 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
+ S_IFSOCK, S_IFIFO, S_IFMT,
+};
+
+/*
+ * Insq/Remq for the vnode usage lists.
+ */
+#define bufinsvn(bp, dp) LIST_INSERT_HEAD(dp, bp, b_vnbufs)
+#define bufremvn(bp) { \
+ LIST_REMOVE(bp, b_vnbufs); \
+ (bp)->b_vnbufs.le_next = NOLIST; \
+}
+TAILQ_HEAD(freelst, vnode) vnode_free_list; /* vnode free list */
+struct mntlist mountlist; /* mounted filesystem list */
+static struct simplelock mntid_slock;
+struct simplelock mntvnode_slock;
+static struct simplelock spechash_slock;
+static struct simplelock vnode_free_list_slock;
/*
- * Remove a mount point from the list of mounted filesystems.
- * Unmount of the root is illegal.
+ * Initialize the vnode management data structures.
*/
void
-vfs_remove(mp)
- register struct mount *mp;
+vntblinit()
{
- if (mp == rootfs)
- panic("vfs_remove: unmounting root");
- mp->mnt_prev->mnt_next = mp->mnt_next;
- mp->mnt_next->mnt_prev = mp->mnt_prev;
- mp->mnt_vnodecovered->v_mountedhere = (struct mount *)0;
- vfs_unlock(mp);
+ simple_lock_init(&mntvnode_slock);
+ simple_lock_init(&mntid_slock);
+ simple_lock_init(&spechash_slock);
+ TAILQ_INIT(&vnode_free_list);
+ simple_lock_init(&vnode_free_list_slock);
+ CIRCLEQ_INIT(&mountlist);
}
/*
* Lock a filesystem.
* Used to prevent access to it while mounting and unmounting.
*/
+int
vfs_lock(mp)
register struct mount *mp;
{
- while(mp->mnt_flag & MNT_MLOCK) {
+ while (mp->mnt_flag & MNT_MLOCK) {
mp->mnt_flag |= MNT_MWAIT;
- sleep((caddr_t)mp, PVFS);
+ tsleep((caddr_t)mp, PVFS, "vfslock", 0);
}
mp->mnt_flag |= MNT_MLOCK;
return (0);
* Mark a mount point as busy.
* Used to synchronize access and to delay unmounting.
*/
+int
vfs_busy(mp)
register struct mount *mp;
{
- while(mp->mnt_flag & MNT_MPBUSY) {
+ while (mp->mnt_flag & MNT_MPBUSY) {
mp->mnt_flag |= MNT_MPWANT;
- sleep((caddr_t)&mp->mnt_flag, PVFS);
+ tsleep((caddr_t)&mp->mnt_flag, PVFS, "vfsbusy", 0);
}
if (mp->mnt_flag & MNT_UNMOUNT)
return (1);
* Free a busy filesystem.
* Panic if filesystem is not busy.
*/
+void
vfs_unbusy(mp)
register struct mount *mp;
{
}
}
+/*
+ * Lookup a filesystem type, and if found allocate and initialize
+ * a mount structure for it.
+ *
+ * Devname is usually updated by mount(8) after booting.
+ */
+int
+vfs_rootmountalloc(fstypename, devname, mpp)
+ char *fstypename;
+ char *devname;
+ struct mount **mpp;
+{
+ struct vfsconf *vfsp;
+ struct mount *mp;
+
+ for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
+ if (!strcmp(vfsp->vfc_name, fstypename))
+ break;
+ if (vfsp == NULL)
+ return (ENODEV);
+ mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK);
+ bzero((char *)mp, (u_long)sizeof(struct mount));
+ LIST_INIT(&mp->mnt_vnodelist);
+ mp->mnt_vfc = vfsp;
+ mp->mnt_op = vfsp->vfc_vfsops;
+ mp->mnt_flag = MNT_RDONLY;
+ mp->mnt_vnodecovered = NULLVP;
+ vfsp->vfc_refcount++;
+ mp->mnt_stat.f_type = vfsp->vfc_typenum;
+ mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
+ strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
+ mp->mnt_stat.f_mntonname[0] = '/';
+ (void) copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
+ *mpp = mp;
+ return (0);
+}
+
+/*
+ * Find an appropriate filesystem to use for the root. If a filesystem
+ * has not been preselected, walk through the list of known filesystems
+ * trying those that have mountroot routines, and try them until one
+ * works or we have tried them all.
+ */
+int
+vfs_mountroot()
+{
+ struct vfsconf *vfsp;
+ extern int (*mountroot)(void);
+ int error;
+
+ if (mountroot != NULL)
+		return ((*mountroot)());
+ for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
+ if (vfsp->vfc_mountroot == NULL)
+ continue;
+ if ((error = (*vfsp->vfc_mountroot)()) == 0)
+ return (0);
+ printf("%s_mountroot failed: %d\n", vfsp->vfc_name, error);
+ }
+ return (ENODEV);
+}
+
/*
* Lookup a mount point by filesystem identifier.
*/
struct mount *
-getvfs(fsid)
+vfs_getvfs(fsid)
fsid_t *fsid;
{
register struct mount *mp;
- mp = rootfs;
- do {
+ for (mp = mountlist.cqh_first; mp != (void *)&mountlist;
+ mp = mp->mnt_list.cqe_next) {
if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
- mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
+ mp->mnt_stat.f_fsid.val[1] == fsid->val[1])
return (mp);
- }
- mp = mp->mnt_next;
- } while (mp != rootfs);
+ }
return ((struct mount *)0);
}
+/*
+ * Get a new unique fsid
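+ *
+ * val[0] is built with makedev() from (nblkdev + filesystem type number,
+ * per-type instance counter), keeping it clear of real block device
+ * numbers; val[1] is the type number itself.  The instance counter is
+ * advanced until the resulting fsid is not already in use.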
+ */
+void
+vfs_getnewfsid(mp)
+ struct mount *mp;
+{
+	static u_short xxxfs_mntid;
+
+ fsid_t tfsid;
+ int mtype;
+
+ simple_lock(&mntid_slock);
+ mtype = mp->mnt_vfc->vfc_typenum;
+ mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
+ mp->mnt_stat.f_fsid.val[1] = mtype;
+ if (xxxfs_mntid == 0)
+ ++xxxfs_mntid;
+ tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
+ tfsid.val[1] = mtype;
+ if (mountlist.cqh_first != (void *)&mountlist) {
+ while (vfs_getvfs(&tfsid)) {
+ tfsid.val[0]++;
+ xxxfs_mntid++;
+ }
+ }
+ mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
+ simple_unlock(&mntid_slock);
+}
+
/*
* Set vnode attributes to VNOVAL
*/
-void vattr_null(vap)
+void
+vattr_null(vap)
register struct vattr *vap;
{
vap->va_type = VNON;
vap->va_size = vap->va_bytes = VNOVAL;
-#ifdef _NOQUAD
- vap->va_size_rsv = vap->va_bytes_rsv = VNOVAL;
-#endif
vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
vap->va_fsid = vap->va_fileid =
vap->va_blocksize = vap->va_rdev =
- vap->va_atime.tv_sec = vap->va_atime.tv_usec =
- vap->va_mtime.tv_sec = vap->va_mtime.tv_usec =
- vap->va_ctime.tv_sec = vap->va_ctime.tv_usec =
+ vap->va_atime.ts_sec = vap->va_atime.ts_nsec =
+ vap->va_mtime.ts_sec = vap->va_mtime.ts_nsec =
+ vap->va_ctime.ts_sec = vap->va_ctime.ts_nsec =
vap->va_flags = vap->va_gen = VNOVAL;
+ vap->va_vaflags = 0;
}
/*
* Routines having to do with the management of the vnode table.
*/
-struct vnode *vfreeh, **vfreet;
-extern struct vnodeops dead_vnodeops, spec_vnodeops;
-extern void vclean();
+extern int (**dead_vnodeop_p)();
+static void vclean __P((struct vnode *vp, int flag, struct proc *p));
+extern void vgonel __P((struct vnode *vp, struct proc *p));
long numvnodes;
-struct vattr va_null;
-
-/*
- * Initialize the vnode structures and initialize each file system type.
- */
-vfsinit()
-{
- struct vfsops **vfsp;
-
- /*
- * Initialize the vnode name cache
- */
- nchinit();
- /*
- * Initialize each file system type.
- */
- vattr_null(&va_null);
- for (vfsp = &vfssw[0]; vfsp <= &vfssw[MOUNT_MAXTYPE]; vfsp++) {
- if (*vfsp == NULL)
- continue;
- (*(*vfsp)->vfs_init)();
- }
-}
+extern struct vattr va_null;
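+/*
+ * Debugging aids: newnodes counts vnodes allocated since boot; printcnt,
+ * presumably meant to be patched to a positive value from a debugger,
+ * makes the next printcnt vnode get/ref/release operations vprint() the
+ * vnode involved.
+ */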
+int newnodes = 0;
+int printcnt = 0;
/*
* Return the next vnode from the free list.
*/
+int
getnewvnode(tag, mp, vops, vpp)
enum vtagtype tag;
struct mount *mp;
- struct vnodeops *vops;
+ int (**vops)();
struct vnode **vpp;
{
- register struct vnode *vp, *vq;
-
- if (numvnodes < desiredvnodes) {
+ struct proc *p = curproc; /* XXX */
+ struct vnode *vp;
+ int s;
+ int cnt;
+
+top:
+ simple_lock(&vnode_free_list_slock);
+newnodes++;
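+	/*
+	 * Allocate a fresh vnode when we are below the desired target, or
+	 * when the free list is empty and we are still below twice the
+	 * target; otherwise recycle the least recently used free vnode.
+	 */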
+ if ((vnode_free_list.tqh_first == NULL &&
+ numvnodes < 2 * desiredvnodes) ||
+ numvnodes < desiredvnodes) {
+ simple_unlock(&vnode_free_list_slock);
vp = (struct vnode *)malloc((u_long)sizeof *vp,
M_VNODE, M_WAITOK);
bzero((char *)vp, sizeof *vp);
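+		/*
+		 * 0xdeadf/0xdeadb poison the list linkage of a vnode that is
+		 * on neither the free list nor a mount list; the panics
+		 * below rely on these markers to catch stale linkage.
+		 */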
+ vp->v_freelist.tqe_next = (struct vnode *)0xdeadf;
+ vp->v_freelist.tqe_prev = (struct vnode **)0xdeadb;
+ vp->v_mntvnodes.le_next = (struct vnode *)0xdeadf;
+ vp->v_mntvnodes.le_prev = (struct vnode **)0xdeadb;
numvnodes++;
+ vp->v_spare[0] = numvnodes;
} else {
- if ((vp = vfreeh) == NULL) {
+ for (vp = vnode_free_list.tqh_first;
+ vp != NULLVP; vp = vp->v_freelist.tqe_next) {
+ if (simple_lock_try(&vp->v_interlock))
+ break;
+ }
+ /*
+ * Unless this is a bad time of the month, at most
+ * the first NCPUS items on the free list are
+ * locked, so this is close enough to being empty.
+ */
+ if (vp == NULLVP) {
+ simple_unlock(&vnode_free_list_slock);
tablefull("vnode");
*vpp = 0;
return (ENFILE);
}
if (vp->v_usecount)
panic("free vnode isn't");
- if (vq = vp->v_freef)
- vq->v_freeb = &vfreeh;
- else
- vfreet = &vfreeh;
- vfreeh = vq;
- vp->v_freef = NULL;
- vp->v_freeb = NULL;
+ if (vp->v_freelist.tqe_next == (struct vnode *)0xdeadf ||
+ vp->v_freelist.tqe_prev == (struct vnode **)0xdeadb)
+ panic("getnewvnode: not on queue");
+ TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
+ vp->v_freelist.tqe_next = (struct vnode *)0xdeadf;
+ /* see comment on why 0xdeadb is set at end of vgone (below) */
+ vp->v_freelist.tqe_prev = (struct vnode **)0xdeadb;
+ simple_unlock(&vnode_free_list_slock);
vp->v_lease = NULL;
if (vp->v_type != VBAD)
- vgone(vp);
+ vgonel(vp, p);
+ else
+ simple_unlock(&vp->v_interlock);
+#ifdef DIAGNOSTIC
if (vp->v_data)
panic("cleaned vnode isn't");
+ s = splbio();
+ if (vp->v_numoutput)
+ panic("Clean vnode has pending I/O's");
+ splx(s);
+#endif
vp->v_flag = 0;
vp->v_lastr = 0;
+ vp->v_ralen = 0;
+ vp->v_maxra = 0;
+ vp->v_lastw = 0;
+ vp->v_lasta = 0;
+ vp->v_cstart = 0;
+ vp->v_clen = 0;
vp->v_socket = 0;
}
vp->v_type = VNON;
vp->v_tag = tag;
vp->v_op = vops;
insmntque(vp, mp);
- VREF(vp);
*vpp = vp;
+ vp->v_usecount = 1;
+ vp->v_data = 0;
+ if (printcnt-- > 0) vprint("getnewvnode got", vp);
return (0);
}
/*
* Move a vnode from one mount queue to another.
*/
+void
insmntque(vp, mp)
- register struct vnode *vp;
- register struct mount *mp;
+ struct vnode *vp;
+ struct mount *mp;
{
- register struct vnode *vq;
+ simple_lock(&mntvnode_slock);
/*
* Delete from old mount point vnode list, if on one.
*/
- if (vp->v_mountb) {
- if (vq = vp->v_mountf)
- vq->v_mountb = vp->v_mountb;
- *vp->v_mountb = vq;
+ if (vp->v_mount != NULL) {
+ if (vp->v_mntvnodes.le_next == (struct vnode *)0xdeadf ||
+ vp->v_mntvnodes.le_prev == (struct vnode **)0xdeadb)
+ panic("insmntque: not on queue");
+ LIST_REMOVE(vp, v_mntvnodes);
+ vp->v_mntvnodes.le_next = (struct vnode *)0xdeadf;
+ vp->v_mntvnodes.le_prev = (struct vnode **)0xdeadb;
}
/*
* Insert into list of vnodes for the new mount point, if available.
*/
- vp->v_mount = mp;
- if (mp == NULL) {
- vp->v_mountf = NULL;
- vp->v_mountb = NULL;
- return;
- }
- if (vq = mp->mnt_mounth)
- vq->v_mountb = &vp->v_mountf;
- vp->v_mountf = vq;
- vp->v_mountb = &mp->mnt_mounth;
- mp->mnt_mounth = vp;
-}
-
-/*
- * Make sure all write-behind blocks associated
- * with mount point are flushed out (from sync).
- */
-mntflushbuf(mountp, flags)
- struct mount *mountp;
- int flags;
-{
- register struct vnode *vp;
-
- if ((mountp->mnt_flag & MNT_MPBUSY) == 0)
- panic("mntflushbuf: not busy");
-loop:
- for (vp = mountp->mnt_mounth; vp; vp = vp->v_mountf) {
- if (VOP_ISLOCKED(vp))
- continue;
- if (vget(vp))
- goto loop;
- vflushbuf(vp, flags);
- vput(vp);
- if (vp->v_mount != mountp)
- goto loop;
- }
-}
-
-/*
- * Flush all dirty buffers associated with a vnode.
- */
-vflushbuf(vp, flags)
- register struct vnode *vp;
- int flags;
-{
- register struct buf *bp;
- struct buf *nbp;
- int s;
-
-loop:
- s = splbio();
- for (bp = vp->v_dirtyblkhd; bp; bp = nbp) {
- nbp = bp->b_blockf;
- if ((bp->b_flags & B_BUSY))
- continue;
- if ((bp->b_flags & B_DELWRI) == 0)
- panic("vflushbuf: not dirty");
- bremfree(bp);
- bp->b_flags |= B_BUSY;
- splx(s);
- /*
- * Wait for I/O associated with indirect blocks to complete,
- * since there is no way to quickly wait for them below.
- * NB: This is really specific to ufs, but is done here
- * as it is easier and quicker.
- */
- if (bp->b_vp == vp || (flags & B_SYNC) == 0)
- (void) bawrite(bp);
- else
- (void) bwrite(bp);
- goto loop;
- }
- splx(s);
- if ((flags & B_SYNC) == 0)
- return;
- s = splbio();
- while (vp->v_numoutput) {
- vp->v_flag |= VBWAIT;
- sleep((caddr_t)&vp->v_numoutput, PRIBIO + 1);
- }
- splx(s);
- if (vp->v_dirtyblkhd) {
- vprint("vflushbuf: dirty", vp);
- goto loop;
- }
+ if ((vp->v_mount = mp) != NULL)
+ LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
+ simple_unlock(&mntvnode_slock);
}
/*
* Update outstanding I/O count and do wakeup if requested.
*/
+void
vwakeup(bp)
register struct buf *bp;
{
register struct vnode *vp;
- bp->b_dirtyoff = bp->b_dirtyend = 0;
+ bp->b_flags &= ~B_WRITEINPROG;
if (vp = bp->b_vp) {
- vp->v_numoutput--;
+ if (--vp->v_numoutput < 0)
+ panic("vwakeup: neg numoutput");
if ((vp->v_flag & VBWAIT) && vp->v_numoutput <= 0) {
if (vp->v_numoutput < 0)
- panic("vwakeup: neg numoutput");
+ panic("vwakeup: neg numoutput 2");
vp->v_flag &= ~VBWAIT;
wakeup((caddr_t)&vp->v_numoutput);
}
}
}
-/*
- * Invalidate in core blocks belonging to closed or umounted filesystem
- *
- * Go through the list of vnodes associated with the file system;
- * for each vnode invalidate any buffers that it holds. Normally
- * this routine is preceeded by a bflush call, so that on a quiescent
- * filesystem there will be no dirty buffers when we are done. Binval
- * returns the count of dirty buffers when it is finished.
- */
-mntinvalbuf(mountp)
- struct mount *mountp;
-{
- register struct vnode *vp;
- int dirty = 0;
-
- if ((mountp->mnt_flag & MNT_MPBUSY) == 0)
- panic("mntinvalbuf: not busy");
-loop:
- for (vp = mountp->mnt_mounth; vp; vp = vp->v_mountf) {
- if (vget(vp))
- goto loop;
- dirty += vinvalbuf(vp, 1);
- vput(vp);
- if (vp->v_mount != mountp)
- goto loop;
- }
- return (dirty);
-}
-
/*
* Flush out and invalidate all buffers associated with a vnode.
* Called with the underlying object locked.
*/
-vinvalbuf(vp, save)
+int
+vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
register struct vnode *vp;
- int save;
+ int flags;
+ struct ucred *cred;
+ struct proc *p;
+ int slpflag, slptimeo;
{
register struct buf *bp;
struct buf *nbp, *blist;
- int s, dirty = 0;
+ int s, error;
+ if (flags & V_SAVE) {
+ if (error = VOP_FSYNC(vp, cred, MNT_WAIT, p))
+ return (error);
+ if (vp->v_dirtyblkhd.lh_first != NULL)
+ panic("vinvalbuf: dirty bufs");
+ }
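+	/*
+	 * Buffers with negative logical block numbers hold filesystem
+	 * metadata (e.g. indirect blocks); when V_SAVEMETA is set they are
+	 * skipped below and left attached to the vnode.
+	 */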
for (;;) {
- if (blist = vp->v_dirtyblkhd)
- /* void */;
- else if (blist = vp->v_cleanblkhd)
- /* void */;
- else
+ if ((blist = vp->v_cleanblkhd.lh_first) && flags & V_SAVEMETA)
+ while (blist && blist->b_lblkno < 0)
+ blist = blist->b_vnbufs.le_next;
+ if (!blist && (blist = vp->v_dirtyblkhd.lh_first) &&
+ (flags & V_SAVEMETA))
+ while (blist && blist->b_lblkno < 0)
+ blist = blist->b_vnbufs.le_next;
+ if (!blist)
break;
+
for (bp = blist; bp; bp = nbp) {
- nbp = bp->b_blockf;
+ nbp = bp->b_vnbufs.le_next;
+ if (flags & V_SAVEMETA && bp->b_lblkno < 0)
+ continue;
s = splbio();
if (bp->b_flags & B_BUSY) {
bp->b_flags |= B_WANTED;
- sleep((caddr_t)bp, PRIBIO + 1);
+ error = tsleep((caddr_t)bp,
+ slpflag | (PRIBIO + 1), "vinvalbuf",
+ slptimeo);
splx(s);
+ if (error)
+ return (error);
break;
}
bremfree(bp);
bp->b_flags |= B_BUSY;
splx(s);
- if (save && (bp->b_flags & B_DELWRI)) {
- dirty++;
+ /*
+ * XXX Since there are no node locks for NFS, I believe
+ * there is a slight chance that a delayed write will
+ * occur while sleeping just above, so check for it.
+ */
+ if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
(void) VOP_BWRITE(bp);
break;
}
- if (bp->b_vp != vp)
- reassignbuf(bp, bp->b_vp);
- else
- bp->b_flags |= B_INVAL;
+ bp->b_flags |= B_INVAL;
brelse(bp);
}
}
- if (vp->v_dirtyblkhd || vp->v_cleanblkhd)
+ if (!(flags & V_SAVEMETA) &&
+ (vp->v_dirtyblkhd.lh_first || vp->v_cleanblkhd.lh_first))
panic("vinvalbuf: flush failed");
- return (dirty);
+ return (0);
}
/*
* Associate a buffer with a vnode.
*/
+void
bgetvp(vp, bp)
register struct vnode *vp;
register struct buf *bp;
{
- register struct vnode *vq;
- register struct buf *bq;
if (bp->b_vp)
panic("bgetvp: not free");
/*
* Insert onto list for new vnode.
*/
- if (bq = vp->v_cleanblkhd)
- bq->b_blockb = &bp->b_blockf;
- bp->b_blockf = bq;
- bp->b_blockb = &vp->v_cleanblkhd;
- vp->v_cleanblkhd = bp;
+ bufinsvn(bp, &vp->v_cleanblkhd);
}
/*
* Disassociate a buffer from a vnode.
*/
+void
brelvp(bp)
register struct buf *bp;
{
- struct buf *bq;
struct vnode *vp;
if (bp->b_vp == (struct vnode *) 0)
/*
* Delete from old vnode list, if on one.
*/
- if (bp->b_blockb) {
- if (bq = bp->b_blockf)
- bq->b_blockb = bp->b_blockb;
- *bp->b_blockb = bq;
- bp->b_blockf = NULL;
- bp->b_blockb = NULL;
- }
+ if (bp->b_vnbufs.le_next != NOLIST)
+ bufremvn(bp);
vp = bp->b_vp;
bp->b_vp = (struct vnode *) 0;
HOLDRELE(vp);
* Used to assign file specific control information
* (indirect blocks) to the vnode to which they belong.
*/
+void
reassignbuf(bp, newvp)
register struct buf *bp;
register struct vnode *newvp;
{
- register struct buf *bq, **listheadp;
+ register struct buflists *listheadp;
- if (newvp == NULL)
- panic("reassignbuf: NULL");
+ if (newvp == NULL) {
+ printf("reassignbuf: NULL");
+ return;
+ }
/*
* Delete from old vnode list, if on one.
*/
- if (bp->b_blockb) {
- if (bq = bp->b_blockf)
- bq->b_blockb = bp->b_blockb;
- *bp->b_blockb = bq;
- }
+ if (bp->b_vnbufs.le_next != NOLIST)
+ bufremvn(bp);
/*
* If dirty, put on list of dirty buffers;
* otherwise insert onto list of clean buffers.
listheadp = &newvp->v_dirtyblkhd;
else
listheadp = &newvp->v_cleanblkhd;
- if (bq = *listheadp)
- bq->b_blockb = &bp->b_blockf;
- bp->b_blockf = bq;
- bp->b_blockb = listheadp;
- *listheadp = bp;
+ bufinsvn(bp, listheadp);
}
/*
* Used for root filesystem, argdev, and swap areas.
* Also used for memory file system special devices.
*/
+int
bdevvp(dev, vpp)
dev_t dev;
struct vnode **vpp;
struct vnode *nvp;
int error;
- if (dev == NODEV)
- return (0);
- error = getnewvnode(VT_NON, (struct mount *)0, &spec_vnodeops, &nvp);
+ if (dev == NODEV) {
+ *vpp = NULLVP;
+ return (ENODEV);
+ }
+ error = getnewvnode(VT_NON, (struct mount *)0, spec_vnodeop_p, &nvp);
if (error) {
- *vpp = 0;
+ *vpp = NULLVP;
return (error);
}
vp = nvp;
dev_t nvp_rdev;
struct mount *mp;
{
- register struct vnode *vp;
+ struct proc *p = curproc; /* XXX */
+ struct vnode *vp;
struct vnode **vpp;
if (nvp->v_type != VBLK && nvp->v_type != VCHR)
vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
+ simple_lock(&spechash_slock);
for (vp = *vpp; vp; vp = vp->v_specnext) {
if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type)
continue;
/*
* Alias, but not in use, so flush it out.
*/
+ simple_lock(&vp->v_interlock);
if (vp->v_usecount == 0) {
- vgone(vp);
+ simple_unlock(&spechash_slock);
+ vgonel(vp, p);
goto loop;
}
- if (vget(vp))
+ if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) {
+ simple_unlock(&spechash_slock);
goto loop;
+ }
break;
}
if (vp == NULL || vp->v_tag != VT_NON) {
nvp->v_hashchain = vpp;
nvp->v_specnext = *vpp;
nvp->v_specflags = 0;
+ simple_unlock(&spechash_slock);
*vpp = nvp;
- if (vp != NULL) {
+ if (vp != NULLVP) {
nvp->v_flag |= VALIASED;
vp->v_flag |= VALIASED;
vput(vp);
}
return (NULLVP);
}
- VOP_UNLOCK(vp);
- vclean(vp, 0);
+ simple_unlock(&spechash_slock);
+ VOP_UNLOCK(vp, 0, p);
+ simple_lock(&vp->v_interlock);
+ vclean(vp, 0, p);
vp->v_op = nvp->v_op;
vp->v_tag = nvp->v_tag;
nvp->v_type = VNON;
* indicate that the vnode is no longer usable (possibly having
* been changed to a new file system type).
*/
-vget(vp)
- register struct vnode *vp;
+int
+vget(vp, flags, p)
+ struct vnode *vp;
+ int flags;
+ struct proc *p;
{
- register struct vnode *vq;
+ /*
+ * If the vnode is in the process of being cleaned out for
+ * another use, we wait for the cleaning to finish and then
+ * return failure. Cleaning is determined by checking that
+ * the VXLOCK flag is set.
+ */
+ if ((flags & LK_INTERLOCK) == 0)
+ simple_lock(&vp->v_interlock);
if (vp->v_flag & VXLOCK) {
vp->v_flag |= VXWANT;
- sleep((caddr_t)vp, PINOD);
- return (1);
+ simple_unlock(&vp->v_interlock);
+ tsleep((caddr_t)vp, PINOD, "vget", 0);
+ return (ENOENT);
}
if (vp->v_usecount == 0) {
- if (vq = vp->v_freef)
- vq->v_freeb = vp->v_freeb;
- else
- vfreet = vp->v_freeb;
- *vp->v_freeb = vq;
- vp->v_freef = NULL;
- vp->v_freeb = NULL;
+		simple_lock(&vnode_free_list_slock);
+		if (vp->v_freelist.tqe_next == (struct vnode *)0xdeadf ||
+		    vp->v_freelist.tqe_prev == (struct vnode **)0xdeadb)
+			panic("vget: not on queue");
+		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
+		vp->v_freelist.tqe_next = (struct vnode *)0xdeadf;
+		vp->v_freelist.tqe_prev = (struct vnode **)0xdeadb;
+		simple_unlock(&vnode_free_list_slock);
 	}
-	VREF(vp);
-	VOP_LOCK(vp);
+ vp->v_usecount++;
+ if (flags & LK_TYPE_MASK)
+ return (vn_lock(vp, flags | LK_INTERLOCK, p));
+ simple_unlock(&vp->v_interlock);
+ if (printcnt-- > 0) vprint("vget got", vp);
return (0);
}
+int bug_refs = 0;
+
/*
- * Vnode reference, just increment the count
+ * Stubs to use when there is no locking to be done on the underlying object.
+ *
+ * Getting a lock just clears the interlock if necessary.
*/
-void vref(vp)
+int
+vop_nolock(ap)
+ struct vop_lock_args /* {
+ struct vnode *a_vp;
+ int a_flags;
+ struct proc *a_p;
+ } */ *ap;
+{
+ struct vnode *vp = ap->a_vp;
+
+ /*
+ * Since we are not using the lock manager, we must clear
+ * the interlock here.
+ */
+ if (ap->a_flags & LK_INTERLOCK)
+ simple_unlock(&vp->v_interlock);
+ return (0);
+}
+
+/*
+ * Unlock has nothing to do.
+ */
+int
+vop_nounlock(ap)
+ struct vop_unlock_args /* {
+ struct vnode *a_vp;
+ int a_flags;
+ struct proc *a_p;
+ } */ *ap;
+{
+
+ return (0);
+}
+
+/*
+ * Nothing is ever locked.
+ */
+int
+vop_noislocked(ap)
+ struct vop_islocked_args /* {
+ struct vnode *a_vp;
+ } */ *ap;
+{
+
+ return (0);
+}
+
+/*
+ * Vnode reference.
+ */
+void
+vref(vp)
struct vnode *vp;
{
+ simple_lock(&vp->v_interlock);
+ if (vp->v_usecount <= 0)
+ panic("vref used where vget required");
+ if (vp->v_freelist.tqe_next != (struct vnode *)0xdeadf ||
+ vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb)
+ panic("vref: not free");
vp->v_usecount++;
+ simple_unlock(&vp->v_interlock);
+ if (printcnt-- > 0) vprint("vref get", vp);
+ if (vp->v_type != VBLK && curproc)
+ curproc->p_spare[0]++;
+ if (bug_refs)
+ vprint("vref: ");
}
/*
* vput(), just unlock and vrele()
*/
-void vput(vp)
- register struct vnode *vp;
+void
+vput(vp)
+ struct vnode *vp;
{
- VOP_UNLOCK(vp);
+ struct proc *p = curproc; /* XXX */
+
+ VOP_UNLOCK(vp, 0, p);
vrele(vp);
}
* Vnode release.
* If count drops to zero, call inactive routine and return to freelist.
*/
-void vrele(vp)
- register struct vnode *vp;
+void
+vrele(vp)
+ struct vnode *vp;
{
- struct proc *p = curproc; /* XXX */
+ struct proc *p = curproc; /* XXX */
#ifdef DIAGNOSTIC
if (vp == NULL)
panic("vrele: null vp");
#endif
+ simple_lock(&vp->v_interlock);
vp->v_usecount--;
- if (vp->v_usecount > 0)
+ if (printcnt-- > 0) vprint("vrele put", vp);
+ if (vp->v_type != VBLK && curproc)
+ curproc->p_spare[0]--;
+ if (bug_refs)
+ vprint("vref: ");
+ if (vp->v_usecount > 0) {
+ simple_unlock(&vp->v_interlock);
return;
+ }
#ifdef DIAGNOSTIC
- if (vp->v_usecount != 0 || vp->v_writecount != 0) {
+ if (vp->v_usecount < 0 || vp->v_writecount != 0) {
vprint("vrele: bad ref count", vp);
panic("vrele: ref cnt");
}
#endif
- if (vfreeh == NULLVP) {
- /*
- * insert into empty list
- */
- vfreeh = vp;
- vp->v_freeb = &vfreeh;
- } else {
- /*
- * insert at tail of list
- */
- *vfreet = vp;
- vp->v_freeb = vfreet;
- }
- vp->v_freef = NULL;
- vfreet = &vp->v_freef;
- VOP_INACTIVE(vp, p);
+ /*
+ * insert at tail of LRU list
+ */
+ simple_lock(&vnode_free_list_slock);
+ if (vp->v_freelist.tqe_next != (struct vnode *)0xdeadf ||
+ vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb)
+ panic("vrele: not free");
+ TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
+ simple_unlock(&vnode_free_list_slock);
+ if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, p) == 0)
+ VOP_INACTIVE(vp, p);
}
+#ifdef DIAGNOSTIC
/*
* Page or buffer structure gets a reference.
*/
+void
vhold(vp)
register struct vnode *vp;
{
+ simple_lock(&vp->v_interlock);
vp->v_holdcnt++;
+ simple_unlock(&vp->v_interlock);
}
/*
* Page or buffer structure frees a reference.
*/
+void
holdrele(vp)
register struct vnode *vp;
{
+ simple_lock(&vp->v_interlock);
if (vp->v_holdcnt <= 0)
panic("holdrele: holdcnt");
vp->v_holdcnt--;
+ simple_unlock(&vp->v_interlock);
}
+#endif /* DIAGNOSTIC */
/*
* Remove any vnodes in the vnode table belonging to mount point mp.
* system error). If MNT_FORCE is specified, detach any active vnodes
* that are found.
*/
-int busyprt = 0; /* patch to print out busy vnodes */
+#ifdef DIAGNOSTIC
+int busyprt = 0; /* print out busy vnodes */
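+/* debug1 exports busyprt through the debug.* sysctl tree */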
+struct ctldebug debug1 = { "busyprt", &busyprt };
+#endif
+int
vflush(mp, skipvp, flags)
struct mount *mp;
struct vnode *skipvp;
int flags;
{
- register struct vnode *vp, *nvp;
+ struct proc *p = curproc; /* XXX */
+ struct vnode *vp, *nvp;
int busy = 0;
+#ifdef DIAGNOSTIC
if ((mp->mnt_flag & MNT_MPBUSY) == 0)
panic("vflush: not busy");
+#endif
+
+ simple_lock(&mntvnode_slock);
loop:
- for (vp = mp->mnt_mounth; vp; vp = nvp) {
+ for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) {
if (vp->v_mount != mp)
goto loop;
- nvp = vp->v_mountf;
+ nvp = vp->v_mntvnodes.le_next;
/*
* Skip over a selected vnode.
*/
if (vp == skipvp)
continue;
+
+ simple_lock(&vp->v_interlock);
/*
* Skip over a vnodes marked VSYSTEM.
*/
- if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM))
+ if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
+ simple_unlock(&vp->v_interlock);
continue;
+ }
+ /*
+ * If WRITECLOSE is set, only flush out regular file
+ * vnodes open for writing.
+ */
+ if ((flags & WRITECLOSE) &&
+ (vp->v_writecount == 0 || vp->v_type != VREG)) {
+ simple_unlock(&vp->v_interlock);
+ continue;
+ }
/*
* With v_usecount == 0, all we need to do is clear
* out the vnode data structures and we are done.
*/
if (vp->v_usecount == 0) {
- vgone(vp);
+ simple_unlock(&mntvnode_slock);
+ vgonel(vp, p);
+ simple_lock(&mntvnode_slock);
continue;
}
/*
+ * If FORCECLOSE is set, forcibly close the vnode.
* For block or character devices, revert to an
* anonymous device. For all other files, just kill them.
*/
if (flags & FORCECLOSE) {
+ simple_unlock(&mntvnode_slock);
if (vp->v_type != VBLK && vp->v_type != VCHR) {
- vgone(vp);
+ vgonel(vp, p);
} else {
- vclean(vp, 0);
- vp->v_op = &spec_vnodeops;
+ vclean(vp, 0, p);
+ vp->v_op = spec_vnodeop_p;
insmntque(vp, (struct mount *)0);
}
+ simple_lock(&mntvnode_slock);
continue;
}
+#ifdef DIAGNOSTIC
if (busyprt)
vprint("vflush: busy vnode", vp);
+#endif
+ simple_unlock(&vp->v_interlock);
busy++;
}
+ simple_unlock(&mntvnode_slock);
if (busy)
return (EBUSY);
return (0);
/*
* Disassociate the underlying file system from a vnode.
+ * The vnode interlock is held on entry.
*/
-void vclean(vp, flags)
- register struct vnode *vp;
+static void
+vclean(vp, flags, p)
+ struct vnode *vp;
int flags;
+ struct proc *p;
{
- struct vnodeops *origops;
int active;
- struct proc *p = curproc; /* XXX */
/*
* Check to see if the vnode is in use.
* race against ourselves to recycle it.
*/
if (active = vp->v_usecount)
- VREF(vp);
+ vp->v_usecount++;
/*
* Prevent the vnode from being recycled or
* brought into use while we clean it out.
* have the object locked while it cleans it out. The VOP_LOCK
* ensures that the VOP_INACTIVE routine is done with its work.
* For active vnodes, it ensures that no other activity can
- * occur while the buffer list is being cleaned out.
+ * occur while the underlying object is being cleaned out.
*/
- VOP_LOCK(vp);
- if (flags & DOCLOSE)
- vinvalbuf(vp, 1);
+ VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, p);
/*
- * Prevent any further operations on the vnode from
- * being passed through to the old file system.
+ * Clean out any buffers associated with the vnode.
*/
- origops = vp->v_op;
- vp->v_op = &dead_vnodeops;
- vp->v_tag = VT_NON;
+ if (flags & DOCLOSE)
+ vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
/*
- * If purging an active vnode, it must be unlocked, closed,
- * and deactivated before being reclaimed.
+ * If purging an active vnode, it must be closed and
+ * deactivated before being reclaimed. Note that the
+ * VOP_INACTIVE will unlock the vnode.
*/
- (*(origops->vop_unlock))(vp);
if (active) {
if (flags & DOCLOSE)
- (*(origops->vop_close))(vp, IO_NDELAY, NOCRED, p);
- (*(origops->vop_inactive))(vp, p);
+ VOP_CLOSE(vp, IO_NDELAY, NOCRED, p);
+ VOP_INACTIVE(vp, p);
+ } else {
+ /*
+ * Any other processes trying to obtain this lock must first
+ * wait for VXLOCK to clear, then call the new lock operation.
+ */
+ VOP_UNLOCK(vp, 0, p);
}
/*
* Reclaim the vnode.
*/
- if ((*(origops->vop_reclaim))(vp))
+ if (VOP_RECLAIM(vp, p))
panic("vclean: cannot reclaim");
if (active)
vrele(vp);
+ cache_purge(vp);
+
/*
- * Done with purge, notify sleepers in vget of the grim news.
+ * Done with purge, notify sleepers of the grim news.
*/
+ vp->v_op = dead_vnodeop_p;
+ vp->v_tag = VT_NON;
vp->v_flag &= ~VXLOCK;
if (vp->v_flag & VXWANT) {
vp->v_flag &= ~VXWANT;
* Eliminate all activity associated with the requested vnode
* and with all vnodes aliased to the requested vnode.
*/
-void vgoneall(vp)
- register struct vnode *vp;
+int
+vop_revoke(ap)
+ struct vop_revoke_args /* {
+ struct vnode *a_vp;
+ int a_flags;
+ } */ *ap;
{
- register struct vnode *vq;
+ struct vnode *vp, *vq;
+ struct proc *p = curproc; /* XXX */
+
+#ifdef DIAGNOSTIC
+ if ((ap->a_flags & REVOKEALL) == 0)
+ panic("vop_revoke");
+#endif
+
+ vp = ap->a_vp;
+ simple_lock(&vp->v_interlock);
if (vp->v_flag & VALIASED) {
/*
*/
if (vp->v_flag & VXLOCK) {
vp->v_flag |= VXWANT;
- sleep((caddr_t)vp, PINOD);
- return;
+ simple_unlock(&vp->v_interlock);
+ tsleep((caddr_t)vp, PINOD, "vop_revokeall", 0);
+ return (0);
}
/*
* Ensure that vp will not be vgone'd while we
* are eliminating its aliases.
*/
vp->v_flag |= VXLOCK;
+ simple_unlock(&vp->v_interlock);
while (vp->v_flag & VALIASED) {
+ simple_lock(&spechash_slock);
for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
if (vq->v_rdev != vp->v_rdev ||
vq->v_type != vp->v_type || vp == vq)
continue;
+ simple_unlock(&spechash_slock);
vgone(vq);
break;
}
+ if (vq == NULLVP)
+ simple_unlock(&spechash_slock);
}
/*
* Remove the lock so that vgone below will
* really eliminate the vnode after which time
* vgone will awaken any sleepers.
*/
+ simple_lock(&vp->v_interlock);
vp->v_flag &= ~VXLOCK;
}
- vgone(vp);
+ vgonel(vp, p);
+ return (0);
+}
+
+/*
+ * Recycle an unused vnode to the front of the free list.
+ * Release the passed interlock if the vnode will be recycled.
+ */
+int
+vrecycle(vp, inter_lkp, p)
+ struct vnode *vp;
+ struct simplelock *inter_lkp;
+ struct proc *p;
+{
+
+ simple_lock(&vp->v_interlock);
+ if (vp->v_usecount == 0) {
+ if (inter_lkp)
+ simple_unlock(inter_lkp);
+ vgonel(vp, p);
+ return (1);
+ }
+ simple_unlock(&vp->v_interlock);
+ return (0);
}
/*
* Eliminate all activity associated with a vnode
* in preparation for reuse.
*/
-void vgone(vp)
- register struct vnode *vp;
+void
+vgone(vp)
+ struct vnode *vp;
{
- register struct vnode *vq;
+ struct proc *p = curproc; /* XXX */
+
+ simple_lock(&vp->v_interlock);
+ vgonel(vp, p);
+}
+
+/*
+ * vgone, with the vp interlock held.
+ */
+void
+vgonel(vp, p)
+ struct vnode *vp;
+ struct proc *p;
+{
+ struct vnode *vq;
struct vnode *vx;
- long count;
/*
* If a vgone (or vclean) is already in progress,
*/
if (vp->v_flag & VXLOCK) {
vp->v_flag |= VXWANT;
- sleep((caddr_t)vp, PINOD);
+ simple_unlock(&vp->v_interlock);
+ tsleep((caddr_t)vp, PINOD, "vgone", 0);
return;
}
/*
* Clean out the filesystem specific data.
*/
- vclean(vp, DOCLOSE);
+ vclean(vp, DOCLOSE, p);
/*
* Delete from old mount point vnode list, if on one.
*/
- if (vp->v_mountb) {
- if (vq = vp->v_mountf)
- vq->v_mountb = vp->v_mountb;
- *vp->v_mountb = vq;
- vp->v_mountf = NULL;
- vp->v_mountb = NULL;
- vp->v_mount = NULL;
- }
+ if (vp->v_mount != NULL)
+ insmntque(vp, (struct mount *)0);
/*
- * If special device, remove it from special device alias list.
+ * If special device, remove it from special device alias list
+ * if it is on one.
*/
- if (vp->v_type == VBLK || vp->v_type == VCHR) {
+ if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
+ simple_lock(&spechash_slock);
if (*vp->v_hashchain == vp) {
*vp->v_hashchain = vp->v_specnext;
} else {
panic("missing bdev");
}
if (vp->v_flag & VALIASED) {
- count = 0;
+ vx = NULL;
for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
if (vq->v_rdev != vp->v_rdev ||
vq->v_type != vp->v_type)
continue;
- count++;
+ if (vx)
+ break;
vx = vq;
}
- if (count == 0)
+ if (vx == NULL)
panic("missing alias");
- if (count == 1)
+ if (vq == NULL)
vx->v_flag &= ~VALIASED;
vp->v_flag &= ~VALIASED;
}
+ simple_unlock(&spechash_slock);
FREE(vp->v_specinfo, M_VNODE);
vp->v_specinfo = NULL;
}
/*
- * If it is on the freelist, move it to the head of the list.
+ * If it is on the freelist and not already at the head,
+ * move it to the head of the list. The test of the back
+ * pointer and the reference count of zero is because
+ * it will be removed from the free list by getnewvnode,
+ * but will not have its reference count incremented until
+ * after calling vgone. If the reference count were
+ * incremented first, vgone would (incorrectly) try to
+ * close the previous instance of the underlying object.
+ * So, the back pointer is explicitly set to `0xdeadb' in
+ * getnewvnode after removing it from the freelist to ensure
+ * that we do not try to move it here.
*/
- if (vp->v_freeb) {
- if (vq = vp->v_freef)
- vq->v_freeb = vp->v_freeb;
- else
- vfreet = vp->v_freeb;
- *vp->v_freeb = vq;
- vp->v_freef = vfreeh;
- vp->v_freeb = &vfreeh;
- vfreeh->v_freeb = &vp->v_freef;
- vfreeh = vp;
+ if (vp->v_usecount == 0) {
+ simple_lock(&vnode_free_list_slock);
+ if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb) &&
+ vnode_free_list.tqh_first != vp) {
+ TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
+ TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
+ }
+ simple_unlock(&vnode_free_list_slock);
}
vp->v_type = VBAD;
}
/*
* Lookup a vnode by device number.
*/
+int
vfinddev(dev, type, vpp)
dev_t dev;
enum vtype type;
struct vnode **vpp;
{
- register struct vnode *vp;
+ struct vnode *vp;
+ int rc = 0;
+ simple_lock(&spechash_slock);
for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
if (dev != vp->v_rdev || type != vp->v_type)
continue;
*vpp = vp;
- return (0);
+ rc = 1;
+ break;
}
- return (1);
+ simple_unlock(&spechash_slock);
+ return (rc);
}
/*
* Calculate the total number of references to a special device.
*/
+int
vcount(vp)
- register struct vnode *vp;
+ struct vnode *vp;
{
- register struct vnode *vq;
+ struct vnode *vq, *vnext;
int count;
+loop:
if ((vp->v_flag & VALIASED) == 0)
return (vp->v_usecount);
-loop:
- for (count = 0, vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
+ simple_lock(&spechash_slock);
+ for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
+ vnext = vq->v_specnext;
if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
continue;
/*
* Alias, but not in use, so flush it out.
*/
- if (vq->v_usecount == 0) {
+ if (vq->v_usecount == 0 && vq != vp) {
+ simple_unlock(&spechash_slock);
vgone(vq);
goto loop;
}
count += vq->v_usecount;
}
+ simple_unlock(&spechash_slock);
return (count);
}
static char *typename[] =
{ "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };
+void
vprint(label, vp)
char *label;
register struct vnode *vp;
if (label != NULL)
printf("%s: ", label);
+ printf("num %d ", vp->v_spare[0]);
printf("type %s, usecount %d, writecount %d, refcount %d,",
typename[vp->v_type], vp->v_usecount, vp->v_writecount,
vp->v_holdcnt);
strcat(buf, "|VALIASED");
if (buf[0] != '\0')
printf(" flags (%s)", &buf[1]);
- printf("\n\t");
- VOP_PRINT(vp);
+ if (vp->v_data == NULL) {
+ printf("\n");
+ } else {
+ printf("\n\t");
+ VOP_PRINT(vp);
+ }
}
#ifdef DEBUG
* List all of the locked vnodes in the system.
* Called when debugging the kernel.
*/
+void
printlockedvnodes()
{
register struct mount *mp;
register struct vnode *vp;
printf("Locked vnodes\n");
- mp = rootfs;
- do {
- for (vp = mp->mnt_mounth; vp; vp = vp->v_mountf)
+ for (mp = mountlist.cqh_first; mp != (void *)&mountlist;
+ mp = mp->mnt_list.cqe_next) {
+ for (vp = mp->mnt_vnodelist.lh_first;
+ vp != NULL;
+ vp = vp->v_mntvnodes.le_next) {
if (VOP_ISLOCKED(vp))
vprint((char *)0, vp);
- mp = mp->mnt_next;
- } while (mp != rootfs);
+ }
+ }
}
#endif
+/*
+ * Top level filesystem related information gathering.
+ */
+int
+vfs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
+ int *name;
+ u_int namelen;
+ void *oldp;
+ size_t *oldlenp;
+ void *newp;
+ size_t newlen;
+ struct proc *p;
+{
+ struct ctldebug *cdp;
+ struct vfsconf *vfsp;
+
+ /* all sysctl names at this level are at least name and field */
+ if (namelen < 2)
+ return (ENOTDIR); /* overloaded */
+ if (name[0] != VFS_GENERIC) {
+ for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
+ if (vfsp->vfc_typenum == name[0])
+ break;
+ if (vfsp == NULL)
+ return (EOPNOTSUPP);
+ return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
+ oldp, oldlenp, newp, newlen, p));
+ }
+ switch (name[1]) {
+ case VFS_MAXTYPENUM:
+ return (sysctl_rdint(oldp, oldlenp, newp, maxvfsconf));
+ case VFS_CONF:
+ if (namelen < 3)
+ return (ENOTDIR); /* overloaded */
+ for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
+ if (vfsp->vfc_typenum == name[2])
+ break;
+ if (vfsp == NULL)
+ return (EOPNOTSUPP);
+ return (sysctl_rdstruct(oldp, oldlenp, newp, vfsp,
+ sizeof(struct vfsconf)));
+ }
+ return (EOPNOTSUPP);
+}
+
int kinfo_vdebug = 1;
int kinfo_vgetfailed;
#define KINFO_VNODESLOP 10
/*
- * Dump vnode list (via kinfo).
+ * Dump vnode list (via sysctl).
* Copyout address of vnode followed by vnode.
*/
/* ARGSUSED */
-kinfo_vnode(op, where, acopysize, arg, aneeded)
- int op;
+int
+sysctl_vnode(where, sizep)
char *where;
- int *acopysize, arg, *aneeded;
+ size_t *sizep;
{
- register struct mount *mp = rootfs;
- struct mount *omp;
- struct vnode *vp;
+ register struct mount *mp, *nmp;
+ struct vnode *nvp, *vp;
register char *bp = where, *savebp;
- char *ewhere = where + *acopysize;
+ char *ewhere;
int error;
#define VPTRSZ sizeof (struct vnode *)
#define VNODESZ sizeof (struct vnode)
if (where == NULL) {
- *aneeded = (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ);
+ *sizep = (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ);
return (0);
}
+ ewhere = where + *sizep;
- do {
- if (vfs_busy(mp)) {
- mp = mp->mnt_next;
+ for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
+ nmp = mp->mnt_list.cqe_next;
+ if (vfs_busy(mp))
continue;
- }
savebp = bp;
again:
- for (vp = mp->mnt_mounth; vp; vp = vp->v_mountf) {
+ simple_lock(&mntvnode_slock);
+ for (vp = mp->mnt_vnodelist.lh_first;
+ vp != NULL;
+ vp = nvp) {
/*
* Check that the vp is still associated with
* this filesystem. RACE: could have been
* recycled onto the same filesystem.
*/
if (vp->v_mount != mp) {
+ simple_unlock(&mntvnode_slock);
if (kinfo_vdebug)
printf("kinfo: vp changed\n");
bp = savebp;
goto again;
}
- if ((bp + VPTRSZ + VNODESZ <= ewhere) &&
- ((error = copyout((caddr_t)&vp, bp, VPTRSZ)) ||
- (error = copyout((caddr_t)vp, bp + VPTRSZ,
- VNODESZ))))
+ nvp = vp->v_mntvnodes.le_next;
+ if (bp + VPTRSZ + VNODESZ > ewhere) {
+ simple_unlock(&mntvnode_slock);
+ *sizep = bp - where;
+ return (ENOMEM);
+ }
+ simple_unlock(&mntvnode_slock);
+ if ((error = copyout((caddr_t)&vp, bp, VPTRSZ)) ||
+ (error = copyout((caddr_t)vp, bp + VPTRSZ, VNODESZ)))
return (error);
bp += VPTRSZ + VNODESZ;
+ simple_lock(&mntvnode_slock);
}
- omp = mp;
- mp = mp->mnt_next;
- vfs_unbusy(omp);
- } while (mp != rootfs);
-
- *aneeded = bp - where;
- if (bp > ewhere)
- *acopysize = ewhere - where;
- else
- *acopysize = bp - where;
+ simple_unlock(&mntvnode_slock);
+ vfs_unbusy(mp);
+ }
+
+ *sizep = bp - where;
+ return (0);
+}
+
+/*
+ * Check to see if a filesystem is mounted on a block device.
+ */
+int
+vfs_mountedon(vp)
+ struct vnode *vp;
+{
+ struct vnode *vq;
+ int error = 0;
+
+ if (vp->v_specflags & SI_MOUNTEDON)
+ return (EBUSY);
+ if (vp->v_flag & VALIASED) {
+ simple_lock(&spechash_slock);
+ for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
+ if (vq->v_rdev != vp->v_rdev ||
+ vq->v_type != vp->v_type)
+ continue;
+ if (vq->v_specflags & SI_MOUNTEDON) {
+ error = EBUSY;
+ break;
+ }
+ }
+ simple_unlock(&spechash_slock);
+ }
+ return (error);
+}
+
+/*
+ * Unmount all filesystems. The list is traversed in reverse order
+ * of mounting to avoid dependencies.
+ */
+void
+vfs_unmountall()
+{
+ struct mount *mp, *nmp;
+
+ for (mp = mountlist.cqh_last; mp != (void *)&mountlist; mp = nmp) {
+ nmp = mp->mnt_list.cqe_prev;
+ (void) dounmount(mp, MNT_FORCE, &proc0);
+ }
+}
+
+/*
+ * Build hash lists of net addresses and hang them off the mount point.
+ * Called by ufs_mount() to set up the lists of export addresses.
+ */
+static int
+vfs_hang_addrlist(mp, nep, argp)
+ struct mount *mp;
+ struct netexport *nep;
+ struct export_args *argp;
+{
+ register struct netcred *np;
+ register struct radix_node_head *rnh;
+ register int i;
+ struct radix_node *rn;
+ struct sockaddr *saddr, *smask = 0;
+ struct domain *dom;
+ int error;
+
+ if (argp->ex_addrlen == 0) {
+ if (mp->mnt_flag & MNT_DEFEXPORTED)
+ return (EPERM);
+ np = &nep->ne_defexported;
+ np->netc_exflags = argp->ex_flags;
+ np->netc_anon = argp->ex_anon;
+ np->netc_anon.cr_ref = 1;
+ mp->mnt_flag |= MNT_DEFEXPORTED;
+ return (0);
+ }
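+	/*
+	 * Allocate a single chunk holding the netcred followed immediately
+	 * by the network address and (optionally) its mask, then copy both
+	 * in from user space.
+	 */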
+ i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
+ np = (struct netcred *)malloc(i, M_NETADDR, M_WAITOK);
+ bzero((caddr_t)np, i);
+ saddr = (struct sockaddr *)(np + 1);
+ if (error = copyin(argp->ex_addr, (caddr_t)saddr, argp->ex_addrlen))
+ goto out;
+ if (saddr->sa_len > argp->ex_addrlen)
+ saddr->sa_len = argp->ex_addrlen;
+ if (argp->ex_masklen) {
+ smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
+		error = copyin(argp->ex_mask, (caddr_t)smask, argp->ex_masklen);
+ if (error)
+ goto out;
+ if (smask->sa_len > argp->ex_masklen)
+ smask->sa_len = argp->ex_masklen;
+ }
+ i = saddr->sa_family;
+ if ((rnh = nep->ne_rtable[i]) == 0) {
+ /*
+ * Seems silly to initialize every AF when most are not
+ * used, do so on demand here
+ */
+ for (dom = domains; dom; dom = dom->dom_next)
+ if (dom->dom_family == i && dom->dom_rtattach) {
+ dom->dom_rtattach((void **)&nep->ne_rtable[i],
+ dom->dom_rtoffset);
+ break;
+ }
+ if ((rnh = nep->ne_rtable[i]) == 0) {
+ error = ENOBUFS;
+ goto out;
+ }
+ }
+ rn = (*rnh->rnh_addaddr)((caddr_t)saddr, (caddr_t)smask, rnh,
+ np->netc_rnodes);
+ if (rn == 0) {
+ /*
+ * One of the reasons that rnh_addaddr may fail is that
+ * the entry already exists. To check for this case, we
+ * look up the entry to see if it is there. If so, we
+ * do not need to make a new entry but do return success.
+ */
+ free(np, M_NETADDR);
+ rn = (*rnh->rnh_matchaddr)((caddr_t)saddr, rnh);
+ if (rn != 0 && (rn->rn_flags & RNF_ROOT) == 0 &&
+ ((struct netcred *)rn)->netc_exflags == argp->ex_flags &&
+ !bcmp((caddr_t)&((struct netcred *)rn)->netc_anon,
+ (caddr_t)&argp->ex_anon, sizeof(struct ucred)))
+ return (0);
+ return (EPERM);
+ }
+ np->netc_exflags = argp->ex_flags;
+ np->netc_anon = argp->ex_anon;
+ np->netc_anon.cr_ref = 1;
return (0);
+out:
+ free(np, M_NETADDR);
+ return (error);
+}
+
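+/*
+ * rnh_walktree callback: delete one exported-address entry from the
+ * radix tree and free its netcred.
+ */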
+/* ARGSUSED */
+static int
+vfs_free_netcred(rn, w)
+ struct radix_node *rn;
+ caddr_t w;
+{
+ register struct radix_node_head *rnh = (struct radix_node_head *)w;
+
+ (*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh);
+ free((caddr_t)rn, M_NETADDR);
+ return (0);
+}
+
+/*
+ * Free the net address hash lists that are hanging off the mount points.
+ */
+static void
+vfs_free_addrlist(nep)
+ struct netexport *nep;
+{
+ register int i;
+ register struct radix_node_head *rnh;
+
+ for (i = 0; i <= AF_MAX; i++)
+ if (rnh = nep->ne_rtable[i]) {
+ (*rnh->rnh_walktree)(rnh, vfs_free_netcred,
+ (caddr_t)rnh);
+ free((caddr_t)rnh, M_RTABLE);
+ nep->ne_rtable[i] = 0;
+ }
+}
+
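+/*
+ * Update the export list for a mount point: MNT_DELEXPORT tears down any
+ * existing export information, and MNT_EXPORTED installs the address
+ * list described by argp.
+ */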
+int
+vfs_export(mp, nep, argp)
+ struct mount *mp;
+ struct netexport *nep;
+ struct export_args *argp;
+{
+ int error;
+
+ if (argp->ex_flags & MNT_DELEXPORT) {
+ vfs_free_addrlist(nep);
+ mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
+ }
+ if (argp->ex_flags & MNT_EXPORTED) {
+ if (error = vfs_hang_addrlist(mp, nep, argp))
+ return (error);
+ mp->mnt_flag |= MNT_EXPORTED;
+ }
+ return (0);
+}
+
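+/*
+ * Find the export credentials that apply to a request arriving from the
+ * address in nam, falling back to the default export when no per-address
+ * entry matches.
+ */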
+struct netcred *
+vfs_export_lookup(mp, nep, nam)
+ register struct mount *mp;
+ struct netexport *nep;
+ struct mbuf *nam;
+{
+ register struct netcred *np;
+ register struct radix_node_head *rnh;
+ struct sockaddr *saddr;
+
+ np = NULL;
+ if (mp->mnt_flag & MNT_EXPORTED) {
+ /*
+ * Lookup in the export list first.
+ */
+ if (nam != NULL) {
+ saddr = mtod(nam, struct sockaddr *);
+ rnh = nep->ne_rtable[saddr->sa_family];
+ if (rnh != NULL) {
+ np = (struct netcred *)
+ (*rnh->rnh_matchaddr)((caddr_t)saddr,
+ rnh);
+ if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
+ np = NULL;
+ }
+ }
+ /*
+ * If no address match, use the default if it exists.
+ */
+ if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
+ np = &nep->ne_defexported;
+ }
+ return (np);
}