*
* %sccs.include.redist.c%
*
- * @(#)union.h 1.4 (Berkeley) %G%
+ * @(#)union.h 1.5 (Berkeley) %G%
*/
struct union_args {
#define UN_WANT 0x01
#define UN_LOCKED 0x02
-extern int union_allocvp __P((struct vnode **, struct mount *, struct vnode *, struct vnode *, struct componentname *, struct vnode *, struct vnode *));
+extern int union_allocvp __P((struct vnode **, struct mount *,
+ struct vnode *, struct vnode *,
+ struct componentname *, struct vnode *,
+ struct vnode *));
+extern int union_copyfile __P((struct proc *, struct ucred *,
+ struct vnode *, struct vnode *));
+extern int union_vn_create __P((struct vnode **, struct union_node *,
+ int, struct proc *));
#define MOUNTTOUNIONMOUNT(mp) ((struct union_mount *)((mp)->mnt_data))
#define VTOUNION(vp) ((struct union_node *)(vp)->v_data)
*
* %sccs.include.redist.c%
*
- * @(#)union_subr.c 1.4 (Berkeley) %G%
+ * @(#)union_subr.c 1.5 (Berkeley) %G%
*/
#include <sys/param.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/malloc.h>
+#include <sys/file.h>
#include "union.h" /*<miscfs/union/union.h>*/
#ifdef DIAGNOSTIC
vp->v_data = 0;
return (0);
}
+
+/*
+ * copyfile. copy the vnode (fvp) to the vnode (tvp)
+ * using a sequence of reads and writes. both (fvp)
+ * and (tvp) are locked on entry and exit.
+ */
+int
+union_copyfile(p, cred, fvp, tvp)
+ struct proc *p;
+ struct ucred *cred;
+ struct vnode *fvp;
+ struct vnode *tvp;
+{
+ char *buf;
+ struct uio uio;
+ struct iovec iov;
+ int error = 0;
+
+ /*
+ * strategy:
+ * allocate a buffer of size MAXBSIZE.
+ * loop doing reads and writes, keeping track
+ * of the current uio offset.
+ * give up at the first sign of trouble.
+ */
+
+ uio.uio_procp = p;
+ uio.uio_segflg = UIO_SYSSPACE;
+ uio.uio_offset = 0;
+
+ /*
+ * LEASE_CHECK may sleep, so both vnodes are briefly
+ * unlocked around each check and then re-locked to
+ * restore the locked-on-entry invariant.
+ */
+ VOP_UNLOCK(fvp); /* XXX */
+ LEASE_CHECK(fvp, p, cred, LEASE_READ);
+ VOP_LOCK(fvp); /* XXX */
+ VOP_UNLOCK(tvp); /* XXX */
+ LEASE_CHECK(tvp, p, cred, LEASE_WRITE);
+ VOP_LOCK(tvp); /* XXX */
+
+ buf = malloc(MAXBSIZE, M_TEMP, M_WAITOK);
+
+ /* ugly loop follows... */
+ do {
+ off_t offset = uio.uio_offset;
+
+ uio.uio_iov = &iov;
+ uio.uio_iovcnt = 1;
+ iov.iov_base = buf;
+ iov.iov_len = MAXBSIZE;
+ uio.uio_resid = iov.iov_len;
+ uio.uio_rw = UIO_READ;
+ error = VOP_READ(fvp, &uio, 0, cred);
+
+ if (error == 0) {
+ /*
+ * Re-aim the uio at the bytes just read and
+ * rewind the offset so the write lands at the
+ * same file position the read came from.
+ */
+ uio.uio_iov = &iov;
+ uio.uio_iovcnt = 1;
+ iov.iov_base = buf;
+ iov.iov_len = MAXBSIZE - uio.uio_resid;
+ uio.uio_offset = offset;
+ uio.uio_rw = UIO_WRITE;
+ uio.uio_resid = iov.iov_len;
+
+ /* a zero-length read means EOF: all done */
+ if (uio.uio_resid == 0)
+ break;
+
+ /* the write may be partial; push until the chunk is out */
+ do {
+ error = VOP_WRITE(tvp, &uio, 0, cred);
+ } while ((uio.uio_resid > 0) && (error == 0));
+ }
+
+ } while (error == 0);
+
+ free(buf, M_TEMP);
+ return (error);
+}
+
+/*
+ * union_vn_create: creates and opens a new shadow file
+ * on the upper union layer. this function is similar
+ * in spirit to calling vn_open but it avoids calling namei().
+ * the problem with calling namei is that a) it locks too many
+ * things, and b) it doesn't start at the "right" directory,
+ * whereas relookup is told where to start.
+ */
+int
+union_vn_create(vpp, un, cmode, p)
+ struct vnode **vpp;
+ struct union_node *un;
+ int cmode;
+ struct proc *p;
+{
+ struct vnode *vp;
+ struct ucred *cred = p->p_ucred;
+ struct vattr vat;
+ struct vattr *vap = &vat;
+ int fmode = FFLAGS(O_WRONLY|O_CREAT|O_TRUNC|O_EXCL);
+ int error;
+ int hash;
+ char *cp;
+ struct componentname cn;
+
+ *vpp = NULLVP;
+
+ /*
+ * Build a fake componentname for relookup/VOP_CREATE.
+ * Allocate namelen+1 bytes so the NUL terminator copied
+ * by the bcopy below fits inside the buffer.
+ */
+ cn.cn_namelen = strlen(un->un_path);
+ cn.cn_pnbuf = (caddr_t) malloc(cn.cn_namelen+1, M_NAMEI, M_WAITOK);
+ bcopy(un->un_path, cn.cn_pnbuf, cn.cn_namelen+1);
+ cn.cn_nameiop = CREATE;
+ cn.cn_flags = (LOCKLEAF|LOCKPARENT|HASBUF|SAVENAME|ISLASTCN);
+ cn.cn_proc = p;
+ cn.cn_cred = p->p_ucred;
+ cn.cn_nameptr = cn.cn_pnbuf;
+ for (hash = 0, cp = cn.cn_nameptr; *cp != 0 && *cp != '/'; cp++)
+ hash += (unsigned char)*cp;
+ cn.cn_hash = hash;
+ cn.cn_consume = 0;
+
+ if (error = relookup(un->un_dirvp, &vp, &cn))
+ return (error);
+ if (vp == NULLVP) {
+ VATTR_NULL(vap);
+ vap->va_type = VREG;
+ vap->va_mode = cmode;
+ LEASE_CHECK(un->un_dirvp, p, cred, LEASE_WRITE);
+ if (error = VOP_CREATE(un->un_dirvp, &vp,
+ &cn, vap))
+ return (error);
+ } else {
+ /*
+ * Name already exists.  Release the vnode obtained
+ * by relookup and fail with EEXIST here; do NOT fall
+ * through to "bad", which would vput(vp) a second
+ * time after it has already been released.
+ */
+ VOP_ABORTOP(un->un_dirvp, &cn);
+ if (un->un_dirvp == vp)
+ vrele(un->un_dirvp);
+ else
+ vput(vp);
+ return (EEXIST);
+ }
+
+ /* only regular files can be shadow-copied */
+ if (vp->v_type != VREG) {
+ error = EOPNOTSUPP;
+ goto bad;
+ }
+
+ VOP_UNLOCK(vp); /* XXX */
+ LEASE_CHECK(vp, p, cred, LEASE_WRITE);
+ VOP_LOCK(vp); /* XXX */
+ VATTR_NULL(vap);
+ vap->va_size = 0;
+ if (error = VOP_SETATTR(vp, vap, cred, p))
+ goto bad;
+
+ if (error = VOP_OPEN(vp, fmode, cred, p))
+ goto bad;
+
+ vp->v_writecount++;
+ *vpp = vp;
+ return (0);
+bad:
+ vput(vp);
+ return (error);
+}
*
* %sccs.include.redist.c%
*
- * @(#)union_vfsops.c 1.4 (Berkeley) %G%
+ * @(#)union_vfsops.c 1.5 (Berkeley) %G%
*/
/*
- * Null Layer
- * (See union_vnops.c for a description of what this does.)
+ * Union Layer
*/
#include <sys/param.h>
/*
* Update is a no-op
*/
- if (mp->mnt_flag & MNT_UPDATE)
+ if (mp->mnt_flag & MNT_UPDATE) {
+ /*
+ * Need to provide.
+ * 1. a way to convert between rdonly and rdwr mounts.
+ * 2. support for nfs exports.
+ */
return (EOPNOTSUPP);
+ }
/*
* Get argument
um->um_cred = crdup(p->p_ucred);
um->um_cred->cr_uid = p->p_cred->p_ruid;
- if ((lowerrootvp->v_mount->mnt_flag & MNT_LOCAL) ||
+ if ((lowerrootvp->v_mount->mnt_flag & MNT_LOCAL) &&
(upperrootvp->v_mount->mnt_flag & MNT_LOCAL))
mp->mnt_flag |= MNT_LOCAL;
+ /*
+ * Copy in the upper layer's RDONLY flag. This is for the benefit
+ * of lookup() which explicitly checks the flag, rather than asking
+ * the filesystem for its own opinion. This means that an update
+ * mount of the underlying filesystem to go from rdonly to rdwr
+ * will leave the unioned view as read-only.
+ */
+ mp->mnt_flag |= (upperrootvp->v_mount->mnt_flag & MNT_RDONLY);
mp->mnt_data = (qaddr_t) um;
getnewfsid(mp, MOUNT_UNION);
(void) copyinstr(path, mp->mnt_stat.f_mntonname, MNAMELEN - 1, &size);
bzero(mp->mnt_stat.f_mntonname + size, MNAMELEN - size);
- (void) copyinstr(args.target, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
- &size);
- bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
+ bcopy("union:", mp->mnt_stat.f_mntfromname, 6);
+ (void) copyinstr(args.target, mp->mnt_stat.f_mntfromname + 6,
+ MNAMELEN - 1 - 6, &size);
+ bzero(mp->mnt_stat.f_mntfromname + 6 + size, MNAMELEN - 6 - size);
#ifdef UNION_DIAGNOSTIC
printf("union_mount: upper %s, lower at %s\n",
mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntonname);
*
* %sccs.include.redist.c%
*
- * @(#)union_vnops.c 1.4 (Berkeley) %G%
+ * @(#)union_vnops.c 1.5 (Berkeley) %G%
*/
#include <sys/param.h>
int error;
struct vattr va;
struct proc *p = cnp->cn_proc;
+ struct componentname cn;
/*
* policy: when creating the shadow directory in the
* did the mount (um->um_cred).
*/
+ /*
+ * A new componentname structure must be faked up because
+ * there is no way to know where the upper level cnp came
+ * from or what it is being used for. This must duplicate
+ * some of the work done by NDINIT, some of the work done
+ * by namei, some of the work done by lookup and some of
+ * the work done by VOP_LOOKUP when given a CREATE flag.
+ * Conclusion: Horrible.
+ *
+ * The pathname buffer will be FREEed by VOP_MKDIR.
+ */
+ cn.cn_pnbuf = malloc(cnp->cn_namelen+1, M_NAMEI, M_WAITOK);
+ bcopy(cnp->cn_nameptr, cn.cn_pnbuf, cnp->cn_namelen+1);
+
+ cn.cn_nameiop = CREATE;
+ cn.cn_flags = HASBUF | SAVENAME | ISLASTCN;
+ cn.cn_proc = cnp->cn_proc;
+ cn.cn_cred = cnp->cn_cred;
+ cn.cn_nameptr = cn.cn_pnbuf;
+ cn.cn_namelen = cnp->cn_namelen;
+ cn.cn_hash = cnp->cn_hash;
+ cn.cn_consume = cnp->cn_consume;
+
VATTR_NULL(&va);
va.va_type = VDIR;
va.va_mode = UN_DIRMODE &~ p->p_fd->fd_cmask;
- VOP_UNLOCK(dvp);
+
+ /* LEASE_CHECK: dvp is locked */
LEASE_CHECK(dvp, p, p->p_ucred, LEASE_WRITE);
+
VREF(dvp);
- VOP_LOCK(dvp);
- error = VOP_MKDIR(dvp, vpp, cnp, &va);
+ error = VOP_MKDIR(dvp, vpp, &cn, &va);
VOP_LOCK(dvp);
return (error);
}
struct vnode *tdvp;
struct mount *mp;
+ /*
+ * If stepping up the directory tree, check for going
+ * back across the mount point, in which case do what
+ * lookup would do by stepping back down the mount
+ * hierarchy.
+ */
if (cnp->cn_flags & ISDOTDOT) {
for (;;) {
if ((dvp->v_flag & VROOT) == 0 ||
if (error)
return (error);
+ /*
+ * If going back up the directory tree, then the parent directory
+ * will have been unlocked, unless lookup found the last
+ * component. In which case, re-lock the node here to allow
+ * it to be unlocked again (phew) in union_lookup.
+ */
+ if ((cnp->cn_flags & ISDOTDOT) && !(cnp->cn_flags & ISLASTCN))
+ VOP_LOCK(dvp);
+
dvp = tdvp;
+
+ /*
+ * Lastly check if the current node is a mount point in
+ * which case we walk up the mount hierarchy, making sure not to
+ * bump into the root of the mount tree (ie. dvp != udvp).
+ */
while (dvp != udvp && (dvp->v_type == VDIR) &&
(mp = dvp->v_mountedhere) &&
(cnp->cn_flags & NOCROSSMOUNT) == 0) {
struct union_node *dun = VTOUNION(dvp);
struct componentname *cnp = ap->a_cnp;
int lockparent = cnp->cn_flags & LOCKPARENT;
+ int rdonly = cnp->cn_flags & RDONLY;
cnp->cn_flags |= LOCKPARENT;
/* case 2. */
if (uerror != 0 /* && (lerror == 0) */ ) {
if (lowervp->v_type == VDIR) { /* case 2b. */
- if (uppervp != upperdvp)
- VOP_LOCK(upperdvp);
+ VOP_LOCK(upperdvp);
uerror = union_mkshadow(upperdvp, cnp, &uppervp);
if (uppervp != upperdvp)
VOP_UNLOCK(upperdvp);
if (lowervp)
vrele(lowervp);
} else {
- if (!lockparent && (*ap->a_vpp != dvp))
- VOP_UNLOCK(dvp);
+ if (*ap->a_vpp != dvp)
+ if (!lockparent || !(cnp->cn_flags & ISLASTCN))
+ VOP_UNLOCK(dvp);
}
return (error);
return (EROFS);
}
-/*
- * copyfile. copy the vnode (fvp) to the vnode (tvp)
- * using a sequence of reads and writes. both (fvp)
- * and (tvp) are locked on entry and exit.
- */
-static int
-union_copyfile(p, cred, fvp, tvp)
- struct proc *p;
- struct ucred *cred;
- struct vnode *fvp;
- struct vnode *tvp;
-{
- char *buf;
- struct uio uio;
- struct iovec iov;
- int error = 0;
- off_t offset;
-
- /*
- * strategy:
- * allocate a buffer of size MAXBSIZE.
- * loop doing reads and writes, keeping track
- * of the current uio offset.
- * give up at the first sign of trouble.
- */
-
- uio.uio_procp = p;
- uio.uio_segflg = UIO_SYSSPACE;
- offset = 0;
-
- VOP_UNLOCK(fvp); /* XXX */
- LEASE_CHECK(fvp, p, cred, LEASE_READ);
- VOP_LOCK(fvp); /* XXX */
- VOP_UNLOCK(tvp); /* XXX */
- LEASE_CHECK(tvp, p, cred, LEASE_WRITE);
- VOP_LOCK(tvp); /* XXX */
-
- buf = malloc(MAXBSIZE, M_TEMP, M_WAITOK);
- do {
- uio.uio_iov = &iov;
- uio.uio_iovcnt = 1;
- iov.iov_base = buf;
- iov.iov_len = MAXBSIZE;
- uio.uio_resid = iov.iov_len;
- uio.uio_offset = offset;
- uio.uio_rw = UIO_READ;
- error = VOP_READ(fvp, &uio, 0, cred);
-
- if (error == 0) {
- uio.uio_iov = &iov;
- uio.uio_iovcnt = 1;
- iov.iov_base = buf;
- iov.iov_len = MAXBSIZE - uio.uio_resid;
- uio.uio_rw = UIO_WRITE;
- uio.uio_resid = iov.iov_len;
- uio.uio_offset = offset;
-
- do {
- error = VOP_WRITE(tvp, &uio, 0, cred);
- } while (error == 0 && uio.uio_resid > 0);
- if (error == 0)
- offset = uio.uio_offset;
- }
- } while ((uio.uio_resid == 0) && (error == 0));
-
- free(buf, M_TEMP);
- return (error);
-}
-
int
union_open(ap)
struct vop_open_args /* {
if ((ap->a_mode & FWRITE) && (tvp->v_type == VREG)) {
struct nameidata nd;
struct filedesc *fdp = p->p_fd;
- int fmode;
+ struct vnode *vp;
+ /*int fmode;*/
int cmode;
/*
* XXX - perhaps shoudl re-lookup the node (once more
* with feeling) and simply open that. Who knows.
*/
+ /*
NDINIT(&nd, CREATE, 0, UIO_SYSSPACE, un->un_path, p);
fmode = (O_CREAT|O_TRUNC|O_EXCL);
+ */
cmode = UN_FILEMODE & ~fdp->fd_cmask;
- error = vn_open(&nd, fmode, cmode);
+ error = union_vn_create(&vp, un, cmode, p);
if (error)
return (error);
- un->un_uppervp = nd.ni_vp; /* XXX */
+ un->un_uppervp = vp; /* XXX */
/* at this point, uppervp is locked */
/*
* XXX - should not ignore errors
* from VOP_CLOSE
*/
- VOP_LOCK(un->un_lowervp);
+ VOP_LOCK(tvp);
error = VOP_OPEN(tvp, FREAD, cred, p);
if (error == 0) {
error = union_copyfile(p, cred,