/*
 * Copyright (c) 1992, 1993, 1994 The Regents of the University of California.
 * Copyright (c) 1992, 1993, 1994 Jan-Simon Pendry.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)union_vnops.c	8.18 (Berkeley) %G%
 */
#include <miscfs/union/union.h>
if (((un)->un_flags & UN_ULOCK) == 0) { \
VOP_LOCK(un
->un_uppervp
);
un
->un_flags
|= UN_ULOCK
;
union_lookup1(udvp
, dvpp
, vpp
, cnp
)
struct componentname
*cnp
;
* If stepping up the directory tree, check for going
* back across the mount point, in which case do what
* lookup would do by stepping back down the mount
if (cnp
->cn_flags
& ISDOTDOT
) {
while ((dvp
!= udvp
) && (dvp
->v_flag
& VROOT
)) {
* Don't do the NOCROSSMOUNT check
* at this level. By definition,
* union fs deals with namespaces, not
*dvpp
= dvp
= dvp
->v_mount
->mnt_vnodecovered
;
error
= VOP_LOOKUP(dvp
, &tdvp
, cnp
);
* The parent directory will have been unlocked, unless lookup
* found the last component. In which case, re-lock the node
* here to allow it to be unlocked again (phew) in union_lookup.
if (dvp
!= tdvp
&& !(cnp
->cn_flags
& ISLASTCN
))
* Lastly check if the current node is a mount point in
* which case walk up the mount hierarchy making sure not to
* bump into the root of the mount tree (ie. dvp != udvp).
while (dvp
!= udvp
&& (dvp
->v_type
== VDIR
) &&
(mp
= dvp
->v_mountedhere
)) {
if (mp
->mnt_flag
& MNT_MLOCK
) {
mp
->mnt_flag
|= MNT_MWAIT
;
sleep((caddr_t
) mp
, PVFS
);
if (error
= VFS_ROOT(mp
, &tdvp
)) {
struct vop_lookup_args
/* {
struct vnodeop_desc *a_desc;
struct componentname *a_cnp;
struct vnode
*uppervp
, *lowervp
;
struct vnode
*upperdvp
, *lowerdvp
;
struct vnode
*dvp
= ap
->a_dvp
;
struct union_node
*dun
= VTOUNION(dvp
);
struct componentname
*cnp
= ap
->a_cnp
;
int lockparent
= cnp
->cn_flags
& LOCKPARENT
;
int rdonly
= cnp
->cn_flags
& RDONLY
;
struct union_mount
*um
= MOUNTTOUNIONMOUNT(dvp
->v_mount
);
struct ucred
*saved_cred
;
if (cnp
->cn_namelen
== 3 &&
cnp
->cn_nameptr
[2] == '.' &&
cnp
->cn_nameptr
[1] == '.' &&
cnp
->cn_nameptr
[0] == '.') {
dvp
= *ap
->a_vpp
= LOWERVP(ap
->a_dvp
);
if (!lockparent
|| !(cnp
->cn_flags
& ISLASTCN
))
cnp
->cn_flags
|= LOCKPARENT
;
upperdvp
= dun
->un_uppervp
;
lowerdvp
= dun
->un_lowervp
;
* do the lookup in the upper level.
* if that level comsumes additional pathnames,
* then assume that something special is going
* on and just return that vnode.
if (upperdvp
!= NULLVP
) {
uerror
= union_lookup1(um
->um_uppervp
, &upperdvp
,
/*if (uppervp == upperdvp)
dun->un_flags |= UN_KLOCK;*/
if (cnp
->cn_consume
!= 0) {
cnp
->cn_flags
&= ~LOCKPARENT
;
* in a similar way to the upper layer, do the lookup
* in the lower layer. this time, if there is some
* component magic going on, then vput whatever we got
* back from the upper layer and return the lower vnode
if (lowerdvp
!= NULLVP
) {
* Only do a LOOKUP on the bottom node, since
* we won't be making changes to it anyway.
nameiop
= cnp
->cn_nameiop
;
cnp
->cn_nameiop
= LOOKUP
;
if (um
->um_op
== UNMNT_BELOW
) {
saved_cred
= cnp
->cn_cred
;
cnp
->cn_cred
= um
->um_cred
;
lerror
= union_lookup1(um
->um_lowervp
, &lowerdvp
,
if (um
->um_op
== UNMNT_BELOW
)
cnp
->cn_cred
= saved_cred
;
cnp
->cn_nameiop
= nameiop
;
if (cnp
->cn_consume
!= 0) {
cnp
->cn_flags
&= ~LOCKPARENT
;
if ((cnp
->cn_flags
& ISDOTDOT
) && dun
->un_pvp
!= NULLVP
) {
lowervp
= LOWERVP(dun
->un_pvp
);
cnp
->cn_flags
&= ~LOCKPARENT
;
* at this point, we have uerror and lerror indicating
* possible errors with the lookups in the upper and lower
* layers. additionally, uppervp and lowervp are (locked)
* references to existing vnodes in the upper and lower layers.
* there are now three cases to consider.
* 1. if both layers returned an error, then return whatever
* error the upper layer generated.
* 2. if the top layer failed and the bottom layer succeeded
* then two subcases occur.
* a. the bottom vnode is not a directory, in which
* case just return a new union vnode referencing
* an empty top layer and the existing bottom layer.
* b. the bottom vnode is a directory, in which case
* create a new directory in the top-level and
* 3. if the top layer succeeded then return a new union
* vnode referencing whatever the new top layer and
* whatever the bottom layer returned.
if ((uerror
!= 0) && (lerror
!= 0)) {
if (uerror
!= 0 /* && (lerror == 0) */ ) {
if (lowervp
->v_type
== VDIR
) { /* case 2b. */
dun
->un_flags
&= ~UN_ULOCK
;
uerror
= union_mkshadow(um
, upperdvp
, cnp
, &uppervp
);
dun
->un_flags
|= UN_ULOCK
;
error
= union_allocvp(ap
->a_vpp
, dvp
->v_mount
, dvp
, upperdvp
, cnp
,
if (!lockparent
|| !(cnp
->cn_flags
& ISLASTCN
))
struct vop_create_args
/* {
struct componentname *a_cnp;
struct union_node
*un
= VTOUNION(ap
->a_dvp
);
struct vnode
*dvp
= un
->un_uppervp
;
un
->un_flags
|= UN_KLOCK
;
error
= VOP_CREATE(dvp
, &vp
, ap
->a_cnp
, ap
->a_vap
);
struct vop_mknod_args
/* {
struct componentname *a_cnp;
struct union_node
*un
= VTOUNION(ap
->a_dvp
);
struct vnode
*dvp
= un
->un_uppervp
;
un
->un_flags
|= UN_KLOCK
;
error
= VOP_MKNOD(dvp
, &vp
, ap
->a_cnp
, ap
->a_vap
);
struct vop_open_args
/* {
struct vnodeop_desc *a_desc;
struct union_node
*un
= VTOUNION(ap
->a_vp
);
struct ucred
*cred
= ap
->a_cred
;
struct proc
*p
= ap
->a_p
;
* If there is an existing upper vp then simply open that.
* If the lower vnode is being opened for writing, then
* copy the file contents to the upper vnode and open that,
* otherwise can simply open the lower vnode.
if ((ap
->a_mode
& FWRITE
) && (tvp
->v_type
== VREG
)) {
error
= union_copyup(un
, (mode
&O_TRUNC
) == 0, cred
, p
);
error
= VOP_OPEN(un
->un_uppervp
, mode
, cred
, p
);
* Just open the lower vnode
error
= VOP_OPEN(tvp
, mode
, cred
, p
);
error
= VOP_OPEN(tvp
, mode
, cred
, p
);
struct vop_close_args
/* {
struct union_node
*un
= VTOUNION(ap
->a_vp
);
if (un
->un_uppervp
!= NULLVP
) {
panic("union: un_openl cnt");
return (VOP_CLOSE(vp
, ap
->a_fflag
, ap
->a_cred
, ap
->a_p
));
* Check access permission on the union vnode.
* The access check being enforced is to check
* against both the underlying vnode, and any
* copied vnode. This ensures that no additional
* file permissions are given away simply because
* the user caused an implicit file copy.
struct vop_access_args
/* {
struct vnodeop_desc *a_desc;
struct union_node
*un
= VTOUNION(ap
->a_vp
);
if ((vp
= un
->un_uppervp
) != NULLVP
) {
return (VOP_ACCESS(vp
, ap
->a_mode
, ap
->a_cred
, ap
->a_p
));
if ((vp
= un
->un_lowervp
) != NULLVP
) {
error
= VOP_ACCESS(vp
, ap
->a_mode
, ap
->a_cred
, ap
->a_p
);
struct union_mount
*um
= MOUNTTOUNIONMOUNT(vp
->v_mount
);
if (um
->um_op
== UNMNT_BELOW
)
error
= VOP_ACCESS(vp
, ap
->a_mode
,
* We handle getattr only to change the fsid and
struct vop_getattr_args
/* {
struct union_node
*un
= VTOUNION(ap
->a_vp
);
struct vnode
*vp
= un
->un_uppervp
;
* Some programs walk the filesystem hierarchy by counting
* links to directories to avoid stat'ing all the time.
* This means the link count on directories needs to be "correct".
* The only way to do that is to call getattr on both layers
* and fix up the link count. The link count will not necessarily
* be accurate but will be large enough to defeat the tree walkers.
* It's not clear whether VOP_GETATTR is to be
* called with the vnode locked or not. stat() calls
* it with (vp) locked, and fstat calls it with
* In the mean time, compensate here by checking
* the union_node's lock flag.
if (un
->un_flags
& UN_LOCKED
)
error
= VOP_GETATTR(vp
, vap
, ap
->a_cred
, ap
->a_p
);
union_newsize(ap
->a_vp
, vap
->va_size
, VNOVAL
);
} else if (vp
->v_type
== VDIR
) {
error
= VOP_GETATTR(vp
, vap
, ap
->a_cred
, ap
->a_p
);
union_newsize(ap
->a_vp
, VNOVAL
, vap
->va_size
);
if ((vap
!= ap
->a_vap
) && (vap
->va_type
== VDIR
))
ap
->a_vap
->va_nlink
+= vap
->va_nlink
;
ap
->a_vap
->va_fsid
= ap
->a_vp
->v_mount
->mnt_stat
.f_fsid
.val
[0];
struct vop_setattr_args
/* {
struct union_node
*un
= VTOUNION(ap
->a_vp
);
* Handle case of truncating lower object to zero size,
* by creating a zero length upper object. This is to
* handle the case of open with O_TRUNC and O_CREAT.
if ((un
->un_uppervp
== NULLVP
) &&
/* assert(un->un_lowervp != NULLVP) */
(un
->un_lowervp
->v_type
== VREG
) &&
(ap
->a_vap
->va_size
== 0)) {
error
= union_vn_create(&vp
, un
, ap
->a_p
);
/* at this point, uppervp is locked */
union_vn_close(un
->un_uppervp
, FWRITE
, ap
->a_cred
, ap
->a_p
);
un
->un_flags
|= UN_ULOCK
;
* Try to set attributes in upper layer,
* otherwise return read-only filesystem error.
if (un
->un_uppervp
!= NULLVP
) {
error
= VOP_SETATTR(un
->un_uppervp
, ap
->a_vap
,
if ((error
== 0) && (ap
->a_vap
->va_size
!= VNOVAL
))
union_newsize(ap
->a_vp
, ap
->a_vap
->va_size
, VNOVAL
);
struct vop_read_args
/* {
struct vnode
*vp
= OTHERVP(ap
->a_vp
);
int dolock
= (vp
== LOWERVP(ap
->a_vp
));
FIXUP(VTOUNION(ap
->a_vp
));
error
= VOP_READ(vp
, ap
->a_uio
, ap
->a_ioflag
, ap
->a_cred
);
* perhaps the size of the underlying object has changed under
* our feet. take advantage of the offset information present
struct union_node
*un
= VTOUNION(ap
->a_vp
);
off_t cur
= ap
->a_uio
->uio_offset
;
if (vp
== un
->un_uppervp
) {
if (cur
> un
->un_uppersz
)
union_newsize(ap
->a_vp
, cur
, VNOVAL
);
if (cur
> un
->un_lowersz
)
union_newsize(ap
->a_vp
, VNOVAL
, cur
);
struct vop_read_args
/* {
struct vnode
*vp
= OTHERVP(ap
->a_vp
);
int dolock
= (vp
== LOWERVP(ap
->a_vp
));
FIXUP(VTOUNION(ap
->a_vp
));
error
= VOP_WRITE(vp
, ap
->a_uio
, ap
->a_ioflag
, ap
->a_cred
);
* the size of the underlying object may be changed by the
struct union_node
*un
= VTOUNION(ap
->a_vp
);
off_t cur
= ap
->a_uio
->uio_offset
;
if (vp
== un
->un_uppervp
) {
if (cur
> un
->un_uppersz
)
union_newsize(ap
->a_vp
, cur
, VNOVAL
);
if (cur
> un
->un_lowersz
)
union_newsize(ap
->a_vp
, VNOVAL
, cur
);
struct vop_ioctl_args
/* {
return (VOP_IOCTL(OTHERVP(ap
->a_vp
), ap
->a_command
, ap
->a_data
,
ap
->a_fflag
, ap
->a_cred
, ap
->a_p
));
struct vop_select_args
/* {
return (VOP_SELECT(OTHERVP(ap
->a_vp
), ap
->a_which
, ap
->a_fflags
,
struct vop_mmap_args
/* {
return (VOP_MMAP(OTHERVP(ap
->a_vp
), ap
->a_fflags
,
struct vop_fsync_args
/* {
struct vnode
*targetvp
= OTHERVP(ap
->a_vp
);
if (targetvp
!= NULLVP
) {
int dolock
= (targetvp
== LOWERVP(ap
->a_vp
));
FIXUP(VTOUNION(ap
->a_vp
));
error
= VOP_FSYNC(targetvp
, ap
->a_cred
,
struct vop_seek_args
/* {
return (VOP_SEEK(OTHERVP(ap
->a_vp
), ap
->a_oldoff
, ap
->a_newoff
, ap
->a_cred
));
struct vop_remove_args
/* {
struct componentname *a_cnp;
struct union_node
*dun
= VTOUNION(ap
->a_dvp
);
struct union_node
*un
= VTOUNION(ap
->a_vp
);
if (dun
->un_uppervp
!= NULLVP
&& un
->un_uppervp
!= NULLVP
) {
struct vnode
*dvp
= dun
->un_uppervp
;
struct vnode
*vp
= un
->un_uppervp
;
dun
->un_flags
|= UN_KLOCK
;
un
->un_flags
|= UN_KLOCK
;
error
= VOP_REMOVE(dvp
, vp
, ap
->a_cnp
);
* XXX: should create a whiteout here
* XXX: should create a whiteout here
struct vop_link_args
/* {
struct componentname *a_cnp;
if (ap
->a_vp
->v_op
!= ap
->a_tdvp
->v_op
) {
struct union_node
*tdun
= VTOUNION(ap
->a_tdvp
);
if (tdun
->un_uppervp
== NULLVP
) {
if (un
->un_uppervp
== tdun
->un_dirvp
) {
un
->un_flags
&= ~UN_ULOCK
;
VOP_UNLOCK(un
->un_uppervp
);
error
= union_copyup(tdun
, 1, ap
->a_cnp
->cn_cred
,
if (un
->un_uppervp
== tdun
->un_dirvp
) {
VOP_LOCK(un
->un_uppervp
);
un
->un_flags
|= UN_ULOCK
;
un
->un_flags
|= UN_KLOCK
;
return (VOP_LINK(vp
, tdvp
, ap
->a_cnp
));
struct vop_rename_args
/* {
struct componentname *a_fcnp;
struct componentname *a_tcnp;
struct vnode
*fdvp
= ap
->a_fdvp
;
struct vnode
*fvp
= ap
->a_fvp
;
struct vnode
*tdvp
= ap
->a_tdvp
;
struct vnode
*tvp
= ap
->a_tvp
;
if (fdvp
->v_op
== union_vnodeop_p
) { /* always true */
struct union_node
*un
= VTOUNION(fdvp
);
if (un
->un_uppervp
== NULLVP
) {
if (fvp
->v_op
== union_vnodeop_p
) { /* always true */
struct union_node
*un
= VTOUNION(fvp
);
if (un
->un_uppervp
== NULLVP
) {
if (tdvp
->v_op
== union_vnodeop_p
) {
struct union_node
*un
= VTOUNION(tdvp
);
if (un
->un_uppervp
== NULLVP
) {
* this should never happen in normal
* operation but might if there was
* a problem creating the top-level shadow
un
->un_flags
|= UN_KLOCK
;
if (tvp
!= NULLVP
&& tvp
->v_op
== union_vnodeop_p
) {
struct union_node
*un
= VTOUNION(tvp
);
un
->un_flags
|= UN_KLOCK
;
return (VOP_RENAME(fdvp
, fvp
, ap
->a_fcnp
, tdvp
, tvp
, ap
->a_tcnp
));
struct vop_mkdir_args
/* {
struct componentname *a_cnp;
struct union_node
*un
= VTOUNION(ap
->a_dvp
);
struct vnode
*dvp
= un
->un_uppervp
;
un
->un_flags
|= UN_KLOCK
;
error
= VOP_MKDIR(dvp
, &vp
, ap
->a_cnp
, ap
->a_vap
);
struct vop_rmdir_args
/* {
struct componentname *a_cnp;
struct union_node
*dun
= VTOUNION(ap
->a_dvp
);
struct union_node
*un
= VTOUNION(ap
->a_vp
);
if (dun
->un_uppervp
!= NULLVP
&& un
->un_uppervp
!= NULLVP
) {
struct vnode
*dvp
= dun
->un_uppervp
;
struct vnode
*vp
= un
->un_uppervp
;
dun
->un_flags
|= UN_KLOCK
;
un
->un_flags
|= UN_KLOCK
;
error
= VOP_RMDIR(dvp
, vp
, ap
->a_cnp
);
* XXX: should create a whiteout here
* XXX: should create a whiteout here
struct vop_symlink_args
/* {
struct componentname *a_cnp;
struct union_node
*un
= VTOUNION(ap
->a_dvp
);
struct vnode
*dvp
= un
->un_uppervp
;
struct mount
*mp
= ap
->a_dvp
->v_mount
;
un
->un_flags
|= UN_KLOCK
;
error
= VOP_SYMLINK(dvp
, &vp
, ap
->a_cnp
,
ap
->a_vap
, ap
->a_target
);
* union_readdir works in concert with getdirentries and
* readdir(3) to provide a list of entries in the unioned
* directories. getdirentries is responsible for walking
* down the union stack. readdir(3) is responsible for
* eliminating duplicate names from the returned data stream.
struct vop_readdir_args
/* {
struct vnodeop_desc *a_desc;
register struct union_node
*un
= VTOUNION(ap
->a_vp
);
register struct vnode
*uvp
= un
->un_uppervp
;
return (VOCALL(uvp
->v_op
, VOFFSET(vop_readdir
), ap
));
struct vop_readlink_args
/* {
struct vnode
*vp
= OTHERVP(ap
->a_vp
);
int dolock
= (vp
== LOWERVP(ap
->a_vp
));
FIXUP(VTOUNION(ap
->a_vp
));
error
= VOP_READLINK(vp
, ap
->a_uio
, ap
->a_cred
);
struct vop_abortop_args
/* {
struct componentname *a_cnp;
struct vnode
*vp
= OTHERVP(ap
->a_dvp
);
struct union_node
*un
= VTOUNION(ap
->a_dvp
);
int islocked
= un
->un_flags
& UN_LOCKED
;
int dolock
= (vp
== LOWERVP(ap
->a_dvp
));
FIXUP(VTOUNION(ap
->a_dvp
));
error
= VOP_ABORTOP(vp
, ap
->a_cnp
);
struct vop_inactive_args
/* {
struct union_node
*un
= VTOUNION(ap
->a_vp
);
* Do nothing (and _don't_ bypass).
* Wait to vrele lowervp until reclaim,
* so that until then our union_node is in the
* NEEDSWORK: Someday, consider inactive'ing
* the lowervp and then trying to reactivate it
* with capabilities (v_id)
* like they do in the name lookup cache code.
* That's too much work for now.
if (un
->un_flags
& UN_LOCKED
)
panic("union: inactivating locked node");
if (un
->un_flags
& UN_ULOCK
)
panic("union: inactivating w/locked upper node");
if ((un
->un_flags
& UN_CACHED
) == 0)
struct vop_reclaim_args
/* {
struct vop_lock_args
*ap
;
struct vnode
*vp
= ap
->a_vp
;
while (vp
->v_flag
& VXLOCK
) {
sleep((caddr_t
)vp
, PINOD
);
if (un
->un_uppervp
!= NULLVP
) {
if (((un
->un_flags
& UN_ULOCK
) == 0) &&
un
->un_flags
|= UN_ULOCK
;
VOP_LOCK(un
->un_uppervp
);
if (un
->un_flags
& UN_KLOCK
)
panic("union: dangling upper lock");
if (un
->un_flags
& UN_LOCKED
) {
if (curproc
&& un
->un_pid
== curproc
->p_pid
&&
un
->un_pid
> -1 && curproc
->p_pid
> -1)
panic("union: locking against myself");
sleep((caddr_t
) &un
->un_flags
, PINOD
);
un
->un_pid
= curproc
->p_pid
;
un
->un_flags
|= UN_LOCKED
;
struct vop_lock_args
*ap
;
struct union_node
*un
= VTOUNION(ap
->a_vp
);
if ((un
->un_flags
& UN_LOCKED
) == 0)
panic("union: unlock unlocked node");
if (curproc
&& un
->un_pid
!= curproc
->p_pid
&&
curproc
->p_pid
> -1 && un
->un_pid
> -1)
panic("union: unlocking other process's union node");
un
->un_flags
&= ~UN_LOCKED
;
if ((un
->un_flags
& (UN_ULOCK
|UN_KLOCK
)) == UN_ULOCK
)
VOP_UNLOCK(un
->un_uppervp
);
un
->un_flags
&= ~(UN_ULOCK
|UN_KLOCK
);
if (un
->un_flags
& UN_WANT
) {
un
->un_flags
&= ~UN_WANT
;
wakeup((caddr_t
) &un
->un_flags
);
struct vop_bmap_args
/* {
struct vnode
*vp
= OTHERVP(ap
->a_vp
);
int dolock
= (vp
== LOWERVP(ap
->a_vp
));
FIXUP(VTOUNION(ap
->a_vp
));
error
= VOP_BMAP(vp
, ap
->a_bn
, ap
->a_vpp
, ap
->a_bnp
, ap
->a_runp
);
struct vop_print_args
/* {
struct vnode
*vp
= ap
->a_vp
;
printf("\ttag VT_UNION, vp=%x, uppervp=%x, lowervp=%x\n",
vp
, UPPERVP(vp
), LOWERVP(vp
));
struct vop_islocked_args
/* {
return ((VTOUNION(ap
->a_vp
)->un_flags
& UN_LOCKED
) ? 1 : 0);
struct vop_pathconf_args
/* {
struct vnode
*vp
= OTHERVP(ap
->a_vp
);
int dolock
= (vp
== LOWERVP(ap
->a_vp
));
FIXUP(VTOUNION(ap
->a_vp
));
error
= VOP_PATHCONF(vp
, ap
->a_name
, ap
->a_retval
);
struct vop_advlock_args
/* {
return (VOP_ADVLOCK(OTHERVP(ap
->a_vp
), ap
->a_id
, ap
->a_op
,
* XXX - vop_strategy must be hand coded because it has no
* vnode in its arguments.
* This goes away with a merged VM/buffer cache.
struct vop_strategy_args
/* {
struct buf
*bp
= ap
->a_bp
;
bp
->b_vp
= OTHERVP(bp
->b_vp
);
panic("union_strategy: nil vp");
if (((bp
->b_flags
& B_READ
) == 0) &&
(bp
->b_vp
== LOWERVP(savedvp
)))
panic("union_strategy: writing to lowervp");
error
= VOP_STRATEGY(bp
);
* Global vfs data structures
/*
 * Vector of vnode-operation implementations for the union filesystem.
 * Filled in at mount/init time from union_vnodeop_opv_desc below; the
 * VFS layer dispatches through this table via VOCALL/VOFFSET.
 */
int (**union_vnodeop_p
)();
/*
 * Table mapping each vnode-operation descriptor to the union
 * filesystem's implementation.  vn_default_error catches any
 * operation not explicitly listed.  The table must end with a
 * NULL/NULL sentinel entry and be terminated with "};" -- the
 * closing brace of the array declaration was missing here.
 */
struct vnodeopv_entry_desc union_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, union_lookup },		/* lookup */
	{ &vop_create_desc, union_create },		/* create */
	{ &vop_mknod_desc, union_mknod },		/* mknod */
	{ &vop_open_desc, union_open },			/* open */
	{ &vop_close_desc, union_close },		/* close */
	{ &vop_access_desc, union_access },		/* access */
	{ &vop_getattr_desc, union_getattr },		/* getattr */
	{ &vop_setattr_desc, union_setattr },		/* setattr */
	{ &vop_read_desc, union_read },			/* read */
	{ &vop_write_desc, union_write },		/* write */
	{ &vop_ioctl_desc, union_ioctl },		/* ioctl */
	{ &vop_select_desc, union_select },		/* select */
	{ &vop_mmap_desc, union_mmap },			/* mmap */
	{ &vop_fsync_desc, union_fsync },		/* fsync */
	{ &vop_seek_desc, union_seek },			/* seek */
	{ &vop_remove_desc, union_remove },		/* remove */
	{ &vop_link_desc, union_link },			/* link */
	{ &vop_rename_desc, union_rename },		/* rename */
	{ &vop_mkdir_desc, union_mkdir },		/* mkdir */
	{ &vop_rmdir_desc, union_rmdir },		/* rmdir */
	{ &vop_symlink_desc, union_symlink },		/* symlink */
	{ &vop_readdir_desc, union_readdir },		/* readdir */
	{ &vop_readlink_desc, union_readlink },		/* readlink */
	{ &vop_abortop_desc, union_abortop },		/* abortop */
	{ &vop_inactive_desc, union_inactive },		/* inactive */
	{ &vop_reclaim_desc, union_reclaim },		/* reclaim */
	{ &vop_lock_desc, union_lock },			/* lock */
	{ &vop_unlock_desc, union_unlock },		/* unlock */
	{ &vop_bmap_desc, union_bmap },			/* bmap */
	{ &vop_strategy_desc, union_strategy },		/* strategy */
	{ &vop_print_desc, union_print },		/* print */
	{ &vop_islocked_desc, union_islocked },		/* islocked */
	{ &vop_pathconf_desc, union_pathconf },		/* pathconf */
	{ &vop_advlock_desc, union_advlock },		/* advlock */
	{ &vop_blkatoff_desc, union_blkatoff },		/* blkatoff */
	{ &vop_valloc_desc, union_valloc },		/* valloc */
	{ &vop_vfree_desc, union_vfree },		/* vfree */
	{ &vop_truncate_desc, union_truncate },		/* truncate */
	{ &vop_update_desc, union_update },		/* update */
	{ &vop_bwrite_desc, union_bwrite },		/* bwrite */
	{ (struct vnodeop_desc *)NULL, (int(*)())NULL }	/* sentinel */
};
/*
 * Registration record handed to the VFS: binds the operations vector
 * pointer (union_vnodeop_p) to its entry table (union_vnodeop_entries).
 */
struct vnodeopv_desc union_vnodeop_opv_desc =
	{ &union_vnodeop_p, union_vnodeop_entries };