/*
 * Copyright (c) 1992, 1993, 1994 The Regents of the University of California.
 * Copyright (c) 1992, 1993, 1994 Jan-Simon Pendry.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry and by John Heidemann of the UCLA Ficus project.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)union_vnops.c	1.1 (Berkeley) %G%
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include "union.h"


int union_bug_bypass = 0;	/* for debugging: enables bypass printf'ing */

/*
 * This is the 10-Apr-92 bypass routine.
 * This version has been optimized for speed, throwing away some
 * safety checks.  It should still always work, but it's not as
 * robust to programmer errors.
 * Define SAFETY to include some error checking code.
 *
 * In general, we map all vnodes going down and unmap them on the way back.
 * As an exception to this, vnodes can be marked "unmapped" by setting
 * the Nth bit in the operation's vdesc_flags.
 *
 * Also, some BSD vnode operations have the side effect of vrele'ing
 * their arguments.  With stacking, the reference counts are held
 * by the upper node, not the lower one, so we must handle these
 * side-effects here.  This is not of concern in Sun-derived systems
 * since there are no such side-effects.
 *
 * This makes the following assumptions:
 * - only one returned vpp
 * - no INOUT vpp's (Sun's vop_open has one of these)
 * - the vnode operation vector of the first vnode should be used
 *   to determine what implementation of the op should be invoked
 * - all mapped vnodes are of our vnode-type (NEEDSWORK:
 *   problems on rmdir'ing mount points and renaming?)
 */
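/*
 * To make the mapping concrete: for an operation such as vop_getattr,
 * the descriptor's offset table locates the vnode argument within the
 * argument structure, so (roughly speaking) the mapping step below
 * amounts to
 *
 *	vps_p[0] = VOPARG_OFFSETTO(struct vnode **,
 *		descp->vdesc_vp_offsets[0], ap);	(ie &ap->a_vp)
 *	*(vps_p[0]) = OTHERVP(*(vps_p[0]));
 *
 * after which VCALL() re-dispatches the operation through the
 * replacement vnode's own operation vector.
 */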
int
union_bypass(ap)
	struct vop_generic_args /* {
		struct vnodeop_desc *a_desc;
		<other random data follows, presumably>
	} */ *ap;
{
	struct vnode **this_vp_p;
	int error;
	struct vnode *old_vps[VDESC_MAX_VPS];
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnodeop_desc *descp = ap->a_desc;
	int reles, i;

	if (union_bug_bypass)
		printf ("union_bypass: %s\n", descp->vdesc_name);

#ifdef SAFETY
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic ("union_bypass: no vp's in map.\n");
#endif

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		vps_p[i] = this_vp_p =
			VOPARG_OFFSETTO(struct vnode **,
			    descp->vdesc_vp_offsets[i], ap);
		/*
		 * We're not guaranteed that any but the first vnode
		 * are of our type.  Check for and don't map any
		 * that aren't.  (We must always map first vp or vclean fails.)
		 */
		if (i && (*this_vp_p)->v_op != union_vnodeop_p) {
			old_vps[i] = NULL;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = OTHERVP(*this_vp_p);
			/*
			 * XXX - Several operations have the side effect
			 * of vrele'ing their vp's.  We must account for
			 * that.  (This should go away in the future.)
			 */
			if (reles & 1)
				VREF(*this_vp_p);
		}
	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
	error = VCALL(*(vps_p[0]), descp->vdesc_offset, ap);

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		if (old_vps[i]) {
			*(vps_p[i]) = old_vps[i];
			if (reles & 1)
				vrele(*(vps_p[i]));
		}
	}

	/*
	 * Map the possible out-going vpp
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
	    !(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
	    !error) {
		/*
		 * XXX - even though some ops have vpp returned vp's,
		 * several ops actually vrele this before returning.
		 * We must avoid these ops.
		 * (This should go away when these ops are regularized.)
		 */
		if (descp->vdesc_flags & VDESC_VPP_WILLRELE)
			goto out;
		vppp = VOPARG_OFFSETTO(struct vnode***,
				 descp->vdesc_vpp_offset, ap);
		/*
		 * XXX - mapping the returned vnode back to a union
		 * vnode is not yet implemented; the call below is a
		 * placeholder and is never reached.
		 */
		panic("union: failed to handle returned vnode");
		error = union_allocvp(0, 0, 0, 0, 0, 0);
	}

out:
	return (error);
}
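
/*
 * Since union_bypass is installed as the entry for vop_default_desc
 * in the operation table at the end of this file, any operation
 * without an explicit entry there (read, write, fsync, ...) reaches
 * the upper or lower vnode through the code above.
 */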

/*
 * Check access permission on the union vnode.
 * The access check being enforced is to check
 * against both the underlying vnode, and any
 * copied vnode.  This ensures that no additional
 * file permissions are given away simply because
 * the user caused an implicit file copy.
 */
int
union_access(ap)
	struct vop_access_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp;

	if (vp = un->un_lowervp) {
		int error;

		error = VOP_ACCESS(vp, ap->a_mode, ap->a_cred, ap->a_p);
		if (error)
			return (error);
	}

	if (vp = un->un_uppervp)
		return (VOP_ACCESS(vp, ap->a_mode, ap->a_cred, ap->a_p));

	return (0);
}

static int
union_mkshadow(dvp, cnp, vpp)
	struct vnode *dvp;
	struct componentname *cnp;
	struct vnode **vpp;
{
	int error;
	struct vattr va;
	struct proc *p = cnp->cn_proc;
	int lockparent = (cnp->cn_flags & LOCKPARENT);

	/*
	 * policy: when creating the shadow directory in the
	 * upper layer, create it owned by the current user,
	 * group from parent directory, and mode 777 modified
	 * by umask (ie mostly identical to the mkdir syscall).
	 * (jsp, kb)
	 * TODO: create the directory owned by the user who
	 * did the mount (um->um_cred).
	 */

	VATTR_NULL(&va);
	va.va_type = VDIR;
	va.va_mode = UN_DIRMODE &~ p->p_fd->fd_cmask;
	if (lockparent)
		VOP_UNLOCK(dvp);
	LEASE_CHECK(dvp, p, p->p_ucred, LEASE_WRITE);
	VOP_LOCK(dvp);
	error = VOP_MKDIR(dvp, vpp, cnp, &va);
	if (lockparent)
		VOP_LOCK(dvp);
	return (error);
}
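
/*
 * (union_mkshadow is used by union_lookup, case 2b below, to create
 * the top-layer directory which shadows an existing bottom-layer
 * directory.)
 */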

static int
union_lookup1(dvp, vpp, cnp)
	struct vnode *dvp;
	struct vnode **vpp;
	struct componentname *cnp;
{
	int error;
	struct vnode *tdvp;
	struct mount *mp;

	if (cnp->cn_flags & ISDOTDOT) {
		for (;;) {
			if ((dvp->v_flag & VROOT) == 0 ||
			    (cnp->cn_flags & NOCROSSMOUNT))
				break;

			tdvp = dvp;
			dvp = dvp->v_mount->mnt_vnodecovered;
			vput(tdvp);
			VREF(dvp);
			VOP_LOCK(dvp);
		}
	}

	error = VOP_LOOKUP(dvp, &tdvp, cnp);
	if (error)
		return (error);

	dvp = tdvp;
	while ((dvp->v_type == VDIR) && (mp = dvp->v_mountedhere) &&
	       (cnp->cn_flags & NOCROSSMOUNT) == 0) {

		if (mp->mnt_flag & MNT_MLOCK) {
			mp->mnt_flag |= MNT_MWAIT;
			sleep((caddr_t) mp, PVFS);
			continue;
		}

		if (error = VFS_ROOT(mp, &tdvp)) {
			vput(dvp);
			return (error);
		}

		/* release the covered vnode; continue with the mounted root */
		vput(dvp);
		dvp = tdvp;
	}

	*vpp = dvp;
	return (0);
}

int
union_lookup(ap)
	struct vop_lookup_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{
	int uerror, lerror;
	struct vnode *uppervp, *lowervp;
	struct vnode *upperdvp, *lowerdvp;
	struct vnode *dvp = ap->a_dvp;
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct componentname *cnp = ap->a_cnp;
	int lockparent = cnp->cn_flags & LOCKPARENT;

	upperdvp = dun->un_uppervp;
	lowerdvp = dun->un_lowervp;

	/*
	 * do the lookup in the upper level.
	 * if that level consumes additional pathnames,
	 * then assume that something special is going
	 * on and just return that vnode.
	 */
	uppervp = 0;
	if (upperdvp) {
		uerror = union_lookup1(upperdvp, &uppervp, cnp);
		if (cnp->cn_consume != 0) {
			*ap->a_vpp = uppervp;
			return (uerror);
		}
		if (!lockparent)
			VOP_LOCK(upperdvp);
	} else {
		uerror = ENOENT;
	}

	/*
	 * in a similar way to the upper layer, do the lookup
	 * in the lower layer.  this time, if there is some
	 * component magic going on, then vput whatever we got
	 * back from the upper layer and return the lower vnode
	 * instead.
	 */
	lowervp = 0;
	if (lowerdvp) {
		lerror = union_lookup1(lowerdvp, &lowervp, cnp);
		if (cnp->cn_consume != 0) {
			if (uppervp) {
				vput(uppervp);
				uppervp = 0;
			}
			*ap->a_vpp = lowervp;
			return (lerror);
		}
		if (!lockparent)
			VOP_LOCK(lowerdvp);
	} else {
		lerror = ENOENT;
	}

	/*
	 * at this point, we have uerror and lerror indicating
	 * possible errors with the lookups in the upper and lower
	 * layers.  additionally, uppervp and lowervp are (locked)
	 * references to existing vnodes in the upper and lower layers.
	 *
	 * there are now three cases to consider.
	 * 1. if both layers returned an error, then return whatever
	 *    error the upper layer generated.
	 *
	 * 2. if the top layer failed and the bottom layer succeeded
	 *    then two subcases occur.
	 *    a. the bottom vnode is not a directory, in which
	 *	 case just return a new union vnode referencing
	 *	 an empty top layer and the existing bottom layer.
	 *    b. the bottom vnode is a directory, in which case
	 *	 create a new directory in the top layer and
	 *	 continue as in case 3.
	 *
	 * 3. if the top layer succeeded then return a new union
	 *    vnode referencing whatever the top layer returned
	 *    and whatever the bottom layer returned.
	 */

	/* case 1. */
	if ((uerror != 0) && (lerror != 0)) {
		*ap->a_vpp = 0;
		return (uerror);
	}

	/* case 2. */
	if (uerror != 0 /* && (lerror == 0) */ ) {
		if (lowervp->v_type == VDIR) { /* case 2b. */
			uerror = union_mkshadow(upperdvp, cnp, &uppervp);
			if (uerror) {
				if (lowervp) {
					vput(lowervp);
					lowervp = 0;
				}
				return (uerror);
			}
		}
	}

	return (union_allocvp(ap->a_vpp, dvp->v_mount, dvp, cnp,
	    uppervp, lowervp));
}

/*
 * copyfile.  copy the vnode (fvp) to the vnode (tvp)
 * using a sequence of reads and writes.
 */
static int
union_copyfile(p, cred, fvp, tvp)
	struct proc *p;
	struct ucred *cred;
	struct vnode *fvp;
	struct vnode *tvp;
{
	char *buf;
	struct uio uio;
	struct iovec iov;
	int error = 0;
	off_t offset;

	/*
	 * strategy:
	 * allocate a buffer of size MAXBSIZE.
	 * loop doing reads and writes, keeping track
	 * of the current uio offset.
	 * give up at the first sign of trouble.
	 */

	uio.uio_procp = p;
	uio.uio_segflg = UIO_SYSSPACE;
	offset = 0;

	VOP_UNLOCK(fvp);				/* XXX */
	LEASE_CHECK(fvp, p, cred, LEASE_READ);
	VOP_LOCK(fvp);					/* XXX */
	VOP_UNLOCK(tvp);				/* XXX */
	LEASE_CHECK(tvp, p, cred, LEASE_WRITE);
	VOP_LOCK(tvp);					/* XXX */

	buf = malloc(MAXBSIZE, M_TEMP, M_WAITOK);
	do {
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		iov.iov_base = buf;
		iov.iov_len = MAXBSIZE;
		uio.uio_resid = iov.iov_len;
		uio.uio_offset = offset;
		uio.uio_rw = UIO_READ;
		error = VOP_READ(fvp, &uio, 0, cred);

		if (error == 0) {
			/*
			 * A read which returns no data means EOF.
			 * Break out here, since the write below resets
			 * uio_resid to zero and the loop condition
			 * would otherwise never become false.
			 */
			if (uio.uio_resid == MAXBSIZE)
				break;

			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			iov.iov_base = buf;
			iov.iov_len = MAXBSIZE - uio.uio_resid;
			uio.uio_rw = UIO_WRITE;
			uio.uio_resid = iov.iov_len;
			uio.uio_offset = offset;

			do {
				error = VOP_WRITE(tvp, &uio, 0, cred);
			} while (error == 0 && uio.uio_resid > 0);
			if (error == 0)
				offset = uio.uio_offset;
		}
	} while ((uio.uio_resid == 0) && (error == 0));

	free(buf, M_TEMP);
	return (error);
}
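
/*
 * (union_copyfile is used by union_open below to fill a newly
 * created upper-layer file with the contents of the lower-layer
 * file before the upper vnode is handed back to the caller.)
 */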

int
union_open(ap)
	struct vop_open_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	int mode = ap->a_mode;
	struct ucred *cred = ap->a_cred;
	struct proc *p = ap->a_p;

	/*
	 * If there is an existing upper vp then simply open that.
	 */
	if (un->un_uppervp)
		return (VOP_OPEN(un->un_uppervp, mode, cred, p));

	/*
	 * If the lower vnode is being opened for writing, then
	 * copy the file contents to the upper vnode and open that,
	 * otherwise we can simply open the lower vnode.
	 */
	if ((ap->a_mode & FWRITE) && (un->un_lowervp->v_type == VREG)) {
		int error;
		struct nameidata nd;
		struct filedesc *fdp = p->p_fd;
		int fmode;
		int cmode;

		/*
		 * Open the named file in the upper layer.  Note that
		 * the file may have come into existence *since* the lookup
		 * was done, since the upper layer may really be a
		 * loopback mount of some other filesystem... so open
		 * the file with exclusive create and barf if it already
		 * exists.
		 * XXX - perhaps should re-lookup the node (once more with
		 * feeling) and simply open that.  Who knows.
		 */
		NDINIT(&nd, CREATE, 0, UIO_SYSSPACE, un->un_path, p);
		fmode = (O_CREAT|O_TRUNC|O_EXCL);
		cmode = UN_FILEMODE & ~fdp->fd_cmask;
		error = vn_open(&nd, fmode, cmode);
		if (error)
			return (error);
		un->un_uppervp = nd.ni_vp;
		/*
		 * Now, if the file is being opened with truncation, then
		 * the (new) upper vnode is ready to fly, otherwise the
		 * data from the lower vnode must be copied to the upper
		 * layer first.  This only works for regular files (check
		 * is made above).
		 */
		if ((mode & O_TRUNC) == 0) {
			/* XXX - should not ignore errors from VOP_CLOSE */
			error = VOP_OPEN(un->un_lowervp, FREAD, cred, p);
			if (error == 0) {
				error = union_copyfile(p, cred,
				    un->un_lowervp, un->un_uppervp);
				(void) VOP_CLOSE(un->un_lowervp, FREAD, cred, p);
			}
			(void) VOP_CLOSE(un->un_uppervp, FWRITE, cred, p);
		}
		if (error == 0)
			error = VOP_OPEN(un->un_uppervp, FREAD, cred, p);
		return (error);
	}

	return (VOP_OPEN(un->un_lowervp, mode, cred, p));
}

/*
 * We handle getattr only to change the fsid.
 */
int
union_getattr(ap)
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	int error;

	if (error = union_bypass(ap))
		return (error);
	/* Requires that arguments be restored. */
	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
	return (0);
}

/*
 * union_readdir works in concert with getdirentries and
 * readdir(3) to provide a list of entries in the unioned
 * directories.  getdirentries is responsible for walking
 * down the union stack.  readdir(3) is responsible for
 * eliminating duplicate names from the returned data stream.
 */
int
union_readdir(ap)
	struct vop_readdir_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		struct uio *a_uio;
		struct ucred *a_cred;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);

	if (un->un_uppervp)
		return (union_bypass(ap));

	return (0);
}

int
union_inactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
	} */ *ap;
{

	/*
	 * Do nothing (and _don't_ bypass).
	 * Wait to vrele lowervp until reclaim,
	 * so that until then our union_node is in the
	 * cache and reusable.
	 *
	 * NEEDSWORK: Someday, consider inactive'ing
	 * the lowervp and then trying to reactivate it
	 * with capabilities (v_id)
	 * like they do in the name lookup cache code.
	 * That's too much work for now.
	 */
	return (0);
}

int
union_reclaim(ap)
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct union_node *un = VTOUNION(vp);
	struct vnode *uppervp = un->un_uppervp;
	struct vnode *lowervp = un->un_lowervp;
	struct vnode *dirvp = un->un_dirvp;
	char *path = un->un_path;

	/*
	 * Note: in vop_reclaim, vp->v_op == dead_vnodeop_p,
	 * so we can't call VOPs on ourself.
	 */
	/* After this assignment, this node will not be re-used. */
	un->un_uppervp = 0;
	un->un_lowervp = 0;
	un->un_dirvp = 0;
	un->un_path = NULL;
	union_freevp(vp);
	if (uppervp)
		vrele(uppervp);
	if (lowervp)
		vrele(lowervp);
	if (dirvp)
		vrele(dirvp);
	if (path)
		free(path, M_TEMP);
	return (0);
}


int
union_print(ap)
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	printf("\ttag VT_UNION, vp=%x, uppervp=%x, lowervp=%x\n",
	    vp, UPPERVP(vp), LOWERVP(vp));
	return (0);
}


/*
 * XXX - vop_strategy must be hand coded because it has no
 * vnode in its arguments.
 * This goes away with a merged VM/buffer cache.
 */
int
union_strategy(ap)
	struct vop_strategy_args /* {
		struct buf *a_bp;
	} */ *ap;
{
	struct buf *bp = ap->a_bp;
	int error;
	struct vnode *savedvp;

	savedvp = bp->b_vp;
	bp->b_vp = OTHERVP(bp->b_vp);

#ifdef DIAGNOSTIC
	if (bp->b_vp == 0)
		panic("union_strategy: nil vp");
	if (((bp->b_flags & B_READ) == 0) &&
	    (bp->b_vp == LOWERVP(savedvp)))
		panic("union_strategy: writing to lowervp");
#endif

	error = VOP_STRATEGY(bp);
	bp->b_vp = savedvp;

	return (error);
}


/*
 * XXX - like vop_strategy, vop_bwrite must be hand coded because it has no
 * vnode in its arguments.
 * This goes away with a merged VM/buffer cache.
 */
int
union_bwrite(ap)
	struct vop_bwrite_args /* {
		struct buf *a_bp;
	} */ *ap;
{
	struct buf *bp = ap->a_bp;
	int error;
	struct vnode *savedvp;

	savedvp = bp->b_vp;
	bp->b_vp = UPPERVP(bp->b_vp);

#ifdef DIAGNOSTIC
	if (bp->b_vp == 0)
		panic("union_bwrite: no upper vp");
#endif

	error = VOP_BWRITE(bp);

	bp->b_vp = savedvp;

	return (error);
}

int
union_lock(ap)
	struct vop_lock_args *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);

#ifdef DIAGNOSTIC
	if (un->un_pid == curproc->p_pid)
		panic("union: locking against myself");
#endif
	while (un->un_flags & UN_LOCKED) {
		un->un_flags |= UN_WANT;
		sleep((caddr_t) &un->un_flags, PINOD);
	}
	un->un_flags |= UN_LOCKED;
#ifdef DIAGNOSTIC
	un->un_pid = curproc->p_pid;
#endif

	if (un->un_lowervp && !VOP_ISLOCKED(un->un_lowervp))
		VOP_LOCK(un->un_lowervp);
	if (un->un_uppervp && !VOP_ISLOCKED(un->un_uppervp))
		VOP_LOCK(un->un_uppervp);

	return (0);
}

int
union_unlock(ap)
	struct vop_unlock_args *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);

#ifdef DIAGNOSTIC
	if (un->un_pid != curproc->p_pid)
		panic("union: unlocking other process's union node");
	if ((un->un_flags & UN_LOCKED) == 0)
		panic("union: unlock unlocked node");
#endif

	if (un->un_uppervp && VOP_ISLOCKED(un->un_uppervp))
		VOP_UNLOCK(un->un_uppervp);
	if (un->un_lowervp && VOP_ISLOCKED(un->un_lowervp))
		VOP_UNLOCK(un->un_lowervp);

	un->un_flags &= ~UN_LOCKED;
	if (un->un_flags & UN_WANT) {
		un->un_flags &= ~UN_WANT;
		wakeup((caddr_t) &un->un_flags);
	}

#ifdef DIAGNOSTIC
	un->un_pid = 0;
#endif

	return (0);
}

/*
 * Global vfs data structures
 */
int (**union_vnodeop_p)();
struct vnodeopv_entry_desc union_vnodeop_entries[] = {
	{ &vop_default_desc, union_bypass },

	{ &vop_lookup_desc, union_lookup },
	{ &vop_open_desc, union_open },
	{ &vop_access_desc, union_access },
	{ &vop_getattr_desc, union_getattr },
	{ &vop_readdir_desc, union_readdir },
	{ &vop_inactive_desc, union_inactive },
	{ &vop_reclaim_desc, union_reclaim },
	{ &vop_print_desc, union_print },

	{ &vop_strategy_desc, union_strategy },
	{ &vop_bwrite_desc, union_bwrite },

	{ &vop_lock_desc, union_lock },
	{ &vop_unlock_desc, union_unlock },

	{ (struct vnodeop_desc*)NULL, (int(*)())NULL }
};
struct vnodeopv_desc union_vnodeop_opv_desc =
	{ &union_vnodeop_p, union_vnodeop_entries };
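
/*
 * At boot time the vnodeopv_desc above is consumed by vfs_opv_init()
 * (see kern/vfs_init.c), which allocates the union_vnodeop_p vector
 * and points any operation not listed in union_vnodeop_entries[] at
 * the vop_default_desc entry, ie at union_bypass.
 */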