2 * Copyright (c) 1992, 1993, 1994, 1995 Jan-Simon Pendry.
3 * Copyright (c) 1992, 1993, 1994, 1995
4 * The Regents of the University of California. All rights reserved.
6 * This code is derived from software contributed to Berkeley by
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by the University of
20 * California, Berkeley and its contributors.
21 * 4. Neither the name of the University nor the names of its contributors
22 * may be used to endorse or promote products derived from this software
23 * without specific prior written permission.
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * @(#)union_vnops.c 8.32 (Berkeley) 6/23/95
38 * $FreeBSD: src/sys/miscfs/union/union_vnops.c,v 1.72 1999/12/15 23:02:14 eivind Exp $
39 * $DragonFly: src/sys/vfs/union/union_vnops.c,v 1.29 2006/05/06 02:43:15 dillon Exp $
42 #include <sys/param.h>
43 #include <sys/systm.h>
45 #include <sys/fcntl.h>
47 #include <sys/kernel.h>
48 #include <sys/vnode.h>
49 #include <sys/mount.h>
50 #include <sys/namei.h>
51 #include <sys/malloc.h>
54 #include <sys/sysctl.h>
58 #include <vm/vnode_pager.h>
60 #include <vm/vm_page.h>
61 #include <vm/vm_object.h>
66 SYSCTL_INT(_vfs, OID_AUTO, uniondebug, CTLFLAG_RW, &uniondebug, 0, "");
68 SYSCTL_INT(_vfs, OID_AUTO, uniondebug, CTLFLAG_RD, &uniondebug, 0, "");
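/*
 * Editorial sketch (not part of the original file): the two SYSCTL_INT
 * declarations above presumably sit inside an #if UDEBUG_ENABLED / #else
 * block in the full source (the guard lines are elided here), and the
 * UDEBUG() calls used throughout this file most likely expand to a printf
 * gated on the vfs.uniondebug sysctl.  The guard name and macro below are
 * assumptions, shown only to make the later UDEBUG() calls readable, and
 * are disabled with #if 0.
 */
#if 0
int uniondebug = 0;			/* backing variable for vfs.uniondebug */

#if UDEBUG_ENABLED
#define UDEBUG(x)	if (uniondebug) printf x	/* UDEBUG(("fmt", args)) */
#else
#define UDEBUG(x)
#endif
#endif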
71 static int union_access (struct vop_access_args *ap);
72 static int union_advlock (struct vop_advlock_args *ap);
73 static int union_bmap (struct vop_bmap_args *ap);
74 static int union_close (struct vop_close_args *ap);
75 static int union_create (struct vop_old_create_args *ap);
76 static int union_fsync (struct vop_fsync_args *ap);
77 static int union_getattr (struct vop_getattr_args *ap);
78 static int union_inactive (struct vop_inactive_args *ap);
79 static int union_ioctl (struct vop_ioctl_args *ap);
80 static int union_link (struct vop_old_link_args *ap);
81 static int union_lock (struct vop_lock_args *ap);
82 static int union_lookup (struct vop_old_lookup_args *ap);
83 static int union_lookup1 (struct vnode *udvp, struct vnode **dvp,
85 struct componentname *cnp);
86 static int union_mkdir (struct vop_old_mkdir_args *ap);
87 static int union_mknod (struct vop_old_mknod_args *ap);
88 static int union_mmap (struct vop_mmap_args *ap);
89 static int union_open (struct vop_open_args *ap);
90 static int union_pathconf (struct vop_pathconf_args *ap);
91 static int union_print (struct vop_print_args *ap);
92 static int union_read (struct vop_read_args *ap);
93 static int union_readdir (struct vop_readdir_args *ap);
94 static int union_readlink (struct vop_readlink_args *ap);
95 static int union_reclaim (struct vop_reclaim_args *ap);
96 static int union_remove (struct vop_old_remove_args *ap);
97 static int union_rename (struct vop_old_rename_args *ap);
98 static int union_revoke (struct vop_revoke_args *ap);
99 static int union_rmdir (struct vop_old_rmdir_args *ap);
100 static int union_poll (struct vop_poll_args *ap);
101 static int union_setattr (struct vop_setattr_args *ap);
102 static int union_strategy (struct vop_strategy_args *ap);
103 static int union_getpages (struct vop_getpages_args *ap);
104 static int union_putpages (struct vop_putpages_args *ap);
105 static int union_symlink (struct vop_old_symlink_args *ap);
106 static int union_unlock (struct vop_unlock_args *ap);
107 static int union_whiteout (struct vop_old_whiteout_args *ap);
108 static int union_write (struct vop_read_args *ap);
112 union_lock_upper(struct union_node *un, struct thread *td)
114 struct vnode *uppervp;
116 if ((uppervp = un->un_uppervp) != NULL) {
118 vn_lock(uppervp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY);
120 KASSERT((uppervp == NULL || uppervp->v_usecount > 0), ("uppervp usecount is 0"));
126 union_unlock_upper(struct vnode *uppervp, struct thread *td)
133 union_lock_other(struct union_node *un, struct thread *td)
137 if (un->un_uppervp != NULL) {
138 vp = union_lock_upper(un, td);
139 } else if ((vp = un->un_lowervp) != NULL) {
141 vn_lock(vp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY);
148 union_unlock_other(struct vnode *vp, struct thread *td)
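/*
 * Usage sketch (illustrative, not original code): the lock helpers above
 * are used throughout this file in a bracketed lock/operate/unlock pattern.
 * The helper name example_upper_op is hypothetical and the VOP_FSYNC() call
 * is only a placeholder for whatever upper-layer operation a VOP handler
 * needs to perform; the sketch is disabled with #if 0.
 */
#if 0
static int
example_upper_op(struct union_node *un, struct thread *td)
{
	struct vnode *uppervp;
	int error = EROFS;

	if ((uppervp = union_lock_upper(un, td)) != NULLVP) {
		/* uppervp is locked and referenced here */
		error = VOP_FSYNC(uppervp, MNT_WAIT);
		union_unlock_upper(uppervp, td);
	}
	return (error);
}
#endif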
156 * udvp must be exclusively locked on call and will remain
157 * exclusively locked on return. This is the mount point
158 * for our filesystem.
160 * dvp Our base directory, locked and referenced.
161 * The passed dvp will be dereferenced and unlocked on return
162 * and a new dvp will be returned which is locked and
163 * referenced in the same variable.
165 * vpp is filled in with the result if no error occurred,
168 * If an error is returned, *vpp is set to NULLVP. If no
169 * error occurs, *vpp is returned with a reference and an exclusive lock.
174 union_lookup1(struct vnode *udvp, struct vnode **pdvp, struct vnode **vpp,
175 struct componentname *cnp)
178 struct thread *td = cnp->cn_td;
179 struct vnode *dvp = *pdvp;
184 * If stepping up the directory tree, check for going
185 * back across the mount point, in which case do what
186 * lookup would do by stepping back down the mount
189 if (cnp->cn_flags & CNP_ISDOTDOT) {
190 while ((dvp != udvp) && (dvp->v_flag & VROOT)) {
192 * Don't do the NOCROSSMOUNT check
193 * at this level. By definition,
194 * union fs deals with namespaces, not filesystems.
198 dvp = dvp->v_mount->mnt_vnodecovered;
201 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
206 * Set return dvp to be the upperdvp 'parent directory'.
211 * If the VOP_LOOKUP call generates an error, tdvp is invalid and no
212 * changes will have been made to dvp, so we are set to return.
215 error = VOP_LOOKUP(dvp, &tdvp, cnp);
217 UDEBUG(("dvp %p error %d flags %lx\n", dvp, error, cnp->cn_flags));
223 * The parent directory will have been unlocked, unless lookup
224 * found the last component or if dvp == tdvp (tdvp must be locked).
226 * We want our dvp to remain locked and ref'd. We also want tdvp
227 * to remain locked and ref'd.
229 UDEBUG(("parentdir %p result %p flag %lx\n", dvp, tdvp, cnp->cn_flags));
232 if (dvp != tdvp && (cnp->cn_flags & CNP_XXXISLASTCN) == 0)
233 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
237 * Lastly check if the current node is a mount point in
238 * which case walk up the mount hierarchy making sure not to
239 * bump into the root of the mount tree (ie. dvp != udvp).
241 * We use dvp as a temporary variable here, it is no longer related
242 * to the dvp above. However, we have to ensure that both *pdvp and
243 * tdvp are locked on return.
249 (dvp->v_type == VDIR) &&
250 (mp = dvp->v_mountedhere)
261 error = VFS_ROOT(mp, &dvp);
266 vn_lock(*pdvp, LK_EXCLUSIVE | LK_RETRY);
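/*
 * Caller sketch (illustrative, not original code): union_lookup1() takes
 * the directory in *pdvp locked and referenced and returns it (possibly a
 * different vnode) in the same state, with the looked-up vnode in *vpp
 * locked and referenced on success.  A typical call therefore looks like
 * the following; the local names are hypothetical and the block is
 * disabled with #if 0.
 */
#if 0
	struct vnode *dirvp = upperdvp;		/* locked + referenced */
	struct vnode *resvp;
	int error;

	error = union_lookup1(um->um_uppervp, &dirvp, &resvp, cnp);
	/* dirvp is still locked and referenced here, possibly a new vnode */
	if (error == 0 && resvp != NULLVP) {
		/* resvp is locked and referenced; use it, then vput() it */
	}
#endif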
278 * union_lookup(struct vnodeop_desc *a_desc, struct vnode *a_dvp,
279 * struct vnode **a_vpp, struct componentname *a_cnp)
282 union_lookup(struct vop_old_lookup_args *ap)
286 struct vnode *uppervp, *lowervp;
287 struct vnode *upperdvp, *lowerdvp;
288 struct vnode *dvp = ap->a_dvp; /* starting dir */
289 struct union_node *dun = VTOUNION(dvp); /* associated union node */
290 struct componentname *cnp = ap->a_cnp;
291 struct thread *td = cnp->cn_td;
292 int lockparent = cnp->cn_flags & CNP_LOCKPARENT;
293 struct union_mount *um = MOUNTTOUNIONMOUNT(dvp->v_mount);
294 struct ucred *saved_cred = NULL;
301 * Disallow write attempts to the filesystem mounted read-only.
303 if ((dvp->v_mount->mnt_flag & MNT_RDONLY) &&
304 (cnp->cn_nameiop == NAMEI_DELETE || cnp->cn_nameiop == NAMEI_RENAME)) {
309 * For any lookups we do, always return with the parent locked
311 cnp->cn_flags |= CNP_LOCKPARENT;
313 lowerdvp = dun->un_lowervp;
322 * Get a private lock on uppervp and a reference, effectively
323 * taking it out of the union_node's control.
325 * We must lock upperdvp while holding our lock on dvp
326 * to avoid a deadlock.
328 upperdvp = union_lock_upper(dun, td);
331 * do the lookup in the upper level.
332 * if that level consumes additional pathnames,
333 * then assume that something special is going
334 * on and just return that vnode.
336 if (upperdvp != NULLVP) {
338 * We do not have to worry about the DOTDOT case, we've
339 * already unlocked dvp.
341 UDEBUG(("A %p\n", upperdvp));
344 * Do the lookup. We must supply a locked and referenced
345 * upperdvp to the function and will get a new locked and
346 * referenced upperdvp back with the old having been
349 * If an error is returned, uppervp will be NULLVP. If no
350 * error occurs, uppervp will be the locked and referenced
351 * return vnode or possibly NULL, depending on what is being
352 * requested. It is possible that the returned uppervp
353 * will be the same as upperdvp.
355 uerror = union_lookup1(um->um_uppervp, &upperdvp, &uppervp, cnp);
357 "uerror %d upperdvp %p %d/%d, uppervp %p ref=%d/lck=%d\n",
360 upperdvp->v_usecount,
361 VOP_ISLOCKED(upperdvp, NULL),
363 (uppervp ? uppervp->v_usecount : -99),
364 (uppervp ? VOP_ISLOCKED(uppervp, NULL) : -99)
368 * Disallow write attempts to the filesystem mounted read-only.
370 if (uerror == EJUSTRETURN &&
371 (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
372 (cnp->cn_nameiop == NAMEI_CREATE || cnp->cn_nameiop == NAMEI_RENAME)) {
378 * Special case. If cn_consume != 0 skip out. The result
379 * of the lookup is transferred to our return variable. If
380 * an error occurred we have to throw away the results.
383 if (cnp->cn_consume != 0) {
384 if ((error = uerror) == 0) {
385 *ap->a_vpp = uppervp;
392 * Calculate whiteout, fall through
395 if (uerror == ENOENT || uerror == EJUSTRETURN) {
396 if (cnp->cn_flags & CNP_ISWHITEOUT) {
398 } else if (lowerdvp != NULLVP) {
401 terror = VOP_GETATTR(upperdvp, &va);
402 if (terror == 0 && (va.va_flags & OPAQUE))
409 * in a similar way to the upper layer, do the lookup
410 * in the lower layer. this time, if there is some
411 * component magic going on, then vput whatever we got
412 * back from the upper layer and return the lower vnode instead.
416 if (lowerdvp != NULLVP && !iswhiteout) {
419 UDEBUG(("B %p\n", lowerdvp));
422 * Force only LOOKUPs on the lower node, since
423 * we won't be making changes to it anyway.
425 nameiop = cnp->cn_nameiop;
426 cnp->cn_nameiop = NAMEI_LOOKUP;
427 if (um->um_op == UNMNT_BELOW) {
428 saved_cred = cnp->cn_cred;
429 cnp->cn_cred = um->um_cred;
433 * We shouldn't have to worry about locking interactions
434 * between the lower layer and our union layer (w.r.t.
435 * `..' processing) because we don't futz with lowervp
436 * locks in the union-node instantiation code path.
438 * union_lookup1() requires lowervp to be locked on entry,
439 * and it will be unlocked on return. The ref count will
440 * not change. On return lowervp doesn't represent anything
441 * to us so we NULL it out.
444 vn_lock(lowerdvp, LK_EXCLUSIVE | LK_RETRY);
445 lerror = union_lookup1(um->um_lowervp, &lowerdvp, &lowervp, cnp);
446 if (lowerdvp == lowervp)
450 lowerdvp = NULL; /* lowerdvp invalid after vput */
452 if (um->um_op == UNMNT_BELOW)
453 cnp->cn_cred = saved_cred;
454 cnp->cn_nameiop = nameiop;
456 if (cnp->cn_consume != 0 || lerror == EACCES) {
457 if ((error = lerror) == 0) {
458 *ap->a_vpp = lowervp;
464 UDEBUG(("C %p\n", lowerdvp));
465 if ((cnp->cn_flags & CNP_ISDOTDOT) && dun->un_pvp != NULLVP) {
466 if ((lowervp = LOWERVP(dun->un_pvp)) != NULL) {
468 vn_lock(lowervp, LK_EXCLUSIVE | LK_RETRY);
475 * Ok. Now we have uerror, uppervp, upperdvp, lerror, and lowervp.
477 * 1. If both layers returned an error, select the upper layer.
479 * 2. If the upper layer failed and the bottom layer succeeded,
480 * two subcases occur:
482 * a. The bottom vnode is not a directory, in which case
483 * just return a new union vnode referencing an
484 * empty top layer and the existing bottom layer.
486 * b. The bottom vnode is a directory, in which case
487 * create a new directory in the top layer
488 * and fall through to case 3.
490 * 3. If the top layer succeeded then return a new union
491 * vnode referencing whatever the new top layer and
492 * whatever the bottom layer returned.
496 if ((uerror != 0) && (lerror != 0)) {
502 if (uerror != 0 /* && (lerror == 0) */ ) {
503 if (lowervp->v_type == VDIR) { /* case 2b. */
504 KASSERT(uppervp == NULL, ("uppervp unexpectedly non-NULL"));
506 * oops, uppervp has a problem, we may have to shadow.
508 uerror = union_mkshadow(um, upperdvp, cnp, &uppervp);
517 * Must call union_allocvp with both the upper and lower vnodes
518 * referenced and the upper vnode locked. ap->a_vpp is returned
519 * referenced and locked. lowervp, uppervp, and upperdvp are
520 * absorbed by union_allocvp() whether it succeeds or fails.
522 * upperdvp is the parent directory of uppervp which may be
523 * different, depending on the path, from dun->un_uppervp. That's
524 * why it is a separate argument. Note that it must be unlocked.
526 * dvp must be locked on entry to the call and will be locked on
530 if (uppervp && uppervp != upperdvp)
531 VOP_UNLOCK(uppervp, 0);
533 VOP_UNLOCK(lowervp, 0);
535 VOP_UNLOCK(upperdvp, 0);
537 error = union_allocvp(ap->a_vpp, dvp->v_mount, dvp, upperdvp, cnp,
538 uppervp, lowervp, 1);
540 UDEBUG(("Create %p = %p %p refs=%d\n", *ap->a_vpp, uppervp, lowervp, (*ap->a_vpp) ? ((*ap->a_vpp)->v_usecount) : -99));
549 * - put away any extra junk lying around. Note that lowervp
550 * (if not NULL) will never be the same as *ap->a_vp and
551 * neither will uppervp, because when we set that state we
552 * NULL-out lowervp or uppervp. On the other hand, upperdvp
553 * may match uppervp or *ap->a_vpp.
555 * - relock/unlock dvp if appropriate.
560 if (upperdvp == uppervp || upperdvp == *ap->a_vpp)
573 * Restore LOCKPARENT state
577 cnp->cn_flags &= ~CNP_LOCKPARENT;
579 UDEBUG(("Out %d vpp %p/%d lower %p upper %p\n", error, *ap->a_vpp,
580 ((*ap->a_vpp) ? (*ap->a_vpp)->v_usecount : -99),
584 * dvp lock state, determine whether to relock dvp. dvp is expected
585 * to be locked on return if:
587 * - there was an error (other than EJUSTRETURN), or
588 * - we hit the last component and lockparent is true
590 * dvp_is_locked is the current state of the dvp lock, not counting
591 * the possibility that *ap->a_vpp == dvp (in which case it is locked
592 * anyway). Note that *ap->a_vpp == dvp only if no error occurred.
595 if (*ap->a_vpp != dvp) {
596 if ((error == 0 || error == EJUSTRETURN) && !lockparent) {
606 if (cnp->cn_namelen == 1 &&
607 cnp->cn_nameptr[0] == '.' &&
609 panic("union_lookup returning . (%p) not same as startdir (%p)", ap->a_vpp, dvp);
619 * a_dvp is locked on entry and remains locked on return. a_vpp is returned
620 * locked if no error occurs, otherwise it is garbage.
622 * union_create(struct vnode *a_dvp, struct vnode **a_vpp,
623 * struct componentname *a_cnp, struct vattr *a_vap)
626 union_create(struct vop_old_create_args *ap)
628 struct union_node *dun = VTOUNION(ap->a_dvp);
629 struct componentname *cnp = ap->a_cnp;
630 struct thread *td = cnp->cn_td;
634 if ((dvp = union_lock_upper(dun, td)) != NULL) {
638 error = VOP_CREATE(dvp, &vp, cnp, ap->a_vap);
640 mp = ap->a_dvp->v_mount;
642 UDEBUG(("ALLOCVP-1 FROM %p REFS %d\n", vp, vp->v_usecount));
643 error = union_allocvp(ap->a_vpp, mp, NULLVP, NULLVP,
645 UDEBUG(("ALLOCVP-2B FROM %p REFS %d\n", *ap->a_vpp, vp->v_usecount));
647 union_unlock_upper(dvp, td);
653 * union_whiteout(struct vnode *a_dvp, struct componentname *a_cnp,
657 union_whiteout(struct vop_old_whiteout_args *ap)
659 struct union_node *un = VTOUNION(ap->a_dvp);
660 struct componentname *cnp = ap->a_cnp;
661 struct vnode *uppervp;
662 int error = EOPNOTSUPP;
664 if ((uppervp = union_lock_upper(un, cnp->cn_td)) != NULLVP) {
665 error = VOP_WHITEOUT(un->un_uppervp, cnp, ap->a_flags);
666 union_unlock_upper(uppervp, cnp->cn_td);
674 * a_dvp is locked on entry and should remain locked on return.
675 * a_vpp is garbage whether an error occurs or not.
677 * union_mknod(struct vnode *a_dvp, struct vnode **a_vpp,
678 * struct componentname *a_cnp, struct vattr *a_vap)
681 union_mknod(struct vop_old_mknod_args *ap)
683 struct union_node *dun = VTOUNION(ap->a_dvp);
684 struct componentname *cnp = ap->a_cnp;
688 if ((dvp = union_lock_upper(dun, cnp->cn_td)) != NULL) {
689 error = VOP_MKNOD(dvp, ap->a_vpp, cnp, ap->a_vap);
690 union_unlock_upper(dvp, cnp->cn_td);
698 * run open VOP. When opening the underlying vnode we have to mimic
699 * vn_open. What we *really* need to do to avoid screwups if the
700 * open semantics change is to call vn_open(). For example, ufs blows
701 * up if you open a file but do not vmio it prior to writing.
703 * union_open(struct vnodeop_desc *a_desc, struct vnode *a_vp, int a_mode,
704 * struct ucred *a_cred, struct thread *a_td)
707 union_open(struct vop_open_args *ap)
709 struct union_node *un = VTOUNION(ap->a_vp);
711 int mode = ap->a_mode;
712 struct ucred *cred = ap->a_cred;
713 struct thread *td = ap->a_td;
718 * If there is an existing upper vp then simply open that.
719 * The upper vp takes precedence over the lower vp. When opening
720 * a lower vp for writing copy it to the uppervp and then open the
723 * At the end of this section tvp will be left locked.
725 if ((tvp = union_lock_upper(un, td)) == NULLVP) {
727 * If the lower vnode is being opened for writing, then
728 * copy the file contents to the upper vnode and open that,
729 * otherwise we can simply open the lower vnode.
731 tvp = un->un_lowervp;
732 if ((ap->a_mode & FWRITE) && (tvp->v_type == VREG)) {
733 int docopy = !(mode & O_TRUNC);
734 error = union_copyup(un, docopy, cred, td);
735 tvp = union_lock_upper(un, td);
739 vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY);
745 * We are holding the correct vnode, open it. Note
746 * that in DragonFly, VOP_OPEN is responsible for associating
747 * a VM object with the vnode if the vnode is mappable or the
748 * underlying filesystem uses buffer cache calls on it.
751 error = VOP_OPEN(tvp, mode, cred, NULL);
754 * Release any locks held
758 union_unlock_upper(tvp, td);
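/*
 * Decision sketch (illustrative, not original code): the copy-up rule in
 * union_open() above reduces to the predicate below.  A write-mode open of
 * a regular file that exists only in the lower layer copies it to the upper
 * layer first (copying the contents unless O_TRUNC made that pointless).
 * The helper name is hypothetical and the block is disabled with #if 0.
 */
#if 0
static int
union_open_needs_copyup(struct union_node *un, int mode)
{
	return (un->un_uppervp == NULLVP &&
		(mode & FWRITE) &&
		un->un_lowervp != NULLVP &&
		un->un_lowervp->v_type == VREG);
}
#endif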
768 * It is unclear whether a_vp is passed locked or unlocked. Whatever
769 * the case we do not change it.
771 * union_close(struct vnode *a_vp, int a_fflag, struct ucred *a_cred,
772 * struct thread *a_td)
775 union_close(struct vop_close_args *ap)
777 struct union_node *un = VTOUNION(ap->a_vp);
780 if ((vp = un->un_uppervp) == NULLVP) {
781 #ifdef UNION_DIAGNOSTIC
782 if (un->un_openl <= 0)
783 panic("union: un_openl cnt");
788 ap->a_head.a_ops = *vp->v_ops;
790 return(vop_close_ap(ap));
794 * Check access permission on the union vnode.
795 * The access check being enforced is to check
796 * against both the underlying vnode, and any
797 * copied vnode. This ensures that no additional
798 * file permissions are given away simply because
799 * the user caused an implicit file copy.
801 * union_access(struct vnodeop_desc *a_desc, struct vnode *a_vp, int a_mode,
802 * struct ucred *a_cred, struct thread *a_td)
805 union_access(struct vop_access_args *ap)
807 struct union_node *un = VTOUNION(ap->a_vp);
808 struct thread *td = ap->a_td;
813 * Disallow write attempts on filesystems mounted read-only.
815 if ((ap->a_mode & VWRITE) &&
816 (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)) {
817 switch (ap->a_vp->v_type) {
827 if ((vp = union_lock_upper(un, td)) != NULLVP) {
828 ap->a_head.a_ops = *vp->v_ops;
830 error = vop_access_ap(ap);
831 union_unlock_upper(vp, td);
835 if ((vp = un->un_lowervp) != NULLVP) {
836 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
837 ap->a_head.a_ops = *vp->v_ops;
841 * Remove VWRITE from a_mode if our mount point is RW, because
842 * we want to allow writes and lowervp may be read-only.
844 if ((un->un_vnode->v_mount->mnt_flag & MNT_RDONLY) == 0)
845 ap->a_mode &= ~VWRITE;
847 error = vop_access_ap(ap);
849 struct union_mount *um;
851 um = MOUNTTOUNIONMOUNT(un->un_vnode->v_mount);
853 if (um->um_op == UNMNT_BELOW) {
854 ap->a_cred = um->um_cred;
855 error = vop_access_ap(ap);
864 * We handle getattr only to change the fsid and
867 * It's not clear whether VOP_GETATTR is to be
868 * called with the vnode locked or not. stat() calls
869 * it with (vp) locked, and fstat() calls it with (vp) unlocked.
872 * Because of this we cannot use our normal locking functions
873 * if we do not intend to lock the main a_vp node. At the moment
874 * we are running without any specific locking at all, but beware:
875 * any programmer adding locking here must take care to get it right.
878 * union_getattr(struct vnode *a_vp, struct vattr *a_vap,
879 * struct ucred *a_cred, struct thread *a_td)
882 union_getattr(struct vop_getattr_args *ap)
885 struct union_node *un = VTOUNION(ap->a_vp);
891 * Some programs walk the filesystem hierarchy by counting
892 * links to directories to avoid stat'ing all the time.
893 * This means the link count on directories needs to be "correct".
894 * The only way to do that is to call getattr on both layers
895 * and fix up the link count. The link count will not necessarily
896 * be accurate but will be large enough to defeat the tree walkers.
901 if ((vp = un->un_uppervp) != NULLVP) {
902 error = VOP_GETATTR(vp, vap);
905 /* XXX isn't this dangerous without a lock? */
906 union_newsize(ap->a_vp, vap->va_size, VNOVAL);
911 } else if (vp->v_type == VDIR && un->un_lowervp != NULLVP) {
919 error = VOP_GETATTR(vp, vap);
922 /* XXX isn't this dangerous without a lock? */
923 union_newsize(ap->a_vp, VNOVAL, vap->va_size);
926 if ((vap != ap->a_vap) && (vap->va_type == VDIR))
927 ap->a_vap->va_nlink += vap->va_nlink;
932 * union_setattr(struct vnode *a_vp, struct vattr *a_vap,
933 * struct ucred *a_cred, struct thread *a_td)
936 union_setattr(struct vop_setattr_args *ap)
938 struct union_node *un = VTOUNION(ap->a_vp);
939 struct thread *td = ap->a_td;
940 struct vattr *vap = ap->a_vap;
941 struct vnode *uppervp;
945 * Disallow write attempts on filesystems mounted read-only.
947 if ((ap->a_vp->v_mount->mnt_flag & MNT_RDONLY) &&
948 (vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
949 vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
950 vap->va_mtime.tv_sec != VNOVAL ||
951 vap->va_mode != (mode_t)VNOVAL)) {
956 * Handle case of truncating lower object to zero size,
957 * by creating a zero length upper object. This is to
958 * handle the case of open with O_TRUNC and O_CREAT.
960 if (un->un_uppervp == NULLVP && (un->un_lowervp->v_type == VREG)) {
961 error = union_copyup(un, (ap->a_vap->va_size != 0),
962 ap->a_cred, ap->a_td);
968 * Try to set attributes in upper layer,
969 * otherwise return read-only filesystem error.
972 if ((uppervp = union_lock_upper(un, td)) != NULLVP) {
973 error = VOP_SETATTR(un->un_uppervp, ap->a_vap, ap->a_cred);
974 if ((error == 0) && (ap->a_vap->va_size != VNOVAL))
975 union_newsize(ap->a_vp, ap->a_vap->va_size, VNOVAL);
976 union_unlock_upper(uppervp, td);
986 union_getpages(struct vop_getpages_args *ap)
990 r = vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
991 ap->a_count, ap->a_reqpage);
1000 union_putpages(struct vop_putpages_args *ap)
1004 r = vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
1005 ap->a_sync, ap->a_rtvals);
1010 * union_read(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
1011 * struct ucred *a_cred)
1014 union_read(struct vop_read_args *ap)
1016 struct union_node *un = VTOUNION(ap->a_vp);
1017 struct thread *td = ap->a_uio->uio_td;
1021 uvp = union_lock_other(un, td);
1022 KASSERT(uvp != NULL, ("union_read: backing vnode missing!"));
1024 if (ap->a_vp->v_flag & VOBJBUF)
1025 union_vm_coherency(ap->a_vp, ap->a_uio, 0);
1027 error = VOP_READ(uvp, ap->a_uio, ap->a_ioflag, ap->a_cred);
1028 union_unlock_other(uvp, td);
1032 * perhaps the size of the underlying object has changed under
1033 * our feet. take advantage of the offset information present
1034 * in the uio structure.
1037 struct union_node *un = VTOUNION(ap->a_vp);
1038 off_t cur = ap->a_uio->uio_offset;
1040 if (uvp == un->un_uppervp) {
1041 if (cur > un->un_uppersz)
1042 union_newsize(ap->a_vp, cur, VNOVAL);
1044 if (cur > un->un_lowersz)
1045 union_newsize(ap->a_vp, VNOVAL, cur);
1052 * union_write(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
1053 * struct ucred *a_cred)
1056 union_write(struct vop_read_args *ap)
1058 struct union_node *un = VTOUNION(ap->a_vp);
1059 struct thread *td = ap->a_uio->uio_td;
1060 struct vnode *uppervp;
1063 if ((uppervp = union_lock_upper(un, td)) == NULLVP)
1064 panic("union: missing upper layer in write");
1067 * Since our VM pages are associated with our vnode rather than
1068 * the real vnode, and since we do not run our reads and writes
1069 * through our own VM cache, we have a VM/VFS coherency problem.
1070 * We solve it by invalidating or flushing the associated VM
1071 * pages prior to allowing a normal read or write to occur.
1073 * VM-backed writes (UIO_NOCOPY) have to be converted to normal
1074 * writes because we are not cache-coherent. Normal writes need
1075 * to be made coherent with our VM-backing store, which we do by
1076 * first flushing any dirty VM pages associated with the write
1077 * range, and then destroying any clean VM pages associated with
1081 if (ap->a_uio->uio_segflg == UIO_NOCOPY) {
1082 ap->a_uio->uio_segflg = UIO_SYSSPACE;
1083 } else if (ap->a_vp->v_flag & VOBJBUF) {
1084 union_vm_coherency(ap->a_vp, ap->a_uio, 1);
1087 error = VOP_WRITE(uppervp, ap->a_uio, ap->a_ioflag, ap->a_cred);
1090 * the size of the underlying object may be changed by the write.
1094 off_t cur = ap->a_uio->uio_offset;
1096 if (cur > un->un_uppersz)
1097 union_newsize(ap->a_vp, cur, VNOVAL);
1099 union_unlock_upper(uppervp, td);
1104 * union_ioctl(struct vnode *a_vp, int a_command, caddr_t a_data, int a_fflag,
1105 * struct ucred *a_cred, struct thread *a_td)
1108 union_ioctl(struct vop_ioctl_args *ap)
1110 struct vnode *ovp = OTHERVP(ap->a_vp);
1112 ap->a_head.a_ops = *ovp->v_ops;
1114 return(vop_ioctl_ap(ap));
1118 * union_poll(struct vnode *a_vp, int a_events, struct ucred *a_cred,
1119 * struct thread *a_td)
1122 union_poll(struct vop_poll_args *ap)
1124 struct vnode *ovp = OTHERVP(ap->a_vp);
1126 ap->a_head.a_ops = *ovp->v_ops;
1128 return(vop_poll_ap(ap));
1132 * union_revoke(struct vnode *a_vp, int a_flags, struct thread *a_td)
1135 union_revoke(struct vop_revoke_args *ap)
1137 struct vnode *vp = ap->a_vp;
1140 if ((vx = UPPERVP(vp)) != NULL) {
1141 if (vx_get(vx) == 0) {
1142 VOP_REVOKE(vx, ap->a_flags);
1146 if ((vx = LOWERVP(vp)) != NULL) {
1147 if (vx_get(vx) == 0) {
1148 VOP_REVOKE(vx, ap->a_flags);
1157 * union_mmap(struct vnode *a_vp, int a_fflags, struct ucred *a_cred,
1158 * struct thread *a_td)
1161 union_mmap(struct vop_mmap_args *ap)
1163 struct vnode *ovp = OTHERVP(ap->a_vp);
1165 ap->a_head.a_ops = *ovp->v_ops;
1167 return (vop_mmap_ap(ap));
1171 * union_fsync(struct vnode *a_vp, struct ucred *a_cred, int a_waitfor,
1172 * struct thread *a_td)
1175 union_fsync(struct vop_fsync_args *ap)
1178 struct thread *td = ap->a_td;
1179 struct vnode *targetvp;
1180 struct union_node *un = VTOUNION(ap->a_vp);
1182 if ((targetvp = union_lock_other(un, td)) != NULLVP) {
1183 error = VOP_FSYNC(targetvp, ap->a_waitfor);
1184 union_unlock_other(targetvp, td);
1193 * Remove the specified cnp. The dvp and vp are passed to us locked
1194 * and must remain locked on return.
1196 * union_remove(struct vnode *a_dvp, struct vnode *a_vp,
1197 * struct componentname *a_cnp)
1200 union_remove(struct vop_old_remove_args *ap)
1202 struct union_node *dun = VTOUNION(ap->a_dvp);
1203 struct union_node *un = VTOUNION(ap->a_vp);
1204 struct componentname *cnp = ap->a_cnp;
1205 struct thread *td = cnp->cn_td;
1206 struct vnode *uppervp;
1207 struct vnode *upperdvp;
1210 if ((upperdvp = union_lock_upper(dun, td)) == NULLVP)
1211 panic("union remove: null upper vnode");
1213 if ((uppervp = union_lock_upper(un, td)) != NULLVP) {
1214 if (union_dowhiteout(un, cnp->cn_cred, td))
1215 cnp->cn_flags |= CNP_DOWHITEOUT;
1216 error = VOP_REMOVE(upperdvp, uppervp, cnp);
1220 union_removed_upper(un);
1222 union_unlock_upper(uppervp, td);
1224 error = union_mkwhiteout(
1225 MOUNTTOUNIONMOUNT(ap->a_dvp->v_mount),
1226 upperdvp, ap->a_cnp, un->un_path);
1228 union_unlock_upper(upperdvp, td);
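/*
 * Summary sketch (illustrative, not original code): with several lines
 * elided above, the shape of union_remove() is easier to see condensed.
 * If the object exists in the upper layer it is removed there, optionally
 * leaving a whiteout so any lower copy stays hidden; if it exists only in
 * the lower layer, a whiteout entry is created in the upper directory.
 * union_dowhiteout() and union_mkwhiteout() are this filesystem's own
 * helpers; the control flow below is a reconstruction, disabled with #if 0.
 */
#if 0
	if (uppervp != NULLVP) {
		/* upper object exists: remove it, whiting it out if needed */
		if (union_dowhiteout(un, cnp->cn_cred, td))
			cnp->cn_flags |= CNP_DOWHITEOUT;
		error = VOP_REMOVE(upperdvp, uppervp, cnp);
		union_unlock_upper(uppervp, td);
	} else {
		/* lower-only object: just record a whiteout in the upper dir */
		error = union_mkwhiteout(MOUNTTOUNIONMOUNT(ap->a_dvp->v_mount),
		    upperdvp, cnp, un->un_path);
	}
#endif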
1235 * tdvp will be locked on entry, vp will not be locked on entry.
1236 * tdvp should remain locked on return and vp should remain unlocked.
1239 * union_link(struct vnode *a_tdvp, struct vnode *a_vp,
1240 * struct componentname *a_cnp)
1243 union_link(struct vop_old_link_args *ap)
1245 struct componentname *cnp = ap->a_cnp;
1246 struct thread *td = cnp->cn_td;
1247 struct union_node *dun = VTOUNION(ap->a_tdvp);
1252 if (ap->a_tdvp->v_ops != ap->a_vp->v_ops) {
1255 struct union_node *tun = VTOUNION(ap->a_vp);
1257 if (tun->un_uppervp == NULLVP) {
1258 vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY);
1260 if (dun->un_uppervp == tun->un_dirvp) {
1261 if (dun->un_flags & UN_ULOCK) {
1262 dun->un_flags &= ~UN_ULOCK;
1263 VOP_UNLOCK(dun->un_uppervp, 0);
1267 error = union_copyup(tun, 1, cnp->cn_cred, td);
1269 if (dun->un_uppervp == tun->un_dirvp) {
1270 vn_lock(dun->un_uppervp,
1271 LK_EXCLUSIVE | LK_RETRY);
1272 dun->un_flags |= UN_ULOCK;
1275 VOP_UNLOCK(ap->a_vp, 0);
1277 vp = tun->un_uppervp;
1284 * Make sure upper is locked, then unlock the union directory we were
1285 * called with to avoid a deadlock while we are calling VOP_LINK on
1286 * the upper (with tdvp locked and vp not locked). Our ap->a_tdvp
1287 * is expected to be locked on return.
1290 if ((tdvp = union_lock_upper(dun, td)) == NULLVP)
1293 VOP_UNLOCK(ap->a_tdvp, 0); /* unlock calling node */
1294 error = VOP_LINK(tdvp, vp, cnp); /* call link on upper */
1297 * We have to unlock tdvp prior to relocking our calling node in
1298 * order to avoid a deadlock.
1300 union_unlock_upper(tdvp, td);
1301 vn_lock(ap->a_tdvp, LK_EXCLUSIVE | LK_RETRY);
1306 * union_rename(struct vnode *a_fdvp, struct vnode *a_fvp,
1307 * struct componentname *a_fcnp, struct vnode *a_tdvp,
1308 * struct vnode *a_tvp, struct componentname *a_tcnp)
1311 union_rename(struct vop_old_rename_args *ap)
1314 struct vnode *fdvp = ap->a_fdvp;
1315 struct vnode *fvp = ap->a_fvp;
1316 struct vnode *tdvp = ap->a_tdvp;
1317 struct vnode *tvp = ap->a_tvp;
1320 * Figure out what fdvp to pass to our upper or lower vnode. If we
1321 * replace the fdvp, release the original one and ref the new one.
1324 if (fdvp->v_tag == VT_UNION) { /* always true */
1325 struct union_node *un = VTOUNION(fdvp);
1326 if (un->un_uppervp == NULLVP) {
1328 * this should never happen in normal
1329 * operation but might if there was
1330 * a problem creating the top-level shadow
1336 fdvp = un->un_uppervp;
1342 * Figure out what fvp to pass to our upper or lower vnode. If we
1343 * replace the fvp, release the original one and ref the new one.
1346 if (fvp->v_tag == VT_UNION) { /* always true */
1347 struct union_node *un = VTOUNION(fvp);
1349 struct union_mount *um = MOUNTTOUNIONMOUNT(fvp->v_mount);
1352 if (un->un_uppervp == NULLVP) {
1353 switch(fvp->v_type) {
1355 vn_lock(un->un_vnode, LK_EXCLUSIVE | LK_RETRY);
1356 error = union_copyup(un, 1, ap->a_fcnp->cn_cred, ap->a_fcnp->cn_td);
1357 VOP_UNLOCK(un->un_vnode, 0);
1365 * There is only one way to rename a directory
1366 * based in the lowervp, and that is to copy
1367 * the entire directory hierarchy. Otherwise
1368 * it would not last across a reboot.
1373 vn_lock(fdvp, LK_EXCLUSIVE | LK_RETRY);
1374 error = union_mkshadow(um, fdvp,
1375 ap->a_fcnp, &un->un_uppervp);
1376 VOP_UNLOCK(fdvp, 0);
1378 VOP_UNLOCK(un->un_uppervp, 0);
1389 if (un->un_lowervp != NULLVP)
1390 ap->a_fcnp->cn_flags |= CNP_DOWHITEOUT;
1391 fvp = un->un_uppervp;
1397 * Figure out what tdvp (destination directory) to pass to the
1398 * lower level. If we replace it with uppervp, we need to vput the
1399 * old one. The exclusive lock is transferred to what we will pass
1400 * down in the VOP_RENAME and we replace uppervp with a simple reference.
1404 if (tdvp->v_tag == VT_UNION) {
1405 struct union_node *un = VTOUNION(tdvp);
1407 if (un->un_uppervp == NULLVP) {
1409 * this should never happen in normal
1410 * operation but might if there was
1411 * a problem creating the top-level shadow
1419 * new tdvp is a lock and reference on uppervp, put away the old tdvp.
1422 tdvp = union_lock_upper(un, ap->a_tcnp->cn_td);
1427 * Figure out what tvp (destination file) to pass to the
1430 * If the uppervp file does not exist put away the (wrong)
1431 * file and change tvp to NULL.
1434 if (tvp != NULLVP && tvp->v_tag == VT_UNION) {
1435 struct union_node *un = VTOUNION(tvp);
1437 tvp = union_lock_upper(un, ap->a_tcnp->cn_td);
1439 /* note: tvp may be NULL */
1443 * VOP_RENAME releases/vputs prior to returning, so we have no
1447 return (VOP_RENAME(fdvp, fvp, ap->a_fcnp, tdvp, tvp, ap->a_tcnp));
1450 * Error. We still have to release / vput the various elements.
1458 if (tvp != NULLVP) {
1468 * union_mkdir(struct vnode *a_dvp, struct vnode **a_vpp,
1469 * struct componentname *a_cnp, struct vattr *a_vap)
1472 union_mkdir(struct vop_old_mkdir_args *ap)
1474 struct union_node *dun = VTOUNION(ap->a_dvp);
1475 struct componentname *cnp = ap->a_cnp;
1476 struct thread *td = cnp->cn_td;
1477 struct vnode *upperdvp;
1480 if ((upperdvp = union_lock_upper(dun, td)) != NULLVP) {
1483 error = VOP_MKDIR(upperdvp, &vp, cnp, ap->a_vap);
1484 union_unlock_upper(upperdvp, td);
1488 UDEBUG(("ALLOCVP-2 FROM %p REFS %d\n", vp, vp->v_usecount));
1489 error = union_allocvp(ap->a_vpp, ap->a_dvp->v_mount,
1490 ap->a_dvp, NULLVP, cnp, vp, NULLVP, 1);
1491 UDEBUG(("ALLOCVP-2B FROM %p REFS %d\n", *ap->a_vpp, vp->v_usecount));
1498 * union_rmdir(struct vnode *a_dvp, struct vnode *a_vp,
1499 * struct componentname *a_cnp)
1502 union_rmdir(struct vop_old_rmdir_args *ap)
1504 struct union_node *dun = VTOUNION(ap->a_dvp);
1505 struct union_node *un = VTOUNION(ap->a_vp);
1506 struct componentname *cnp = ap->a_cnp;
1507 struct thread *td = cnp->cn_td;
1508 struct vnode *upperdvp;
1509 struct vnode *uppervp;
1512 if ((upperdvp = union_lock_upper(dun, td)) == NULLVP)
1513 panic("union rmdir: null upper vnode");
1515 if ((uppervp = union_lock_upper(un, td)) != NULLVP) {
1516 if (union_dowhiteout(un, cnp->cn_cred, td))
1517 cnp->cn_flags |= CNP_DOWHITEOUT;
1518 error = VOP_RMDIR(upperdvp, uppervp, ap->a_cnp);
1519 union_unlock_upper(uppervp, td);
1521 error = union_mkwhiteout(
1522 MOUNTTOUNIONMOUNT(ap->a_dvp->v_mount),
1523 dun->un_uppervp, ap->a_cnp, un->un_path);
1525 union_unlock_upper(upperdvp, td);
1532 * dvp is locked on entry and remains locked on return. a_vpp is garbage
1535 * union_symlink(struct vnode *a_dvp, struct vnode **a_vpp,
1536 * struct componentname *a_cnp, struct vattr *a_vap,
1540 union_symlink(struct vop_old_symlink_args *ap)
1542 struct union_node *dun = VTOUNION(ap->a_dvp);
1543 struct componentname *cnp = ap->a_cnp;
1544 struct thread *td = cnp->cn_td;
1548 if ((dvp = union_lock_upper(dun, td)) != NULLVP) {
1549 error = VOP_SYMLINK(dvp, ap->a_vpp, cnp, ap->a_vap,
1551 union_unlock_upper(dvp, td);
1557 * union_readdir works in concert with getdirentries and
1558 * readdir(3) to provide a list of entries in the unioned
1559 * directories. getdirentries is responsible for walking
1560 * down the union stack. readdir(3) is responsible for
1561 * eliminating duplicate names from the returned data stream.
1563 * union_readdir(struct vnode *a_vp, struct uio *a_uio, struct ucred *a_cred,
1564 * int *a_eofflag, u_long *a_cookies, int a_ncookies)
1567 union_readdir(struct vop_readdir_args *ap)
1569 struct union_node *un = VTOUNION(ap->a_vp);
1570 struct thread *td = ap->a_uio->uio_td;
1574 if ((uvp = union_lock_upper(un, td)) != NULLVP) {
1575 ap->a_head.a_ops = *uvp->v_ops;
1577 error = vop_readdir_ap(ap);
1578 union_unlock_upper(uvp, td);
1584 * union_readlink(struct vnode *a_vp, struct uio *a_uio, struct ucred *a_cred)
1587 union_readlink(struct vop_readlink_args *ap)
1590 struct union_node *un = VTOUNION(ap->a_vp);
1591 struct uio *uio = ap->a_uio;
1592 struct thread *td = uio->uio_td;
1595 vp = union_lock_other(un, td);
1596 KASSERT(vp != NULL, ("union_readlink: backing vnode missing!"));
1598 ap->a_head.a_ops = *vp->v_ops;
1600 error = vop_readlink_ap(ap);
1601 union_unlock_other(vp, td);
1609 * Called with the vnode locked. We are expected to unlock the vnode.
1611 * union_inactive(struct vnode *a_vp, struct thread *a_td)
1614 union_inactive(struct vop_inactive_args *ap)
1616 struct vnode *vp = ap->a_vp;
1617 /*struct thread *td = ap->a_td;*/
1618 struct union_node *un = VTOUNION(vp);
1622 * Do nothing (and _don't_ bypass).
1623 * Wait to vrele lowervp until reclaim,
1624 * so that until then our union_node is in the
1625 * cache and reusable.
1627 * NEEDSWORK: Someday, consider inactive'ing
1628 * the lowervp and then trying to reactivate it
1629 * with capabilities (v_id)
1630 * like they do in the name lookup cache code.
1631 * That's too much work for now.
1634 if (un->un_dircache != 0) {
1635 for (vpp = un->un_dircache; *vpp != NULLVP; vpp++)
1637 free (un->un_dircache, M_TEMP);
1638 un->un_dircache = 0;
1642 if ((un->un_flags & UN_ULOCK) && un->un_uppervp) {
1643 un->un_flags &= ~UN_ULOCK;
1644 VOP_UNLOCK(un->un_uppervp, 0);
1648 if ((un->un_flags & UN_CACHED) == 0)
1655 * union_reclaim(struct vnode *a_vp)
1658 union_reclaim(struct vop_reclaim_args *ap)
1660 union_freevp(ap->a_vp);
1666 union_lock(struct vop_lock_args *ap)
1669 struct vnode *vp = ap->a_vp;
1670 struct thread *td = ap->a_td;
1671 int flags = ap->a_flags;
1672 struct union_node *un;
1676 error = vop_stdlock(ap);
1682 * Lock the upper if it exists and this is an exclusive lock request.
1685 if (un->un_uppervp != NULLVP &&
1686 (flags & LK_TYPE_MASK) == LK_EXCLUSIVE) {
1687 if ((un->un_flags & UN_ULOCK) == 0 && vp->v_usecount) {
1688 error = vn_lock(un->un_uppervp, flags);
1690 struct vop_unlock_args uap = { 0 };
1691 uap.a_vp = ap->a_vp;
1692 uap.a_flags = ap->a_flags;
1693 vop_stdunlock(&uap);
1696 un->un_flags |= UN_ULOCK;
1707 * Unlock our union node. This also unlocks uppervp.
1709 * union_unlock(struct vnode *a_vp, int a_flags, struct thread *a_td)
1712 union_unlock(struct vop_unlock_args *ap)
1716 struct union_node *un = VTOUNION(ap->a_vp);
1718 KASSERT((un->un_uppervp == NULL || un->un_uppervp->v_usecount > 0), ("uppervp usecount is 0"));
1721 error = vop_stdunlock(ap);
1725 * If no exclusive locks remain and we are holding an uppervp lock,
1726 * remove the uppervp lock.
1729 if ((un->un_flags & UN_ULOCK) &&
1730 lockstatus(&un->un_lock, NULL) != LK_EXCLUSIVE) {
1731 un->un_flags &= ~UN_ULOCK;
1732 VOP_UNLOCK(un->un_uppervp, LK_EXCLUSIVE);
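/*
 * Invariant sketch (illustrative, not original code): while UN_ULOCK is set
 * the union node is expected to hold the exclusive lock on its upper vnode;
 * union_lock() sets the flag after locking uppervp and union_unlock() clears
 * it after dropping that lock.  A debugging assertion along these lines
 * (hypothetical, not in the file) captures the rule; disabled with #if 0.
 */
#if 0
	KASSERT((un->un_flags & UN_ULOCK) == 0 ||
	    (un->un_uppervp != NULLVP && VOP_ISLOCKED(un->un_uppervp, NULL)),
	    ("UN_ULOCK set without a locked uppervp"));
#endif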
1741 * There isn't much we can do. We cannot push through to the real vnode
1742 * to get to the underlying device because this will bypass data
1743 * cached by the real vnode.
1745 * For some reason we cannot return the 'real' vnode either, it seems
1746 * to blow up memory maps.
1748 * union_bmap(struct vnode *a_vp, off_t a_loffset, struct vnode **a_vpp,
1749 * off_t *a_doffsetp, int *a_runp, int *a_runb)
1752 union_bmap(struct vop_bmap_args *ap)
1758 * union_print(struct vnode *a_vp)
1761 union_print(struct vop_print_args *ap)
1763 struct vnode *vp = ap->a_vp;
1765 printf("\ttag VT_UNION, vp=%p, uppervp=%p, lowervp=%p\n",
1766 vp, UPPERVP(vp), LOWERVP(vp));
1767 if (UPPERVP(vp) != NULLVP)
1768 vprint("union: upper", UPPERVP(vp));
1769 if (LOWERVP(vp) != NULLVP)
1770 vprint("union: lower", LOWERVP(vp));
1776 * union_pathconf(struct vnode *a_vp, int a_name, int *a_retval)
1779 union_pathconf(struct vop_pathconf_args *ap)
1782 struct thread *td = curthread; /* XXX */
1783 struct union_node *un = VTOUNION(ap->a_vp);
1786 vp = union_lock_other(un, td);
1787 KASSERT(vp != NULL, ("union_pathconf: backing vnode missing!"));
1789 ap->a_head.a_ops = *vp->v_ops;
1791 error = vop_pathconf_ap(ap);
1792 union_unlock_other(vp, td);
1798 * union_advlock(struct vnode *a_vp, caddr_t a_id, int a_op,
1799 * struct flock *a_fl, int a_flags)
1802 union_advlock(struct vop_advlock_args *ap)
1804 struct vnode *ovp = OTHERVP(ap->a_vp);
1806 ap->a_head.a_ops = *ovp->v_ops;
1808 return (vop_advlock_ap(ap));
1813 * XXX - vop_strategy must be hand coded because it has no
1814 * vnode in its arguments.
1816 * YYY - and it is not coherent with anything.
1817 * This goes away with a merged VM/buffer cache.
1819 * union_strategy(struct vnode *a_vp, struct bio *a_bio)
1822 union_strategy(struct vop_strategy_args *ap)
1824 struct bio *bio = ap->a_bio;
1825 struct buf *bp = bio->bio_buf;
1826 struct vnode *othervp = OTHERVP(ap->a_vp);
1829 if (othervp == NULLVP)
1830 panic("union_strategy: nil vp");
1831 if (bp->b_cmd != BUF_CMD_READ && (othervp == LOWERVP(ap->a_vp)))
1832 panic("union_strategy: writing to lowervp");
1834 return (vn_strategy(othervp, bio));
1838 * Global vfs data structures
1840 struct vnodeopv_entry_desc union_vnodeop_entries[] = {
1841 { &vop_default_desc, vop_defaultop },
1842 { &vop_access_desc, (vnodeopv_entry_t) union_access },
1843 { &vop_advlock_desc, (vnodeopv_entry_t) union_advlock },
1844 { &vop_bmap_desc, (vnodeopv_entry_t) union_bmap },
1845 { &vop_close_desc, (vnodeopv_entry_t) union_close },
1846 { &vop_old_create_desc, (vnodeopv_entry_t) union_create },
1847 { &vop_fsync_desc, (vnodeopv_entry_t) union_fsync },
1848 { &vop_getpages_desc, (vnodeopv_entry_t) union_getpages },
1849 { &vop_putpages_desc, (vnodeopv_entry_t) union_putpages },
1850 { &vop_getattr_desc, (vnodeopv_entry_t) union_getattr },
1851 { &vop_inactive_desc, (vnodeopv_entry_t) union_inactive },
1852 { &vop_ioctl_desc, (vnodeopv_entry_t) union_ioctl },
1853 { &vop_islocked_desc, vop_stdislocked },
1854 { &vop_old_link_desc, (vnodeopv_entry_t) union_link },
1855 { &vop_lock_desc, (vnodeopv_entry_t) union_lock },
1856 { &vop_old_lookup_desc, (vnodeopv_entry_t) union_lookup },
1857 { &vop_old_mkdir_desc, (vnodeopv_entry_t) union_mkdir },
1858 { &vop_old_mknod_desc, (vnodeopv_entry_t) union_mknod },
1859 { &vop_mmap_desc, (vnodeopv_entry_t) union_mmap },
1860 { &vop_open_desc, (vnodeopv_entry_t) union_open },
1861 { &vop_pathconf_desc, (vnodeopv_entry_t) union_pathconf },
1862 { &vop_poll_desc, (vnodeopv_entry_t) union_poll },
1863 { &vop_print_desc, (vnodeopv_entry_t) union_print },
1864 { &vop_read_desc, (vnodeopv_entry_t) union_read },
1865 { &vop_readdir_desc, (vnodeopv_entry_t) union_readdir },
1866 { &vop_readlink_desc, (vnodeopv_entry_t) union_readlink },
1867 { &vop_reclaim_desc, (vnodeopv_entry_t) union_reclaim },
1868 { &vop_old_remove_desc, (vnodeopv_entry_t) union_remove },
1869 { &vop_old_rename_desc, (vnodeopv_entry_t) union_rename },
1870 { &vop_revoke_desc, (vnodeopv_entry_t) union_revoke },
1871 { &vop_old_rmdir_desc, (vnodeopv_entry_t) union_rmdir },
1872 { &vop_setattr_desc, (vnodeopv_entry_t) union_setattr },
1873 { &vop_strategy_desc, (vnodeopv_entry_t) union_strategy },
1874 { &vop_old_symlink_desc, (vnodeopv_entry_t) union_symlink },
1875 { &vop_unlock_desc, (vnodeopv_entry_t) union_unlock },
1876 { &vop_old_whiteout_desc, (vnodeopv_entry_t) union_whiteout },
1877 { &vop_write_desc, (vnodeopv_entry_t) union_write },