/*
 * Copyright (c) 1992, 1993, 1994, 1995 Jan-Simon Pendry.
 * Copyright (c) 1992, 1993, 1994, 1995
 *	The Regents of the University of California. All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)union_vnops.c	8.32 (Berkeley) 6/23/95
 * $FreeBSD: src/sys/miscfs/union/union_vnops.c,v 1.72 1999/12/15 23:02:14 eivind Exp $
 * $DragonFly: src/sys/vfs/union/union_vnops.c,v 1.39 2007/11/20 21:03:51 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <vm/vnode_pager.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#if UDEBUG_ENABLED
SYSCTL_INT(_vfs, OID_AUTO, uniondebug, CTLFLAG_RW, &uniondebug, 0, "");
#else
SYSCTL_INT(_vfs, OID_AUTO, uniondebug, CTLFLAG_RD, &uniondebug, 0, "");
#endif
static int	union_access (struct vop_access_args *ap);
static int	union_advlock (struct vop_advlock_args *ap);
static int	union_bmap (struct vop_bmap_args *ap);
static int	union_close (struct vop_close_args *ap);
static int	union_create (struct vop_old_create_args *ap);
static int	union_fsync (struct vop_fsync_args *ap);
static int	union_getattr (struct vop_getattr_args *ap);
static int	union_inactive (struct vop_inactive_args *ap);
static int	union_ioctl (struct vop_ioctl_args *ap);
static int	union_link (struct vop_old_link_args *ap);
static int	union_lookup (struct vop_old_lookup_args *ap);
static int	union_lookup1 (struct vnode *udvp, struct vnode **dvp,
				struct vnode **vpp,
				struct componentname *cnp);
static int	union_mkdir (struct vop_old_mkdir_args *ap);
static int	union_mknod (struct vop_old_mknod_args *ap);
static int	union_mmap (struct vop_mmap_args *ap);
static int	union_open (struct vop_open_args *ap);
static int	union_pathconf (struct vop_pathconf_args *ap);
static int	union_print (struct vop_print_args *ap);
static int	union_read (struct vop_read_args *ap);
static int	union_readdir (struct vop_readdir_args *ap);
static int	union_readlink (struct vop_readlink_args *ap);
static int	union_reclaim (struct vop_reclaim_args *ap);
static int	union_remove (struct vop_old_remove_args *ap);
static int	union_rename (struct vop_old_rename_args *ap);
static int	union_rmdir (struct vop_old_rmdir_args *ap);
static int	union_poll (struct vop_poll_args *ap);
static int	union_setattr (struct vop_setattr_args *ap);
static int	union_strategy (struct vop_strategy_args *ap);
static int	union_getpages (struct vop_getpages_args *ap);
static int	union_putpages (struct vop_putpages_args *ap);
static int	union_symlink (struct vop_old_symlink_args *ap);
static int	union_whiteout (struct vop_old_whiteout_args *ap);
static int	union_write (struct vop_read_args *ap);
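
/*
 * union_lock_upper:
 *
 *	Return the union node's upper vnode exclusively locked, or NULL if
 *	there is no upper vnode.  The caller releases it again with
 *	union_unlock_upper().
 */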
union_lock_upper(struct union_node *un, struct thread *td)
	struct vnode *uppervp;

	if ((uppervp = un->un_uppervp) != NULL) {
		vn_lock(uppervp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY);
	KASSERT((uppervp == NULL || uppervp->v_sysref.refcnt > 0), ("uppervp usecount is 0"));

union_ref_upper(struct union_node *un)
	struct vnode *uppervp;

	if ((uppervp = un->un_uppervp) != NULL) {
		if (uppervp->v_flag & VRECLAIMED) {

union_unlock_upper(struct vnode *uppervp, struct thread *td)

union_lock_other(struct union_node *un, struct thread *td)
	if (un->un_uppervp != NULL) {
		vp = union_lock_upper(un, td);
	} else if ((vp = un->un_lowervp) != NULL) {
		vn_lock(vp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY);

union_unlock_other(struct vnode *vp, struct thread *td)
 * udvp must be exclusively locked on call and will remain
 * exclusively locked on return. This is the mount point
 * for our filesystem.
 *
 *	dvp	Our base directory, locked and referenced.
 *		The passed dvp will be dereferenced and unlocked on return
 *		and a new dvp will be returned which is locked and
 *		referenced in the same variable.
 *
 *	vpp	is filled in with the result if no error occurred,
 *
 * If an error is returned, *vpp is set to NULLVP. If no
 * error occurs, *vpp is returned with a reference and an
union_lookup1(struct vnode *udvp, struct vnode **pdvp, struct vnode **vpp,
	      struct componentname *cnp)
	struct thread *td = cnp->cn_td;
	struct vnode *dvp = *pdvp;
 * If stepping up the directory tree, check for going
 * back across the mount point, in which case do what
 * lookup would do by stepping back down the mount
	if (cnp->cn_flags & CNP_ISDOTDOT) {
		while ((dvp != udvp) && (dvp->v_flag & VROOT)) {
 * Don't do the NOCROSSMOUNT check
 * at this level. By definition,
 * union fs deals with namespaces, not
			dvp = dvp->v_mount->mnt_vnodecovered;
			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
 * Set return dvp to be the upperdvp 'parent directory'.
 * If the VOP_LOOKUP call generates an error, tdvp is invalid and no
 * changes will have been made to dvp, so we are set to return.
	error = VOP_LOOKUP(dvp, &tdvp, cnp);
	UDEBUG(("dvp %p error %d flags %lx\n", dvp, error, cnp->cn_flags));
 * The parent directory will have been unlocked, unless lookup
 * found the last component or if dvp == tdvp (tdvp must be locked).
 * We want our dvp to remain locked and ref'd. We also want tdvp
 * to remain locked and ref'd.
	UDEBUG(("parentdir %p result %p flag %lx\n", dvp, tdvp, cnp->cn_flags));
	if (dvp != tdvp && (cnp->cn_flags & CNP_XXXISLASTCN) == 0)
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
 * Lastly check if the current node is a mount point in
 * which case walk up the mount hierarchy making sure not to
 * bump into the root of the mount tree (ie. dvp != udvp).
 * We use dvp as a temporary variable here; it is no longer related
 * to the dvp above. However, we have to ensure that both *pdvp and
 * tdvp are locked on return.
	    (dvp->v_type == VDIR) &&
	    (mp = dvp->v_mountedhere)
		error = VFS_ROOT(mp, &dvp);
	vn_lock(*pdvp, LK_EXCLUSIVE | LK_RETRY);
 * union_lookup(struct vnode *a_dvp, struct vnode **a_vpp,
 *		struct componentname *a_cnp)
union_lookup(struct vop_old_lookup_args *ap)
	struct vnode *uppervp, *lowervp;
	struct vnode *upperdvp, *lowerdvp;
	struct vnode *dvp = ap->a_dvp;		/* starting dir */
	struct union_node *dun = VTOUNION(dvp);	/* associated union node */
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_td;
	int lockparent = cnp->cn_flags & CNP_LOCKPARENT;
	struct union_mount *um = MOUNTTOUNIONMOUNT(dvp->v_mount);
	struct ucred *saved_cred = NULL;
 * Disallow write attempts to the filesystem mounted read-only.
	if ((dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == NAMEI_DELETE || cnp->cn_nameiop == NAMEI_RENAME)) {
 * For any lookups we do, always return with the parent locked
	cnp->cn_flags |= CNP_LOCKPARENT;
	lowerdvp = dun->un_lowervp;
 * Get a private lock on uppervp and a reference, effectively
 * taking it out of the union_node's control.
 * We must lock upperdvp while holding our lock on dvp
 * to avoid a deadlock.
	upperdvp = union_lock_upper(dun, td);
 * do the lookup in the upper level.
 * if that level consumes additional pathnames,
 * then assume that something special is going
 * on and just return that vnode.
	if (upperdvp != NULLVP) {
 * We do not have to worry about the DOTDOT case, we've
 * already unlocked dvp.
		UDEBUG(("A %p\n", upperdvp));
 * Do the lookup. We must supply a locked and referenced
 * upperdvp to the function and will get a new locked and
 * referenced upperdvp back with the old having been
 * If an error is returned, uppervp will be NULLVP. If no
 * error occurs, uppervp will be the locked and referenced
 * return vnode or possibly NULL, depending on what is being
 * requested. It is possible that the returned uppervp
 * will be the same as upperdvp.
		uerror = union_lookup1(um->um_uppervp, &upperdvp, &uppervp, cnp);
		    "uerror %d upperdvp %p %d/%d, uppervp %p ref=%d/lck=%d\n",
		    upperdvp->v_sysref.refcnt,
		    vn_islocked(upperdvp),
		    (uppervp ? uppervp->v_sysref.refcnt : -99),
		    (uppervp ? vn_islocked(uppervp) : -99)
 * Disallow write attempts to the filesystem mounted read-only.
		if (uerror == EJUSTRETURN &&
		    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
		    (cnp->cn_nameiop == NAMEI_CREATE || cnp->cn_nameiop == NAMEI_RENAME)) {
 * Special case. If cn_consume != 0 skip out. The result
 * of the lookup is transferred to our return variable. If
 * an error occurred we have to throw away the results.
		if (cnp->cn_consume != 0) {
			if ((error = uerror) == 0) {
				*ap->a_vpp = uppervp;
 * Calculate whiteout, fall through
		if (uerror == ENOENT || uerror == EJUSTRETURN) {
			if (cnp->cn_flags & CNP_ISWHITEOUT) {
			} else if (lowerdvp != NULLVP) {
				terror = VOP_GETATTR(upperdvp, &va);
				if (terror == 0 && (va.va_flags & OPAQUE))
 * in a similar way to the upper layer, do the lookup
 * in the lower layer. this time, if there is some
 * component magic going on, then vput whatever we got
 * back from the upper layer and return the lower vnode
	if (lowerdvp != NULLVP && !iswhiteout) {
		UDEBUG(("B %p\n", lowerdvp));
 * Force only LOOKUPs on the lower node, since
 * we won't be making changes to it anyway.
		nameiop = cnp->cn_nameiop;
		cnp->cn_nameiop = NAMEI_LOOKUP;
		if (um->um_op == UNMNT_BELOW) {
			saved_cred = cnp->cn_cred;
			cnp->cn_cred = um->um_cred;
 * We shouldn't have to worry about locking interactions
 * between the lower layer and our union layer (w.r.t.
 * `..' processing) because we don't futz with lowervp
 * locks in the union-node instantiation code path.
 * union_lookup1() requires lowervp to be locked on entry,
 * and it will be unlocked on return. The ref count will
 * not change. On return lowervp doesn't represent anything
 * to us so we NULL it out.
		vn_lock(lowerdvp, LK_EXCLUSIVE | LK_RETRY);
		lerror = union_lookup1(um->um_lowervp, &lowerdvp, &lowervp, cnp);
		if (lowerdvp == lowervp)
		lowerdvp = NULL;	/* lowerdvp invalid after vput */
		if (um->um_op == UNMNT_BELOW)
			cnp->cn_cred = saved_cred;
		cnp->cn_nameiop = nameiop;
		if (cnp->cn_consume != 0 || lerror == EACCES) {
			if ((error = lerror) == 0) {
				*ap->a_vpp = lowervp;
		UDEBUG(("C %p\n", lowerdvp));
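		/*
		 * No lower directory to search (or the name is whited out).
		 * For ".." fall back to the lower vnode of our union parent,
		 * if it has one, so the node built below still carries a
		 * lower layer.
		 */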
		if ((cnp->cn_flags & CNP_ISDOTDOT) && dun->un_pvp != NULLVP) {
			if ((lowervp = LOWERVP(dun->un_pvp)) != NULL) {
				vn_lock(lowervp, LK_EXCLUSIVE | LK_RETRY);
 * Ok. Now we have uerror, uppervp, upperdvp, lerror, and lowervp.
 *
 *	1. If both layers returned an error, select the upper layer.
 *
 *	2. If the upper layer failed and the bottom layer succeeded,
 *	   two subcases occur:
 *
 *	   a. The bottom vnode is not a directory, in which case
 *	      just return a new union vnode referencing an
 *	      empty top layer and the existing bottom layer.
 *
 *	   b. The bottom vnode is a directory, in which case
 *	      create a new directory in the top layer and
 *	      fall through to case 3.
 *
 *	3. If the top layer succeeded then return a new union
 *	   vnode referencing whatever the new top layer and
 *	   whatever the bottom layer returned.
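 *
 *	Example of case 2b: a lookup of a name that exists only as a
 *	directory in the bottom layer returns a lower directory vnode.
 *	union_mkshadow() is then used below to create a matching empty
 *	directory in the top layer before the union vnode is allocated,
 *	so that later creates inside it have an upper directory to land in.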
	if ((uerror != 0) && (lerror != 0)) {
	if (uerror != 0 /* && (lerror == 0) */ ) {
		if (lowervp->v_type == VDIR) { /* case 2b. */
			KASSERT(uppervp == NULL, ("uppervp unexpectedly non-NULL"));
 * oops, uppervp has a problem, we may have to shadow.
			uerror = union_mkshadow(um, upperdvp, cnp, &uppervp);
 * Must call union_allocvp with both the upper and lower vnodes
 * referenced and the upper vnode locked. ap->a_vpp is returned
 * referenced and locked. lowervp, uppervp, and upperdvp are
 * absorbed by union_allocvp() whether it succeeds or fails.
 * upperdvp is the parent directory of uppervp which may be
 * different, depending on the path, from dvp->un_uppervp. That's
 * why it is a separate argument. Note that it must be unlocked.
 * dvp must be locked on entry to the call and will be locked on
	if (uppervp && uppervp != upperdvp)
	error = union_allocvp(ap->a_vpp, dvp->v_mount, dvp, upperdvp, cnp,
			      uppervp, lowervp, 1);
	UDEBUG(("Create %p = %p %p refs=%d\n", *ap->a_vpp, uppervp, lowervp, (*ap->a_vpp) ? ((*ap->a_vpp)->v_sysref.refcnt) : -99));
 * - put away any extra junk lying around. Note that lowervp
 *   (if not NULL) will never be the same as *ap->a_vp and
 *   neither will uppervp, because when we set that state we
 *   NULL-out lowervp or uppervp. On the other hand, upperdvp
 *   may match uppervp or *ap->a_vpp.
 * - relock/unlock dvp if appropriate.
	if (upperdvp == uppervp || upperdvp == *ap->a_vpp)
 * Restore LOCKPARENT state
		cnp->cn_flags &= ~CNP_LOCKPARENT;
	UDEBUG(("Out %d vpp %p/%d lower %p upper %p\n", error, *ap->a_vpp,
		((*ap->a_vpp) ? (*ap->a_vpp)->v_sysref.refcnt : -99),
 * dvp lock state, determine whether to relock dvp. dvp is expected
 * to be locked on return if:
 *	- there was an error (other than EJUSTRETURN), or
 *	- we hit the last component and lockparent is true
 * dvp_is_locked is the current state of the dvp lock, not counting
 * the possibility that *ap->a_vpp == dvp (in which case it is locked
 * anyway). Note that *ap->a_vpp == dvp only if no error occurred.
	if (*ap->a_vpp != dvp) {
		if ((error == 0 || error == EJUSTRETURN) && !lockparent) {
	if (cnp->cn_namelen == 1 &&
	    cnp->cn_nameptr[0] == '.' &&
		panic("union_lookup returning . (%p) not same as startdir (%p)", ap->a_vpp, dvp);
 * a_dvp is locked on entry and remains locked on return. a_vpp is returned
 * locked if no error occurs, otherwise it is garbage.
 * union_create(struct vnode *a_dvp, struct vnode **a_vpp,
 *		struct componentname *a_cnp, struct vattr *a_vap)
union_create(struct vop_old_create_args *ap)
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_td;

	if ((dvp = union_lock_upper(dun, td)) != NULL) {
		error = VOP_CREATE(dvp, &vp, cnp, ap->a_vap);
		mp = ap->a_dvp->v_mount;
		UDEBUG(("ALLOCVP-1 FROM %p REFS %d\n", vp, vp->v_sysref.refcnt));
		error = union_allocvp(ap->a_vpp, mp, NULLVP, NULLVP,
		UDEBUG(("ALLOCVP-2B FROM %p REFS %d\n", *ap->a_vpp, vp->v_sysref.refcnt));
		union_unlock_upper(dvp, td);
 * union_whiteout(struct vnode *a_dvp, struct componentname *a_cnp,
union_whiteout(struct vop_old_whiteout_args *ap)
	struct union_node *un = VTOUNION(ap->a_dvp);
	struct componentname *cnp = ap->a_cnp;
	struct vnode *uppervp;
	int error = EOPNOTSUPP;

	if ((uppervp = union_lock_upper(un, cnp->cn_td)) != NULLVP) {
		error = VOP_WHITEOUT(un->un_uppervp, cnp, ap->a_flags);
		union_unlock_upper(uppervp, cnp->cn_td);
 * a_dvp is locked on entry and should remain locked on return.
 * a_vpp is garbage whether an error occurs or not.
 * union_mknod(struct vnode *a_dvp, struct vnode **a_vpp,
 *		struct componentname *a_cnp, struct vattr *a_vap)
union_mknod(struct vop_old_mknod_args *ap)
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct componentname *cnp = ap->a_cnp;

	if ((dvp = union_lock_upper(dun, cnp->cn_td)) != NULL) {
		error = VOP_MKNOD(dvp, ap->a_vpp, cnp, ap->a_vap);
		union_unlock_upper(dvp, cnp->cn_td);
 * run open VOP. When opening the underlying vnode we have to mimic
 * vn_open. What we *really* need to do to avoid screwups if the
 * open semantics change is to call vn_open(). For example, ufs blows
 * up if you open a file but do not vmio it prior to writing.
 * union_open(struct vnode *a_vp, int a_mode,
 *	       struct ucred *a_cred, struct thread *a_td)
union_open(struct vop_open_args *ap)
	struct union_node *un = VTOUNION(ap->a_vp);
	int mode = ap->a_mode;
	struct ucred *cred = ap->a_cred;
	struct thread *td = ap->a_td;
 * If there is an existing upper vp then simply open that.
 * The upper vp takes precedence over the lower vp. When opening
 * a lower vp for writing copy it to the uppervp and then open the
 * At the end of this section tvp will be left locked.
	if ((tvp = union_lock_upper(un, td)) == NULLVP) {
 * If the lower vnode is being opened for writing, then
 * copy the file contents to the upper vnode and open that,
 * otherwise we can simply open the lower vnode.
		tvp = un->un_lowervp;
		if ((ap->a_mode & FWRITE) && (tvp->v_type == VREG)) {
			int docopy = !(mode & O_TRUNC);
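			/*
			 * When the open truncates the file there is no
			 * point copying the existing contents up; the
			 * copyup only needs to instantiate the upper vnode.
			 */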
			error = union_copyup(un, docopy, cred, td);
			tvp = union_lock_upper(un, td);
			vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY);
 * We are holding the correct vnode, open it. Note
 * that in DragonFly, VOP_OPEN is responsible for associating
 * a VM object with the vnode if the vnode is mappable or the
 * underlying filesystem uses buffer cache calls on it.
	error = VOP_OPEN(tvp, mode, cred, NULL);
 * Release any locks held
		union_unlock_upper(tvp, td);

 * It is unclear whether a_vp is passed locked or unlocked. Whatever
 * the case, we do not change it.
 * union_close(struct vnode *a_vp, int a_fflag, struct ucred *a_cred,
 *		struct thread *a_td)
union_close(struct vop_close_args *ap)
	struct union_node *un = VTOUNION(ap->a_vp);

	if ((vp = un->un_uppervp) == NULLVP) {
#ifdef UNION_DIAGNOSTIC
		if (un->un_openl <= 0)
			panic("union: un_openl cnt");
	ap->a_head.a_ops = *vp->v_ops;
	return(vop_close_ap(ap));
 * Check access permission on the union vnode.
 * The access check being enforced is to check
 * against both the underlying vnode, and any
 * copied vnode. This ensures that no additional
 * file permissions are given away simply because
 * the user caused an implicit file copy.
 * union_access(struct vnode *a_vp, int a_mode,
 *		struct ucred *a_cred, struct thread *a_td)
union_access(struct vop_access_args *ap)
	struct union_node *un = VTOUNION(ap->a_vp);
	struct thread *td = ap->a_td;
 * Disallow write attempts on filesystems mounted read-only.
	if ((ap->a_mode & VWRITE) &&
	    (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)) {
		switch (ap->a_vp->v_type) {
	if ((vp = union_lock_upper(un, td)) != NULLVP) {
		ap->a_head.a_ops = *vp->v_ops;
		error = vop_access_ap(ap);
		union_unlock_upper(vp, td);
	if ((vp = un->un_lowervp) != NULLVP) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		ap->a_head.a_ops = *vp->v_ops;
 * Remove VWRITE from a_mode if our mount point is RW, because
 * we want to allow writes and lowervp may be read-only.
		if ((un->un_vnode->v_mount->mnt_flag & MNT_RDONLY) == 0)
			ap->a_mode &= ~VWRITE;
		error = vop_access_ap(ap);
			struct union_mount *um;

			um = MOUNTTOUNIONMOUNT(un->un_vnode->v_mount);
			if (um->um_op == UNMNT_BELOW) {
				ap->a_cred = um->um_cred;
				error = vop_access_ap(ap);
 * We handle getattr only to change the fsid and
 * It's not clear whether VOP_GETATTR is to be
 * called with the vnode locked or not. stat() calls
 * it with (vp) locked, and fstat calls it with
 * Because of this we cannot use our normal locking functions
 * if we do not intend to lock the main a_vp node. At the moment
 * we are running without any specific locking at all; be aware
 * that care must be taken if locking is added
 * union_getattr(struct vnode *a_vp, struct vattr *a_vap,
 *		 struct ucred *a_cred, struct thread *a_td)
union_getattr(struct vop_getattr_args *ap)
	struct union_node *un = VTOUNION(ap->a_vp);
 * Some programs walk the filesystem hierarchy by counting
 * links to directories to avoid stat'ing all the time.
 * This means the link count on directories needs to be "correct".
 * The only way to do that is to call getattr on both layers
 * and fix up the link count. The link count will not necessarily
 * be accurate but will be large enough to defeat the tree walkers.
	if ((vp = un->un_uppervp) != NULLVP) {
		error = VOP_GETATTR(vp, vap);
		/* XXX isn't this dangerous without a lock? */
		union_newsize(ap->a_vp, vap->va_size, VNOVAL);
	} else if (vp->v_type == VDIR && un->un_lowervp != NULLVP) {
		error = VOP_GETATTR(vp, vap);
		/* XXX isn't this dangerous without a lock? */
		union_newsize(ap->a_vp, VNOVAL, vap->va_size);
	if ((vap != ap->a_vap) && (vap->va_type == VDIR))
		ap->a_vap->va_nlink += vap->va_nlink;
 * union_setattr(struct vnode *a_vp, struct vattr *a_vap,
 *		 struct ucred *a_cred, struct thread *a_td)
union_setattr(struct vop_setattr_args *ap)
	struct union_node *un = VTOUNION(ap->a_vp);
	struct thread *td = ap->a_td;
	struct vattr *vap = ap->a_vap;
	struct vnode *uppervp;
 * Disallow write attempts on filesystems mounted read-only.
	if ((ap->a_vp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
	     vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	     vap->va_mtime.tv_sec != VNOVAL ||
	     vap->va_mode != (mode_t)VNOVAL)) {
 * Handle case of truncating lower object to zero size,
 * by creating a zero length upper object. This is to
 * handle the case of open with O_TRUNC and O_CREAT.
	if (un->un_uppervp == NULLVP && (un->un_lowervp->v_type == VREG)) {
		error = union_copyup(un, (ap->a_vap->va_size != 0),
				     ap->a_cred, ap->a_td);
 * Try to set attributes in upper layer,
 * otherwise return read-only filesystem error.
	if ((uppervp = union_lock_upper(un, td)) != NULLVP) {
		error = VOP_SETATTR(un->un_uppervp, ap->a_vap, ap->a_cred);
		if ((error == 0) && (ap->a_vap->va_size != VNOVAL))
			union_newsize(ap->a_vp, ap->a_vap->va_size, VNOVAL);
		union_unlock_upper(uppervp, td);
union_getpages(struct vop_getpages_args *ap)
	r = vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
		ap->a_count, ap->a_reqpage,

union_putpages(struct vop_putpages_args *ap)
	r = vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
		ap->a_sync, ap->a_rtvals);
 * union_read(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
 *	       struct ucred *a_cred)
union_read(struct vop_read_args *ap)
	struct union_node *un = VTOUNION(ap->a_vp);
	struct thread *td = ap->a_uio->uio_td;

	uvp = union_lock_other(un, td);
	KASSERT(uvp != NULL, ("union_read: backing vnode missing!"));
	if (ap->a_vp->v_flag & VOBJBUF)
		union_vm_coherency(ap->a_vp, ap->a_uio, 0);
	error = VOP_READ(uvp, ap->a_uio, ap->a_ioflag, ap->a_cred);
	union_unlock_other(uvp, td);
 * perhaps the size of the underlying object has changed under
 * our feet. take advantage of the offset information present
 * in the uio structure.
		struct union_node *un = VTOUNION(ap->a_vp);
		off_t cur = ap->a_uio->uio_offset;

		if (uvp == un->un_uppervp) {
			if (cur > un->un_uppersz)
				union_newsize(ap->a_vp, cur, VNOVAL);
			if (cur > un->un_lowersz)
				union_newsize(ap->a_vp, VNOVAL, cur);
 * union_write(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
 *		struct ucred *a_cred)
union_write(struct vop_read_args *ap)
	struct union_node *un = VTOUNION(ap->a_vp);
	struct thread *td = ap->a_uio->uio_td;
	struct vnode *uppervp;

	if ((uppervp = union_lock_upper(un, td)) == NULLVP)
		panic("union: missing upper layer in write");
 * Since our VM pages are associated with our vnode rather than
 * the real vnode, and since we do not run our reads and writes
 * through our own VM cache, we have a VM/VFS coherency problem.
 * We solve this by invalidating or flushing the associated VM
 * pages prior to allowing a normal read or write to occur.
 * VM-backed writes (UIO_NOCOPY) have to be converted to normal
 * writes because we are not cache-coherent. Normal writes need
 * to be made coherent with our VM-backing store, which we do by
 * first flushing any dirty VM pages associated with the write
 * range, and then destroying any clean VM pages associated with
	if (ap->a_uio->uio_segflg == UIO_NOCOPY) {
		ap->a_uio->uio_segflg = UIO_SYSSPACE;
	} else if (ap->a_vp->v_flag & VOBJBUF) {
		union_vm_coherency(ap->a_vp, ap->a_uio, 1);
	error = VOP_WRITE(uppervp, ap->a_uio, ap->a_ioflag, ap->a_cred);
 * the size of the underlying object may be changed by the
		off_t cur = ap->a_uio->uio_offset;

		if (cur > un->un_uppersz)
			union_newsize(ap->a_vp, cur, VNOVAL);
	union_unlock_upper(uppervp, td);
 * union_ioctl(struct vnode *a_vp, int a_command, caddr_t a_data, int a_fflag,
 *		struct ucred *a_cred, struct thread *a_td)
union_ioctl(struct vop_ioctl_args *ap)
	struct vnode *ovp = OTHERVP(ap->a_vp);

	ap->a_head.a_ops = *ovp->v_ops;
	return(vop_ioctl_ap(ap));

 * union_poll(struct vnode *a_vp, int a_events, struct ucred *a_cred,
 *	       struct thread *a_td)
union_poll(struct vop_poll_args *ap)
	struct vnode *ovp = OTHERVP(ap->a_vp);

	ap->a_head.a_ops = *ovp->v_ops;
	return(vop_poll_ap(ap));

 * union_mmap(struct vnode *a_vp, int a_fflags, struct ucred *a_cred,
 *	       struct thread *a_td)
union_mmap(struct vop_mmap_args *ap)
	struct vnode *ovp = OTHERVP(ap->a_vp);

	ap->a_head.a_ops = *ovp->v_ops;
	return (vop_mmap_ap(ap));

 * union_fsync(struct vnode *a_vp, struct ucred *a_cred, int a_waitfor,
 *		struct thread *a_td)
union_fsync(struct vop_fsync_args *ap)
	struct thread *td = ap->a_td;
	struct vnode *targetvp;
	struct union_node *un = VTOUNION(ap->a_vp);

	if ((targetvp = union_lock_other(un, td)) != NULLVP) {
		error = VOP_FSYNC(targetvp, ap->a_waitfor, 0);
		union_unlock_other(targetvp, td);
 * Remove the specified cnp. The dvp and vp are passed to us locked
 * and must remain locked on return.
 * union_remove(struct vnode *a_dvp, struct vnode *a_vp,
 *		 struct componentname *a_cnp)
union_remove(struct vop_old_remove_args *ap)
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct union_node *un = VTOUNION(ap->a_vp);
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_td;
	struct vnode *uppervp;
	struct vnode *upperdvp;

	if ((upperdvp = union_lock_upper(dun, td)) == NULLVP)
		panic("union remove: null upper vnode");
	if ((uppervp = union_lock_upper(un, td)) != NULLVP) {
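		/*
		 * If the name must stay hidden after the upper copy is
		 * removed (for example because it also exists in the lower
		 * layer), the remove is converted into a whiteout in the
		 * upper layer.
		 */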
		if (union_dowhiteout(un, cnp->cn_cred, td))
			cnp->cn_flags |= CNP_DOWHITEOUT;
		error = VOP_REMOVE(upperdvp, uppervp, cnp);
			union_removed_upper(un);
		union_unlock_upper(uppervp, td);
		error = union_mkwhiteout(
			MOUNTTOUNIONMOUNT(ap->a_dvp->v_mount),
			upperdvp, ap->a_cnp, un->un_path);
	union_unlock_upper(upperdvp, td);
 * tdvp will be locked on entry, vp will not be locked on entry.
 * tdvp should remain locked on return and vp should remain unlocked
 * union_link(struct vnode *a_tdvp, struct vnode *a_vp,
 *	       struct componentname *a_cnp)
union_link(struct vop_old_link_args *ap)
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_td;
	struct union_node *dun = VTOUNION(ap->a_tdvp);
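
	/*
	 * If the vnode being linked is not itself a union vnode (it has a
	 * different vop vector) it is used directly; otherwise the union
	 * node's upper vnode is used, copying the file to the upper layer
	 * first if necessary.
	 */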
	if (ap->a_tdvp->v_ops != ap->a_vp->v_ops) {
		struct union_node *tun = VTOUNION(ap->a_vp);

		if (tun->un_uppervp == NULLVP) {
			vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY);
			if (dun->un_uppervp == tun->un_dirvp) {
				if (dun->un_flags & UN_ULOCK) {
					dun->un_flags &= ~UN_ULOCK;
					vn_unlock(dun->un_uppervp);
			error = union_copyup(tun, 1, cnp->cn_cred, td);
			if (dun->un_uppervp == tun->un_dirvp) {
				vn_lock(dun->un_uppervp,
					LK_EXCLUSIVE | LK_RETRY);
				dun->un_flags |= UN_ULOCK;
			vn_unlock(ap->a_vp);
		vp = tun->un_uppervp;
 * Make sure upper is locked, then unlock the union directory we were
 * called with to avoid a deadlock while we are calling VOP_LINK on
 * the upper (with tdvp locked and vp not locked). Our ap->a_tdvp
 * is expected to be locked on return.
	if ((tdvp = union_lock_upper(dun, td)) == NULLVP)
	vn_unlock(ap->a_tdvp);			/* unlock calling node */
	error = VOP_LINK(tdvp, vp, cnp);	/* call link on upper */
 * We have to unlock tdvp prior to relocking our calling node in
 * order to avoid a deadlock.
	union_unlock_upper(tdvp, td);
	vn_lock(ap->a_tdvp, LK_EXCLUSIVE | LK_RETRY);
 * union_rename(struct vnode *a_fdvp, struct vnode *a_fvp,
 *		 struct componentname *a_fcnp, struct vnode *a_tdvp,
 *		 struct vnode *a_tvp, struct componentname *a_tcnp)
union_rename(struct vop_old_rename_args *ap)
	struct vnode *fdvp = ap->a_fdvp;
	struct vnode *fvp = ap->a_fvp;
	struct vnode *tdvp = ap->a_tdvp;
	struct vnode *tvp = ap->a_tvp;
 * Figure out what fdvp to pass to our upper or lower vnode. If we
 * replace the fdvp, release the original one and ref the new one.
	if (fdvp->v_tag == VT_UNION) {	/* always true */
		struct union_node *un = VTOUNION(fdvp);

		if (un->un_uppervp == NULLVP) {
 * this should never happen in normal
 * operation but might if there was
 * a problem creating the top-level shadow
		fdvp = un->un_uppervp;
 * Figure out what fvp to pass to our upper or lower vnode. If we
 * replace the fvp, release the original one and ref the new one.
	if (fvp->v_tag == VT_UNION) {	/* always true */
		struct union_node *un = VTOUNION(fvp);
		struct union_mount *um = MOUNTTOUNIONMOUNT(fvp->v_mount);

		if (un->un_uppervp == NULLVP) {
			switch(fvp->v_type) {
				vn_lock(un->un_vnode, LK_EXCLUSIVE | LK_RETRY);
				error = union_copyup(un, 1, ap->a_fcnp->cn_cred, ap->a_fcnp->cn_td);
				vn_unlock(un->un_vnode);
 * There is only one way to rename a directory
 * based in the lowervp, and that is to copy
 * the entire directory hierarchy. Otherwise
 * it would not last across a reboot.
				vn_lock(fdvp, LK_EXCLUSIVE | LK_RETRY);
				error = union_mkshadow(um, fdvp,
					ap->a_fcnp, &un->un_uppervp);
				vn_unlock(un->un_uppervp);
		if (un->un_lowervp != NULLVP)
			ap->a_fcnp->cn_flags |= CNP_DOWHITEOUT;
		fvp = un->un_uppervp;
 * Figure out what tdvp (destination directory) to pass to the
 * lower level. If we replace it with uppervp, we need to vput the
 * old one. The exclusive lock is transferred to what we will pass
 * down in the VOP_RENAME and we replace uppervp with a simple
	if (tdvp->v_tag == VT_UNION) {
		struct union_node *un = VTOUNION(tdvp);

		if (un->un_uppervp == NULLVP) {
 * this should never happen in normal
 * operation but might if there was
 * a problem creating the top-level shadow
 * new tdvp is a lock and reference on uppervp, put away
		tdvp = union_lock_upper(un, ap->a_tcnp->cn_td);
 * Figure out what tvp (destination file) to pass to the
 * If the uppervp file does not exist put away the (wrong)
 * file and change tvp to NULL.
	if (tvp != NULLVP && tvp->v_tag == VT_UNION) {
		struct union_node *un = VTOUNION(tvp);

		tvp = union_lock_upper(un, ap->a_tcnp->cn_td);
		/* note: tvp may be NULL */
 * VOP_RENAME releases/vputs prior to returning, so we have no
	return (VOP_RENAME(fdvp, fvp, ap->a_fcnp, tdvp, tvp, ap->a_tcnp));
 * Error. We still have to release / vput the various elements.
	if (tvp != NULLVP) {
 * union_mkdir(struct vnode *a_dvp, struct vnode **a_vpp,
 *		struct componentname *a_cnp, struct vattr *a_vap)
union_mkdir(struct vop_old_mkdir_args *ap)
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_td;
	struct vnode *upperdvp;

	if ((upperdvp = union_lock_upper(dun, td)) != NULLVP) {
		error = VOP_MKDIR(upperdvp, &vp, cnp, ap->a_vap);
		union_unlock_upper(upperdvp, td);
		UDEBUG(("ALLOCVP-2 FROM %p REFS %d\n", vp, vp->v_sysref.refcnt));
		error = union_allocvp(ap->a_vpp, ap->a_dvp->v_mount,
				      ap->a_dvp, NULLVP, cnp, vp, NULLVP, 1);
		UDEBUG(("ALLOCVP-2B FROM %p REFS %d\n", *ap->a_vpp, vp->v_sysref.refcnt));
 * union_rmdir(struct vnode *a_dvp, struct vnode *a_vp,
 *		struct componentname *a_cnp)
union_rmdir(struct vop_old_rmdir_args *ap)
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct union_node *un = VTOUNION(ap->a_vp);
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_td;
	struct vnode *upperdvp;
	struct vnode *uppervp;

	if ((upperdvp = union_lock_upper(dun, td)) == NULLVP)
		panic("union rmdir: null upper vnode");
	if ((uppervp = union_lock_upper(un, td)) != NULLVP) {
		if (union_dowhiteout(un, cnp->cn_cred, td))
			cnp->cn_flags |= CNP_DOWHITEOUT;
		error = VOP_RMDIR(upperdvp, uppervp, ap->a_cnp);
		union_unlock_upper(uppervp, td);
		error = union_mkwhiteout(
			MOUNTTOUNIONMOUNT(ap->a_dvp->v_mount),
			dun->un_uppervp, ap->a_cnp, un->un_path);
	union_unlock_upper(upperdvp, td);
 * dvp is locked on entry and remains locked on return. a_vpp is garbage
 * union_symlink(struct vnode *a_dvp, struct vnode **a_vpp,
 *		  struct componentname *a_cnp, struct vattr *a_vap,
union_symlink(struct vop_old_symlink_args *ap)
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_td;

	if ((dvp = union_lock_upper(dun, td)) != NULLVP) {
		error = VOP_SYMLINK(dvp, ap->a_vpp, cnp, ap->a_vap,
		union_unlock_upper(dvp, td);
 * union_readdir works in concert with getdirentries and
 * readdir(3) to provide a list of entries in the unioned
 * directories. getdirentries is responsible for walking
 * down the union stack. readdir(3) is responsible for
 * eliminating duplicate names from the returned data stream.
 * union_readdir(struct vnode *a_vp, struct uio *a_uio, struct ucred *a_cred,
 *		  int *a_eofflag, off_t *a_cookies, int a_ncookies)
union_readdir(struct vop_readdir_args *ap)
	struct union_node *un = VTOUNION(ap->a_vp);
	struct thread *td = ap->a_uio->uio_td;

	if ((uvp = union_ref_upper(un)) != NULLVP) {
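		/*
		 * Only the upper layer is read here; as noted above,
		 * getdirentries is responsible for walking down the
		 * union stack to the lower layer.
		 */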
		ap->a_head.a_ops = *uvp->v_ops;
		error = vop_readdir_ap(ap);

 * union_readlink(struct vnode *a_vp, struct uio *a_uio, struct ucred *a_cred)
union_readlink(struct vop_readlink_args *ap)
	struct union_node *un = VTOUNION(ap->a_vp);
	struct uio *uio = ap->a_uio;
	struct thread *td = uio->uio_td;

	vp = union_lock_other(un, td);
	KASSERT(vp != NULL, ("union_readlink: backing vnode missing!"));
	ap->a_head.a_ops = *vp->v_ops;
	error = vop_readlink_ap(ap);
	union_unlock_other(vp, td);
 * Called with the vnode locked. We are expected to unlock the vnode.
 * union_inactive(struct vnode *a_vp, struct thread *a_td)
union_inactive(struct vop_inactive_args *ap)
	struct vnode *vp = ap->a_vp;
	/*struct thread *td = ap->a_td;*/
	struct union_node *un = VTOUNION(vp);
 * Do nothing (and _don't_ bypass).
 * Wait to vrele lowervp until reclaim,
 * so that until then our union_node is in the
 * cache and reusable.
 * NEEDSWORK: Someday, consider inactive'ing
 * the lowervp and then trying to reactivate it
 * with capabilities (v_id)
 * like they do in the name lookup cache code.
 * That's too much work for now.
	if (un->un_dircache != 0) {
		for (vpp = un->un_dircache; *vpp != NULLVP; vpp++)
		kfree (un->un_dircache, M_TEMP);
		un->un_dircache = 0;
	if ((un->un_flags & UN_ULOCK) && un->un_uppervp) {
		un->un_flags &= ~UN_ULOCK;
		vn_unlock(un->un_uppervp);
	if ((un->un_flags & UN_CACHED) == 0)

 * union_reclaim(struct vnode *a_vp)
union_reclaim(struct vop_reclaim_args *ap)
	union_freevp(ap->a_vp);
 * There isn't much we can do. We cannot push through to the real vnode
 * to get to the underlying device because this will bypass data
 * cached by the real vnode.
 * For some reason we cannot return the 'real' vnode either; it seems
 * to blow up memory maps.
 * union_bmap(struct vnode *a_vp, off_t a_loffset,
 *	       off_t *a_doffsetp, int *a_runp, int *a_runb)
union_bmap(struct vop_bmap_args *ap)

 * union_print(struct vnode *a_vp)
union_print(struct vop_print_args *ap)
	struct vnode *vp = ap->a_vp;

	kprintf("\ttag VT_UNION, vp=%p, uppervp=%p, lowervp=%p\n",
	    vp, UPPERVP(vp), LOWERVP(vp));
	if (UPPERVP(vp) != NULLVP)
		vprint("union: upper", UPPERVP(vp));
	if (LOWERVP(vp) != NULLVP)
		vprint("union: lower", LOWERVP(vp));
 * union_pathconf(struct vnode *a_vp, int a_name, int *a_retval)
union_pathconf(struct vop_pathconf_args *ap)
	struct thread *td = curthread;		/* XXX */
	struct union_node *un = VTOUNION(ap->a_vp);

	vp = union_lock_other(un, td);
	KASSERT(vp != NULL, ("union_pathconf: backing vnode missing!"));
	ap->a_head.a_ops = *vp->v_ops;
	error = vop_pathconf_ap(ap);
	union_unlock_other(vp, td);

 * union_advlock(struct vnode *a_vp, caddr_t a_id, int a_op,
 *		  struct flock *a_fl, int a_flags)
union_advlock(struct vop_advlock_args *ap)
	struct vnode *ovp = OTHERVP(ap->a_vp);

	ap->a_head.a_ops = *ovp->v_ops;
	return (vop_advlock_ap(ap));
 * XXX - vop_strategy must be hand coded because it has no
 *	 vnode in its arguments.
 * YYY - and it is not coherent with anything.
 *
 * This goes away with a merged VM/buffer cache.
 * union_strategy(struct vnode *a_vp, struct bio *a_bio)
union_strategy(struct vop_strategy_args *ap)
	struct bio *bio = ap->a_bio;
	struct buf *bp = bio->bio_buf;
	struct vnode *othervp = OTHERVP(ap->a_vp);

	if (othervp == NULLVP)
		panic("union_strategy: nil vp");
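	/*
	 * Writes are never sent to the lower vnode; the union layer only
	 * ever modifies the upper layer.
	 */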
	if (bp->b_cmd != BUF_CMD_READ && (othervp == LOWERVP(ap->a_vp)))
		panic("union_strategy: writing to lowervp");
	return (vn_strategy(othervp, bio));
 * Global vfs data structures
struct vop_ops union_vnode_vops = {
	.vop_default =		vop_defaultop,
	.vop_access =		union_access,
	.vop_advlock =		union_advlock,
	.vop_bmap =		union_bmap,
	.vop_close =		union_close,
	.vop_old_create =	union_create,
	.vop_fsync =		union_fsync,
	.vop_getpages =		union_getpages,
	.vop_putpages =		union_putpages,
	.vop_getattr =		union_getattr,
	.vop_inactive =		union_inactive,
	.vop_ioctl =		union_ioctl,
	.vop_old_link =		union_link,
	.vop_old_lookup =	union_lookup,
	.vop_old_mkdir =	union_mkdir,
	.vop_old_mknod =	union_mknod,
	.vop_mmap =		union_mmap,
	.vop_open =		union_open,
	.vop_pathconf =		union_pathconf,
	.vop_poll =		union_poll,
	.vop_print =		union_print,
	.vop_read =		union_read,
	.vop_readdir =		union_readdir,
	.vop_readlink =		union_readlink,
	.vop_reclaim =		union_reclaim,
	.vop_old_remove =	union_remove,
	.vop_old_rename =	union_rename,
	.vop_old_rmdir =	union_rmdir,
	.vop_setattr =		union_setattr,
	.vop_strategy =		union_strategy,
	.vop_old_symlink =	union_symlink,
	.vop_old_whiteout =	union_whiteout,
	.vop_write =		union_write