/*
 * Copyright (c) 1992, 1993, 1994, 1995 Jan-Simon Pendry.
 * Copyright (c) 1992, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)union_vnops.c	8.32 (Berkeley) 6/23/95
 * $FreeBSD: src/sys/miscfs/union/union_vnops.c,v 1.72 1999/12/15 23:02:14 eivind Exp $
 * $DragonFly: src/sys/vfs/union/union_vnops.c,v 1.3 2003/06/25 03:56:01 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/stat.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <miscfs/union/union.h>

#include <vm/vm.h>
#include <vm/vnode_pager.h>

#include <vm/vm_page.h>
#include <vm/vm_object.h>

int uniondebug = 0;

#if UDEBUG_ENABLED
SYSCTL_INT(_vfs, OID_AUTO, uniondebug, CTLFLAG_RW, &uniondebug, 0, "");
#else
SYSCTL_INT(_vfs, OID_AUTO, uniondebug, CTLFLAG_RD, &uniondebug, 0, "");
#endif

static int	union_access __P((struct vop_access_args *ap));
static int	union_advlock __P((struct vop_advlock_args *ap));
static int	union_bmap __P((struct vop_bmap_args *ap));
static int	union_close __P((struct vop_close_args *ap));
static int	union_create __P((struct vop_create_args *ap));
static int	union_fsync __P((struct vop_fsync_args *ap));
static int	union_getattr __P((struct vop_getattr_args *ap));
static int	union_inactive __P((struct vop_inactive_args *ap));
static int	union_ioctl __P((struct vop_ioctl_args *ap));
static int	union_lease __P((struct vop_lease_args *ap));
static int	union_link __P((struct vop_link_args *ap));
static int	union_lock __P((struct vop_lock_args *ap));
static int	union_lookup __P((struct vop_lookup_args *ap));
static int	union_lookup1 __P((struct vnode *udvp, struct vnode **dvp,
				   struct vnode **vpp,
				   struct componentname *cnp));
static int	union_mkdir __P((struct vop_mkdir_args *ap));
static int	union_mknod __P((struct vop_mknod_args *ap));
static int	union_mmap __P((struct vop_mmap_args *ap));
static int	union_open __P((struct vop_open_args *ap));
static int	union_pathconf __P((struct vop_pathconf_args *ap));
static int	union_print __P((struct vop_print_args *ap));
static int	union_read __P((struct vop_read_args *ap));
static int	union_readdir __P((struct vop_readdir_args *ap));
static int	union_readlink __P((struct vop_readlink_args *ap));
static int	union_reclaim __P((struct vop_reclaim_args *ap));
static int	union_remove __P((struct vop_remove_args *ap));
static int	union_rename __P((struct vop_rename_args *ap));
static int	union_revoke __P((struct vop_revoke_args *ap));
static int	union_rmdir __P((struct vop_rmdir_args *ap));
static int	union_poll __P((struct vop_poll_args *ap));
static int	union_setattr __P((struct vop_setattr_args *ap));
static int	union_strategy __P((struct vop_strategy_args *ap));
static int	union_getpages __P((struct vop_getpages_args *ap));
static int	union_putpages __P((struct vop_putpages_args *ap));
static int	union_symlink __P((struct vop_symlink_args *ap));
static int	union_unlock __P((struct vop_unlock_args *ap));
static int	union_whiteout __P((struct vop_whiteout_args *ap));
static int	union_write __P((struct vop_read_args *ap));

static __inline
struct vnode *
union_lock_upper(struct union_node *un, struct thread *td)
{
	struct vnode *uppervp;

	if ((uppervp = un->un_uppervp) != NULL) {
		VREF(uppervp);
		vn_lock(uppervp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY, td);
	}
	KASSERT((uppervp == NULL || uppervp->v_usecount > 0), ("uppervp usecount is 0"));
	return(uppervp);
}

static __inline
void
union_unlock_upper(struct vnode *uppervp, struct thread *td)
{
	vput(uppervp);
}

static __inline
struct vnode *
union_lock_other(struct union_node *un, struct thread *td)
{
	struct vnode *vp;

	if (un->un_uppervp != NULL) {
		vp = union_lock_upper(un, td);
	} else if ((vp = un->un_lowervp) != NULL) {
		VREF(vp);
		vn_lock(vp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY, td);
	}
	return(vp);
}

static __inline
void
union_unlock_other(struct vnode *vp, struct thread *td)
{
	vput(vp);
}

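/*
 * Typical usage of the helpers above (illustrative sketch, not taken
 * from the original source): a successful union_lock_*() returns the
 * vnode referenced and exclusively locked, so each call must be paired
 * with the matching union_unlock_*(), which vput()s (unlock + release)
 * the vnode:
 *
 *	struct vnode *uvp;
 *
 *	if ((uvp = union_lock_upper(un, td)) != NULLVP) {
 *		(operate on the locked, referenced upper vnode)
 *		union_unlock_upper(uvp, td);
 *	}
 */
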
/*
 * union_lookup:
 *
 *	udvp must be exclusively locked on call and will remain
 *	exclusively locked on return.  This is the mount point
 *	for our filesystem.
 *
 *	dvp	Our base directory, locked and referenced.
 *		The passed dvp will be dereferenced and unlocked on return
 *		and a new dvp will be returned which is locked and
 *		referenced in the same variable.
 *
 *	vpp	is filled in with the result if no error occurred,
 *		locked and ref'd.
 *
 *	If an error is returned, *vpp is set to NULLVP.  If no
 *	error occurs, *vpp is returned with a reference and an
 *	exclusive lock.
 */

static int
union_lookup1(udvp, pdvp, vpp, cnp)
	struct vnode *udvp;
	struct vnode **pdvp;
	struct vnode **vpp;
	struct componentname *cnp;
{
	int error;
	struct thread *td = cnp->cn_td;
	struct vnode *dvp = *pdvp;
	struct vnode *tdvp;
	struct mount *mp;

	/*
	 * If stepping up the directory tree, check for going
	 * back across the mount point, in which case do what
	 * lookup would do by stepping back down the mount
	 * hierarchy.
	 */
	if (cnp->cn_flags & ISDOTDOT) {
		while ((dvp != udvp) && (dvp->v_flag & VROOT)) {
			/*
			 * Don't do the NOCROSSMOUNT check
			 * at this level.  By definition,
			 * union fs deals with namespaces, not
			 * filesystems.
			 */
			tdvp = dvp;
			dvp = dvp->v_mount->mnt_vnodecovered;
			VREF(dvp);
			vput(tdvp);
			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, td);
		}
	}

	/*
	 * Set return dvp to be the upperdvp 'parent directory'.
	 */
	*pdvp = dvp;

	/*
	 * If the VOP_LOOKUP call generates an error, tdvp is invalid and no
	 * changes will have been made to dvp, so we are set to return.
	 */

	error = VOP_LOOKUP(dvp, &tdvp, cnp);
	if (error) {
		UDEBUG(("dvp %p error %d flags %lx\n", dvp, error, cnp->cn_flags));
		*vpp = NULL;
		return (error);
	}

	/*
	 * The parent directory will have been unlocked, unless lookup
	 * found the last component or if dvp == tdvp (tdvp must be locked).
	 *
	 * We want our dvp to remain locked and ref'd.  We also want tdvp
	 * to remain locked and ref'd.
	 */
	UDEBUG(("parentdir %p result %p flag %lx\n", dvp, tdvp, cnp->cn_flags));

	if (dvp != tdvp && (cnp->cn_flags & ISLASTCN) == 0)
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, td);

	/*
	 * Lastly check if the current node is a mount point in
	 * which case walk up the mount hierarchy making sure not to
	 * bump into the root of the mount tree (ie. dvp != udvp).
	 *
	 * We use dvp as a temporary variable here, it is no longer related
	 * to the dvp above.  However, we have to ensure that both *pdvp and
	 * tdvp are locked on return.
	 */

	dvp = tdvp;
	while (
	    dvp != udvp &&
	    (dvp->v_type == VDIR) &&
	    (mp = dvp->v_mountedhere)
	) {
		int relock_pdvp = 0;

		if (vfs_busy(mp, 0, 0, td))
			continue;

		if (dvp == *pdvp)
			relock_pdvp = 1;
		vput(dvp);
		dvp = NULL;
		error = VFS_ROOT(mp, &dvp);

		vfs_unbusy(mp, td);

		if (relock_pdvp)
			vn_lock(*pdvp, LK_EXCLUSIVE | LK_RETRY, td);

		if (error) {
			*vpp = NULL;
			return (error);
		}
	}
	*vpp = dvp;
	return (0);
}

static int
union_lookup(ap)
	struct vop_lookup_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{
	int error;
	int uerror, lerror;
	struct vnode *uppervp, *lowervp;
	struct vnode *upperdvp, *lowerdvp;
	struct vnode *dvp = ap->a_dvp;		/* starting dir */
	struct union_node *dun = VTOUNION(dvp);	/* associated union node */
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_td;
	int lockparent = cnp->cn_flags & LOCKPARENT;
	struct union_mount *um = MOUNTTOUNIONMOUNT(dvp->v_mount);
	struct ucred *saved_cred = NULL;
	int iswhiteout;
	struct vattr va;

	*ap->a_vpp = NULLVP;

	/*
	 * Disallow write attempts to the filesystem mounted read-only.
	 */
	if ((cnp->cn_flags & ISLASTCN) &&
	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) {
		return (EROFS);
	}

	/*
	 * For any lookups we do, always return with the parent locked.
	 */
	cnp->cn_flags |= LOCKPARENT;

	lowerdvp = dun->un_lowervp;
	uppervp = NULLVP;
	lowervp = NULLVP;
	iswhiteout = 0;

	uerror = ENOENT;
	lerror = ENOENT;

	/*
	 * Get a private lock on uppervp and a reference, effectively
	 * taking it out of the union_node's control.
	 *
	 * We must lock upperdvp while holding our lock on dvp
	 * to avoid a deadlock.
	 */
	upperdvp = union_lock_upper(dun, td);

	/*
	 * do the lookup in the upper level.
	 * if that level consumes additional pathnames,
	 * then assume that something special is going
	 * on and just return that vnode.
	 */
	if (upperdvp != NULLVP) {
		/*
		 * We do not have to worry about the DOTDOT case, we've
		 * already unlocked dvp.
		 */
		UDEBUG(("A %p\n", upperdvp));

		/*
		 * Do the lookup.  We must supply a locked and referenced
		 * upperdvp to the function and will get a new locked and
		 * referenced upperdvp back with the old having been
		 * dereferenced.
		 *
		 * If an error is returned, uppervp will be NULLVP.  If no
		 * error occurs, uppervp will be the locked and referenced
		 * return vnode or possibly NULL, depending on what is being
		 * requested.  It is possible that the returned uppervp
		 * will be the same as upperdvp.
		 */
		uerror = union_lookup1(um->um_uppervp, &upperdvp, &uppervp, cnp);
		UDEBUG((
		    "uerror %d upperdvp %p %d/%d, uppervp %p ref=%d/lck=%d\n",
		    uerror,
		    upperdvp,
		    upperdvp->v_usecount,
		    VOP_ISLOCKED(upperdvp, NULL),
		    uppervp,
		    (uppervp ? uppervp->v_usecount : -99),
		    (uppervp ? VOP_ISLOCKED(uppervp, NULL) : -99)
		));

		/*
		 * Disallow write attempts to the filesystem mounted read-only.
		 */
		if (uerror == EJUSTRETURN && (cnp->cn_flags & ISLASTCN) &&
		    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
		    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME)) {
			error = EROFS;
			goto out;
		}

		/*
		 * Special case.  If cn_consume != 0 skip out.  The result
		 * of the lookup is transferred to our return variable.  If
		 * an error occurred we have to throw away the results.
		 */

		if (cnp->cn_consume != 0) {
			if ((error = uerror) == 0) {
				*ap->a_vpp = uppervp;
				uppervp = NULL;
			}
			goto out;
		}

		/*
		 * Calculate whiteout, fall through
		 */

		if (uerror == ENOENT || uerror == EJUSTRETURN) {
			if (cnp->cn_flags & ISWHITEOUT) {
				iswhiteout = 1;
			} else if (lowerdvp != NULLVP) {
				int terror;

				terror = VOP_GETATTR(upperdvp, &va,
					cnp->cn_cred, cnp->cn_td);
				if (terror == 0 && (va.va_flags & OPAQUE))
					iswhiteout = 1;
			}
		}
	}

	/*
	 * in a similar way to the upper layer, do the lookup
	 * in the lower layer.  this time, if there is some
	 * component magic going on, then vput whatever we got
	 * back from the upper layer and return the lower vnode
	 * instead.
	 */

	if (lowerdvp != NULLVP && !iswhiteout) {
		int nameiop;

		UDEBUG(("B %p\n", lowerdvp));

		/*
		 * Force only LOOKUPs on the lower node, since
		 * we won't be making changes to it anyway.
		 */
		nameiop = cnp->cn_nameiop;
		cnp->cn_nameiop = LOOKUP;
		if (um->um_op == UNMNT_BELOW) {
			saved_cred = cnp->cn_cred;
			cnp->cn_cred = um->um_cred;
		}

		/*
		 * We shouldn't have to worry about locking interactions
		 * between the lower layer and our union layer (w.r.t.
		 * `..' processing) because we don't futz with lowervp
		 * locks in the union-node instantiation code path.
		 *
		 * union_lookup1() requires lowervp to be locked on entry,
		 * and it will be unlocked on return.  The ref count will
		 * not change.  On return lowervp doesn't represent anything
		 * to us so we NULL it out.
		 */
		VREF(lowerdvp);
		vn_lock(lowerdvp, LK_EXCLUSIVE | LK_RETRY, td);
		lerror = union_lookup1(um->um_lowervp, &lowerdvp, &lowervp, cnp);
		if (lowerdvp == lowervp)
			vrele(lowerdvp);
		else
			vput(lowerdvp);
		lowerdvp = NULL;	/* lowerdvp invalid after vput */

		if (um->um_op == UNMNT_BELOW)
			cnp->cn_cred = saved_cred;
		cnp->cn_nameiop = nameiop;

		if (cnp->cn_consume != 0 || lerror == EACCES) {
			if ((error = lerror) == 0) {
				*ap->a_vpp = lowervp;
				lowervp = NULL;
			}
			goto out;
		}
	} else {
		UDEBUG(("C %p\n", lowerdvp));
		if ((cnp->cn_flags & ISDOTDOT) && dun->un_pvp != NULLVP) {
			if ((lowervp = LOWERVP(dun->un_pvp)) != NULL) {
				VREF(lowervp);
				vn_lock(lowervp, LK_EXCLUSIVE | LK_RETRY, td);
				lerror = 0;
			}
		}
	}

	/*
	 * Ok.  Now we have uerror, uppervp, upperdvp, lerror, and lowervp.
	 *
	 *	1. If both layers returned an error, select the upper layer.
	 *
	 *	2. If the upper layer failed and the bottom layer succeeded,
	 *	   two subcases occur:
	 *
	 *	   a. The bottom vnode is not a directory, in which case
	 *	      just return a new union vnode referencing an
	 *	      empty top layer and the existing bottom layer.
	 *
	 *	   b. The bottom vnode is a directory, in which case
	 *	      create a new directory in the top layer and
	 *	      fall through to case 3.
	 *
	 *	3. If the top layer succeeded then return a new union
	 *	   vnode referencing whatever the new top layer and
	 *	   whatever the bottom layer returned.
	 */

	/* case 1. */
	if ((uerror != 0) && (lerror != 0)) {
		error = uerror;
		goto out;
	}

	/* case 2. */
	if (uerror != 0 /* && (lerror == 0) */ ) {
		if (lowervp->v_type == VDIR) { /* case 2b. */
			KASSERT(uppervp == NULL, ("uppervp unexpectedly non-NULL"));
			/*
			 * oops, uppervp has a problem, we may have to shadow.
			 */
			uerror = union_mkshadow(um, upperdvp, cnp, &uppervp);
			if (uerror) {
				error = uerror;
				goto out;
			}
		}
	}

	/*
	 * Must call union_allocvp() with both the upper and lower vnodes
	 * referenced and the upper vnode locked.  ap->a_vpp is returned
	 * referenced and locked.  lowervp, uppervp, and upperdvp are
	 * absorbed by union_allocvp() whether it succeeds or fails.
	 *
	 * upperdvp is the parent directory of uppervp which may be
	 * different, depending on the path, from dvp->un_uppervp.  That's
	 * why it is a separate argument.  Note that it must be unlocked.
	 *
	 * dvp must be locked on entry to the call and will be locked on
	 * return.
	 */

	if (uppervp && uppervp != upperdvp)
		VOP_UNLOCK(uppervp, 0, td);
	if (lowervp)
		VOP_UNLOCK(lowervp, 0, td);
	if (upperdvp)
		VOP_UNLOCK(upperdvp, 0, td);

	error = union_allocvp(ap->a_vpp, dvp->v_mount, dvp, upperdvp, cnp,
		uppervp, lowervp, 1);

	UDEBUG(("Create %p = %p %p refs=%d\n", *ap->a_vpp, uppervp, lowervp, (*ap->a_vpp) ? ((*ap->a_vpp)->v_usecount) : -99));

	uppervp = NULL;
	upperdvp = NULL;
	lowervp = NULL;

	/*
	 * Termination Code
	 *
	 * - put away any extra junk lying around.  Note that lowervp
	 *   (if not NULL) will never be the same as *ap->a_vp and
	 *   neither will uppervp, because when we set that state we
	 *   NULL-out lowervp or uppervp.  On the other hand, upperdvp
	 *   may match uppervp or *ap->a_vpp.
	 *
	 * - relock/unlock dvp if appropriate.
	 */

out:
	if (upperdvp) {
		if (upperdvp == uppervp || upperdvp == *ap->a_vpp)
			vrele(upperdvp);
		else
			vput(upperdvp);
	}

	if (uppervp)
		vput(uppervp);

	if (lowervp)
		vput(lowervp);

	/*
	 * Restore LOCKPARENT state
	 */

	if (!lockparent)
		cnp->cn_flags &= ~LOCKPARENT;

	UDEBUG(("Out %d vpp %p/%d lower %p upper %p\n", error, *ap->a_vpp,
		((*ap->a_vpp) ? (*ap->a_vpp)->v_usecount : -99),
		lowervp, uppervp));

	/*
	 * dvp lock state, determine whether to relock dvp.  dvp is expected
	 * to be locked on return if:
	 *
	 *	- there was an error (other than EJUSTRETURN), or
	 *	- we hit the last component and lockparent is true
	 *
	 * dvp_is_locked is the current state of the dvp lock, not counting
	 * the possibility that *ap->a_vpp == dvp (in which case it is locked
	 * anyway).  Note that *ap->a_vpp == dvp only if no error occurred.
	 */

	if (*ap->a_vpp != dvp) {
		if ((error == 0 || error == EJUSTRETURN) &&
		    (!lockparent || (cnp->cn_flags & ISLASTCN) == 0)) {
			VOP_UNLOCK(dvp, 0, td);
		}
	}

	/*
	 * Diagnostics
	 */

#ifdef DIAGNOSTIC
	if (cnp->cn_namelen == 1 &&
	    cnp->cn_nameptr[0] == '.' &&
	    *ap->a_vpp != dvp) {
		panic("union_lookup returning . (%p) not same as startdir (%p)", *ap->a_vpp, dvp);
	}
#endif

	return (error);
}

/*
 * union_create:
 *
 *	a_dvp is locked on entry and remains locked on return.  a_vpp is
 *	returned locked if no error occurs, otherwise it is garbage.
 */

static int
union_create(ap)
	struct vop_create_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_td;
	struct vnode *dvp;
	int error = EROFS;

	if ((dvp = union_lock_upper(dun, td)) != NULL) {
		struct vnode *vp;
		struct mount *mp;

		error = VOP_CREATE(dvp, &vp, cnp, ap->a_vap);
		if (error == 0) {
			mp = ap->a_dvp->v_mount;
			VOP_UNLOCK(vp, 0, td);
			UDEBUG(("ALLOCVP-1 FROM %p REFS %d\n", vp, vp->v_usecount));
			error = union_allocvp(ap->a_vpp, mp, NULLVP, NULLVP,
				cnp, vp, NULLVP, 1);
			UDEBUG(("ALLOCVP-2B FROM %p REFS %d\n", *ap->a_vpp, vp->v_usecount));
		}
		union_unlock_upper(dvp, td);
	}
	return (error);
}

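/*
 * union_whiteout:
 *
 *	Create a whiteout entry in the upper layer directory, masking
 *	the name.  Requires an upper vnode; if the union node has no
 *	upper vnode the operation fails with EOPNOTSUPP.
 */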
static int
union_whiteout(ap)
	struct vop_whiteout_args /* {
		struct vnode *a_dvp;
		struct componentname *a_cnp;
		int a_flags;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_dvp);
	struct componentname *cnp = ap->a_cnp;
	struct vnode *uppervp;
	int error = EOPNOTSUPP;

	if ((uppervp = union_lock_upper(un, cnp->cn_td)) != NULLVP) {
		error = VOP_WHITEOUT(un->un_uppervp, cnp, ap->a_flags);
		union_unlock_upper(uppervp, cnp->cn_td);
	}
	return(error);
}

/*
 * union_mknod:
 *
 *	a_dvp is locked on entry and should remain locked on return.
 *	a_vpp is garbage whether an error occurs or not.
 */

static int
union_mknod(ap)
	struct vop_mknod_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct componentname *cnp = ap->a_cnp;
	struct vnode *dvp;
	int error = EROFS;

	if ((dvp = union_lock_upper(dun, cnp->cn_td)) != NULL) {
		error = VOP_MKNOD(dvp, ap->a_vpp, cnp, ap->a_vap);
		union_unlock_upper(dvp, cnp->cn_td);
	}
	return (error);
}

/*
 * union_open:
 *
 *	run open VOP.  When opening the underlying vnode we have to mimic
 *	vn_open().  What we *really* need to do to avoid screwups if the
 *	open semantics change is to call vn_open().  For example, ufs blows
 *	up if you open a file but do not vmio it prior to writing.
 */

static int
union_open(ap)
	struct vop_open_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *tvp;
	int mode = ap->a_mode;
	struct ucred *cred = ap->a_cred;
	struct thread *td = ap->a_td;
	int error = 0;
	int tvpisupper = 1;

	/*
	 * If there is an existing upper vp then simply open that.
	 * The upper vp takes precedence over the lower vp.  When opening
	 * a lower vp for writing copy it to the uppervp and then open the
	 * uppervp.
	 *
	 * At the end of this section tvp will be left locked.
	 */
	if ((tvp = union_lock_upper(un, td)) == NULLVP) {
		/*
		 * If the lower vnode is being opened for writing, then
		 * copy the file contents to the upper vnode and open that,
		 * otherwise we can simply open the lower vnode.
		 */
		tvp = un->un_lowervp;
		if ((ap->a_mode & FWRITE) && (tvp->v_type == VREG)) {
			int docopy = !(mode & O_TRUNC);
			error = union_copyup(un, docopy, cred, td);
			tvp = union_lock_upper(un, td);
		} else {
			un->un_openl++;
			VREF(tvp);
			vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY, td);
			tvpisupper = 0;
		}
	}

	/*
	 * We are holding the correct vnode, open it
	 */

	if (error == 0)
		error = VOP_OPEN(tvp, mode, cred, td);

	/*
	 * Absolutely necessary or UFS will blow up
	 */
	if (error == 0 && vn_canvmio(tvp) == TRUE) {
		error = vfs_object_create(tvp, td, cred);
	}

	/*
	 * Release any locks held
	 */
	if (tvpisupper) {
		if (tvp)
			union_unlock_upper(tvp, td);
	} else {
		vput(tvp);
	}
	return (error);
}

/*
 * union_close:
 *
 *	It is unclear whether a_vp is passed locked or unlocked.  Whatever
 *	the case we do not change it.
 */

static int
union_close(ap)
	struct vop_close_args /* {
		struct vnode *a_vp;
		int a_fflag;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp;

	if ((vp = un->un_uppervp) == NULLVP) {
#ifdef UNION_DIAGNOSTIC
		if (un->un_openl <= 0)
			panic("union: un_openl cnt");
#endif
		--un->un_openl;
		vp = un->un_lowervp;
	}
	ap->a_vp = vp;
	return (VCALL(vp, VOFFSET(vop_close), ap));
}

/*
 * Check access permission on the union vnode.
 * The access check being enforced is to check
 * against both the underlying vnode, and any
 * copied vnode.  This ensures that no additional
 * file permissions are given away simply because
 * the user caused an implicit file copy.
 */
static int
union_access(ap)
	struct vop_access_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct thread *td = ap->a_td;
	int error = EACCES;
	struct vnode *vp;

	/*
	 * Disallow write attempts on filesystems mounted read-only.
	 */
	if ((ap->a_mode & VWRITE) &&
	    (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)) {
		switch (ap->a_vp->v_type) {
		case VREG:
		case VDIR:
		case VLNK:
			return (EROFS);
		default:
			break;
		}
	}

	if ((vp = union_lock_upper(un, td)) != NULLVP) {
		ap->a_vp = vp;
		error = VCALL(vp, VOFFSET(vop_access), ap);
		union_unlock_upper(vp, td);
		return(error);
	}

	if ((vp = un->un_lowervp) != NULLVP) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
		ap->a_vp = vp;

		/*
		 * Remove VWRITE from a_mode if our mount point is RW, because
		 * we want to allow writes and lowervp may be read-only.
		 */
		if ((un->un_vnode->v_mount->mnt_flag & MNT_RDONLY) == 0)
			ap->a_mode &= ~VWRITE;

		error = VCALL(vp, VOFFSET(vop_access), ap);
		if (error == 0) {
			struct union_mount *um;

			um = MOUNTTOUNIONMOUNT(un->un_vnode->v_mount);

			if (um->um_op == UNMNT_BELOW) {
				ap->a_cred = um->um_cred;
				error = VCALL(vp, VOFFSET(vop_access), ap);
			}
		}
		VOP_UNLOCK(vp, 0, td);
	}
	return(error);
}

/*
 * We handle getattr only to change the fsid and
 * track object sizes.
 *
 * It's not clear whether VOP_GETATTR is to be
 * called with the vnode locked or not.  stat() calls
 * it with (vp) locked, and fstat() calls it with
 * (vp) unlocked.
 *
 * Because of this we cannot use our normal locking functions
 * if we do not intend to lock the main a_vp node.  At the moment
 * we are running without any specific locking at all, but any
 * programmer should be aware that care must be taken if locking
 * is added to this function.
 */

static int
union_getattr(ap)
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	int error;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp;
	struct vattr *vap;
	struct vattr va;

	/*
	 * Some programs walk the filesystem hierarchy by counting
	 * links to directories to avoid stat'ing all the time.
	 * This means the link count on directories needs to be "correct".
	 * The only way to do that is to call getattr on both layers
	 * and fix up the link count.  The link count will not necessarily
	 * be accurate but will be large enough to defeat the tree walkers.
	 */

	vap = ap->a_vap;

	if ((vp = un->un_uppervp) != NULLVP) {
		error = VOP_GETATTR(vp, vap, ap->a_cred, ap->a_td);
		if (error)
			return (error);
		/* XXX isn't this dangerous without a lock? */
		union_newsize(ap->a_vp, vap->va_size, VNOVAL);
	}

	if (vp == NULLVP) {
		vp = un->un_lowervp;
	} else if (vp->v_type == VDIR && un->un_lowervp != NULLVP) {
		vp = un->un_lowervp;
		vap = &va;
	} else {
		vp = NULLVP;
	}

	if (vp != NULLVP) {
		error = VOP_GETATTR(vp, vap, ap->a_cred, ap->a_td);
		if (error)
			return (error);
		/* XXX isn't this dangerous without a lock? */
		union_newsize(ap->a_vp, VNOVAL, vap->va_size);
	}

	if ((vap != ap->a_vap) && (vap->va_type == VDIR))
		ap->a_vap->va_nlink += vap->va_nlink;
	return (0);
}

static int
union_setattr(ap)
	struct vop_setattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct thread *td = ap->a_td;
	struct vattr *vap = ap->a_vap;
	struct vnode *uppervp;
	int error;

	/*
	 * Disallow write attempts on filesystems mounted read-only.
	 */
	if ((ap->a_vp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
	     vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	     vap->va_mtime.tv_sec != VNOVAL ||
	     vap->va_mode != (mode_t)VNOVAL)) {
		return (EROFS);
	}

	/*
	 * Handle case of truncating lower object to zero size,
	 * by creating a zero length upper object.  This is to
	 * handle the case of open with O_TRUNC and O_CREAT.
	 */
	if (un->un_uppervp == NULLVP && (un->un_lowervp->v_type == VREG)) {
		error = union_copyup(un, (ap->a_vap->va_size != 0),
			    ap->a_cred, ap->a_td);
		if (error)
			return (error);
	}

	/*
	 * Try to set attributes in upper layer,
	 * otherwise return read-only filesystem error.
	 */
	error = EROFS;
	if ((uppervp = union_lock_upper(un, td)) != NULLVP) {
		error = VOP_SETATTR(un->un_uppervp, ap->a_vap,
					ap->a_cred, ap->a_td);
		if ((error == 0) && (ap->a_vap->va_size != VNOVAL))
			union_newsize(ap->a_vp, ap->a_vap->va_size, VNOVAL);
		union_unlock_upper(uppervp, td);
	}
	return (error);
}

/*
 * union_getpages:
 */

static int
union_getpages(struct vop_getpages_args *ap)
{
	int r;

	r = vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
		ap->a_count, ap->a_reqpage);
	return(r);
}

/*
 * union_putpages:
 */

static int
union_putpages(struct vop_putpages_args *ap)
{
	int r;

	r = vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
		ap->a_sync, ap->a_rtvals);
	return(r);
}

static int
union_read(ap)
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct thread *td = ap->a_uio->uio_td;
	struct vnode *uvp;
	int error;

	uvp = union_lock_other(un, td);
	KASSERT(uvp != NULL, ("union_read: backing vnode missing!"));

	if (ap->a_vp->v_flag & VOBJBUF)
		union_vm_coherency(ap->a_vp, ap->a_uio, 0);

	error = VOP_READ(uvp, ap->a_uio, ap->a_ioflag, ap->a_cred);
	union_unlock_other(uvp, td);

	/*
	 * XXX
	 * perhaps the size of the underlying object has changed under
	 * our feet.  take advantage of the offset information present
	 * in the uio structure.
	 */
	if (error == 0) {
		struct union_node *un = VTOUNION(ap->a_vp);
		off_t cur = ap->a_uio->uio_offset;

		if (uvp == un->un_uppervp) {
			if (cur > un->un_uppersz)
				union_newsize(ap->a_vp, cur, VNOVAL);
		} else {
			if (cur > un->un_lowersz)
				union_newsize(ap->a_vp, VNOVAL, cur);
		}
	}
	return (error);
}

static int
union_write(ap)
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct thread *td = ap->a_uio->uio_td;
	struct vnode *uppervp;
	int error;

	if ((uppervp = union_lock_upper(un, td)) == NULLVP)
		panic("union: missing upper layer in write");

	/*
	 * Since our VM pages are associated with our vnode rather than
	 * the real vnode, and since we do not run our reads and writes
	 * through our own VM cache, we have a VM/VFS coherency problem.
	 * We solve it by invalidating or flushing the associated VM
	 * pages prior to allowing a normal read or write to occur.
	 *
	 * VM-backed writes (UIO_NOCOPY) have to be converted to normal
	 * writes because we are not cache-coherent.  Normal writes need
	 * to be made coherent with our VM-backing store, which we do by
	 * first flushing any dirty VM pages associated with the write
	 * range, and then destroying any clean VM pages associated with
	 * the write range.
	 */

	if (ap->a_uio->uio_segflg == UIO_NOCOPY) {
		ap->a_uio->uio_segflg = UIO_SYSSPACE;
	} else if (ap->a_vp->v_flag & VOBJBUF) {
		union_vm_coherency(ap->a_vp, ap->a_uio, 1);
	}

	error = VOP_WRITE(uppervp, ap->a_uio, ap->a_ioflag, ap->a_cred);

	/*
	 * the size of the underlying object may be changed by the
	 * write.
	 */
	if (error == 0) {
		off_t cur = ap->a_uio->uio_offset;

		if (cur > un->un_uppersz)
			union_newsize(ap->a_vp, cur, VNOVAL);
	}
	union_unlock_upper(uppervp, td);
	return (error);
}

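/*
 * union_lease, union_ioctl, union_poll, and union_mmap are simple
 * pass-throughs: substitute the upper vnode if one exists, otherwise
 * the lower vnode (OTHERVP()), and re-dispatch the VOP to it via
 * VCALL().
 */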
static int
union_lease(ap)
	struct vop_lease_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
		struct ucred *a_cred;
		int a_flag;
	} */ *ap;
{
	struct vnode *ovp = OTHERVP(ap->a_vp);

	ap->a_vp = ovp;
	return (VCALL(ovp, VOFFSET(vop_lease), ap));
}

static int
union_ioctl(ap)
	struct vop_ioctl_args /* {
		struct vnode *a_vp;
		int a_command;
		caddr_t a_data;
		int a_fflag;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *ovp = OTHERVP(ap->a_vp);

	ap->a_vp = ovp;
	return (VCALL(ovp, VOFFSET(vop_ioctl), ap));
}

static int
union_poll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *ovp = OTHERVP(ap->a_vp);

	ap->a_vp = ovp;
	return (VCALL(ovp, VOFFSET(vop_poll), ap));
}

static int
union_revoke(ap)
	struct vop_revoke_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	if (UPPERVP(vp))
		VOP_REVOKE(UPPERVP(vp), ap->a_flags);
	if (LOWERVP(vp))
		VOP_REVOKE(LOWERVP(vp), ap->a_flags);
	vgone(vp);
	return (0);
}

static int
union_mmap(ap)
	struct vop_mmap_args /* {
		struct vnode *a_vp;
		int a_fflags;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *ovp = OTHERVP(ap->a_vp);

	ap->a_vp = ovp;
	return (VCALL(ovp, VOFFSET(vop_mmap), ap));
}

static int
union_fsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_waitfor;
		struct thread *a_td;
	} */ *ap;
{
	int error = 0;
	struct thread *td = ap->a_td;
	struct vnode *targetvp;
	struct union_node *un = VTOUNION(ap->a_vp);

	if ((targetvp = union_lock_other(un, td)) != NULLVP) {
		error = VOP_FSYNC(targetvp, ap->a_cred, ap->a_waitfor, td);
		union_unlock_other(targetvp, td);
	}

	return (error);
}

/*
 * union_remove:
 *
 *	Remove the specified cnp.  The dvp and vp are passed to us locked
 *	and must remain locked on return.
 */

static int
union_remove(ap)
	struct vop_remove_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct union_node *un = VTOUNION(ap->a_vp);
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_td;
	struct vnode *uppervp;
	struct vnode *upperdvp;
	int error;

	if ((upperdvp = union_lock_upper(dun, td)) == NULLVP)
		panic("union remove: null upper vnode");

	if ((uppervp = union_lock_upper(un, td)) != NULLVP) {
		if (union_dowhiteout(un, cnp->cn_cred, td))
			cnp->cn_flags |= DOWHITEOUT;
		error = VOP_REMOVE(upperdvp, uppervp, cnp);
#if 0
		/* XXX */
		if (!error)
			union_removed_upper(un);
#endif
		union_unlock_upper(uppervp, td);
	} else {
		error = union_mkwhiteout(
			    MOUNTTOUNIONMOUNT(ap->a_dvp->v_mount),
			    upperdvp, ap->a_cnp, un->un_path);
	}
	union_unlock_upper(upperdvp, td);
	return (error);
}

/*
 * union_link:
 *
 *	tdvp will be locked on entry, vp will not be locked on entry.
 *	tdvp should remain locked on return and vp should remain unlocked
 *	on return.
 */

static int
union_link(ap)
	struct vop_link_args /* {
		struct vnode *a_tdvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_td;
	struct union_node *dun = VTOUNION(ap->a_tdvp);
	struct vnode *vp;
	struct vnode *tdvp;
	int error = 0;

	if (ap->a_tdvp->v_op != ap->a_vp->v_op) {
		vp = ap->a_vp;
	} else {
		struct union_node *tun = VTOUNION(ap->a_vp);

		if (tun->un_uppervp == NULLVP) {
			vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY, td);
#if 0
			if (dun->un_uppervp == tun->un_dirvp) {
				if (dun->un_flags & UN_ULOCK) {
					dun->un_flags &= ~UN_ULOCK;
					VOP_UNLOCK(dun->un_uppervp, 0, td);
				}
			}
#endif
			error = union_copyup(tun, 1, cnp->cn_cred, td);
#if 0
			if (dun->un_uppervp == tun->un_dirvp) {
				vn_lock(dun->un_uppervp,
					    LK_EXCLUSIVE | LK_RETRY, td);
				dun->un_flags |= UN_ULOCK;
			}
#endif
			VOP_UNLOCK(ap->a_vp, 0, td);
		}
		vp = tun->un_uppervp;
	}

	if (error)
		return (error);

	/*
	 * Make sure upper is locked, then unlock the union directory we were
	 * called with to avoid a deadlock while we are calling VOP_LINK() on
	 * the upper (with tdvp locked and vp not locked).  Our ap->a_tdvp
	 * is expected to be locked on return.
	 */

	if ((tdvp = union_lock_upper(dun, td)) == NULLVP)
		return (EROFS);

	VOP_UNLOCK(ap->a_tdvp, 0, td);		/* unlock calling node */
	error = VOP_LINK(tdvp, vp, cnp);	/* call link on upper */

	/*
	 * We have to unlock tdvp prior to relocking our calling node in
	 * order to avoid a deadlock.
	 */
	union_unlock_upper(tdvp, td);
	vn_lock(ap->a_tdvp, LK_EXCLUSIVE | LK_RETRY, td);
	return (error);
}

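/*
 * union_rename:
 *
 *	Map each of the four vnode arguments (fdvp, fvp, tdvp, tvp) to
 *	its upper-layer equivalent, copying fvp up if it exists only in
 *	the lower layer, and then hand the operation to the upper
 *	layer's VOP_RENAME().  Lower-only directories cannot be renamed
 *	and fail with EXDEV.
 */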
static int
union_rename(ap)
	struct vop_rename_args /* {
		struct vnode *a_fdvp;
		struct vnode *a_fvp;
		struct componentname *a_fcnp;
		struct vnode *a_tdvp;
		struct vnode *a_tvp;
		struct componentname *a_tcnp;
	} */ *ap;
{
	int error;
	struct vnode *fdvp = ap->a_fdvp;
	struct vnode *fvp = ap->a_fvp;
	struct vnode *tdvp = ap->a_tdvp;
	struct vnode *tvp = ap->a_tvp;

	/*
	 * Figure out what fdvp to pass to our upper or lower vnode.  If we
	 * replace the fdvp, release the original one and ref the new one.
	 */

	if (fdvp->v_op == union_vnodeop_p) {	/* always true */
		struct union_node *un = VTOUNION(fdvp);
		if (un->un_uppervp == NULLVP) {
			/*
			 * this should never happen in normal
			 * operation but might if there was
			 * a problem creating the top-level shadow
			 * directory.
			 */
			error = EXDEV;
			goto bad;
		}
		fdvp = un->un_uppervp;
		VREF(fdvp);
		vrele(ap->a_fdvp);
	}

	/*
	 * Figure out what fvp to pass to our upper or lower vnode.  If we
	 * replace the fvp, release the original one and ref the new one.
	 */

	if (fvp->v_op == union_vnodeop_p) {	/* always true */
		struct union_node *un = VTOUNION(fvp);
#if 0
		struct union_mount *um = MOUNTTOUNIONMOUNT(fvp->v_mount);
#endif

		if (un->un_uppervp == NULLVP) {
			switch(fvp->v_type) {
			case VREG:
				vn_lock(un->un_vnode, LK_EXCLUSIVE | LK_RETRY, ap->a_fcnp->cn_td);
				error = union_copyup(un, 1, ap->a_fcnp->cn_cred, ap->a_fcnp->cn_td);
				VOP_UNLOCK(un->un_vnode, 0, ap->a_fcnp->cn_td);
				if (error)
					goto bad;
				break;
			case VDIR:
				/*
				 * XXX not yet.
				 *
				 * There is only one way to rename a directory
				 * based in the lowervp, and that is to copy
				 * the entire directory hierarchy.  Otherwise
				 * it would not last across a reboot.
				 */
#if 0
				vrele(fvp);
				fvp = NULL;
				vn_lock(fdvp, LK_EXCLUSIVE | LK_RETRY, ap->a_fcnp->cn_td);
				error = union_mkshadow(um, fdvp,
					    ap->a_fcnp, &un->un_uppervp);
				VOP_UNLOCK(fdvp, 0, ap->a_fcnp->cn_td);
				if (un->un_uppervp)
					VOP_UNLOCK(un->un_uppervp, 0, ap->a_fcnp->cn_td);
				if (error)
					goto bad;
				break;
#endif
			default:
				error = EXDEV;
				goto bad;
			}
		}

		if (un->un_lowervp != NULLVP)
			ap->a_fcnp->cn_flags |= DOWHITEOUT;
		fvp = un->un_uppervp;
		VREF(fvp);
		vrele(ap->a_fvp);
	}

	/*
	 * Figure out what tdvp (destination directory) to pass to the
	 * lower level.  If we replace it with uppervp, we need to vput the
	 * old one.  The exclusive lock is transferred to what we will pass
	 * down in the VOP_RENAME and we replace uppervp with a simple
	 * reference.
	 */

	if (tdvp->v_op == union_vnodeop_p) {
		struct union_node *un = VTOUNION(tdvp);

		if (un->un_uppervp == NULLVP) {
			/*
			 * this should never happen in normal
			 * operation but might if there was
			 * a problem creating the top-level shadow
			 * directory.
			 */
			error = EXDEV;
			goto bad;
		}

		/*
		 * new tdvp is a lock and reference on uppervp, put away
		 * the old tdvp.
		 */
		tdvp = union_lock_upper(un, ap->a_tcnp->cn_td);
		vput(ap->a_tdvp);
	}

	/*
	 * Figure out what tvp (destination file) to pass to the
	 * lower level.
	 *
	 * If the uppervp file does not exist put away the (wrong)
	 * file and change tvp to NULL.
	 */

	if (tvp != NULLVP && tvp->v_op == union_vnodeop_p) {
		struct union_node *un = VTOUNION(tvp);

		tvp = union_lock_upper(un, ap->a_tcnp->cn_td);
		vput(ap->a_tvp);
		/* note: tvp may be NULL */
	}

	/*
	 * VOP_RENAME() releases/vputs prior to returning, so we have no
	 * cleanup to do.
	 */

	return (VOP_RENAME(fdvp, fvp, ap->a_fcnp, tdvp, tvp, ap->a_tcnp));

	/*
	 * Error.  We still have to release / vput the various elements.
	 */

bad:
	vrele(fdvp);
	if (fvp)
		vrele(fvp);
	vput(tdvp);
	if (tvp != NULLVP) {
		if (tvp != tdvp)
			vput(tvp);
		else
			vrele(tvp);
	}
	return (error);
}

static int
union_mkdir(ap)
	struct vop_mkdir_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_td;
	struct vnode *upperdvp;
	int error = EROFS;

	if ((upperdvp = union_lock_upper(dun, td)) != NULLVP) {
		struct vnode *vp;

		error = VOP_MKDIR(upperdvp, &vp, cnp, ap->a_vap);
		union_unlock_upper(upperdvp, td);

		if (error == 0) {
			VOP_UNLOCK(vp, 0, td);
			UDEBUG(("ALLOCVP-2 FROM %p REFS %d\n", vp, vp->v_usecount));
			error = union_allocvp(ap->a_vpp, ap->a_dvp->v_mount,
				ap->a_dvp, NULLVP, cnp, vp, NULLVP, 1);
			UDEBUG(("ALLOCVP-2B FROM %p REFS %d\n", *ap->a_vpp, vp->v_usecount));
		}
	}
	return (error);
}

static int
union_rmdir(ap)
	struct vop_rmdir_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct union_node *un = VTOUNION(ap->a_vp);
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_td;
	struct vnode *upperdvp;
	struct vnode *uppervp;
	int error;

	if ((upperdvp = union_lock_upper(dun, td)) == NULLVP)
		panic("union rmdir: null upper vnode");

	if ((uppervp = union_lock_upper(un, td)) != NULLVP) {
		if (union_dowhiteout(un, cnp->cn_cred, td))
			cnp->cn_flags |= DOWHITEOUT;
		error = VOP_RMDIR(upperdvp, uppervp, ap->a_cnp);
		union_unlock_upper(uppervp, td);
	} else {
		error = union_mkwhiteout(
			    MOUNTTOUNIONMOUNT(ap->a_dvp->v_mount),
			    dun->un_uppervp, ap->a_cnp, un->un_path);
	}
	union_unlock_upper(upperdvp, td);
	return (error);
}

/*
 * union_symlink:
 *
 *	dvp is locked on entry and remains locked on return.  a_vpp is garbage
 *	(unused).
 */

static int
union_symlink(ap)
	struct vop_symlink_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
		char *a_target;
	} */ *ap;
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_td;
	struct vnode *dvp;
	int error = EROFS;

	if ((dvp = union_lock_upper(dun, td)) != NULLVP) {
		error = VOP_SYMLINK(dvp, ap->a_vpp, cnp, ap->a_vap,
			    ap->a_target);
		union_unlock_upper(dvp, td);
	}
	return (error);
}

/*
 * union_readdir works in concert with getdirentries() and
 * readdir(3) to provide a list of entries in the unioned
 * directories.  getdirentries() is responsible for walking
 * down the union stack.  readdir(3) is responsible for
 * eliminating duplicate names from the returned data stream.
 */
static int
union_readdir(ap)
	struct vop_readdir_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		struct ucred *a_cred;
		int *a_eofflag;
		u_long *a_cookies;
		int a_ncookies;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct thread *td = ap->a_uio->uio_td;
	struct vnode *uvp;
	int error = 0;

	if ((uvp = union_lock_upper(un, td)) != NULLVP) {
		ap->a_vp = uvp;
		error = VCALL(uvp, VOFFSET(vop_readdir), ap);
		union_unlock_upper(uvp, td);
	}
	return(error);
}

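/*
 * union_readlink:
 *
 *	Pass the readlink through to whichever backing vnode exists
 *	(upper preferred), holding it locked for the duration.
 */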
static int
union_readlink(ap)
	struct vop_readlink_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		struct ucred *a_cred;
	} */ *ap;
{
	int error;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct uio *uio = ap->a_uio;
	struct thread *td = uio->uio_td;
	struct vnode *vp;

	vp = union_lock_other(un, td);
	KASSERT(vp != NULL, ("union_readlink: backing vnode missing!"));

	ap->a_vp = vp;
	error = VCALL(vp, VOFFSET(vop_readlink), ap);
	union_unlock_other(vp, td);

	return (error);
}

/*
 * union_inactive:
 *
 *	Called with the vnode locked.  We are expected to unlock the vnode.
 */

static int
union_inactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct thread *td = ap->a_td;
	struct union_node *un = VTOUNION(vp);
	struct vnode **vpp;

	/*
	 * Do nothing (and _don't_ bypass).
	 * Wait to vrele lowervp until reclaim,
	 * so that until then our union_node is in the
	 * cache and reusable.
	 *
	 * NEEDSWORK: Someday, consider inactive'ing
	 * the lowervp and then trying to reactivate it
	 * with capabilities (v_id)
	 * like they do in the name lookup cache code.
	 * That's too much work for now.
	 */

	if (un->un_dircache != 0) {
		for (vpp = un->un_dircache; *vpp != NULLVP; vpp++)
			vrele(*vpp);
		free (un->un_dircache, M_TEMP);
		un->un_dircache = 0;
	}

#if 0
	if ((un->un_flags & UN_ULOCK) && un->un_uppervp) {
		un->un_flags &= ~UN_ULOCK;
		VOP_UNLOCK(un->un_uppervp, 0, td);
	}
#endif

	VOP_UNLOCK(vp, 0, td);

	if ((un->un_flags & UN_CACHED) == 0)
		vgone(vp);

	return (0);
}

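/*
 * union_reclaim:
 *
 *	Final disassociation of the vnode: union_freevp() releases the
 *	references held on the underlying vnodes and frees the union
 *	node itself.
 */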
static int
union_reclaim(ap)
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	union_freevp(ap->a_vp);

	return (0);
}

static int
union_lock(ap)
	struct vop_lock_args *ap;
{
#if 0
	struct vnode *vp = ap->a_vp;
	struct thread *td = ap->a_td;
	int flags = ap->a_flags;
	struct union_node *un;
#endif
	int error;

	error = vop_stdlock(ap);
#if 0
	un = VTOUNION(vp);

	if (error == 0) {
		/*
		 * Lock the upper if it exists and this is an exclusive lock
		 * request.
		 */
		if (un->un_uppervp != NULLVP &&
		    (flags & LK_TYPE_MASK) == LK_EXCLUSIVE) {
			if ((un->un_flags & UN_ULOCK) == 0 && vp->v_usecount) {
				error = vn_lock(un->un_uppervp, flags, td);
				if (error) {
					struct vop_unlock_args uap = { 0 };
					uap.a_vp = ap->a_vp;
					uap.a_flags = ap->a_flags;
					uap.a_td = ap->a_td;
					vop_stdunlock(&uap);
					return (error);
				}
				un->un_flags |= UN_ULOCK;
			}
		}
	}
#endif
	return (error);
}

/*
 * union_unlock:
 *
 *	Unlock our union node.  This also unlocks uppervp.
 */
static int
union_unlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	int error;

	KASSERT((un->un_uppervp == NULL || un->un_uppervp->v_usecount > 0), ("uppervp usecount is 0"));

	error = vop_stdunlock(ap);
#if 0

	/*
	 * If no exclusive locks remain and we are holding an uppervp lock,
	 * remove the uppervp lock.
	 */

	if ((un->un_flags & UN_ULOCK) &&
	    lockstatus(&un->un_lock, NULL) != LK_EXCLUSIVE) {
		un->un_flags &= ~UN_ULOCK;
		VOP_UNLOCK(un->un_uppervp, LK_EXCLUSIVE, td);
	}
#endif
	return(error);
}

/*
 * union_bmap:
 *
 *	There isn't much we can do.  We cannot push through to the real vnode
 *	to get to the underlying device because this will bypass data
 *	cached by the real vnode.
 *
 *	For some reason we cannot return the 'real' vnode either, it seems
 *	to blow up memory maps.
 */

static int
union_bmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t a_bn;
		struct vnode **a_vpp;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap;
{
	return(EOPNOTSUPP);
}

static int
union_print(ap)
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	printf("\ttag VT_UNION, vp=%p, uppervp=%p, lowervp=%p\n",
	       vp, UPPERVP(vp), LOWERVP(vp));
	if (UPPERVP(vp) != NULLVP)
		vprint("union: upper", UPPERVP(vp));
	if (LOWERVP(vp) != NULLVP)
		vprint("union: lower", LOWERVP(vp));

	return (0);
}

static int
union_pathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{
	int error;
	struct thread *td = curthread;		/* XXX */
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp;

	vp = union_lock_other(un, td);
	KASSERT(vp != NULL, ("union_pathconf: backing vnode missing!"));

	ap->a_vp = vp;
	error = VCALL(vp, VOFFSET(vop_pathconf), ap);
	union_unlock_other(vp, td);

	return (error);
}

static int
union_advlock(ap)
	struct vop_advlock_args /* {
		struct vnode *a_vp;
		caddr_t a_id;
		int a_op;
		struct flock *a_fl;
		int a_flags;
	} */ *ap;
{
	register struct vnode *ovp = OTHERVP(ap->a_vp);

	ap->a_vp = ovp;
	return (VCALL(ovp, VOFFSET(vop_advlock), ap));
}


/*
 * XXX - vop_strategy must be hand coded because it has no
 *	 vnode in its arguments.  This goes away with a merged
 *	 VM/buffer cache.
 * YYY - and it is not coherent with anything
 */
static int
union_strategy(ap)
	struct vop_strategy_args /* {
		struct vnode *a_vp;
		struct buf *a_bp;
	} */ *ap;
{
	struct buf *bp = ap->a_bp;
	struct vnode *othervp = OTHERVP(bp->b_vp);

#ifdef DIAGNOSTIC
	if (othervp == NULLVP)
		panic("union_strategy: nil vp");
	if (((bp->b_flags & B_READ) == 0) &&
	    (othervp == LOWERVP(bp->b_vp)))
		panic("union_strategy: writing to lowervp");
#endif
	return (VOP_STRATEGY(othervp, bp));
}

/*
 * Global vfs data structures
 */
vop_t **union_vnodeop_p;
static struct vnodeopv_entry_desc union_vnodeop_entries[] = {
	{ &vop_default_desc,		(vop_t *) vop_defaultop },
	{ &vop_access_desc,		(vop_t *) union_access },
	{ &vop_advlock_desc,		(vop_t *) union_advlock },
	{ &vop_bmap_desc,		(vop_t *) union_bmap },
	{ &vop_close_desc,		(vop_t *) union_close },
	{ &vop_create_desc,		(vop_t *) union_create },
	{ &vop_fsync_desc,		(vop_t *) union_fsync },
	{ &vop_getpages_desc,		(vop_t *) union_getpages },
	{ &vop_putpages_desc,		(vop_t *) union_putpages },
	{ &vop_getattr_desc,		(vop_t *) union_getattr },
	{ &vop_inactive_desc,		(vop_t *) union_inactive },
	{ &vop_ioctl_desc,		(vop_t *) union_ioctl },
	{ &vop_islocked_desc,		(vop_t *) vop_stdislocked },
	{ &vop_lease_desc,		(vop_t *) union_lease },
	{ &vop_link_desc,		(vop_t *) union_link },
	{ &vop_lock_desc,		(vop_t *) union_lock },
	{ &vop_lookup_desc,		(vop_t *) union_lookup },
	{ &vop_mkdir_desc,		(vop_t *) union_mkdir },
	{ &vop_mknod_desc,		(vop_t *) union_mknod },
	{ &vop_mmap_desc,		(vop_t *) union_mmap },
	{ &vop_open_desc,		(vop_t *) union_open },
	{ &vop_pathconf_desc,		(vop_t *) union_pathconf },
	{ &vop_poll_desc,		(vop_t *) union_poll },
	{ &vop_print_desc,		(vop_t *) union_print },
	{ &vop_read_desc,		(vop_t *) union_read },
	{ &vop_readdir_desc,		(vop_t *) union_readdir },
	{ &vop_readlink_desc,		(vop_t *) union_readlink },
	{ &vop_reclaim_desc,		(vop_t *) union_reclaim },
	{ &vop_remove_desc,		(vop_t *) union_remove },
	{ &vop_rename_desc,		(vop_t *) union_rename },
	{ &vop_revoke_desc,		(vop_t *) union_revoke },
	{ &vop_rmdir_desc,		(vop_t *) union_rmdir },
	{ &vop_setattr_desc,		(vop_t *) union_setattr },
	{ &vop_strategy_desc,		(vop_t *) union_strategy },
	{ &vop_symlink_desc,		(vop_t *) union_symlink },
	{ &vop_unlock_desc,		(vop_t *) union_unlock },
	{ &vop_whiteout_desc,		(vop_t *) union_whiteout },
	{ &vop_write_desc,		(vop_t *) union_write },
	{ NULL, NULL }
};
static struct vnodeopv_desc union_vnodeop_opv_desc =
	{ &union_vnodeop_p, union_vnodeop_entries };

VNODEOP_SET(union_vnodeop_opv_desc);