 * Copyright (c) 1994 Jan-Simon Pendry
 * The Regents of the University of California.  All rights reserved.
 * This code is derived from software contributed to Berkeley by
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * @(#)union_subr.c	8.20 (Berkeley) 5/20/95
 * $FreeBSD: src/sys/miscfs/union/union_subr.c,v 1.43.2.2 2001/12/25 01:44:45 dillon Exp $
 * $DragonFly: src/sys/vfs/union/union_subr.c,v 1.16 2004/10/12 19:21:14 dillon Exp $
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/filedesc.h>
#include <sys/module.h>
#include <sys/mount.h>

#include <vm/vm_extern.h>	/* for vnode_pager_setsize */
#include <vm/vm_zone.h>
#include <vm/vm_object.h>	/* for vm cache coherency */
extern int	union_init (void);

/* must be power of two, otherwise change UNION_HASH() */

/* unsigned int ... */
#define UNION_HASH(u, l) \
	(((((uintptr_t) (u)) + ((uintptr_t) l)) >> 8) & (NHASH-1))
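/*
 * The hash above folds the upper and lower vnode pointers into a bucket
 * index: the two pointer values are summed, shifted right by 8 to discard
 * the low-order bits (which are largely identical for allocator-aligned
 * vnodes), and masked with NHASH-1, which is why NHASH must be a power of
 * two.  A NULL layer simply contributes zero, so single-layer nodes still
 * hash to a well-defined bucket.
 */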
static LIST_HEAD(unhead, union_node) unhead[NHASH];
static int unvplock[NHASH];

static void	union_dircache_r (struct vnode *vp, struct vnode ***vppp,
static int	union_list_lock (int ix);
static void	union_list_unlock (int ix);
static int	union_relookup (struct union_mount *um, struct vnode *dvp,
			struct componentname *cnp,
			struct componentname *cn, char *path,
static void	union_updatevp (struct union_node *un,
			struct vnode *uppervp,
			struct vnode *lowervp);
static void	union_newlower (struct union_node *, struct vnode *);
static void	union_newupper (struct union_node *, struct vnode *);
static int	union_copyfile (struct vnode *, struct vnode *,
			struct ucred *, struct thread *);
static int	union_vn_create (struct vnode **, struct union_node *,
static int	union_vn_close (struct vnode *, int, struct ucred *,

	for (i = 0; i < NHASH; i++)
		LIST_INIT(&unhead[i]);
	bzero((caddr_t)unvplock, sizeof(unvplock));
union_list_lock(int ix)
	if (unvplock[ix] & UNVP_LOCKED) {
		unvplock[ix] |= UNVP_WANT;
		(void) tsleep((caddr_t) &unvplock[ix], 0, "unllck", 0);
	unvplock[ix] |= UNVP_LOCKED;

union_list_unlock(int ix)
	unvplock[ix] &= ~UNVP_LOCKED;

	if (unvplock[ix] & UNVP_WANT) {
		unvplock[ix] &= ~UNVP_WANT;
		wakeup((caddr_t) &unvplock[ix]);
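/*
 * Sketch of the intended usage of the two routines above, mirroring the
 * callers later in this file (union_updatevp(), union_allocvp()):
 *
 *	while (union_list_lock(hash))
 *		continue;
 *	... examine or modify unhead[hash] ...
 *	union_list_unlock(hash);
 *
 * A non-zero return from union_list_lock() indicates the caller had to
 * sleep for the chain lock, so the chain may have changed and the lookup
 * is retried from the top.
 */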
 * The uppervp, if not NULL, must be referenced and not locked by us
 * The lowervp, if not NULL, must be referenced.
 *
 * if uppervp and lowervp match pointers already installed, nothing
 * happens. The passed vp's (when matching) are not adjusted. This
 * routine may only be called by union_newupper() and union_newlower().

union_updatevp(struct union_node *un, struct vnode *uppervp,
		struct vnode *lowervp)
	int ohash = UNION_HASH(un->un_uppervp, un->un_lowervp);
	int nhash = UNION_HASH(uppervp, lowervp);
	int docache = (lowervp != NULLVP || uppervp != NULLVP);

	 * Ensure locking is ordered from lower to higher
	 * to avoid deadlocks.
	if (lhash != uhash) {
		while (union_list_lock(lhash))

	while (union_list_lock(uhash))

	if (ohash != nhash || !docache) {
		if (un->un_flags & UN_CACHED) {
			un->un_flags &= ~UN_CACHED;
			LIST_REMOVE(un, un_cache);

		union_list_unlock(ohash);

	if (un->un_lowervp != lowervp) {
		if (un->un_lowervp) {
			vrele(un->un_lowervp);
				free(un->un_path, M_TEMP);
		un->un_lowervp = lowervp;
		un->un_lowersz = VNOVAL;

	if (un->un_uppervp != uppervp) {
			vrele(un->un_uppervp);
		un->un_uppervp = uppervp;
		un->un_uppersz = VNOVAL;

	if (docache && (ohash != nhash)) {
		LIST_INSERT_HEAD(&unhead[nhash], un, un_cache);
		un->un_flags |= UN_CACHED;

	union_list_unlock(nhash);
 * Set a new lowervp. The passed lowervp must be referenced and will be
 * stored in the vp in a referenced state.

union_newlower(struct union_node *un, struct vnode *lowervp)
	union_updatevp(un, un->un_uppervp, lowervp);

 * Set a new uppervp. The passed uppervp must be locked and will be
 * stored in the vp in a locked state. The caller should not unlock

union_newupper(struct union_node *un, struct vnode *uppervp)
	union_updatevp(un, uppervp, un->un_lowervp);

 * Keep track of size changes in the underlying vnodes.
 * If the size changes, then callback to the vm layer
 * giving priority to the upper layer size.

union_newsize(struct vnode *vp, off_t uppersz, off_t lowersz)
	struct union_node *un;

	/* only interested in regular files */
	if (vp->v_type != VREG)

	if ((uppersz != VNOVAL) && (un->un_uppersz != uppersz)) {
		un->un_uppersz = uppersz;

	if ((lowersz != VNOVAL) && (un->un_lowersz != lowersz)) {
		un->un_lowersz = lowersz;

		UDEBUG(("union: %s size now %ld\n",
		    (uppersz != VNOVAL ? "upper" : "lower"), (long)sz));
		vnode_pager_setsize(vp, sz);
 * union_allocvp: allocate a union_node and associate it with a
 * parent union_node and one or two vnodes.
 *
 *	vpp	Holds the returned vnode locked and referenced if no
 *	mp	Holds the mount point. mp may or may not be busied.
 *		allocvp makes no changes to mp.
 *	dvp	Holds the parent union_node to the one we wish to create.
 *		XXX may only be used to traverse an uncopied lowervp-based
 *		dvp may or may not be locked. allocvp makes no changes
 *	upperdvp Holds the parent vnode to uppervp, generally used along
 *		with path component information to create a shadow of
 *		lowervp when uppervp does not exist.
 *		upperdvp is referenced but unlocked on entry, and will be
 *		dereferenced on return.
 *	uppervp	Holds the new uppervp vnode to be stored in the
 *		union_node we are allocating. uppervp is referenced but
 *		not locked, and will be dereferenced on return.
 *	lowervp	Holds the new lowervp vnode to be stored in the
 *		union_node we are allocating. lowervp is referenced but
 *		not locked, and will be dereferenced on return.
 *	cnp	Holds path component information to be coupled with
 *		lowervp and upperdvp to allow unionfs to create an uppervp
 *		later on. Only used if lowervp is valid. The contents
 *		of cnp are only valid for the duration of the call.
 *	docache	Determine whether this node should be entered in the
 *		cache or whether it should be destroyed as soon as possible.
 *
 * all union_nodes are maintained on a singly-linked
 * list. new nodes are only allocated when they cannot
 * be found on this list. entries on the list are
 * removed when the vfs reclaim entry is called.
 *
 * a single lock is kept for the entire list. this is
 * needed because the getnewvnode() function can block
 * waiting for a vnode to become free, in which case there
 * may be more than one process trying to get the same
 * vnode. this lock is only taken if we are going to
 * call getnewvnode, since the kernel itself is single-threaded.
 *
 * if an entry is found on the list, then call vget() to
 * take a reference. this is done because there may be
 * zero references to it and so it needs to be removed from
 * the vnode free list.
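 *
 * As an illustrative sketch (mirroring the call in union_dircache()
 * below), wrapping a single, already-referenced upper-layer vnode looks
 * like:
 *
 *	error = union_allocvp(&nvp, vp->v_mount, NULLVP, NULLVP, NULL,
 *		uppervp, NULLVP, 0);
 *
 * where uppervp carries a reference supplied by the caller; on success
 * nvp is returned locked and referenced and the uppervp reference has
 * been consumed, per the rules above.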
union_allocvp(struct vnode **vpp,
	struct vnode *dvp,		/* parent union vnode */
	struct vnode *upperdvp,		/* parent vnode of uppervp */
	struct componentname *cnp,	/* may be null */
	struct vnode *uppervp,		/* may be null */
	struct vnode *lowervp,		/* may be null */
	struct union_node *un = 0;
	struct union_mount *um = MOUNTTOUNIONMOUNT(mp);
	struct thread *td = (cnp) ? cnp->cn_td : curthread; /* XXX */

	if (uppervp == NULLVP && lowervp == NULLVP)
		panic("union: unidentifiable allocation");

	if (uppervp && lowervp && (uppervp->v_type != lowervp->v_type)) {

	/* detect the root vnode (and aliases) */
	if ((uppervp == um->um_uppervp) &&
	    ((lowervp == NULLVP) || lowervp == um->um_lowervp)) {
		if (lowervp == NULLVP) {
			lowervp = um->um_lowervp;
			if (lowervp != NULLVP)
	} else for (try = 0; try < 3; try++) {
		if (lowervp == NULLVP)
		hash = UNION_HASH(uppervp, lowervp);

		if (uppervp == NULLVP)
		hash = UNION_HASH(uppervp, NULLVP);

		if (lowervp == NULLVP)
		hash = UNION_HASH(NULLVP, lowervp);

		while (union_list_lock(hash))

		for (un = unhead[hash].lh_first; un != 0;
		    un = un->un_cache.le_next) {
			if ((un->un_lowervp == lowervp ||
			    un->un_lowervp == NULLVP) &&
			    (un->un_uppervp == uppervp ||
			    un->un_uppervp == NULLVP) &&
			    (UNIONTOV(un)->v_mount == mp)) {
				if (vget(UNIONTOV(un), LK_EXCLUSIVE|LK_SLEEPFAIL,
				    cnp ? cnp->cn_td : NULL)) {
					union_list_unlock(hash);

		union_list_unlock(hash);

	 * Obtain a lock on the union_node. Everything is unlocked
	 * except for dvp, so check that case. If they match, our
	 * new un is already locked. Otherwise we have to lock our
	 *
	 * A potential deadlock situation occurs when we are holding
	 * one lock while trying to get another. We must follow
	 * strict ordering rules to avoid it. We try to locate dvp
	 * by scanning up from un_vnode, since the most likely
	 * scenario is un being under dvp.
	if (dvp && un->un_vnode != dvp) {
		struct vnode *scan = un->un_vnode;

			scan = VTOUNION(scan)->un_pvp;
		} while (scan && scan->v_tag == VT_UNION && scan != dvp);
			/*
			 * our new un is above dvp (we never saw dvp
			 * while moving up the tree).
			 */
			VOP_UNLOCK(dvp, 0, td);
			error = vn_lock(un->un_vnode, LK_EXCLUSIVE, td);
			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, td);
			/*
			 * our new un is under dvp
			 */
			error = vn_lock(un->un_vnode, LK_EXCLUSIVE, td);
	} else if (dvp == NULLVP) {
		/*
		 * dvp is NULL, we need to lock un.
		 */
		error = vn_lock(un->un_vnode, LK_EXCLUSIVE, td);
		/*
		 * dvp == un->un_vnode, we are already locked.
		 */
	 * At this point, the union_node is locked and referenced.
	 *
	 * uppervp is locked and referenced or NULL, lowervp is
	 * referenced or NULL.
	UDEBUG(("Modify existing un %p vn %p upper %p(refs %d) -> %p(refs %d)\n",
	    un, un->un_vnode, un->un_uppervp,
	    (un->un_uppervp ? un->un_uppervp->v_usecount : -99),
	    (uppervp ? uppervp->v_usecount : -99)

	if (uppervp != un->un_uppervp) {
		KASSERT(uppervp == NULL || uppervp->v_usecount > 0, ("union_allocvp: too few refs %d (at least 1 required) on uppervp", uppervp->v_usecount));
		union_newupper(un, uppervp);
	} else if (uppervp) {
		KASSERT(uppervp->v_usecount > 1, ("union_allocvp: too few refs %d (at least 2 required) on uppervp", uppervp->v_usecount));

	/*
	 * Save information about the lower layer.
	 * This needs to keep track of pathname
	 * and directory information which union_vn_create
	 */
	if (lowervp != un->un_lowervp) {
		union_newlower(un, lowervp);
		if (cnp && (lowervp != NULLVP)) {
			un->un_path = malloc(cnp->cn_namelen+1,
			bcopy(cnp->cn_nameptr, un->un_path,
			un->un_path[cnp->cn_namelen] = '\0';
	} else if (lowervp) {

	if (upperdvp != un->un_dirvp) {
		un->un_dirvp = upperdvp;
	} else if (upperdvp) {
	/*
	 * otherwise lock the vp list while we call getnewvnode
	 * since that can block.
	 */
	hash = UNION_HASH(uppervp, lowervp);

	if (union_list_lock(hash))

	/*
	 * Create new node rather than replace old node
	 */
	error = getnewvnode(VT_UNION, mp, mp->mnt_vn_ops, vpp, 0, 0);

	/*
	 * If an error occurs clear out vnodes.
	 */
	MALLOC((*vpp)->v_data, void *, sizeof(struct union_node),
	(*vpp)->v_flag |= vflag;
		(*vpp)->v_type = uppervp->v_type;
		(*vpp)->v_type = lowervp->v_type;

	bzero(un, sizeof(*un));

	un->un_uppervp = uppervp;
	un->un_uppersz = VNOVAL;
	un->un_lowervp = lowervp;
	un->un_lowersz = VNOVAL;
	un->un_dirvp = upperdvp;
	un->un_pvp = dvp;		/* only parent dir in new allocation */

	if (cnp && (lowervp != NULLVP)) {
		un->un_path = malloc(cnp->cn_namelen+1, M_TEMP, M_WAITOK);
		bcopy(cnp->cn_nameptr, un->un_path, cnp->cn_namelen);
		un->un_path[cnp->cn_namelen] = '\0';

		LIST_INSERT_HEAD(&unhead[hash], un, un_cache);
		un->un_flags |= UN_CACHED;

	/*
	 * locked refd vpp is returned
	 */
	union_list_unlock(hash);
union_freevp(struct vnode *vp)
	struct union_node *un = VTOUNION(vp);

	if (un->un_flags & UN_CACHED) {
		un->un_flags &= ~UN_CACHED;
		LIST_REMOVE(un, un_cache);
	if (un->un_pvp != NULLVP) {
	if (un->un_uppervp != NULLVP) {
		vrele(un->un_uppervp);
		un->un_uppervp = NULL;
	if (un->un_lowervp != NULLVP) {
		vrele(un->un_lowervp);
		un->un_lowervp = NULL;
	if (un->un_dirvp != NULLVP) {
		free(un->un_path, M_TEMP);
 * copyfile. copy the vnode (fvp) to the vnode (tvp)
 * using a sequence of reads and writes. both (fvp)
 * and (tvp) are locked on entry and exit.
 *
 * fvp and tvp are both exclusive locked on call, but their refcounts
 * haven't been bumped at all.
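 *
 * Each pass of the loop below resets the iovec to cover the full MAXBSIZE
 * scratch buffer, issues a VOP_READ() from fvp at the current offset, and
 * then calls VOP_WRITE() on tvp until every byte that was read has been
 * written out (uio_resid reports how much of each transfer was left
 * undone).  A read that returns zero bytes signals EOF and ends the copy.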
union_copyfile(struct vnode *fvp, struct vnode *tvp, struct ucred *cred,

	/*
	 * allocate a buffer of size MAXBSIZE.
	 * loop doing reads and writes, keeping track
	 * of the current uio offset.
	 * give up at the first sign of trouble.
	 */
	bzero(&uio, sizeof(uio));

	uio.uio_segflg = UIO_SYSSPACE;

	VOP_LEASE(fvp, td, cred, LEASE_READ);
	VOP_LEASE(tvp, td, cred, LEASE_WRITE);

	buf = malloc(MAXBSIZE, M_TEMP, M_WAITOK);

	/* ugly loop follows... */
		off_t offset = uio.uio_offset;

		iov.iov_len = MAXBSIZE;
		uio.uio_resid = iov.iov_len;
		uio.uio_rw = UIO_READ;

		if ((error = VOP_READ(fvp, &uio, 0, cred)) != 0)

		/*
		 * Get bytes read, handle read eof case and setup for
		 */
		if ((count = MAXBSIZE - uio.uio_resid) == 0)

		/*
		 * Write until an error occurs or our buffer has been
		 * exhausted, then update the offset for the next read.
		 */
		while (bufoffset < count) {
			iov.iov_base = buf + bufoffset;
			iov.iov_len = count - bufoffset;
			uio.uio_offset = offset + bufoffset;
			uio.uio_rw = UIO_WRITE;
			uio.uio_resid = iov.iov_len;

			if ((error = VOP_WRITE(tvp, &uio, 0, cred)) != 0)
			bufoffset += (count - bufoffset) - uio.uio_resid;

		uio.uio_offset = offset + bufoffset;
	} while (error == 0);
 * un's vnode is assumed to be locked on entry and remains locked on exit.

union_copyup(struct union_node *un, int docopy, struct ucred *cred,
	struct vnode *lvp, *uvp;

	/*
	 * If the user does not have read permission, the vnode should not
	 * be copied to upper layer.
	 */
	vn_lock(un->un_lowervp, LK_EXCLUSIVE | LK_RETRY, td);
	error = VOP_ACCESS(un->un_lowervp, VREAD, cred, td);
	VOP_UNLOCK(un->un_lowervp, 0, td);

	error = union_vn_create(&uvp, un, td);

	lvp = un->un_lowervp;

	KASSERT(uvp->v_usecount > 0, ("copy: uvp refcount 0: %d", uvp->v_usecount));
		/*
		 * XXX - should not ignore errors
		 */
		vn_lock(lvp, LK_EXCLUSIVE | LK_RETRY, td);
		error = VOP_OPEN(lvp, FREAD, cred, td);
		if (error == 0 && vn_canvmio(lvp) == TRUE)
			error = vfs_object_create(lvp, td);
			error = union_copyfile(lvp, uvp, cred, td);
		VOP_UNLOCK(lvp, 0, td);
		(void) VOP_CLOSE(lvp, FREAD, td);

		UDEBUG(("union: copied up %s\n", un->un_path));

	VOP_UNLOCK(uvp, 0, td);
	union_newupper(un, uvp);
	KASSERT(uvp->v_usecount > 0, ("copy: uvp refcount 0: %d", uvp->v_usecount));
	union_vn_close(uvp, FWRITE, cred, td);
	KASSERT(uvp->v_usecount > 0, ("copy: uvp refcount 0: %d", uvp->v_usecount));
	/*
	 * Subsequent IOs will go to the top layer, so
	 * call close on the lower vnode and open on the
	 * upper vnode to ensure that the filesystem keeps
	 * its reference counts right. This doesn't do
	 * the right thing with (cred) and (FREAD) though.
	 * Ignoring error returns is not right, either.
	 */
		for (i = 0; i < un->un_openl; i++) {
			(void) VOP_CLOSE(lvp, FREAD, td);
			(void) VOP_OPEN(uvp, FREAD, cred, td);

	if (vn_canvmio(uvp) == TRUE)
		error = vfs_object_create(uvp, td);
 * dvp should be locked on entry and will be locked on return. No
 * net change in the ref count will occur.
 *
 * If an error is returned, *vpp will be invalid, otherwise it
 * will hold a locked, referenced vnode. If *vpp == dvp then
 * remember that only one exclusive lock is held.

union_relookup(struct union_mount *um, struct vnode *dvp, struct vnode **vpp,
	struct componentname *cnp, struct componentname *cn, char *path,

	/*
	 * A new componentname structure must be faked up because
	 * there is no way to know where the upper level cnp came
	 * from or what it is being used for. This must duplicate
	 * some of the work done by NDINIT, some of the work done
	 * by namei, some of the work done by lookup and some of
	 * the work done by VOP_LOOKUP when given a CREATE flag.
	 * Conclusion: Horrible.
	 */
	cn->cn_namelen = pathlen;
	cn->cn_pnbuf = zalloc(namei_zone);
	bcopy(path, cn->cn_pnbuf, cn->cn_namelen);
	cn->cn_pnbuf[cn->cn_namelen] = '\0';

	cn->cn_nameiop = NAMEI_CREATE;
	cn->cn_flags = (CNP_LOCKPARENT | CNP_LOCKLEAF | CNP_HASBUF |
			CNP_SAVENAME | CNP_ISLASTCN);
	cn->cn_td = cnp->cn_td;
	if (um->um_op == UNMNT_ABOVE)
		cn->cn_cred = cnp->cn_cred;
		cn->cn_cred = um->um_cred;
	cn->cn_nameptr = cn->cn_pnbuf;
	cn->cn_consume = cnp->cn_consume;

	VOP_UNLOCK(dvp, 0, cnp->cn_td);

	/*
	 * Pass dvp unlocked and referenced on call to relookup().
	 * If an error occurs, dvp will be returned unlocked and dereferenced.
	 */
	if ((error = relookup(dvp, vpp, cn)) != 0) {
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, cnp->cn_td);

	/*
	 * If no error occurs, dvp will be returned locked with the reference
	 * left as before, and vpp will be returned referenced and locked.
	 *
	 * We want to return with dvp as it was passed to us, so we get
	 * rid of our reference.
	 */
 * Create a shadow directory in the upper layer.
 * The new vnode is returned locked.
 *
 * (um) points to the union mount structure for access to the
 *	mounting process's credentials.
 * (dvp) is the directory in which to create the shadow directory,
 *	it is locked (but not ref'd) on entry and return.
 * (cnp) is the componentname to be created.
 * (vpp) is the returned newly created shadow directory, which
 *	is returned locked and ref'd

union_mkshadow(struct union_mount *um, struct vnode *dvp,
	struct componentname *cnp, struct vnode **vpp)
	struct thread *td = cnp->cn_td;
	struct componentname cn;

	error = union_relookup(um, dvp, vpp, cnp, &cn,
	    cnp->cn_nameptr, cnp->cn_namelen);

	if (cn.cn_flags & CNP_HASBUF) {
		zfree(namei_zone, cn.cn_pnbuf);
		cn.cn_flags &= ~CNP_HASBUF;

	/*
	 * policy: when creating the shadow directory in the
	 * upper layer, create it owned by the user who did
	 * the mount, group from parent directory, and mode
	 * 777 modified by umask (ie mostly identical to the
	 * mkdir syscall). (jsp, kb)
	 */
	va.va_mode = um->um_cmode;

	/* VOP_LEASE: dvp is locked */
	VOP_LEASE(dvp, td, cn.cn_cred, LEASE_WRITE);

	error = VOP_MKDIR(dvp, NCPNULL, vpp, &cn, &va);
	if (cn.cn_flags & CNP_HASBUF) {
		zfree(namei_zone, cn.cn_pnbuf);
		cn.cn_flags &= ~CNP_HASBUF;
 * Create a whiteout entry in the upper layer.
 *
 * (um) points to the union mount structure for access to the
 *	mounting process's credentials.
 * (dvp) is the directory in which to create the whiteout.
 *	it is locked on entry and return.
 * (cnp) is the componentname to be created.

union_mkwhiteout(struct union_mount *um, struct vnode *dvp,
	struct componentname *cnp, char *path)
	struct thread *td = cnp->cn_td;
	struct componentname cn;

	KKASSERT(td->td_proc);
	cred = td->td_proc->p_ucred;

	error = union_relookup(um, dvp, &wvp, cnp, &cn, path, strlen(path));

	if (cn.cn_flags & CNP_HASBUF) {
		zfree(namei_zone, cn.cn_pnbuf);
		cn.cn_flags &= ~CNP_HASBUF;

	/* VOP_LEASE: dvp is locked */
	VOP_LEASE(dvp, td, cred, LEASE_WRITE);

	error = VOP_WHITEOUT(dvp, NCPNULL, &cn, NAMEI_CREATE);
	if (cn.cn_flags & CNP_HASBUF) {
		zfree(namei_zone, cn.cn_pnbuf);
		cn.cn_flags &= ~CNP_HASBUF;
 * union_vn_create: creates and opens a new shadow file
 * on the upper union layer. this function is similar
 * in spirit to calling vn_open but it avoids calling namei().
 * the problem with calling namei is that a) it locks too many
 * things, and b) it doesn't start at the "right" directory,
 * whereas relookup is told where to start.
 *
 * On entry, the vnode associated with un is locked. It remains locked
 *
 * If no error occurs, *vpp contains a locked referenced vnode for your
 * use. If an error occurs *vpp is undefined.

union_vn_create(struct vnode **vpp, struct union_node *un, struct thread *td)
	struct vattr *vap = &vat;
	int fmode = FFLAGS(O_WRONLY|O_CREAT|O_TRUNC|O_EXCL);
	struct componentname cn;

	KKASSERT(td->td_proc);
	cred = td->td_proc->p_ucred;
	cmode = UN_FILEMODE & ~td->td_proc->p_fd->fd_cmask;

	/*
	 * Build a new componentname structure (for the same
	 * reasons outlined in union_mkshadow).
	 * The difference here is that the file is owned by
	 * the current user, rather than by the person who
	 * did the mount, since the current user needs to be
	 * able to write the file (that's why it is being
	 * copied in the first place).
	 */
	cn.cn_namelen = strlen(un->un_path);
	cn.cn_pnbuf = zalloc(namei_zone);
	bcopy(un->un_path, cn.cn_pnbuf, cn.cn_namelen+1);
	cn.cn_nameiop = NAMEI_CREATE;
	cn.cn_flags = (CNP_LOCKPARENT | CNP_LOCKLEAF | CNP_HASBUF |
			CNP_SAVENAME | CNP_ISLASTCN);
	cn.cn_nameptr = cn.cn_pnbuf;

	/*
	 * Pass dvp unlocked and referenced on call to relookup().
	 * If an error occurs, dvp will be returned unlocked and dereferenced.
	 */
	error = relookup(un->un_dirvp, &vp, &cn);

	/*
	 * If no error occurs, dvp will be returned locked with the reference
	 * left as before, and vpp will be returned referenced and locked.
	 */
	if (cn.cn_flags & CNP_HASBUF) {
		zfree(namei_zone, cn.cn_pnbuf);
		cn.cn_flags &= ~CNP_HASBUF;
	if (vp == un->un_dirvp)
	/*
	 * Good - there was no race to create the file
	 * so go ahead and create it. The permissions
	 * on the file will be 0666 modified by the
	 * current user's umask. Access to the file, while
	 * it is unioned, will require access to the top *and*
	 * bottom files. Access when not unioned will simply
	 * require access to the top-level file.
	 * TODO: confirm choice of access permissions.
	 */
	vap->va_type = VREG;
	vap->va_mode = cmode;
	VOP_LEASE(un->un_dirvp, td, cred, LEASE_WRITE);
	error = VOP_CREATE(un->un_dirvp, NCPNULL, &vp, &cn, vap);
	if (cn.cn_flags & CNP_HASBUF) {
		zfree(namei_zone, cn.cn_pnbuf);
		cn.cn_flags &= ~CNP_HASBUF;

	error = VOP_OPEN(vp, fmode, cred, td);
	if (error == 0 && vn_canvmio(vp) == TRUE)
		error = vfs_object_create(vp, td);

union_vn_close(struct vnode *vp, int fmode, struct ucred *cred,
	return (VOP_CLOSE(vp, fmode, td));
 * union_removed_upper:
 *
 *	called with union_node unlocked. XXX

union_removed_upper(struct union_node *un)
	struct thread *td = curthread;	/* XXX */

	/*
	 * Do not set the uppervp to NULLVP. If lowervp is NULLVP, the
	 * union node will have neither uppervp nor lowervp. We remove
	 * the union node from cache, so that it will not be referenced.
	 */
	union_newupper(un, NULLVP);
	if (un->un_dircache != 0) {
		for (vpp = un->un_dircache; *vpp != NULLVP; vpp++)
		free(un->un_dircache, M_TEMP);
		un->un_dircache = 0;

	if (un->un_flags & UN_CACHED) {
		un->un_flags &= ~UN_CACHED;
		LIST_REMOVE(un, un_cache);
 * determine whether a whiteout is needed
 * during a remove/rmdir operation.
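 *
 * In short: a whiteout is needed when a lower vnode exists (and would
 * otherwise show through once the upper object is removed), or when the
 * upper directory is marked OPAQUE, per the VOP_GETATTR check below.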
union_dowhiteout(struct union_node *un, struct ucred *cred, struct thread *td)

	if (un->un_lowervp != NULLVP)

	if (VOP_GETATTR(un->un_uppervp, &va, td) == 0 &&
	    (va.va_flags & OPAQUE))

union_dircache_r(struct vnode *vp, struct vnode ***vppp, int *cntp)
	struct union_node *un;

	if (vp->v_tag != VT_UNION) {
				panic("union: dircache table too small");

		if (un->un_uppervp != NULLVP)
			union_dircache_r(un->un_uppervp, vppp, cntp);
		if (un->un_lowervp != NULLVP)
			union_dircache_r(un->un_lowervp, vppp, cntp);
union_dircache(struct vnode *vp, struct thread *td)
	struct vnode **dircache;
	struct union_node *un;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	dircache = VTOUNION(vp)->un_dircache;

	if (dircache == NULL) {
		union_dircache_r(vp, 0, &cnt);
		dircache = malloc(cnt * sizeof(struct vnode *),
		union_dircache_r(vp, &vpp, &cnt);

			if (*vpp++ == VTOUNION(vp)->un_uppervp)
		} while (*vpp != NULLVP);

	/*vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY, td);*/
	UDEBUG(("ALLOCVP-3 %p ref %d\n", *vpp, (*vpp ? (*vpp)->v_usecount : -99)));

	error = union_allocvp(&nvp, vp->v_mount, NULLVP, NULLVP, NULL, *vpp, NULLVP, 0);
	UDEBUG(("ALLOCVP-3B %p ref %d\n", nvp, (*vpp ? (*vpp)->v_usecount : -99)));

	VTOUNION(vp)->un_dircache = 0;
	un->un_dircache = dircache;

	VOP_UNLOCK(vp, 0, td);
 * Guarantee coherency with the VM cache by invalidating any clean VM pages
 * associated with this write and updating any dirty VM pages. Since our
 * vnode is locked, other processes will not be able to read the pages in
 * again until after our write completes.
 *
 * We also have to be coherent with reads, by flushing any pending dirty
 * pages prior to issuing the read.
 *
 * XXX this is somewhat of a hack at the moment. To support this properly
 * we would have to be able to run VOP_READ and VOP_WRITE through the VM
 * cache. Then we wouldn't need to worry about coherency.

union_vm_coherency(struct vnode *vp, struct uio *uio, int cleanfls)

	if ((object = vp->v_object) == NULL)

	pgoff = uio->uio_offset & PAGE_MASK;
	pstart = uio->uio_offset / PAGE_SIZE;
	pend = pstart + (uio->uio_resid + pgoff + PAGE_MASK) / PAGE_SIZE;
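	/*
	 * Worked example of the page-range computation above (illustrative
	 * only, assuming the common 4K PAGE_SIZE): a 10000 byte request at
	 * offset 6000 gives pgoff = 1904, pstart = 1, and
	 * pend = 1 + (10000 + 1904 + 4095) / 4096 = 4, covering pages 1
	 * through 3, exactly those touched by bytes 6000-15999.
	 */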
		vm_object_page_clean(object, pstart, pend, OBJPC_SYNC);
		vm_object_page_remove(object, pstart, pend, TRUE);
 * Module glue to remove #ifdef UNION from vfs_syscalls.c

union_dircheck(struct thread *td, struct vnode **vp, struct file *fp)

	if ((*vp)->v_tag == VT_UNION) {
		lvp = union_dircache(*vp, td);
		if (lvp != NULLVP) {
			/*
			 * If the directory is opaque,
			 * then don't show lower entries
			 */
			error = VOP_GETATTR(*vp, &va, td);
			if (va.va_flags & OPAQUE) {

		if (lvp != NULLVP) {
			error = VOP_OPEN(lvp, FREAD, fp->f_cred, td);
			if (error == 0 && vn_canvmio(lvp) == TRUE)
				error = vfs_object_create(lvp, td);

			VOP_UNLOCK(lvp, 0, td);
			fp->f_data = (caddr_t) lvp;
			error = vn_close(*vp, FREAD, td);

			return -1;	/* goto unionread */

union_modevent(module_t mod, int type, void *data)
		union_dircheckp = union_dircheck;
		union_dircheckp = NULL;

static moduledata_t union_mod = {

DECLARE_MODULE(union_dircheck, union_mod, SI_SUB_VFS, SI_ORDER_ANY);