/*
 * Copyright (c) 2011-2014 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * and Venkatesh Srinivas <vsrinivas@dragonflybsd.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>

#include "hammer2.h"

#define INODE_DEBUG	0

RB_GENERATE2(hammer2_inode_tree, hammer2_inode, rbnode, hammer2_inode_cmp,
	     hammer2_tid_t, meta.inum);

int
hammer2_inode_cmp(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
{
	if (ip1->meta.inum < ip2->meta.inum)
		return (-1);
	if (ip1->meta.inum > ip2->meta.inum)
		return (1);
	return (0);
}

/*
 * HAMMER2 offers shared and exclusive locks on inodes.  Pass a mask of
 * flags for options:
 *
 *	- pass HAMMER2_RESOLVE_SHARED if a shared lock is desired.  The
 *	  inode locking function will automatically set the RDONLY flag.
 *
 *	- pass HAMMER2_RESOLVE_ALWAYS if you need the inode's meta-data.
 *	  Most front-end inode locks do.
 *
 *	- pass HAMMER2_RESOLVE_NEVER if you do not want to require that
 *	  the inode data be resolved.  This is used by the syncthr because
 *	  it can run on an unresolved/out-of-sync cluster, and also by the
 *	  vnode reclamation code to avoid unnecessary I/O (particularly when
 *	  disposing of hundreds of thousands of cached vnodes).
 *
 * The inode locking function locks the inode itself, resolves any stale
 * chains in the inode's cluster, and allocates a fresh copy of the
 * cluster with 1 ref and all the underlying chains locked.
 *
 * ip->cluster will be stable while the inode is locked.
 *
 * NOTE: We don't combine the inode/chain lock because putting away an
 *	 inode would otherwise confuse multiple lock holders of the inode.
 *
 * NOTE: In-memory inodes always point to hardlink targets (the actual file),
 *	 and never point to a hardlink pointer.
 *
 * NOTE: If caller passes HAMMER2_RESOLVE_RDONLY the exclusive locking code
 *	 will feel free to reduce the chain set in the cluster as an
 *	 optimization.  It will still be validated against the quorum if
 *	 appropriate, but the optimization might be able to reduce data
 *	 accesses to one node.  This flag is automatically set if the inode
 *	 is locked with HAMMER2_RESOLVE_SHARED.
 */
void
hammer2_inode_lock(hammer2_inode_t *ip, int how)
{
	hammer2_inode_ref(ip);

	/*
	 * Inode structure mutex
	 */
	if (how & HAMMER2_RESOLVE_SHARED) {
		/*how |= HAMMER2_RESOLVE_RDONLY; not used */
		hammer2_mtx_sh(&ip->lock);
	} else {
		hammer2_mtx_ex(&ip->lock);
	}
}

/*
 * Create a locked copy of ip->cluster.  Note that the copy will have a
 * ref on the cluster AND its chains and we don't want a second ref to
 * either when we lock it.
 *
 * Exclusive inode locks set the template focus chain in (ip)
 * as a hint.  Cluster locks can ALWAYS replace the focus in the
 * working copy if the hint does not work out, so beware.
 */
hammer2_cluster_t *
hammer2_inode_cluster(hammer2_inode_t *ip, int how)
{
	hammer2_cluster_t *cluster;

	cluster = hammer2_cluster_copy(&ip->cluster);
	hammer2_cluster_lock(cluster, how);
	hammer2_cluster_resolve(cluster);

	/*
	 * cluster->focus will be set if resolving RESOLVE_ALWAYS, but
	 * only update the cached focus in the inode structure when taking
	 * out an exclusive lock.
	 */
	if ((how & HAMMER2_RESOLVE_SHARED) == 0)
		ip->cluster.focus = cluster->focus;

	return cluster;
}

/*
 * Select a chain out of an inode's cluster and lock it.
 *
 * The inode does not have to be locked.
 */
hammer2_chain_t *
hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how)
{
	hammer2_chain_t *chain;

	hammer2_spin_sh(&ip->cluster_spin);
	if (clindex >= ip->cluster.nchains)
		chain = NULL;
	else
		chain = ip->cluster.array[clindex].chain;
	if (chain) {
		hammer2_chain_ref(chain);
		hammer2_spin_unsh(&ip->cluster_spin);
		hammer2_chain_lock(chain, how);
	} else {
		hammer2_spin_unsh(&ip->cluster_spin);
	}
	return chain;
}

void
hammer2_inode_unlock(hammer2_inode_t *ip, hammer2_cluster_t *cluster)
{
	if (cluster) {
		hammer2_cluster_unlock(cluster);
		hammer2_cluster_drop(cluster);
	}
	hammer2_mtx_unlock(&ip->lock);
	hammer2_inode_drop(ip);
}
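
/*
 * Illustrative usage (editor's sketch, not part of the original source):
 * a typical front-end sequence pairs the primitives above.  The caller
 * ends up holding the inode mutex plus a locked, referenced cluster copy,
 * and hammer2_inode_unlock() puts both away:
 *
 *	hammer2_cluster_t *cluster;
 *
 *	hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
 *	cluster = hammer2_inode_cluster(ip, HAMMER2_RESOLVE_ALWAYS);
 *	... access hammer2_cluster_rdata(cluster)->ipdata ...
 *	hammer2_inode_unlock(ip, cluster);
 *
 * Passing a NULL cluster to hammer2_inode_unlock() releases only the
 * inode lock and its ref.
 */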

/*
 * Temporarily release a lock held shared or exclusive.  Caller must
 * hold the lock shared or exclusive on call and lock will be released
 * afterwards.
 *
 * Restore a lock that was temporarily released.
 */
hammer2_mtx_state_t
hammer2_inode_lock_temp_release(hammer2_inode_t *ip)
{
	return hammer2_mtx_temp_release(&ip->lock);
}

void
hammer2_inode_lock_temp_restore(hammer2_inode_t *ip, hammer2_mtx_state_t ostate)
{
	hammer2_mtx_temp_restore(&ip->lock, ostate);
}

/*
 * Upgrade a shared inode lock to exclusive and return.  If the inode lock
 * is already held exclusively this is a NOP.
 *
 * The caller MUST hold the inode lock either shared or exclusive on call
 * and will own the lock exclusively on return.
 *
 * Returns non-zero if the lock was already exclusive prior to the upgrade.
 */
int
hammer2_inode_lock_upgrade(hammer2_inode_t *ip)
{
	int wasexclusive;

	if (mtx_islocked_ex(&ip->lock)) {
		wasexclusive = 1;
	} else {
		hammer2_mtx_unlock(&ip->lock);
		hammer2_mtx_ex(&ip->lock);
		wasexclusive = 0;
	}
	return wasexclusive;
}

/*
 * Downgrade an inode lock from exclusive to shared only if the inode
 * lock was previously shared.  If the inode lock was previously exclusive,
 * this is a NOP.
 */
void
hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int wasexclusive)
{
	if (wasexclusive == 0)
		mtx_downgrade(&ip->lock);
}
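
/*
 * Illustrative usage (editor's sketch, not part of the original source):
 * the upgrade/downgrade helpers are designed to be used as a pair, so a
 * routine entered with an unknown lock state can restore whatever the
 * caller held:
 *
 *	int wasexclusive;
 *
 *	wasexclusive = hammer2_inode_lock_upgrade(ip);
 *	... code which requires the exclusive lock ...
 *	hammer2_inode_lock_downgrade(ip, wasexclusive);
 *
 * hammer2_igetv() below uses exactly this pattern around its vnode
 * association check.
 */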

/*
 * Lookup an inode by inode number
 */
hammer2_inode_t *
hammer2_inode_lookup(hammer2_pfs_t *pmp, hammer2_tid_t inum)
{
	hammer2_inode_t *ip;

	hammer2_spin_ex(&pmp->inum_spin);
	ip = RB_LOOKUP(hammer2_inode_tree, &pmp->inum_tree, inum);
	if (ip)
		hammer2_inode_ref(ip);
	hammer2_spin_unex(&pmp->inum_spin);
	return (ip);
}
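
/*
 * Illustrative usage (editor's sketch, not part of the original source):
 * a successful lookup returns a referenced but unlocked inode, so the
 * caller owes a matching drop:
 *
 *	hammer2_inode_t *ip;
 *
 *	ip = hammer2_inode_lookup(pmp, inum);
 *	if (ip) {
 *		... lock and use the inode ...
 *		hammer2_inode_drop(ip);
 *	}
 */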

/*
 * Adding a ref to an inode is only legal if the inode already has at least
 * one ref.
 *
 * (can be called with spinlock held)
 */
void
hammer2_inode_ref(hammer2_inode_t *ip)
{
	atomic_add_int(&ip->refs, 1);
}

/*
 * Drop an inode reference, freeing the inode when the last reference goes
 * away.
 */
void
hammer2_inode_drop(hammer2_inode_t *ip)
{
	hammer2_pfs_t *pmp;
	hammer2_inode_t *pip;
	u_int refs;

	while (ip) {
		refs = ip->refs;
		cpu_ccfence();
		if (refs == 1) {
			/*
			 * Transition to zero, must interlock with
			 * the inode inumber lookup tree (if applicable).
			 * It should not be possible for anyone to race
			 * the transition to 0.
			 */
			pmp = ip->pmp;
			KKASSERT(pmp);
			hammer2_spin_ex(&pmp->inum_spin);

			if (atomic_cmpset_int(&ip->refs, 1, 0)) {
				KKASSERT(hammer2_mtx_refs(&ip->lock) == 0);
				if (ip->flags & HAMMER2_INODE_ONRBTREE) {
					atomic_clear_int(&ip->flags,
						     HAMMER2_INODE_ONRBTREE);
					RB_REMOVE(hammer2_inode_tree,
						  &pmp->inum_tree, ip);
				}
				hammer2_spin_unex(&pmp->inum_spin);

				pip = ip->pip;
				ip->pip = NULL;
				ip->pmp = NULL;

				/*
				 * Cleaning out ip->cluster isn't entirely
				 * trivial.
				 */
				hammer2_inode_repoint(ip, NULL, NULL);

				/*
				 * We have to drop pip (if non-NULL) to
				 * dispose of our implied reference from
				 * ip->pip.  We can simply loop on it.
				 */
				kfree(ip, pmp->minode);
				atomic_add_long(&pmp->inmem_inodes, -1);
				ip = pip;
				/* continue with pip (can be NULL) */
			} else {
				hammer2_spin_unex(&ip->pmp->inum_spin);
			}
		} else {
			/*
			 * Non zero transition
			 */
			if (atomic_cmpset_int(&ip->refs, refs, refs - 1))
				break;
		}
	}
}

/*
 * Get the vnode associated with the given inode, allocating the vnode if
 * necessary.  The vnode will be returned exclusively locked.
 *
 * The caller must lock the inode (shared or exclusive).
 *
 * Great care must be taken to avoid deadlocks and vnode acquisition/reclaim
 * races.
 */
struct vnode *
hammer2_igetv(hammer2_inode_t *ip, int *errorp)
{
	hammer2_pfs_t *pmp;
	struct vnode *vp;

	pmp = ip->pmp;
	KKASSERT(pmp != NULL);
	*errorp = 0;

	for (;;) {
		/*
		 * Attempt to reuse an existing vnode assignment.  It is
		 * possible to race a reclaim so the vget() may fail.  The
		 * inode must be unlocked during the vget() to avoid a
		 * deadlock against a reclaim.
		 */
		int wasexclusive;

		vp = ip->vp;
		if (vp) {
			/*
			 * Inode must be unlocked during the vget() to avoid
			 * possible deadlocks, but leave the ip ref intact.
			 *
			 * vnode is held to prevent destruction during the
			 * vget().  The vget() can still fail if we lost
			 * a reclaim race on the vnode.
			 */
			hammer2_mtx_state_t ostate;

			vhold(vp);
			ostate = hammer2_inode_lock_temp_release(ip);
			if (vget(vp, LK_EXCLUSIVE)) {
				vdrop(vp);
				hammer2_inode_lock_temp_restore(ip, ostate);
				continue;
			}
			hammer2_inode_lock_temp_restore(ip, ostate);
			vdrop(vp);
			/* vp still locked and ref from vget */
			if (ip->vp != vp) {
				kprintf("hammer2: igetv race %p/%p\n",
					ip->vp, vp);
				vput(vp);
				continue;
			}
			*errorp = 0;
			break;
		}

		/*
		 * No vnode exists, allocate a new vnode.  Beware of
		 * allocation races.  This function will return an
		 * exclusively locked and referenced vnode.
		 */
		*errorp = getnewvnode(VT_HAMMER2, pmp->mp, &vp, 0, 0);
		if (*errorp) {
			kprintf("hammer2: igetv getnewvnode failed %d\n",
				*errorp);
			vp = NULL;
			break;
		}

		/*
		 * Lock the inode and check for an allocation race.
		 */
		wasexclusive = hammer2_inode_lock_upgrade(ip);
		if (ip->vp != NULL) {
			vp->v_type = VBAD;
			vx_put(vp);
			hammer2_inode_lock_downgrade(ip, wasexclusive);
			continue;
		}

		switch (ip->meta.type) {
		case HAMMER2_OBJTYPE_DIRECTORY:
			vp->v_type = VDIR;
			break;
		case HAMMER2_OBJTYPE_REGFILE:
			vp->v_type = VREG;
			vinitvmio(vp, ip->meta.size,
				  HAMMER2_LBUFSIZE,
				  (int)ip->meta.size & HAMMER2_LBUFMASK);
			break;
		case HAMMER2_OBJTYPE_SOFTLINK:
			/*
			 * XXX for now we are using the generic file_read
			 * and file_write code so we need a buffer cache
			 * association.
			 */
			vp->v_type = VLNK;
			vinitvmio(vp, ip->meta.size,
				  HAMMER2_LBUFSIZE,
				  (int)ip->meta.size & HAMMER2_LBUFMASK);
			break;
		case HAMMER2_OBJTYPE_CDEV:
			vp->v_type = VCHR;
			/* fall through */
		case HAMMER2_OBJTYPE_BDEV:
			vp->v_ops = &pmp->mp->mnt_vn_spec_ops;
			if (ip->meta.type != HAMMER2_OBJTYPE_CDEV)
				vp->v_type = VBLK;
			addaliasu(vp, ip->meta.rmajor, ip->meta.rminor);
			break;
		case HAMMER2_OBJTYPE_FIFO:
			vp->v_type = VFIFO;
			vp->v_ops = &pmp->mp->mnt_vn_fifo_ops;
			break;
		default:
			panic("hammer2: unhandled objtype %d",
			      ip->meta.type);
			break;
		}

		if (ip == pmp->iroot)
			vsetflags(vp, VROOT);

		vp->v_data = ip;
		ip->vp = vp;
		hammer2_inode_ref(ip);		/* vp association */
		hammer2_inode_lock_downgrade(ip, wasexclusive);
		break;
	}

	/*
	 * Return non-NULL vp and *errorp == 0, or NULL vp and *errorp != 0.
	 */
	if (hammer2_debug & 0x0002) {
		kprintf("igetv vp %p refs 0x%08x aux 0x%08x\n",
			vp, vp->v_refcnt, vp->v_auxrefs);
	}
	return (vp);
}
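
/*
 * Illustrative usage (editor's sketch, not part of the original source):
 * a VOP that needs a vnode for a resolved inode typically calls this with
 * the inode locked and propagates the error:
 *
 *	int error;
 *	struct vnode *vp;
 *
 *	hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS |
 *			       HAMMER2_RESOLVE_SHARED);
 *	vp = hammer2_igetv(ip, &error);
 *	hammer2_inode_unlock(ip, NULL);
 *
 * On success vp is returned exclusively locked with error == 0; on
 * failure vp is NULL and error is set, per the comment above.
 */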

/*
 * Returns the inode associated with the passed-in cluster, creating the
 * inode if necessary and synchronizing it to the passed-in cluster otherwise.
 *
 * The passed-in cluster must be locked and will remain locked on return.
 * The returned inode will be locked and the caller may dispose of both
 * via hammer2_inode_unlock() + hammer2_inode_drop().  However, if the caller
 * needs to resolve a hardlink it must ref/unlock/relock/drop the inode.
 *
 * The hammer2_inode structure regulates the interface between the high level
 * kernel VNOPS API and the filesystem backend (the chains).
 *
 * On return the inode is locked with the supplied cluster.
 */
hammer2_inode_t *
hammer2_inode_get(hammer2_pfs_t *pmp, hammer2_inode_t *dip,
		  hammer2_cluster_t *cluster)
{
	hammer2_inode_t *nip;
	const hammer2_inode_data_t *iptmp;
	const hammer2_inode_data_t *nipdata;

	KKASSERT(cluster == NULL ||
		 hammer2_cluster_type(cluster) == HAMMER2_BREF_TYPE_INODE);

again:
	/*
	 * Interlocked lookup/ref of the inode.  This code is only needed
	 * when looking up inodes with nlinks != 0 (TODO: optimize out
	 * otherwise and test for duplicates).
	 *
	 * Cluster can be NULL during the initial pfs allocation.
	 */
	if (cluster) {
		iptmp = &hammer2_cluster_rdata(cluster)->ipdata;
		nip = hammer2_inode_lookup(pmp, iptmp->meta.inum);
		if (nip) {
			hammer2_mtx_ex(&nip->lock);

			/*
			 * Handle SMP race (not applicable to the super-root
			 * spmp which can't index inodes due to duplicative
			 * inode numbers).
			 */
			if (pmp->spmp_hmp == NULL &&
			    (nip->flags & HAMMER2_INODE_ONRBTREE) == 0) {
				hammer2_mtx_unlock(&nip->lock);
				hammer2_inode_drop(nip);
				goto again;
			}
			hammer2_inode_repoint(nip, NULL, cluster);
			return nip;
		}
	}

	/*
	 * We couldn't find the inode number, create a new inode.
	 */
	nip = kmalloc(sizeof(*nip), pmp->minode, M_WAITOK | M_ZERO);
	spin_init(&nip->cluster_spin, "h2clspin");
	atomic_add_long(&pmp->inmem_inodes, 1);
	hammer2_pfs_memory_inc(pmp);
	hammer2_pfs_memory_wakeup(pmp);
	if (pmp->spmp_hmp)
		nip->flags = HAMMER2_INODE_SROOT;

	/*
	 * Initialize nip's cluster.  A cluster is provided for normal
	 * inodes but typically not for the super-root or PFS inodes.
	 */
	nip->cluster.refs = 1;
	nip->cluster.pmp = pmp;
	nip->cluster.flags |= HAMMER2_CLUSTER_INODE;
	if (cluster) {
		nipdata = &hammer2_cluster_rdata(cluster)->ipdata;
		nip->meta = nipdata->meta;
		hammer2_cluster_bref(cluster, &nip->bref);
		atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD);
		hammer2_inode_repoint(nip, NULL, cluster);
	} else {
		nip->meta.inum = 1;		/* PFS inum is always 1 XXX */
		/* mtime will be updated when a cluster is available */
		atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD); /*XXX*/
	}

	nip->pip = dip;				/* can be NULL */
	if (dip)
		hammer2_inode_ref(dip);		/* ref dip for nip->pip */
	nip->pmp = pmp;

	/*
	 * ref and lock on nip gives it state compatible to after a
	 * hammer2_inode_lock() call.
	 */
	nip->refs = 1;
	hammer2_mtx_init(&nip->lock, "h2inode");
	hammer2_mtx_ex(&nip->lock);
	/* combination of thread lock and chain lock == inode lock */

	/*
	 * Attempt to add the inode.  If it fails we raced another inode
	 * get.  Undo all the work and try again.
	 */
	if (pmp->spmp_hmp == NULL) {
		hammer2_spin_ex(&pmp->inum_spin);
		if (RB_INSERT(hammer2_inode_tree, &pmp->inum_tree, nip)) {
			hammer2_spin_unex(&pmp->inum_spin);
			hammer2_mtx_unlock(&nip->lock);
			hammer2_inode_drop(nip);
			goto again;
		}
		atomic_set_int(&nip->flags, HAMMER2_INODE_ONRBTREE);
		hammer2_spin_unex(&pmp->inum_spin);
	}

	return (nip);
}
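
/*
 * Illustrative usage (editor's sketch, not part of the original source):
 * a lookup path which has resolved a locked inode cluster converts it into
 * an in-memory inode here; the retry logic above absorbs any SMP race
 * against a concurrent hammer2_inode_get() or inode teardown:
 *
 *	nip = hammer2_inode_get(dip->pmp, dip, cluster);
 *	... nip is returned locked; cluster remains locked ...
 *	hammer2_inode_unlock(nip, NULL);
 *	hammer2_inode_drop(nip);
 */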

/*
 * Create a new inode in the specified directory using the vattr to
 * figure out the type of inode.
 *
 * If no error occurs the new inode is returned with its cluster locked,
 * otherwise NULL is returned and *errorp is set to the error.
 *
 * If vap and/or cred are NULL the related fields are not set and the
 * inode type defaults to a directory.  This is used when creating PFSs
 * under the super-root, so the inode number is set to 1 in this case.
 *
 * dip is not locked on entry.
 *
 * NOTE: When used to create a snapshot, the inode is temporarily associated
 *	 with the super-root spmp.  XXX should pass new pmp for snapshot.
 */
hammer2_inode_t *
hammer2_inode_create(hammer2_inode_t *dip,
		     struct vattr *vap, struct ucred *cred,
		     const uint8_t *name, size_t name_len,
		     hammer2_key_t inum, uint8_t type, uint8_t target_type,
		     int flags, int *errorp)
{
	hammer2_xop_scanlhc_t *sxop;
	hammer2_xop_create_t *xop;
	hammer2_inode_t *nip;
	hammer2_key_t lhcbase;
	hammer2_key_t lhc;
	int error;
	uid_t xuid;
	uuid_t dip_uid;
	uuid_t dip_gid;
	uint32_t dip_mode;
	uint8_t dip_comp_algo;
	uint8_t dip_check_algo;

	lhc = hammer2_dirhash(name, name_len);
	*errorp = 0;
	nip = NULL;

	/*
	 * Locate the inode or indirect block to create the new
	 * entry in.  At the same time check for key collisions
	 * and iterate until we don't get one.
	 *
	 * NOTE: hidden inodes do not have iterators.
	 *
	 * Lock the directory exclusively for now to guarantee that
	 * we can find an unused lhc for the name.  Due to collisions,
	 * two different creates can end up with the same lhc so we
	 * cannot depend on the OS to prevent the collision.
	 */
	hammer2_inode_lock(dip, HAMMER2_RESOLVE_ALWAYS);

	dip_uid = dip->meta.uid;
	dip_gid = dip->meta.gid;
	dip_mode = dip->meta.mode;
	dip_comp_algo = dip->meta.comp_algo;
	dip_check_algo = dip->meta.check_algo;

	/*
	 * Locate an unused key in the collision space.
	 */
	lhcbase = lhc;
	sxop = &hammer2_xop_alloc(dip)->xop_scanlhc;
	sxop->lhc = lhc;
	hammer2_xop_start(&sxop->head, hammer2_inode_xop_scanlhc);
	while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
		if (lhc != sxop->head.cluster.focus->bref.key)
			break;
		++lhc;
	}
	hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

	if (error) {
		if (error != ENOENT) {
			*errorp = error;
			goto done2;
		}
		++lhc;
		error = 0;
	}
	if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
		*errorp = ENOSPC;
		goto done2;
	}

	/*
	 * Create the inode with the lhc as the key.
	 */
	xop = &hammer2_xop_alloc(dip)->xop_create;
	xop->lhc = lhc;
	xop->flags = flags;
	bzero(&xop->meta, sizeof(xop->meta));

	if (vap) {
		xop->meta.type = hammer2_get_obj_type(vap->va_type);

		switch (xop->meta.type) {
		case HAMMER2_OBJTYPE_CDEV:
		case HAMMER2_OBJTYPE_BDEV:
			xop->meta.rmajor = vap->va_rmajor;
			xop->meta.rminor = vap->va_rminor;
			break;
		default:
			break;
		}
		type = xop->meta.type;
	} else {
		xop->meta.type = type;
		xop->meta.target_type = target_type;
	}
	xop->meta.inum = inum;

	/* Inherit parent's inode compression mode. */
	xop->meta.comp_algo = dip_comp_algo;
	xop->meta.check_algo = dip_check_algo;
	xop->meta.version = HAMMER2_INODE_VERSION_ONE;
	hammer2_update_time(&xop->meta.ctime);
	xop->meta.mtime = xop->meta.ctime;
	if (vap)
		xop->meta.mode = vap->va_mode;
	xop->meta.nlinks = 1;

	if (vap) {
		if (dip && dip->pmp) {
			xuid = hammer2_to_unix_xid(&dip_uid);
			xuid = vop_helper_create_uid(dip->pmp->mp,
						     dip_mode,
						     xuid,
						     cred,
						     &vap->va_mode);
		} else {
			/* super-root has no dip and/or pmp */
			xuid = 0;
		}

		if (vap->va_vaflags & VA_UID_UUID_VALID)
			xop->meta.uid = vap->va_uid_uuid;
		else if (vap->va_uid != (uid_t)VNOVAL)
			hammer2_guid_to_uuid(&xop->meta.uid, vap->va_uid);
		else
			hammer2_guid_to_uuid(&xop->meta.uid, xuid);

		if (vap->va_vaflags & VA_GID_UUID_VALID)
			xop->meta.gid = vap->va_gid_uuid;
		else if (vap->va_gid != (gid_t)VNOVAL)
			hammer2_guid_to_uuid(&xop->meta.gid, vap->va_gid);
		else
			xop->meta.gid = dip_gid;
	}

	/*
	 * Regular files and softlinks allow a small amount of data to be
	 * directly embedded in the inode.  This flag will be cleared if
	 * the size is extended past the embedded limit.
	 */
	if (xop->meta.type == HAMMER2_OBJTYPE_REGFILE ||
	    xop->meta.type == HAMMER2_OBJTYPE_SOFTLINK ||
	    xop->meta.type == HAMMER2_OBJTYPE_HARDLINK) {
		xop->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
	}

	hammer2_xop_setname(&xop->head, name, name_len);
	xop->meta.name_len = name_len;
	xop->meta.name_key = lhc;
	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);

	hammer2_xop_start(&xop->head, hammer2_inode_xop_create);

	error = hammer2_xop_collect(&xop->head, 0);
#if INODE_DEBUG
	kprintf("CREATE INODE %*.*s\n",
		(int)name_len, (int)name_len, name);
#endif
	if (error) {
		*errorp = error;
		goto done;
	}

	/*
	 * Set up the new inode if not a hardlink pointer.
	 *
	 * NOTE: *_get() integrates chain's lock into the inode lock.
	 *
	 * NOTE: Only one new inode can currently be created per
	 *	 transaction.  If the need arises we can adjust
	 *	 hammer2_trans_init() to allow more.
	 *
	 * NOTE: nipdata will have chain's blockset data.
	 */
	if (type != HAMMER2_OBJTYPE_HARDLINK) {
		nip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster);
		nip->comp_heuristic = 0;
	}
done:
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
done2:
	hammer2_inode_unlock(dip, NULL);

	return (nip);
}
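
/*
 * Illustrative usage (editor's sketch, not part of the original source):
 * a front-end create VOP supplies the vattr and name and lets this routine
 * pick a collision-free lhc; "inum" and "type" below stand in for values
 * the caller would compute:
 *
 *	hammer2_inode_t *nip;
 *	int error;
 *
 *	nip = hammer2_inode_create(dip, vap, cred, name, name_len,
 *				   inum, type, 0, 0, &error);
 *	if (error == 0) {
 *		... use nip, then unlock and drop it ...
 *	}
 */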

/*
 * Connect the disconnected inode (ip) to the directory (dip) with the
 * specified (name, name_len).  If name is NULL, (lhc) will be used as
 * the directory key and the inode's embedded name will not be modified
 * for future recovery purposes.
 *
 * dip and ip must both be locked exclusively (dip in particular to avoid
 * lhc collisions).
 */
int
hammer2_inode_connect_simple(hammer2_inode_t *dip, hammer2_inode_t *ip,
			     const char *name, size_t name_len,
			     hammer2_key_t lhc)
{
	hammer2_xop_scanlhc_t *sxop;
	hammer2_xop_connect_t *xop;
	hammer2_inode_t *opip;
	hammer2_key_t lhcbase;
	int error;

	/*
	 * Calculate the lhc and resolve the collision space.
	 */
	if (name) {
		lhc = lhcbase = hammer2_dirhash(name, name_len);
		sxop = &hammer2_xop_alloc(dip)->xop_scanlhc;
		sxop->lhc = lhc;
		hammer2_xop_start(&sxop->head, hammer2_inode_xop_scanlhc);
		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
			if (lhc != sxop->head.cluster.focus->bref.key)
				break;
			++lhc;
		}
		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

		if (error) {
			if (error != ENOENT)
				goto done;
			++lhc;
			error = 0;
		}
		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
			error = ENOSPC;
			goto done;
		}
	}

	/*
	 * Formally reconnect the in-memory structure.  ip must
	 * be locked exclusively to safely change ip->pip.
	 */
	if (ip->pip != dip) {
		hammer2_inode_ref(dip);
		opip = ip->pip;
		ip->pip = dip;
		if (opip)
			hammer2_inode_drop(opip);
	}

	/*
	 * Issue the connect XOP to the backend.
	 */
	xop = &hammer2_xop_alloc(dip)->xop_connect;
	xop->lhc = lhc;
	if (name)
		hammer2_xop_setname(&xop->head, name, name_len);
	hammer2_xop_setip2(&xop->head, ip);

	hammer2_xop_start(&xop->head, hammer2_inode_xop_connect);
	error = hammer2_xop_collect(&xop->head, 0);
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

	/*
	 * On success make the same adjustments to ip->meta or the
	 * next flush may blow up the chain.
	 */
	if (error == 0) {
		hammer2_inode_modify(ip);
		ip->meta.name_key = lhc;
		if (name)
			ip->meta.name_len = name_len;
	}
done:
	return error;
}
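
/*
 * Illustrative usage (editor's sketch, not part of the original source):
 * the hidden-directory move in hammer2_inode_unlink_finisher() below
 * connects with a NULL name so the inode's embedded name is preserved
 * for recovery, using the inode number itself as the directory key:
 *
 *	error = hammer2_inode_connect_simple(pmp->ihidden, ip,
 *					     NULL, 0, ip->meta.inum);
 */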

/*
 * Repoint ip->cluster's chains to cluster's chains and fixup the default
 * focus.  Only valid elements are repointed.  Invalid elements have to be
 * adjusted by the appropriate slave sync threads.
 *
 * Caller must hold the inode exclusively locked; the cluster, if not NULL,
 * must also be locked.
 *
 * Cluster may be NULL to clean out any chains in ip->cluster.
 */
void
hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_inode_t *pip,
		      hammer2_cluster_t *cluster)
{
	hammer2_chain_t *dropch[HAMMER2_MAXCLUSTER];
	hammer2_chain_t *ochain;
	hammer2_chain_t *nchain;
	hammer2_inode_t *opip;
	int i;

	bzero(dropch, sizeof(dropch));

	/*
	 * Replace chains in ip->cluster with chains from cluster and
	 * adjust the focus if necessary.
	 *
	 * NOTE: nchain and/or ochain can be NULL due to gaps
	 *	 in the cluster arrays.
	 */
	hammer2_spin_ex(&ip->cluster_spin);
	for (i = 0; cluster && i < cluster->nchains; ++i) {
		/*
		 * Do not replace invalid elements as this might race
		 * syncthr replacements.
		 */
		if (cluster->array[i].flags & HAMMER2_CITEM_INVALID)
			continue;

		/*
		 * Do not replace elements which are the same.  Also handle
		 * element count discrepancies.
		 */
		nchain = cluster->array[i].chain;
		if (i < ip->cluster.nchains) {
			ochain = ip->cluster.array[i].chain;
			if (ochain == nchain)
				continue;
		} else {
			ochain = NULL;
		}

		/*
		 * Make adjustments
		 */
		ip->cluster.array[i].chain = nchain;
		ip->cluster.array[i].flags &= ~HAMMER2_CITEM_INVALID;
		ip->cluster.array[i].flags |= cluster->array[i].flags &
					      HAMMER2_CITEM_INVALID;
		if (nchain)
			hammer2_chain_ref(nchain);
		dropch[i] = ochain;
	}

	/*
	 * Release any left-over chains in ip->cluster.
	 */
	while (i < ip->cluster.nchains) {
		nchain = ip->cluster.array[i].chain;
		if (nchain) {
			ip->cluster.array[i].chain = NULL;
			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
		}
		dropch[i] = nchain;
		++i;
	}

	/*
	 * Fixup fields.  Note that the inode-embedded cluster is never
	 * directly locked.
	 */
	if (cluster) {
		ip->cluster.nchains = cluster->nchains;
		ip->cluster.focus = cluster->focus;
		ip->cluster.flags = cluster->flags & ~HAMMER2_CLUSTER_LOCKED;
	} else {
		ip->cluster.nchains = 0;
		ip->cluster.focus = NULL;
		ip->cluster.flags &= ~HAMMER2_CLUSTER_ZFLAGS;
	}

	/*
	 * Repoint ip->pip if requested (non-NULL pip).
	 */
	if (pip && ip->pip != pip) {
		opip = ip->pip;
		hammer2_inode_ref(pip);
		ip->pip = pip;
	} else {
		opip = NULL;
	}
	hammer2_spin_unex(&ip->cluster_spin);

	/*
	 * Cleanup outside of spinlock
	 */
	while (--i >= 0) {
		if (dropch[i])
			hammer2_chain_drop(dropch[i]);
	}
	if (opip)
		hammer2_inode_drop(opip);
}

/*
 * Repoint a single element from the cluster to the ip.  Used by the
 * synchronization threads to piecemeal update inodes.  Does not change
 * the focus and requires the inode to be re-locked to clean up flags (XXX).
 */
void
hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
			  int idx)
{
	hammer2_chain_t *ochain;
	hammer2_chain_t *nchain;
	int i;

	hammer2_spin_ex(&ip->cluster_spin);
	KKASSERT(idx < cluster->nchains);
	if (idx < ip->cluster.nchains) {
		ochain = ip->cluster.array[idx].chain;
		nchain = cluster->array[idx].chain;
	} else {
		ochain = NULL;
		nchain = cluster->array[idx].chain;

		/*
		 * Extend the array, marking any newly exposed slots
		 * invalid.  The invalidation loop must run before
		 * nchains is bumped or it would have nothing to do.
		 */
		for (i = ip->cluster.nchains; i <= idx; ++i) {
			bzero(&ip->cluster.array[i],
			      sizeof(ip->cluster.array[i]));
			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
		}
		ip->cluster.nchains = idx + 1;
	}
	if (ochain != nchain) {
		/*
		 * Make adjustments.
		 */
		ip->cluster.array[idx].chain = nchain;
		ip->cluster.array[idx].flags &= ~HAMMER2_CITEM_INVALID;
		ip->cluster.array[idx].flags |= cluster->array[idx].flags &
						HAMMER2_CITEM_INVALID;
	}
	hammer2_spin_unex(&ip->cluster_spin);
	if (ochain != nchain) {
		if (nchain)
			hammer2_chain_ref(nchain);
		if (ochain)
			hammer2_chain_drop(ochain);
	}
}

/*
 * Called with a locked inode to finish unlinking an inode after xop_unlink
 * had been run.  This function is responsible for decrementing nlinks and
 * moving deleted inodes to the hidden directory if they are still open.
 *
 * We don't bother decrementing nlinks if the file is not open and this was
 * the last link.
 *
 * If the inode is a hardlink target its chain has not yet been deleted,
 * otherwise its chain has been deleted.
 *
 * If isopen then any prior deletion was not permanent and the inode must
 * be moved to the hidden directory.
 */
void
hammer2_inode_unlink_finisher(hammer2_inode_t *ip, int isopen)
{
	hammer2_pfs_t *pmp;
	int error;

	pmp = ip->pmp;

	/*
	 * Decrement nlinks.  If this is the last link and the file is
	 * not open, the chain has already been removed and we don't bother
	 * dirtying the inode.
	 */
	if (ip->meta.nlinks == 1) {
		atomic_set_int(&ip->flags, HAMMER2_INODE_ISUNLINKED);
		if (isopen == 0)
			return;
	}

	hammer2_inode_modify(ip);
	--ip->meta.nlinks;
	if ((int64_t)ip->meta.nlinks < 0)
		ip->meta.nlinks = 0;	/* safety */

	/*
	 * If nlinks is not zero we are done.  However, this should only be
	 * possible with a hardlink target.  If the inode is an embedded
	 * hardlink nlinks should have dropped to zero, warn and proceed
	 * with the next step.
	 */
	if (ip->meta.nlinks) {
		if ((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0)
			return;
		kprintf("hammer2_inode_unlink: nlinks was not 0 (%jd)\n",
			(intmax_t)ip->meta.nlinks);
		return;
	}

	/*
	 * nlinks is now zero, the inode should have already been deleted.
	 * If the file is open it was deleted non-permanently and must be
	 * moved to the hidden directory.
	 *
	 * When moving to the hidden directory we force the name_key to the
	 * inode number to avoid collisions.
	 */
	if (isopen) {
		hammer2_inode_lock(pmp->ihidden, HAMMER2_RESOLVE_ALWAYS);
		error = hammer2_inode_connect_simple(pmp->ihidden, ip,
						     NULL, 0, ip->meta.inum);
		hammer2_inode_unlock(pmp->ihidden, NULL);
		KKASSERT(error == 0);
	}
}

/*
 * This is called from the mount code to initialize pmp->ihidden
 */
void
hammer2_inode_install_hidden(hammer2_pfs_t *pmp)
{
	hammer2_cluster_t *cparent;
	hammer2_cluster_t *cluster;
	hammer2_cluster_t *scan;
	const hammer2_inode_data_t *ripdata;
	hammer2_inode_data_t *wipdata;
	hammer2_key_t key_dummy;
	hammer2_key_t key_next;
	int error;
	int count;
	int dip_check_algo;
	int dip_comp_algo;

	if (pmp->ihidden)
		return;

	/*
	 * Find the hidden directory
	 */
	bzero(&key_dummy, sizeof(key_dummy));
	hammer2_trans_init(pmp, 0);

	/*
	 * Setup for lookup, retrieve iroot's check and compression
	 * algorithm request which was likely generated by newfs_hammer2.
	 *
	 * The check/comp fields will probably never be used since inodes
	 * are renamed into the hidden directory and not created relative to
	 * the hidden directory, chain creation inherits from bref.methods,
	 * and data chains inherit from their respective file inode *_algo
	 * fields.
	 */
	hammer2_inode_lock(pmp->iroot, HAMMER2_RESOLVE_ALWAYS);
	cparent = hammer2_inode_cluster(pmp->iroot, HAMMER2_RESOLVE_ALWAYS);
	ripdata = &hammer2_cluster_rdata(cparent)->ipdata;
	dip_check_algo = ripdata->meta.check_algo;
	dip_comp_algo = ripdata->meta.comp_algo;

	cluster = hammer2_cluster_lookup(cparent, &key_dummy,
					 HAMMER2_INODE_HIDDENDIR,
					 HAMMER2_INODE_HIDDENDIR,
					 0);
	if (cluster) {
		pmp->ihidden = hammer2_inode_get(pmp, pmp->iroot, cluster);
		hammer2_inode_ref(pmp->ihidden);

		/*
		 * Remove any unlinked files which were left open as-of
		 * any system crash.
		 *
		 * Don't pass NODATA, we need the inode data so the delete
		 * can do proper statistics updates.
		 */
		count = 0;
		scan = hammer2_cluster_lookup(cluster, &key_next,
					      0, HAMMER2_TID_MAX, 0);
		while (scan) {
			if (hammer2_cluster_type(scan) ==
			    HAMMER2_BREF_TYPE_INODE) {
				hammer2_cluster_delete(cluster, scan,
						   HAMMER2_DELETE_PERMANENT);
				++count;
			}
			scan = hammer2_cluster_next(cluster, scan, &key_next,
						    0, HAMMER2_TID_MAX, 0);
		}

		hammer2_inode_unlock(pmp->ihidden, cluster);
		hammer2_inode_unlock(pmp->iroot, cparent);
		hammer2_trans_done(pmp);
		kprintf("hammer2: PFS loaded hidden dir, "
			"removed %d dead entries\n", count);
		return;
	}

	/*
	 * Create the hidden directory
	 */
	error = hammer2_cluster_create(pmp, cparent, &cluster,
				       HAMMER2_INODE_HIDDENDIR, 0,
				       HAMMER2_BREF_TYPE_INODE,
				       HAMMER2_INODE_BYTES,
				       0);
	KKASSERT(error == 0);
	hammer2_inode_unlock(pmp->iroot, cparent);

	hammer2_cluster_modify(cluster, 0);
	wipdata = &hammer2_cluster_wdata(cluster)->ipdata;
	wipdata->meta.type = HAMMER2_OBJTYPE_DIRECTORY;
	wipdata->meta.inum = HAMMER2_INODE_HIDDENDIR;
	wipdata->meta.nlinks = 1;
	wipdata->meta.comp_algo = dip_comp_algo;
	wipdata->meta.check_algo = dip_check_algo;
	hammer2_cluster_modsync(cluster);
	kprintf("hammer2: PFS root missing hidden directory, creating\n");

	pmp->ihidden = hammer2_inode_get(pmp, pmp->iroot, cluster);
	hammer2_inode_ref(pmp->ihidden);
	hammer2_inode_unlock(pmp->ihidden, cluster);
	hammer2_trans_done(pmp);
}

/*
 * If an open file is unlinked H2 needs to retain the file in the topology
 * to ensure that its backing store is not recovered by the bulk free scan.
 * This also allows us to avoid having to special-case the CHAIN_DELETED flag.
 *
 * To do this the file is moved to a hidden directory in the PFS root and
 * renamed.  The hidden directory must be created if it does not exist.
 */
static
void
hammer2_inode_move_to_hidden(hammer2_cluster_t **cparentp,
			     hammer2_cluster_t **clusterp,
			     hammer2_tid_t inum)
{
	hammer2_cluster_t *dcluster;
	hammer2_pfs_t *pmp;
	int error;

	pmp = (*clusterp)->pmp;
	KKASSERT(pmp != NULL);
	KKASSERT(pmp->ihidden != NULL);

	hammer2_cluster_delete(*cparentp, *clusterp, 0);
	hammer2_inode_lock(pmp->ihidden, HAMMER2_RESOLVE_ALWAYS);
	dcluster = hammer2_inode_cluster(pmp->ihidden, HAMMER2_RESOLVE_ALWAYS);
	error = hammer2_inode_connect(NULL/*XXX*/, clusterp, 0,
				      pmp->ihidden, dcluster,
				      NULL, 0, inum);
	hammer2_inode_unlock(pmp->ihidden, dcluster);
	KKASSERT(error == 0);
}

/*
 * Find the directory common to both fdip and tdip.
 *
 * Returns a held but not locked inode.  Caller typically locks the inode,
 * and when through unlocks AND drops it.
 */
hammer2_inode_t *
hammer2_inode_common_parent(hammer2_inode_t *fdip, hammer2_inode_t *tdip)
{
	hammer2_inode_t *scan1;
	hammer2_inode_t *scan2;

	/*
	 * We used to have a depth field but it complicated matters too
	 * much for directory renames.  So now it's ugly.  Check for
	 * simple cases before giving up and doing it the expensive way.
	 *
	 * XXX need a bottom-up topology stability lock
	 */
	if (fdip == tdip || fdip == tdip->pip) {
		hammer2_inode_ref(fdip);
		return(fdip);
	}
	if (fdip->pip == tdip) {
		hammer2_inode_ref(tdip);
		return(tdip);
	}

	/*
	 * XXX not MPSAFE
	 */
	for (scan1 = fdip; scan1->pmp == fdip->pmp; scan1 = scan1->pip) {
		scan2 = tdip;
		while (scan2->pmp == tdip->pmp) {
			if (scan1 == scan2) {
				hammer2_inode_ref(scan1);
				return(scan1);
			}
			scan2 = scan2->pip;
		}
	}
	panic("hammer2_inode_common_parent: no common parent %p %p\n",
	      fdip, tdip);
	/* NOT REACHED */
	return(NULL);
}
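
/*
 * Illustrative usage (editor's sketch, not part of the original source):
 * a directory rename path can use this to find a stable ancestor to
 * anchor its locking, honoring the ref/unlock/drop contract above:
 *
 *	hammer2_inode_t *cdip;
 *
 *	cdip = hammer2_inode_common_parent(fdip, tdip);
 *	hammer2_inode_lock(cdip, HAMMER2_RESOLVE_ALWAYS);
 *	... perform the rename steps under cdip ...
 *	hammer2_inode_unlock(cdip, NULL);
 *	hammer2_inode_drop(cdip);
 */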

/*
 * Set an inode's cluster modified, marking the related chains RW and
 * duplicating them if necessary.  The inode's cached meta-data (ip->meta)
 * is updated first by the caller and then synchronized back into the
 * inode's cluster by hammer2_inode_fsync().
 */
void
hammer2_inode_modify(hammer2_inode_t *ip)
{
	atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
	if (ip->vp)
		vsetisdirty(ip->vp);
}
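
/*
 * Illustrative usage (editor's sketch, not part of the original source):
 * front-end meta-data updates follow a modify-then-fsync pattern; the
 * cached ip->meta is adjusted under the inode lock and later written back
 * into the cluster by hammer2_inode_fsync() below:
 *
 *	hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
 *	hammer2_inode_modify(ip);
 *	ip->meta.mtime = ...;		(adjust the cached meta-data)
 *	hammer2_inode_unlock(ip, ...);
 */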

/*
 * Synchronize the inode's frontend state with the chain state prior
 * to any explicit flush of the inode or any strategy write call.
 *
 * Called with a locked inode.
 */
void
hammer2_inode_fsync(hammer2_inode_t *ip, hammer2_cluster_t *cparent)
{
	int clear_directdata = 0;

	/* temporary hack, allow cparent to be NULL */
	if (cparent == NULL) {
		cparent = hammer2_inode_cluster(ip, HAMMER2_RESOLVE_ALWAYS);
		hammer2_inode_fsync(ip, cparent);
		hammer2_cluster_unlock(cparent);
		hammer2_cluster_drop(cparent);
		return;
	}

	if ((ip->flags & HAMMER2_INODE_RESIZED) == 0) {
		/* do nothing */
	} else if (ip->meta.size < ip->osize) {
		/*
		 * We must delete any chains beyond the EOF.  The chain
		 * straddling the EOF will be pending in the bioq.
		 */
		hammer2_cluster_t *dparent;
		hammer2_cluster_t *cluster;
		hammer2_key_t lbase;
		hammer2_key_t key_next;

		lbase = (ip->meta.size + HAMMER2_PBUFMASK64) &
			~HAMMER2_PBUFMASK64;
		dparent = hammer2_cluster_lookup_init(&ip->cluster, 0);
		cluster = hammer2_cluster_lookup(dparent, &key_next,
						 lbase, (hammer2_key_t)-1,
						 HAMMER2_LOOKUP_NODATA);
		while (cluster) {
			/*
			 * Degenerate embedded case, nothing to loop on
			 */
			switch (hammer2_cluster_type(cluster)) {
			case HAMMER2_BREF_TYPE_INODE:
				hammer2_cluster_unlock(cluster);
				hammer2_cluster_drop(cluster);
				cluster = NULL;
				break;
			case HAMMER2_BREF_TYPE_DATA:
				hammer2_cluster_delete(dparent, cluster,
						   HAMMER2_DELETE_PERMANENT);
				/* fall through */
			default:
				cluster = hammer2_cluster_next(dparent,
						   cluster, &key_next,
						   key_next,
						   (hammer2_key_t)-1,
						   HAMMER2_LOOKUP_NODATA);
				break;
			}
		}
		hammer2_cluster_lookup_done(dparent);
		atomic_clear_int(&ip->flags, HAMMER2_INODE_RESIZED);
		KKASSERT(ip->flags & HAMMER2_INODE_MODIFIED);
	} else if (ip->meta.size > ip->osize) {
		/*
		 * When resizing larger we may not have any direct-data
		 * available.
		 */
		if ((ip->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) &&
		    ip->meta.size > HAMMER2_EMBEDDED_BYTES) {
			ip->meta.op_flags &= ~HAMMER2_OPFLAG_DIRECTDATA;
			clear_directdata = 1;
		}
		atomic_clear_int(&ip->flags, HAMMER2_INODE_RESIZED);
		KKASSERT(ip->flags & HAMMER2_INODE_MODIFIED);
	} else {
		/*
		 * RESIZED was set but size didn't change.
		 */
		atomic_clear_int(&ip->flags, HAMMER2_INODE_RESIZED);
		KKASSERT(ip->flags & HAMMER2_INODE_MODIFIED);
	}

	/*
	 * Sync inode meta-data
	 */
	if (ip->flags & HAMMER2_INODE_MODIFIED) {
		hammer2_inode_data_t *wipdata;

		atomic_clear_int(&ip->flags, HAMMER2_INODE_MODIFIED);
		hammer2_cluster_modify(cparent, 0);
		hammer2_inode_repoint(ip, NULL, cparent);

		wipdata = &hammer2_cluster_wdata(cparent)->ipdata;
		wipdata->meta = ip->meta;
		if (clear_directdata) {
			bzero(&wipdata->u.blockset,
			      sizeof(wipdata->u.blockset));
		}
		hammer2_cluster_modsync(cparent);
	}
}

/*
 * Inode create helper (threaded, backend)
 *
 * Used by ncreate, nmknod, nsymlink, nmkdir.
 * Used by nlink and rename to create HARDLINK pointers.
 *
 * Frontend holds the parent directory ip locked exclusively.  We
 * create the inode and feed the exclusively locked chain to the
 * frontend.
 */
void
hammer2_inode_xop_create(hammer2_xop_t *arg, int clindex)
{
	hammer2_xop_create_t *xop = &arg->xop_create;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_key_t key_next;
	int cache_index = -1;
	int error;

	chain = NULL;
	parent = hammer2_inode_chain(xop->head.ip, clindex,
				     HAMMER2_RESOLVE_ALWAYS);
	if (parent == NULL) {
		error = EIO;
		goto fail;
	}
	chain = hammer2_chain_lookup(&parent, &key_next,
				     xop->lhc, xop->lhc,
				     &cache_index, 0);
	if (chain) {
		hammer2_chain_unlock(chain);
		error = EEXIST;
		goto fail;
	}

	error = hammer2_chain_create(&parent, &chain,
				     xop->head.ip->pmp,
				     xop->lhc, 0,
				     HAMMER2_BREF_TYPE_INODE,
				     HAMMER2_INODE_BYTES,
				     xop->flags);
	if (error == 0) {
		hammer2_chain_modify(chain, 0);
		chain->data->ipdata.meta = xop->meta;
		bcopy(xop->head.name, chain->data->ipdata.filename,
		      xop->head.name_len);
	}
	hammer2_chain_unlock(chain);
	hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS |
				  HAMMER2_RESOLVE_SHARED);
fail:
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	error = hammer2_xop_feed(&xop->head, chain, clindex, error);
	if (chain)
		hammer2_chain_drop(chain);
}

/*
 * Inode delete helper (backend, threaded)
 */
void
hammer2_inode_xop_destroy(hammer2_xop_t *arg, int clindex)
{
	/*hammer2_xop_inode_t *xop = &arg->xop_inode;*/
}

void
hammer2_inode_xop_connect(hammer2_xop_t *arg, int clindex)
{
	hammer2_xop_connect_t *xop = &arg->xop_connect;
	hammer2_inode_data_t *wipdata;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_pfs_t *pmp;
	hammer2_key_t key_dummy;
	int cache_index = -1;
	int error;

	/*
	 * Get directory, then issue a lookup to prime the parent chain
	 * for the create.  The lookup is expected to fail.
	 */
	pmp = xop->head.ip->pmp;
	parent = hammer2_inode_chain(xop->head.ip, clindex,
				     HAMMER2_RESOLVE_ALWAYS);
	if (parent == NULL) {
		chain = NULL;
		error = EIO;
		goto fail;
	}
	chain = hammer2_chain_lookup(&parent, &key_dummy,
				     xop->lhc, xop->lhc,
				     &cache_index, 0);
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		chain = NULL;
		error = EEXIST;
		goto fail;
	}

	/*
	 * Adjust the filename in the inode, set the name key.
	 *
	 * NOTE: Frontend must also adjust ip2->meta on success, we can't
	 *	 do it here.
	 */
	chain = hammer2_inode_chain(xop->head.ip2, clindex,
				    HAMMER2_RESOLVE_ALWAYS);
	hammer2_chain_modify(chain, 0);
	wipdata = &chain->data->ipdata;

	hammer2_inode_modify(xop->head.ip2);
	if (xop->head.name) {
		bzero(wipdata->filename, sizeof(wipdata->filename));
		bcopy(xop->head.name, wipdata->filename, xop->head.name_len);
		wipdata->meta.name_len = xop->head.name_len;
	}
	wipdata->meta.name_key = xop->lhc;

	/*
	 * Reconnect the chain to the new parent directory
	 */
	error = hammer2_chain_create(&parent, &chain, pmp,
				     xop->lhc, 0,
				     HAMMER2_BREF_TYPE_INODE,
				     HAMMER2_INODE_BYTES,
				     0);

	/*
	 * Feed result back.
	 */
fail:
	hammer2_xop_feed(&xop->head, NULL, clindex, error);
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
}