/*
 * Copyright (c) 2011-2014 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/uuid.h>

#include "hammer2.h"

RB_GENERATE2(hammer2_inode_tree, hammer2_inode, rbnode, hammer2_inode_cmp,
             hammer2_tid_t, meta.inum);

int
hammer2_inode_cmp(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
{
        if (ip1->meta.inum < ip2->meta.inum)
                return (-1);
        if (ip1->meta.inum > ip2->meta.inum)
                return (1);
        return (0);
}

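/*
 * Example (sketch): RB_GENERATE2 above also emits a direct-lookup variant
 * keyed on meta.inum, which is what hammer2_inode_lookup() below relies on:
 *
 *      ip = RB_LOOKUP(hammer2_inode_tree, &pmp->inum_tree, inum);
 */
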
static void
hammer2_inode_delayed_sideq(hammer2_inode_t *ip)
{
        hammer2_inode_sideq_t *ipul;
        hammer2_pfs_t *pmp = ip->pmp;

        if ((ip->flags & HAMMER2_INODE_ONSIDEQ) == 0) {
                ipul = kmalloc(sizeof(*ipul), pmp->minode,
                               M_WAITOK | M_ZERO);
                ipul->ip = ip;
                hammer2_spin_ex(&pmp->list_spin);
                if ((ip->flags & HAMMER2_INODE_ONSIDEQ) == 0) {
                        hammer2_inode_ref(ip);
                        atomic_set_int(&ip->flags,
                                       HAMMER2_INODE_ONSIDEQ);
                        TAILQ_INSERT_TAIL(&pmp->sideq, ipul, entry);
                        hammer2_spin_unex(&pmp->list_spin);
                } else {
                        hammer2_spin_unex(&pmp->list_spin);
                        kfree(ipul, pmp->minode);
                }
        }
}

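/*
 * Note on the pattern above (sketch): ONSIDEQ is tested once without the
 * spinlock to avoid the allocation in the common case, then re-tested under
 * pmp->list_spin.  The loser of the race frees its pre-allocated entry
 * instead of inserting a duplicate, and the inode ref taken here is dropped
 * later when the sideq is drained.
 */
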
/*
 * HAMMER2 inode locks
 *
 * HAMMER2 offers shared and exclusive locks on inodes.  Pass a mask of
 * flags for options:
 *
 *	- pass HAMMER2_RESOLVE_SHARED if a shared lock is desired.  The
 *	  inode locking function will automatically set the RDONLY flag.
 *
 *	- pass HAMMER2_RESOLVE_ALWAYS if you need the inode's meta-data.
 *	  Most front-end inode locks do.
 *
 *	- pass HAMMER2_RESOLVE_NEVER if you do not want to require that
 *	  the inode data be resolved.  This is used by the syncthr because
 *	  it can run on an unresolved/out-of-sync cluster, and also by the
 *	  vnode reclamation code to avoid unnecessary I/O (particularly when
 *	  disposing of hundreds of thousands of cached vnodes).
 *
 * The inode locking function locks the inode itself, resolves any stale
 * chains in the inode's cluster, and allocates a fresh copy of the
 * cluster with 1 ref and all the underlying chains locked.
 *
 * ip->cluster will be stable while the inode is locked.
 *
 * NOTE: We don't combine the inode/chain lock because putting away an
 *       inode would otherwise confuse multiple lock holders of the inode.
 *
 * NOTE: In-memory inodes always point to hardlink targets (the actual file),
 *       and never point to a hardlink pointer.
 *
 * NOTE: If caller passes HAMMER2_RESOLVE_RDONLY the exclusive locking code
 *       will feel free to reduce the chain set in the cluster as an
 *       optimization.  It will still be validated against the quorum if
 *       appropriate, but the optimization might be able to reduce data
 *       accesses to one node.  This flag is automatically set if the inode
 *       is locked with HAMMER2_RESOLVE_SHARED.
 */
void
hammer2_inode_lock(hammer2_inode_t *ip, int how)
{
        hammer2_inode_ref(ip);

        /*
         * Inode structure mutex
         */
        if (how & HAMMER2_RESOLVE_SHARED) {
                /*how |= HAMMER2_RESOLVE_RDONLY; not used */
                hammer2_mtx_sh(&ip->lock);
        } else {
                hammer2_mtx_ex(&ip->lock);
        }
}

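/*
 * Example usage (sketch): a typical front-end path locks the inode around
 * meta-data access and pairs the lock with hammer2_inode_unlock():
 *
 *      hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
 *      ... examine or modify ip->meta under the lock ...
 *      hammer2_inode_unlock(ip);
 *
 * Readers that do not modify the inode may pass HAMMER2_RESOLVE_SHARED
 * instead to take the mutex shared.
 */
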
/*
 * Select a chain out of an inode's cluster and lock it.
 *
 * The inode does not have to be locked.
 */
hammer2_chain_t *
hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how)
{
        hammer2_chain_t *chain;
        hammer2_cluster_t *cluster;

        hammer2_spin_sh(&ip->cluster_spin);
        cluster = ip->cluster_cache;
        if (cluster) {
                if (clindex >= cluster->nchains)
                        chain = NULL;
                else
                        chain = cluster->array[clindex].chain;
                if (chain) {
                        hammer2_chain_ref(chain);
                        hammer2_spin_unsh(&ip->cluster_spin);
                        hammer2_chain_lock(chain, how);
                        return chain;
                }
        }

        cluster = &ip->cluster;
        if (clindex >= cluster->nchains)
                chain = NULL;
        else
                chain = cluster->array[clindex].chain;
        if (chain) {
                hammer2_chain_ref(chain);
                hammer2_spin_unsh(&ip->cluster_spin);
                hammer2_chain_lock(chain, how);
        } else {
                hammer2_spin_unsh(&ip->cluster_spin);
        }
        return chain;
}

/*
 * Select a chain out of an inode's cluster, lock it, and also lock and
 * return its parent via *parentp.
 */
hammer2_chain_t *
hammer2_inode_chain_and_parent(hammer2_inode_t *ip, int clindex,
                               hammer2_chain_t **parentp, int how)
{
        hammer2_chain_t *chain;
        hammer2_chain_t *parent;

        for (;;) {
                hammer2_spin_sh(&ip->cluster_spin);
                if (clindex >= ip->cluster.nchains)
                        chain = NULL;
                else
                        chain = ip->cluster.array[clindex].chain;
                if (chain) {
                        hammer2_chain_ref(chain);
                        hammer2_spin_unsh(&ip->cluster_spin);
                        hammer2_chain_lock(chain, how);
                } else {
                        hammer2_spin_unsh(&ip->cluster_spin);
                }

                /*
                 * Get parent, lock order must be (parent, chain).
                 */
                parent = chain->parent;
                hammer2_chain_ref(parent);
                hammer2_chain_unlock(chain);
                hammer2_chain_lock(parent, how);
                hammer2_chain_lock(chain, how);
                if (ip->cluster.array[clindex].chain == chain &&
                    chain->parent == parent) {
                        break;
                }

                /*
                 * Stale, retry after releasing our locks and refs.
                 */
                hammer2_chain_unlock(chain);
                hammer2_chain_drop(chain);
                hammer2_chain_unlock(parent);
                hammer2_chain_drop(parent);
        }
        *parentp = parent;

        return chain;
}

/*
 * Release the inode lock obtained via hammer2_inode_lock() and drop the
 * ref it acquired.
 */
void
hammer2_inode_unlock(hammer2_inode_t *ip)
{
        hammer2_mtx_unlock(&ip->lock);
        hammer2_inode_drop(ip);
}

/*
 * Temporarily release a lock held shared or exclusive.  Caller must
 * hold the lock shared or exclusive on call and lock will be released
 * on return.
 *
 * Restore a lock that was temporarily released.
 */
hammer2_mtx_state_t
hammer2_inode_lock_temp_release(hammer2_inode_t *ip)
{
        return hammer2_mtx_temp_release(&ip->lock);
}

void
hammer2_inode_lock_temp_restore(hammer2_inode_t *ip, hammer2_mtx_state_t ostate)
{
        hammer2_mtx_temp_restore(&ip->lock, ostate);
}

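/*
 * Example (sketch): the temp-release pair brackets operations that might
 * deadlock against the inode lock, as hammer2_igetv() does around vget():
 *
 *      hammer2_mtx_state_t ostate;
 *
 *      ostate = hammer2_inode_lock_temp_release(ip);
 *      ... blocking operation that may need the inode lock ...
 *      hammer2_inode_lock_temp_restore(ip, ostate);
 */
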
/*
 * Upgrade a shared inode lock to exclusive and return.  If the inode lock
 * is already held exclusively this is a NOP.
 *
 * The caller MUST hold the inode lock either shared or exclusive on call
 * and will own the lock exclusively on return.
 *
 * Returns non-zero if the lock was already exclusive prior to the upgrade.
 */
int
hammer2_inode_lock_upgrade(hammer2_inode_t *ip)
{
        int wasexclusive;

        if (mtx_islocked_ex(&ip->lock)) {
                wasexclusive = 1;
        } else {
                hammer2_mtx_unlock(&ip->lock);
                hammer2_mtx_ex(&ip->lock);
                wasexclusive = 0;
        }
        return wasexclusive;
}

/*
 * Downgrade an inode lock from exclusive to shared only if the inode
 * lock was previously shared.  If the inode lock was previously exclusive,
 * this is a NOP.
 */
void
hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int wasexclusive)
{
        if (wasexclusive == 0)
                mtx_downgrade(&ip->lock);
}

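/*
 * Example (sketch): the upgrade/downgrade pair brackets a section that needs
 * exclusive access while preserving the caller's original lock state, as
 * hammer2_igetv() does when associating a new vnode:
 *
 *      int wasexclusive;
 *
 *      wasexclusive = hammer2_inode_lock_upgrade(ip);
 *      ... exclusive-only work ...
 *      hammer2_inode_lock_downgrade(ip, wasexclusive);
 */
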
/*
 * Lookup an inode by inode number
 */
hammer2_inode_t *
hammer2_inode_lookup(hammer2_pfs_t *pmp, hammer2_tid_t inum)
{
        hammer2_inode_t *ip;

        KKASSERT(pmp);
        if (pmp->spmp_hmp) {
                ip = NULL;
        } else {
                hammer2_spin_ex(&pmp->inum_spin);
                ip = RB_LOOKUP(hammer2_inode_tree, &pmp->inum_tree, inum);
                if (ip)
                        hammer2_inode_ref(ip);
                hammer2_spin_unex(&pmp->inum_spin);
        }
        return (ip);
}

/*
 * Adding a ref to an inode is only legal if the inode already has at least
 * one ref.
 *
 * (can be called with spinlock held)
 */
void
hammer2_inode_ref(hammer2_inode_t *ip)
{
        atomic_add_int(&ip->refs, 1);
        if (hammer2_debug & 0x80000) {
                kprintf("INODE+1 %p (%d->%d)\n", ip, ip->refs - 1, ip->refs);
        }
}

/*
 * Drop an inode reference, freeing the inode when the last reference goes
 * away.
 */
void
hammer2_inode_drop(hammer2_inode_t *ip)
{
        hammer2_pfs_t *pmp;
        u_int refs;

        while (ip) {
                if (hammer2_debug & 0x80000) {
                        kprintf("INODE-1 %p (%d->%d)\n",
                                ip, ip->refs, ip->refs - 1);
                }
                refs = ip->refs;
                if (refs == 1) {
                        /*
                         * Transition to zero, must interlock with
                         * the inode inumber lookup tree (if applicable).
                         * It should not be possible for anyone to race
                         * the transition to 0.
                         */
                        pmp = ip->pmp;
                        hammer2_spin_ex(&pmp->inum_spin);
                        if (atomic_cmpset_int(&ip->refs, 1, 0)) {
                                KKASSERT(hammer2_mtx_refs(&ip->lock) == 0);
                                if (ip->flags & HAMMER2_INODE_ONRBTREE) {
                                        atomic_clear_int(&ip->flags,
                                                     HAMMER2_INODE_ONRBTREE);
                                        RB_REMOVE(hammer2_inode_tree,
                                                  &pmp->inum_tree, ip);
                                }
                                hammer2_spin_unex(&pmp->inum_spin);

                                /*
                                 * Clean out the cluster cache
                                 */
                                if (ip->cluster_cache) {
                                        hammer2_cluster_t *tmpclu;

                                        tmpclu = ip->cluster_cache;
                                        ip->cluster_cache = NULL;
                                        hammer2_cluster_drop(tmpclu);
                                }

                                /*
                                 * Cleaning out ip->cluster isn't entirely
                                 * trivial.
                                 */
                                hammer2_inode_repoint(ip, NULL, NULL);
                                kfree(ip, pmp->minode);
                                atomic_add_long(&pmp->inmem_inodes, -1);
                                ip = NULL;      /* will terminate loop */
                        } else {
                                hammer2_spin_unex(&ip->pmp->inum_spin);
                        }
                } else {
                        /*
                         * Non-zero transition
                         */
                        if (atomic_cmpset_int(&ip->refs, refs, refs - 1))
                                break;
                }
        }
}

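/*
 * Example (sketch): every hammer2_inode_ref() must eventually be balanced
 * by a hammer2_inode_drop(); the sideq and vnode associations in this file
 * follow the same rule:
 *
 *      hammer2_inode_ref(ip);
 *      ... hand ip to another structure or thread ...
 *      hammer2_inode_drop(ip);
 */
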
/*
 * Get the vnode associated with the given inode, allocating the vnode if
 * necessary.  The vnode will be returned exclusively locked.
 *
 * The caller must lock the inode (shared or exclusive).
 *
 * Great care must be taken to avoid deadlocks and vnode acquisition/reclaim
 * races.
 */
struct vnode *
hammer2_igetv(hammer2_inode_t *ip, int *errorp)
{
        hammer2_pfs_t *pmp;
        struct vnode *vp;

        pmp = ip->pmp;
        KKASSERT(pmp != NULL);
        *errorp = 0;

        for (;;) {
                /*
                 * Attempt to reuse an existing vnode assignment.  It is
                 * possible to race a reclaim so the vget() may fail.  The
                 * inode must be unlocked during the vget() to avoid a
                 * deadlock against a reclaim.
                 */
                int wasexclusive;

                vp = ip->vp;
                if (vp) {
                        /*
                         * Inode must be unlocked during the vget() to avoid
                         * possible deadlocks, but leave the ip ref intact.
                         *
                         * vnode is held to prevent destruction during the
                         * vget().  The vget() can still fail if we lost
                         * a reclaim race on the vnode.
                         */
                        hammer2_mtx_state_t ostate;

                        vhold(vp);
                        ostate = hammer2_inode_lock_temp_release(ip);
                        if (vget(vp, LK_EXCLUSIVE)) {
                                vdrop(vp);
                                hammer2_inode_lock_temp_restore(ip, ostate);
                                continue;
                        }
                        hammer2_inode_lock_temp_restore(ip, ostate);
                        vdrop(vp);
                        /* vp still locked and ref from vget */
                        if (ip->vp != vp) {
                                kprintf("hammer2: igetv race %p/%p\n",
                                        ip->vp, vp);
                                vput(vp);
                                continue;
                        }
                        break;
                }

                /*
                 * No vnode exists, allocate a new vnode.  Beware of
                 * allocation races.  This function will return an
                 * exclusively locked and referenced vnode.
                 */
                *errorp = getnewvnode(VT_HAMMER2, pmp->mp, &vp, 0, 0);
                if (*errorp) {
                        kprintf("hammer2: igetv getnewvnode failed %d\n",
                                *errorp);
                        vp = NULL;
                        break;
                }

                /*
                 * Lock the inode and check for an allocation race.
                 */
                wasexclusive = hammer2_inode_lock_upgrade(ip);
                if (ip->vp != NULL) {
                        vp->v_type = VBAD;
                        vx_put(vp);
                        hammer2_inode_lock_downgrade(ip, wasexclusive);
                        continue;
                }

                switch (ip->meta.type) {
                case HAMMER2_OBJTYPE_DIRECTORY:
                        vp->v_type = VDIR;
                        break;
                case HAMMER2_OBJTYPE_REGFILE:
                        vp->v_type = VREG;
                        vinitvmio(vp, ip->meta.size, HAMMER2_LBUFSIZE,
                                  (int)ip->meta.size & HAMMER2_LBUFMASK);
                        break;
                case HAMMER2_OBJTYPE_SOFTLINK:
                        /*
                         * XXX for now we are using the generic file_read
                         * and file_write code so we need a buffer cache
                         * association.
                         */
                        vp->v_type = VLNK;
                        vinitvmio(vp, ip->meta.size, HAMMER2_LBUFSIZE,
                                  (int)ip->meta.size & HAMMER2_LBUFMASK);
                        break;
                case HAMMER2_OBJTYPE_CDEV:
                        vp->v_type = VCHR;
                        /* fall through */
                case HAMMER2_OBJTYPE_BDEV:
                        vp->v_ops = &pmp->mp->mnt_vn_spec_ops;
                        if (ip->meta.type != HAMMER2_OBJTYPE_CDEV)
                                vp->v_type = VBLK;
                        addaliasu(vp, ip->meta.rmajor, ip->meta.rminor);
                        break;
                case HAMMER2_OBJTYPE_FIFO:
                        vp->v_type = VFIFO;
                        vp->v_ops = &pmp->mp->mnt_vn_fifo_ops;
                        break;
                case HAMMER2_OBJTYPE_SOCKET:
                        vp->v_type = VSOCK;
                        break;
                default:
                        panic("hammer2: unhandled objtype %d",
                              ip->meta.type);
                        break;
                }

                if (ip == pmp->iroot)
                        vsetflags(vp, VROOT);

                vp->v_data = ip;
                ip->vp = vp;
                hammer2_inode_ref(ip);          /* vp association */
                hammer2_inode_lock_downgrade(ip, wasexclusive);
                break;
        }

        /*
         * Return non-NULL vp and *errorp == 0, or NULL vp and *errorp != 0.
         */
        if (hammer2_debug & 0x0002) {
                kprintf("igetv vp %p refs 0x%08x aux 0x%08x\n",
                        vp, vp->v_refcnt, vp->v_auxrefs);
        }
        return (vp);
}

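/*
 * Example usage (sketch): callers obtain the vnode while holding the inode
 * lock and then release the inode lock; the vnode comes back exclusively
 * locked:
 *
 *      hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
 *      vp = hammer2_igetv(ip, &error);
 *      hammer2_inode_unlock(ip);
 *      if (vp == NULL)
 *              ... handle error ...
 */
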
/*
 * Returns the inode associated with the passed-in cluster, creating the
 * inode if necessary and synchronizing it to the passed-in cluster otherwise.
 * When synchronizing, if idx >= 0, only cluster index (idx) is synchronized.
 * Otherwise the whole cluster is synchronized.
 *
 * The passed-in cluster must be locked and will remain locked on return.
 * The returned inode will be locked and the caller may dispose of both
 * via hammer2_inode_unlock() + hammer2_inode_drop().  However, if the caller
 * needs to resolve a hardlink it must ref/unlock/relock/drop the inode.
 *
 * The hammer2_inode structure regulates the interface between the high level
 * kernel VNOPS API and the filesystem backend (the chains).
 *
 * On return the inode is locked with the supplied cluster.
 */
hammer2_inode_t *
hammer2_inode_get(hammer2_pfs_t *pmp, hammer2_inode_t *dip,
                  hammer2_cluster_t *cluster, int idx)
{
        hammer2_inode_t *nip;
        const hammer2_inode_data_t *iptmp;
        const hammer2_inode_data_t *nipdata;

        KKASSERT(cluster == NULL ||
                 hammer2_cluster_type(cluster) == HAMMER2_BREF_TYPE_INODE);
        KKASSERT(pmp);

        /*
         * Interlocked lookup/ref of the inode.  This code is only needed
         * when looking up inodes with nlinks != 0 (TODO: optimize out
         * otherwise and test for duplicates).
         *
         * Cluster can be NULL during the initial pfs allocation.
         */
again:
        while (cluster) {
                iptmp = &hammer2_cluster_rdata(cluster)->ipdata;
                nip = hammer2_inode_lookup(pmp, iptmp->meta.inum);
                if (nip == NULL)
                        break;

                hammer2_mtx_ex(&nip->lock);

                /*
                 * Handle SMP race (not applicable to the super-root spmp
                 * which can't index inodes due to duplicative inode numbers).
                 */
                if (pmp->spmp_hmp == NULL &&
                    (nip->flags & HAMMER2_INODE_ONRBTREE) == 0) {
                        hammer2_mtx_unlock(&nip->lock);
                        hammer2_inode_drop(nip);
                        continue;
                }
                if (idx >= 0)
                        hammer2_inode_repoint_one(nip, cluster, idx);
                else
                        hammer2_inode_repoint(nip, NULL, cluster);
                return nip;
        }

        /*
         * We couldn't find the inode number, create a new inode.
         */
        nip = kmalloc(sizeof(*nip), pmp->minode, M_WAITOK | M_ZERO);
        spin_init(&nip->cluster_spin, "h2clspin");
        atomic_add_long(&pmp->inmem_inodes, 1);
        hammer2_pfs_memory_inc(pmp);
        hammer2_pfs_memory_wakeup(pmp);
        if (pmp->spmp_hmp)
                nip->flags = HAMMER2_INODE_SROOT;

        /*
         * Initialize nip's cluster.  A cluster is provided for normal
         * inodes but typically not for the super-root or PFS inodes.
         */
        nip->cluster.refs = 1;
        nip->cluster.pmp = pmp;
        nip->cluster.flags |= HAMMER2_CLUSTER_INODE;
        if (cluster) {
                nipdata = &hammer2_cluster_rdata(cluster)->ipdata;
                nip->meta = nipdata->meta;
                atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD);
                hammer2_inode_repoint(nip, NULL, cluster);
        } else {
                nip->meta.inum = 1;             /* PFS inum is always 1 XXX */
                /* mtime will be updated when a cluster is available */
                atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD);/*XXX*/
        }

        nip->pmp = pmp;

        /*
         * ref and lock on nip gives it state compatible to after a
         * hammer2_inode_lock() call.
         */
        nip->refs = 1;
        hammer2_mtx_init(&nip->lock, "h2inode");
        hammer2_mtx_ex(&nip->lock);
        /* combination of thread lock and chain lock == inode lock */

        /*
         * Attempt to add the inode.  If it fails we raced another inode
         * get.  Undo all the work and try again.
         */
        if (pmp->spmp_hmp == NULL) {
                hammer2_spin_ex(&pmp->inum_spin);
                if (RB_INSERT(hammer2_inode_tree, &pmp->inum_tree, nip)) {
                        hammer2_spin_unex(&pmp->inum_spin);
                        hammer2_mtx_unlock(&nip->lock);
                        hammer2_inode_drop(nip);
                        goto again;
                }
                atomic_set_int(&nip->flags, HAMMER2_INODE_ONRBTREE);
                hammer2_spin_unex(&pmp->inum_spin);
        }

        return (nip);
}

/*
 * Create a new inode in the specified directory using the vattr to
 * figure out the type.  A non-zero type field overrides vattr.
 *
 * If no error occurs the new inode with its cluster locked is returned.
 * However, when creating an OBJTYPE_HARDLINK, the caller can assume
 * that NULL will be returned (that is, the caller already has the inode
 * in-hand and is creating a hardlink to it, we do not need to return a
 * representative ip).
 *
 * If vap and/or cred are NULL the related fields are not set and the
 * inode type defaults to a directory.  This is used when creating PFSs
 * under the super-root, so the inode number is set to 1 in this case.
 *
 * dip is not locked on entry.
 *
 * NOTE: This function is used to create all manners of inodes, including
 *	 super-root entries for snapshots and PFSs.  When used to create a
 *	 snapshot the inode will be temporarily associated with the spmp.
 *
 * NOTE: When creating a normal file or directory the caller must call this
 *	 function twice, once to create the actual inode and once to create
 *	 the hardlink representing the directory entry.  This function is
 *	 only called once when creating a softlink; the softlink inode
 *	 doubles as the directory entry.
 *
 * NOTE: When creating a hardlink target (a real inode), name/name_len is
 *	 passed as NULL/0, and the caller should pass lhc as inum.
 */
hammer2_inode_t *
hammer2_inode_create(hammer2_inode_t *dip, hammer2_inode_t *pip,
                     struct vattr *vap, struct ucred *cred,
                     const uint8_t *name, size_t name_len, hammer2_key_t lhc,
                     hammer2_key_t inum,
                     uint8_t type, uint8_t target_type,
                     int flags, int *errorp)
{
        hammer2_xop_create_t *xop;
        hammer2_inode_t *nip;
        int error;
        uid_t xuid;
        uuid_t pip_uid;
        uuid_t pip_gid;
        uint32_t pip_mode;
        uint8_t pip_comp_algo;
        uint8_t pip_check_algo;
        hammer2_tid_t pip_inum;

        if (name)
                lhc = hammer2_dirhash(name, name_len);
        *errorp = 0;
        nip = NULL;

        /*
         * Locate the inode or indirect block to create the new
         * entry in.  At the same time check for key collisions
         * and iterate until we don't get one.
         *
         * Lock the directory exclusively for now to guarantee that
         * we can find an unused lhc for the name.  Due to collisions,
         * two different creates can end up with the same lhc so we
         * cannot depend on the OS to prevent the collision.
         */
        hammer2_inode_lock(dip, 0);

        pip_uid = pip->meta.uid;
        pip_gid = pip->meta.gid;
        pip_mode = pip->meta.mode;
        pip_comp_algo = pip->meta.comp_algo;
        pip_check_algo = pip->meta.check_algo;
        pip_inum = (pip == pip->pmp->iroot) ? 0 : pip->meta.inum;

        /*
         * If name specified, locate an unused key in the collision space.
         * Otherwise use the passed-in lhc directly.
         */
        if (name) {
                hammer2_xop_scanlhc_t *sxop;
                hammer2_key_t lhcbase;

                lhcbase = lhc;
                sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
                sxop->lhc = lhc;
                hammer2_xop_start(&sxop->head, hammer2_xop_scanlhc);
                while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
                        if (lhc != sxop->head.cluster.focus->bref.key)
                                break;
                        ++lhc;
                }
                hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

                if (error) {
                        if (error != ENOENT) {
                                *errorp = error;
                                goto done2;
                        }
                        ++lhc;
                        error = 0;
                }
                if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
                        *errorp = ENOSPC;
                        goto done2;
                }
        }

        /*
         * Create the inode with the lhc as the key.
         */
        xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
        xop->lhc = lhc;
        xop->flags = flags;
        bzero(&xop->meta, sizeof(xop->meta));

        if (vap) {
                xop->meta.type = hammer2_get_obj_type(vap->va_type);

                switch (xop->meta.type) {
                case HAMMER2_OBJTYPE_CDEV:
                case HAMMER2_OBJTYPE_BDEV:
                        xop->meta.rmajor = vap->va_rmajor;
                        xop->meta.rminor = vap->va_rminor;
                        break;
                default:
                        break;
                }
                type = xop->meta.type;
        } else {
                xop->meta.type = type;
                xop->meta.target_type = target_type;
        }
        xop->meta.inum = inum;
        xop->meta.iparent = pip_inum;

        /* Inherit parent's inode compression mode. */
        xop->meta.comp_algo = pip_comp_algo;
        xop->meta.check_algo = pip_check_algo;
        xop->meta.version = HAMMER2_INODE_VERSION_ONE;
        hammer2_update_time(&xop->meta.ctime);
        xop->meta.mtime = xop->meta.ctime;
        if (vap)
                xop->meta.mode = vap->va_mode;
        xop->meta.nlinks = 1;
        if (vap) {
                if (dip->pmp) {
                        xuid = hammer2_to_unix_xid(&pip_uid);
                        xuid = vop_helper_create_uid(dip->pmp->mp,
                                                     pip_mode,
                                                     xuid,
                                                     cred,
                                                     &vap->va_mode);
                } else {
                        /* super-root has no dip and/or pmp */
                        xuid = 0;
                }

                if (vap->va_vaflags & VA_UID_UUID_VALID)
                        xop->meta.uid = vap->va_uid_uuid;
                else if (vap->va_uid != (uid_t)VNOVAL)
                        hammer2_guid_to_uuid(&xop->meta.uid, vap->va_uid);
                else
                        hammer2_guid_to_uuid(&xop->meta.uid, xuid);

                if (vap->va_vaflags & VA_GID_UUID_VALID)
                        xop->meta.gid = vap->va_gid_uuid;
                else if (vap->va_gid != (gid_t)VNOVAL)
                        hammer2_guid_to_uuid(&xop->meta.gid, vap->va_gid);
                else
                        xop->meta.gid = pip_gid;
        }

        /*
         * Regular files and softlinks allow a small amount of data to be
         * directly embedded in the inode.  This flag will be cleared if
         * the size is extended past the embedded limit.
         */
        if (xop->meta.type == HAMMER2_OBJTYPE_REGFILE ||
            xop->meta.type == HAMMER2_OBJTYPE_SOFTLINK ||
            xop->meta.type == HAMMER2_OBJTYPE_HARDLINK) {
                xop->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
        }
        if (name) {
                hammer2_xop_setname(&xop->head, name, name_len);
        } else {
                name_len = hammer2_xop_setname_inum(&xop->head, inum);
                KKASSERT(lhc == inum);
        }
        xop->meta.name_len = name_len;
        xop->meta.name_key = lhc;
        KKASSERT(name_len < HAMMER2_INODE_MAXNAME);

        hammer2_xop_start(&xop->head, hammer2_inode_xop_create);

        error = hammer2_xop_collect(&xop->head, 0);
        if (hammer2_debug & 0x0001)
                kprintf("CREATE INODE %*.*s\n",
                        (int)name_len, (int)name_len, name);

        if (error) {
                *errorp = error;
                goto done;
        }

        /*
         * Set up the new inode if not a hardlink pointer.
         *
         * NOTE: *_get() integrates chain's lock into the inode lock.
         *
         * NOTE: Only one new inode can currently be created per
         *	 transaction.  If the need arises we can adjust
         *	 hammer2_trans_init() to allow more.
         *
         * NOTE: nipdata will have chain's blockset data.
         */
        if (type != HAMMER2_OBJTYPE_HARDLINK) {
                nip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
                nip->comp_heuristic = 0;
        } else {
                nip = NULL;
        }

done:
        hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
done2:
        hammer2_inode_unlock(dip);

        return (nip);
}

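/*
 * Example (sketch): a front-end create path typically allocates an inode
 * number first and then calls this helper; the exact argument mix depends
 * on whether a named entry or a hardlink target is being created (see the
 * NOTEs above).  Roughly, assuming the caller already holds a transaction
 * and has a new inode number in `inum`:
 *
 *      nip = hammer2_inode_create(dip, dip, vap, cred,
 *                                 name, name_len, 0, inum,
 *                                 0, 0, 0, &error);
 */
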
/*
 * Repoint ip->cluster's chains to cluster's chains and fixup the default
 * focus.  All items, valid or invalid, are repointed.  hammer2_xop_start()
 * filters out invalid or non-matching elements.
 *
 * Caller must hold the inode and cluster exclusive locked, if not NULL,
 * must also be locked.
 *
 * Cluster may be NULL to clean out any chains in ip->cluster.
 */
void
hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_inode_t *pip,
                      hammer2_cluster_t *cluster)
{
        hammer2_chain_t *dropch[HAMMER2_MAXCLUSTER];
        hammer2_chain_t *ochain;
        hammer2_chain_t *nchain;
        int i;

        bzero(dropch, sizeof(dropch));

        /*
         * Replace chains in ip->cluster with chains from cluster and
         * adjust the focus if necessary.
         *
         * NOTE: nchain and/or ochain can be NULL due to gaps
         *	 in the cluster arrays.
         */
        hammer2_spin_ex(&ip->cluster_spin);
        for (i = 0; cluster && i < cluster->nchains; ++i) {
                /*
                 * Do not replace elements which are the same.  Also handle
                 * element count discrepancies.
                 */
                nchain = cluster->array[i].chain;
                if (i < ip->cluster.nchains) {
                        ochain = ip->cluster.array[i].chain;
                        if (ochain == nchain)
                                continue;
                } else {
                        ochain = NULL;
                }

                /*
                 * Make adjustments
                 */
                ip->cluster.array[i].chain = nchain;
                ip->cluster.array[i].flags &= ~HAMMER2_CITEM_INVALID;
                ip->cluster.array[i].flags |= cluster->array[i].flags &
                                              HAMMER2_CITEM_INVALID;
                if (nchain)
                        hammer2_chain_ref(nchain);
                dropch[i] = ochain;
        }

        /*
         * Release any left-over chains in ip->cluster.
         */
        while (i < ip->cluster.nchains) {
                nchain = ip->cluster.array[i].chain;
                if (nchain) {
                        ip->cluster.array[i].chain = NULL;
                        ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
                }
                dropch[i] = nchain;
                ++i;
        }

        /*
         * Fixup fields.  Note that the inode-embedded cluster is never
         * directly locked.
         */
        if (cluster) {
                ip->cluster.nchains = cluster->nchains;
                ip->cluster.focus = cluster->focus;
                ip->cluster.flags = cluster->flags & ~HAMMER2_CLUSTER_LOCKED;
        } else {
                ip->cluster.nchains = 0;
                ip->cluster.focus = NULL;
                ip->cluster.flags &= ~HAMMER2_CLUSTER_ZFLAGS;
        }
        hammer2_spin_unex(&ip->cluster_spin);

        /*
         * Cleanup outside of spinlock
         */
        while (--i >= 0) {
                if (dropch[i])
                        hammer2_chain_drop(dropch[i]);
        }
}

/*
 * Repoint a single element from the cluster to the ip.  Used by the
 * synchronization threads to piecemeal update inodes.  Does not change
 * focus and requires inode to be re-locked to clean-up flags (XXX).
 */
void
hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
                          int idx)
{
        hammer2_chain_t *ochain;
        hammer2_chain_t *nchain;
        int i;

        hammer2_spin_ex(&ip->cluster_spin);
        KKASSERT(idx < cluster->nchains);
        if (idx < ip->cluster.nchains) {
                ochain = ip->cluster.array[idx].chain;
                nchain = cluster->array[idx].chain;
        } else {
                ochain = NULL;
                nchain = cluster->array[idx].chain;

                /*
                 * Mark any newly exposed slots invalid before extending
                 * the array.
                 */
                for (i = ip->cluster.nchains; i <= idx; ++i) {
                        bzero(&ip->cluster.array[i],
                              sizeof(ip->cluster.array[i]));
                        ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
                }
                ip->cluster.nchains = idx + 1;
        }
        if (ochain != nchain) {
                /*
                 * Make adjustments.
                 */
                ip->cluster.array[idx].chain = nchain;
                ip->cluster.array[idx].flags &= ~HAMMER2_CITEM_INVALID;
                ip->cluster.array[idx].flags |= cluster->array[idx].flags &
                                                HAMMER2_CITEM_INVALID;
        }
        hammer2_spin_unex(&ip->cluster_spin);
        if (ochain != nchain) {
                if (nchain)
                        hammer2_chain_ref(nchain);
                if (ochain)
                        hammer2_chain_drop(ochain);
        }
}

/*
 * Called with a locked inode to finish unlinking an inode after xop_unlink
 * had been run.  This function is responsible for decrementing nlinks.
 *
 * We don't bother decrementing nlinks if the file is not open and this was
 * the last link.
 *
 * If the inode is a hardlink target its chain has not yet been deleted,
 * otherwise its chain has been deleted.
 *
 * If isopen then any prior deletion was not permanent and the inode is
 * left intact with nlinks == 0.
 */
int
hammer2_inode_unlink_finisher(hammer2_inode_t *ip, int isopen)
{
        int error;

        /*
         * Decrement nlinks.  If this is the last link and the file is
         * not open we can just delete the inode and not bother dropping
         * nlinks to 0 (avoiding unnecessary block updates).
         */
        if (ip->meta.nlinks == 1) {
                atomic_set_int(&ip->flags, HAMMER2_INODE_ISUNLINKED);
                if (isopen == 0)
                        goto killit;
        }

        hammer2_inode_modify(ip);
        --ip->meta.nlinks;
        if ((int64_t)ip->meta.nlinks < 0)
                ip->meta.nlinks = 0;    /* safety */

        /*
         * If nlinks is not zero we are done.  However, this should only be
         * possible with a hardlink target.  If the inode is an embedded
         * hardlink nlinks should have dropped to zero, warn and proceed
         * with the next step.
         */
        if (ip->meta.nlinks) {
                if ((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0)
                        return (0);
                kprintf("hammer2_inode_unlink: nlinks was not 0 (%jd)\n",
                        (intmax_t)ip->meta.nlinks);
        }

        /*
         * nlinks is now zero, delete the inode if not open.
         */
        error = 0;
        if (isopen == 0) {
                hammer2_xop_destroy_t *xop;

killit:
                atomic_set_int(&ip->flags, HAMMER2_INODE_ISDELETED);
                xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
                hammer2_xop_start(&xop->head,
                                  hammer2_inode_xop_destroy);
                error = hammer2_xop_collect(&xop->head, 0);
                hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
        }

        return (error);
}

/*
 * Mark an inode as being modified, meaning that the caller will modify
 * ip->meta.
 *
 * If a vnode is present we set the vnode dirty and the nominal filesystem
 * sync will also handle synchronizing the inode meta-data.  If no vnode
 * is present we must ensure that the inode is on pmp->sideq.
 *
 * NOTE: No mtid (modify_tid) is passed into this routine.  The caller is
 *	 only modifying the in-memory inode.  A modify_tid is synchronized
 *	 later when the inode gets flushed.
 */
void
hammer2_inode_modify(hammer2_inode_t *ip)
{
        hammer2_pfs_t *pmp;

        atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
        if (ip->vp) {
                vsetisdirty(ip->vp);
        } else if ((pmp = ip->pmp) != NULL) {
                hammer2_inode_delayed_sideq(ip);
        }
}

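/*
 * Example usage (sketch): front-end code that updates ip->meta inside a
 * transaction marks the inode modified first and relies on a later
 * hammer2_inode_chain_sync() to push the changes into the chains:
 *
 *      hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
 *      hammer2_inode_modify(ip);
 *      ip->meta.mtime = new_mtime;     (new_mtime is illustrative)
 *      hammer2_inode_chain_sync(ip);
 *      hammer2_inode_unlock(ip);
 */
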
/*
 * Synchronize the inode's frontend state with the chain state prior
 * to any explicit flush of the inode or any strategy write call.
 *
 * Called with a locked inode inside a transaction.
 */
void
hammer2_inode_chain_sync(hammer2_inode_t *ip)
{
        if (ip->flags & (HAMMER2_INODE_RESIZED | HAMMER2_INODE_MODIFIED)) {
                hammer2_xop_fsync_t *xop;
                int error;

                xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
                xop->clear_directdata = 0;
                if (ip->flags & HAMMER2_INODE_RESIZED) {
                        if ((ip->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) &&
                            ip->meta.size > HAMMER2_EMBEDDED_BYTES) {
                                ip->meta.op_flags &= ~HAMMER2_OPFLAG_DIRECTDATA;
                                xop->clear_directdata = 1;
                        }
                        xop->osize = ip->osize;
                } else {
                        xop->osize = ip->meta.size;     /* safety */
                }
                xop->ipflags = ip->flags;
                xop->meta = ip->meta;

                atomic_clear_int(&ip->flags, HAMMER2_INODE_RESIZED |
                                             HAMMER2_INODE_MODIFIED);
                hammer2_xop_start(&xop->head, hammer2_inode_xop_chain_sync);
                error = hammer2_xop_collect(&xop->head, 0);
                hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
                if (error == ENOENT)
                        error = 0;
                if (error) {
                        kprintf("hammer2: unable to fsync inode %p\n", ip);
                        /*
                        atomic_set_int(&ip->flags,
                                       xop->ipflags & (HAMMER2_INODE_RESIZED |
                                                       HAMMER2_INODE_MODIFIED));
                        */
                        /* XXX return error somehow? */
                }
        }
}

/*
 * The normal filesystem sync no longer has visibility to an inode structure
 * after its vnode has been reclaimed.  In this situation an unlinked-but-open
 * inode or a dirty inode may require additional processing to synchronize
 * ip->meta to its underlying cluster nodes.
 *
 * In particular, reclaims can occur in almost any state (for example, when
 * doing operations on unrelated vnodes) and flushing the reclaimed inode
 * in the reclaim path itself is a non-starter.
 *
 * Caller must be in a transaction.
 */
void
hammer2_inode_run_sideq(hammer2_pfs_t *pmp)
{
        hammer2_xop_destroy_t *xop;
        hammer2_inode_sideq_t *ipul;
        hammer2_inode_t *ip;
        int error;

        if (TAILQ_EMPTY(&pmp->sideq))
                return;

        hammer2_spin_ex(&pmp->list_spin);
        while ((ipul = TAILQ_FIRST(&pmp->sideq)) != NULL) {
                TAILQ_REMOVE(&pmp->sideq, ipul, entry);
                ip = ipul->ip;
                KKASSERT(ip->flags & HAMMER2_INODE_ONSIDEQ);
                atomic_clear_int(&ip->flags, HAMMER2_INODE_ONSIDEQ);
                hammer2_spin_unex(&pmp->list_spin);
                kfree(ipul, pmp->minode);

                hammer2_inode_lock(ip, 0);
                if (ip->flags & HAMMER2_INODE_ISUNLINKED) {
                        /*
                         * The inode was unlinked while open.  The inode must
                         * be deleted and destroyed.
                         */
                        xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
                        hammer2_xop_start(&xop->head,
                                          hammer2_inode_xop_destroy);
                        error = hammer2_xop_collect(&xop->head, 0);
                        hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
                } else {
                        /*
                         * The inode was dirty as-of the reclaim, requiring
                         * synchronization of ip->meta with its underlying
                         * chains.
                         */
                        hammer2_inode_chain_sync(ip);
                }
                hammer2_inode_unlock(ip);
                hammer2_inode_drop(ip);                 /* ipul ref */

                hammer2_spin_ex(&pmp->list_spin);
        }
        hammer2_spin_unex(&pmp->list_spin);
}

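/*
 * Example (sketch): the VFS sync path would typically drain the sideq from
 * inside a transaction, roughly:
 *
 *      hammer2_trans_init(pmp, 0);
 *      hammer2_inode_run_sideq(pmp);
 *      ...
 *      hammer2_trans_done(pmp);
 *
 * (hammer2_trans_init/done are shown with simplified arguments.)
 */
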
/*
 * Inode create helper (threaded, backend)
 *
 * Used by ncreate, nmknod, nsymlink, nmkdir.
 * Used by nlink and rename to create HARDLINK pointers.
 *
 * Frontend holds the parent directory ip locked exclusively.  We
 * create the inode and feed the exclusively locked chain to the
 * frontend.
 */
void
hammer2_inode_xop_create(hammer2_xop_t *arg, int clindex)
{
        hammer2_xop_create_t *xop = &arg->xop_create;
        hammer2_chain_t *parent;
        hammer2_chain_t *chain;
        hammer2_key_t key_next;
        int cache_index = -1;
        int error;

        if (hammer2_debug & 0x0001)
                kprintf("inode_create lhc %016jx clindex %d\n",
                        xop->lhc, clindex);

        parent = hammer2_inode_chain(xop->head.ip1, clindex,
                                     HAMMER2_RESOLVE_ALWAYS);
        if (parent == NULL) {
                error = EIO;
                chain = NULL;
                goto fail;
        }
        chain = hammer2_chain_lookup(&parent, &key_next,
                                     xop->lhc, xop->lhc,
                                     &cache_index, 0);
        if (chain) {
                error = EEXIST;
                goto fail;
        }

        error = hammer2_chain_create(&parent, &chain,
                                     xop->head.ip1->pmp,
                                     xop->lhc, 0,
                                     HAMMER2_BREF_TYPE_INODE,
                                     HAMMER2_INODE_BYTES,
                                     xop->head.mtid, 0, xop->flags);
        if (error == 0) {
                hammer2_chain_modify(chain, xop->head.mtid, 0, 0);
                chain->data->ipdata.meta = xop->meta;
                if (xop->head.name1) {
                        bcopy(xop->head.name1,
                              chain->data->ipdata.filename,
                              xop->head.name1_len);
                        chain->data->ipdata.meta.name_len = xop->head.name1_len;
                }
                chain->data->ipdata.meta.name_key = xop->lhc;
        }
fail:
        if (parent) {
                hammer2_chain_unlock(parent);
                hammer2_chain_drop(parent);
        }
        hammer2_xop_feed(&xop->head, chain, clindex, error);
        if (chain) {
                hammer2_chain_unlock(chain);
                hammer2_chain_drop(chain);
        }
}

/*
 * Inode delete helper (backend, threaded)
 *
 * Generally used by hammer2_run_sideq()
 */
void
hammer2_inode_xop_destroy(hammer2_xop_t *arg, int clindex)
{
        hammer2_xop_destroy_t *xop = &arg->xop_destroy;
        hammer2_chain_t *parent;
        hammer2_chain_t *chain;
        hammer2_inode_t *ip;
        int error;

        /*
         * We need the precise parent chain to issue the deletion.
         */
        ip = xop->head.ip1;
        chain = NULL;

        parent = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
        if (parent)
                hammer2_chain_getparent(&parent, HAMMER2_RESOLVE_ALWAYS);
        if (parent == NULL) {
                error = EIO;
                goto done;
        }
        chain = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
        if (chain == NULL) {
                error = EIO;
                goto done;
        }
        hammer2_chain_delete(parent, chain, xop->head.mtid, 0);
        error = 0;
done:
        hammer2_xop_feed(&xop->head, NULL, clindex, error);
        if (parent) {
                hammer2_chain_unlock(parent);
                hammer2_chain_drop(parent);
        }
        if (chain) {
                hammer2_chain_unlock(chain);
                hammer2_chain_drop(chain);
        }
}

void
hammer2_inode_xop_unlinkall(hammer2_xop_t *arg, int clindex)
{
        hammer2_xop_unlinkall_t *xop = &arg->xop_unlinkall;
        hammer2_chain_t *parent;
        hammer2_chain_t *chain;
        hammer2_key_t key_next;
        int cache_index = -1;

        /*
         * We need the precise parent chain to issue the deletion.
         */
        parent = hammer2_inode_chain(xop->head.ip1, clindex,
                                     HAMMER2_RESOLVE_ALWAYS);
        chain = NULL;
        if (parent == NULL) {
                /* XXX error */
                goto done;
        }
        chain = hammer2_chain_lookup(&parent, &key_next,
                                     xop->key_beg, xop->key_end,
                                     &cache_index,
                                     HAMMER2_LOOKUP_ALWAYS);
        while (chain) {
                hammer2_chain_delete(parent, chain,
                                     xop->head.mtid, HAMMER2_DELETE_PERMANENT);
                hammer2_xop_feed(&xop->head, chain, clindex, chain->error);
                /* depend on function to unlock the shared lock */
                chain = hammer2_chain_next(&parent, chain, &key_next,
                                           key_next, xop->key_end,
                                           &cache_index,
                                           HAMMER2_LOOKUP_ALWAYS);
        }
done:
        hammer2_xop_feed(&xop->head, NULL, clindex, ENOENT);
        if (parent) {
                hammer2_chain_unlock(parent);
                hammer2_chain_drop(parent);
        }
        if (chain) {
                hammer2_chain_unlock(chain);
                hammer2_chain_drop(chain);
        }
}

void
hammer2_inode_xop_connect(hammer2_xop_t *arg, int clindex)
{
        hammer2_xop_connect_t *xop = &arg->xop_connect;
        hammer2_inode_data_t *wipdata;
        hammer2_chain_t *parent;
        hammer2_chain_t *chain;
        hammer2_pfs_t *pmp;
        hammer2_key_t key_dummy;
        int cache_index = -1;
        int error;

        /*
         * Get directory, then issue a lookup to prime the parent chain
         * for the create.  The lookup is expected to fail.
         */
        pmp = xop->head.ip1->pmp;
        parent = hammer2_inode_chain(xop->head.ip1, clindex,
                                     HAMMER2_RESOLVE_ALWAYS);
        if (parent == NULL) {
                chain = NULL;
                error = EIO;
                goto fail;
        }
        chain = hammer2_chain_lookup(&parent, &key_dummy,
                                     xop->lhc, xop->lhc,
                                     &cache_index, 0);
        if (chain) {
                hammer2_chain_unlock(chain);
                hammer2_chain_drop(chain);
                chain = NULL;
                error = EEXIST;
                goto fail;
        }

        /*
         * Adjust the filename in the inode, set the name key.
         *
         * NOTE: Frontend must also adjust ip2->meta on success, we can't
         *	 do it here.
         */
        chain = hammer2_inode_chain(xop->head.ip2, clindex,
                                    HAMMER2_RESOLVE_ALWAYS);
        hammer2_chain_modify(chain, xop->head.mtid, 0, 0);
        wipdata = &chain->data->ipdata;

        hammer2_inode_modify(xop->head.ip2);
        if (xop->head.name1) {
                bzero(wipdata->filename, sizeof(wipdata->filename));
                bcopy(xop->head.name1, wipdata->filename, xop->head.name1_len);
                wipdata->meta.name_len = xop->head.name1_len;
        }
        wipdata->meta.name_key = xop->lhc;

        /*
         * Reconnect the chain to the new parent directory
         */
        error = hammer2_chain_create(&parent, &chain, pmp,
                                     xop->lhc, 0,
                                     HAMMER2_BREF_TYPE_INODE,
                                     HAMMER2_INODE_BYTES,
                                     xop->head.mtid, 0, 0);

        /*
         * Feed result back.
         */
fail:
        hammer2_xop_feed(&xop->head, NULL, clindex, error);
        if (parent) {
                hammer2_chain_unlock(parent);
                hammer2_chain_drop(parent);
        }
        if (chain) {
                hammer2_chain_unlock(chain);
                hammer2_chain_drop(chain);
        }
}

/*
 * Synchronize the in-memory inode with the chain.
 */
void
hammer2_inode_xop_chain_sync(hammer2_xop_t *arg, int clindex)
{
        hammer2_xop_fsync_t *xop = &arg->xop_fsync;
        hammer2_chain_t *parent;
        hammer2_chain_t *chain;
        int error;

        parent = hammer2_inode_chain(xop->head.ip1, clindex,
                                     HAMMER2_RESOLVE_ALWAYS);
        chain = NULL;
        if (parent == NULL) {
                error = EIO;
                goto done;
        }
        if (parent->error) {
                error = parent->error;
                goto done;
        }

        error = 0;

        if ((xop->ipflags & HAMMER2_INODE_RESIZED) == 0) {
                /* osize must be ignored */
        } else if (xop->meta.size < xop->osize) {
                /*
                 * We must delete any chains beyond the EOF.  The chain
                 * straddling the EOF will be pending in the bioq.
                 */
                hammer2_key_t lbase;
                hammer2_key_t key_next;
                int cache_index = -1;

                lbase = (xop->meta.size + HAMMER2_PBUFMASK64) &
                        ~HAMMER2_PBUFMASK64;
                chain = hammer2_chain_lookup(&parent, &key_next,
                                             lbase, HAMMER2_KEY_MAX,
                                             &cache_index,
                                             HAMMER2_LOOKUP_NODATA |
                                             HAMMER2_LOOKUP_NODIRECT);
                while (chain) {
                        /*
                         * Degenerate embedded case, nothing to loop on
                         */
                        switch (chain->bref.type) {
                        case HAMMER2_BREF_TYPE_INODE:
                                KKASSERT(0);
                                break;
                        case HAMMER2_BREF_TYPE_DATA:
                                hammer2_chain_delete(parent, chain,
                                                     xop->head.mtid,
                                                     HAMMER2_DELETE_PERMANENT);
                                break;
                        }
                        chain = hammer2_chain_next(&parent, chain, &key_next,
                                                   key_next, HAMMER2_KEY_MAX,
                                                   &cache_index,
                                                   HAMMER2_LOOKUP_NODATA |
                                                   HAMMER2_LOOKUP_NODIRECT);
                }

                /*
                 * Reset to point at inode for following code, if necessary.
                 */
                if (parent->bref.type != HAMMER2_BREF_TYPE_INODE) {
                        hammer2_chain_unlock(parent);
                        hammer2_chain_drop(parent);
                        parent = hammer2_inode_chain(xop->head.ip1, clindex,
                                                     HAMMER2_RESOLVE_ALWAYS);
                        kprintf("hammer2: TRUNCATE RESET on '%s'\n",
                                parent->data->ipdata.filename);
                }
        }

        /*
         * Sync the inode meta-data, potentially clear the blockset area
         * of direct data so it can be used for blockrefs.
         */
        hammer2_chain_modify(parent, xop->head.mtid, 0, 0);
        parent->data->ipdata.meta = xop->meta;
        if (xop->clear_directdata) {
                bzero(&parent->data->ipdata.u.blockset,
                      sizeof(parent->data->ipdata.u.blockset));
        }
done:
        if (chain) {
                hammer2_chain_unlock(chain);
                hammer2_chain_drop(chain);
        }
        if (parent) {
                hammer2_chain_unlock(parent);
                hammer2_chain_drop(parent);
        }
        hammer2_xop_feed(&xop->head, NULL, clindex, error);
}