2 * Copyright (c) 2011-2014 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@dragonflybsd.org>
6 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
18 * 3. Neither the name of The DragonFly Project nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific, prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 #include <sys/cdefs.h>
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/types.h>
/*
 * Generate the red-black tree support functions for the per-PFS in-memory
 * inode tree, keyed by inode number (meta.inum) and ordered with
 * hammer2_inode_cmp() below.
 *
 * NOTE(review): this chunk is a partial listing -- each line carries the
 * original file's line number and interior lines are missing.
 */
46 RB_GENERATE2(hammer2_inode_tree, hammer2_inode, rbnode, hammer2_inode_cmp,
47 hammer2_tid_t, meta.inum);
50 hammer2_inode_cmp(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
52 if (ip1->meta.inum < ip2->meta.inum)
54 if (ip1->meta.inum > ip2->meta.inum)
62 * HAMMER2 offers shared and exclusive locks on inodes. Pass a mask of
65 * - pass HAMMER2_RESOLVE_SHARED if a shared lock is desired. The
66 * inode locking function will automatically set the RDONLY flag.
68 * - pass HAMMER2_RESOLVE_ALWAYS if you need the inode's meta-data.
69 * Most front-end inode locks do.
71 * - pass HAMMER2_RESOLVE_NEVER if you do not want to require that
72 * the inode data be resolved. This is used by the syncthr because
73 * it can run on an unresolved/out-of-sync cluster, and also by the
74 * vnode reclamation code to avoid unnecessary I/O (particularly when
75 * disposing of hundreds of thousands of cached vnodes).
77 * The inode locking function locks the inode itself, resolves any stale
78 * chains in the inode's cluster, and allocates a fresh copy of the
79 * cluster with 1 ref and all the underlying chains locked.
81 * ip->cluster will be stable while the inode is locked.
83 * NOTE: We don't combine the inode/chain lock because putting away an
84 * inode would otherwise confuse multiple lock holders of the inode.
86 * NOTE: In-memory inodes always point to hardlink targets (the actual file),
87 * and never point to a hardlink pointer.
89 * NOTE: If caller passes HAMMER2_RESOLVE_RDONLY the exclusive locking code
90 * will feel free to reduce the chain set in the cluster as an
91 * optimization. It will still be validated against the quorum if
92 * appropriate, but the optimization might be able to reduce data
93 * accesses to one node. This flag is automatically set if the inode
94 * is locked with HAMMER2_RESOLVE_SHARED.
97 hammer2_inode_lock(hammer2_inode_t *ip, int how)
99 hammer2_inode_ref(ip);
102 * Inode structure mutex
104 if (how & HAMMER2_RESOLVE_SHARED) {
105 /*how |= HAMMER2_RESOLVE_RDONLY; not used */
106 hammer2_mtx_sh(&ip->lock);
108 hammer2_mtx_ex(&ip->lock);
113 * Select a chain out of an inode's cluster and lock it.
115 * The inode does not have to be locked.
118 hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how)
120 hammer2_chain_t *chain;
122 hammer2_spin_sh(&ip->cluster_spin);
123 if (clindex >= ip->cluster.nchains)
126 chain = ip->cluster.array[clindex].chain;
128 hammer2_chain_ref(chain);
129 hammer2_spin_unsh(&ip->cluster_spin);
130 hammer2_chain_lock(chain, how);
132 hammer2_spin_unsh(&ip->cluster_spin);
/*
 * Select a chain out of an inode's cluster, lock it, and also lock its
 * parent (lock order must be parent, then chain).  After re-locking, the
 * (cluster-slot, parent) linkage is revalidated; on mismatch the locks
 * and refs are undone and the operation retries.
 *
 * NOTE(review): partial listing -- the enclosing retry loop, the NULL /
 * out-of-range handling, the success break, *parentp assignment and the
 * return (original lines 140, 143-150, 154, 156-158, 160, 168-173, 178+)
 * are missing; the visible statements are NOT the complete body.
 */
138 hammer2_inode_chain_and_parent(hammer2_inode_t *ip, int clindex,
139 hammer2_chain_t **parentp, int how)
141 hammer2_chain_t *chain;
142 hammer2_chain_t *parent;
145 hammer2_spin_sh(&ip->cluster_spin);
146 if (clindex >= ip->cluster.nchains)
149 chain = ip->cluster.array[clindex].chain;
/* chain is ref'd under the spinlock, locked after the spin is dropped */
151 hammer2_chain_ref(chain);
152 hammer2_spin_unsh(&ip->cluster_spin);
153 hammer2_chain_lock(chain, how);
155 hammer2_spin_unsh(&ip->cluster_spin);
159 * Get parent, lock order must be (parent, chain).
161 parent = chain->parent;
162 hammer2_chain_ref(parent);
163 hammer2_chain_unlock(chain);
164 hammer2_chain_lock(parent, how);
165 hammer2_chain_lock(chain, how);
/* revalidate: both linkages must be unchanged after the lock shuffle */
166 if (ip->cluster.array[clindex].chain == chain &&
167 chain->parent == parent) {
/* mismatch path: undo all locks/refs before retrying */
174 hammer2_chain_unlock(chain);
175 hammer2_chain_drop(chain);
176 hammer2_chain_unlock(parent);
177 hammer2_chain_drop(parent);
185 hammer2_inode_unlock(hammer2_inode_t *ip)
187 hammer2_mtx_unlock(&ip->lock);
188 hammer2_inode_drop(ip);
192 * Temporarily release a lock held shared or exclusive. Caller must
193 * hold the lock shared or exclusive on call and lock will be released
196 * Restore a lock that was temporarily released.
199 hammer2_inode_lock_temp_release(hammer2_inode_t *ip)
201 return hammer2_mtx_temp_release(&ip->lock);
205 hammer2_inode_lock_temp_restore(hammer2_inode_t *ip, hammer2_mtx_state_t ostate)
207 hammer2_mtx_temp_restore(&ip->lock, ostate);
211 * Upgrade a shared inode lock to exclusive and return. If the inode lock
212 * is already held exclusively this is a NOP.
214 * The caller MUST hold the inode lock either shared or exclusive on call
215 * and will own the lock exclusively on return.
217 * Returns non-zero if the lock was already exclusive prior to the upgrade.
220 hammer2_inode_lock_upgrade(hammer2_inode_t *ip)
224 if (mtx_islocked_ex(&ip->lock)) {
227 hammer2_mtx_unlock(&ip->lock);
228 hammer2_mtx_ex(&ip->lock);
235 * Downgrade an inode lock from exclusive to shared only if the inode
236 * lock was previously shared. If the inode lock was previously exclusive,
240 hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int wasexclusive)
242 if (wasexclusive == 0)
243 mtx_downgrade(&ip->lock);
/*
 * Lookup an inode by inode number in the PFS's RB tree, returning a
 * referenced inode (or NULL).  The lookup and the ref are performed
 * atomically under the inum spinlock so the inode cannot be freed in
 * between.
 *
 * NOTE(review): partial listing -- original lines 248-257, 260 and
 * 263-265 (locals, the spmp/NULL handling, the "if (ip)" guard before
 * the ref, and the return) are missing here.
 */
250 hammer2_inode_lookup(hammer2_pfs_t *pmp, hammer2_tid_t inum)
258 hammer2_spin_ex(&pmp->inum_spin);
259 ip = RB_LOOKUP(hammer2_inode_tree, &pmp->inum_tree, inum);
261 hammer2_inode_ref(ip);
262 hammer2_spin_unex(&pmp->inum_spin);
268 * Adding a ref to an inode is only legal if the inode already has at least
271 * (can be called with spinlock held)
274 hammer2_inode_ref(hammer2_inode_t *ip)
276 atomic_add_int(&ip->refs, 1);
277 if (hammer2_debug & 0x80000) {
278 kprintf("INODE+1 %p (%d->%d)\n", ip, ip->refs - 1, ip->refs);
/*
 * Drop an inode reference, freeing the inode when the last reference
 * goes away.  The 1->0 transition interlocks with the per-PFS inum
 * spinlock so it cannot race a concurrent hammer2_inode_lookup(); on
 * that transition the inode is removed from the RB tree (if indexed),
 * its cluster is cleaned out via hammer2_inode_repoint(), and the
 * structure is freed.  Non-zero transitions are a simple cmpset
 * decrement.
 *
 * NOTE(review): partial listing -- the enclosing loop, the refs/pip/pmp
 * locals and assignments, and most braces (original lines 289-294,
 * 298-303, 308-312, 320, 322-337, 340, 342, 344-348, 350+) are missing;
 * the visible statements are NOT the complete body.
 */
284 * Drop an inode reference, freeing the inode when the last reference goes
288 hammer2_inode_drop(hammer2_inode_t *ip)
291 hammer2_inode_t *pip;
/* ref-count trace, mirrors the INODE+1 trace in hammer2_inode_ref() */
295 if (hammer2_debug & 0x80000) {
296 kprintf("INODE-1 %p (%d->%d)\n",
297 ip, ip->refs, ip->refs - 1);
304 * Transition to zero, must interlock with
305 * the inode inumber lookup tree (if applicable).
306 * It should not be possible for anyone to race
307 * the transition to 0.
311 hammer2_spin_ex(&pmp->inum_spin);
313 if (atomic_cmpset_int(&ip->refs, 1, 0)) {
314 KKASSERT(hammer2_mtx_refs(&ip->lock) == 0);
315 if (ip->flags & HAMMER2_INODE_ONRBTREE) {
316 atomic_clear_int(&ip->flags,
317 HAMMER2_INODE_ONRBTREE);
318 RB_REMOVE(hammer2_inode_tree,
319 &pmp->inum_tree, ip);
321 hammer2_spin_unex(&pmp->inum_spin);
328 * Cleaning out ip->cluster isn't entirely
/* NULL/NULL repoint releases all chains held by ip->cluster */
331 hammer2_inode_repoint(ip, NULL, NULL);
334 * We have to drop pip (if non-NULL) to
335 * dispose of our implied reference from
336 * ip->pip. We can simply loop on it.
338 kfree(ip, pmp->minode);
339 atomic_add_long(&pmp->inmem_inodes, -1);
341 /* continue with pip (can be NULL) */
343 hammer2_spin_unex(&ip->pmp->inum_spin);
347 * Non zero transition
349 if (atomic_cmpset_int(&ip->refs, refs, refs - 1))
/*
 * Get the vnode associated with the given inode, allocating the vnode
 * if necessary; the vnode is returned exclusively locked.  The caller
 * must hold the inode lock (shared or exclusive).  The inode lock is
 * temporarily released around vget()/getnewvnode() to avoid deadlocks
 * against vnode reclamation, and is upgraded to exclusive to check for
 * an allocation race before associating a new vnode.
 *
 * NOTE(review): partial listing -- the retry loop, locals (vp, pmp,
 * wasexclusive), most braces, the error/return paths and several switch
 * bodies are missing (see the gaps in the embedded line numbers); the
 * visible statements are NOT the complete body.
 */
356 * Get the vnode associated with the given inode, allocating the vnode if
357 * necessary. The vnode will be returned exclusively locked.
359 * The caller must lock the inode (shared or exclusive).
361 * Great care must be taken to avoid deadlocks and vnode acquisition/reclaim
365 hammer2_igetv(hammer2_inode_t *ip, int *errorp)
371 KKASSERT(pmp != NULL);
376 * Attempt to reuse an existing vnode assignment. It is
377 * possible to race a reclaim so the vget() may fail. The
378 * inode must be unlocked during the vget() to avoid a
379 * deadlock against a reclaim.
386 * Inode must be unlocked during the vget() to avoid
387 * possible deadlocks, but leave the ip ref intact.
389 * vnode is held to prevent destruction during the
390 * vget(). The vget() can still fail if we lost
391 * a reclaim race on the vnode.
393 hammer2_mtx_state_t ostate;
396 ostate = hammer2_inode_lock_temp_release(ip);
397 if (vget(vp, LK_EXCLUSIVE)) {
399 hammer2_inode_lock_temp_restore(ip, ostate);
402 hammer2_inode_lock_temp_restore(ip, ostate);
404 /* vp still locked and ref from vget */
406 kprintf("hammer2: igetv race %p/%p\n",
416 * No vnode exists, allocate a new vnode. Beware of
417 * allocation races. This function will return an
418 * exclusively locked and referenced vnode.
420 *errorp = getnewvnode(VT_HAMMER2, pmp->mp, &vp, 0, 0);
422 kprintf("hammer2: igetv getnewvnode failed %d\n",
429 * Lock the inode and check for an allocation race.
431 wasexclusive = hammer2_inode_lock_upgrade(ip);
432 if (ip->vp != NULL) {
435 hammer2_inode_lock_downgrade(ip, wasexclusive);
/* per-type vnode initialization (VM object for file-backed types) */
439 switch (ip->meta.type) {
440 case HAMMER2_OBJTYPE_DIRECTORY:
443 case HAMMER2_OBJTYPE_REGFILE:
445 vinitvmio(vp, ip->meta.size,
447 (int)ip->meta.size & HAMMER2_LBUFMASK);
449 case HAMMER2_OBJTYPE_SOFTLINK:
451 * XXX for now we are using the generic file_read
452 * and file_write code so we need a buffer cache
456 vinitvmio(vp, ip->meta.size,
458 (int)ip->meta.size & HAMMER2_LBUFMASK);
460 case HAMMER2_OBJTYPE_CDEV:
463 case HAMMER2_OBJTYPE_BDEV:
464 vp->v_ops = &pmp->mp->mnt_vn_spec_ops;
465 if (ip->meta.type != HAMMER2_OBJTYPE_CDEV)
471 case HAMMER2_OBJTYPE_FIFO:
473 vp->v_ops = &pmp->mp->mnt_vn_fifo_ops;
475 case HAMMER2_OBJTYPE_SOCKET:
479 panic("hammer2: unhandled objtype %d",
484 if (ip == pmp->iroot)
485 vsetflags(vp, VROOT);
489 hammer2_inode_ref(ip); /* vp association */
490 hammer2_inode_lock_downgrade(ip, wasexclusive);
495 * Return non-NULL vp and *errorp == 0, or NULL vp and *errorp != 0.
497 if (hammer2_debug & 0x0002) {
498 kprintf("igetv vp %p refs 0x%08x aux 0x%08x\n",
499 vp, vp->v_refcnt, vp->v_auxrefs);
/*
 * Returns the (locked) inode associated with the passed-in cluster,
 * creating and indexing a new in-memory inode if necessary, otherwise
 * synchronizing the existing one to the cluster (one index if idx >= 0,
 * else the whole cluster).
 *
 * NOTE(review): partial listing -- the lookup/create branch structure,
 * the retry on RB_INSERT collision, returns, and many braces are
 * missing (see the gaps in the embedded line numbers); the visible
 * statements are NOT the complete body.
 */
505 * Returns the inode associated with the passed-in cluster, creating the
506 * inode if necessary and synchronizing it to the passed-in cluster otherwise.
507 * When synchronizing, if idx >= 0, only cluster index (idx) is synchronized.
508 * Otherwise the whole cluster is synchronized.
510 * The passed-in cluster must be locked and will remain locked on return.
511 * The returned inode will be locked and the caller may dispose of both
512 * via hammer2_inode_unlock() + hammer2_inode_drop(). However, if the caller
513 * needs to resolve a hardlink it must ref/unlock/relock/drop the inode.
515 * The hammer2_inode structure regulates the interface between the high level
516 * kernel VNOPS API and the filesystem backend (the chains).
518 * On return the inode is locked with the supplied cluster.
521 hammer2_inode_get(hammer2_pfs_t *pmp, hammer2_inode_t *dip,
522 hammer2_cluster_t *cluster, int idx)
524 hammer2_inode_t *nip;
525 const hammer2_inode_data_t *iptmp;
526 const hammer2_inode_data_t *nipdata;
528 KKASSERT(cluster == NULL ||
529 hammer2_cluster_type(cluster) == HAMMER2_BREF_TYPE_INODE);
533 * Interlocked lookup/ref of the inode. This code is only needed
534 * when looking up inodes with nlinks != 0 (TODO: optimize out
535 * otherwise and test for duplicates).
537 * Cluster can be NULL during the initial pfs allocation.
541 iptmp = &hammer2_cluster_rdata(cluster)->ipdata;
542 nip = hammer2_inode_lookup(pmp, iptmp->meta.inum);
546 hammer2_mtx_ex(&nip->lock);
549 * Handle SMP race (not applicable to the super-root spmp
550 * which can't index inodes due to duplicative inode numbers).
552 if (pmp->spmp_hmp == NULL &&
553 (nip->flags & HAMMER2_INODE_ONRBTREE) == 0) {
554 hammer2_mtx_unlock(&nip->lock);
555 hammer2_inode_drop(nip);
559 hammer2_inode_repoint_one(nip, cluster, idx);
561 hammer2_inode_repoint(nip, NULL, cluster);
567 * We couldn't find the inode number, create a new inode.
569 nip = kmalloc(sizeof(*nip), pmp->minode, M_WAITOK | M_ZERO);
570 spin_init(&nip->cluster_spin, "h2clspin");
571 atomic_add_long(&pmp->inmem_inodes, 1);
572 hammer2_pfs_memory_inc(pmp);
573 hammer2_pfs_memory_wakeup(pmp);
575 nip->flags = HAMMER2_INODE_SROOT;
578 * Initialize nip's cluster. A cluster is provided for normal
579 * inodes but typically not for the super-root or PFS inodes.
581 nip->cluster.refs = 1;
582 nip->cluster.pmp = pmp;
583 nip->cluster.flags |= HAMMER2_CLUSTER_INODE;
585 nipdata = &hammer2_cluster_rdata(cluster)->ipdata;
586 nip->meta = nipdata->meta;
587 atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD);
588 hammer2_inode_repoint(nip, NULL, cluster);
590 nip->meta.inum = 1; /* PFS inum is always 1 XXX */
591 /* mtime will be updated when a cluster is available */
592 atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD);/*XXX*/
595 nip->pip = dip; /* can be NULL */
597 hammer2_inode_ref(dip); /* ref dip for nip->pip */
602 * ref and lock on nip gives it state compatible to after a
603 * hammer2_inode_lock() call.
606 hammer2_mtx_init(&nip->lock, "h2inode");
607 hammer2_mtx_ex(&nip->lock);
608 /* combination of thread lock and chain lock == inode lock */
611 * Attempt to add the inode. If it fails we raced another inode
612 * get. Undo all the work and try again.
614 if (pmp->spmp_hmp == NULL) {
615 hammer2_spin_ex(&pmp->inum_spin);
616 if (RB_INSERT(hammer2_inode_tree, &pmp->inum_tree, nip)) {
617 hammer2_spin_unex(&pmp->inum_spin);
618 hammer2_mtx_unlock(&nip->lock);
619 hammer2_inode_drop(nip);
622 atomic_set_int(&nip->flags, HAMMER2_INODE_ONRBTREE);
623 hammer2_spin_unex(&pmp->inum_spin);
/*
 * Create a new inode in the specified directory, using vap/cred to
 * determine type and ownership (defaults to a directory when they are
 * NULL, as used for PFS creation under the super-root).  The directory
 * is locked exclusively, an unused lhc is found via the scanlhc XOP,
 * inode metadata is filled into the create XOP, and the backend
 * hammer2_inode_xop_create is run; the resulting inode is returned
 * locked via hammer2_inode_get().
 *
 * NOTE(review): partial listing -- the lhc-iteration loop, error
 * handling, several locals and braces, and the return are missing (see
 * the gaps in the embedded line numbers); the visible statements are
 * NOT the complete body.
 */
630 * Create a new inode in the specified directory using the vattr to
631 * figure out the type of inode.
633 * If no error occurs the new inode with its cluster locked is returned in
634 * *nipp, otherwise an error is returned and *nipp is set to NULL.
636 * If vap and/or cred are NULL the related fields are not set and the
637 * inode type defaults to a directory. This is used when creating PFSs
638 * under the super-root, so the inode number is set to 1 in this case.
640 * dip is not locked on entry.
642 * NOTE: When used to create a snapshot, the inode is temporarily associated
643 * with the super-root spmp. XXX should pass new pmp for snapshot.
646 hammer2_inode_create(hammer2_inode_t *dip,
647 struct vattr *vap, struct ucred *cred,
648 const uint8_t *name, size_t name_len, hammer2_key_t lhc,
649 hammer2_key_t inum, uint8_t type, uint8_t target_type,
650 int flags, int *errorp)
652 hammer2_xop_create_t *xop;
653 hammer2_inode_t *nip;
659 uint8_t dip_comp_algo;
660 uint8_t dip_check_algo;
663 lhc = hammer2_dirhash(name, name_len);
668 * Locate the inode or indirect block to create the new
669 * entry in. At the same time check for key collisions
670 * and iterate until we don't get one.
672 * NOTE: hidden inodes do not have iterators.
674 * Lock the directory exclusively for now to guarantee that
675 * we can find an unused lhc for the name. Due to collisions,
676 * two different creates can end up with the same lhc so we
677 * cannot depend on the OS to prevent the collision.
679 hammer2_inode_lock(dip, 0);
/* snapshot the directory's inheritable attributes under its lock */
681 dip_uid = dip->meta.uid;
682 dip_gid = dip->meta.gid;
683 dip_mode = dip->meta.mode;
684 dip_comp_algo = dip->meta.comp_algo;
685 dip_check_algo = dip->meta.check_algo;
688 * If name specified, locate an unused key in the collision space.
689 * Otherwise use the passed-in lhc directly.
692 hammer2_xop_scanlhc_t *sxop;
693 hammer2_key_t lhcbase;
696 sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
698 hammer2_xop_start(&sxop->head, hammer2_xop_scanlhc);
699 while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
700 if (lhc != sxop->head.cluster.focus->bref.key)
704 hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);
/* ran out of iteration space within the dirhash collision band */
712 if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
719 * Create the inode with the lhc as the key.
721 xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
724 bzero(&xop->meta, sizeof(xop->meta));
727 xop->meta.type = hammer2_get_obj_type(vap->va_type);
729 switch (xop->meta.type) {
730 case HAMMER2_OBJTYPE_CDEV:
731 case HAMMER2_OBJTYPE_BDEV:
732 xop->meta.rmajor = vap->va_rmajor;
733 xop->meta.rminor = vap->va_rminor;
738 type = xop->meta.type;
740 xop->meta.type = type;
741 xop->meta.target_type = target_type;
743 xop->meta.inum = inum;
745 /* Inherit parent's inode compression mode. */
746 xop->meta.comp_algo = dip_comp_algo;
747 xop->meta.check_algo = dip_check_algo;
748 xop->meta.version = HAMMER2_INODE_VERSION_ONE;
749 hammer2_update_time(&xop->meta.ctime);
750 xop->meta.mtime = xop->meta.ctime;
752 xop->meta.mode = vap->va_mode;
753 xop->meta.nlinks = 1;
755 if (dip && dip->pmp) {
756 xuid = hammer2_to_unix_xid(&dip_uid);
757 xuid = vop_helper_create_uid(dip->pmp->mp,
763 /* super-root has no dip and/or pmp */
766 if (vap->va_vaflags & VA_UID_UUID_VALID)
767 xop->meta.uid = vap->va_uid_uuid;
768 else if (vap->va_uid != (uid_t)VNOVAL)
769 hammer2_guid_to_uuid(&xop->meta.uid, vap->va_uid);
771 hammer2_guid_to_uuid(&xop->meta.uid, xuid);
773 if (vap->va_vaflags & VA_GID_UUID_VALID)
774 xop->meta.gid = vap->va_gid_uuid;
775 else if (vap->va_gid != (gid_t)VNOVAL)
776 hammer2_guid_to_uuid(&xop->meta.gid, vap->va_gid);
778 xop->meta.gid = dip_gid;
782 * Regular files and softlinks allow a small amount of data to be
783 * directly embedded in the inode. This flag will be cleared if
784 * the size is extended past the embedded limit.
786 if (xop->meta.type == HAMMER2_OBJTYPE_REGFILE ||
787 xop->meta.type == HAMMER2_OBJTYPE_SOFTLINK ||
788 xop->meta.type == HAMMER2_OBJTYPE_HARDLINK) {
789 xop->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
792 hammer2_xop_setname(&xop->head, name, name_len);
793 xop->meta.name_len = name_len;
794 xop->meta.name_key = lhc;
795 KKASSERT(name_len < HAMMER2_INODE_MAXNAME);
797 hammer2_xop_start(&xop->head, hammer2_inode_xop_create);
799 error = hammer2_xop_collect(&xop->head, 0);
801 kprintf("CREATE INODE %*.*s\n",
802 (int)name_len, (int)name_len, name);
811 * Set up the new inode if not a hardlink pointer.
813 * NOTE: *_get() integrates chain's lock into the inode lock.
815 * NOTE: Only one new inode can currently be created per
816 * transaction. If the need arises we can adjust
817 * hammer2_trans_init() to allow more.
819 * NOTE: nipdata will have chain's blockset data.
821 if (type != HAMMER2_OBJTYPE_HARDLINK) {
822 nip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
823 nip->comp_heuristic = 0;
829 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
831 hammer2_inode_unlock(dip);
/*
 * Connect a disconnected inode (ip) into directory (dip) under
 * (name, name_len), or under key lhc when name is NULL.  Resolves lhc
 * collisions via the scanlhc XOP, repoints ip->pip, runs the backend
 * connect XOP, and mirrors name_key/name_len into ip->meta on success.
 *
 * NOTE(review): partial listing -- the collision-iteration loop,
 * locals, error paths and the return are missing (see the gaps in the
 * embedded line numbers); the visible statements are NOT the complete
 * body.
 */
837 * Connect the disconnected inode (ip) to the directory (dip) with the
838 * specified (name, name_len). If name is NULL, (lhc) will be used as
839 * the directory key and the inode's embedded name will not be modified
840 * for future recovery purposes.
842 * dip and ip must both be locked exclusively (dip in particular to avoid
846 hammer2_inode_connect(hammer2_inode_t *dip, hammer2_inode_t *ip,
847 const char *name, size_t name_len,
850 hammer2_xop_scanlhc_t *sxop;
851 hammer2_xop_connect_t *xop;
852 hammer2_inode_t *opip;
853 hammer2_key_t lhcbase;
857 * Calculate the lhc and resolve the collision space.
860 lhc = lhcbase = hammer2_dirhash(name, name_len);
861 sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
863 hammer2_xop_start(&sxop->head, hammer2_xop_scanlhc);
864 while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
865 if (lhc != sxop->head.cluster.focus->bref.key)
869 hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);
/* ran out of iteration space within the dirhash collision band */
877 if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
886 * Formally reconnect the in-memory structure. ip must
887 * be locked exclusively to safely change ip->pip.
889 if (ip->pip != dip) {
890 hammer2_inode_ref(dip);
894 hammer2_inode_drop(opip);
900 xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
902 hammer2_xop_setname(&xop->head, name, name_len);
903 hammer2_xop_setip2(&xop->head, ip);
905 hammer2_xop_start(&xop->head, hammer2_inode_xop_connect);
906 error = hammer2_xop_collect(&xop->head, 0);
907 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
910 * On success make the same adjustments to ip->meta or the
911 * next flush may blow up the chain.
914 hammer2_inode_modify(ip);
915 ip->meta.name_key = lhc;
917 ip->meta.name_len = name_len;
/*
 * Repoint ip->cluster's chains to cluster's chains and fix up the
 * focus/flags.  All replaced chains are collected into dropch[] under
 * the cluster spinlock and dropped only after the spinlock is released;
 * a NULL cluster cleans out ip->cluster entirely (used on final drop).
 *
 * NOTE(review): partial listing -- the dropch[] bookkeeping for
 * replaced chains, several braces and the else-branches are missing
 * (see the gaps in the embedded line numbers); the visible statements
 * are NOT the complete body.
 */
924 * Repoint ip->cluster's chains to cluster's chains and fixup the default
925 * focus. All items, valid or invalid, are repointed. hammer2_xop_start()
926 * filters out invalid or non-matching elements.
928 * Caller must hold the inode and cluster exclusive locked, if not NULL,
929 * must also be locked.
931 * Cluster may be NULL to clean out any chains in ip->cluster.
934 hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_inode_t *pip,
935 hammer2_cluster_t *cluster)
937 hammer2_chain_t *dropch[HAMMER2_MAXCLUSTER];
938 hammer2_chain_t *ochain;
939 hammer2_chain_t *nchain;
940 hammer2_inode_t *opip;
943 bzero(dropch, sizeof(dropch));
946 * Replace chains in ip->cluster with chains from cluster and
947 * adjust the focus if necessary.
949 * NOTE: nchain and/or ochain can be NULL due to gaps
950 * in the cluster arrays.
952 hammer2_spin_ex(&ip->cluster_spin);
953 for (i = 0; cluster && i < cluster->nchains; ++i) {
955 * Do not replace elements which are the same. Also handle
956 * element count discrepancies.
958 nchain = cluster->array[i].chain;
959 if (i < ip->cluster.nchains) {
960 ochain = ip->cluster.array[i].chain;
961 if (ochain == nchain)
970 ip->cluster.array[i].chain = nchain;
971 ip->cluster.array[i].flags &= ~HAMMER2_CITEM_INVALID;
972 ip->cluster.array[i].flags |= cluster->array[i].flags &
973 HAMMER2_CITEM_INVALID;
975 hammer2_chain_ref(nchain);
980 * Release any left-over chains in ip->cluster.
982 while (i < ip->cluster.nchains) {
983 nchain = ip->cluster.array[i].chain;
985 ip->cluster.array[i].chain = NULL;
986 ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
993 * Fixup fields. Note that the inode-embedded cluster is never
997 ip->cluster.nchains = cluster->nchains;
998 ip->cluster.focus = cluster->focus;
999 ip->cluster.flags = cluster->flags & ~HAMMER2_CLUSTER_LOCKED;
1001 ip->cluster.nchains = 0;
1002 ip->cluster.focus = NULL;
1003 ip->cluster.flags &= ~HAMMER2_CLUSTER_ZFLAGS;
1007 * Repoint ip->pip if requested (non-NULL pip).
1009 if (pip && ip->pip != pip) {
1011 hammer2_inode_ref(pip);
1016 hammer2_spin_unex(&ip->cluster_spin);
1019 * Cleanup outside of spinlock
1023 hammer2_chain_drop(dropch[i]);
1026 hammer2_inode_drop(opip);
/*
 * Repoint a single cluster element (idx) from cluster into ip->cluster,
 * growing ip->cluster (marking intervening slots INVALID) if needed.
 * The ref/drop of the old/new chains is performed outside the cluster
 * spinlock.  Used piecemeal by the synchronization threads.
 *
 * NOTE(review): partial listing -- ochain initialization for the grow
 * path, several braces and the flag cleanup are missing (see the gaps
 * in the embedded line numbers); the visible statements are NOT the
 * complete body.
 */
1030 * Repoint a single element from the cluster to the ip. Used by the
1031 * synchronization threads to piecemeal update inodes. Does not change
1032 * focus and requires inode to be re-locked to clean-up flags (XXX).
1035 hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
1038 hammer2_chain_t *ochain;
1039 hammer2_chain_t *nchain;
1042 hammer2_spin_ex(&ip->cluster_spin);
1043 KKASSERT(idx < cluster->nchains);
1044 if (idx < ip->cluster.nchains) {
1045 ochain = ip->cluster.array[idx].chain;
1046 nchain = cluster->array[idx].chain;
1049 nchain = cluster->array[idx].chain;
1050 ip->cluster.nchains = idx + 1;
/* newly-exposed slots are zeroed and marked INVALID */
1051 for (i = ip->cluster.nchains; i <= idx; ++i) {
1052 bzero(&ip->cluster.array[i],
1053 sizeof(ip->cluster.array[i]));
1054 ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
1057 if (ochain != nchain) {
1061 ip->cluster.array[idx].chain = nchain;
1062 ip->cluster.array[idx].flags &= ~HAMMER2_CITEM_INVALID;
1063 ip->cluster.array[idx].flags |= cluster->array[idx].flags &
1064 HAMMER2_CITEM_INVALID;
1066 hammer2_spin_unex(&ip->cluster_spin);
/* ref/drop happen after the spinlock is released */
1067 if (ochain != nchain) {
1069 hammer2_chain_ref(nchain);
1071 hammer2_chain_drop(ochain);
/*
 * Called with a locked inode after xop_unlink has run: decrement nlinks
 * and, if the deleted inode is still open, reconnect it under the
 * hidden directory (name_key forced to the inode number to avoid
 * collisions) so the storage survives until last close.
 *
 * NOTE(review): partial listing -- locals (pmp, error), the isopen
 * branch structure, several braces and the return are missing (see the
 * gaps in the embedded line numbers); the visible statements are NOT
 * the complete body.
 */
1076 * Called with a locked inode to finish unlinking an inode after xop_unlink
1077 * had been run. This function is responsible for decrementing nlinks and
1078 * moving deleted inodes to the hidden directory if they are still open.
1080 * We don't bother decrementing nlinks if the file is not open and this was
1083 * If the inode is a hardlink target it's chain has not yet been deleted,
1084 * otherwise it's chain has been deleted.
1086 * If isopen then any prior deletion was not permanent and the inode must
1087 * be moved to the hidden directory.
1090 hammer2_inode_unlink_finisher(hammer2_inode_t *ip, int isopen)
1098 * Decrement nlinks. If this is the last link and the file is
1099 * not open, the chain has already been removed and we don't bother
1100 * dirtying the inode.
1102 if (ip->meta.nlinks == 1) {
1103 atomic_set_int(&ip->flags, HAMMER2_INODE_ISUNLINKED);
1105 atomic_set_int(&ip->flags, HAMMER2_INODE_ISDELETED);
1110 hammer2_inode_modify(ip);
1112 if ((int64_t)ip->meta.nlinks < 0)
1113 ip->meta.nlinks = 0; /* safety */
1116 * If nlinks is not zero we are done. However, this should only be
1117 * possible with a hardlink target. If the inode is an embedded
1118 * hardlink nlinks should have dropped to zero, warn and proceed
1119 * with the next step.
1121 if (ip->meta.nlinks) {
1122 if ((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0)
1124 kprintf("hammer2_inode_unlink: nlinks was not 0 (%jd)\n",
1125 (intmax_t)ip->meta.nlinks);
1130 * nlinks is now zero, the inode should have already been deleted.
1131 * If the file is open it was deleted non-permanently and must be
1132 * moved to the hidden directory.
1134 * When moving to the hidden directory we force the name_key to the
1135 * inode number to avoid collisions.
1138 hammer2_inode_lock(pmp->ihidden, 0);
1139 error = hammer2_inode_connect(pmp->ihidden, ip,
1140 NULL, 0, ip->meta.inum);
1141 hammer2_inode_unlock(pmp->ihidden);
/*
 * Mount-time initialization of pmp->ihidden: look up the hidden
 * directory via a lookup XOP, create it if ENOENT, then scan it and
 * destroy any stale contents left over from a previous mount (files
 * that were open-but-unlinked at crash/unmount time).
 *
 * NOTE(review): partial listing -- the branch structure between the
 * found/create/scan phases, error handling and braces are missing (see
 * the gaps in the embedded line numbers); the visible statements are
 * NOT the complete body.
 */
1149 * This is called from the mount code to initialize pmp->ihidden
1152 hammer2_inode_install_hidden(hammer2_pfs_t *pmp)
1159 hammer2_trans_init(pmp, 0);
1160 hammer2_inode_lock(pmp->iroot, 0);
1163 * Find the hidden directory
1166 hammer2_xop_lookup_t *xop;
1168 xop = hammer2_xop_alloc(pmp->iroot, HAMMER2_XOP_MODIFYING);
1169 xop->lhc = HAMMER2_INODE_HIDDENDIR;
1170 hammer2_xop_start(&xop->head, hammer2_xop_lookup);
1171 error = hammer2_xop_collect(&xop->head, 0);
1175 * Found the hidden directory
1177 kprintf("PFS FOUND HIDDEN DIR\n");
1178 pmp->ihidden = hammer2_inode_get(pmp, pmp->iroot,
1181 hammer2_inode_ref(pmp->ihidden);
1182 hammer2_inode_unlock(pmp->ihidden);
1184 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1188 * Create the hidden directory if it could not be found.
1190 if (error == ENOENT) {
1191 kprintf("PFS CREATE HIDDEN DIR\n");
1193 pmp->ihidden = hammer2_inode_create(pmp->iroot, NULL, NULL,
1195 /* lhc */ HAMMER2_INODE_HIDDENDIR,
1196 /* inum */ HAMMER2_INODE_HIDDENDIR,
1197 /* type */ HAMMER2_OBJTYPE_DIRECTORY,
1198 /* target_type */ 0,
1202 hammer2_inode_ref(pmp->ihidden);
1203 hammer2_inode_unlock(pmp->ihidden);
1206 kprintf("PFS CREATE ERROR %d\n", error);
1210 * Scan the hidden directory on-mount and destroy its contents
1213 hammer2_xop_unlinkall_t *xop;
1215 hammer2_inode_lock(pmp->ihidden, 0);
1216 xop = hammer2_xop_alloc(pmp->ihidden, HAMMER2_XOP_MODIFYING);
1217 xop->key_beg = HAMMER2_KEY_MIN;
1218 xop->key_end = HAMMER2_KEY_MAX;
1219 hammer2_xop_start(&xop->head, hammer2_inode_xop_unlinkall);
1221 while ((error = hammer2_xop_collect(&xop->head, 0)) == 0) {
1224 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1225 hammer2_inode_unlock(pmp->ihidden);
1228 hammer2_inode_unlock(pmp->iroot);
1229 hammer2_trans_done(pmp);
/*
 * Find the directory common to both fdip and tdip (for rename locking);
 * returns a held but not locked inode.  Fast paths handle the identical
 * and direct parent/child cases before the quadratic pip-walk.
 *
 * NOTE(review): partial listing -- the returns, the scan2 reset inside
 * the outer loop and the closing braces are missing (see the gaps in
 * the embedded line numbers); the visible statements are NOT the
 * complete body.
 */
1233 * Find the directory common to both fdip and tdip.
1235 * Returns a held but not locked inode. Caller typically locks the inode,
1236 * and when through unlocks AND drops it.
1239 hammer2_inode_common_parent(hammer2_inode_t *fdip, hammer2_inode_t *tdip)
1241 hammer2_inode_t *scan1;
1242 hammer2_inode_t *scan2;
1245 * We used to have a depth field but it complicated matters too
1246 * much for directory renames. So now its ugly. Check for
1247 * simple cases before giving up and doing it the expensive way.
1249 * XXX need a bottom-up topology stability lock
1251 if (fdip == tdip || fdip == tdip->pip) {
1252 hammer2_inode_ref(fdip);
1255 if (fdip->pip == tdip) {
1256 hammer2_inode_ref(tdip);
/* expensive path: cross-compare every ancestor pair within the PFS */
1263 for (scan1 = fdip; scan1->pmp == fdip->pmp; scan1 = scan1->pip) {
1265 while (scan2->pmp == tdip->pmp) {
1266 if (scan1 == scan2) {
1267 hammer2_inode_ref(scan1);
1275 panic("hammer2_inode_common_parent: no common parent %p %p\n",
1282 * Mark an inode as being modified, meaning that the caller will modify
1285 * NOTE: No mtid (modify_tid) is passed into this routine. The caller is
1286 * only modifying the in-memory inode. A modify_tid is synchronized
1287 * later when the inode gets flushed.
1290 hammer2_inode_modify(hammer2_inode_t *ip)
1292 atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
1294 vsetisdirty(ip->vp);
/*
 * Synchronize the inode's frontend state (ip->meta, size changes) to the
 * chain state via the chain_sync XOP, prior to any explicit flush or
 * strategy write.  RESIZED/MODIFIED flags are cleared optimistically and
 * restored from the snapshot in xop->ipflags if the XOP fails.
 *
 * NOTE(review): partial listing -- locals, braces and the else/error
 * branch structure are missing (see the gaps in the embedded line
 * numbers); the visible statements are NOT the complete body.
 */
1298 * Synchronize the inode's frontend state with the chain state prior
1299 * to any explicit flush of the inode or any strategy write call.
1301 * Called with a locked inode inside a transaction.
1304 hammer2_inode_chain_sync(hammer2_inode_t *ip)
1306 if (ip->flags & (HAMMER2_INODE_RESIZED | HAMMER2_INODE_MODIFIED)) {
1307 hammer2_xop_fsync_t *xop;
1310 xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
1311 xop->clear_directdata = 0;
1312 if (ip->flags & HAMMER2_INODE_RESIZED) {
/* growing past the embedded-data limit forces direct-data off */
1313 if ((ip->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) &&
1314 ip->meta.size > HAMMER2_EMBEDDED_BYTES) {
1315 ip->meta.op_flags &= ~HAMMER2_OPFLAG_DIRECTDATA;
1316 xop->clear_directdata = 1;
1318 xop->osize = ip->osize;
1320 xop->osize = ip->meta.size; /* safety */
1322 xop->ipflags = ip->flags;
1323 xop->meta = ip->meta;
1325 atomic_clear_int(&ip->flags, HAMMER2_INODE_RESIZED |
1326 HAMMER2_INODE_MODIFIED);
1327 hammer2_xop_start(&xop->head, hammer2_inode_xop_chain_sync);
1328 error = hammer2_xop_collect(&xop->head, 0);
1329 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1330 if (error == ENOENT)
1333 kprintf("hammer2: unable to fsync inode %p\n", ip);
/* failure: restore the flags we cleared optimistically above */
1335 atomic_set_int(&ip->flags,
1336 xop->ipflags & (HAMMER2_INODE_RESIZED |
1337 HAMMER2_INODE_MODIFIED));
1339 /* XXX return error somehow? */
/*
 * Drain the PFS unlink queue: for each queued open-but-unlinked inode,
 * run the destroy XOP to permanently delete it.  The list spinlock is
 * dropped around the per-inode work and reacquired for each iteration.
 *
 * NOTE(review): partial listing -- braces, the ip extraction from ipul,
 * and the error-branch structure are missing (see the gaps in the
 * embedded line numbers); the visible statements are NOT the complete
 * body.
 */
1345 * This handles unlinked open files after the vnode is finally dereferenced.
1346 * To avoid deadlocks it cannot be called from the normal vnode recycling
1347 * path, so we call it (1) after a unlink, rmdir, or rename, (2) on every
1348 * flush, and (3) on umount.
1350 * Caller must be in a transaction.
1353 hammer2_inode_run_unlinkq(hammer2_pfs_t *pmp)
1355 hammer2_xop_destroy_t *xop;
1356 hammer2_inode_unlink_t *ipul;
1357 hammer2_inode_t *ip;
/* cheap unlocked early-out when nothing is queued */
1360 if (TAILQ_EMPTY(&pmp->unlinkq))
1364 hammer2_spin_ex(&pmp->list_spin);
1365 while ((ipul = TAILQ_FIRST(&pmp->unlinkq)) != NULL) {
1366 TAILQ_REMOVE(&pmp->unlinkq, ipul, entry);
1367 hammer2_spin_unex(&pmp->list_spin);
1369 kfree(ipul, pmp->minode);
1371 hammer2_inode_lock(ip, 0);
1372 xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
1373 hammer2_xop_start(&xop->head, hammer2_inode_xop_destroy);
1374 error = hammer2_xop_collect(&xop->head, 0);
1375 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1377 atomic_clear_int(&ip->flags, HAMMER2_INODE_ISDELETED);
1379 hammer2_inode_unlock(ip);
1380 hammer2_inode_drop(ip); /* ipul ref */
1382 hammer2_spin_ex(&pmp->list_spin);
1384 hammer2_spin_unex(&pmp->list_spin);
1389 * Inode create helper (threaded, backend)
1391 * Used by ncreate, nmknod, nsymlink, nmkdir.
1392 * Used by nlink and rename to create HARDLINK pointers.
1394 * Frontend holds the parent directory ip locked exclusively. We
1395 * create the inode and feed the exclusively locked chain to the
1399 hammer2_inode_xop_create(hammer2_xop_t *arg, int clindex)
1401 hammer2_xop_create_t *xop = &arg->xop_create;
1402 hammer2_chain_t *parent;
1403 hammer2_chain_t *chain;
1404 hammer2_key_t key_next;
1405 int cache_index = -1;
1408 if (hammer2_debug & 0x0001)
1409 kprintf("inode_create lhc %016jx clindex %d\n",
1413 parent = hammer2_inode_chain(xop->head.ip1, clindex,
1414 HAMMER2_RESOLVE_ALWAYS);
1415 if (parent == NULL) {
1419 chain = hammer2_chain_lookup(&parent, &key_next,
1423 hammer2_chain_unlock(chain);
1428 error = hammer2_chain_create(&parent, &chain,
1431 HAMMER2_BREF_TYPE_INODE,
1432 HAMMER2_INODE_BYTES,
1433 xop->head.mtid, 0, xop->flags);
1435 hammer2_chain_modify(chain, xop->head.mtid, 0, 0);
1436 chain->data->ipdata.meta = xop->meta;
1437 if (xop->head.name1) {
1438 bcopy(xop->head.name1,
1439 chain->data->ipdata.filename,
1440 xop->head.name1_len);
1441 chain->data->ipdata.meta.name_len = xop->head.name1_len;
1443 chain->data->ipdata.meta.name_key = xop->lhc;
1445 hammer2_chain_unlock(chain);
1446 hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS |
1447 HAMMER2_RESOLVE_SHARED);
1450 hammer2_chain_unlock(parent);
1451 hammer2_chain_drop(parent);
1453 hammer2_xop_feed(&xop->head, chain, clindex, error);
1455 hammer2_chain_drop(chain);
1459 * Inode delete helper (backend, threaded)
1461 * Generally used by hammer2_inode_run_unlinkq()
1464 hammer2_inode_xop_destroy(hammer2_xop_t *arg, int clindex)
1466 hammer2_xop_destroy_t *xop = &arg->xop_destroy;
1468 hammer2_chain_t *parent;
1469 hammer2_chain_t *chain;
1470 hammer2_inode_t *ip;
1474 * We need the precise parent chain to issue the deletion.
1480 parent = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
1482 hammer2_chain_getparent(&parent, HAMMER2_RESOLVE_ALWAYS);
1483 if (parent == NULL) {
1487 chain = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
1488 if (chain == NULL) {
1492 hammer2_chain_delete(parent, chain, xop->head.mtid, 0);
1495 hammer2_xop_feed(&xop->head, NULL, clindex, error);
1497 hammer2_chain_unlock(parent);
1498 hammer2_chain_drop(parent);
1501 hammer2_chain_unlock(chain);
1502 hammer2_chain_drop(chain);
1507 hammer2_inode_xop_unlinkall(hammer2_xop_t *arg, int clindex)
1509 hammer2_xop_unlinkall_t *xop = &arg->xop_unlinkall;
1510 hammer2_chain_t *parent;
1511 hammer2_chain_t *chain;
1512 hammer2_key_t key_next;
1513 int cache_index = -1;
1516 * We need the precise parent chain to issue the deletion.
1518 parent = hammer2_inode_chain(xop->head.ip1, clindex,
1519 HAMMER2_RESOLVE_ALWAYS);
1521 if (parent == NULL) {
1525 chain = hammer2_chain_lookup(&parent, &key_next,
1526 xop->key_beg, xop->key_end,
1528 HAMMER2_LOOKUP_ALWAYS);
1530 hammer2_chain_delete(parent, chain,
1531 xop->head.mtid, HAMMER2_DELETE_PERMANENT);
1532 hammer2_chain_unlock(chain);
1533 hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS |
1534 HAMMER2_RESOLVE_SHARED);
1535 hammer2_xop_feed(&xop->head, chain, clindex, chain->error);
1536 chain = hammer2_chain_next(&parent, chain, &key_next,
1537 key_next, xop->key_end,
1539 HAMMER2_LOOKUP_ALWAYS |
1540 HAMMER2_LOOKUP_NOUNLOCK);
1543 hammer2_xop_feed(&xop->head, NULL, clindex, ENOENT);
1545 hammer2_chain_unlock(parent);
1546 hammer2_chain_drop(parent);
1549 hammer2_chain_unlock(chain);
1550 hammer2_chain_drop(chain);
1555 hammer2_inode_xop_connect(hammer2_xop_t *arg, int clindex)
1557 hammer2_xop_connect_t *xop = &arg->xop_connect;
1558 hammer2_inode_data_t *wipdata;
1559 hammer2_chain_t *parent;
1560 hammer2_chain_t *chain;
1562 hammer2_key_t key_dummy;
1563 int cache_index = -1;
1567 * Get directory, then issue a lookup to prime the parent chain
1568 * for the create. The lookup is expected to fail.
1570 pmp = xop->head.ip1->pmp;
1571 parent = hammer2_inode_chain(xop->head.ip1, clindex,
1572 HAMMER2_RESOLVE_ALWAYS);
1573 if (parent == NULL) {
1578 chain = hammer2_chain_lookup(&parent, &key_dummy,
1582 hammer2_chain_unlock(chain);
1583 hammer2_chain_drop(chain);
1590 * Adjust the filename in the inode, set the name key.
1592 * NOTE: Frontend must also adjust ip2->meta on success, we can't
1595 chain = hammer2_inode_chain(xop->head.ip2, clindex,
1596 HAMMER2_RESOLVE_ALWAYS);
1597 hammer2_chain_modify(chain, xop->head.mtid, 0, 0);
1598 wipdata = &chain->data->ipdata;
1600 hammer2_inode_modify(xop->head.ip2);
1601 if (xop->head.name1) {
1602 bzero(wipdata->filename, sizeof(wipdata->filename));
1603 bcopy(xop->head.name1, wipdata->filename, xop->head.name1_len);
1604 wipdata->meta.name_len = xop->head.name1_len;
1606 wipdata->meta.name_key = xop->lhc;
1609 * Reconnect the chain to the new parent directory
1611 error = hammer2_chain_create(&parent, &chain, pmp,
1613 HAMMER2_BREF_TYPE_INODE,
1614 HAMMER2_INODE_BYTES,
1615 xop->head.mtid, 0, 0);
1621 hammer2_xop_feed(&xop->head, NULL, clindex, error);
1623 hammer2_chain_unlock(parent);
1624 hammer2_chain_drop(parent);
1627 hammer2_chain_unlock(chain);
1628 hammer2_chain_drop(chain);
1633 * Synchronize the in-memory inode with the chain.
1636 hammer2_inode_xop_chain_sync(hammer2_xop_t *arg, int clindex)
1638 hammer2_xop_fsync_t *xop = &arg->xop_fsync;
1639 hammer2_chain_t *parent;
1640 hammer2_chain_t *chain;
1643 parent = hammer2_inode_chain(xop->head.ip1, clindex,
1644 HAMMER2_RESOLVE_ALWAYS);
1646 if (parent == NULL) {
1650 if (parent->error) {
1651 error = parent->error;
1657 if ((xop->ipflags & HAMMER2_INODE_RESIZED) == 0) {
1658 /* osize must be ignored */
1659 } else if (xop->meta.size < xop->osize) {
1661 * We must delete any chains beyond the EOF. The chain
1662 * straddling the EOF will be pending in the bioq.
1664 hammer2_key_t lbase;
1665 hammer2_key_t key_next;
1666 int cache_index = -1;
1668 lbase = (xop->meta.size + HAMMER2_PBUFMASK64) &
1669 ~HAMMER2_PBUFMASK64;
1670 chain = hammer2_chain_lookup(&parent, &key_next,
1671 lbase, HAMMER2_KEY_MAX,
1673 HAMMER2_LOOKUP_NODATA |
1674 HAMMER2_LOOKUP_NODIRECT);
1677 * Degenerate embedded case, nothing to loop on
1679 switch (chain->bref.type) {
1680 case HAMMER2_BREF_TYPE_INODE:
1683 case HAMMER2_BREF_TYPE_DATA:
1684 hammer2_chain_delete(parent, chain,
1686 HAMMER2_DELETE_PERMANENT);
1689 chain = hammer2_chain_next(&parent, chain, &key_next,
1690 key_next, HAMMER2_KEY_MAX,
1692 HAMMER2_LOOKUP_NODATA |
1693 HAMMER2_LOOKUP_NODIRECT);
1697 * Reset to point at inode for following code, if necessary.
1699 if (parent->bref.type != HAMMER2_BREF_TYPE_INODE) {
1700 hammer2_chain_unlock(parent);
1701 hammer2_chain_drop(parent);
1702 parent = hammer2_inode_chain(xop->head.ip1, clindex,
1703 HAMMER2_RESOLVE_ALWAYS);
1704 kprintf("hammer2: TRUNCATE RESET on '%s'\n",
1705 parent->data->ipdata.filename);
1710 * Sync the inode meta-data, potentially clear the blockset area
1711 * of direct data so it can be used for blockrefs.
1713 hammer2_chain_modify(parent, xop->head.mtid, 0, 0);
1714 parent->data->ipdata.meta = xop->meta;
1715 if (xop->clear_directdata) {
1716 bzero(&parent->data->ipdata.u.blockset,
1717 sizeof(parent->data->ipdata.u.blockset));
1721 hammer2_chain_unlock(chain);
1722 hammer2_chain_drop(chain);
1725 hammer2_chain_unlock(parent);
1726 hammer2_chain_drop(parent);
1728 hammer2_xop_feed(&xop->head, NULL, clindex, error);