/*
 * Copyright (c) 2011-2018 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/uuid.h>

#include "hammer2.h"

#define INODE_DEBUG	0
RB_GENERATE2(hammer2_inode_tree, hammer2_inode, rbnode, hammer2_inode_cmp,
	     hammer2_tid_t, meta.inum);
int
hammer2_inode_cmp(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
{
	if (ip1->meta.inum < ip2->meta.inum)
		return (-1);
	if (ip1->meta.inum > ip2->meta.inum)
		return (1);
	return (0);
}
static
void
hammer2_knote(struct vnode *vp, int flags)
{
	if (flags)
		KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
}
/*
 * Caller holds pmp->list_spin and the inode should be locked.  Merge ip
 * with the specified depend.
 *
 * If the ip is on SYNCQ it stays there and (void *)-1 is returned, indicating
 * that successive calls must ensure the ip is on a pass2 depend (or they are
 * all SYNCQ).  If the passed-in depend is not NULL and not (void *)-1 then
 * we can set pass2 on it and return.
 *
 * If the ip is not on SYNCQ it is merged with the passed-in depend, creating
 * a self-depend if necessary, and depend->pass2 is set according
 * to the PASS2 flag.  SIDEQ is set.
 */
static
hammer2_depend_t *
hammer2_inode_setdepend_locked(hammer2_inode_t *ip, hammer2_depend_t *depend)
{
	hammer2_pfs_t *pmp = ip->pmp;
	hammer2_depend_t *dtmp;
	hammer2_inode_t *iptmp;

	/*
	 * If ip is SYNCQ its entry is used for the syncq list and it will
	 * no longer be associated with a dependency.  Merging this status
	 * with a passed-in depend implies PASS2.
	 */
	if (ip->flags & HAMMER2_INODE_SYNCQ) {
		if (depend == (void *)-1 ||
		    depend == NULL) {
			return ((void *)-1);
		}
		depend->pass2 = 1;
		hammer2_trans_setflags(pmp, HAMMER2_TRANS_RESCAN);

		return depend;
	}

	/*
	 * If ip is already SIDEQ, merge ip->depend into the passed-in depend.
	 * If it is not, associate the ip with the passed-in depend, creating
	 * a single-entry dependency using depend_static if necessary.
	 *
	 * NOTE: The use of ip->depend_static always requires that the
	 *	 specific ip containing the structure is part of that
	 *	 particular depend_static's dependency group.
	 */
	if (ip->flags & HAMMER2_INODE_SIDEQ) {
		/*
		 * Merge ip->depend with the passed-in depend.  If the
		 * passed-in depend is not a special case, all ips associated
		 * with ip->depend (including the original ip) must be moved
		 * to the passed-in depend.
		 */
		if (depend == NULL) {
			depend = ip->depend;
		} else if (depend == (void *)-1) {
			depend = ip->depend;
			depend->pass2 = 1;
		} else if (depend != ip->depend) {
			int sanitychk = 0;

			dtmp = ip->depend;
			while ((iptmp = TAILQ_FIRST(&dtmp->sideq)) != NULL) {
				if (iptmp == ip)
					sanitychk = 1;
				TAILQ_REMOVE(&dtmp->sideq, iptmp, entry);
				TAILQ_INSERT_TAIL(&depend->sideq, iptmp, entry);
				iptmp->depend = depend;
			}
			KKASSERT(sanitychk == 1);
			depend->count += dtmp->count;
			depend->pass2 |= dtmp->pass2;
			TAILQ_REMOVE(&pmp->depq, dtmp, entry);
			dtmp->count = 0;
			dtmp->pass2 = 0;
		}
	} else {
		/*
		 * Add ip to the sideq, creating a self-dependency if
		 * necessary.
		 */
		hammer2_inode_ref(ip);
		atomic_set_int(&ip->flags, HAMMER2_INODE_SIDEQ);
		if (depend == NULL) {
			depend = &ip->depend_static;
			TAILQ_INSERT_TAIL(&pmp->depq, depend, entry);
		} else if (depend == (void *)-1) {
			depend = &ip->depend_static;
			depend->pass2 = 1;
			TAILQ_INSERT_TAIL(&pmp->depq, depend, entry);
		} /* else add ip to passed-in depend */
		TAILQ_INSERT_TAIL(&depend->sideq, ip, entry);
		ip->depend = depend;
		++depend->count;
		++pmp->sideq_count;

		if (ip->flags & HAMMER2_INODE_SYNCQ_PASS2)
			depend->pass2 = 1;
		if (depend->pass2)
			hammer2_trans_setflags(pmp, HAMMER2_TRANS_RESCAN);
	}

	return depend;
}
/*
 * Put a solo inode on the SIDEQ (meaning that it's dirty).  This can also
 * occur from inode_lock4() and inode_depend().
 *
 * Caller must pass-in a locked inode.
 */
void
hammer2_inode_delayed_sideq(hammer2_inode_t *ip)
{
	hammer2_pfs_t *pmp = ip->pmp;

	/*
	 * Optimize case to avoid pmp spinlock.
	 */
	if ((ip->flags & (HAMMER2_INODE_SYNCQ | HAMMER2_INODE_SIDEQ)) == 0) {
		hammer2_spin_ex(&pmp->list_spin);
		hammer2_inode_setdepend_locked(ip, NULL);
		hammer2_spin_unex(&pmp->list_spin);
	}
}
/*
 * Lock an inode, with SYNCQ semantics.
 *
 * HAMMER2 offers shared and exclusive locks on inodes.  Pass a mask of
 * flags for options:
 *
 *	- pass HAMMER2_RESOLVE_SHARED if a shared lock is desired.  The
 *	  inode locking function will automatically set the RDONLY flag.
 *	  Shared locks are not subject to SYNCQ semantics, exclusive locks
 *	  are.
 *
 *	- pass HAMMER2_RESOLVE_ALWAYS if you need the inode's meta-data.
 *	  Most front-end inode locks do.
 *
 *	- pass HAMMER2_RESOLVE_NEVER if you do not want to require that
 *	  the inode data be resolved.  This is used by the syncthr because
 *	  it can run on an unresolved/out-of-sync cluster, and also by the
 *	  vnode reclamation code to avoid unnecessary I/O (particularly when
 *	  disposing of hundreds of thousands of cached vnodes).
 *
 * This function, along with lock4, has SYNCQ semantics.  If the inode being
 * locked is on the SYNCQ, that is it has been staged by the syncer, we must
 * block until the operation is complete (even if we can lock the inode).  In
 * order to reduce the stall time, we re-order the inode to the front of the
 * pmp->syncq prior to blocking.  This reordering VERY significantly improves
 * performance.
 *
 * The inode locking function locks the inode itself, resolves any stale
 * chains in the inode's cluster, and allocates a fresh copy of the
 * cluster with 1 ref and all the underlying chains locked.
 *
 * ip->cluster will be stable while the inode is locked.
 *
 * NOTE: We don't combine the inode/chain lock because putting away an
 *	 inode would otherwise confuse multiple lock holders of the inode.
 */
void
hammer2_inode_lock(hammer2_inode_t *ip, int how)
{
	hammer2_pfs_t *pmp;

	hammer2_inode_ref(ip);
	pmp = ip->pmp;

	/*
	 * Inode structure mutex - Shared lock
	 */
	if (how & HAMMER2_RESOLVE_SHARED) {
		hammer2_mtx_sh(&ip->lock);
		return;
	}

	/*
	 * Inode structure mutex - Exclusive lock
	 *
	 * An exclusive lock (if not recursive) must wait for inodes on
	 * SYNCQ to flush first, to ensure that meta-data dependencies such
	 * as the nlink count and related directory entries are not split
	 * across flushes.
	 *
	 * If the vnode is locked by the current thread it must be unlocked
	 * across the tsleep() to avoid a deadlock.
	 */
	hammer2_mtx_ex(&ip->lock);
	if (hammer2_mtx_refs(&ip->lock) > 1)
		return;
	while ((ip->flags & HAMMER2_INODE_SYNCQ) && pmp) {
		hammer2_spin_ex(&pmp->list_spin);
		if (ip->flags & HAMMER2_INODE_SYNCQ) {
			tsleep_interlock(&ip->flags, 0);
			atomic_set_int(&ip->flags, HAMMER2_INODE_SYNCQ_WAKEUP);
			TAILQ_REMOVE(&pmp->syncq, ip, entry);
			TAILQ_INSERT_HEAD(&pmp->syncq, ip, entry);
			hammer2_spin_unex(&pmp->list_spin);
			hammer2_mtx_unlock(&ip->lock);
			tsleep(&ip->flags, PINTERLOCKED, "h2sync", 0);
			hammer2_mtx_ex(&ip->lock);
			continue;
		}
		hammer2_spin_unex(&pmp->list_spin);
		break;
	}
}
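/*
 * Illustrative sketch (not part of the original source): the typical
 * front-end pattern for dirtying an inode's meta-data under the lock
 * above.  The enclosing VNOP/transaction context is assumed; the
 * function names are the real ones from this file.
 */
#if 0
static void
example_dirty_inode(hammer2_inode_t *ip)
{
	hammer2_inode_lock(ip, 0);	/* exclusive, waits on SYNCQ */
	hammer2_inode_modify(ip);	/* flags MODIFIED, queues to SIDEQ */
	ip->meta.mode |= 0100;		/* arbitrary meta-data change */
	hammer2_inode_unlock(ip);	/* wakes any SYNCQ_WAKEUP waiter */
}
#endif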
/*
 * Exclusively lock up to four inodes, in order, with SYNCQ semantics.
 * ip1 and ip2 must not be NULL.  ip3 and ip4 may be NULL, but if ip3 is
 * NULL then ip4 must also be NULL.
 *
 * This creates a dependency between up to four inodes.
 */
void
hammer2_inode_lock4(hammer2_inode_t *ip1, hammer2_inode_t *ip2,
		    hammer2_inode_t *ip3, hammer2_inode_t *ip4)
{
	hammer2_inode_t *ips[4];
	hammer2_inode_t *iptmp;
	hammer2_inode_t *ipslp;
	hammer2_depend_t *depend;
	hammer2_pfs_t *pmp;
	int count;
	int i;

	pmp = ip1->pmp;			/* may be NULL */
	KKASSERT(pmp == ip2->pmp);

	ips[0] = ip1;
	ips[1] = ip2;
	if (ip3 == NULL) {
		count = 2;
	} else if (ip4 == NULL) {
		count = 3;
		ips[2] = ip3;
		KKASSERT(pmp == ip3->pmp);
	} else {
		count = 4;
		ips[2] = ip3;
		ips[3] = ip4;
		KKASSERT(pmp == ip3->pmp);
		KKASSERT(pmp == ip4->pmp);
	}

	for (i = 0; i < count; ++i)
		hammer2_inode_ref(ips[i]);

restart:
	/*
	 * Lock the inodes in order
	 */
	for (i = 0; i < count; ++i) {
		hammer2_mtx_ex(&ips[i]->lock);
	}

	/*
	 * Associate dependencies, record the first inode found on SYNCQ
	 * (operation is allowed to proceed for inodes on PASS2) for our
	 * sleep operation, this inode is theoretically the last one sync'd
	 * in the sequence.
	 *
	 * All inodes found on SYNCQ are moved to the head of the syncq
	 * to reduce stalls.
	 */
	hammer2_spin_ex(&pmp->list_spin);
	depend = NULL;
	ipslp = NULL;
	for (i = 0; i < count; ++i) {
		iptmp = ips[i];
		depend = hammer2_inode_setdepend_locked(iptmp, depend);
		if (iptmp->flags & HAMMER2_INODE_SYNCQ) {
			TAILQ_REMOVE(&pmp->syncq, iptmp, entry);
			TAILQ_INSERT_HEAD(&pmp->syncq, iptmp, entry);
			if (ipslp == NULL)
				ipslp = iptmp;
		}
	}
	hammer2_spin_unex(&pmp->list_spin);

	/*
	 * Block and retry if any of the inodes are on SYNCQ.  It is
	 * important that we allow the operation to proceed in the
	 * PASS2 case, to avoid deadlocking against the vnode.
	 */
	if (ipslp) {
		for (i = 0; i < count; ++i)
			hammer2_mtx_unlock(&ips[i]->lock);
		tsleep(&ipslp->flags, 0, "h2sync", 2);
		goto restart;
	}
}
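/*
 * Illustrative sketch (not part of the original source): a rename-style
 * operation locking both directories and both file inodes together so
 * they become a single flush dependency.  The fdip/tdip/ip/tip names and
 * the surrounding VNOP context are assumptions; all four are taken to be
 * non-NULL here.
 */
#if 0
hammer2_inode_lock4(fdip, tdip, ip, tip);
/* ... manipulate the directory entries and nlinks ... */
hammer2_inode_unlock(tip);
hammer2_inode_unlock(ip);
hammer2_inode_unlock(tdip);
hammer2_inode_unlock(fdip);
#endif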
/*
 * Release an inode lock.  If another thread is blocked on SYNCQ_WAKEUP
 * we wake it up.
 */
void
hammer2_inode_unlock(hammer2_inode_t *ip)
{
	if (ip->flags & HAMMER2_INODE_SYNCQ_WAKEUP) {
		atomic_clear_int(&ip->flags, HAMMER2_INODE_SYNCQ_WAKEUP);
		hammer2_mtx_unlock(&ip->lock);
		wakeup(&ip->flags);
	} else {
		hammer2_mtx_unlock(&ip->lock);
	}
	hammer2_inode_drop(ip);
}
/*
 * If either ip1 or ip2 has been tapped by the syncer, make sure that both
 * are.  This ensures that dependencies (e.g. dirent-v-inode) are synced
 * together.  For dirent-v-inode depends, pass the dirent as ip1.
 *
 * If neither ip1 nor ip2 has been tapped by the syncer, merge them into a
 * single dependency.  Dependencies are entered into pmp->depq.  This
 * effectively flags the inodes SIDEQ.
 *
 * Both ip1 and ip2 must be locked by the caller.  This also ensures
 * that we can't race the end of the syncer's queue run.
 */
void
hammer2_inode_depend(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
{
	hammer2_pfs_t *pmp = ip1->pmp;
	hammer2_depend_t *depend;

	hammer2_spin_ex(&pmp->list_spin);
	depend = hammer2_inode_setdepend_locked(ip1, NULL);
	depend = hammer2_inode_setdepend_locked(ip2, depend);
	hammer2_spin_unex(&pmp->list_spin);
}
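/*
 * Illustrative sketch (not part of the original source): how an unlink
 * front-end might pair the directory inode and the file inode so the
 * syncer flushes the dirent and the nlinks change together.  The dip/ip
 * names and VNOP context are assumptions.
 */
#if 0
hammer2_inode_lock4(dip, ip, NULL, NULL);	/* locks + SYNCQ wait */
hammer2_inode_depend(dip, ip);			/* dirent-v-inode depend */
/* ... remove dirent under dip, decrement ip->meta.nlinks ... */
hammer2_inode_unlock(ip);
hammer2_inode_unlock(dip);
#endif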
/*
 * Select a chain out of an inode's cluster and lock it.
 *
 * The inode does not have to be locked.
 */
hammer2_chain_t *
hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how)
{
	hammer2_chain_t *chain;
	hammer2_cluster_t *cluster;

	hammer2_spin_sh(&ip->cluster_spin);
	cluster = &ip->cluster;
	if (clindex >= cluster->nchains)
		chain = NULL;
	else
		chain = cluster->array[clindex].chain;
	if (chain) {
		hammer2_chain_ref(chain);
		hammer2_spin_unsh(&ip->cluster_spin);
		hammer2_chain_lock(chain, how);
	} else {
		hammer2_spin_unsh(&ip->cluster_spin);
	}
	return chain;
}
hammer2_chain_t *
hammer2_inode_chain_and_parent(hammer2_inode_t *ip, int clindex,
			       hammer2_chain_t **parentp, int how)
{
	hammer2_chain_t *chain;
	hammer2_chain_t *parent;

	for (;;) {
		hammer2_spin_sh(&ip->cluster_spin);
		if (clindex >= ip->cluster.nchains)
			chain = NULL;
		else
			chain = ip->cluster.array[clindex].chain;
		if (chain) {
			hammer2_chain_ref(chain);
			hammer2_spin_unsh(&ip->cluster_spin);
			hammer2_chain_lock(chain, how);
		} else {
			hammer2_spin_unsh(&ip->cluster_spin);
		}

		/*
		 * Get parent, lock order must be (parent, chain).
		 */
		parent = chain ? chain->parent : NULL;
		if (parent) {
			hammer2_chain_ref(parent);
			hammer2_chain_unlock(chain);
			hammer2_chain_lock(parent, how);
			hammer2_chain_lock(chain, how);
		}
		if (ip->cluster.array[clindex].chain == chain &&
		    chain->parent == parent) {
			break;
		}

		/*
		 * Stale, retry from the top.
		 */
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		if (parent) {
			hammer2_chain_unlock(parent);
			hammer2_chain_drop(parent);
		}
	}
	*parentp = parent;

	return chain;
}
/*
 * Temporarily release a lock held shared or exclusive.  Caller must
 * hold the lock shared or exclusive on call and lock will be released
 * on return.
 *
 * Restore a lock that was temporarily released.
 */
hammer2_mtx_state_t
hammer2_inode_lock_temp_release(hammer2_inode_t *ip)
{
	return hammer2_mtx_temp_release(&ip->lock);
}

void
hammer2_inode_lock_temp_restore(hammer2_inode_t *ip, hammer2_mtx_state_t ostate)
{
	hammer2_mtx_temp_restore(&ip->lock, ostate);
}
/*
 * Upgrade a shared inode lock to exclusive and return.  If the inode lock
 * is already held exclusively this is a NOP.
 *
 * The caller MUST hold the inode lock either shared or exclusive on call
 * and will own the lock exclusively on return.
 *
 * Returns non-zero if the lock was already exclusive prior to the upgrade.
 */
int
hammer2_inode_lock_upgrade(hammer2_inode_t *ip)
{
	int wasexclusive;

	if (mtx_islocked_ex(&ip->lock)) {
		wasexclusive = 1;
	} else {
		hammer2_mtx_unlock(&ip->lock);
		hammer2_mtx_ex(&ip->lock);
		wasexclusive = 0;
	}
	return wasexclusive;
}
/*
 * Downgrade an inode lock from exclusive to shared only if the inode
 * lock was previously shared.  If the inode lock was previously exclusive,
 * this is a NOP.
 */
void
hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int wasexclusive)
{
	if (wasexclusive == 0)
		hammer2_mtx_downgrade(&ip->lock);
}
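/*
 * Illustrative sketch (not part of the original source): temporarily
 * upgrading a shared inode lock to make a meta-data change, then
 * restoring the caller's original lock state.  The surrounding shared
 * lock is an assumption.
 */
#if 0
int wasexclusive;

wasexclusive = hammer2_inode_lock_upgrade(ip);	/* now exclusive */
hammer2_inode_modify(ip);
/* ... adjust ip->meta ... */
hammer2_inode_lock_downgrade(ip, wasexclusive);	/* NOP if was exclusive */
#endif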
/*
 * Lookup an inode by inode number
 */
hammer2_inode_t *
hammer2_inode_lookup(hammer2_pfs_t *pmp, hammer2_tid_t inum)
{
	hammer2_inode_t *ip;

	KKASSERT(pmp);

	hammer2_spin_ex(&pmp->inum_spin);
	ip = RB_LOOKUP(hammer2_inode_tree, &pmp->inum_tree, inum);
	if (ip)
		hammer2_inode_ref(ip);
	hammer2_spin_unex(&pmp->inum_spin);

	return ip;
}
/*
 * Adding a ref to an inode is only legal if the inode already has at least
 * one ref.
 *
 * (can be called with spinlock held)
 */
void
hammer2_inode_ref(hammer2_inode_t *ip)
{
	atomic_add_int(&ip->refs, 1);
	if (hammer2_debug & 0x80000) {
		kprintf("INODE+1 %p (%d->%d)\n", ip, ip->refs - 1, ip->refs);
	}
}
/*
 * Drop an inode reference, freeing the inode when the last reference goes
 * away.
 */
void
hammer2_inode_drop(hammer2_inode_t *ip)
{
	hammer2_pfs_t *pmp;
	u_int refs;

	while (ip) {
		if (hammer2_debug & 0x80000) {
			kprintf("INODE-1 %p (%d->%d)\n",
				ip, ip->refs, ip->refs - 1);
		}
		refs = ip->refs;
		cpu_ccfence();
		if (refs == 1) {
			/*
			 * Transition to zero, must interlock with
			 * the inode inumber lookup tree (if applicable).
			 * It should not be possible for anyone to race
			 * the transition to 0.
			 */
			pmp = ip->pmp;
			KKASSERT(pmp);
			hammer2_spin_ex(&pmp->inum_spin);

			if (atomic_cmpset_int(&ip->refs, 1, 0)) {
				KKASSERT(hammer2_mtx_refs(&ip->lock) == 0);
				if (ip->flags & HAMMER2_INODE_ONRBTREE) {
					atomic_clear_int(&ip->flags,
						HAMMER2_INODE_ONRBTREE);
					RB_REMOVE(hammer2_inode_tree,
						&pmp->inum_tree, ip);
				}
				hammer2_spin_unex(&pmp->inum_spin);

				/*
				 * Cleaning out ip->cluster isn't entirely
				 * trivial.
				 */
				hammer2_inode_repoint(ip, NULL, NULL);

				kfree(ip, pmp->minode);
				atomic_add_long(&pmp->inmem_inodes, -1);
				ip = NULL;	/* will terminate loop */
			} else {
				hammer2_spin_unex(&ip->pmp->inum_spin);
			}
		} else {
			/*
			 * Non zero transition
			 */
			if (atomic_cmpset_int(&ip->refs, refs, refs - 1))
				break;
		}
	}
}
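/*
 * Illustrative sketch (not part of the original source): the ref/drop
 * lifecycle around an inode-number lookup.  hammer2_inode_lookup()
 * returns a referenced inode which the caller must eventually drop;
 * the unlock releases only the lock's own ref.
 */
#if 0
hammer2_inode_t *ip;

ip = hammer2_inode_lookup(pmp, inum);	/* +1 ref on success */
if (ip) {
	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
	/* ... inspect ip->meta ... */
	hammer2_inode_unlock(ip);	/* drops the lock's ref */
	hammer2_inode_drop(ip);		/* drops the lookup ref */
}
#endif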
/*
 * Get the vnode associated with the given inode, allocating the vnode if
 * necessary.  The vnode will be returned exclusively locked.
 *
 * *errorp is set to a UNIX error, not a HAMMER2 error.
 *
 * The caller must lock the inode (shared or exclusive).
 *
 * Great care must be taken to avoid deadlocks and vnode acquisition/reclaim
 * races.
 */
struct vnode *
hammer2_igetv(hammer2_inode_t *ip, int *errorp)
{
	hammer2_pfs_t *pmp;
	struct vnode *vp;
	int wasexclusive;

	pmp = ip->pmp;
	KKASSERT(pmp != NULL);
	*errorp = 0;

	for (;;) {
		/*
		 * Attempt to reuse an existing vnode assignment.  It is
		 * possible to race a reclaim so the vget() may fail.  The
		 * inode must be unlocked during the vget() to avoid a
		 * deadlock against a reclaim.
		 */
		vp = ip->vp;
		if (vp) {
			/*
			 * Inode must be unlocked during the vget() to avoid
			 * possible deadlocks, but leave the ip ref intact.
			 *
			 * vnode is held to prevent destruction during the
			 * vget().  The vget() can still fail if we lost
			 * a reclaim race on the vnode.
			 */
			hammer2_mtx_state_t ostate;

			vhold(vp);
			ostate = hammer2_inode_lock_temp_release(ip);
			if (vget(vp, LK_EXCLUSIVE)) {
				vdrop(vp);
				hammer2_inode_lock_temp_restore(ip, ostate);
				continue;
			}
			hammer2_inode_lock_temp_restore(ip, ostate);
			vdrop(vp);
			/* vp still locked and ref from vget */
			if (ip->vp != vp) {
				kprintf("hammer2: igetv race %p/%p\n",
					ip->vp, vp);
				vput(vp);
				continue;
			}
			*errorp = 0;
			break;
		}

		/*
		 * No vnode exists, allocate a new vnode.  Beware of
		 * allocation races.  This function will return an
		 * exclusively locked and referenced vnode.
		 */
		*errorp = getnewvnode(VT_HAMMER2, pmp->mp, &vp, 0, 0);
		if (*errorp) {
			kprintf("hammer2: igetv getnewvnode failed %d\n",
				*errorp);
			vp = NULL;
			break;
		}

		/*
		 * Lock the inode and check for an allocation race.
		 */
		wasexclusive = hammer2_inode_lock_upgrade(ip);
		if (ip->vp != NULL) {
			vp->v_type = VBAD;
			vx_put(vp);
			hammer2_inode_lock_downgrade(ip, wasexclusive);
			continue;
		}

		switch (ip->meta.type) {
		case HAMMER2_OBJTYPE_DIRECTORY:
			vp->v_type = VDIR;
			break;
		case HAMMER2_OBJTYPE_REGFILE:
			/*
			 * Regular file must use buffer cache I/O
			 * (VKVABIO cpu sync semantics supported)
			 */
			vp->v_type = VREG;
			vsetflags(vp, VKVABIO);
			vinitvmio(vp, ip->meta.size, HAMMER2_LBUFSIZE,
				  (int)ip->meta.size & HAMMER2_LBUFMASK);
			break;
		case HAMMER2_OBJTYPE_SOFTLINK:
			/*
			 * XXX for now we are using the generic file_read
			 * and file_write code so we need a buffer cache
			 * association.
			 *
			 * (VKVABIO cpu sync semantics supported)
			 */
			vp->v_type = VLNK;
			vsetflags(vp, VKVABIO);
			vinitvmio(vp, ip->meta.size, HAMMER2_LBUFSIZE,
				  (int)ip->meta.size & HAMMER2_LBUFMASK);
			break;
		case HAMMER2_OBJTYPE_CDEV:
			vp->v_type = VCHR;
			/* fall through */
		case HAMMER2_OBJTYPE_BDEV:
			vp->v_ops = &pmp->mp->mnt_vn_spec_ops;
			if (ip->meta.type != HAMMER2_OBJTYPE_CDEV)
				vp->v_type = VBLK;
			addaliasu(vp, ip->meta.rmajor, ip->meta.rminor);
			break;
		case HAMMER2_OBJTYPE_FIFO:
			vp->v_type = VFIFO;
			vp->v_ops = &pmp->mp->mnt_vn_fifo_ops;
			break;
		case HAMMER2_OBJTYPE_SOCKET:
			vp->v_type = VSOCK;
			break;
		default:
			panic("hammer2: unhandled objtype %d",
			      ip->meta.type);
			break;
		}

		if (ip == pmp->iroot)
			vsetflags(vp, VROOT);

		vp->v_data = ip;
		ip->vp = vp;
		hammer2_inode_ref(ip);		/* vp association */
		hammer2_inode_lock_downgrade(ip, wasexclusive);
		break;
	}

	/*
	 * Return non-NULL vp and *errorp == 0, or NULL vp and *errorp != 0.
	 */
	if (hammer2_debug & 0x0002) {
		kprintf("igetv vp %p refs 0x%08x aux 0x%08x\n",
			vp, vp->v_refcnt, vp->v_auxrefs);
	}
	return (vp);
}
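/*
 * Illustrative sketch (not part of the original source): how a VNOP
 * such as a lookup typically obtains the vnode once it holds a locked,
 * referenced inode.  Error handling is abbreviated and the surrounding
 * VNOP context is assumed.
 */
#if 0
struct vnode *vp;
int error;

hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
vp = hammer2_igetv(ip, &error);		/* returns exclusively locked vp */
hammer2_inode_unlock(ip);
if (error == 0) {
	/* ... use vp, then vput(vp) when done ... */
}
#endif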
/*
 * Returns the inode associated with the passed-in cluster, allocating a new
 * hammer2_inode structure if necessary, then synchronizing it to the passed
 * xop cluster.  When synchronizing, if idx >= 0, only cluster index (idx)
 * is synchronized.  Otherwise the whole cluster is synchronized.  inum will
 * be extracted from the passed-in xop and the inum argument will be ignored.
 *
 * If xop is passed as NULL then a new hammer2_inode is allocated with the
 * specified inum, and returned.  For normal inodes, the inode will be
 * indexed in memory and if it already exists the existing ip will be
 * returned instead of allocating a new one.  The superroot and PFS inodes
 * are not indexed in memory.
 *
 * The passed-in cluster must be locked and will remain locked on return.
 * The returned inode will be locked and the caller may dispose of both
 * via hammer2_inode_unlock() + hammer2_inode_drop().  However, if the caller
 * needs to resolve a hardlink it must ref/unlock/relock/drop the inode.
 *
 * The hammer2_inode structure regulates the interface between the high level
 * kernel VNOPS API and the filesystem backend (the chains).
 *
 * On return the inode is locked with the supplied cluster.
 */
hammer2_inode_t *
hammer2_inode_get(hammer2_pfs_t *pmp, hammer2_xop_head_t *xop,
		  hammer2_tid_t inum, int idx)
{
	hammer2_inode_t *nip;
	const hammer2_inode_data_t *iptmp;
	const hammer2_inode_data_t *nipdata;

	KKASSERT(xop == NULL ||
		 hammer2_cluster_type(&xop->cluster) ==
		 HAMMER2_BREF_TYPE_INODE);
	KKASSERT(pmp);

again:
	/*
	 * Interlocked lookup/ref of the inode.  This code is only needed
	 * when looking up inodes with nlinks != 0 (TODO: optimize out
	 * otherwise and test for duplicates).
	 *
	 * Cluster can be NULL during the initial pfs allocation.
	 */
	if (xop) {
		iptmp = &hammer2_xop_gdata(xop)->ipdata;
		inum = iptmp->meta.inum;
		hammer2_xop_pdata(xop);
	}
	nip = hammer2_inode_lookup(pmp, inum);
	if (nip) {
		/*
		 * Handle SMP race (not applicable to the super-root spmp
		 * which can't index inodes due to duplicative inode numbers).
		 */
		hammer2_mtx_ex(&nip->lock);
		if (pmp->spmp_hmp == NULL &&
		    (nip->flags & HAMMER2_INODE_ONRBTREE) == 0) {
			hammer2_mtx_unlock(&nip->lock);
			hammer2_inode_drop(nip);
			goto again;
		}
		if (xop) {
			if (idx >= 0)
				hammer2_inode_repoint_one(nip, &xop->cluster,
							  idx);
			else
				hammer2_inode_repoint(nip, NULL,
						      &xop->cluster);
		}
		return nip;
	}

	/*
	 * We couldn't find the inode number, create a new inode and try to
	 * insert it, handle insertion races.
	 */
	nip = kmalloc(sizeof(*nip), pmp->minode, M_WAITOK | M_ZERO);
	spin_init(&nip->cluster_spin, "h2clspin");
	atomic_add_long(&pmp->inmem_inodes, 1);
	if (pmp->spmp_hmp)
		nip->flags = HAMMER2_INODE_SROOT;

	/*
	 * Initialize nip's cluster.  A cluster is provided for normal
	 * inodes but typically not for the super-root or PFS inodes.
	 */
	nip->cluster.refs = 1;
	nip->cluster.pmp = pmp;
	nip->cluster.flags |= HAMMER2_CLUSTER_INODE;
	if (xop) {
		nipdata = &hammer2_xop_gdata(xop)->ipdata;
		nip->meta = nipdata->meta;
		hammer2_xop_pdata(xop);
		atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD);
		hammer2_inode_repoint(nip, NULL, &xop->cluster);
	} else {
		nip->meta.inum = inum;		/* PFS inum is always 1 XXX */
		/* mtime will be updated when a cluster is available */
		atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD); /*XXX*/
	}

	nip->pmp = pmp;

	/*
	 * ref and lock on nip gives it state compatible to after a
	 * hammer2_inode_lock() call.
	 */
	nip->refs = 1;
	hammer2_mtx_init(&nip->lock, "h2inode");
	hammer2_mtx_init(&nip->truncate_lock, "h2trunc");
	hammer2_mtx_ex(&nip->lock);
	TAILQ_INIT(&nip->depend_static.sideq);
	/* combination of thread lock and chain lock == inode lock */

	/*
	 * Attempt to add the inode.  If it fails we raced another inode
	 * get.  Undo all the work and try again.
	 */
	if (pmp->spmp_hmp == NULL) {
		hammer2_spin_ex(&pmp->inum_spin);
		if (RB_INSERT(hammer2_inode_tree, &pmp->inum_tree, nip)) {
			hammer2_spin_unex(&pmp->inum_spin);
			hammer2_mtx_unlock(&nip->lock);
			hammer2_inode_drop(nip);
			goto again;
		}
		atomic_set_int(&nip->flags, HAMMER2_INODE_ONRBTREE);
		hammer2_spin_unex(&pmp->inum_spin);
	}
	return nip;
}
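/*
 * Illustrative sketch (not part of the original source): allocating the
 * in-memory inode for a freshly assigned inode number, as the normal
 * file-create path further below does (xop == NULL, so no cluster is
 * attached yet).  Inode-number allocation is assumed.
 */
#if 0
hammer2_inode_t *nip;

nip = hammer2_inode_get(pmp, NULL, inum, -1);	/* returns locked, ref'd */
/* ... fill in nip->meta, run the create XOP, repoint the cluster ... */
hammer2_inode_unlock(nip);
#endif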
/*
 * Create a PFS inode under the superroot.  This function will create the
 * inode, its media chains, and also insert it into the media.
 *
 * Caller must be in a flush transaction because we are inserting the inode
 * onto the media.
 */
hammer2_inode_t *
hammer2_inode_create_pfs(hammer2_pfs_t *spmp,
		     const uint8_t *name, size_t name_len,
		     int *errorp)
{
	hammer2_xop_create_t *xop;
	hammer2_inode_t *pip;
	hammer2_inode_t *nip;
	int error;
	uuid_t pip_uid;
	uuid_t pip_gid;
	uint32_t pip_mode;
	uint8_t pip_comp_algo;
	uint8_t pip_check_algo;
	hammer2_tid_t pip_inum;
	hammer2_key_t lhc;

	pip = spmp->iroot;
	nip = NULL;

	lhc = hammer2_dirhash(name, name_len);
	*errorp = 0;

	/*
	 * Locate the inode or indirect block to create the new
	 * entry in.  At the same time check for key collisions
	 * and iterate until we don't get one.
	 *
	 * Lock the directory exclusively for now to guarantee that
	 * we can find an unused lhc for the name.  Due to collisions,
	 * two different creates can end up with the same lhc so we
	 * cannot depend on the OS to prevent the collision.
	 */
	hammer2_inode_lock(pip, 0);

	pip_uid = pip->meta.uid;
	pip_gid = pip->meta.gid;
	pip_mode = pip->meta.mode;
	pip_comp_algo = pip->meta.comp_algo;
	pip_check_algo = pip->meta.check_algo;
	pip_inum = (pip == pip->pmp->iroot) ? 1 : pip->meta.inum;

	/*
	 * Locate an unused key in the collision space.
	 */
	{
		hammer2_xop_scanlhc_t *sxop;
		hammer2_key_t lhcbase;

		lhcbase = lhc;
		sxop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
		sxop->lhc = lhc;
		hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
			if (lhc != sxop->head.cluster.focus->bref.key)
				break;
			++lhc;
		}
		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

		if (error) {
			if (error != HAMMER2_ERROR_ENOENT) {
				*errorp = error;
				goto done2;
			}
			++lhc;
			error = 0;
		}
		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
			error = HAMMER2_ERROR_ENOSPC;
			*errorp = error;
			goto done2;
		}
	}

	/*
	 * Create the inode with the lhc as the key.
	 */
	xop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
	xop->lhc = lhc;
	xop->flags = HAMMER2_INSERT_PFSROOT;
	bzero(&xop->meta, sizeof(xop->meta));

	xop->meta.type = HAMMER2_OBJTYPE_DIRECTORY;
	xop->meta.inum = 1;
	xop->meta.iparent = pip_inum;

	/* Inherit parent's inode compression mode. */
	xop->meta.comp_algo = pip_comp_algo;
	xop->meta.check_algo = pip_check_algo;
	xop->meta.version = HAMMER2_INODE_VERSION_ONE;
	hammer2_update_time(&xop->meta.ctime);
	xop->meta.mtime = xop->meta.ctime;
	xop->meta.mode = 0755;
	xop->meta.nlinks = 1;

	/*
	 * Regular files and softlinks allow a small amount of data to be
	 * directly embedded in the inode.  This flag will be cleared if
	 * the size is extended past the embedded limit.
	 */
	if (xop->meta.type == HAMMER2_OBJTYPE_REGFILE ||
	    xop->meta.type == HAMMER2_OBJTYPE_SOFTLINK) {
		xop->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
	}
	hammer2_xop_setname(&xop->head, name, name_len);
	xop->meta.name_len = name_len;
	xop->meta.name_key = lhc;
	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);

	hammer2_xop_start(&xop->head, &hammer2_inode_create_desc);

	error = hammer2_xop_collect(&xop->head, 0);
#if INODE_DEBUG
	kprintf("CREATE INODE %*.*s\n",
		(int)name_len, (int)name_len, name);
#endif

	if (error) {
		*errorp = error;
		goto done;
	}

	/*
	 * Set up the new inode if not a hardlink pointer.
	 *
	 * NOTE: *_get() integrates chain's lock into the inode lock.
	 *
	 * NOTE: Only one new inode can currently be created per
	 *	 transaction.  If the need arises we can adjust
	 *	 hammer2_trans_init() to allow more.
	 *
	 * NOTE: nipdata will have chain's blockset data.
	 */
	nip = hammer2_inode_get(pip->pmp, &xop->head, -1, -1);
	nip->comp_heuristic = 0;
done:
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
done2:
	hammer2_inode_unlock(pip);

	return (nip);
}
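/*
 * Illustrative sketch (not part of the original source): creating a PFS
 * root inode from an administrative path.  The flush transaction is
 * required by the comment above; the exact hammer2_trans_init() and
 * hammer2_trans_done() signatures are assumptions here.
 */
#if 0
int error;
hammer2_inode_t *nip;

hammer2_trans_init(spmp, HAMMER2_TRANS_ISFLUSH);
nip = hammer2_inode_create_pfs(spmp, (const uint8_t *)"mypfs", 5, &error);
if (nip)
	hammer2_inode_unlock(nip);
hammer2_trans_done(spmp, HAMMER2_TRANS_ISFLUSH);
#endif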
/*
 * Create a new, normal inode.  This function will create the inode,
 * the media chains, but will not insert the chains onto the media topology
 * (doing so would require a flush transaction and cause long stalls).
 *
 * Caller must be in a normal transaction.
 */
hammer2_inode_t *
hammer2_inode_create_normal(hammer2_inode_t *pip,
			    struct vattr *vap, struct ucred *cred,
			    hammer2_key_t inum, int *errorp)
{
	hammer2_xop_create_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	int error;
	uid_t xuid;
	uuid_t pip_uid;
	uuid_t pip_gid;
	uint32_t pip_mode;
	uint8_t pip_comp_algo;
	uint8_t pip_check_algo;
	hammer2_tid_t pip_inum;
	uint8_t type;

	dip = pip->pmp->iroot;
	KKASSERT(dip != NULL);

	*errorp = 0;

	/*hammer2_inode_lock(dip, 0);*/

	pip_uid = pip->meta.uid;
	pip_gid = pip->meta.gid;
	pip_mode = pip->meta.mode;
	pip_comp_algo = pip->meta.comp_algo;
	pip_check_algo = pip->meta.check_algo;
	pip_inum = (pip == pip->pmp->iroot) ? 1 : pip->meta.inum;

	/*
	 * Create the in-memory hammer2_inode structure for the specified
	 * inode.
	 */
	nip = hammer2_inode_get(dip->pmp, NULL, inum, -1);
	nip->comp_heuristic = 0;
	KKASSERT((nip->flags & HAMMER2_INODE_CREATING) == 0 &&
		 nip->cluster.nchains == 0);
	atomic_set_int(&nip->flags, HAMMER2_INODE_CREATING);

	/*
	 * Setup the inode meta-data
	 */
	nip->meta.type = hammer2_get_obj_type(vap->va_type);

	switch (nip->meta.type) {
	case HAMMER2_OBJTYPE_CDEV:
	case HAMMER2_OBJTYPE_BDEV:
		nip->meta.rmajor = vap->va_rmajor;
		nip->meta.rminor = vap->va_rminor;
		break;
	default:
		break;
	}
	type = nip->meta.type;

	KKASSERT(nip->meta.inum == inum);
	nip->meta.iparent = pip_inum;

	/* Inherit parent's inode compression mode. */
	nip->meta.comp_algo = pip_comp_algo;
	nip->meta.check_algo = pip_check_algo;
	nip->meta.version = HAMMER2_INODE_VERSION_ONE;
	hammer2_update_time(&nip->meta.ctime);
	nip->meta.mtime = nip->meta.ctime;
	nip->meta.mode = vap->va_mode;
	nip->meta.nlinks = 1;

	xuid = hammer2_to_unix_xid(&pip_uid);
	xuid = vop_helper_create_uid(dip->pmp->mp, pip_mode,
				     xuid, cred,
				     &vap->va_mode);
	if (vap->va_vaflags & VA_UID_UUID_VALID)
		nip->meta.uid = vap->va_uid_uuid;
	else if (vap->va_uid != (uid_t)VNOVAL)
		hammer2_guid_to_uuid(&nip->meta.uid, vap->va_uid);
	else
		hammer2_guid_to_uuid(&nip->meta.uid, xuid);

	if (vap->va_vaflags & VA_GID_UUID_VALID)
		nip->meta.gid = vap->va_gid_uuid;
	else if (vap->va_gid != (gid_t)VNOVAL)
		hammer2_guid_to_uuid(&nip->meta.gid, vap->va_gid);
	else
		nip->meta.gid = pip_gid;

	/*
	 * Regular files and softlinks allow a small amount of data to be
	 * directly embedded in the inode.  This flag will be cleared if
	 * the size is extended past the embedded limit.
	 */
	if (nip->meta.type == HAMMER2_OBJTYPE_REGFILE ||
	    nip->meta.type == HAMMER2_OBJTYPE_SOFTLINK) {
		nip->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
	}

	/*
	 * Create the inode using (inum) as the key.  Pass pip for
	 * method inheritance.
	 */
	xop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
	xop->lhc = inum;
	xop->flags = 0;
	xop->meta = nip->meta;

	xop->meta.name_len = hammer2_xop_setname_inum(&xop->head, inum);
	xop->meta.name_key = inum;
	nip->meta.name_len = xop->meta.name_len;
	nip->meta.name_key = xop->meta.name_key;
	hammer2_inode_modify(nip);

	/*
	 * Create the inode media chains but leave them detached.  We are
	 * not in a flush transaction so we can't mess with media topology
	 * above normal inodes (i.e. the index of the inodes themselves).
	 *
	 * We've already set the INODE_CREATING flag.  The inode's media
	 * chains will be inserted onto the media topology on the next
	 * filesystem sync.
	 */
	hammer2_xop_start(&xop->head, &hammer2_inode_create_det_desc);

	error = hammer2_xop_collect(&xop->head, 0);
#if INODE_DEBUG
	kprintf("create inode type %d error %d\n", nip->meta.type, error);
#endif

	if (error) {
		*errorp = error;
		atomic_clear_int(&nip->flags, HAMMER2_INODE_CREATING);
		hammer2_inode_unlock(nip);
		hammer2_inode_drop(nip);
		nip = NULL;
		goto done;
	}

	/*
	 * Associate the media chains created by the backend with the
	 * frontend inode.
	 */
	hammer2_inode_repoint(nip, NULL, &xop->head.cluster);
done:
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	/*hammer2_inode_unlock(dip);*/

	return (nip);
}
/*
 * Create a directory entry under dip with the specified name, inode number,
 * and OBJTYPE (type).
 *
 * This returns a UNIX errno code, not a HAMMER2_ERROR_* code.
 *
 * Caller must hold dip locked.
 */
int
hammer2_dirent_create(hammer2_inode_t *dip, const char *name, size_t name_len,
		      hammer2_key_t inum, uint8_t type)
{
	hammer2_xop_mkdirent_t *xop;
	hammer2_key_t lhc;
	int error;

	KKASSERT(name != NULL);
	lhc = hammer2_dirhash(name, name_len);

	/*
	 * Locate the inode or indirect block to create the new
	 * entry in.  At the same time check for key collisions
	 * and iterate until we don't get one.
	 *
	 * Lock the directory exclusively for now to guarantee that
	 * we can find an unused lhc for the name.  Due to collisions,
	 * two different creates can end up with the same lhc so we
	 * cannot depend on the OS to prevent the collision.
	 */
	hammer2_inode_modify(dip);

	/*
	 * If name specified, locate an unused key in the collision space.
	 * Otherwise use the passed-in lhc directly.
	 */
	{
		hammer2_xop_scanlhc_t *sxop;
		hammer2_key_t lhcbase;

		lhcbase = lhc;
		sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
		sxop->lhc = lhc;
		hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
			if (lhc != sxop->head.cluster.focus->bref.key)
				break;
			++lhc;
		}
		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

		if (error) {
			if (error != HAMMER2_ERROR_ENOENT)
				goto done2;
			++lhc;
			error = 0;
		}
		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
			error = HAMMER2_ERROR_ENOSPC;
			goto done2;
		}
	}

	/*
	 * Create the directory entry with the lhc as the key.
	 */
	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
	xop->lhc = lhc;
	bzero(&xop->dirent, sizeof(xop->dirent));
	xop->dirent.inum = inum;
	xop->dirent.type = type;
	xop->dirent.namlen = name_len;

	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);
	hammer2_xop_setname(&xop->head, name, name_len);

	hammer2_xop_start(&xop->head, &hammer2_inode_mkdirent_desc);

	error = hammer2_xop_collect(&xop->head, 0);

	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
done2:
	error = hammer2_error_to_errno(error);

	return error;
}
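/*
 * Illustrative sketch (not part of the original source): the usual
 * create pairing, allocating the inode first and then the directory
 * entry that points at it.  Inode-number allocation, the transaction,
 * and the vap/cred/name variables are assumptions.
 */
#if 0
nip = hammer2_inode_create_normal(dip, vap, cred, inum, &error);
if (nip) {
	error = hammer2_dirent_create(dip, name, name_len,
				      nip->meta.inum, nip->meta.type);
	hammer2_inode_depend(dip, nip);	/* flush dirent + inode together */
}
#endif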
/*
 * Repoint ip->cluster's chains to cluster's chains and fixup the default
 * focus.  All items, valid or invalid, are repointed.  hammer2_xop_start()
 * filters out invalid or non-matching elements.
 *
 * Caller must hold the inode exclusively locked; the cluster, if not NULL,
 * must also be locked.
 *
 * Cluster may be NULL to clean out any chains in ip->cluster.
 */
void
hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_inode_t *pip,
		      hammer2_cluster_t *cluster)
{
	hammer2_chain_t *dropch[HAMMER2_MAXCLUSTER];
	hammer2_chain_t *ochain;
	hammer2_chain_t *nchain;
	int i;

	bzero(dropch, sizeof(dropch));

	/*
	 * Replace chains in ip->cluster with chains from cluster and
	 * adjust the focus if necessary.
	 *
	 * NOTE: nchain and/or ochain can be NULL due to gaps
	 *	 in the cluster arrays.
	 */
	hammer2_spin_ex(&ip->cluster_spin);
	for (i = 0; cluster && i < cluster->nchains; ++i) {
		/*
		 * Do not replace elements which are the same.  Also handle
		 * element count discrepancies.
		 */
		nchain = cluster->array[i].chain;
		if (i < ip->cluster.nchains) {
			ochain = ip->cluster.array[i].chain;
			if (ochain == nchain)
				continue;
		} else {
			ochain = NULL;
		}

		/*
		 * Make adjustments
		 */
		ip->cluster.array[i].chain = nchain;
		ip->cluster.array[i].flags &= ~HAMMER2_CITEM_INVALID;
		ip->cluster.array[i].flags |= cluster->array[i].flags &
					      HAMMER2_CITEM_INVALID;
		if (nchain)
			hammer2_chain_ref(nchain);
		dropch[i] = ochain;
	}

	/*
	 * Release any left-over chains in ip->cluster.
	 */
	while (i < ip->cluster.nchains) {
		nchain = ip->cluster.array[i].chain;
		if (nchain) {
			ip->cluster.array[i].chain = NULL;
			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
		}
		dropch[i] = nchain;
		++i;
	}

	/*
	 * Fixup fields.  Note that the inode-embedded cluster is never
	 * directly locked.
	 */
	if (cluster) {
		ip->cluster.nchains = cluster->nchains;
		ip->cluster.focus = cluster->focus;
		ip->cluster.flags = cluster->flags & ~HAMMER2_CLUSTER_LOCKED;
	} else {
		ip->cluster.nchains = 0;
		ip->cluster.focus = NULL;
		ip->cluster.flags &= ~HAMMER2_CLUSTER_ZFLAGS;
	}

	hammer2_spin_unex(&ip->cluster_spin);

	/*
	 * Cleanup outside of spinlock
	 */
	while (--i >= 0) {
		if (dropch[i])
			hammer2_chain_drop(dropch[i]);
	}
}
/*
 * Repoint a single element from the cluster to the ip.  Used by the
 * synchronization threads to piecemeal update inodes.  Does not change
 * focus and requires inode to be re-locked to clean-up flags (XXX).
 */
void
hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
			  int idx)
{
	hammer2_chain_t *ochain;
	hammer2_chain_t *nchain;
	int i;

	hammer2_spin_ex(&ip->cluster_spin);
	KKASSERT(idx < cluster->nchains);
	if (idx < ip->cluster.nchains) {
		ochain = ip->cluster.array[idx].chain;
		nchain = cluster->array[idx].chain;
	} else {
		ochain = NULL;
		nchain = cluster->array[idx].chain;
		for (i = ip->cluster.nchains; i <= idx; ++i) {
			bzero(&ip->cluster.array[i],
			      sizeof(ip->cluster.array[i]));
			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
		}
		ip->cluster.nchains = idx + 1;
	}
	if (ochain != nchain) {
		/*
		 * Make adjustments.
		 */
		ip->cluster.array[idx].chain = nchain;
		ip->cluster.array[idx].flags &= ~HAMMER2_CITEM_INVALID;
		ip->cluster.array[idx].flags |= cluster->array[idx].flags &
						HAMMER2_CITEM_INVALID;
	}
	hammer2_spin_unex(&ip->cluster_spin);
	if (ochain != nchain) {
		if (nchain)
			hammer2_chain_ref(nchain);
		if (ochain)
			hammer2_chain_drop(ochain);
	}
}
/*
 * Called with a locked inode to finish unlinking an inode after xop_unlink
 * had been run.  This function is responsible for decrementing nlinks.
 *
 * We don't bother decrementing nlinks if the file is not open and this was
 * the last link.
 *
 * If the inode is a hardlink target its chain has not yet been deleted,
 * otherwise its chain has been deleted.
 *
 * If isopen then any prior deletion was not permanent and the inode is
 * left intact with nlinks == 0.
 */
int
hammer2_inode_unlink_finisher(hammer2_inode_t *ip, int isopen)
{
	int error;

	error = 0;

	/*
	 * Decrement nlinks.  If this is the last link and the file is
	 * not open we can just delete the inode and not bother dropping
	 * nlinks to 0 (avoiding unnecessary block updates).
	 */
	if (ip->meta.nlinks == 1) {
		atomic_set_int(&ip->flags, HAMMER2_INODE_ISUNLINKED);
		if (isopen == 0)
			goto killit;
	}

	hammer2_inode_modify(ip);
	--ip->meta.nlinks;
	if ((int64_t)ip->meta.nlinks < 0)
		ip->meta.nlinks = 0;	/* safety */

	/*
	 * If nlinks is not zero we are done.  However, this should only be
	 * possible with a hardlink target.  If the inode is an embedded
	 * hardlink nlinks should have dropped to zero, warn and proceed
	 * with the next step.
	 */
	if (ip->meta.nlinks) {
		if ((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0)
			return 0;
		kprintf("hammer2_inode_unlink: nlinks was not 0 (%jd)\n",
			(intmax_t)ip->meta.nlinks);
		return 0;
	}

	if (ip->vp)
		hammer2_knote(ip->vp, NOTE_DELETE);

	/*
	 * nlinks is now an implied zero, delete the inode if not open.
	 * We avoid unnecessary media updates by not bothering to actually
	 * decrement nlinks for the 1->0 transition.
	 *
	 * Put the inode on the sideq to ensure that any disconnected chains
	 * get properly flushed (so they can be freed).  Defer the deletion
	 * to the sync code, doing it now will desynchronize the inode from
	 * related directory entries (which is bad).
	 *
	 * NOTE: killit can be reached without modifying the inode, so
	 *	 make sure that it is on the SIDEQ.
	 */
	if (isopen == 0) {
		hammer2_xop_destroy_t *xop;

killit:
		atomic_set_int(&ip->flags, HAMMER2_INODE_DELETING);
		hammer2_inode_delayed_sideq(ip);

		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		hammer2_xop_start(&xop->head, &hammer2_inode_destroy_desc);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	}
	error = 0;	/* XXX */

	return error;
}
/*
 * Mark an inode as being modified, meaning that the caller will modify
 * ip->meta.
 *
 * If a vnode is present we set the vnode dirty and the nominal filesystem
 * sync will also handle synchronizing the inode meta-data.  If no vnode
 * is present we must ensure that the inode is on pmp->sideq.
 *
 * NOTE: We must always queue the inode to the sideq.  This allows H2 to
 *	 shortcut vsyncscan() and flush inodes and their related vnodes
 *	 in two stages.  H2 still calls vfsync() for each vnode.
 *
 * NOTE: No mtid (modify_tid) is passed into this routine.  The caller is
 *	 only modifying the in-memory inode.  A modify_tid is synchronized
 *	 later when the inode gets flushed.
 *
 * NOTE: As an exception to the general rule, the inode MAY be locked
 *	 shared for this particular call.
 */
void
hammer2_inode_modify(hammer2_inode_t *ip)
{
	atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
	if (ip->vp)
		vsetisdirty(ip->vp);
	if (ip->pmp && (ip->flags & HAMMER2_INODE_NOSIDEQ) == 0)
		hammer2_inode_delayed_sideq(ip);
}
/*
 * Synchronize the inode's frontend state with the chain state prior
 * to any explicit flush of the inode or any strategy write call.  This
 * does not flush the inode's chain or its sub-topology to media (higher
 * level layers are responsible for doing that).
 *
 * Called with a locked inode inside a normal transaction.
 *
 * inode must be locked.
 */
int
hammer2_inode_chain_sync(hammer2_inode_t *ip)
{
	int error;

	error = 0;
	if (ip->flags & (HAMMER2_INODE_RESIZED | HAMMER2_INODE_MODIFIED)) {
		hammer2_xop_fsync_t *xop;
		uint32_t ipflags;

		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		xop->clear_directdata = 0;
		if (ip->flags & HAMMER2_INODE_RESIZED) {
			if ((ip->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) &&
			    ip->meta.size > HAMMER2_EMBEDDED_BYTES) {
				ip->meta.op_flags &=
				    ~HAMMER2_OPFLAG_DIRECTDATA;
				xop->clear_directdata = 1;
			}
			xop->osize = ip->osize;
		} else {
			xop->osize = ip->meta.size;	/* safety */
		}
		xop->ipflags = ip->flags;
		xop->meta = ip->meta;
		ipflags = ip->flags;	/* local copy, xop is gone after
					 * the retire below */

		atomic_clear_int(&ip->flags, HAMMER2_INODE_RESIZED |
					     HAMMER2_INODE_MODIFIED);
		hammer2_xop_start(&xop->head, &hammer2_inode_chain_sync_desc);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (error == HAMMER2_ERROR_ENOENT)
			error = 0;
		if (error) {
			kprintf("hammer2: unable to fsync inode %p\n", ip);
			atomic_set_int(&ip->flags,
				       ipflags & (HAMMER2_INODE_RESIZED |
						  HAMMER2_INODE_MODIFIED));
			/* XXX return error somehow? */
		}
	}
	return error;
}
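/*
 * Illustrative sketch (not part of the original source): the order of
 * the two sync stages as an fsync-style path would invoke them, first
 * synchronizing frontend meta-data into the chain, then flushing the
 * chain's sub-topology to media.  Transaction setup is assumed and no
 * extra xop flags are passed.
 */
#if 0
hammer2_inode_lock(ip, 0);
error = hammer2_inode_chain_sync(ip);	/* frontend -> chain state */
if (error == 0)
	error = hammer2_inode_chain_flush(ip, 0);
hammer2_inode_unlock(ip);
#endif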
/*
 * When an inode is flagged INODE_CREATING its chains have not actually
 * been inserted into the on-media tree yet.
 */
int
hammer2_inode_chain_ins(hammer2_inode_t *ip)
{
	int error;

	error = 0;
	if (ip->flags & HAMMER2_INODE_CREATING) {
		hammer2_xop_create_t *xop;

		atomic_clear_int(&ip->flags, HAMMER2_INODE_CREATING);
		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		xop->lhc = ip->meta.inum;
		xop->flags = 0;
		hammer2_xop_start(&xop->head, &hammer2_inode_create_ins_desc);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (error == HAMMER2_ERROR_ENOENT)
			error = 0;
		if (error) {
			kprintf("hammer2: backend unable to "
				"insert inode %p %ld\n", ip, ip->meta.inum);
			/* XXX return error somehow? */
		}
	}
	return error;
}
/*
 * When an inode is flagged INODE_DELETING it has been deleted (no directory
 * entry or open refs are left, though as an optimization H2 might leave
 * nlinks == 1 to avoid unnecessary block updates).  The backend flush then
 * needs to actually remove it from the topology.
 *
 * NOTE: backend flush must still sync and flush the deleted inode to clean
 *	 out related chains.
 */
int
hammer2_inode_chain_des(hammer2_inode_t *ip)
{
	int error;

	error = 0;
	if (ip->flags & HAMMER2_INODE_DELETING) {
		hammer2_xop_destroy_t *xop;

		atomic_clear_int(&ip->flags, HAMMER2_INODE_DELETING);
		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		hammer2_xop_start(&xop->head, &hammer2_inode_destroy_desc);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

		if (error == HAMMER2_ERROR_ENOENT)
			error = 0;
		if (error) {
			kprintf("hammer2: backend unable to "
				"delete inode %p %ld\n", ip, ip->meta.inum);
			/* XXX return error somehow? */
		}
	}
	return error;
}
/*
 * Flushes the inode's chain and its sub-topology to media.  Interlocks
 * HAMMER2_INODE_DIRTYDATA by clearing it prior to the flush.  Any strategy
 * function creating or modifying a chain under this inode will re-set the
 * flag.
 *
 * inode must be locked.
 */
int
hammer2_inode_chain_flush(hammer2_inode_t *ip, int flags)
{
	hammer2_xop_fsync_t *xop;
	int error;

	atomic_clear_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING | flags);
	hammer2_xop_start(&xop->head, &hammer2_inode_flush_desc);
	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_WAITALL);
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	if (error == HAMMER2_ERROR_ENOENT)
		error = 0;

	return error;
}