/*
 * Copyright (c) 2011-2018 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/uuid.h>

#include "hammer2.h"

#define INODE_DEBUG	0

RB_GENERATE2(hammer2_inode_tree, hammer2_inode, rbnode, hammer2_inode_cmp,
	     hammer2_tid_t, meta.inum);

int
hammer2_inode_cmp(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
{
	if (ip1->meta.inum < ip2->meta.inum)
		return (-1);
	if (ip1->meta.inum > ip2->meta.inum)
		return (1);
	return (0);
}

static void
hammer2_knote(struct vnode *vp, int flags)
{
	if (flags)
		KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
}

/*
 * Caller holds pmp->list_spin and the inode should be locked.  Merge ip
 * with the specified depend.
 *
 * If the ip is on SYNCQ it stays there and (void *)-1 is returned, indicating
 * that successive calls must ensure the ip is on a pass2 depend (or they are
 * all SYNCQ).  If the passed-in depend is not NULL and not (void *)-1 then
 * we can set pass2 on it and return.
 *
 * If the ip is not on SYNCQ it is merged with the passed-in depend, creating
 * a self-depend if necessary, and depend->pass2 is set according
 * to the PASS2 flag.  SIDEQ is set.
 */
static hammer2_depend_t *
hammer2_inode_setdepend_locked(hammer2_inode_t *ip, hammer2_depend_t *depend)
{
	hammer2_pfs_t *pmp = ip->pmp;
	hammer2_depend_t *dtmp;
	hammer2_inode_t *iptmp;

	/*
	 * If ip is SYNCQ its entry is used for the syncq list and it will
	 * no longer be associated with a dependency.  Merging this status
	 * with a passed-in depend implies PASS2.
	 */
	if (ip->flags & HAMMER2_INODE_SYNCQ) {
		if (depend == (void *)-1 ||
		    depend == NULL) {
			return ((void *)-1);
		}
		depend->pass2 = 1;
		hammer2_trans_setflags(pmp, HAMMER2_TRANS_RESCAN);

		return depend;
	}

	/*
	 * If ip is already SIDEQ, merge ip->depend into the passed-in depend.
	 * If it is not, associate the ip with the passed-in depend, creating
	 * a single-entry dependency using depend_static if necessary.
	 *
	 * NOTE: The use of ip->depend_static always requires that the
	 *	 specific ip containing the structure is part of that
	 *	 particular depend_static's dependency group.
	 */
	if (ip->flags & HAMMER2_INODE_SIDEQ) {
		/*
		 * Merge ip->depend with the passed-in depend.  If the
		 * passed-in depend is not a special case, all ips associated
		 * with ip->depend (including the original ip) must be moved
		 * to the passed-in depend.
		 */
		if (depend == NULL) {
			depend = ip->depend;
		} else if (depend == (void *)-1) {
			depend = ip->depend;
			depend->pass2 = 1;
		} else if (depend != ip->depend) {
			int sanitychk = 0;

			dtmp = ip->depend;
			while ((iptmp = TAILQ_FIRST(&dtmp->sideq)) != NULL) {
				if (iptmp == ip)
					sanitychk = 1;
				TAILQ_REMOVE(&dtmp->sideq, iptmp, entry);
				TAILQ_INSERT_TAIL(&depend->sideq, iptmp, entry);
				iptmp->depend = depend;
			}
			KKASSERT(sanitychk == 1);
			depend->count += dtmp->count;
			depend->pass2 |= dtmp->pass2;
			TAILQ_REMOVE(&pmp->depq, dtmp, entry);
			dtmp->count = 0;
			dtmp->pass2 = 0;
		}
	} else {
		/*
		 * Add ip to the sideq, creating a self-dependency if
		 * necessary.
		 */
		hammer2_inode_ref(ip);
		atomic_set_int(&ip->flags, HAMMER2_INODE_SIDEQ);
		if (depend == NULL) {
			depend = &ip->depend_static;
			TAILQ_INSERT_TAIL(&pmp->depq, depend, entry);
		} else if (depend == (void *)-1) {
			depend = &ip->depend_static;
			depend->pass2 = 1;
			TAILQ_INSERT_TAIL(&pmp->depq, depend, entry);
		} /* else add ip to passed-in depend */
		TAILQ_INSERT_TAIL(&depend->sideq, ip, entry);
		ip->depend = depend;
		++depend->count;
		++pmp->sideq_count;
	}

	if (ip->flags & HAMMER2_INODE_SYNCQ_PASS2)
		depend->pass2 = 1;
	if (depend->pass2)
		hammer2_trans_setflags(pmp, HAMMER2_TRANS_RESCAN);

	return (depend);
}

/*
 * Put a solo inode on the SIDEQ (meaning that it is dirty).  This can also
 * occur from inode_lock4() and inode_depend().
 *
 * Caller must pass-in a locked inode.
 */
void
hammer2_inode_delayed_sideq(hammer2_inode_t *ip)
{
	hammer2_pfs_t *pmp = ip->pmp;

	/*
	 * Optimize case to avoid pmp spinlock.
	 */
	if ((ip->flags & (HAMMER2_INODE_SYNCQ | HAMMER2_INODE_SIDEQ)) == 0) {
		hammer2_spin_ex(&pmp->list_spin);
		hammer2_inode_setdepend_locked(ip, NULL);
		hammer2_spin_unex(&pmp->list_spin);
	}
}

/*
 * Lock an inode, with SYNCQ semantics.
 *
 * HAMMER2 offers shared and exclusive locks on inodes.  Pass a mask of
 * flags for options:
 *
 *	- pass HAMMER2_RESOLVE_SHARED if a shared lock is desired.  The
 *	  inode locking function will automatically set the RDONLY flag.
 *	  Shared locks are not subject to SYNCQ semantics, exclusive locks
 *	  are.
 *
 *	- pass HAMMER2_RESOLVE_ALWAYS if you need the inode's meta-data.
 *	  Most front-end inode locks do.
 *
 *	- pass HAMMER2_RESOLVE_NEVER if you do not want to require that
 *	  the inode data be resolved.  This is used by the syncthr because
 *	  it can run on an unresolved/out-of-sync cluster, and also by the
 *	  vnode reclamation code to avoid unnecessary I/O (particularly when
 *	  disposing of hundreds of thousands of cached vnodes).
 *
 * When an exclusive lock is obtained on an inode that is on the SYNCQ,
 * HAMMER2 will automatically move the inode to the front of the queue before
 * blocking to avoid long stalls against filesystem sync operations.
 *
 * The inode locking function locks the inode itself, resolves any stale
 * chains in the inode's cluster, and allocates a fresh copy of the
 * cluster with 1 ref and all the underlying chains locked.
 *
 * ip->cluster will be stable while the inode is locked.
 *
 * NOTE: We don't combine the inode/chain lock because putting away an
 *	 inode would otherwise confuse multiple lock holders of the inode.
 *
 * NOTE: In-memory inodes always point to hardlink targets (the actual file),
 *	 and never point to a hardlink pointer.
 *
 * NOTE: If caller passes HAMMER2_RESOLVE_RDONLY the exclusive locking code
 *	 will feel free to reduce the chain set in the cluster as an
 *	 optimization.  It will still be validated against the quorum if
 *	 appropriate, but the optimization might be able to reduce data
 *	 accesses to one node.  This flag is automatically set if the inode
 *	 is locked with HAMMER2_RESOLVE_SHARED.
 */
void
hammer2_inode_lock(hammer2_inode_t *ip, int how)
{
	hammer2_pfs_t *pmp;

	hammer2_inode_ref(ip);
	pmp = ip->pmp;

	/*
	 * Inode structure mutex - Shared lock
	 */
	if (how & HAMMER2_RESOLVE_SHARED) {
		/*how |= HAMMER2_RESOLVE_RDONLY; not used */
		hammer2_mtx_sh(&ip->lock);
		return;
	}

	/*
	 * Inode structure mutex - Exclusive lock
	 *
	 * An exclusive lock (if not recursive) must wait for inodes on
	 * SYNCQ to flush first, to ensure that meta-data dependencies such
	 * as the nlink count and related directory entries are not split
	 * across flushes.
	 *
	 * If the vnode is locked by the current thread it must be unlocked
	 * across the tsleep() to avoid a deadlock.
	 */
	hammer2_mtx_ex(&ip->lock);
	if (hammer2_mtx_refs(&ip->lock) > 1)
		return;
	while ((ip->flags & HAMMER2_INODE_SYNCQ) && pmp) {
		hammer2_spin_ex(&pmp->list_spin);
		if (ip->flags & HAMMER2_INODE_SYNCQ) {
			tsleep_interlock(&ip->flags, 0);
			atomic_set_int(&ip->flags, HAMMER2_INODE_SYNCQ_WAKEUP);
			TAILQ_REMOVE(&pmp->syncq, ip, entry);
			TAILQ_INSERT_HEAD(&pmp->syncq, ip, entry);
			hammer2_spin_unex(&pmp->list_spin);
			hammer2_mtx_unlock(&ip->lock);
			tsleep(&ip->flags, PINTERLOCKED, "h2sync", 0);
			hammer2_mtx_ex(&ip->lock);
			continue;
		}
		hammer2_spin_unex(&pmp->list_spin);
		break;
	}
}

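/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * typical front-end pattern for the lock modes described above.  Kept
 * under #if 0 because it exists purely as documentation.
 */
#if 0
static void
example_lock_modes(hammer2_inode_t *ip)
{
	/* read-only access: shared lock, meta-data resolved */
	hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS |
			       HAMMER2_RESOLVE_SHARED);
	/* ... inspect ip->meta ... */
	hammer2_inode_unlock(ip);

	/* modifying access: exclusive lock, subject to SYNCQ semantics */
	hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
	/* ... hammer2_inode_modify(ip) plus ip->meta updates ... */
	hammer2_inode_unlock(ip);
}
#endif
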
/*
 * Exclusively lock up to four inodes, in order, with SYNCQ semantics.
 * ip1 and ip2 must not be NULL.  ip3 and ip4 may be NULL, but if ip3 is
 * NULL then ip4 must also be NULL.
 *
 * This creates a dependency between up to four inodes.
 */
void
hammer2_inode_lock4(hammer2_inode_t *ip1, hammer2_inode_t *ip2,
		    hammer2_inode_t *ip3, hammer2_inode_t *ip4)
{
	hammer2_inode_t *ips[4];
	hammer2_inode_t *iptmp;
	hammer2_inode_t *ipslp;
	hammer2_depend_t *depend;
	hammer2_pfs_t *pmp;
	size_t count;
	size_t i;

	pmp = ip1->pmp;			/* may be NULL */
	KKASSERT(pmp == ip2->pmp);

	ips[0] = ip1;
	ips[1] = ip2;
	if (ip3 == NULL) {
		count = 2;
	} else if (ip4 == NULL) {
		count = 3;
		ips[2] = ip3;
		KKASSERT(pmp == ip3->pmp);
	} else {
		count = 4;
		ips[2] = ip3;
		ips[3] = ip4;
		KKASSERT(pmp == ip3->pmp);
		KKASSERT(pmp == ip4->pmp);
	}
	for (i = 0; i < count; ++i)
		hammer2_inode_ref(ips[i]);

restart:
	/*
	 * Lock the inodes in order
	 */
	for (i = 0; i < count; ++i) {
		hammer2_mtx_ex(&ips[i]->lock);
	}

	/*
	 * Associate dependencies, record the first inode found on SYNCQ
	 * (operation is allowed to proceed for inodes on PASS2) for our
	 * sleep operation, this inode is theoretically the last one sync'd
	 * in the sequence.
	 *
	 * All inodes found on SYNCQ are moved to the head of the syncq
	 * to reduce stalls.
	 */
	hammer2_spin_ex(&pmp->list_spin);
	depend = NULL;
	ipslp = NULL;
	for (i = 0; i < count; ++i) {
		iptmp = ips[i];
		depend = hammer2_inode_setdepend_locked(iptmp, depend);
		if (iptmp->flags & HAMMER2_INODE_SYNCQ) {
			TAILQ_REMOVE(&pmp->syncq, iptmp, entry);
			TAILQ_INSERT_HEAD(&pmp->syncq, iptmp, entry);
			if (ipslp == NULL)
				ipslp = iptmp;
		}
	}
	hammer2_spin_unex(&pmp->list_spin);

	/*
	 * Block and retry if any of the inodes are on SYNCQ.  It is
	 * important that we allow the operation to proceed in the
	 * PASS2 case, to avoid deadlocking against the vnode.
	 */
	if (ipslp) {
		for (i = 0; i < count; ++i)
			hammer2_mtx_unlock(&ips[i]->lock);
		tsleep(&ipslp->flags, 0, "h2sync", 2);
		goto restart;
	}
}

/*
 * Release an inode lock.  If another thread is blocked on SYNCQ_WAKEUP
 * we wake it up.
 */
void
hammer2_inode_unlock(hammer2_inode_t *ip)
{
	if (ip->flags & HAMMER2_INODE_SYNCQ_WAKEUP) {
		atomic_clear_int(&ip->flags, HAMMER2_INODE_SYNCQ_WAKEUP);
		hammer2_mtx_unlock(&ip->lock);
		wakeup(&ip->flags);
	} else {
		hammer2_mtx_unlock(&ip->lock);
	}
	hammer2_inode_drop(ip);
}

/*
 * If either ip1 or ip2 have been tapped by the syncer, make sure that both
 * are.  This ensures that dependencies (e.g. dirent-v-inode) are synced
 * together.  For dirent-v-inode depends, pass the dirent as ip1.
 *
 * If neither ip1 nor ip2 have been tapped by the syncer, merge them into a
 * single dependency.  Dependencies are entered into pmp->depq.  This
 * effectively flags the inodes SIDEQ.
 *
 * Both ip1 and ip2 must be locked by the caller.  This also ensures
 * that we can't race the end of the syncer's queue run.
 */
void
hammer2_inode_depend(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
{
	hammer2_pfs_t *pmp;
	hammer2_depend_t *depend;

	pmp = ip1->pmp;
	hammer2_spin_ex(&pmp->list_spin);
	depend = hammer2_inode_setdepend_locked(ip1, NULL);
	depend = hammer2_inode_setdepend_locked(ip2, depend);
	hammer2_spin_unex(&pmp->list_spin);
}

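/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * create-style operation ties the directory inode and the new target
 * inode together so the syncer flushes them in the same pass.
 */
#if 0
static void
example_create_depend(hammer2_inode_t *dip, hammer2_inode_t *nip)
{
	/* both inodes are locked exclusively by the caller */
	hammer2_inode_depend(dip, nip);	/* dirent side passed as ip1 */
}
#endif
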
/*
 * Select a chain out of an inode's cluster and lock it.
 *
 * The inode does not have to be locked.
 */
hammer2_chain_t *
hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how)
{
	hammer2_chain_t *chain;
	hammer2_cluster_t *cluster;

	hammer2_spin_sh(&ip->cluster_spin);
	cluster = &ip->cluster;
	if (clindex >= cluster->nchains)
		chain = NULL;
	else
		chain = cluster->array[clindex].chain;
	if (chain) {
		hammer2_chain_ref(chain);
		hammer2_spin_unsh(&ip->cluster_spin);
		hammer2_chain_lock(chain, how);
	} else {
		hammer2_spin_unsh(&ip->cluster_spin);
	}
	return chain;
}

hammer2_chain_t *
hammer2_inode_chain_and_parent(hammer2_inode_t *ip, int clindex,
			       hammer2_chain_t **parentp, int how)
{
	hammer2_chain_t *chain;
	hammer2_chain_t *parent;

	for (;;) {
		hammer2_spin_sh(&ip->cluster_spin);
		if (clindex >= ip->cluster.nchains)
			chain = NULL;
		else
			chain = ip->cluster.array[clindex].chain;
		if (chain) {
			hammer2_chain_ref(chain);
			hammer2_spin_unsh(&ip->cluster_spin);
			hammer2_chain_lock(chain, how);
		} else {
			hammer2_spin_unsh(&ip->cluster_spin);
		}

		/*
		 * Get parent, lock order must be (parent, chain).
		 */
		parent = chain->parent;
		if (parent) {
			hammer2_chain_ref(parent);
			hammer2_chain_unlock(chain);
			hammer2_chain_lock(parent, how);
			hammer2_chain_lock(chain, how);
		}
		if (ip->cluster.array[clindex].chain == chain &&
		    chain->parent == parent) {
			break;
		}

		/*
		 * Retry
		 */
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		if (parent) {
			hammer2_chain_unlock(parent);
			hammer2_chain_drop(parent);
		}
	}
	*parentp = parent;

	return chain;
}

/*
 * Temporarily release a lock held shared or exclusive.  Caller must
 * hold the lock shared or exclusive on call and lock will be released
 * on return.
 *
 * Restore a lock that was temporarily released.
 */
hammer2_mtx_state_t
hammer2_inode_lock_temp_release(hammer2_inode_t *ip)
{
	return hammer2_mtx_temp_release(&ip->lock);
}

void
hammer2_inode_lock_temp_restore(hammer2_inode_t *ip, hammer2_mtx_state_t ostate)
{
	hammer2_mtx_temp_restore(&ip->lock, ostate);
}

/*
 * Upgrade a shared inode lock to exclusive and return.  If the inode lock
 * is already held exclusively this is a NOP.
 *
 * The caller MUST hold the inode lock either shared or exclusive on call
 * and will own the lock exclusively on return.
 *
 * Returns non-zero if the lock was already exclusive prior to the upgrade.
 */
int
hammer2_inode_lock_upgrade(hammer2_inode_t *ip)
{
	int wasexclusive;

	if (mtx_islocked_ex(&ip->lock)) {
		wasexclusive = 1;
	} else {
		hammer2_mtx_unlock(&ip->lock);
		hammer2_mtx_ex(&ip->lock);
		wasexclusive = 0;
	}
	return wasexclusive;
}

/*
 * Downgrade an inode lock from exclusive to shared only if the inode
 * lock was previously shared.  If the inode lock was previously exclusive,
 * this is a NOP.
 */
void
hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int wasexclusive)
{
	if (wasexclusive == 0)
		mtx_downgrade(&ip->lock);
}

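/*
 * Illustrative sketch (assumed usage, distilled from hammer2_igetv()
 * below): the upgrade/downgrade pair is symmetric and restores the
 * caller's original lock state.
 */
#if 0
static void
example_exclusive_section(hammer2_inode_t *ip)
{
	int wasexclusive;

	/* ip is locked shared or exclusive on entry */
	wasexclusive = hammer2_inode_lock_upgrade(ip);
	/* ... work requiring the exclusive lock, e.g. assigning ip->vp ... */
	hammer2_inode_lock_downgrade(ip, wasexclusive);
	/* original shared/exclusive state is restored */
}
#endif
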
/*
 * Lookup an inode by inode number
 */
hammer2_inode_t *
hammer2_inode_lookup(hammer2_pfs_t *pmp, hammer2_tid_t inum)
{
	hammer2_inode_t *ip;

	KKASSERT(pmp);
	if (pmp->spmp_hmp) {
		ip = NULL;
	} else {
		hammer2_spin_ex(&pmp->inum_spin);
		ip = RB_LOOKUP(hammer2_inode_tree, &pmp->inum_tree, inum);
		if (ip)
			hammer2_inode_ref(ip);
		hammer2_spin_unex(&pmp->inum_spin);
	}
	return (ip);
}

/*
 * Adding a ref to an inode is only legal if the inode already has at least
 * one ref.
 *
 * (can be called with spinlock held)
 */
void
hammer2_inode_ref(hammer2_inode_t *ip)
{
	atomic_add_int(&ip->refs, 1);
	if (hammer2_debug & 0x80000) {
		kprintf("INODE+1 %p (%d->%d)\n", ip, ip->refs - 1, ip->refs);
		print_backtrace(8);
	}
}

/*
 * Drop an inode reference, freeing the inode when the last reference goes
 * away.
 */
void
hammer2_inode_drop(hammer2_inode_t *ip)
{
	hammer2_pfs_t *pmp;
	u_int refs;

	while (ip) {
		if (hammer2_debug & 0x80000) {
			kprintf("INODE-1 %p (%d->%d)\n",
				ip, ip->refs, ip->refs - 1);
			print_backtrace(8);
		}
		refs = ip->refs;
		cpu_ccfence();
		if (refs == 1) {
			/*
			 * Transition to zero, must interlock with
			 * the inode inumber lookup tree (if applicable).
			 * It should not be possible for anyone to race
			 * the transition to 0.
			 */
			pmp = ip->pmp;
			KKASSERT(pmp);
			hammer2_spin_ex(&pmp->inum_spin);

			if (atomic_cmpset_int(&ip->refs, 1, 0)) {
				KKASSERT(hammer2_mtx_refs(&ip->lock) == 0);
				if (ip->flags & HAMMER2_INODE_ONRBTREE) {
					atomic_clear_int(&ip->flags,
						     HAMMER2_INODE_ONRBTREE);
					RB_REMOVE(hammer2_inode_tree,
						  &pmp->inum_tree, ip);
				}
				hammer2_spin_unex(&pmp->inum_spin);

				ip->pmp = NULL;

				/*
				 * Cleaning out ip->cluster isn't entirely
				 * trivial.
				 */
				hammer2_inode_repoint(ip, NULL, NULL);

				kfree(ip, pmp->minode);
				atomic_add_long(&pmp->inmem_inodes, -1);
				ip = NULL;	/* will terminate loop */
			} else {
				hammer2_spin_unex(&ip->pmp->inum_spin);
			}
		} else {
			/*
			 * Non-zero transition
			 */
			if (atomic_cmpset_int(&ip->refs, refs, refs - 1))
				break;
		}
	}
}

/*
 * Get the vnode associated with the given inode, allocating the vnode if
 * necessary.  The vnode will be returned exclusively locked.
 *
 * *errorp is set to a UNIX error, not a HAMMER2 error.
 *
 * The caller must lock the inode (shared or exclusive).
 *
 * Great care must be taken to avoid deadlocks and vnode acquisition/reclaim
 * races.
 */
struct vnode *
hammer2_igetv(hammer2_inode_t *ip, int *errorp)
{
	hammer2_pfs_t *pmp;
	struct vnode *vp;

	pmp = ip->pmp;
	KKASSERT(pmp != NULL);
	*errorp = 0;

	for (;;) {
		/*
		 * Attempt to reuse an existing vnode assignment.  It is
		 * possible to race a reclaim so the vget() may fail.  The
		 * inode must be unlocked during the vget() to avoid a
		 * deadlock against a reclaim.
		 */
		int wasexclusive;

		vp = ip->vp;
		if (vp) {
			/*
			 * Inode must be unlocked during the vget() to avoid
			 * possible deadlocks, but leave the ip ref intact.
			 *
			 * vnode is held to prevent destruction during the
			 * vget().  The vget() can still fail if we lost
			 * a reclaim race on the vnode.
			 */
			hammer2_mtx_state_t ostate;

			vhold(vp);
			ostate = hammer2_inode_lock_temp_release(ip);
			if (vget(vp, LK_EXCLUSIVE)) {
				vdrop(vp);
				hammer2_inode_lock_temp_restore(ip, ostate);
				continue;
			}
			hammer2_inode_lock_temp_restore(ip, ostate);
			vdrop(vp);
			/* vp still locked and ref from vget */
			if (ip->vp != vp) {
				kprintf("hammer2: igetv race %p/%p\n",
					ip->vp, vp);
				vput(vp);
				continue;
			}
			*errorp = 0;
			break;
		}

		/*
		 * No vnode exists, allocate a new vnode.  Beware of
		 * allocation races.  This function will return an
		 * exclusively locked and referenced vnode.
		 */
		*errorp = getnewvnode(VT_HAMMER2, pmp->mp, &vp, 0, 0);
		if (*errorp) {
			kprintf("hammer2: igetv getnewvnode failed %d\n",
				*errorp);
			vp = NULL;
			break;
		}

		/*
		 * Lock the inode and check for an allocation race.
		 */
		wasexclusive = hammer2_inode_lock_upgrade(ip);
		if (ip->vp != NULL) {
			vp->v_type = VBAD;
			vx_put(vp);
			hammer2_inode_lock_downgrade(ip, wasexclusive);
			continue;
		}

		switch (ip->meta.type) {
		case HAMMER2_OBJTYPE_DIRECTORY:
			vp->v_type = VDIR;
			break;
		case HAMMER2_OBJTYPE_REGFILE:
			/*
			 * Regular file must use buffer cache I/O
			 * (VKVABIO cpu sync semantics supported)
			 */
			vp->v_type = VREG;
			vsetflags(vp, VKVABIO);
			vinitvmio(vp, ip->meta.size,
				  HAMMER2_LBUFSIZE,
				  (int)ip->meta.size & HAMMER2_LBUFMASK);
			break;
		case HAMMER2_OBJTYPE_SOFTLINK:
			/*
			 * XXX for now we are using the generic file_read
			 * and file_write code so we need a buffer cache
			 * association.
			 *
			 * (VKVABIO cpu sync semantics supported)
			 */
			vp->v_type = VLNK;
			vsetflags(vp, VKVABIO);
			vinitvmio(vp, ip->meta.size,
				  HAMMER2_LBUFSIZE,
				  (int)ip->meta.size & HAMMER2_LBUFMASK);
			break;
		case HAMMER2_OBJTYPE_CDEV:
			vp->v_type = VCHR;
			/* fall through */
		case HAMMER2_OBJTYPE_BDEV:
			vp->v_ops = &pmp->mp->mnt_vn_spec_ops;
			if (ip->meta.type != HAMMER2_OBJTYPE_CDEV)
				vp->v_type = VBLK;
			addaliasu(vp, ip->meta.rmajor, ip->meta.rminor);
			break;
		case HAMMER2_OBJTYPE_FIFO:
			vp->v_type = VFIFO;
			vp->v_ops = &pmp->mp->mnt_vn_fifo_ops;
			break;
		case HAMMER2_OBJTYPE_SOCKET:
			vp->v_type = VSOCK;
			break;
		default:
			panic("hammer2: unhandled objtype %d",
			      ip->meta.type);
			break;
		}

		if (ip == pmp->iroot)
			vsetflags(vp, VROOT);

		vp->v_data = ip;
		ip->vp = vp;
		hammer2_inode_ref(ip);		/* vp association */
		hammer2_inode_lock_downgrade(ip, wasexclusive);
		break;
	}

	/*
	 * Return non-NULL vp and *errorp == 0, or NULL vp and *errorp != 0.
	 */
	if (hammer2_debug & 0x0002) {
		kprintf("igetv vp %p refs 0x%08x aux 0x%08x\n",
			vp, vp->v_refcnt, vp->v_auxrefs);
	}
	return (vp);
}

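/*
 * Illustrative sketch (hypothetical caller): how a VNOPS entry point would
 * obtain a locked vnode for an inode.  The function name is made up for
 * illustration.
 */
#if 0
static int
example_get_vnode(hammer2_inode_t *ip, struct vnode **vpp)
{
	int error;

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
	*vpp = hammer2_igetv(ip, &error);  /* exclusively locked vp or NULL */
	hammer2_inode_unlock(ip);

	return error;			   /* UNIX errno, not HAMMER2 error */
}
#endif
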
/*
 * Returns the inode associated with the passed-in cluster, allocating a new
 * hammer2_inode structure if necessary, then synchronizing it to the passed
 * xop cluster.  When synchronizing, if idx >= 0, only cluster index (idx)
 * is synchronized.  Otherwise the whole cluster is synchronized.  inum will
 * be extracted from the passed-in xop and the inum argument will be ignored.
 *
 * If xop is passed as NULL then a new hammer2_inode is allocated with the
 * specified inum, and returned.  For normal inodes, the inode will be
 * indexed in memory and if it already exists the existing ip will be
 * returned instead of allocating a new one.  The superroot and PFS inodes
 * are not indexed in memory.
 *
 * The passed-in cluster must be locked and will remain locked on return.
 * The returned inode will be locked and the caller may dispose of both
 * via hammer2_inode_unlock() + hammer2_inode_drop().  However, if the caller
 * needs to resolve a hardlink it must ref/unlock/relock/drop the inode.
 *
 * The hammer2_inode structure regulates the interface between the high level
 * kernel VNOPS API and the filesystem backend (the chains).
 *
 * On return the inode is locked with the supplied cluster.
 */
hammer2_inode_t *
hammer2_inode_get(hammer2_pfs_t *pmp, hammer2_xop_head_t *xop,
		  hammer2_tid_t inum, int idx)
{
	hammer2_inode_t *nip;
	const hammer2_inode_data_t *iptmp;
	const hammer2_inode_data_t *nipdata;

	KKASSERT(xop == NULL ||
		 hammer2_cluster_type(&xop->cluster) ==
		 HAMMER2_BREF_TYPE_INODE);
	KKASSERT(pmp);

	/*
	 * Interlocked lookup/ref of the inode.  This code is only needed
	 * when looking up inodes with nlinks != 0 (TODO: optimize out
	 * otherwise and test for duplicates).
	 *
	 * Cluster can be NULL during the initial pfs allocation.
	 */
	if (xop) {
		iptmp = &hammer2_xop_gdata(xop)->ipdata;
		inum = iptmp->meta.inum;
		hammer2_xop_pdata(xop);
	}
again:
	nip = hammer2_inode_lookup(pmp, inum);
	if (nip) {
		/*
		 * Handle SMP race (not applicable to the super-root spmp
		 * which can't index inodes due to duplicative inode numbers).
		 */
		hammer2_mtx_ex(&nip->lock);
		if (pmp->spmp_hmp == NULL &&
		    (nip->flags & HAMMER2_INODE_ONRBTREE) == 0) {
			hammer2_mtx_unlock(&nip->lock);
			hammer2_inode_drop(nip);
			goto again;
		}
		if (xop) {
			if (idx >= 0)
				hammer2_inode_repoint_one(nip, &xop->cluster,
							  idx);
			else
				hammer2_inode_repoint(nip, NULL, &xop->cluster);
		}
		return nip;
	}

	/*
	 * We couldn't find the inode number, create a new inode and try to
	 * insert it, handle insertion races.
	 */
	nip = kmalloc(sizeof(*nip), pmp->minode, M_WAITOK | M_ZERO);
	spin_init(&nip->cluster_spin, "h2clspin");
	atomic_add_long(&pmp->inmem_inodes, 1);
	hammer2_pfs_memory_inc(pmp);
	hammer2_pfs_memory_wakeup(pmp);
	if (pmp->spmp_hmp)
		nip->flags = HAMMER2_INODE_SROOT;

	/*
	 * Initialize nip's cluster.  A cluster is provided for normal
	 * inodes but typically not for the super-root or PFS inodes.
	 */
	nip->cluster.refs = 1;
	nip->cluster.pmp = pmp;
	nip->cluster.flags |= HAMMER2_CLUSTER_INODE;
	if (xop) {
		nipdata = &hammer2_xop_gdata(xop)->ipdata;
		nip->meta = nipdata->meta;
		hammer2_xop_pdata(xop);
		atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD);
		hammer2_inode_repoint(nip, NULL, &xop->cluster);
	} else {
		nip->meta.inum = inum;	/* PFS inum is always 1 XXX */
		/* mtime will be updated when a cluster is available */
		atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD); /*XXX*/
	}

	nip->pmp = pmp;

	/*
	 * ref and lock on nip gives it state compatible to after a
	 * hammer2_inode_lock() call.
	 */
	nip->refs = 1;
	hammer2_mtx_init(&nip->lock, "h2inode");
	hammer2_mtx_init(&nip->truncate_lock, "h2trunc");
	hammer2_mtx_ex(&nip->lock);
	TAILQ_INIT(&nip->depend_static.sideq);
	/* combination of thread lock and chain lock == inode lock */

	/*
	 * Attempt to add the inode.  If it fails we raced another inode
	 * get.  Undo all the work and try again.
	 */
	if (pmp->spmp_hmp == NULL) {
		hammer2_spin_ex(&pmp->inum_spin);
		if (RB_INSERT(hammer2_inode_tree, &pmp->inum_tree, nip)) {
			hammer2_spin_unex(&pmp->inum_spin);
			hammer2_mtx_unlock(&nip->lock);
			hammer2_inode_drop(nip);
			goto again;
		}
		atomic_set_int(&nip->flags, HAMMER2_INODE_ONRBTREE);
		hammer2_spin_unex(&pmp->inum_spin);
	}
	return (nip);
}

/*
 * Create a PFS inode under the superroot.  This function will create the
 * inode, its media chains, and also insert it into the media.
 *
 * Caller must be in a flush transaction because we are inserting the inode
 * onto the media.
 */
hammer2_inode_t *
hammer2_inode_create_pfs(hammer2_pfs_t *spmp,
		       const uint8_t *name, size_t name_len,
		       int *errorp)
{
	hammer2_xop_create_t *xop;
	hammer2_inode_t *pip;
	hammer2_inode_t *nip;
	int error;
	hammer2_key_t lhc;
	uuid_t pip_uid;
	uuid_t pip_gid;
	uint32_t pip_mode;
	uint8_t pip_comp_algo;
	uint8_t pip_check_algo;
	hammer2_tid_t pip_inum;

	pip = spmp->iroot;
	nip = NULL;

	lhc = hammer2_dirhash(name, name_len);
	*errorp = 0;

	/*
	 * Locate the inode or indirect block to create the new
	 * entry in.  At the same time check for key collisions
	 * and iterate until we don't get one.
	 *
	 * Lock the directory exclusively for now to guarantee that
	 * we can find an unused lhc for the name.  Due to collisions,
	 * two different creates can end up with the same lhc so we
	 * cannot depend on the OS to prevent the collision.
	 */
	hammer2_inode_lock(pip, 0);

	pip_uid = pip->meta.uid;
	pip_gid = pip->meta.gid;
	pip_mode = pip->meta.mode;
	pip_comp_algo = pip->meta.comp_algo;
	pip_check_algo = pip->meta.check_algo;
	pip_inum = (pip == pip->pmp->iroot) ? 1 : pip->meta.inum;

	/*
	 * Locate an unused key in the collision space.
	 */
	{
		hammer2_xop_scanlhc_t *sxop;
		hammer2_key_t lhcbase;

		lhcbase = lhc;
		sxop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
		sxop->lhc = lhc;
		hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
			if (lhc != sxop->head.cluster.focus->bref.key)
				break;
			++lhc;
		}
		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

		if (error) {
			if (error != HAMMER2_ERROR_ENOENT)
				goto done2;
			++lhc;
			error = 0;
		}
		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
			error = HAMMER2_ERROR_ENOSPC;
			goto done2;
		}
	}

	/*
	 * Create the inode with the lhc as the key.
	 */
	xop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
	xop->lhc = lhc;
	xop->flags = HAMMER2_INSERT_PFSROOT;
	bzero(&xop->meta, sizeof(xop->meta));

	xop->meta.type = HAMMER2_OBJTYPE_DIRECTORY;
	xop->meta.inum = 1;
	xop->meta.iparent = pip_inum;

	/* Inherit parent's inode compression mode. */
	xop->meta.comp_algo = pip_comp_algo;
	xop->meta.check_algo = pip_check_algo;
	xop->meta.version = HAMMER2_INODE_VERSION_ONE;
	hammer2_update_time(&xop->meta.ctime);
	xop->meta.mtime = xop->meta.ctime;
	xop->meta.mode = 0755;
	xop->meta.nlinks = 1;

	/*
	 * Regular files and softlinks allow a small amount of data to be
	 * directly embedded in the inode.  This flag will be cleared if
	 * the size is extended past the embedded limit.
	 */
	if (xop->meta.type == HAMMER2_OBJTYPE_REGFILE ||
	    xop->meta.type == HAMMER2_OBJTYPE_SOFTLINK) {
		xop->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
	}
	hammer2_xop_setname(&xop->head, name, name_len);
	xop->meta.name_len = name_len;
	xop->meta.name_key = lhc;
	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);

	hammer2_xop_start(&xop->head, &hammer2_inode_create_desc);

	error = hammer2_xop_collect(&xop->head, 0);
#if INODE_DEBUG
	kprintf("CREATE INODE %*.*s\n",
		(int)name_len, (int)name_len, name);
#endif

	if (error) {
		*errorp = error;
		goto done;
	}

	/*
	 * Set up the new inode if not a hardlink pointer.
	 *
	 * NOTE: *_get() integrates chain's lock into the inode lock.
	 *
	 * NOTE: Only one new inode can currently be created per
	 *	 transaction.  If the need arises we can adjust
	 *	 hammer2_trans_init() to allow more.
	 *
	 * NOTE: nipdata will have chain's blockset data.
	 */
	nip = hammer2_inode_get(pip->pmp, &xop->head, -1, -1);
	nip->comp_heuristic = 0;
done:
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
done2:
	hammer2_inode_unlock(pip);

	return (nip);
}

/*
 * Create a new, normal inode.  This function will create the inode,
 * the media chains, but will not insert the chains onto the media topology
 * (doing so would require a flush transaction and cause long stalls).
 *
 * Caller must be in a normal transaction.
 */
hammer2_inode_t *
hammer2_inode_create_normal(hammer2_inode_t *pip,
			    struct vattr *vap, struct ucred *cred,
			    hammer2_key_t inum, int *errorp)
{
	hammer2_xop_create_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	int error;
	uid_t xuid;
	uuid_t pip_uid;
	uuid_t pip_gid;
	uint32_t pip_mode;
	uint8_t pip_comp_algo;
	uint8_t pip_check_algo;
	hammer2_tid_t pip_inum;
	uint8_t type;

	dip = pip->pmp->iroot;
	KKASSERT(dip != NULL);

	*errorp = 0;

	/*hammer2_inode_lock(dip, 0);*/

	pip_uid = pip->meta.uid;
	pip_gid = pip->meta.gid;
	pip_mode = pip->meta.mode;
	pip_comp_algo = pip->meta.comp_algo;
	pip_check_algo = pip->meta.check_algo;
	pip_inum = (pip == pip->pmp->iroot) ? 1 : pip->meta.inum;

	/*
	 * Create the in-memory hammer2_inode structure for the specified
	 * inode.
	 */
	nip = hammer2_inode_get(dip->pmp, NULL, inum, -1);
	nip->comp_heuristic = 0;
	KKASSERT((nip->flags & HAMMER2_INODE_CREATING) == 0 &&
		 nip->cluster.nchains == 0);
	atomic_set_int(&nip->flags, HAMMER2_INODE_CREATING);

	/*
	 * Setup the inode meta-data
	 */
	nip->meta.type = hammer2_get_obj_type(vap->va_type);

	switch (nip->meta.type) {
	case HAMMER2_OBJTYPE_CDEV:
	case HAMMER2_OBJTYPE_BDEV:
		nip->meta.rmajor = vap->va_rmajor;
		nip->meta.rminor = vap->va_rminor;
		break;
	default:
		break;
	}
	type = nip->meta.type;

	KKASSERT(nip->meta.inum == inum);
	nip->meta.iparent = pip_inum;

	/* Inherit parent's inode compression mode. */
	nip->meta.comp_algo = pip_comp_algo;
	nip->meta.check_algo = pip_check_algo;
	nip->meta.version = HAMMER2_INODE_VERSION_ONE;
	hammer2_update_time(&nip->meta.ctime);
	nip->meta.mtime = nip->meta.ctime;
	nip->meta.mode = vap->va_mode;
	nip->meta.nlinks = 1;

	xuid = hammer2_to_unix_xid(&pip_uid);
	xuid = vop_helper_create_uid(dip->pmp->mp, pip_mode,
				     xuid, cred,
				     &vap->va_mode);
	if (vap->va_vaflags & VA_UID_UUID_VALID)
		nip->meta.uid = vap->va_uid_uuid;
	else if (vap->va_uid != (uid_t)VNOVAL)
		hammer2_guid_to_uuid(&nip->meta.uid, vap->va_uid);
	else
		hammer2_guid_to_uuid(&nip->meta.uid, xuid);

	if (vap->va_vaflags & VA_GID_UUID_VALID)
		nip->meta.gid = vap->va_gid_uuid;
	else if (vap->va_gid != (gid_t)VNOVAL)
		hammer2_guid_to_uuid(&nip->meta.gid, vap->va_gid);
	else
		nip->meta.gid = pip_gid;

	/*
	 * Regular files and softlinks allow a small amount of data to be
	 * directly embedded in the inode.  This flag will be cleared if
	 * the size is extended past the embedded limit.
	 */
	if (nip->meta.type == HAMMER2_OBJTYPE_REGFILE ||
	    nip->meta.type == HAMMER2_OBJTYPE_SOFTLINK) {
		nip->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
	}

	/*
	 * Create the inode using (inum) as the key.  Pass pip for
	 * method inheritance.
	 */
	xop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
	xop->lhc = inum;
	xop->flags = 0;
	xop->meta = nip->meta;
	KKASSERT(vap);

	xop->meta.name_len = hammer2_xop_setname_inum(&xop->head, inum);
	xop->meta.name_key = inum;
	nip->meta.name_len = xop->meta.name_len;
	nip->meta.name_key = xop->meta.name_key;
	hammer2_inode_modify(nip);

	/*
	 * Create the inode media chains but leave them detached.  We are
	 * not in a flush transaction so we can't mess with media topology
	 * above normal inodes (i.e. the index of the inodes themselves).
	 *
	 * We've already set the INODE_CREATING flag.  The inode's media
	 * chains will be inserted onto the media topology on the next
	 * filesystem sync.
	 */
	hammer2_xop_start(&xop->head, &hammer2_inode_create_det_desc);

	error = hammer2_xop_collect(&xop->head, 0);
	if (error) {
		kprintf("create inode type %d error %d\n",
			nip->meta.type, error);
		hammer2_inode_unlock(nip);
		hammer2_inode_drop(nip);
		*errorp = error;
		nip = NULL;
	} else {
		/*
		 * Associate the media chains created by the backend with the
		 * frontend inode.
		 */
		hammer2_inode_repoint(nip, NULL, &xop->head.cluster);
	}
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	/*hammer2_inode_unlock(dip);*/

	return (nip);
}

/*
 * Create a directory entry under dip with the specified name, inode number,
 * and OBJTYPE (type).
 *
 * This returns a UNIX errno code, not a HAMMER2_ERROR_* code.
 *
 * Caller must hold dip locked.
 */
int
hammer2_dirent_create(hammer2_inode_t *dip, const char *name, size_t name_len,
		      hammer2_key_t inum, uint8_t type)
{
	hammer2_xop_mkdirent_t *xop;
	hammer2_key_t lhc;
	int error;

	lhc = 0;
	error = 0;

	KKASSERT(name != NULL);
	lhc = hammer2_dirhash(name, name_len);

	/*
	 * Locate the inode or indirect block to create the new
	 * entry in.  At the same time check for key collisions
	 * and iterate until we don't get one.
	 *
	 * Lock the directory exclusively for now to guarantee that
	 * we can find an unused lhc for the name.  Due to collisions,
	 * two different creates can end up with the same lhc so we
	 * cannot depend on the OS to prevent the collision.
	 */
	hammer2_inode_modify(dip);

	/*
	 * If name specified, locate an unused key in the collision space.
	 * Otherwise use the passed-in lhc directly.
	 */
	{
		hammer2_xop_scanlhc_t *sxop;
		hammer2_key_t lhcbase;

		lhcbase = lhc;
		sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
		sxop->lhc = lhc;
		hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
			if (lhc != sxop->head.cluster.focus->bref.key)
				break;
			++lhc;
		}
		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

		if (error) {
			if (error != HAMMER2_ERROR_ENOENT)
				goto done2;
			++lhc;
			error = 0;
		}
		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
			error = HAMMER2_ERROR_ENOSPC;
			goto done2;
		}
	}

	/*
	 * Create the directory entry with the lhc as the key.
	 */
	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
	xop->lhc = lhc;
	bzero(&xop->dirent, sizeof(xop->dirent));
	xop->dirent.inum = inum;
	xop->dirent.type = type;
	xop->dirent.namlen = name_len;

	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);
	hammer2_xop_setname(&xop->head, name, name_len);

	hammer2_xop_start(&xop->head, &hammer2_inode_mkdirent_desc);

	error = hammer2_xop_collect(&xop->head, 0);

	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
done2:
	error = hammer2_error_to_errno(error);

	return error;
}

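/*
 * Worked example (informal; key values made up): hammer2_dirhash() yields
 * a 64-bit key whose low bits (HAMMER2_DIRHASH_LOMASK) form the collision
 * space.  If two names both hash to base key B, the scanlhc loop above
 * assigns B to the first entry and B+1 to the second, and so on.
 * HAMMER2_ERROR_ENOSPC is returned only when the increment would carry
 * out of the low-mask bits, i.e. that hash's entire collision space has
 * been consumed.
 */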
/*
 * Repoint ip->cluster's chains to cluster's chains and fixup the default
 * focus.  All items, valid or invalid, are repointed.  hammer2_xop_start()
 * filters out invalid or non-matching elements.
 *
 * Caller must hold the inode and cluster exclusive locked, if not NULL,
 * must also be locked.
 *
 * Cluster may be NULL to clean out any chains in ip->cluster.
 */
void
hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_inode_t *pip,
		      hammer2_cluster_t *cluster)
{
	hammer2_chain_t *dropch[HAMMER2_MAXCLUSTER];
	hammer2_chain_t *ochain;
	hammer2_chain_t *nchain;
	int i;

	bzero(dropch, sizeof(dropch));

	/*
	 * Replace chains in ip->cluster with chains from cluster and
	 * adjust the focus if necessary.
	 *
	 * NOTE: nchain and/or ochain can be NULL due to gaps
	 *	 in the cluster arrays.
	 */
	hammer2_spin_ex(&ip->cluster_spin);
	for (i = 0; cluster && i < cluster->nchains; ++i) {
		/*
		 * Do not replace elements which are the same.  Also handle
		 * element count discrepancies.
		 */
		nchain = cluster->array[i].chain;
		if (i < ip->cluster.nchains) {
			ochain = ip->cluster.array[i].chain;
			if (ochain == nchain)
				continue;
		} else {
			ochain = NULL;
		}

		/*
		 * Make adjustments
		 */
		ip->cluster.array[i].chain = nchain;
		ip->cluster.array[i].flags &= ~HAMMER2_CITEM_INVALID;
		ip->cluster.array[i].flags |= cluster->array[i].flags &
					      HAMMER2_CITEM_INVALID;
		if (nchain)
			hammer2_chain_ref(nchain);
		dropch[i] = ochain;
	}

	/*
	 * Release any left-over chains in ip->cluster.
	 */
	while (i < ip->cluster.nchains) {
		nchain = ip->cluster.array[i].chain;
		if (nchain) {
			ip->cluster.array[i].chain = NULL;
			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
		}
		dropch[i] = nchain;
		++i;
	}

	/*
	 * Fixup fields.  Note that the inode-embedded cluster is never
	 * directly locked.
	 */
	if (cluster) {
		ip->cluster.nchains = cluster->nchains;
		ip->cluster.focus = cluster->focus;
		ip->cluster.flags = cluster->flags & ~HAMMER2_CLUSTER_LOCKED;
	} else {
		ip->cluster.nchains = 0;
		ip->cluster.focus = NULL;
		ip->cluster.flags &= ~HAMMER2_CLUSTER_ZFLAGS;
	}

	hammer2_spin_unex(&ip->cluster_spin);

	/*
	 * Cleanup outside of spinlock
	 */
	while (--i >= 0) {
		if (dropch[i])
			hammer2_chain_drop(dropch[i]);
	}
}

/*
 * Repoint a single element from the cluster to the ip.  Used by the
 * synchronization threads to piecemeal update inodes.  Does not change
 * focus and requires inode to be re-locked to clean-up flags (XXX).
 */
void
hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
			  int idx)
{
	hammer2_chain_t *ochain;
	hammer2_chain_t *nchain;
	int i;

	hammer2_spin_ex(&ip->cluster_spin);
	KKASSERT(idx < cluster->nchains);
	if (idx < ip->cluster.nchains) {
		ochain = ip->cluster.array[idx].chain;
		nchain = cluster->array[idx].chain;
	} else {
		ochain = NULL;
		nchain = cluster->array[idx].chain;
		for (i = ip->cluster.nchains; i <= idx; ++i) {
			bzero(&ip->cluster.array[i],
			      sizeof(ip->cluster.array[i]));
			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
		}
		ip->cluster.nchains = idx + 1;
	}
	if (ochain != nchain) {
		/*
		 * Make adjustments.
		 */
		ip->cluster.array[idx].chain = nchain;
		ip->cluster.array[idx].flags &= ~HAMMER2_CITEM_INVALID;
		ip->cluster.array[idx].flags |= cluster->array[idx].flags &
						HAMMER2_CITEM_INVALID;
	}
	hammer2_spin_unex(&ip->cluster_spin);
	if (ochain != nchain) {
		if (nchain)
			hammer2_chain_ref(nchain);
		if (ochain)
			hammer2_chain_drop(ochain);
	}
}

/*
 * Called with a locked inode to finish unlinking an inode after xop_unlink
 * had been run.  This function is responsible for decrementing nlinks.
 *
 * We don't bother decrementing nlinks if the file is not open and this was
 * the last link.
 *
 * If the inode is a hardlink target its chain has not yet been deleted,
 * otherwise its chain has been deleted.
 *
 * If isopen then any prior deletion was not permanent and the inode is
 * left intact with nlinks == 0.
 */
int
hammer2_inode_unlink_finisher(hammer2_inode_t *ip, int isopen)
{
	int error;

	error = 0;

	/*
	 * Decrement nlinks.  If this is the last link and the file is
	 * not open we can just delete the inode and not bother dropping
	 * nlinks to 0 (avoiding unnecessary block updates).
	 */
	if (ip->meta.nlinks == 1) {
		atomic_set_int(&ip->flags, HAMMER2_INODE_ISUNLINKED);
		if (isopen == 0)
			goto killit;
	}

	hammer2_inode_modify(ip);
	--ip->meta.nlinks;
	if ((int64_t)ip->meta.nlinks < 0)
		ip->meta.nlinks = 0;	/* safety */

	/*
	 * If nlinks is not zero we are done.  However, this should only be
	 * possible with a hardlink target.  If the inode is an embedded
	 * hardlink nlinks should have dropped to zero, warn and proceed
	 * with the next step.
	 */
	if (ip->meta.nlinks) {
		if ((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0)
			return 0;
		kprintf("hammer2_inode_unlink: nlinks was not 0 (%jd)\n",
			(intmax_t)ip->meta.nlinks);
		return 0;
	}

	if (ip->vp)
		hammer2_knote(ip->vp, NOTE_DELETE);

	/*
	 * nlinks is now an implied zero, delete the inode if not open.
	 * We avoid unnecessary media updates by not bothering to actually
	 * decrement nlinks for the 1->0 transition.
	 *
	 * Put the inode on the sideq to ensure that any disconnected chains
	 * get properly flushed (so they can be freed).  Defer the deletion
	 * to the sync code, doing it now will desynchronize the inode from
	 * related directory entries (which is bad).
	 *
	 * NOTE: killit can be reached without modifying the inode, so
	 *	 make sure that it is on the SIDEQ.
	 */
	if (isopen == 0) {
#if 0
		hammer2_xop_destroy_t *xop;
#endif

killit:
		atomic_set_int(&ip->flags, HAMMER2_INODE_DELETING);
		hammer2_inode_delayed_sideq(ip);
#if 0
		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		hammer2_xop_start(&xop->head, &hammer2_inode_destroy_desc);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
#endif
	}
	error = 0;	/* XXX */

	return error;
}

/*
 * Mark an inode as being modified, meaning that the caller will modify
 * ip->meta.
 *
 * If a vnode is present we set the vnode dirty and the nominal filesystem
 * sync will also handle synchronizing the inode meta-data.  If no vnode
 * is present we must ensure that the inode is on pmp->sideq.
 *
 * NOTE: We must always queue the inode to the sideq.  This allows H2 to
 *	 shortcut vsyncscan() and flush inodes and their related vnodes
 *	 in two stages.  H2 still calls vfsync() for each vnode.
 *
 * NOTE: No mtid (modify_tid) is passed into this routine.  The caller is
 *	 only modifying the in-memory inode.  A modify_tid is synchronized
 *	 later when the inode gets flushed.
 *
 * NOTE: As an exception to the general rule, the inode MAY be locked
 *	 shared for this particular call.
 */
void
hammer2_inode_modify(hammer2_inode_t *ip)
{
	atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
	if (ip->vp)
		vsetisdirty(ip->vp);
	if (ip->pmp && (ip->flags & HAMMER2_INODE_NOSIDEQ) == 0)
		hammer2_inode_delayed_sideq(ip);
}

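/*
 * Illustrative sketch (assumed pattern, mirroring callers such as
 * hammer2_inode_unlink_finisher() above): a meta-data update pairs
 * hammer2_inode_modify() with the actual ip->meta change while the inode
 * is locked exclusively.  The mtime value here is hypothetical.
 */
#if 0
static void
example_touch_mtime(hammer2_inode_t *ip, uint64_t new_mtime)
{
	hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
	hammer2_inode_modify(ip);	/* sets MODIFIED, dirties vp/sideq */
	ip->meta.mtime = new_mtime;	/* the actual meta-data change */
	hammer2_inode_unlock(ip);	/* chains sync later via sideq/syncq */
}
#endif
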
/*
 * Synchronize the inode's frontend state with the chain state prior
 * to any explicit flush of the inode or any strategy write call.  This
 * does not flush the inode's chain or its sub-topology to media (higher
 * level layers are responsible for doing that).
 *
 * Called with a locked inode inside a normal transaction.
 *
 * inode must be locked.
 */
int
hammer2_inode_chain_sync(hammer2_inode_t *ip)
{
	int error;

	error = 0;
	if (ip->flags & (HAMMER2_INODE_RESIZED | HAMMER2_INODE_MODIFIED)) {
		hammer2_xop_fsync_t *xop;

		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		xop->clear_directdata = 0;
		if (ip->flags & HAMMER2_INODE_RESIZED) {
			if ((ip->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) &&
			    ip->meta.size > HAMMER2_EMBEDDED_BYTES) {
				ip->meta.op_flags &= ~HAMMER2_OPFLAG_DIRECTDATA;
				xop->clear_directdata = 1;
			}
			xop->osize = ip->osize;
		} else {
			xop->osize = ip->meta.size;	/* safety */
		}
		xop->ipflags = ip->flags;
		xop->meta = ip->meta;

		atomic_clear_int(&ip->flags, HAMMER2_INODE_RESIZED |
					     HAMMER2_INODE_MODIFIED);
		hammer2_xop_start(&xop->head, &hammer2_inode_chain_sync_desc);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (error == HAMMER2_ERROR_ENOENT)
			error = 0;
		if (error) {
			kprintf("hammer2: unable to fsync inode %p\n", ip);
#if 0
			atomic_set_int(&ip->flags,
				       xop->ipflags & (HAMMER2_INODE_RESIZED |
						       HAMMER2_INODE_MODIFIED));
#endif
			/* XXX return error somehow? */
		}
	}
	return error;
}

/*
 * When an inode is flagged INODE_CREATING its chains have not actually
 * been inserted into the on-media tree yet.
 */
int
hammer2_inode_chain_ins(hammer2_inode_t *ip)
{
	int error;

	error = 0;
	if (ip->flags & HAMMER2_INODE_CREATING) {
		hammer2_xop_create_t *xop;

		atomic_clear_int(&ip->flags, HAMMER2_INODE_CREATING);
		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		xop->lhc = ip->meta.inum;
		xop->flags = 0;
		hammer2_xop_start(&xop->head, &hammer2_inode_create_ins_desc);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (error == HAMMER2_ERROR_ENOENT)
			error = 0;
		if (error) {
			kprintf("hammer2: backend unable to "
				"insert inode %p %ld\n", ip, ip->meta.inum);
			/* XXX return error somehow? */
		}
	}
	return error;
}

/*
 * When an inode is flagged INODE_DELETING it has been deleted (no directory
 * entry or open refs are left, though as an optimization H2 might leave
 * nlinks == 1 to avoid unnecessary block updates).  The backend flush then
 * needs to actually remove it from the topology.
 *
 * NOTE: backend flush must still sync and flush the deleted inode to clean
 *	 out related chains.
 */
int
hammer2_inode_chain_des(hammer2_inode_t *ip)
{
	int error;

	error = 0;
	if (ip->flags & HAMMER2_INODE_DELETING) {
		hammer2_xop_destroy_t *xop;

		atomic_clear_int(&ip->flags, HAMMER2_INODE_DELETING);
		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		hammer2_xop_start(&xop->head, &hammer2_inode_destroy_desc);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

		if (error == HAMMER2_ERROR_ENOENT)
			error = 0;
		if (error) {
			kprintf("hammer2: backend unable to "
				"delete inode %p %ld\n", ip, ip->meta.inum);
			/* XXX return error somehow? */
		}
	}
	return error;
}

/*
 * Flushes the inode's chain and its sub-topology to media.  Interlocks
 * HAMMER2_INODE_DIRTYDATA by clearing it prior to the flush.  Any strategy
 * function creating or modifying a chain under this inode will re-set the
 * flag.
 *
 * inode must be locked.
 */
int
hammer2_inode_chain_flush(hammer2_inode_t *ip, int flags)
{
	hammer2_xop_fsync_t *xop;
	int error;

	atomic_clear_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING | flags);
	hammer2_xop_start(&xop->head, &hammer2_inode_flush_desc);
	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_WAITALL);
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	if (error == HAMMER2_ERROR_ENOENT)
		error = 0;

	return error;
}
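
/*
 * Illustrative sketch (an assumption based on the division of labor
 * described above, not a verbatim fsync path): an explicit inode flush
 * first synchronizes frontend meta-data to the chains, then flushes the
 * chain topology to media.  The flags value of 0 is illustrative.
 */
#if 0
static int
example_inode_fsync(hammer2_inode_t *ip)
{
	int error;

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
	error = hammer2_inode_chain_sync(ip);	/* meta/size -> chains */
	if (error == 0)
		error = hammer2_inode_chain_flush(ip, 0); /* chains -> media */
	hammer2_inode_unlock(ip);
	return error;
}
#endif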