hammer2 - Flush asynchronization, bug fixes, stabilization (2)
sys/vfs/hammer2/hammer2_inode.c (dragonfly.git)
/*
 * Copyright (c) 2011-2014 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/uuid.h>

#include "hammer2.h"

#define INODE_DEBUG	0

RB_GENERATE2(hammer2_inode_tree, hammer2_inode, rbnode, hammer2_inode_cmp,
	     hammer2_tid_t, meta.inum);

int
hammer2_inode_cmp(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
{
	if (ip1->meta.inum < ip2->meta.inum)
		return(-1);
	if (ip1->meta.inum > ip2->meta.inum)
		return(1);
	return(0);
}

static __inline
void
hammer2_knote(struct vnode *vp, int flags)
{
	if (flags)
		KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
}

static
void
hammer2_inode_delayed_sideq(hammer2_inode_t *ip)
{
	hammer2_inode_sideq_t *ipul;
	hammer2_pfs_t *pmp = ip->pmp;

	if ((ip->flags & HAMMER2_INODE_ONSIDEQ) == 0) {
		ipul = kmalloc(sizeof(*ipul), pmp->minode,
			       M_WAITOK | M_ZERO);
		ipul->ip = ip;
		hammer2_spin_ex(&pmp->list_spin);
		if ((ip->flags & HAMMER2_INODE_ONSIDEQ) == 0) {
			hammer2_inode_ref(ip);
			atomic_set_int(&ip->flags,
				       HAMMER2_INODE_ONSIDEQ);
			TAILQ_INSERT_TAIL(&pmp->sideq, ipul, entry);
			++pmp->sideq_count;
			hammer2_spin_unex(&pmp->list_spin);
		} else {
			hammer2_spin_unex(&pmp->list_spin);
			kfree(ipul, pmp->minode);
		}
	}
}

/*
 * HAMMER2 inode locks
 *
 * HAMMER2 offers shared and exclusive locks on inodes.  Pass a mask of
 * flags for options:
 *
 *	- pass HAMMER2_RESOLVE_SHARED if a shared lock is desired.  The
 *	  inode locking function will automatically set the RDONLY flag.
 *
 *	- pass HAMMER2_RESOLVE_ALWAYS if you need the inode's meta-data.
 *	  Most front-end inode locks do.
 *
 *	- pass HAMMER2_RESOLVE_NEVER if you do not want to require that
 *	  the inode data be resolved.  This is used by the syncthr because
 *	  it can run on an unresolved/out-of-sync cluster, and also by the
 *	  vnode reclamation code to avoid unnecessary I/O (particularly when
 *	  disposing of hundreds of thousands of cached vnodes).
 *
 * The inode locking function locks the inode itself, resolves any stale
 * chains in the inode's cluster, and allocates a fresh copy of the
 * cluster with 1 ref and all the underlying chains locked.
 *
 * ip->cluster will be stable while the inode is locked.
 *
 * NOTE: We don't combine the inode/chain lock because putting away an
 *	 inode would otherwise confuse multiple lock holders of the inode.
 *
 * NOTE: In-memory inodes always point to hardlink targets (the actual file),
 *	 and never point to a hardlink pointer.
 *
 * NOTE: If caller passes HAMMER2_RESOLVE_RDONLY the exclusive locking code
 *	 will feel free to reduce the chain set in the cluster as an
 *	 optimization.  It will still be validated against the quorum if
 *	 appropriate, but the optimization might be able to reduce data
 *	 accesses to one node.  This flag is automatically set if the inode
 *	 is locked with HAMMER2_RESOLVE_SHARED.
 */
void
hammer2_inode_lock(hammer2_inode_t *ip, int how)
{
	hammer2_inode_ref(ip);

	/*
	 * Inode structure mutex
	 */
	if (how & HAMMER2_RESOLVE_SHARED) {
		/*how |= HAMMER2_RESOLVE_RDONLY; not used */
		hammer2_mtx_sh(&ip->lock);
	} else {
		hammer2_mtx_ex(&ip->lock);
	}
}
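
/*
 * Illustrative sketch (expository addition, not compiled into the
 * driver): the canonical consumer pattern for the inode lock.  The
 * example_* name is hypothetical.  hammer2_inode_lock() gains its own
 * ref on the inode, so a bare lock/unlock pair is self-balancing.
 */
#if 0
static void
example_inode_lock_usage(hammer2_inode_t *ip)
{
	/* Shared lock is sufficient for reading meta-data */
	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
	/* ... ip->meta and ip->cluster are stable here ... */
	hammer2_inode_unlock(ip);	/* drops the lock and the ref */
}
#endif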

/*
 * Select a chain out of an inode's cluster and lock it.
 *
 * The inode does not have to be locked.
 */
hammer2_chain_t *
hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how)
{
	hammer2_chain_t *chain;
	hammer2_cluster_t *cluster;

	hammer2_spin_sh(&ip->cluster_spin);
	cluster = &ip->cluster;
	if (clindex >= cluster->nchains)
		chain = NULL;
	else
		chain = cluster->array[clindex].chain;
	if (chain) {
		hammer2_chain_ref(chain);
		hammer2_spin_unsh(&ip->cluster_spin);
		hammer2_chain_lock(chain, how);
	} else {
		hammer2_spin_unsh(&ip->cluster_spin);
	}
	return chain;
}
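
/*
 * Illustrative sketch (expository, not compiled): the returned chain,
 * if any, carries both a ref and a lock and must be put away with an
 * unlock + drop pair.  The example_* name is hypothetical.
 */
#if 0
static void
example_inode_chain_usage(hammer2_inode_t *ip, int clindex)
{
	hammer2_chain_t *chain;

	chain = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
	if (chain) {
		/* ... inspect chain->bref / chain->data ... */
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
}
#endif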

hammer2_chain_t *
hammer2_inode_chain_and_parent(hammer2_inode_t *ip, int clindex,
			       hammer2_chain_t **parentp, int how)
{
	hammer2_chain_t *chain;
	hammer2_chain_t *parent;

	for (;;) {
		hammer2_spin_sh(&ip->cluster_spin);
		if (clindex >= ip->cluster.nchains)
			chain = NULL;
		else
			chain = ip->cluster.array[clindex].chain;
		if (chain) {
			hammer2_chain_ref(chain);
			hammer2_spin_unsh(&ip->cluster_spin);
			hammer2_chain_lock(chain, how);
		} else {
			hammer2_spin_unsh(&ip->cluster_spin);
		}

		/*
		 * Nothing to return (and nothing to dereference below)
		 * if the cluster has no chain at this index.
		 */
		if (chain == NULL) {
			*parentp = NULL;
			return NULL;
		}

		/*
		 * Get parent, lock order must be (parent, chain).
		 */
		parent = chain->parent;
		if (parent) {
			hammer2_chain_ref(parent);
			hammer2_chain_unlock(chain);
			hammer2_chain_lock(parent, how);
			hammer2_chain_lock(chain, how);
		}
		if (ip->cluster.array[clindex].chain == chain &&
		    chain->parent == parent) {
			break;
		}

		/*
		 * Retry
		 */
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		if (parent) {
			hammer2_chain_unlock(parent);
			hammer2_chain_drop(parent);
		}
	}
	*parentp = parent;

	return chain;
}

void
hammer2_inode_unlock(hammer2_inode_t *ip)
{
	hammer2_mtx_unlock(&ip->lock);
	hammer2_inode_drop(ip);
}

/*
 * Temporarily release a lock held shared or exclusive.  Caller must
 * hold the lock shared or exclusive on call and the lock will be
 * released on return.
 *
 * hammer2_inode_lock_temp_restore() restores a lock that was
 * temporarily released.
 */
hammer2_mtx_state_t
hammer2_inode_lock_temp_release(hammer2_inode_t *ip)
{
	return hammer2_mtx_temp_release(&ip->lock);
}

void
hammer2_inode_lock_temp_restore(hammer2_inode_t *ip, hammer2_mtx_state_t ostate)
{
	hammer2_mtx_temp_restore(&ip->lock, ostate);
}
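
/*
 * Illustrative sketch (expository, not compiled): bracketing a blocking
 * operation with a temporary lock release, the same pattern
 * hammer2_igetv() uses around vget().  Hypothetical example_* name;
 * the caller is assumed to hold ip->lock shared or exclusive.
 */
#if 0
static void
example_temp_release(hammer2_inode_t *ip)
{
	hammer2_mtx_state_t ostate;

	ostate = hammer2_inode_lock_temp_release(ip);
	/* ... blocking operation that must not hold ip->lock ... */
	hammer2_inode_lock_temp_restore(ip, ostate);
}
#endif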

/*
 * Upgrade a shared inode lock to exclusive and return.  If the inode lock
 * is already held exclusively this is a NOP.
 *
 * The caller MUST hold the inode lock either shared or exclusive on call
 * and will own the lock exclusively on return.
 *
 * Returns non-zero if the lock was already exclusive prior to the upgrade.
 */
int
hammer2_inode_lock_upgrade(hammer2_inode_t *ip)
{
	int wasexclusive;

	if (mtx_islocked_ex(&ip->lock)) {
		wasexclusive = 1;
	} else {
		hammer2_mtx_unlock(&ip->lock);
		hammer2_mtx_ex(&ip->lock);
		wasexclusive = 0;
	}
	return wasexclusive;
}

/*
 * Downgrade an inode lock from exclusive to shared only if the inode
 * lock was previously shared.  If the inode lock was previously exclusive,
 * this is a NOP.
 */
void
hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int wasexclusive)
{
	if (wasexclusive == 0)
		mtx_downgrade(&ip->lock);
}
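
/*
 * Illustrative sketch (expository, not compiled): the upgrade/downgrade
 * helpers are designed to bracket a code section needing the exclusive
 * lock, restoring the caller's original lock state afterwards.
 * Hypothetical example_* name.
 */
#if 0
static void
example_upgrade_downgrade(hammer2_inode_t *ip)
{
	int wasexclusive;

	wasexclusive = hammer2_inode_lock_upgrade(ip);
	/* ... modifications requiring the exclusive lock ... */
	hammer2_inode_lock_downgrade(ip, wasexclusive);
}
#endif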

/*
 * Lookup an inode by inode number.
 */
hammer2_inode_t *
hammer2_inode_lookup(hammer2_pfs_t *pmp, hammer2_tid_t inum)
{
	hammer2_inode_t *ip;

	KKASSERT(pmp);
	if (pmp->spmp_hmp) {
		ip = NULL;
	} else {
		hammer2_spin_ex(&pmp->inum_spin);
		ip = RB_LOOKUP(hammer2_inode_tree, &pmp->inum_tree, inum);
		if (ip)
			hammer2_inode_ref(ip);
		hammer2_spin_unex(&pmp->inum_spin);
	}
	return(ip);
}
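
/*
 * Illustrative sketch (expository, not compiled): a successful lookup
 * returns a referenced but unlocked inode; the caller locks it for use
 * and is responsible for the final drop of the lookup ref.
 * Hypothetical example_* name.
 */
#if 0
static void
example_inode_lookup(hammer2_pfs_t *pmp, hammer2_tid_t inum)
{
	hammer2_inode_t *ip;

	ip = hammer2_inode_lookup(pmp, inum);
	if (ip) {
		hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
		/* ... use ip ... */
		hammer2_inode_unlock(ip);
		hammer2_inode_drop(ip);		/* lookup ref */
	}
}
#endif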

/*
 * Adding a ref to an inode is only legal if the inode already has at least
 * one ref.
 *
 * (can be called with spinlock held)
 */
void
hammer2_inode_ref(hammer2_inode_t *ip)
{
	atomic_add_int(&ip->refs, 1);
	if (hammer2_debug & 0x80000) {
		kprintf("INODE+1 %p (%d->%d)\n", ip, ip->refs - 1, ip->refs);
		print_backtrace(8);
	}
}

/*
 * Drop an inode reference, freeing the inode when the last reference goes
 * away.
 */
void
hammer2_inode_drop(hammer2_inode_t *ip)
{
	hammer2_pfs_t *pmp;
	u_int refs;

	while (ip) {
		if (hammer2_debug & 0x80000) {
			kprintf("INODE-1 %p (%d->%d)\n",
				ip, ip->refs, ip->refs - 1);
			print_backtrace(8);
		}
		refs = ip->refs;
		cpu_ccfence();
		if (refs == 1) {
			/*
			 * Transition to zero, must interlock with
			 * the inode inumber lookup tree (if applicable).
			 * It should not be possible for anyone to race
			 * the transition to 0.
			 */
			pmp = ip->pmp;
			KKASSERT(pmp);
			hammer2_spin_ex(&pmp->inum_spin);

			if (atomic_cmpset_int(&ip->refs, 1, 0)) {
				KKASSERT(hammer2_mtx_refs(&ip->lock) == 0);
				if (ip->flags & HAMMER2_INODE_ONRBTREE) {
					atomic_clear_int(&ip->flags,
						     HAMMER2_INODE_ONRBTREE);
					RB_REMOVE(hammer2_inode_tree,
						  &pmp->inum_tree, ip);
					--pmp->inum_count;
				}
				hammer2_spin_unex(&pmp->inum_spin);

				ip->pmp = NULL;

				/*
				 * Cleaning out ip->cluster isn't entirely
				 * trivial.
				 */
				hammer2_inode_repoint(ip, NULL, NULL);

				kfree(ip, pmp->minode);
				atomic_add_long(&pmp->inmem_inodes, -1);
				ip = NULL;	/* will terminate loop */
			} else {
				hammer2_spin_unex(&ip->pmp->inum_spin);
			}
		} else {
			/*
			 * Non-zero transition
			 */
			if (atomic_cmpset_int(&ip->refs, refs, refs - 1))
				break;
		}
	}
}

/*
 * Get the vnode associated with the given inode, allocating the vnode if
 * necessary.  The vnode will be returned exclusively locked.
 *
 * *errorp is set to a UNIX error, not a HAMMER2 error.
 *
 * The caller must lock the inode (shared or exclusive).
 *
 * Great care must be taken to avoid deadlocks and vnode acquisition/reclaim
 * races.
 */
struct vnode *
hammer2_igetv(hammer2_inode_t *ip, int *errorp)
{
	hammer2_pfs_t *pmp;
	struct vnode *vp;

	pmp = ip->pmp;
	KKASSERT(pmp != NULL);
	*errorp = 0;

	for (;;) {
		/*
		 * Attempt to reuse an existing vnode assignment.  It is
		 * possible to race a reclaim so the vget() may fail.  The
		 * inode must be unlocked during the vget() to avoid a
		 * deadlock against a reclaim.
		 */
		int wasexclusive;

		vp = ip->vp;
		if (vp) {
			/*
			 * Inode must be unlocked during the vget() to avoid
			 * possible deadlocks, but leave the ip ref intact.
			 *
			 * vnode is held to prevent destruction during the
			 * vget().  The vget() can still fail if we lost
			 * a reclaim race on the vnode.
			 */
			hammer2_mtx_state_t ostate;

			vhold(vp);
			ostate = hammer2_inode_lock_temp_release(ip);
			if (vget(vp, LK_EXCLUSIVE)) {
				vdrop(vp);
				hammer2_inode_lock_temp_restore(ip, ostate);
				continue;
			}
			hammer2_inode_lock_temp_restore(ip, ostate);
			vdrop(vp);
			/* vp still locked and ref from vget */
			if (ip->vp != vp) {
				kprintf("hammer2: igetv race %p/%p\n",
					ip->vp, vp);
				vput(vp);
				continue;
			}
			*errorp = 0;
			break;
		}

		/*
		 * No vnode exists, allocate a new vnode.  Beware of
		 * allocation races.  This function will return an
		 * exclusively locked and referenced vnode.
		 */
		*errorp = getnewvnode(VT_HAMMER2, pmp->mp, &vp, 0, 0);
		if (*errorp) {
			kprintf("hammer2: igetv getnewvnode failed %d\n",
				*errorp);
			vp = NULL;
			break;
		}

		/*
		 * Lock the inode and check for an allocation race.
		 */
		wasexclusive = hammer2_inode_lock_upgrade(ip);
		if (ip->vp != NULL) {
			vp->v_type = VBAD;
			vx_put(vp);
			hammer2_inode_lock_downgrade(ip, wasexclusive);
			continue;
		}

		switch (ip->meta.type) {
		case HAMMER2_OBJTYPE_DIRECTORY:
			vp->v_type = VDIR;
			break;
		case HAMMER2_OBJTYPE_REGFILE:
			/*
			 * Regular file must use buffer cache I/O
			 * (VKVABIO cpu sync semantics supported)
			 */
			vp->v_type = VREG;
			vsetflags(vp, VKVABIO);
			vinitvmio(vp, ip->meta.size,
				  HAMMER2_LBUFSIZE,
				  (int)ip->meta.size & HAMMER2_LBUFMASK);
			break;
		case HAMMER2_OBJTYPE_SOFTLINK:
			/*
			 * XXX for now we are using the generic file_read
			 * and file_write code so we need a buffer cache
			 * association.
			 *
			 * (VKVABIO cpu sync semantics supported)
			 */
			vp->v_type = VLNK;
			vsetflags(vp, VKVABIO);
			vinitvmio(vp, ip->meta.size,
				  HAMMER2_LBUFSIZE,
				  (int)ip->meta.size & HAMMER2_LBUFMASK);
			break;
		case HAMMER2_OBJTYPE_CDEV:
			vp->v_type = VCHR;
			/* fall through */
		case HAMMER2_OBJTYPE_BDEV:
			vp->v_ops = &pmp->mp->mnt_vn_spec_ops;
			if (ip->meta.type != HAMMER2_OBJTYPE_CDEV)
				vp->v_type = VBLK;
			addaliasu(vp,
				  ip->meta.rmajor,
				  ip->meta.rminor);
			break;
		case HAMMER2_OBJTYPE_FIFO:
			vp->v_type = VFIFO;
			vp->v_ops = &pmp->mp->mnt_vn_fifo_ops;
			break;
		case HAMMER2_OBJTYPE_SOCKET:
			vp->v_type = VSOCK;
			break;
		default:
			panic("hammer2: unhandled objtype %d",
			      ip->meta.type);
			break;
		}

		if (ip == pmp->iroot)
			vsetflags(vp, VROOT);

		vp->v_data = ip;
		ip->vp = vp;
		hammer2_inode_ref(ip);		/* vp association */
		hammer2_inode_lock_downgrade(ip, wasexclusive);
		break;
	}

	/*
	 * Return non-NULL vp and *errorp == 0, or NULL vp and *errorp != 0.
	 */
	if (hammer2_debug & 0x0002) {
		kprintf("igetv vp %p refs 0x%08x aux 0x%08x\n",
			vp, vp->v_refcnt, vp->v_auxrefs);
	}
	return (vp);
}
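
/*
 * Illustrative sketch (expository, not compiled): front-end use of
 * hammer2_igetv() as the VOPs employ it, with the inode locked across
 * the call.  Hypothetical example_* name; *errorp follows UNIX errno
 * conventions and the returned vnode is exclusively locked.
 */
#if 0
static struct vnode *
example_igetv(hammer2_inode_t *ip)
{
	struct vnode *vp;
	int error;

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
	vp = hammer2_igetv(ip, &error);
	hammer2_inode_unlock(ip);

	return (error ? NULL : vp);	/* vp is NULL when error != 0 */
}
#endif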

/*
 * Returns the inode associated with the passed-in cluster, creating the
 * inode if necessary and synchronizing it to the passed-in cluster otherwise.
 * When synchronizing, if idx >= 0, only cluster index (idx) is synchronized.
 * Otherwise the whole cluster is synchronized.
 *
 * The passed-in cluster must be locked and will remain locked on return.
 * The returned inode will be locked and the caller may dispose of both
 * via hammer2_inode_unlock() + hammer2_inode_drop().  However, if the caller
 * needs to resolve a hardlink it must ref/unlock/relock/drop the inode.
 *
 * The hammer2_inode structure regulates the interface between the high level
 * kernel VNOPS API and the filesystem backend (the chains).
 *
 * On return the inode is locked with the supplied cluster.
 */
hammer2_inode_t *
hammer2_inode_get(hammer2_pfs_t *pmp, hammer2_inode_t *dip,
		  hammer2_cluster_t *cluster, int idx)
{
	hammer2_inode_t *nip;
	const hammer2_inode_data_t *iptmp;
	const hammer2_inode_data_t *nipdata;

	KKASSERT(cluster == NULL ||
		 hammer2_cluster_type(cluster) == HAMMER2_BREF_TYPE_INODE);
	KKASSERT(pmp);

	/*
	 * Interlocked lookup/ref of the inode.  This code is only needed
	 * when looking up inodes with nlinks != 0 (TODO: optimize out
	 * otherwise and test for duplicates).
	 *
	 * Cluster can be NULL during the initial pfs allocation.
	 */
again:
	while (cluster) {
		iptmp = &hammer2_cluster_rdata(cluster)->ipdata;
		nip = hammer2_inode_lookup(pmp, iptmp->meta.inum);
		if (nip == NULL)
			break;

		hammer2_mtx_ex(&nip->lock);

		/*
		 * Handle SMP race (not applicable to the super-root spmp
		 * which can't index inodes due to duplicative inode numbers).
		 */
		if (pmp->spmp_hmp == NULL &&
		    (nip->flags & HAMMER2_INODE_ONRBTREE) == 0) {
			hammer2_mtx_unlock(&nip->lock);
			hammer2_inode_drop(nip);
			continue;
		}
		if (idx >= 0)
			hammer2_inode_repoint_one(nip, cluster, idx);
		else
			hammer2_inode_repoint(nip, NULL, cluster);

		return nip;
	}

	/*
	 * We couldn't find the inode number, create a new inode.
	 */
	nip = kmalloc(sizeof(*nip), pmp->minode, M_WAITOK | M_ZERO);
	spin_init(&nip->cluster_spin, "h2clspin");
	atomic_add_long(&pmp->inmem_inodes, 1);
	hammer2_pfs_memory_inc(pmp);
	hammer2_pfs_memory_wakeup(pmp);
	if (pmp->spmp_hmp)
		nip->flags = HAMMER2_INODE_SROOT;

	/*
	 * Initialize nip's cluster.  A cluster is provided for normal
	 * inodes but typically not for the super-root or PFS inodes.
	 */
	nip->cluster.refs = 1;
	nip->cluster.pmp = pmp;
	nip->cluster.flags |= HAMMER2_CLUSTER_INODE;
	if (cluster) {
		nipdata = &hammer2_cluster_rdata(cluster)->ipdata;
		nip->meta = nipdata->meta;
		atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD);
		hammer2_inode_repoint(nip, NULL, cluster);
	} else {
		nip->meta.inum = 1;		/* PFS inum is always 1 XXX */
		/* mtime will be updated when a cluster is available */
		atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD);/*XXX*/
	}

	nip->pmp = pmp;

	/*
	 * ref and lock on nip gives it state compatible to after a
	 * hammer2_inode_lock() call.
	 */
	nip->refs = 1;
	hammer2_mtx_init(&nip->lock, "h2inode");
	hammer2_mtx_ex(&nip->lock);
	/* combination of thread lock and chain lock == inode lock */

	/*
	 * Attempt to add the inode.  If it fails we raced another inode
	 * get.  Undo all the work and try again.
	 */
	if (pmp->spmp_hmp == NULL) {
		hammer2_spin_ex(&pmp->inum_spin);
		if (RB_INSERT(hammer2_inode_tree, &pmp->inum_tree, nip)) {
			hammer2_spin_unex(&pmp->inum_spin);
			hammer2_mtx_unlock(&nip->lock);
			hammer2_inode_drop(nip);
			goto again;
		}
		atomic_set_int(&nip->flags, HAMMER2_INODE_ONRBTREE);
		++pmp->inum_count;
		hammer2_spin_unex(&pmp->inum_spin);
	}

	return (nip);
}

/*
 * MESSY! CLEANUP!
 *
 * Create a new inode.  When vap is supplied the inode type is derived
 * from it; when vap is NULL the caller's type (and target_type) fields
 * are used instead.  We need the directory to set iparent or to use
 * when the inode is directly embedded in a directory (typically
 * super-root entries), but note that this really only applies to
 * OBJTYPE_DIRECTORY as non-directory inodes can be hardlinked.
 *
 * If no error occurs the new inode is returned, otherwise NULL is returned.
 * It is possible for an error to create a junk inode and then fail later.
 * It will attempt to delete the junk inode and return NULL in this situation.
 *
 * If vap and/or cred are NULL the related fields are not set and the
 * inode type defaults to a directory.  This is used when creating PFSs
 * under the super-root, so the inode number is set to 1 in this case.
 *
 * dip is not locked on entry.
 *
 * NOTE: This function is used to create all manners of inodes, including
 *	 super-root entries for snapshots and PFSs.  When used to create a
 *	 snapshot the inode will be temporarily associated with the spmp.
 *
 * NOTE: When creating a normal file or directory the name/name_len/lhc
 *	 is optional, but is typically specified to make debugging and
 *	 recovery easier.
 */
hammer2_inode_t *
hammer2_inode_create(hammer2_inode_t *dip, hammer2_inode_t *pip,
		     struct vattr *vap, struct ucred *cred,
		     const uint8_t *name, size_t name_len, hammer2_key_t lhc,
		     hammer2_key_t inum,
		     uint8_t type, uint8_t target_type,
		     int flags, int *errorp)
{
	hammer2_xop_create_t *xop;
	hammer2_inode_t *nip;
	int error;
	uid_t xuid;
	uuid_t pip_uid;
	uuid_t pip_gid;
	uint32_t pip_mode;
	uint8_t pip_comp_algo;
	uint8_t pip_check_algo;
	hammer2_tid_t pip_inum;

	if (name)
		lhc = hammer2_dirhash(name, name_len);
	*errorp = 0;
	nip = NULL;

	/*
	 * Locate the inode or indirect block to create the new
	 * entry in.  At the same time check for key collisions
	 * and iterate until we don't get one.
	 *
	 * Lock the directory exclusively for now to guarantee that
	 * we can find an unused lhc for the name.  Due to collisions,
	 * two different creates can end up with the same lhc so we
	 * cannot depend on the OS to prevent the collision.
	 */
	hammer2_inode_lock(dip, 0);

	pip_uid = pip->meta.uid;
	pip_gid = pip->meta.gid;
	pip_mode = pip->meta.mode;
	pip_comp_algo = pip->meta.comp_algo;
	pip_check_algo = pip->meta.check_algo;
	pip_inum = (pip == pip->pmp->iroot) ? 1 : pip->meta.inum;

	/*
	 * If name specified, locate an unused key in the collision space.
	 * Otherwise use the passed-in lhc directly.
	 */
	if (name) {
		hammer2_xop_scanlhc_t *sxop;
		hammer2_key_t lhcbase;

		lhcbase = lhc;
		sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
		sxop->lhc = lhc;
		hammer2_xop_start(&sxop->head, hammer2_xop_scanlhc);
		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
			if (lhc != sxop->head.cluster.focus->bref.key)
				break;
			++lhc;
		}
		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

		if (error) {
			if (error != HAMMER2_ERROR_ENOENT)
				goto done2;
			++lhc;
			error = 0;
		}
		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
			error = HAMMER2_ERROR_ENOSPC;
			goto done2;
		}
	}

	/*
	 * Create the inode with the lhc as the key.
	 */
	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
	xop->lhc = lhc;
	xop->flags = flags;
	bzero(&xop->meta, sizeof(xop->meta));

	if (vap) {
		xop->meta.type = hammer2_get_obj_type(vap->va_type);

		switch (xop->meta.type) {
		case HAMMER2_OBJTYPE_CDEV:
		case HAMMER2_OBJTYPE_BDEV:
			xop->meta.rmajor = vap->va_rmajor;
			xop->meta.rminor = vap->va_rminor;
			break;
		default:
			break;
		}
		type = xop->meta.type;
	} else {
		xop->meta.type = type;
		xop->meta.target_type = target_type;
	}
	xop->meta.inum = inum;
	xop->meta.iparent = pip_inum;

	/* Inherit parent's inode compression mode. */
	xop->meta.comp_algo = pip_comp_algo;
	xop->meta.check_algo = pip_check_algo;
	xop->meta.version = HAMMER2_INODE_VERSION_ONE;
	hammer2_update_time(&xop->meta.ctime);
	xop->meta.mtime = xop->meta.ctime;
	if (vap)
		xop->meta.mode = vap->va_mode;
	xop->meta.nlinks = 1;
	if (vap) {
		if (dip->pmp) {
			xuid = hammer2_to_unix_xid(&pip_uid);
			xuid = vop_helper_create_uid(dip->pmp->mp,
						     pip_mode,
						     xuid,
						     cred,
						     &vap->va_mode);
		} else {
			/* super-root has no dip and/or pmp */
			xuid = 0;
		}
		if (vap->va_vaflags & VA_UID_UUID_VALID)
			xop->meta.uid = vap->va_uid_uuid;
		else if (vap->va_uid != (uid_t)VNOVAL)
			hammer2_guid_to_uuid(&xop->meta.uid, vap->va_uid);
		else
			hammer2_guid_to_uuid(&xop->meta.uid, xuid);

		if (vap->va_vaflags & VA_GID_UUID_VALID)
			xop->meta.gid = vap->va_gid_uuid;
		else if (vap->va_gid != (gid_t)VNOVAL)
			hammer2_guid_to_uuid(&xop->meta.gid, vap->va_gid);
		else
			xop->meta.gid = pip_gid;
	}

	/*
	 * Regular files and softlinks allow a small amount of data to be
	 * directly embedded in the inode.  This flag will be cleared if
	 * the size is extended past the embedded limit.
	 */
	if (xop->meta.type == HAMMER2_OBJTYPE_REGFILE ||
	    xop->meta.type == HAMMER2_OBJTYPE_SOFTLINK) {
		xop->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
	}
	if (name) {
		hammer2_xop_setname(&xop->head, name, name_len);
	} else {
		name_len = hammer2_xop_setname_inum(&xop->head, inum);
		KKASSERT(lhc == inum);
	}
	xop->meta.name_len = name_len;
	xop->meta.name_key = lhc;
	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);

	hammer2_xop_start(&xop->head, hammer2_inode_xop_create);

	error = hammer2_xop_collect(&xop->head, 0);
#if INODE_DEBUG
	kprintf("CREATE INODE %*.*s\n",
		(int)name_len, (int)name_len, name);
#endif

	if (error) {
		*errorp = error;
		goto done;
	}

	/*
	 * Set up the new inode if not a hardlink pointer.
	 *
	 * NOTE: *_get() integrates chain's lock into the inode lock.
	 *
	 * NOTE: Only one new inode can currently be created per
	 *	 transaction.  If the need arises we can adjust
	 *	 hammer2_trans_init() to allow more.
	 *
	 * NOTE: nipdata will have chain's blockset data.
	 */
	nip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
	nip->comp_heuristic = 0;
done:
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
done2:
	hammer2_inode_unlock(dip);

	return (nip);
}
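
/*
 * Illustrative sketch (expository, not compiled): an ncreate-style call,
 * assuming the caller is inside a transaction and that
 * hammer2_trans_newinum() (as used by the VOP frontends) supplies the
 * fresh inode number.  Hypothetical example_* name; type/target_type
 * are 0 because vap determines the type here.
 */
#if 0
static hammer2_inode_t *
example_create_file(hammer2_inode_t *dip, struct vattr *vap,
		    struct ucred *cred, const uint8_t *name, size_t name_len)
{
	hammer2_inode_t *nip;
	hammer2_key_t inum;
	int error;

	inum = hammer2_trans_newinum(dip->pmp);
	nip = hammer2_inode_create(dip, dip, vap, cred,
				   name, name_len,
				   0,		/* lhc recomputed from name */
				   inum, 0, 0, 0, &error);
	return nip;		/* NULL with error set on failure */
}
#endif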

/*
 * Create a directory entry under dip with the specified name, inode number,
 * and OBJTYPE (type).
 *
 * This returns a UNIX errno code, not a HAMMER2_ERROR_* code.
 */
int
hammer2_dirent_create(hammer2_inode_t *dip, const char *name, size_t name_len,
		      hammer2_key_t inum, uint8_t type)
{
	hammer2_xop_mkdirent_t *xop;
	hammer2_key_t lhc;
	int error;

	lhc = 0;
	error = 0;

	KKASSERT(name != NULL);
	lhc = hammer2_dirhash(name, name_len);

	/*
	 * Locate the inode or indirect block to create the new
	 * entry in.  At the same time check for key collisions
	 * and iterate until we don't get one.
	 *
	 * Lock the directory exclusively for now to guarantee that
	 * we can find an unused lhc for the name.  Due to collisions,
	 * two different creates can end up with the same lhc so we
	 * cannot depend on the OS to prevent the collision.
	 */
	hammer2_inode_lock(dip, 0);

	/*
	 * If name specified, locate an unused key in the collision space.
	 * Otherwise use the passed-in lhc directly.
	 */
	{
		hammer2_xop_scanlhc_t *sxop;
		hammer2_key_t lhcbase;

		lhcbase = lhc;
		sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
		sxop->lhc = lhc;
		hammer2_xop_start(&sxop->head, hammer2_xop_scanlhc);
		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
			if (lhc != sxop->head.cluster.focus->bref.key)
				break;
			++lhc;
		}
		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

		if (error) {
			if (error != HAMMER2_ERROR_ENOENT)
				goto done2;
			++lhc;
			error = 0;
		}
		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
			error = HAMMER2_ERROR_ENOSPC;
			goto done2;
		}
	}

	/*
	 * Create the directory entry with the lhc as the key.
	 */
	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
	xop->lhc = lhc;
	bzero(&xop->dirent, sizeof(xop->dirent));
	xop->dirent.inum = inum;
	xop->dirent.type = type;
	xop->dirent.namlen = name_len;

	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);
	hammer2_xop_setname(&xop->head, name, name_len);

	hammer2_xop_start(&xop->head, hammer2_inode_xop_mkdirent);

	error = hammer2_xop_collect(&xop->head, 0);

	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
done2:
	error = hammer2_error_to_errno(error);
	hammer2_inode_unlock(dip);

	return error;
}
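
/*
 * Illustrative sketch (expository, not compiled): linking an existing
 * inode into a directory, as the mkdir/link frontends do after creating
 * or resolving the target inode.  Hypothetical example_* name.
 */
#if 0
static int
example_link_dirent(hammer2_inode_t *dip, hammer2_inode_t *nip,
		    const char *name, size_t name_len)
{
	/* Returns a UNIX errno, 0 on success */
	return hammer2_dirent_create(dip, name, name_len,
				     nip->meta.inum, nip->meta.type);
}
#endif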

/*
 * Repoint ip->cluster's chains to cluster's chains and fixup the default
 * focus.  All items, valid or invalid, are repointed.  hammer2_xop_start()
 * filters out invalid or non-matching elements.
 *
 * Caller must hold the inode exclusively locked; the cluster, if not NULL,
 * must also be locked.
 *
 * Cluster may be NULL to clean out any chains in ip->cluster.
 */
void
hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_inode_t *pip,
		      hammer2_cluster_t *cluster)
{
	hammer2_chain_t *dropch[HAMMER2_MAXCLUSTER];
	hammer2_chain_t *ochain;
	hammer2_chain_t *nchain;
	int i;

	bzero(dropch, sizeof(dropch));

	/*
	 * Replace chains in ip->cluster with chains from cluster and
	 * adjust the focus if necessary.
	 *
	 * NOTE: nchain and/or ochain can be NULL due to gaps
	 *	 in the cluster arrays.
	 */
	hammer2_spin_ex(&ip->cluster_spin);
	for (i = 0; cluster && i < cluster->nchains; ++i) {
		/*
		 * Do not replace elements which are the same.  Also handle
		 * element count discrepancies.
		 */
		nchain = cluster->array[i].chain;
		if (i < ip->cluster.nchains) {
			ochain = ip->cluster.array[i].chain;
			if (ochain == nchain)
				continue;
		} else {
			ochain = NULL;
		}

		/*
		 * Make adjustments
		 */
		ip->cluster.array[i].chain = nchain;
		ip->cluster.array[i].flags &= ~HAMMER2_CITEM_INVALID;
		ip->cluster.array[i].flags |= cluster->array[i].flags &
					      HAMMER2_CITEM_INVALID;
		if (nchain)
			hammer2_chain_ref(nchain);
		dropch[i] = ochain;
	}

	/*
	 * Release any left-over chains in ip->cluster.
	 */
	while (i < ip->cluster.nchains) {
		nchain = ip->cluster.array[i].chain;
		if (nchain) {
			ip->cluster.array[i].chain = NULL;
			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
		}
		dropch[i] = nchain;
		++i;
	}

	/*
	 * Fixup fields.  Note that the inode-embedded cluster is never
	 * directly locked.
	 */
	if (cluster) {
		ip->cluster.nchains = cluster->nchains;
		ip->cluster.focus = cluster->focus;
		ip->cluster.flags = cluster->flags & ~HAMMER2_CLUSTER_LOCKED;
	} else {
		ip->cluster.nchains = 0;
		ip->cluster.focus = NULL;
		ip->cluster.flags &= ~HAMMER2_CLUSTER_ZFLAGS;
	}

	hammer2_spin_unex(&ip->cluster_spin);

	/*
	 * Cleanup outside of spinlock
	 */
	while (--i >= 0) {
		if (dropch[i])
			hammer2_chain_drop(dropch[i]);
	}
}

/*
 * Repoint a single element from the cluster to the ip.  Used by the
 * synchronization threads to piecemeal update inodes.  Does not change
 * focus and requires inode to be re-locked to clean-up flags (XXX).
 */
void
hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
			  int idx)
{
	hammer2_chain_t *ochain;
	hammer2_chain_t *nchain;
	int i;

	hammer2_spin_ex(&ip->cluster_spin);
	KKASSERT(idx < cluster->nchains);
	if (idx < ip->cluster.nchains) {
		ochain = ip->cluster.array[idx].chain;
		nchain = cluster->array[idx].chain;
	} else {
		ochain = NULL;
		nchain = cluster->array[idx].chain;
		for (i = ip->cluster.nchains; i <= idx; ++i) {
			bzero(&ip->cluster.array[i],
			      sizeof(ip->cluster.array[i]));
			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
		}
		ip->cluster.nchains = idx + 1;
	}
	if (ochain != nchain) {
		/*
		 * Make adjustments.
		 */
		ip->cluster.array[idx].chain = nchain;
		ip->cluster.array[idx].flags &= ~HAMMER2_CITEM_INVALID;
		ip->cluster.array[idx].flags |= cluster->array[idx].flags &
						HAMMER2_CITEM_INVALID;
	}
	hammer2_spin_unex(&ip->cluster_spin);
	if (ochain != nchain) {
		if (nchain)
			hammer2_chain_ref(nchain);
		if (ochain)
			hammer2_chain_drop(ochain);
	}
}

/*
 * Called with a locked inode to finish unlinking an inode after xop_unlink
 * had been run.  This function is responsible for decrementing nlinks.
 *
 * We don't bother decrementing nlinks if the file is not open and this was
 * the last link.
 *
 * If the inode is a hardlink target its chain has not yet been deleted,
 * otherwise its chain has been deleted.
 *
 * If isopen then any prior deletion was not permanent and the inode is
 * left intact with nlinks == 0.
 */
int
hammer2_inode_unlink_finisher(hammer2_inode_t *ip, int isopen)
{
	hammer2_pfs_t *pmp;
	int error;

	pmp = ip->pmp;

	/*
	 * Decrement nlinks.  If this is the last link and the file is
	 * not open we can just delete the inode and not bother dropping
	 * nlinks to 0 (avoiding unnecessary block updates).
	 */
	if (ip->meta.nlinks == 1) {
		atomic_set_int(&ip->flags, HAMMER2_INODE_ISUNLINKED);
		if (isopen == 0)
			goto killit;
	}

	hammer2_inode_modify(ip);
	--ip->meta.nlinks;
	if ((int64_t)ip->meta.nlinks < 0)
		ip->meta.nlinks = 0;	/* safety */

	/*
	 * If nlinks is not zero we are done.  However, this should only be
	 * possible with a hardlink target.  If the inode is an embedded
	 * hardlink, nlinks should have dropped to zero; warn and proceed
	 * with the next step.
	 */
	if (ip->meta.nlinks) {
		if ((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0)
			return 0;
		kprintf("hammer2_inode_unlink: nlinks was not 0 (%jd)\n",
			(intmax_t)ip->meta.nlinks);
		return 0;
	}

	if (ip->vp)
		hammer2_knote(ip->vp, NOTE_DELETE);

	/*
	 * nlinks is now an implied zero, delete the inode if not open.
	 * We avoid unnecessary media updates by not bothering to actually
	 * decrement nlinks for the 1->0 transition.
	 *
	 * Put the inode on the sideq to ensure that any disconnected chains
	 * get properly flushed (so they can be freed).
	 */
	if (isopen == 0) {
		hammer2_xop_destroy_t *xop;

killit:
		hammer2_inode_delayed_sideq(ip);
		atomic_set_int(&ip->flags, HAMMER2_INODE_ISDELETED);
		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		hammer2_xop_start(&xop->head, hammer2_inode_xop_destroy);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	}
	error = 0;	/* XXX */

	return error;
}

/*
 * Mark an inode as being modified, meaning that the caller will modify
 * ip->meta.
 *
 * If a vnode is present we set the vnode dirty and the nominal filesystem
 * sync will also handle synchronizing the inode meta-data.  If no vnode
 * is present we must ensure that the inode is on pmp->sideq.
 *
 * NOTE: No mtid (modify_tid) is passed into this routine.  The caller is
 *	 only modifying the in-memory inode.  A modify_tid is synchronized
 *	 later when the inode gets flushed.
 *
 * NOTE: As an exception to the general rule, the inode MAY be locked
 *	 shared for this particular call.
 */
void
hammer2_inode_modify(hammer2_inode_t *ip)
{
	atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
	if (ip->vp) {
		vsetisdirty(ip->vp);
	} else if (ip->pmp && (ip->flags & HAMMER2_INODE_NOSIDEQ) == 0) {
		hammer2_inode_delayed_sideq(ip);
	}
}
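
/*
 * Illustrative sketch (expository, not compiled): callers flag the inode
 * as modified before touching ip->meta so the change is picked up by the
 * vnode sync, or by the sideq when no vnode exists.  Hypothetical
 * example_* name.
 */
#if 0
static void
example_chmod(hammer2_inode_t *ip, uint32_t mode)
{
	hammer2_inode_lock(ip, 0);		/* exclusive */
	hammer2_inode_modify(ip);
	ip->meta.mode = mode;
	hammer2_inode_unlock(ip);
}
#endif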

/*
 * Synchronize the inode's frontend state with the chain state prior
 * to any explicit flush of the inode or any strategy write call.  This
 * does not flush the inode's chain or its sub-topology to media (higher
 * level layers are responsible for doing that).
 *
 * Called with a locked inode inside a normal transaction.
 *
 * inode must be locked.
 */
int
hammer2_inode_chain_sync(hammer2_inode_t *ip)
{
	int error;

	error = 0;
	if (ip->flags & (HAMMER2_INODE_RESIZED | HAMMER2_INODE_MODIFIED)) {
		hammer2_xop_fsync_t *xop;

		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		xop->clear_directdata = 0;
		if (ip->flags & HAMMER2_INODE_RESIZED) {
			if ((ip->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) &&
			    ip->meta.size > HAMMER2_EMBEDDED_BYTES) {
				ip->meta.op_flags &= ~HAMMER2_OPFLAG_DIRECTDATA;
				xop->clear_directdata = 1;
			}
			xop->osize = ip->osize;
		} else {
			xop->osize = ip->meta.size;	/* safety */
		}
		xop->ipflags = ip->flags;
		xop->meta = ip->meta;

		atomic_clear_int(&ip->flags, HAMMER2_INODE_RESIZED |
					     HAMMER2_INODE_MODIFIED);
		hammer2_xop_start(&xop->head, hammer2_inode_xop_chain_sync);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (error == HAMMER2_ERROR_ENOENT)
			error = 0;
		if (error) {
			kprintf("hammer2: unable to fsync inode %p\n", ip);
			/*
			atomic_set_int(&ip->flags,
				       xop->ipflags & (HAMMER2_INODE_RESIZED |
						       HAMMER2_INODE_MODIFIED));
			*/
			/* XXX return error somehow? */
		}
	}
	return error;
}

/*
 * Flushes the inode's chain and its sub-topology to media.  Interlocks
 * HAMMER2_INODE_DIRTYDATA by clearing it prior to the flush.  Any strategy
 * function creating or modifying a chain under this inode will re-set the
 * flag.
 *
 * inode must be locked.
 */
int
hammer2_inode_chain_flush(hammer2_inode_t *ip)
{
	hammer2_xop_fsync_t *xop;
	int error;

	atomic_clear_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING |
				    HAMMER2_XOP_INODE_STOP);
	hammer2_xop_start(&xop->head, hammer2_inode_xop_flush);
	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_WAITALL);
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	if (error == HAMMER2_ERROR_ENOENT)
		error = 0;

	return error;
}
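
/*
 * Illustrative sketch (expository, not compiled): an fsync-style
 * sequence matching what hammer2_inode_run_sideq() does below: first
 * synchronize ip->meta into the chain topology, then flush the chain
 * and its sub-topology to media.  Hypothetical example_* name; the
 * caller is assumed to be inside a transaction.
 */
#if 0
static int
example_fsync(hammer2_inode_t *ip)
{
	int error;

	hammer2_inode_lock(ip, 0);
	error = hammer2_inode_chain_sync(ip);
	if (error == 0)
		error = hammer2_inode_chain_flush(ip);
	hammer2_inode_unlock(ip);

	return error;		/* HAMMER2_ERROR_* code */
}
#endif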

/*
 * The normal filesystem sync no longer has visibility to an inode structure
 * after its vnode has been reclaimed.  In this situation a dirty inode may
 * require additional processing to synchronize ip->meta to its underlying
 * cluster nodes.
 *
 * In particular, reclaims can occur in almost any state (for example, when
 * doing operations on unrelated vnodes) and flushing the reclaimed inode
 * in the reclaim path itself is a non-starter.
 *
 * Caller must be in a transaction.
 */
void
hammer2_inode_run_sideq(hammer2_pfs_t *pmp, int doall)
{
	hammer2_xop_destroy_t *xop;
	hammer2_inode_sideq_t *ipul;
	hammer2_inode_t *ip;
	int error;

	/*
	 * Nothing to do if sideq is empty or (if doall == 0) there just
	 * aren't very many sideq entries.
	 */
	if (TAILQ_EMPTY(&pmp->sideq))
		return;
	if (doall == 0) {
		if (pmp->sideq_count > (pmp->inum_count >> 3)) {
			if (hammer2_debug & 0x0001) {
				kprintf("hammer2: flush sideq %ld/%ld\n",
					pmp->sideq_count, pmp->inum_count);
			}
		}
	}

	if (doall == 0 && pmp->sideq_count <= (pmp->inum_count >> 3))
		return;

	hammer2_spin_ex(&pmp->list_spin);
	while ((ipul = TAILQ_FIRST(&pmp->sideq)) != NULL) {
		TAILQ_REMOVE(&pmp->sideq, ipul, entry);
		--pmp->sideq_count;
		ip = ipul->ip;
		KKASSERT(ip->flags & HAMMER2_INODE_ONSIDEQ);
		atomic_clear_int(&ip->flags, HAMMER2_INODE_ONSIDEQ);
		hammer2_spin_unex(&pmp->list_spin);
		kfree(ipul, pmp->minode);

		hammer2_inode_lock(ip, 0);
		if (ip->flags & HAMMER2_INODE_ISDELETED) {
			/*
			 * The inode has already been deleted.  This is a
			 * fairly rare circumstance.  For now we don't rock
			 * the boat and synchronize it normally.
			 */
			hammer2_inode_chain_sync(ip);
			hammer2_inode_chain_flush(ip);
		} else if (ip->flags & HAMMER2_INODE_ISUNLINKED) {
			/*
			 * The inode was unlinked while open.  The inode must
			 * be deleted and destroyed.
			 */
			xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
			hammer2_xop_start(&xop->head,
					  hammer2_inode_xop_destroy);
			error = hammer2_xop_collect(&xop->head, 0);
			/* XXX error handling */
			hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		} else {
			/*
			 * The inode was dirty as-of the reclaim, requiring
			 * synchronization of ip->meta with its underlying
			 * chains.
			 */
			hammer2_inode_chain_sync(ip);
			hammer2_inode_chain_flush(ip);
		}

		hammer2_inode_unlock(ip);
		hammer2_inode_drop(ip);			/* ipul ref */

		hammer2_spin_ex(&pmp->list_spin);

		/*
		 * If doall is 0 the original sideq_count was greater than
		 * 1/8 the inode count.  Add some hysteresis in the loop,
		 * don't stop flushing until sideq_count drops below 1/16.
		 */
		if (doall == 0 && pmp->sideq_count <= (pmp->inum_count >> 4)) {
			if (hammer2_debug & 0x0001) {
				kprintf("hammer2: flush sideq %ld/%ld (end)\n",
					pmp->sideq_count, pmp->inum_count);
			}
			break;
		}
	}
	hammer2_spin_unex(&pmp->list_spin);
}
1395
1396 /*
1397  * Helper to create a directory entry.
1398  */
void
hammer2_inode_xop_mkdirent(hammer2_thread_t *thr, hammer2_xop_t *arg)
{
	hammer2_xop_mkdirent_t *xop = &arg->xop_mkdirent;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_key_t key_next;
	size_t data_len;
	int error;

	if (hammer2_debug & 0x0001)
		kprintf("dirent_create lhc %016jx clindex %d\n",
			xop->lhc, thr->clindex);

	parent = hammer2_inode_chain(xop->head.ip1, thr->clindex,
				     HAMMER2_RESOLVE_ALWAYS);
	if (parent == NULL) {
		error = HAMMER2_ERROR_EIO;
		chain = NULL;
		goto fail;
	}
	chain = hammer2_chain_lookup(&parent, &key_next,
				     xop->lhc, xop->lhc,
				     &error, 0);
	if (chain) {
		error = HAMMER2_ERROR_EEXIST;
		goto fail;
	}

	/*
	 * We may be able to embed the directory entry directly in the
	 * blockref: if the filename fits in the blockref's check-code
	 * area no separate data block is needed, otherwise allocate
	 * the minimum-sized data block to hold the name.
	 */
	if (xop->dirent.namlen <= sizeof(chain->bref.check.buf))
		data_len = 0;
	else
		data_len = HAMMER2_ALLOC_MIN;
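	/*
	 * Illustrative sizing (a sketch, assuming the check area is the
	 * usual 64 bytes and HAMMER2_ALLOC_MIN is 1KB):
	 *
	 *	namlen 10  ("lost+found")   -> data_len 0    (embedded)
	 *	namlen 100 (very long name) -> data_len 1024 (data block)
	 *
	 * Note that chain is NULL at this point; sizeof() does not
	 * evaluate its operand, so the test above is safe.
	 */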

	error = hammer2_chain_create(&parent, &chain,
				     xop->head.ip1->pmp, HAMMER2_METH_DEFAULT,
				     xop->lhc, 0,
				     HAMMER2_BREF_TYPE_DIRENT,
				     data_len,
				     xop->head.mtid, 0, 0);
	if (error == 0) {
		/*
		 * WARNING: chain->data->buf is sized to chain->bytes,
		 *	    do not use sizeof(chain->data->buf), which
		 *	    will be much larger.
		 */
		error = hammer2_chain_modify(chain, xop->head.mtid, 0, 0);
		if (error == 0) {
			chain->bref.embed.dirent = xop->dirent;
			if (xop->dirent.namlen <= sizeof(chain->bref.check.buf))
				bcopy(xop->head.name1, chain->bref.check.buf,
				      xop->dirent.namlen);
			else
				bcopy(xop->head.name1, chain->data->buf,
				      xop->dirent.namlen);
		}
	}
fail:
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	hammer2_xop_feed(&xop->head, chain, thr->clindex, error);
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
}

/*
 * Inode create helper (threaded, backend)
 *
 * Used by ncreate, nmknod, nsymlink, nmkdir.
 * Used by nlink and rename to create HARDLINK pointers.
 *
 * Frontend holds the parent directory ip locked exclusively.  We
 * create the inode and feed the exclusively locked chain to the
 * frontend.
 */
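/*
 * A typical frontend dispatch, sketched after the xop pattern used for
 * hammer2_inode_xop_destroy in the sideq burn-down above ("dip" is a
 * placeholder for the locked parent directory inode; field setup is
 * elided):
 *
 *	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
 *	...set xop->lhc, xop->meta, xop->flags, and the name...
 *	hammer2_xop_start(&xop->head, hammer2_inode_xop_create);
 *	error = hammer2_xop_collect(&xop->head, 0);
 *	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
 */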
void
hammer2_inode_xop_create(hammer2_thread_t *thr, hammer2_xop_t *arg)
{
	hammer2_xop_create_t *xop = &arg->xop_create;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_key_t key_next;
	int error;

	if (hammer2_debug & 0x0001)
		kprintf("inode_create lhc %016jx clindex %d\n",
			xop->lhc, thr->clindex);

	parent = hammer2_inode_chain(xop->head.ip1, thr->clindex,
				     HAMMER2_RESOLVE_ALWAYS);
	if (parent == NULL) {
		error = HAMMER2_ERROR_EIO;
		chain = NULL;
		goto fail;
	}
	chain = hammer2_chain_lookup(&parent, &key_next,
				     xop->lhc, xop->lhc,
				     &error, 0);
	if (chain) {
		error = HAMMER2_ERROR_EEXIST;
		goto fail;
	}

	error = hammer2_chain_create(&parent, &chain,
				     xop->head.ip1->pmp, HAMMER2_METH_DEFAULT,
				     xop->lhc, 0,
				     HAMMER2_BREF_TYPE_INODE,
				     HAMMER2_INODE_BYTES,
				     xop->head.mtid, 0, xop->flags);
	if (error == 0) {
		error = hammer2_chain_modify(chain, xop->head.mtid, 0, 0);
		if (error == 0) {
			chain->data->ipdata.meta = xop->meta;
			if (xop->head.name1) {
				bcopy(xop->head.name1,
				      chain->data->ipdata.filename,
				      xop->head.name1_len);
				chain->data->ipdata.meta.name_len =
					xop->head.name1_len;
			}
			chain->data->ipdata.meta.name_key = xop->lhc;
		}
	}
fail:
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	hammer2_xop_feed(&xop->head, chain, thr->clindex, error);
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
}

/*
 * Inode delete helper (backend, threaded)
 *
 * Generally used by hammer2_run_sideq().
 */
void
hammer2_inode_xop_destroy(hammer2_thread_t *thr, hammer2_xop_t *arg)
{
	hammer2_xop_destroy_t *xop = &arg->xop_destroy;
	hammer2_pfs_t *pmp;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_inode_t *ip;
	int error;

	/*
	 * We need the precise parent chain to issue the deletion.
	 */
	ip = xop->head.ip1;
	pmp = ip->pmp;

	chain = hammer2_inode_chain(ip, thr->clindex, HAMMER2_RESOLVE_ALWAYS);
	if (chain == NULL) {
		parent = NULL;
		error = HAMMER2_ERROR_EIO;
		goto done;
	}
	parent = hammer2_chain_getparent(chain, HAMMER2_RESOLVE_ALWAYS);
	if (parent == NULL) {
		error = HAMMER2_ERROR_EIO;
		goto done;
	}
	KKASSERT(chain->parent == parent);

	/*
	 * We have the correct parent, so we can issue the deletion.
	 */
	hammer2_chain_delete(parent, chain, xop->head.mtid, 0);
	error = 0;
done:
	hammer2_xop_feed(&xop->head, NULL, thr->clindex, error);
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
}

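/*
 * Directory entry removal helper (backend, threaded)
 *
 * Permanently deletes all chains found in the key range key_beg through
 * key_end under the target directory, feeding each deleted chain back
 * to the frontend as it goes.
 */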
void
hammer2_inode_xop_unlinkall(hammer2_thread_t *thr, hammer2_xop_t *arg)
{
	hammer2_xop_unlinkall_t *xop = &arg->xop_unlinkall;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_key_t key_next;
	int error;

	/*
	 * We need the precise parent chain to issue the deletion.
	 */
	parent = hammer2_inode_chain(xop->head.ip1, thr->clindex,
				     HAMMER2_RESOLVE_ALWAYS);
	chain = NULL;
	if (parent == NULL) {
		error = 0;
		goto done;
	}
	chain = hammer2_chain_lookup(&parent, &key_next,
				     xop->key_beg, xop->key_end,
				     &error, HAMMER2_LOOKUP_ALWAYS);
	while (chain) {
		hammer2_chain_delete(parent, chain,
				     xop->head.mtid, HAMMER2_DELETE_PERMANENT);
		hammer2_xop_feed(&xop->head, chain, thr->clindex, chain->error);
		/* hammer2_chain_next() disposes of the prior chain's lock */
		chain = hammer2_chain_next(&parent, chain, &key_next,
					   key_next, xop->key_end,
					   &error,
					   HAMMER2_LOOKUP_ALWAYS);
	}
done:
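	/*
	 * The terminal feed must carry a non-zero error code; a scan
	 * that ran to completion is reported as ENOENT.
	 */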
	if (error == 0)
		error = HAMMER2_ERROR_ENOENT;
	hammer2_xop_feed(&xop->head, NULL, thr->clindex, error);
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
}

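/*
 * Inode connect helper (backend, threaded)
 *
 * Reconnects the target inode's chain (ip2) under a new parent
 * directory (ip1) at key lhc, updating the inode's filename and name
 * key in the process.  On success the frontend must adjust ip2->meta
 * itself.
 */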
void
hammer2_inode_xop_connect(hammer2_thread_t *thr, hammer2_xop_t *arg)
{
	hammer2_xop_connect_t *xop = &arg->xop_connect;
	hammer2_inode_data_t *wipdata;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_pfs_t *pmp;
	hammer2_key_t key_dummy;
	int error;

	/*
	 * Get directory, then issue a lookup to prime the parent chain
	 * for the create.  The lookup is expected to fail.
	 */
	pmp = xop->head.ip1->pmp;
	parent = hammer2_inode_chain(xop->head.ip1, thr->clindex,
				     HAMMER2_RESOLVE_ALWAYS);
	if (parent == NULL) {
		chain = NULL;
		error = HAMMER2_ERROR_EIO;
		goto fail;
	}
	chain = hammer2_chain_lookup(&parent, &key_dummy,
				     xop->lhc, xop->lhc,
				     &error, 0);
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		chain = NULL;
		error = HAMMER2_ERROR_EEXIST;
		goto fail;
	}
	if (error)
		goto fail;

	/*
	 * Adjust the filename in the inode, set the name key.
	 *
	 * NOTE: Frontend must also adjust ip2->meta on success, we can't
	 *	 do it here.
	 */
	chain = hammer2_inode_chain(xop->head.ip2, thr->clindex,
				    HAMMER2_RESOLVE_ALWAYS);
	if (chain == NULL) {
		/* ip2's chain may be absent on this cluster node */
		error = HAMMER2_ERROR_EIO;
		goto fail;
	}
	error = hammer2_chain_modify(chain, xop->head.mtid, 0, 0);
	if (error)
		goto fail;

	wipdata = &chain->data->ipdata;

	hammer2_inode_modify(xop->head.ip2);
	if (xop->head.name1) {
		bzero(wipdata->filename, sizeof(wipdata->filename));
		bcopy(xop->head.name1, wipdata->filename, xop->head.name1_len);
		wipdata->meta.name_len = xop->head.name1_len;
	}
	wipdata->meta.name_key = xop->lhc;

	/*
	 * Reconnect the chain to the new parent directory
	 */
	error = hammer2_chain_create(&parent, &chain,
				     pmp, HAMMER2_METH_DEFAULT,
				     xop->lhc, 0,
				     HAMMER2_BREF_TYPE_INODE,
				     HAMMER2_INODE_BYTES,
				     xop->head.mtid, 0, 0);

	/*
	 * Feed result back.
	 */
fail:
	hammer2_xop_feed(&xop->head, NULL, thr->clindex, error);
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
}

/*
 * Synchronize the in-memory inode with the chain.  This does not flush
 * the chain to disk.  Instead, it makes front-end inode changes visible
 * in the chain topology, thus visible to the backend.  This is done in an
 * ad-hoc manner outside of the filesystem vfs_sync, and in a controlled
 * manner inside the vfs_sync.
 */
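/*
 * A minimal frontend dispatch sketch.  The assumed caller is
 * hammer2_inode_chain_sync() (the frontend referenced in the sideq
 * burn-down above); the exact field setup is an assumption based on
 * the xop fields consumed below:
 *
 *	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
 *	xop->ipflags = ip->flags;
 *	xop->meta = ip->meta;
 *	xop->osize = ip->osize;
 *	hammer2_xop_start(&xop->head, hammer2_inode_xop_chain_sync);
 *	error = hammer2_xop_collect(&xop->head, 0);
 *	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
 */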
void
hammer2_inode_xop_chain_sync(hammer2_thread_t *thr, hammer2_xop_t *arg)
{
	hammer2_xop_fsync_t *xop = &arg->xop_fsync;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	int error;

	parent = hammer2_inode_chain(xop->head.ip1, thr->clindex,
				     HAMMER2_RESOLVE_ALWAYS);
	chain = NULL;
	if (parent == NULL) {
		error = HAMMER2_ERROR_EIO;
		goto done;
	}
	if (parent->error) {
		error = parent->error;
		goto done;
	}

	error = 0;

	if ((xop->ipflags & HAMMER2_INODE_RESIZED) == 0) {
		/* osize is only valid when RESIZED is set, ignore it */
	} else if (xop->meta.size < xop->osize) {
		/*
		 * The file shrank.  We must delete any chains beyond the
		 * new EOF.  The chain straddling the EOF will be pending
		 * in the bioq.
		 */
		hammer2_key_t lbase;
		hammer2_key_t key_next;

		lbase = (xop->meta.size + HAMMER2_PBUFMASK64) &
			~HAMMER2_PBUFMASK64;
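		/*
		 * e.g. assuming the usual 64KB physical buffer size,
		 * truncating to size 100000 rounds lbase up to 131072;
		 * chains at or beyond lbase are deleted below, while the
		 * buffer straddling the new EOF remains pending in the
		 * bioq.
		 */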
		chain = hammer2_chain_lookup(&parent, &key_next,
					     lbase, HAMMER2_KEY_MAX,
					     &error,
					     HAMMER2_LOOKUP_NODATA |
					     HAMMER2_LOOKUP_NODIRECT);
		while (chain) {
			/*
			 * The degenerate embedded (direct-data) case is
			 * excluded by NODIRECT; only DATA chains are
			 * expected beyond the EOF.
			 */
			switch (chain->bref.type) {
			case HAMMER2_BREF_TYPE_DIRENT:
			case HAMMER2_BREF_TYPE_INODE:
				KKASSERT(0);
				break;
			case HAMMER2_BREF_TYPE_DATA:
				hammer2_chain_delete(parent, chain,
						     xop->head.mtid,
						     HAMMER2_DELETE_PERMANENT);
				break;
			}
			chain = hammer2_chain_next(&parent, chain, &key_next,
						   key_next, HAMMER2_KEY_MAX,
						   &error,
						   HAMMER2_LOOKUP_NODATA |
						   HAMMER2_LOOKUP_NODIRECT);
		}

		/*
		 * Reset to point at inode for following code, if necessary.
		 */
		if (parent->bref.type != HAMMER2_BREF_TYPE_INODE) {
			hammer2_chain_unlock(parent);
			hammer2_chain_drop(parent);
			parent = hammer2_inode_chain(xop->head.ip1,
						     thr->clindex,
						     HAMMER2_RESOLVE_ALWAYS);
			kprintf("hammer2: TRUNCATE RESET on '%s'\n",
				parent->data->ipdata.filename);
		}
	}

	/*
	 * Sync the inode meta-data, potentially clear the blockset area
	 * of direct data so it can be used for blockrefs.
	 */
	if (error == 0) {
		error = hammer2_chain_modify(parent, xop->head.mtid, 0, 0);
		if (error == 0) {
			parent->data->ipdata.meta = xop->meta;
			if (xop->clear_directdata) {
				bzero(&parent->data->ipdata.u.blockset,
				      sizeof(parent->data->ipdata.u.blockset));
			}
		}
	}
done:
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	hammer2_xop_feed(&xop->head, NULL, thr->clindex, error);
}