hammer2 - Stabilization pass
[dragonfly.git] / sys / vfs / hammer2 / hammer2_inode.c
/*
 * Copyright (c) 2011-2014 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/uuid.h>

#include "hammer2.h"

#define INODE_DEBUG     0

RB_GENERATE2(hammer2_inode_tree, hammer2_inode, rbnode, hammer2_inode_cmp,
             hammer2_tid_t, meta.inum);

int
hammer2_inode_cmp(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
{
        if (ip1->meta.inum < ip2->meta.inum)
                return(-1);
        if (ip1->meta.inum > ip2->meta.inum)
                return(1);
        return(0);
}

static
void
hammer2_inode_delayed_sideq(hammer2_inode_t *ip)
{
        hammer2_inode_sideq_t *ipul;
        hammer2_pfs_t *pmp = ip->pmp;

        if ((ip->flags & HAMMER2_INODE_ONSIDEQ) == 0) {
                ipul = kmalloc(sizeof(*ipul), pmp->minode,
                               M_WAITOK | M_ZERO);
                ipul->ip = ip;
                hammer2_spin_ex(&pmp->list_spin);

                /*
                 * Recheck under the spinlock; another thread may have
                 * queued the inode while we were allocating.
                 */
                if ((ip->flags & HAMMER2_INODE_ONSIDEQ) == 0) {
                        hammer2_inode_ref(ip);
                        atomic_set_int(&ip->flags,
                                       HAMMER2_INODE_ONSIDEQ);
                        TAILQ_INSERT_TAIL(&pmp->sideq, ipul, entry);
                        hammer2_spin_unex(&pmp->list_spin);
                } else {
                        hammer2_spin_unex(&pmp->list_spin);
                        kfree(ipul, pmp->minode);
                }
        }
}

/*
 * HAMMER2 inode locks
 *
 * HAMMER2 offers shared and exclusive locks on inodes.  Pass a mask of
 * flags for options:
 *
 *      - pass HAMMER2_RESOLVE_SHARED if a shared lock is desired.  The
 *        inode locking function will automatically set the RDONLY flag.
 *
 *      - pass HAMMER2_RESOLVE_ALWAYS if you need the inode's meta-data.
 *        Most front-end inode locks do.
 *
 *      - pass HAMMER2_RESOLVE_NEVER if you do not want to require that
 *        the inode data be resolved.  This is used by the syncthr because
 *        it can run on an unresolved/out-of-sync cluster, and also by the
 *        vnode reclamation code to avoid unnecessary I/O (particularly when
 *        disposing of hundreds of thousands of cached vnodes).
 *
 * The inode locking function locks the inode itself, resolves any stale
 * chains in the inode's cluster, and allocates a fresh copy of the
 * cluster with 1 ref and all the underlying chains locked.
 *
 * ip->cluster will be stable while the inode is locked.
 *
 * NOTE: We don't combine the inode/chain lock because putting away an
 *       inode would otherwise confuse multiple lock holders of the inode.
 *
 * NOTE: In-memory inodes always point to hardlink targets (the actual file),
 *       and never point to a hardlink pointer.
 *
 * NOTE: If caller passes HAMMER2_RESOLVE_RDONLY the exclusive locking code
 *       will feel free to reduce the chain set in the cluster as an
 *       optimization.  It will still be validated against the quorum if
 *       appropriate, but the optimization might be able to reduce data
 *       accesses to one node.  This flag is automatically set if the inode
 *       is locked with HAMMER2_RESOLVE_SHARED.
 */
void
hammer2_inode_lock(hammer2_inode_t *ip, int how)
{
        hammer2_inode_ref(ip);

        /*
         * Inode structure mutex
         */
        if (how & HAMMER2_RESOLVE_SHARED) {
                /*how |= HAMMER2_RESOLVE_RDONLY; not used */
                hammer2_mtx_sh(&ip->lock);
        } else {
                hammer2_mtx_ex(&ip->lock);
        }
}
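
/*
 * Usage sketch (illustrative, not compiled): a typical front-end caller
 * pairs the lock with hammer2_inode_unlock(), which also disposes of
 * the ref acquired above:
 *
 *      hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
 *      ...examine or modify ip->meta...
 *      hammer2_inode_unlock(ip);
 *
 * A read-only path would pass HAMMER2_RESOLVE_SHARED |
 * HAMMER2_RESOLVE_ALWAYS to take the mutex shared instead.
 */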

/*
 * Select a chain out of an inode's cluster and lock it.
 *
 * The inode does not have to be locked.
 */
hammer2_chain_t *
hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how)
{
        hammer2_chain_t *chain;
        hammer2_cluster_t *cluster;

        hammer2_spin_sh(&ip->cluster_spin);
#if 0
        cluster = ip->cluster_cache;
        if (cluster) {
                if (clindex >= cluster->nchains)
                        chain = NULL;
                else
                        chain = cluster->array[clindex].chain;
                if (chain) {
                        hammer2_chain_ref(chain);
                        hammer2_spin_unsh(&ip->cluster_spin);
                        hammer2_chain_lock(chain, how);
                        return chain;
                }
        }
#endif

        cluster = &ip->cluster;
        if (clindex >= cluster->nchains)
                chain = NULL;
        else
                chain = cluster->array[clindex].chain;
        if (chain) {
                hammer2_chain_ref(chain);
                hammer2_spin_unsh(&ip->cluster_spin);
                hammer2_chain_lock(chain, how);
        } else {
                hammer2_spin_unsh(&ip->cluster_spin);
        }
        return chain;
}

hammer2_chain_t *
hammer2_inode_chain_and_parent(hammer2_inode_t *ip, int clindex,
                               hammer2_chain_t **parentp, int how)
{
        hammer2_chain_t *chain;
        hammer2_chain_t *parent;

        for (;;) {
                hammer2_spin_sh(&ip->cluster_spin);
                if (clindex >= ip->cluster.nchains)
                        chain = NULL;
                else
                        chain = ip->cluster.array[clindex].chain;
                if (chain) {
                        hammer2_chain_ref(chain);
                        hammer2_spin_unsh(&ip->cluster_spin);
                        hammer2_chain_lock(chain, how);
                } else {
                        hammer2_spin_unsh(&ip->cluster_spin);
                }

                /*
                 * Get parent, lock order must be (parent, chain).
                 *
                 * NOTE: chain is dereferenced unconditionally below, so
                 *       callers must guarantee that a chain exists at
                 *       clindex.
                 */
                parent = chain->parent;
                hammer2_chain_ref(parent);
                hammer2_chain_unlock(chain);
                hammer2_chain_lock(parent, how);
                hammer2_chain_lock(chain, how);
                if (ip->cluster.array[clindex].chain == chain &&
                    chain->parent == parent) {
                        break;
                }

                /*
                 * Retry
                 */
                hammer2_chain_unlock(chain);
                hammer2_chain_drop(chain);
                hammer2_chain_unlock(parent);
                hammer2_chain_drop(parent);
        }
        *parentp = parent;

        return chain;
}

void
hammer2_inode_unlock(hammer2_inode_t *ip)
{
        hammer2_mtx_unlock(&ip->lock);
        hammer2_inode_drop(ip);
}

/*
 * Temporarily release a lock held shared or exclusive.  The caller must
 * hold the lock shared or exclusive on call; it will be released on
 * return and can later be re-acquired in its prior state with
 * hammer2_inode_lock_temp_restore().
 */
hammer2_mtx_state_t
hammer2_inode_lock_temp_release(hammer2_inode_t *ip)
{
        return hammer2_mtx_temp_release(&ip->lock);
}

void
hammer2_inode_lock_temp_restore(hammer2_inode_t *ip, hammer2_mtx_state_t ostate)
{
        hammer2_mtx_temp_restore(&ip->lock, ostate);
}
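
/*
 * Usage sketch (illustrative, not compiled): the temp release/restore
 * pair brackets an operation that could deadlock against another holder
 * of the inode lock, e.g. the vget() in hammer2_igetv() below:
 *
 *      hammer2_mtx_state_t ostate;
 *
 *      ostate = hammer2_inode_lock_temp_release(ip);
 *      error = vget(vp, LK_EXCLUSIVE);
 *      hammer2_inode_lock_temp_restore(ip, ostate);
 */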

/*
 * Upgrade a shared inode lock to exclusive and return.  If the inode lock
 * is already held exclusively this is a NOP.
 *
 * The caller MUST hold the inode lock either shared or exclusive on call
 * and will own the lock exclusively on return.
 *
 * Returns non-zero if the lock was already exclusive prior to the upgrade.
 */
int
hammer2_inode_lock_upgrade(hammer2_inode_t *ip)
{
        int wasexclusive;

        if (mtx_islocked_ex(&ip->lock)) {
                wasexclusive = 1;
        } else {
                hammer2_mtx_unlock(&ip->lock);
                hammer2_mtx_ex(&ip->lock);
                wasexclusive = 0;
        }
        return wasexclusive;
}

/*
 * Downgrade an inode lock from exclusive to shared only if the inode
 * lock was previously shared.  If the inode lock was previously exclusive,
 * this is a NOP.
 */
void
hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int wasexclusive)
{
        if (wasexclusive == 0)
                mtx_downgrade(&ip->lock);
}
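
/*
 * Usage sketch (illustrative, not compiled): upgrade/downgrade are
 * designed to be used as a pair so the lock is left in the state the
 * caller found it, as hammer2_igetv() does below:
 *
 *      int wasexclusive;
 *
 *      wasexclusive = hammer2_inode_lock_upgrade(ip);
 *      ...work requiring the exclusive lock...
 *      hammer2_inode_lock_downgrade(ip, wasexclusive);
 */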

/*
 * Lookup an inode by inode number
 */
hammer2_inode_t *
hammer2_inode_lookup(hammer2_pfs_t *pmp, hammer2_tid_t inum)
{
        hammer2_inode_t *ip;

        KKASSERT(pmp);
        if (pmp->spmp_hmp) {
                ip = NULL;
        } else {
                hammer2_spin_ex(&pmp->inum_spin);
                ip = RB_LOOKUP(hammer2_inode_tree, &pmp->inum_tree, inum);
                if (ip)
                        hammer2_inode_ref(ip);
                hammer2_spin_unex(&pmp->inum_spin);
        }
        return(ip);
}
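
/*
 * Usage sketch (illustrative, not compiled): the returned inode is
 * referenced but not locked, so a caller typically locks it and must
 * eventually drop the lookup ref separately:
 *
 *      ip = hammer2_inode_lookup(pmp, inum);
 *      if (ip) {
 *              hammer2_inode_lock(ip, 0);
 *              ...use ip...
 *              hammer2_inode_unlock(ip);       (drops the lock's ref)
 *              hammer2_inode_drop(ip);         (drops the lookup ref)
 *      }
 */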

/*
 * Adding a ref to an inode is only legal if the inode already has at least
 * one ref.
 *
 * (can be called with spinlock held)
 */
void
hammer2_inode_ref(hammer2_inode_t *ip)
{
        atomic_add_int(&ip->refs, 1);
        if (hammer2_debug & 0x80000) {
                kprintf("INODE+1 %p (%d->%d)\n", ip, ip->refs - 1, ip->refs);
                print_backtrace(8);
        }
}

/*
 * Drop an inode reference, freeing the inode when the last reference goes
 * away.
 */
void
hammer2_inode_drop(hammer2_inode_t *ip)
{
        hammer2_pfs_t *pmp;
        u_int refs;

        while (ip) {
                if (hammer2_debug & 0x80000) {
                        kprintf("INODE-1 %p (%d->%d)\n",
                                ip, ip->refs, ip->refs - 1);
                        print_backtrace(8);
                }
                refs = ip->refs;
                cpu_ccfence();
                if (refs == 1) {
                        /*
                         * Transition to zero, must interlock with
                         * the inode inumber lookup tree (if applicable).
                         * It should not be possible for anyone to race
                         * the transition to 0.
                         */
                        pmp = ip->pmp;
                        KKASSERT(pmp);
                        hammer2_spin_ex(&pmp->inum_spin);

                        if (atomic_cmpset_int(&ip->refs, 1, 0)) {
                                KKASSERT(hammer2_mtx_refs(&ip->lock) == 0);
                                if (ip->flags & HAMMER2_INODE_ONRBTREE) {
                                        atomic_clear_int(&ip->flags,
                                                     HAMMER2_INODE_ONRBTREE);
                                        RB_REMOVE(hammer2_inode_tree,
                                                  &pmp->inum_tree, ip);
                                }
                                hammer2_spin_unex(&pmp->inum_spin);

                                ip->pmp = NULL;

#if 0
                                /*
                                 * Clean out the cluster cache
                                 */
                                hammer2_cluster_t *tmpclu;
                                tmpclu = ip->cluster_cache;
                                if (tmpclu) {
                                        ip->cluster_cache = NULL;
                                        hammer2_cluster_drop(tmpclu);
                                }
#endif

                                /*
                                 * Cleaning out ip->cluster isn't entirely
                                 * trivial.
                                 */
                                hammer2_inode_repoint(ip, NULL, NULL);

                                kfree(ip, pmp->minode);
                                atomic_add_long(&pmp->inmem_inodes, -1);
                                ip = NULL;      /* will terminate loop */
                        } else {
                                hammer2_spin_unex(&ip->pmp->inum_spin);
                        }
                } else {
                        /*
                         * Non-zero transition
                         */
                        if (atomic_cmpset_int(&ip->refs, refs, refs - 1))
                                break;
                }
        }
}

/*
 * Get the vnode associated with the given inode, allocating the vnode if
 * necessary.  The vnode will be returned exclusively locked.
 *
 * The caller must lock the inode (shared or exclusive).
 *
 * Great care must be taken to avoid deadlocks and vnode acquisition/reclaim
 * races.
 */
struct vnode *
hammer2_igetv(hammer2_inode_t *ip, int *errorp)
{
        hammer2_pfs_t *pmp;
        struct vnode *vp;

        pmp = ip->pmp;
        KKASSERT(pmp != NULL);
        *errorp = 0;

        for (;;) {
                /*
                 * Attempt to reuse an existing vnode assignment.  It is
                 * possible to race a reclaim so the vget() may fail.  The
                 * inode must be unlocked during the vget() to avoid a
                 * deadlock against a reclaim.
                 */
                int wasexclusive;

                vp = ip->vp;
                if (vp) {
                        /*
                         * Inode must be unlocked during the vget() to avoid
                         * possible deadlocks, but leave the ip ref intact.
                         *
                         * vnode is held to prevent destruction during the
                         * vget().  The vget() can still fail if we lost
                         * a reclaim race on the vnode.
                         */
                        hammer2_mtx_state_t ostate;

                        vhold(vp);
                        ostate = hammer2_inode_lock_temp_release(ip);
                        if (vget(vp, LK_EXCLUSIVE)) {
                                vdrop(vp);
                                hammer2_inode_lock_temp_restore(ip, ostate);
                                continue;
                        }
                        hammer2_inode_lock_temp_restore(ip, ostate);
                        vdrop(vp);
                        /* vp still locked and ref from vget */
                        if (ip->vp != vp) {
                                kprintf("hammer2: igetv race %p/%p\n",
                                        ip->vp, vp);
                                vput(vp);
                                continue;
                        }
                        *errorp = 0;
                        break;
                }

                /*
                 * No vnode exists, allocate a new vnode.  Beware of
                 * allocation races.  This function will return an
                 * exclusively locked and referenced vnode.
                 */
                *errorp = getnewvnode(VT_HAMMER2, pmp->mp, &vp, 0, 0);
                if (*errorp) {
                        kprintf("hammer2: igetv getnewvnode failed %d\n",
                                *errorp);
                        vp = NULL;
                        break;
                }

                /*
                 * Lock the inode and check for an allocation race.
                 */
                wasexclusive = hammer2_inode_lock_upgrade(ip);
                if (ip->vp != NULL) {
                        vp->v_type = VBAD;
                        vx_put(vp);
                        hammer2_inode_lock_downgrade(ip, wasexclusive);
                        continue;
                }

                switch (ip->meta.type) {
                case HAMMER2_OBJTYPE_DIRECTORY:
                        vp->v_type = VDIR;
                        break;
                case HAMMER2_OBJTYPE_REGFILE:
                        vp->v_type = VREG;
                        vinitvmio(vp, ip->meta.size,
                                  HAMMER2_LBUFSIZE,
                                  (int)ip->meta.size & HAMMER2_LBUFMASK);
                        break;
                case HAMMER2_OBJTYPE_SOFTLINK:
                        /*
                         * XXX for now we are using the generic file_read
                         * and file_write code so we need a buffer cache
                         * association.
                         */
                        vp->v_type = VLNK;
                        vinitvmio(vp, ip->meta.size,
                                  HAMMER2_LBUFSIZE,
                                  (int)ip->meta.size & HAMMER2_LBUFMASK);
                        break;
                case HAMMER2_OBJTYPE_CDEV:
                        vp->v_type = VCHR;
                        /* fall through */
                case HAMMER2_OBJTYPE_BDEV:
                        vp->v_ops = &pmp->mp->mnt_vn_spec_ops;
                        if (ip->meta.type != HAMMER2_OBJTYPE_CDEV)
                                vp->v_type = VBLK;
                        addaliasu(vp,
                                  ip->meta.rmajor,
                                  ip->meta.rminor);
                        break;
                case HAMMER2_OBJTYPE_FIFO:
                        vp->v_type = VFIFO;
                        vp->v_ops = &pmp->mp->mnt_vn_fifo_ops;
                        break;
                case HAMMER2_OBJTYPE_SOCKET:
                        vp->v_type = VSOCK;
                        break;
                default:
                        panic("hammer2: unhandled objtype %d",
                              ip->meta.type);
                        break;
                }

                if (ip == pmp->iroot)
                        vsetflags(vp, VROOT);

                vp->v_data = ip;
                ip->vp = vp;
                hammer2_inode_ref(ip);          /* vp association */
                hammer2_inode_lock_downgrade(ip, wasexclusive);
                break;
        }

        /*
         * Return non-NULL vp and *errorp == 0, or NULL vp and *errorp != 0.
         */
        if (hammer2_debug & 0x0002) {
                kprintf("igetv vp %p refs 0x%08x aux 0x%08x\n",
                        vp, vp->v_refcnt, vp->v_auxrefs);
        }
        return (vp);
}
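
/*
 * Usage sketch (illustrative, not compiled): a front-end VNOP resolving
 * an inode to a vnode while holding the inode lock:
 *
 *      hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
 *      vp = hammer2_igetv(ip, &error);
 *      hammer2_inode_unlock(ip);
 *      if (vp == NULL)
 *              return (error);
 *      (vp is returned exclusively locked and referenced)
 */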

/*
 * Returns the inode associated with the passed-in cluster, creating the
 * inode if necessary and synchronizing it to the passed-in cluster otherwise.
 * When synchronizing, if idx >= 0, only cluster index (idx) is synchronized.
 * Otherwise the whole cluster is synchronized.
 *
 * The passed-in cluster must be locked and will remain locked on return.
 * The returned inode will be locked and the caller may dispose of both
 * via hammer2_inode_unlock() + hammer2_inode_drop().  However, if the caller
 * needs to resolve a hardlink it must ref/unlock/relock/drop the inode.
 *
 * The hammer2_inode structure regulates the interface between the high level
 * kernel VNOPS API and the filesystem backend (the chains).
 *
 * On return the inode is locked with the supplied cluster.
 */
hammer2_inode_t *
hammer2_inode_get(hammer2_pfs_t *pmp, hammer2_inode_t *dip,
                  hammer2_cluster_t *cluster, int idx)
{
        hammer2_inode_t *nip;
        const hammer2_inode_data_t *iptmp;
        const hammer2_inode_data_t *nipdata;

        KKASSERT(cluster == NULL ||
                 hammer2_cluster_type(cluster) == HAMMER2_BREF_TYPE_INODE);
        KKASSERT(pmp);

        /*
         * Interlocked lookup/ref of the inode.  This code is only needed
         * when looking up inodes with nlinks != 0 (TODO: optimize out
         * otherwise and test for duplicates).
         *
         * Cluster can be NULL during the initial pfs allocation.
         */
again:
        while (cluster) {
                iptmp = &hammer2_cluster_rdata(cluster)->ipdata;
                nip = hammer2_inode_lookup(pmp, iptmp->meta.inum);
                if (nip == NULL)
                        break;

                hammer2_mtx_ex(&nip->lock);

                /*
                 * Handle SMP race (not applicable to the super-root spmp
                 * which can't index inodes due to duplicative inode numbers).
                 */
                if (pmp->spmp_hmp == NULL &&
                    (nip->flags & HAMMER2_INODE_ONRBTREE) == 0) {
                        hammer2_mtx_unlock(&nip->lock);
                        hammer2_inode_drop(nip);
                        continue;
                }
                if (idx >= 0)
                        hammer2_inode_repoint_one(nip, cluster, idx);
                else
                        hammer2_inode_repoint(nip, NULL, cluster);

                return nip;
        }

        /*
         * We couldn't find the inode number, create a new inode.
         */
        nip = kmalloc(sizeof(*nip), pmp->minode, M_WAITOK | M_ZERO);
        spin_init(&nip->cluster_spin, "h2clspin");
        atomic_add_long(&pmp->inmem_inodes, 1);
        hammer2_pfs_memory_inc(pmp);
        hammer2_pfs_memory_wakeup(pmp);
        if (pmp->spmp_hmp)
                nip->flags = HAMMER2_INODE_SROOT;

        /*
         * Initialize nip's cluster.  A cluster is provided for normal
         * inodes but typically not for the super-root or PFS inodes.
         */
        nip->cluster.refs = 1;
        nip->cluster.pmp = pmp;
        nip->cluster.flags |= HAMMER2_CLUSTER_INODE;
        if (cluster) {
                nipdata = &hammer2_cluster_rdata(cluster)->ipdata;
                nip->meta = nipdata->meta;
                atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD);
                hammer2_inode_repoint(nip, NULL, cluster);
        } else {
                nip->meta.inum = 1;             /* PFS inum is always 1 XXX */
                /* mtime will be updated when a cluster is available */
                atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD);/*XXX*/
        }

        nip->pmp = pmp;

        /*
         * ref and lock on nip gives it state compatible to after a
         * hammer2_inode_lock() call.
         */
        nip->refs = 1;
        hammer2_mtx_init(&nip->lock, "h2inode");
        hammer2_mtx_ex(&nip->lock);
        /* combination of thread lock and chain lock == inode lock */

        /*
         * Attempt to add the inode.  If it fails we raced another inode
         * get.  Undo all the work and try again.
         */
        if (pmp->spmp_hmp == NULL) {
                hammer2_spin_ex(&pmp->inum_spin);
                if (RB_INSERT(hammer2_inode_tree, &pmp->inum_tree, nip)) {
                        hammer2_spin_unex(&pmp->inum_spin);
                        hammer2_mtx_unlock(&nip->lock);
                        hammer2_inode_drop(nip);
                        goto again;
                }
                atomic_set_int(&nip->flags, HAMMER2_INODE_ONRBTREE);
                hammer2_spin_unex(&pmp->inum_spin);
        }

        return (nip);
}

/*
 * Create a new inode in the specified directory using the vattr to
 * figure out the type.  A non-zero type field overrides vattr.
 *
 * If no error occurs the new inode with its cluster locked is returned.
 * However, when creating an OBJTYPE_HARDLINK, the caller can assume
 * that NULL will be returned (that is, the caller already has the inode
 * in-hand and is creating a hardlink to it, we do not need to return a
 * representative ip).
 *
 * If vap and/or cred are NULL the related fields are not set and the
 * inode type defaults to a directory.  This is used when creating PFSs
 * under the super-root, so the inode number is set to 1 in this case.
 *
 * dip is not locked on entry.
 *
 * NOTE: This function is used to create all manner of inodes, including
 *       super-root entries for snapshots and PFSs.  When used to create a
 *       snapshot the inode will be temporarily associated with the spmp.
 *
 * NOTE: When creating a normal file or directory the caller must call this
 *       function twice, once to create the actual inode and once to create
 *       the hardlink representing the directory entry.  This function is
 *       only called once when creating a softlink, since the softlink
 *       inode doubles as its own directory entry.
 *
 * NOTE: When creating a hardlink target (a real inode), name/name_len is
 *       passed as NULL/0, and caller should pass lhc as inum.
 */
hammer2_inode_t *
hammer2_inode_create(hammer2_inode_t *dip, hammer2_inode_t *pip,
                     struct vattr *vap, struct ucred *cred,
                     const uint8_t *name, size_t name_len, hammer2_key_t lhc,
                     hammer2_key_t inum,
                     uint8_t type, uint8_t target_type,
                     int flags, int *errorp)
{
        hammer2_xop_create_t *xop;
        hammer2_inode_t *nip;
        int error;
        uid_t xuid;
        uuid_t pip_uid;
        uuid_t pip_gid;
        uint32_t pip_mode;
        uint8_t pip_comp_algo;
        uint8_t pip_check_algo;
        hammer2_tid_t pip_inum;

        if (name)
                lhc = hammer2_dirhash(name, name_len);
        *errorp = 0;
        nip = NULL;

        /*
         * Locate the inode or indirect block to create the new
         * entry in.  At the same time check for key collisions
         * and iterate until we don't get one.
         *
         * NOTE: hidden inodes do not have iterators.
         *
         * Lock the directory exclusively for now to guarantee that
         * we can find an unused lhc for the name.  Due to collisions,
         * two different creates can end up with the same lhc so we
         * cannot depend on the OS to prevent the collision.
         */
        hammer2_inode_lock(dip, 0);

        pip_uid = pip->meta.uid;
        pip_gid = pip->meta.gid;
        pip_mode = pip->meta.mode;
        pip_comp_algo = pip->meta.comp_algo;
        pip_check_algo = pip->meta.check_algo;
        pip_inum = (pip == pip->pmp->iroot) ? 0 : pip->meta.inum;

        /*
         * If name specified, locate an unused key in the collision space.
         * Otherwise use the passed-in lhc directly.
         */
        if (name) {
                hammer2_xop_scanlhc_t *sxop;
                hammer2_key_t lhcbase;

                lhcbase = lhc;
                sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
                sxop->lhc = lhc;
                hammer2_xop_start(&sxop->head, hammer2_xop_scanlhc);
                while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
                        if (lhc != sxop->head.cluster.focus->bref.key)
                                break;
                        ++lhc;
                }
                hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

                if (error) {
                        if (error != ENOENT)
                                goto done2;
                        ++lhc;
                        error = 0;
                }
                if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
                        error = ENOSPC;
                        goto done2;
                }
        }

        /*
         * Create the inode with the lhc as the key.
         */
        xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
        xop->lhc = lhc;
        xop->flags = flags;
        bzero(&xop->meta, sizeof(xop->meta));

        if (vap) {
                xop->meta.type = hammer2_get_obj_type(vap->va_type);

                switch (xop->meta.type) {
                case HAMMER2_OBJTYPE_CDEV:
                case HAMMER2_OBJTYPE_BDEV:
                        xop->meta.rmajor = vap->va_rmajor;
                        xop->meta.rminor = vap->va_rminor;
                        break;
                default:
                        break;
                }
                type = xop->meta.type;
        } else {
                xop->meta.type = type;
                xop->meta.target_type = target_type;
        }
        xop->meta.inum = inum;
        xop->meta.iparent = pip_inum;

        /* Inherit parent's inode compression mode. */
        xop->meta.comp_algo = pip_comp_algo;
        xop->meta.check_algo = pip_check_algo;
        xop->meta.version = HAMMER2_INODE_VERSION_ONE;
        hammer2_update_time(&xop->meta.ctime);
        xop->meta.mtime = xop->meta.ctime;
        if (vap)
                xop->meta.mode = vap->va_mode;
        xop->meta.nlinks = 1;
        if (vap) {
                if (dip->pmp) {
                        xuid = hammer2_to_unix_xid(&pip_uid);
                        xuid = vop_helper_create_uid(dip->pmp->mp,
                                                     pip_mode,
                                                     xuid,
                                                     cred,
                                                     &vap->va_mode);
                } else {
                        /* super-root has no dip and/or pmp */
                        xuid = 0;
                }
                if (vap->va_vaflags & VA_UID_UUID_VALID)
                        xop->meta.uid = vap->va_uid_uuid;
                else if (vap->va_uid != (uid_t)VNOVAL)
                        hammer2_guid_to_uuid(&xop->meta.uid, vap->va_uid);
                else
                        hammer2_guid_to_uuid(&xop->meta.uid, xuid);

                if (vap->va_vaflags & VA_GID_UUID_VALID)
                        xop->meta.gid = vap->va_gid_uuid;
                else if (vap->va_gid != (gid_t)VNOVAL)
                        hammer2_guid_to_uuid(&xop->meta.gid, vap->va_gid);
                else
                        xop->meta.gid = pip_gid;
        }

        /*
         * Regular files and softlinks allow a small amount of data to be
         * directly embedded in the inode.  This flag will be cleared if
         * the size is extended past the embedded limit.
         */
        if (xop->meta.type == HAMMER2_OBJTYPE_REGFILE ||
            xop->meta.type == HAMMER2_OBJTYPE_SOFTLINK ||
            xop->meta.type == HAMMER2_OBJTYPE_HARDLINK) {
                xop->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
        }
        if (name) {
                hammer2_xop_setname(&xop->head, name, name_len);
        } else {
                name_len = hammer2_xop_setname_inum(&xop->head, inum);
                KKASSERT(lhc == inum);
        }
        xop->meta.name_len = name_len;
        xop->meta.name_key = lhc;
        KKASSERT(name_len < HAMMER2_INODE_MAXNAME);

        hammer2_xop_start(&xop->head, hammer2_inode_xop_create);

        error = hammer2_xop_collect(&xop->head, 0);
#if INODE_DEBUG
        kprintf("CREATE INODE %*.*s\n",
                (int)name_len, (int)name_len, name);
#endif

        if (error) {
                *errorp = error;
                goto done;
        }

        /*
         * Set up the new inode if not a hardlink pointer.
         *
         * NOTE: *_get() integrates chain's lock into the inode lock.
         *
         * NOTE: Only one new inode can currently be created per
         *       transaction.  If the need arises we can adjust
         *       hammer2_trans_init() to allow more.
         *
         * NOTE: nipdata will have chain's blockset data.
         */
        if (type != HAMMER2_OBJTYPE_HARDLINK) {
                nip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
                nip->comp_heuristic = 0;
        } else {
                nip = NULL;
        }

done:
        hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
done2:
        hammer2_inode_unlock(dip);

        return (nip);
}
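
/*
 * Usage sketch (illustrative, not compiled; new_inum and the surrounding
 * VNOP context are hypothetical): creating a regular file inode, with
 * vap supplying the type and lhc derived internally from the name:
 *
 *      nip = hammer2_inode_create(dip, dip, vap, ap->a_cred,
 *                                 name, name_len, 0, new_inum,
 *                                 0, 0, 0, &error);
 *      if (nip == NULL)
 *              return (error);
 */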

/*
 * Connect the disconnected inode (ip) to the directory (dip) with the
 * specified (name, name_len).  If name is NULL, (lhc) will be used as
 * the directory key and the inode's embedded name will not be modified
 * for future recovery purposes.
 *
 * dip and ip must both be locked exclusively (dip in particular to avoid
 * lhc collisions).
 */
static
int
hammer2_inode_connect(hammer2_inode_t *dip, hammer2_inode_t *ip,
                      const char *name, size_t name_len,
                      hammer2_key_t lhc)
{
        hammer2_xop_scanlhc_t *sxop;
        hammer2_xop_connect_t *xop;
        hammer2_key_t lhcbase;
        int error;

        /*
         * Calculate the lhc and resolve the collision space.
         */
        if (name) {
                lhc = lhcbase = hammer2_dirhash(name, name_len);
                sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
                sxop->lhc = lhc;
                hammer2_xop_start(&sxop->head, hammer2_xop_scanlhc);
                while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
                        if (lhc != sxop->head.cluster.focus->bref.key)
                                break;
                        ++lhc;
                }
                hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

                if (error) {
                        if (error != ENOENT)
                                goto done;
                        ++lhc;
                        error = 0;
                }
                if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
                        error = ENOSPC;
                        goto done;
                }
        } else {
                error = 0;
        }

        /*
         * Connect her up
         */
        xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
        if (name)
                hammer2_xop_setname(&xop->head, name, name_len);
        hammer2_xop_setip2(&xop->head, ip);
        xop->lhc = lhc;
        hammer2_xop_start(&xop->head, hammer2_inode_xop_connect);
        error = hammer2_xop_collect(&xop->head, 0);
        hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

        /*
         * On success make the same adjustments to ip->meta or the
         * next flush may blow up the chain.
         */
        if (error == 0) {
                hammer2_inode_modify(ip);
                ip->meta.name_key = lhc;
                if (name)
                        ip->meta.name_len = name_len;
        }
done:
        return error;
}

/*
 * Repoint ip->cluster's chains to cluster's chains and fixup the default
 * focus.  All items, valid or invalid, are repointed.  hammer2_xop_start()
 * filters out invalid or non-matching elements.
 *
 * The caller must hold the inode exclusively locked; the cluster, if not
 * NULL, must also be locked.
 *
 * Cluster may be NULL to clean out any chains in ip->cluster.
 */
void
hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_inode_t *pip,
                      hammer2_cluster_t *cluster)
{
        hammer2_chain_t *dropch[HAMMER2_MAXCLUSTER];
        hammer2_chain_t *ochain;
        hammer2_chain_t *nchain;
        int i;

        bzero(dropch, sizeof(dropch));

        /*
         * Replace chains in ip->cluster with chains from cluster and
         * adjust the focus if necessary.
         *
         * NOTE: nchain and/or ochain can be NULL due to gaps
         *       in the cluster arrays.
         */
        hammer2_spin_ex(&ip->cluster_spin);
        for (i = 0; cluster && i < cluster->nchains; ++i) {
                /*
                 * Do not replace elements which are the same.  Also handle
                 * element count discrepancies.
                 */
                nchain = cluster->array[i].chain;
                if (i < ip->cluster.nchains) {
                        ochain = ip->cluster.array[i].chain;
                        if (ochain == nchain)
                                continue;
                } else {
                        ochain = NULL;
                }

                /*
                 * Make adjustments
                 */
                ip->cluster.array[i].chain = nchain;
                ip->cluster.array[i].flags &= ~HAMMER2_CITEM_INVALID;
                ip->cluster.array[i].flags |= cluster->array[i].flags &
                                              HAMMER2_CITEM_INVALID;
                if (nchain)
                        hammer2_chain_ref(nchain);
                dropch[i] = ochain;
        }

        /*
         * Release any left-over chains in ip->cluster.
         */
        while (i < ip->cluster.nchains) {
                nchain = ip->cluster.array[i].chain;
                if (nchain) {
                        ip->cluster.array[i].chain = NULL;
                        ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
                }
                dropch[i] = nchain;
                ++i;
        }

        /*
         * Fixup fields.  Note that the inode-embedded cluster is never
         * directly locked.
         */
        if (cluster) {
                ip->cluster.nchains = cluster->nchains;
                ip->cluster.focus = cluster->focus;
                ip->cluster.flags = cluster->flags & ~HAMMER2_CLUSTER_LOCKED;
        } else {
                ip->cluster.nchains = 0;
                ip->cluster.focus = NULL;
                ip->cluster.flags &= ~HAMMER2_CLUSTER_ZFLAGS;
        }

        hammer2_spin_unex(&ip->cluster_spin);

        /*
         * Cleanup outside of spinlock
         */
        while (--i >= 0) {
                if (dropch[i])
                        hammer2_chain_drop(dropch[i]);
        }
}

/*
 * Repoint a single element from the cluster to the ip.  Used by the
 * synchronization threads to piecemeal update inodes.  Does not change
 * focus and requires inode to be re-locked to clean-up flags (XXX).
 */
void
hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
                          int idx)
{
        hammer2_chain_t *ochain;
        hammer2_chain_t *nchain;
        int i;

        hammer2_spin_ex(&ip->cluster_spin);
        KKASSERT(idx < cluster->nchains);
        if (idx < ip->cluster.nchains) {
                ochain = ip->cluster.array[idx].chain;
                nchain = cluster->array[idx].chain;
        } else {
                ochain = NULL;
                nchain = cluster->array[idx].chain;

                /*
                 * Invalidate any gap entries before extending nchains.
                 * The old nchains must be used as the loop base or the
                 * loop would never execute.
                 */
                for (i = ip->cluster.nchains; i <= idx; ++i) {
                        bzero(&ip->cluster.array[i],
                              sizeof(ip->cluster.array[i]));
                        ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
                }
                ip->cluster.nchains = idx + 1;
        }
        if (ochain != nchain) {
                /*
                 * Make adjustments.
                 */
                ip->cluster.array[idx].chain = nchain;
                ip->cluster.array[idx].flags &= ~HAMMER2_CITEM_INVALID;
                ip->cluster.array[idx].flags |= cluster->array[idx].flags &
                                                HAMMER2_CITEM_INVALID;
        }
        hammer2_spin_unex(&ip->cluster_spin);
        if (ochain != nchain) {
                if (nchain)
                        hammer2_chain_ref(nchain);
                if (ochain)
                        hammer2_chain_drop(ochain);
        }
}

/*
 * Called with a locked inode to finish unlinking an inode after xop_unlink
 * has been run.  This function is responsible for decrementing nlinks and
 * moving deleted inodes to the hidden directory if they are still open.
 *
 * We don't bother decrementing nlinks if the file is not open and this was
 * the last link.
 *
 * If the inode is a hardlink target its chain has not yet been deleted,
 * otherwise its chain has been deleted.
 *
 * If isopen then any prior deletion was not permanent and the inode must
 * be moved to the hidden directory.
 */
int
hammer2_inode_unlink_finisher(hammer2_inode_t *ip, int isopen)
{
        hammer2_pfs_t *pmp;
        int error;

        pmp = ip->pmp;

        /*
         * Decrement nlinks.  If this is the last link and the file is
         * not open, the chain has already been removed and we don't bother
         * dirtying the inode.
         */
        if (ip->meta.nlinks == 1) {
                atomic_set_int(&ip->flags, HAMMER2_INODE_ISUNLINKED);
                if (isopen == 0) {
                        atomic_set_int(&ip->flags, HAMMER2_INODE_ISDELETED);
                        return 0;
                }
        }

        hammer2_inode_modify(ip);
        --ip->meta.nlinks;
        if ((int64_t)ip->meta.nlinks < 0)
                ip->meta.nlinks = 0;    /* safety */

        /*
         * If nlinks is not zero we are done.  However, this should only be
         * possible with a hardlink target.  If the inode is an embedded
         * hardlink nlinks should have dropped to zero, warn and proceed
         * with the next step.
         */
        if (ip->meta.nlinks) {
                if ((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0)
                        return 0;
                kprintf("hammer2_inode_unlink: nlinks was not 0 (%jd)\n",
                        (intmax_t)ip->meta.nlinks);
                return 0;
        }

        /*
         * nlinks is now zero, the inode should have already been deleted.
         * If the file is open it was deleted non-permanently and must be
         * moved to the hidden directory.
         *
         * When moving to the hidden directory we force the name_key to the
         * inode number to avoid collisions.
         */
        if (isopen) {
                hammer2_inode_lock(pmp->ihidden, 0);
                error = hammer2_inode_connect(pmp->ihidden, ip,
                                              NULL, 0, ip->meta.inum);
                hammer2_inode_unlock(pmp->ihidden);
        } else {
                error = 0;
        }
        return error;
}
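
/*
 * Usage sketch (illustrative, not compiled; the VNOP context is
 * hypothetical): the unlink front-end runs the unlink XOP first, then
 * calls the finisher with the target's open state:
 *
 *      isopen = cache_isopen(ap->a_nch);
 *      ...run the unlink XOP and collect the result...
 *      if (error == 0 && ip) {
 *              hammer2_inode_unlink_finisher(ip, isopen);
 *              hammer2_inode_unlock(ip);
 *      }
 */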

/*
 * This is called from the mount code to initialize pmp->ihidden
 */
void
hammer2_inode_install_hidden(hammer2_pfs_t *pmp)
{
        int error;

        if (pmp->ihidden)
                return;

        hammer2_trans_init(pmp, 0);
        hammer2_inode_lock(pmp->iroot, 0);

        /*
         * Find the hidden directory
         */
        {
                hammer2_xop_lookup_t *xop;

                xop = hammer2_xop_alloc(pmp->iroot, HAMMER2_XOP_MODIFYING);
                xop->lhc = HAMMER2_INODE_HIDDENDIR;
                hammer2_xop_start(&xop->head, hammer2_xop_lookup);
                error = hammer2_xop_collect(&xop->head, 0);

                if (error == 0) {
                        /*
                         * Found the hidden directory
                         */
                        kprintf("PFS FOUND HIDDEN DIR\n");
                        pmp->ihidden = hammer2_inode_get(pmp, pmp->iroot,
                                                         &xop->head.cluster,
                                                         -1);
                        hammer2_inode_ref(pmp->ihidden);
                        hammer2_inode_unlock(pmp->ihidden);
                }
                hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
        }

        /*
         * Create the hidden directory if it could not be found.
         */
        if (error == ENOENT) {
                kprintf("PFS CREATE HIDDEN DIR\n");

                pmp->ihidden = hammer2_inode_create(pmp->iroot, pmp->iroot,
                                                    NULL, NULL,
                                                    NULL, 0,
                                /* lhc */           HAMMER2_INODE_HIDDENDIR,
                                /* inum */          HAMMER2_INODE_HIDDENDIR,
                                /* type */          HAMMER2_OBJTYPE_DIRECTORY,
                                /* target_type */   0,
                                /* flags */         0,
                                                    &error);
                if (pmp->ihidden) {
                        hammer2_inode_ref(pmp->ihidden);
                        hammer2_inode_unlock(pmp->ihidden);
                }
                if (error)
                        kprintf("PFS CREATE ERROR %d\n", error);
        }

        /*
         * Scan the hidden directory on-mount and destroy its contents
         */
        if (error == 0) {
                hammer2_xop_unlinkall_t *xop;

                hammer2_inode_lock(pmp->ihidden, 0);
                xop = hammer2_xop_alloc(pmp->ihidden, HAMMER2_XOP_MODIFYING);
                xop->key_beg = HAMMER2_KEY_MIN;
                xop->key_end = HAMMER2_KEY_MAX;
                hammer2_xop_start(&xop->head, hammer2_inode_xop_unlinkall);

                while ((error = hammer2_xop_collect(&xop->head, 0)) == 0) {
                        ;
                }
                hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
                hammer2_inode_unlock(pmp->ihidden);
        }

        hammer2_inode_unlock(pmp->iroot);
        hammer2_trans_done(pmp);
}

/*
 * Mark an inode as being modified, meaning that the caller will modify
 * ip->meta.
 *
 * If a vnode is present we set the vnode dirty and the nominal filesystem
 * sync will also handle synchronizing the inode meta-data.  If no vnode
 * is present we must ensure that the inode is on pmp->sideq.
 *
 * NOTE: No mtid (modify_tid) is passed into this routine.  The caller is
 *       only modifying the in-memory inode.  A modify_tid is synchronized
 *       later when the inode gets flushed.
 */
void
hammer2_inode_modify(hammer2_inode_t *ip)
{
        hammer2_pfs_t *pmp;

        atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
        if (ip->vp) {
                vsetisdirty(ip->vp);
        } else if ((pmp = ip->pmp) != NULL) {
                hammer2_inode_delayed_sideq(ip);
        }
}
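
/*
 * Usage sketch (illustrative, not compiled): meta-data updates occur
 * inside a transaction with the inode locked; the chain state is
 * synchronized later by the filesystem sync, or explicitly:
 *
 *      hammer2_trans_init(ip->pmp, 0);
 *      hammer2_inode_lock(ip, 0);
 *      hammer2_inode_modify(ip);
 *      ip->meta.mtime = mtime;
 *      hammer2_inode_chain_sync(ip);   (optional explicit sync)
 *      hammer2_inode_unlock(ip);
 *      hammer2_trans_done(ip->pmp);
 */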

/*
 * Synchronize the inode's frontend state with the chain state prior
 * to any explicit flush of the inode or any strategy write call.
 *
 * Called with a locked inode inside a transaction.
 */
void
hammer2_inode_chain_sync(hammer2_inode_t *ip)
{
        if (ip->flags & (HAMMER2_INODE_RESIZED | HAMMER2_INODE_MODIFIED)) {
                hammer2_xop_fsync_t *xop;
                int error;

                xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
                xop->clear_directdata = 0;
                if (ip->flags & HAMMER2_INODE_RESIZED) {
                        if ((ip->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) &&
                            ip->meta.size > HAMMER2_EMBEDDED_BYTES) {
                                ip->meta.op_flags &= ~HAMMER2_OPFLAG_DIRECTDATA;
                                xop->clear_directdata = 1;
                        }
                        xop->osize = ip->osize;
                } else {
                        xop->osize = ip->meta.size;     /* safety */
                }
                xop->ipflags = ip->flags;
                xop->meta = ip->meta;

                atomic_clear_int(&ip->flags, HAMMER2_INODE_RESIZED |
                                             HAMMER2_INODE_MODIFIED);
                hammer2_xop_start(&xop->head, hammer2_inode_xop_chain_sync);
                error = hammer2_xop_collect(&xop->head, 0);
                hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
                if (error == ENOENT)
                        error = 0;
                if (error) {
                        kprintf("hammer2: unable to fsync inode %p\n", ip);
                        /*
                        atomic_set_int(&ip->flags,
                                       xop->ipflags & (HAMMER2_INODE_RESIZED |
                                                       HAMMER2_INODE_MODIFIED));
                        */
                        /* XXX return error somehow? */
                }
        }
}

/*
 * The normal filesystem sync no longer has visibility to an inode structure
 * after its vnode has been reclaimed.  In this situation an unlinked-but-open
 * inode or a dirty inode may require additional processing to synchronize
 * ip->meta to its underlying cluster nodes.
 *
 * In particular, reclaims can occur in almost any state (for example, when
 * doing operations on unrelated vnodes) and flushing the reclaimed inode
 * in the reclaim path itself is a non-starter.
 *
 * Caller must be in a transaction.
 */
void
hammer2_inode_run_sideq(hammer2_pfs_t *pmp)
{
	hammer2_xop_destroy_t *xop;
	hammer2_inode_sideq_t *ipul;
	hammer2_inode_t *ip;
	int error;

	if (TAILQ_EMPTY(&pmp->sideq))
		return;

	LOCKSTART;
	hammer2_spin_ex(&pmp->list_spin);
	while ((ipul = TAILQ_FIRST(&pmp->sideq)) != NULL) {
		TAILQ_REMOVE(&pmp->sideq, ipul, entry);
		ip = ipul->ip;
		KKASSERT(ip->flags & HAMMER2_INODE_ONSIDEQ);
		atomic_clear_int(&ip->flags, HAMMER2_INODE_ONSIDEQ);
		hammer2_spin_unex(&pmp->list_spin);
		kfree(ipul, pmp->minode);

		hammer2_inode_lock(ip, 0);
		if (ip->flags & HAMMER2_INODE_ISUNLINKED) {
			/*
			 * The inode was unlinked while open, causing H2
			 * to relink it to a hidden directory to allow
			 * cluster operations to continue until close.
			 *
			 * The inode must be deleted and destroyed.
			 */
			xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
			hammer2_xop_start(&xop->head,
					  hammer2_inode_xop_destroy);
			error = hammer2_xop_collect(&xop->head, 0);
			hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

			atomic_clear_int(&ip->flags, HAMMER2_INODE_ISDELETED);
		} else {
			/*
			 * The inode was dirty as-of the reclaim, requiring
			 * synchronization of ip->meta with its underlying
			 * chains.
			 */
			hammer2_inode_chain_sync(ip);
		}

		hammer2_inode_unlock(ip);
		hammer2_inode_drop(ip);			/* ipul ref */

		hammer2_spin_ex(&pmp->list_spin);
	}
	hammer2_spin_unex(&pmp->list_spin);
	LOCKSTOP;
}
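
/*
 * Usage sketch (hypothetical caller, for illustration only): the
 * filesystem sync path would typically run the sideq from within a
 * flush transaction, e.g.:
 *
 *	hammer2_trans_init(pmp, HAMMER2_TRANS_ISFLUSH);
 *	hammer2_inode_run_sideq(pmp);
 *	hammer2_trans_done(pmp);
 */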

/*
 * Inode create helper (threaded, backend)
 *
 * Used by ncreate, nmknod, nsymlink, nmkdir.
 * Used by nlink and rename to create HARDLINK pointers.
 *
 * Frontend holds the parent directory ip locked exclusively.  We
 * create the inode and feed the exclusively locked chain to the
 * frontend.
 */
void
hammer2_inode_xop_create(hammer2_xop_t *arg, int clindex)
{
	hammer2_xop_create_t *xop = &arg->xop_create;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_key_t key_next;
	int cache_index = -1;
	int error;

	if (hammer2_debug & 0x0001)
		kprintf("inode_create lhc %016jx clindex %d\n",
			xop->lhc, clindex);

	parent = hammer2_inode_chain(xop->head.ip1, clindex,
				     HAMMER2_RESOLVE_ALWAYS);
	if (parent == NULL) {
		error = EIO;
		chain = NULL;
		goto fail;
	}
	chain = hammer2_chain_lookup(&parent, &key_next,
				     xop->lhc, xop->lhc,
				     &cache_index, 0);
	if (chain) {
		error = EEXIST;
		goto fail;
	}

	error = hammer2_chain_create(&parent, &chain,
				     xop->head.ip1->pmp,
				     xop->lhc, 0,
				     HAMMER2_BREF_TYPE_INODE,
				     HAMMER2_INODE_BYTES,
				     xop->head.mtid, 0, xop->flags);
	if (error == 0) {
		hammer2_chain_modify(chain, xop->head.mtid, 0, 0);
		chain->data->ipdata.meta = xop->meta;
		if (xop->head.name1) {
			bcopy(xop->head.name1,
			      chain->data->ipdata.filename,
			      xop->head.name1_len);
			chain->data->ipdata.meta.name_len = xop->head.name1_len;
		}
		chain->data->ipdata.meta.name_key = xop->lhc;
	}
fail:
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	hammer2_xop_feed(&xop->head, chain, clindex, error);
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
}
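
/*
 * Frontend sketch (hypothetical, for illustration only): a dispatch
 * from ncreate would look roughly like the following, with lhc, nip,
 * name and name_len prepared by the caller:
 *
 *	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
 *	xop->lhc = lhc;
 *	xop->flags = 0;
 *	xop->meta = nip->meta;
 *	hammer2_xop_setname(&xop->head, name, name_len);
 *	hammer2_xop_start(&xop->head, hammer2_inode_xop_create);
 *	error = hammer2_xop_collect(&xop->head, 0);
 */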

/*
 * Inode delete helper (backend, threaded)
 *
 * Generally used by hammer2_inode_run_sideq()
 */
void
hammer2_inode_xop_destroy(hammer2_xop_t *arg, int clindex)
{
	hammer2_xop_destroy_t *xop = &arg->xop_destroy;
	hammer2_pfs_t *pmp;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_inode_t *ip;
	int error;

	/*
	 * We need the precise parent chain to issue the deletion.
	 */
	ip = xop->head.ip1;
	pmp = ip->pmp;
	chain = NULL;

	parent = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
	if (parent)
		hammer2_chain_getparent(&parent, HAMMER2_RESOLVE_ALWAYS);
	if (parent == NULL) {
		error = EIO;
		goto done;
	}
	chain = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
	if (chain == NULL) {
		error = EIO;
		goto done;
	}
	hammer2_chain_delete(parent, chain, xop->head.mtid, 0);
	error = 0;
done:
	hammer2_xop_feed(&xop->head, NULL, clindex, error);
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
}

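/*
 * Inode unlink-all helper (backend, threaded)
 *
 * Permanently deletes all chains found under ip1 within the key range
 * (key_beg..key_end), feeding each deleted chain back to the frontend
 * and terminating the stream with a final ENOENT feed.
 */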
void
hammer2_inode_xop_unlinkall(hammer2_xop_t *arg, int clindex)
{
	hammer2_xop_unlinkall_t *xop = &arg->xop_unlinkall;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_key_t key_next;
	int cache_index = -1;

	/*
	 * We need the precise parent chain to issue the deletion.
	 */
	parent = hammer2_inode_chain(xop->head.ip1, clindex,
				     HAMMER2_RESOLVE_ALWAYS);
	chain = NULL;
	if (parent == NULL) {
		/* XXX error */
		goto done;
	}
	chain = hammer2_chain_lookup(&parent, &key_next,
				     xop->key_beg, xop->key_end,
				     &cache_index,
				     HAMMER2_LOOKUP_ALWAYS);
	while (chain) {
		hammer2_chain_delete(parent, chain,
				     xop->head.mtid, HAMMER2_DELETE_PERMANENT);
		hammer2_xop_feed(&xop->head, chain, clindex, chain->error);
		/* hammer2_chain_next() unlocks and drops chain for us */
		chain = hammer2_chain_next(&parent, chain, &key_next,
					   key_next, xop->key_end,
					   &cache_index,
					   HAMMER2_LOOKUP_ALWAYS);
	}
done:
	hammer2_xop_feed(&xop->head, NULL, clindex, ENOENT);
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
}

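/*
 * Inode connect helper (backend, threaded)
 *
 * Reconnects (ip2)'s inode chain under directory (ip1) at key lhc,
 * adjusting the embedded filename and name_key.  The priming lookup
 * on the parent is expected to fail; EEXIST is returned if the target
 * key is already in use.
 */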
void
hammer2_inode_xop_connect(hammer2_xop_t *arg, int clindex)
{
	hammer2_xop_connect_t *xop = &arg->xop_connect;
	hammer2_inode_data_t *wipdata;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_pfs_t *pmp;
	hammer2_key_t key_dummy;
	int cache_index = -1;
	int error;

	/*
	 * Get directory, then issue a lookup to prime the parent chain
	 * for the create.  The lookup is expected to fail.
	 */
	pmp = xop->head.ip1->pmp;
	parent = hammer2_inode_chain(xop->head.ip1, clindex,
				     HAMMER2_RESOLVE_ALWAYS);
	if (parent == NULL) {
		chain = NULL;
		error = EIO;
		goto fail;
	}
	chain = hammer2_chain_lookup(&parent, &key_dummy,
				     xop->lhc, xop->lhc,
				     &cache_index, 0);
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		chain = NULL;
		error = EEXIST;
		goto fail;
	}

	/*
	 * Adjust the filename in the inode, set the name key.
	 *
	 * NOTE: Frontend must also adjust ip2->meta on success, we can't
	 *	 do it here.
	 */
	chain = hammer2_inode_chain(xop->head.ip2, clindex,
				    HAMMER2_RESOLVE_ALWAYS);
	hammer2_chain_modify(chain, xop->head.mtid, 0, 0);
	wipdata = &chain->data->ipdata;

	hammer2_inode_modify(xop->head.ip2);
	if (xop->head.name1) {
		bzero(wipdata->filename, sizeof(wipdata->filename));
		bcopy(xop->head.name1, wipdata->filename, xop->head.name1_len);
		wipdata->meta.name_len = xop->head.name1_len;
	}
	wipdata->meta.name_key = xop->lhc;

	/*
	 * Reconnect the chain to the new parent directory.
	 */
	error = hammer2_chain_create(&parent, &chain, pmp,
				     xop->lhc, 0,
				     HAMMER2_BREF_TYPE_INODE,
				     HAMMER2_INODE_BYTES,
				     xop->head.mtid, 0, 0);

	/*
	 * Feed result back.
	 */
fail:
	hammer2_xop_feed(&xop->head, NULL, clindex, error);
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
}

/*
 * Synchronize the in-memory inode with the chain.  This is the backend
 * for the frontend's hammer2_inode_chain_sync(): it flushes ip->meta to
 * the inode chain on this cluster node and, if the inode was truncated,
 * first deletes any data chains beyond the new EOF.
 */
void
hammer2_inode_xop_chain_sync(hammer2_xop_t *arg, int clindex)
{
	hammer2_xop_fsync_t *xop = &arg->xop_fsync;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	int error;

	parent = hammer2_inode_chain(xop->head.ip1, clindex,
				     HAMMER2_RESOLVE_ALWAYS);
	chain = NULL;
	if (parent == NULL) {
		error = EIO;
		goto done;
	}
	if (parent->error) {
		error = parent->error;
		goto done;
	}

	error = 0;

	if ((xop->ipflags & HAMMER2_INODE_RESIZED) == 0) {
		/* osize must be ignored */
	} else if (xop->meta.size < xop->osize) {
		/*
		 * We must delete any chains beyond the EOF.  The chain
		 * straddling the EOF will be pending in the bioq.
		 */
		hammer2_key_t lbase;
		hammer2_key_t key_next;
		int cache_index = -1;

		lbase = (xop->meta.size + HAMMER2_PBUFMASK64) &
			~HAMMER2_PBUFMASK64;
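		/*
		 * (Illustration: with a 64KB logical buffer,
		 * HAMMER2_PBUFMASK64 == 0xFFFF, so truncating to
		 * meta.size 100000 rounds lbase up to 131072 and only
		 * chains at or beyond that boundary are deleted; the
		 * buffer straddling EOF remains pending in the bioq.)
		 */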
		chain = hammer2_chain_lookup(&parent, &key_next,
					     lbase, HAMMER2_KEY_MAX,
					     &cache_index,
					     HAMMER2_LOOKUP_NODATA |
					     HAMMER2_LOOKUP_NODIRECT);
		while (chain) {
			/*
			 * Degenerate embedded case, nothing to loop on
			 */
			switch (chain->bref.type) {
			case HAMMER2_BREF_TYPE_INODE:
				KKASSERT(0);
				break;
			case HAMMER2_BREF_TYPE_DATA:
				hammer2_chain_delete(parent, chain,
						     xop->head.mtid,
						     HAMMER2_DELETE_PERMANENT);
				break;
			}
			chain = hammer2_chain_next(&parent, chain, &key_next,
						   key_next, HAMMER2_KEY_MAX,
						   &cache_index,
						   HAMMER2_LOOKUP_NODATA |
						   HAMMER2_LOOKUP_NODIRECT);
		}

		/*
		 * Reset to point at inode for following code, if necessary.
		 */
		if (parent->bref.type != HAMMER2_BREF_TYPE_INODE) {
			hammer2_chain_unlock(parent);
			hammer2_chain_drop(parent);
			parent = hammer2_inode_chain(xop->head.ip1, clindex,
						     HAMMER2_RESOLVE_ALWAYS);
			kprintf("hammer2: TRUNCATE RESET on '%s'\n",
				parent->data->ipdata.filename);
		}
	}

	/*
	 * Sync the inode meta-data, potentially clearing the blockset
	 * area of direct data so it can be used for blockrefs
	 * (clear_directdata is set by the frontend when the file grows
	 * out of its embedded direct-data representation).
	 */
	hammer2_chain_modify(parent, xop->head.mtid, 0, 0);
	parent->data->ipdata.meta = xop->meta;
	if (xop->clear_directdata) {
		bzero(&parent->data->ipdata.u.blockset,
		      sizeof(parent->data->ipdata.u.blockset));
	}
done:
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	hammer2_xop_feed(&xop->head, NULL, clindex, error);
}
