/*
 * Copyright (c) 2011-2014 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/uuid.h>

#include "hammer2.h"

#define INODE_DEBUG	0

RB_GENERATE2(hammer2_inode_tree, hammer2_inode, rbnode, hammer2_inode_cmp,
	     hammer2_tid_t, meta.inum);

int
hammer2_inode_cmp(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
{
	if (ip1->meta.inum < ip2->meta.inum)
		return(-1);
	if (ip1->meta.inum > ip2->meta.inum)
		return(1);
	return(0);
}

static
void
hammer2_inode_delayed_sideq(hammer2_inode_t *ip)
{
	hammer2_inode_sideq_t *ipul;
	hammer2_pfs_t *pmp = ip->pmp;

	if ((ip->flags & HAMMER2_INODE_ONSIDEQ) == 0) {
		ipul = kmalloc(sizeof(*ipul), pmp->minode,
			       M_WAITOK | M_ZERO);
		ipul->ip = ip;
		hammer2_spin_ex(&pmp->list_spin);
		if ((ip->flags & HAMMER2_INODE_ONSIDEQ) == 0) {
			hammer2_inode_ref(ip);
			atomic_set_int(&ip->flags,
				       HAMMER2_INODE_ONSIDEQ);
			TAILQ_INSERT_TAIL(&pmp->sideq, ipul, entry);
			hammer2_spin_unex(&pmp->list_spin);
		} else {
			hammer2_spin_unex(&pmp->list_spin);
			kfree(ipul, pmp->minode);
		}
	}
}

/*
 * HAMMER2 inode locks
 *
 * HAMMER2 offers shared and exclusive locks on inodes.  Pass a mask of
 * flags for options:
 *
 *	- pass HAMMER2_RESOLVE_SHARED if a shared lock is desired.  The
 *	  inode locking function will automatically set the RDONLY flag.
 *
 *	- pass HAMMER2_RESOLVE_ALWAYS if you need the inode's meta-data.
 *	  Most front-end inode locks do.
 *
 *	- pass HAMMER2_RESOLVE_NEVER if you do not want to require that
 *	  the inode data be resolved.  This is used by the syncthr because
 *	  it can run on an unresolved/out-of-sync cluster, and also by the
 *	  vnode reclamation code to avoid unnecessary I/O (particularly when
 *	  disposing of hundreds of thousands of cached vnodes).
 *
 * The inode locking function locks the inode itself, resolves any stale
 * chains in the inode's cluster, and allocates a fresh copy of the
 * cluster with 1 ref and all the underlying chains locked.
 *
 * ip->cluster will be stable while the inode is locked.
 *
 * NOTE: We don't combine the inode/chain lock because putting away an
 *	 inode would otherwise confuse multiple lock holders of the inode.
 *
 * NOTE: In-memory inodes always point to hardlink targets (the actual file),
 *	 and never point to a hardlink pointer.
 *
 * NOTE: If caller passes HAMMER2_RESOLVE_RDONLY the exclusive locking code
 *	 will feel free to reduce the chain set in the cluster as an
 *	 optimization.  It will still be validated against the quorum if
 *	 appropriate, but the optimization might be able to reduce data
 *	 accesses to one node.  This flag is automatically set if the inode
 *	 is locked with HAMMER2_RESOLVE_SHARED.
 */
void
hammer2_inode_lock(hammer2_inode_t *ip, int how)
{
	hammer2_inode_ref(ip);

	/*
	 * Inode structure mutex
	 */
	if (how & HAMMER2_RESOLVE_SHARED) {
		/*how |= HAMMER2_RESOLVE_RDONLY; not used */
		hammer2_mtx_sh(&ip->lock);
	} else {
		hammer2_mtx_ex(&ip->lock);
	}
}
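
/*
 * Illustrative sketch (not part of the original source): the typical
 * pairing of hammer2_inode_lock() and hammer2_inode_unlock().  The lock
 * call adds its own ref, so a bare lock/unlock pair is balanced.  The
 * function name and the shared/exclusive decision are hypothetical.
 */
#if 0
static void
example_read_inum(hammer2_inode_t *ip, hammer2_tid_t *inump)
{
	/* A shared lock is sufficient for read-only access to ip->meta */
	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
	*inump = ip->meta.inum;
	hammer2_inode_unlock(ip);	/* releases the lock and its ref */
}
#endif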

/*
 * Select a chain out of an inode's cluster and lock it.
 *
 * The inode does not have to be locked.
 */
hammer2_chain_t *
hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how)
{
	hammer2_chain_t *chain;
	hammer2_cluster_t *cluster;

	hammer2_spin_sh(&ip->cluster_spin);
#if 0
	cluster = ip->cluster_cache;
	if (cluster) {
		if (clindex >= cluster->nchains)
			chain = NULL;
		else
			chain = cluster->array[clindex].chain;
		if (chain) {
			hammer2_chain_ref(chain);
			hammer2_spin_unsh(&ip->cluster_spin);
			hammer2_chain_lock(chain, how);
			return chain;
		}
	}
#endif

	cluster = &ip->cluster;
	if (clindex >= cluster->nchains)
		chain = NULL;
	else
		chain = cluster->array[clindex].chain;
	if (chain) {
		hammer2_chain_ref(chain);
		hammer2_spin_unsh(&ip->cluster_spin);
		hammer2_chain_lock(chain, how);
	} else {
		hammer2_spin_unsh(&ip->cluster_spin);
	}
	return chain;
}
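
/*
 * Illustrative sketch (not part of the original source): a caller that
 * inspects one cluster element.  hammer2_inode_chain() returns the chain
 * referenced and locked (or NULL), so the put-away sequence is
 * hammer2_chain_unlock() + hammer2_chain_drop().  The function name is
 * hypothetical.
 */
#if 0
static void
example_scan_element(hammer2_inode_t *ip, int clindex)
{
	hammer2_chain_t *chain;

	chain = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
	if (chain) {
		/* ... examine chain->bref, chain->data, etc ... */
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
}
#endif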

hammer2_chain_t *
hammer2_inode_chain_and_parent(hammer2_inode_t *ip, int clindex,
			       hammer2_chain_t **parentp, int how)
{
	hammer2_chain_t *chain;
	hammer2_chain_t *parent;

	for (;;) {
		hammer2_spin_sh(&ip->cluster_spin);
		if (clindex >= ip->cluster.nchains)
			chain = NULL;
		else
			chain = ip->cluster.array[clindex].chain;
		if (chain) {
			hammer2_chain_ref(chain);
			hammer2_spin_unsh(&ip->cluster_spin);
			hammer2_chain_lock(chain, how);
		} else {
			hammer2_spin_unsh(&ip->cluster_spin);
		}

		/*
		 * Get parent, lock order must be (parent, chain).
		 * The code below dereferences chain, so the element
		 * must exist at clindex.
		 */
		KKASSERT(chain != NULL);
		parent = chain->parent;
		hammer2_chain_ref(parent);
		hammer2_chain_unlock(chain);
		hammer2_chain_lock(parent, how);
		hammer2_chain_lock(chain, how);
		if (ip->cluster.array[clindex].chain == chain &&
		    chain->parent == parent) {
			break;
		}

		/*
		 * Retry
		 */
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	*parentp = parent;

	return chain;
}

void
hammer2_inode_unlock(hammer2_inode_t *ip)
{
	hammer2_mtx_unlock(&ip->lock);
	hammer2_inode_drop(ip);
}

/*
 * Temporarily release a lock held shared or exclusive.  The caller must
 * hold the lock shared or exclusive on call and the lock will be released
 * on return.  hammer2_inode_lock_temp_restore() restores a lock that was
 * temporarily released, using the state returned by the release.
 */
hammer2_mtx_state_t
hammer2_inode_lock_temp_release(hammer2_inode_t *ip)
{
	return hammer2_mtx_temp_release(&ip->lock);
}

void
hammer2_inode_lock_temp_restore(hammer2_inode_t *ip, hammer2_mtx_state_t ostate)
{
	hammer2_mtx_temp_restore(&ip->lock, ostate);
}
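
/*
 * Illustrative sketch (not part of the original source): the
 * release/restore pattern used when a blocking operation must not be
 * performed while holding the inode lock.  hammer2_igetv() below uses
 * exactly this shape around vget().  The blocking call here is
 * hypothetical.
 */
#if 0
static void
example_blocking_op(hammer2_inode_t *ip)
{
	hammer2_mtx_state_t ostate;

	/* ip is locked shared or exclusive on entry */
	ostate = hammer2_inode_lock_temp_release(ip);
	/* ... perform a call that might deadlock against ip->lock ... */
	hammer2_inode_lock_temp_restore(ip, ostate);
	/* ip is locked in its original state again */
}
#endif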

/*
 * Upgrade a shared inode lock to exclusive and return.  If the inode lock
 * is already held exclusively this is a NOP.
 *
 * The caller MUST hold the inode lock either shared or exclusive on call
 * and will own the lock exclusively on return.
 *
 * Returns non-zero if the lock was already exclusive prior to the upgrade.
 */
int
hammer2_inode_lock_upgrade(hammer2_inode_t *ip)
{
	int wasexclusive;

	if (mtx_islocked_ex(&ip->lock)) {
		wasexclusive = 1;
	} else {
		hammer2_mtx_unlock(&ip->lock);
		hammer2_mtx_ex(&ip->lock);
		wasexclusive = 0;
	}
	return wasexclusive;
}

/*
 * Downgrade an inode lock from exclusive to shared only if the inode
 * lock was previously shared.  If the inode lock was previously exclusive,
 * this is a NOP.
 */
void
hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int wasexclusive)
{
	if (wasexclusive == 0)
		mtx_downgrade(&ip->lock);
}
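
/*
 * Illustrative sketch (not part of the original source): upgrade/downgrade
 * bracketing a modification that requires the exclusive lock while
 * preserving the caller's original lock state.  Note the upgrade may drop
 * and re-acquire the lock, so state must be revalidated by real callers
 * (igetv below rechecks ip->vp for this reason).  The helper name is
 * hypothetical.
 */
#if 0
static void
example_modify_under_lock(hammer2_inode_t *ip)
{
	int wasexclusive;

	/* ip locked shared or exclusive on entry */
	wasexclusive = hammer2_inode_lock_upgrade(ip);
	/* ... perform modification requiring the exclusive lock ... */
	hammer2_inode_lock_downgrade(ip, wasexclusive);
	/* lock state is as it was on entry */
}
#endif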

/*
 * Lookup an inode by inode number
 */
hammer2_inode_t *
hammer2_inode_lookup(hammer2_pfs_t *pmp, hammer2_tid_t inum)
{
	hammer2_inode_t *ip;

	KKASSERT(pmp);
	if (pmp->spmp_hmp) {
		ip = NULL;
	} else {
		hammer2_spin_ex(&pmp->inum_spin);
		ip = RB_LOOKUP(hammer2_inode_tree, &pmp->inum_tree, inum);
		if (ip)
			hammer2_inode_ref(ip);
		hammer2_spin_unex(&pmp->inum_spin);
	}
	return(ip);
}
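
/*
 * Illustrative sketch (not part of the original source): the lookup
 * returns the inode with a ref (or NULL), so the caller must balance it
 * with hammer2_inode_drop() when done.  The function name is hypothetical.
 */
#if 0
static int
example_inum_cached(hammer2_pfs_t *pmp, hammer2_tid_t inum)
{
	hammer2_inode_t *ip;
	int found;

	ip = hammer2_inode_lookup(pmp, inum);
	found = (ip != NULL);
	if (ip)
		hammer2_inode_drop(ip);	/* balance lookup's ref */
	return found;
}
#endif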

/*
 * Adding a ref to an inode is only legal if the inode already has at least
 * one ref.
 *
 * (can be called with spinlock held)
 */
void
hammer2_inode_ref(hammer2_inode_t *ip)
{
	atomic_add_int(&ip->refs, 1);
	if (hammer2_debug & 0x80000) {
		kprintf("INODE+1 %p (%d->%d)\n", ip, ip->refs - 1, ip->refs);
		print_backtrace(8);
	}
}

/*
 * Drop an inode reference, freeing the inode when the last reference goes
 * away.
 */
void
hammer2_inode_drop(hammer2_inode_t *ip)
{
	hammer2_pfs_t *pmp;
	hammer2_inode_t *pip;
	u_int refs;

	while (ip) {
		if (hammer2_debug & 0x80000) {
			kprintf("INODE-1 %p (%d->%d)\n",
				ip, ip->refs, ip->refs - 1);
			print_backtrace(8);
		}
		refs = ip->refs;
		cpu_ccfence();
		if (refs == 1) {
			/*
			 * Transition to zero, must interlock with
			 * the inode inumber lookup tree (if applicable).
			 * It should not be possible for anyone to race
			 * the transition to 0.
			 */
			pmp = ip->pmp;
			KKASSERT(pmp);
			hammer2_spin_ex(&pmp->inum_spin);

			if (atomic_cmpset_int(&ip->refs, 1, 0)) {
				KKASSERT(hammer2_mtx_refs(&ip->lock) == 0);
				if (ip->flags & HAMMER2_INODE_ONRBTREE) {
					atomic_clear_int(&ip->flags,
						     HAMMER2_INODE_ONRBTREE);
					RB_REMOVE(hammer2_inode_tree,
						  &pmp->inum_tree, ip);
				}
				hammer2_spin_unex(&pmp->inum_spin);

				pip = ip->pip;
				ip->pip = NULL;
				ip->pmp = NULL;

#if 0
				/*
				 * Clean out the cluster cache
				 */
				hammer2_cluster_t *tmpclu;
				tmpclu = ip->cluster_cache;
				if (tmpclu) {
					ip->cluster_cache = NULL;
					hammer2_cluster_drop(tmpclu);
				}
#endif

				/*
				 * Cleaning out ip->cluster isn't entirely
				 * trivial.
				 */
				hammer2_inode_repoint(ip, NULL, NULL);

				/*
				 * We have to drop pip (if non-NULL) to
				 * dispose of our implied reference from
				 * ip->pip.  We can simply loop on it.
				 */
				kfree(ip, pmp->minode);
				atomic_add_long(&pmp->inmem_inodes, -1);
				ip = pip;
				/* continue with pip (can be NULL) */
			} else {
				hammer2_spin_unex(&ip->pmp->inum_spin);
			}
		} else {
			/*
			 * Non-zero transition
			 */
			if (atomic_cmpset_int(&ip->refs, refs, refs - 1))
				break;
		}
	}
}

/*
 * Get the vnode associated with the given inode, allocating the vnode if
 * necessary.  The vnode will be returned exclusively locked.
 *
 * The caller must lock the inode (shared or exclusive).
 *
 * Great care must be taken to avoid deadlocks and vnode acquisition/reclaim
 * races.
 */
struct vnode *
hammer2_igetv(hammer2_inode_t *ip, int *errorp)
{
	hammer2_pfs_t *pmp;
	struct vnode *vp;

	pmp = ip->pmp;
	KKASSERT(pmp != NULL);
	*errorp = 0;

	for (;;) {
		/*
		 * Attempt to reuse an existing vnode assignment.  It is
		 * possible to race a reclaim so the vget() may fail.  The
		 * inode must be unlocked during the vget() to avoid a
		 * deadlock against a reclaim.
		 */
		int wasexclusive;

		vp = ip->vp;
		if (vp) {
			/*
			 * Inode must be unlocked during the vget() to avoid
			 * possible deadlocks, but leave the ip ref intact.
			 *
			 * vnode is held to prevent destruction during the
			 * vget().  The vget() can still fail if we lost
			 * a reclaim race on the vnode.
			 */
			hammer2_mtx_state_t ostate;

			vhold(vp);
			ostate = hammer2_inode_lock_temp_release(ip);
			if (vget(vp, LK_EXCLUSIVE)) {
				vdrop(vp);
				hammer2_inode_lock_temp_restore(ip, ostate);
				continue;
			}
			hammer2_inode_lock_temp_restore(ip, ostate);
			vdrop(vp);
			/* vp still locked and ref from vget */
			if (ip->vp != vp) {
				kprintf("hammer2: igetv race %p/%p\n",
					ip->vp, vp);
				vput(vp);
				continue;
			}
			*errorp = 0;
			break;
		}

		/*
		 * No vnode exists, allocate a new vnode.  Beware of
		 * allocation races.  This function will return an
		 * exclusively locked and referenced vnode.
		 */
		*errorp = getnewvnode(VT_HAMMER2, pmp->mp, &vp, 0, 0);
		if (*errorp) {
			kprintf("hammer2: igetv getnewvnode failed %d\n",
				*errorp);
			vp = NULL;
			break;
		}

		/*
		 * Lock the inode and check for an allocation race.
		 */
		wasexclusive = hammer2_inode_lock_upgrade(ip);
		if (ip->vp != NULL) {
			vp->v_type = VBAD;
			vx_put(vp);
			hammer2_inode_lock_downgrade(ip, wasexclusive);
			continue;
		}

		switch (ip->meta.type) {
		case HAMMER2_OBJTYPE_DIRECTORY:
			vp->v_type = VDIR;
			break;
		case HAMMER2_OBJTYPE_REGFILE:
			vp->v_type = VREG;
			vinitvmio(vp, ip->meta.size,
				  HAMMER2_LBUFSIZE,
				  (int)ip->meta.size & HAMMER2_LBUFMASK);
			break;
		case HAMMER2_OBJTYPE_SOFTLINK:
			/*
			 * XXX for now we are using the generic file_read
			 * and file_write code so we need a buffer cache
			 * association.
			 */
			vp->v_type = VLNK;
			vinitvmio(vp, ip->meta.size,
				  HAMMER2_LBUFSIZE,
				  (int)ip->meta.size & HAMMER2_LBUFMASK);
			break;
		case HAMMER2_OBJTYPE_CDEV:
			vp->v_type = VCHR;
			/* fall through */
		case HAMMER2_OBJTYPE_BDEV:
			vp->v_ops = &pmp->mp->mnt_vn_spec_ops;
			if (ip->meta.type != HAMMER2_OBJTYPE_CDEV)
				vp->v_type = VBLK;
			addaliasu(vp,
				  ip->meta.rmajor,
				  ip->meta.rminor);
			break;
		case HAMMER2_OBJTYPE_FIFO:
			vp->v_type = VFIFO;
			vp->v_ops = &pmp->mp->mnt_vn_fifo_ops;
			break;
		case HAMMER2_OBJTYPE_SOCKET:
			vp->v_type = VSOCK;
			break;
		default:
			panic("hammer2: unhandled objtype %d",
			      ip->meta.type);
			break;
		}

		if (ip == pmp->iroot)
			vsetflags(vp, VROOT);

		vp->v_data = ip;
		ip->vp = vp;
		hammer2_inode_ref(ip);		/* vp association */
		hammer2_inode_lock_downgrade(ip, wasexclusive);
		break;
	}

	/*
	 * Return non-NULL vp and *errorp == 0, or NULL vp and *errorp != 0.
	 */
	if (hammer2_debug & 0x0002) {
		kprintf("igetv vp %p refs 0x%08x aux 0x%08x\n",
			vp, vp->v_refcnt, vp->v_auxrefs);
	}
	return (vp);
}
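
/*
 * Illustrative sketch (not part of the original source): how a VOP-style
 * caller typically obtains a vnode for an inode.  hammer2_igetv() requires
 * the inode lock to be held and returns the vnode exclusively locked, or
 * NULL with *errorp set.  The wrapper name is hypothetical.
 */
#if 0
static int
example_get_vnode(hammer2_inode_t *ip, struct vnode **vpp)
{
	int error;

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
	*vpp = hammer2_igetv(ip, &error);	/* NULL on error */
	hammer2_inode_unlock(ip);

	return error;
}
#endif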

/*
 * Returns the inode associated with the passed-in cluster, creating the
 * inode if necessary and synchronizing it to the passed-in cluster otherwise.
 * When synchronizing, if idx >= 0, only cluster index (idx) is synchronized.
 * Otherwise the whole cluster is synchronized.
 *
 * The passed-in cluster must be locked and will remain locked on return.
 * The returned inode will be locked and the caller may dispose of both
 * via hammer2_inode_unlock() + hammer2_inode_drop().  However, if the caller
 * needs to resolve a hardlink it must ref/unlock/relock/drop the inode.
 *
 * The hammer2_inode structure regulates the interface between the high level
 * kernel VNOPS API and the filesystem backend (the chains).
 *
 * On return the inode is locked with the supplied cluster.
 */
hammer2_inode_t *
hammer2_inode_get(hammer2_pfs_t *pmp, hammer2_inode_t *dip,
		  hammer2_cluster_t *cluster, int idx)
{
	hammer2_inode_t *nip;
	const hammer2_inode_data_t *iptmp;
	const hammer2_inode_data_t *nipdata;

	KKASSERT(cluster == NULL ||
		 hammer2_cluster_type(cluster) == HAMMER2_BREF_TYPE_INODE);
	KKASSERT(pmp);

	/*
	 * Interlocked lookup/ref of the inode.  This code is only needed
	 * when looking up inodes with nlinks != 0 (TODO: optimize out
	 * otherwise and test for duplicates).
	 *
	 * Cluster can be NULL during the initial pfs allocation.
	 */
again:
	while (cluster) {
		iptmp = &hammer2_cluster_rdata(cluster)->ipdata;
		nip = hammer2_inode_lookup(pmp, iptmp->meta.inum);
		if (nip == NULL)
			break;

		hammer2_mtx_ex(&nip->lock);

		/*
		 * Handle SMP race (not applicable to the super-root spmp
		 * which can't index inodes due to duplicative inode numbers).
		 */
		if (pmp->spmp_hmp == NULL &&
		    (nip->flags & HAMMER2_INODE_ONRBTREE) == 0) {
			hammer2_mtx_unlock(&nip->lock);
			hammer2_inode_drop(nip);
			continue;
		}
		if (idx >= 0)
			hammer2_inode_repoint_one(nip, cluster, idx);
		else
			hammer2_inode_repoint(nip, NULL, cluster);

		return nip;
	}

	/*
	 * We couldn't find the inode number, create a new inode.
	 */
	nip = kmalloc(sizeof(*nip), pmp->minode, M_WAITOK | M_ZERO);
	spin_init(&nip->cluster_spin, "h2clspin");
	atomic_add_long(&pmp->inmem_inodes, 1);
	hammer2_pfs_memory_inc(pmp);
	hammer2_pfs_memory_wakeup(pmp);
	if (pmp->spmp_hmp)
		nip->flags = HAMMER2_INODE_SROOT;

	/*
	 * Initialize nip's cluster.  A cluster is provided for normal
	 * inodes but typically not for the super-root or PFS inodes.
	 */
	nip->cluster.refs = 1;
	nip->cluster.pmp = pmp;
	nip->cluster.flags |= HAMMER2_CLUSTER_INODE;
	if (cluster) {
		nipdata = &hammer2_cluster_rdata(cluster)->ipdata;
		nip->meta = nipdata->meta;
		atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD);
		hammer2_inode_repoint(nip, NULL, cluster);
	} else {
		nip->meta.inum = 1;		/* PFS inum is always 1 XXX */
		/* mtime will be updated when a cluster is available */
		atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD);/*XXX*/
	}

	nip->pip = dip;				/* can be NULL */
	if (dip)
		hammer2_inode_ref(dip);	/* ref dip for nip->pip */

	nip->pmp = pmp;

	/*
	 * ref and lock on nip gives it state compatible to after a
	 * hammer2_inode_lock() call.
	 */
	nip->refs = 1;
	hammer2_mtx_init(&nip->lock, "h2inode");
	hammer2_mtx_ex(&nip->lock);
	/* combination of thread lock and chain lock == inode lock */

	/*
	 * Attempt to add the inode.  If it fails we raced another inode
	 * get.  Undo all the work and try again.
	 */
	if (pmp->spmp_hmp == NULL) {
		hammer2_spin_ex(&pmp->inum_spin);
		if (RB_INSERT(hammer2_inode_tree, &pmp->inum_tree, nip)) {
			hammer2_spin_unex(&pmp->inum_spin);
			hammer2_mtx_unlock(&nip->lock);
			hammer2_inode_drop(nip);
			goto again;
		}
		atomic_set_int(&nip->flags, HAMMER2_INODE_ONRBTREE);
		hammer2_spin_unex(&pmp->inum_spin);
	}

	return (nip);
}

/*
 * Resolve the parent inode for ip using ip->meta.iparent.
 *
 * Called with locked inode as argument
 */
void
hammer2_inode_resolve_pip(hammer2_inode_t *ip)
{
	hammer2_pfs_t *pmp = ip->pmp;
	hammer2_inode_t *pip;
	hammer2_xop_lookup_t *xop;
	int first = 1;
	int error;

	while (ip->pip == NULL && ip != pmp->iroot &&
	       ip->meta.iparent != 0) {
		pip = hammer2_inode_lookup(ip->pmp, ip->meta.iparent);
		if (pip) {
			ip->pip = pip;	/* transfer ref to ip->pip */
			return;
		}
		xop = hammer2_xop_alloc(pmp->iroot, 0);
		xop->lhc = ip->meta.iparent;
		hammer2_xop_start(&xop->head, hammer2_xop_lookup);
		error = hammer2_xop_collect(&xop->head, 0);

		if (error) {
			kprintf("hammer2_inode_resolve_pip: "
				"cannot find %016jx\n",
				ip->meta.iparent);
			hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
			break;
		}
		kprintf("NFS load inum %016jx iparent %016jx\n",
			ip->meta.inum, ip->meta.iparent);
		pip = hammer2_inode_get(pmp, NULL, &xop->head.cluster, -1);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		ip->pip = pip;
		if (pip)
			hammer2_inode_ref(pip);
		else
			kprintf("Unable to load inode!\n");
		if (first) {
			first = 0;
		} else {
			hammer2_inode_unlock(ip);
		}
		ip = pip;
	}
	if (first == 0)
		hammer2_inode_unlock(ip);
}

/*
 * Create a new inode in the specified directory using the vattr to
 * figure out the type.  A non-zero type field overrides vattr.
 *
 * If no error occurs the new inode with its cluster locked is returned.
 * However, when creating an OBJTYPE_HARDLINK, the caller can assume
 * that NULL will be returned (that is, the caller already has the inode
 * in-hand and is creating a hardlink to it, we do not need to return a
 * representative ip).
 *
 * If vap and/or cred are NULL the related fields are not set and the
 * inode type defaults to a directory.  This is used when creating PFSs
 * under the super-root, so the inode number is set to 1 in this case.
 *
 * dip is not locked on entry.
 *
 * NOTE: This function is used to create all manner of inodes, including
 *	 super-root entries for snapshots and PFSs.  When used to create a
 *	 snapshot the inode will be temporarily associated with the spmp.
 *
 * NOTE: When creating a normal file or directory the caller must call this
 *	 function twice, once to create the actual inode and once to create
 *	 the hardlink representing the directory entry.  This function is
 *	 only called once when creating a softlink; the softlink is the
 *	 inode itself.
 *
 * NOTE: When creating a hardlink target (a real inode), name/name_len is
 *	 passed as NULL/0, and caller should pass lhc as inum.
 */
hammer2_inode_t *
hammer2_inode_create(hammer2_inode_t *dip,
		     struct vattr *vap, struct ucred *cred,
		     const uint8_t *name, size_t name_len, hammer2_key_t lhc,
		     hammer2_key_t inum, uint8_t type, uint8_t target_type,
		     int flags, int *errorp)
{
	hammer2_xop_create_t *xop;
	hammer2_inode_t *nip;
	int error;
	uid_t xuid;
	uuid_t dip_uid;
	uuid_t dip_gid;
	uint32_t dip_mode;
	uint8_t dip_comp_algo;
	uint8_t dip_check_algo;

	if (name)
		lhc = hammer2_dirhash(name, name_len);
	*errorp = 0;
	nip = NULL;

	/*
	 * Locate the inode or indirect block to create the new
	 * entry in.  At the same time check for key collisions
	 * and iterate until we don't get one.
	 *
	 * NOTE: hidden inodes do not have iterators.
	 *
	 * Lock the directory exclusively for now to guarantee that
	 * we can find an unused lhc for the name.  Due to collisions,
	 * two different creates can end up with the same lhc so we
	 * cannot depend on the OS to prevent the collision.
	 */
	hammer2_inode_lock(dip, 0);

	dip_uid = dip->meta.uid;
	dip_gid = dip->meta.gid;
	dip_mode = dip->meta.mode;
	dip_comp_algo = dip->meta.comp_algo;
	dip_check_algo = dip->meta.check_algo;

	/*
	 * If name specified, locate an unused key in the collision space.
	 * Otherwise use the passed-in lhc directly.
	 */
	if (name) {
		hammer2_xop_scanlhc_t *sxop;
		hammer2_key_t lhcbase;

		lhcbase = lhc;
		sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
		sxop->lhc = lhc;
		hammer2_xop_start(&sxop->head, hammer2_xop_scanlhc);
		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
			if (lhc != sxop->head.cluster.focus->bref.key)
				break;
			++lhc;
		}
		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

		if (error) {
			if (error != ENOENT)
				goto done2;
			++lhc;
			error = 0;
		}
		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
			error = ENOSPC;
			goto done2;
		}
	}

	/*
	 * Create the inode with the lhc as the key.
	 */
	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
	xop->lhc = lhc;
	xop->flags = flags;
	bzero(&xop->meta, sizeof(xop->meta));

	if (vap) {
		xop->meta.type = hammer2_get_obj_type(vap->va_type);

		switch (xop->meta.type) {
		case HAMMER2_OBJTYPE_CDEV:
		case HAMMER2_OBJTYPE_BDEV:
			xop->meta.rmajor = vap->va_rmajor;
			xop->meta.rminor = vap->va_rminor;
			break;
		default:
			break;
		}
		type = xop->meta.type;
	} else {
		xop->meta.type = type;
		xop->meta.target_type = target_type;
	}
	xop->meta.inum = inum;
	xop->meta.iparent = dip->meta.inum;

	/* Inherit parent's inode compression mode. */
	xop->meta.comp_algo = dip_comp_algo;
	xop->meta.check_algo = dip_check_algo;
	xop->meta.version = HAMMER2_INODE_VERSION_ONE;
	hammer2_update_time(&xop->meta.ctime);
	xop->meta.mtime = xop->meta.ctime;
	if (vap)
		xop->meta.mode = vap->va_mode;
	xop->meta.nlinks = 1;
	if (vap) {
		if (dip && dip->pmp) {
			xuid = hammer2_to_unix_xid(&dip_uid);
			xuid = vop_helper_create_uid(dip->pmp->mp,
						     dip_mode,
						     xuid,
						     cred,
						     &vap->va_mode);
		} else {
			/* super-root has no dip and/or pmp */
			xuid = 0;
		}
		if (vap->va_vaflags & VA_UID_UUID_VALID)
			xop->meta.uid = vap->va_uid_uuid;
		else if (vap->va_uid != (uid_t)VNOVAL)
			hammer2_guid_to_uuid(&xop->meta.uid, vap->va_uid);
		else
			hammer2_guid_to_uuid(&xop->meta.uid, xuid);

		if (vap->va_vaflags & VA_GID_UUID_VALID)
			xop->meta.gid = vap->va_gid_uuid;
		else if (vap->va_gid != (gid_t)VNOVAL)
			hammer2_guid_to_uuid(&xop->meta.gid, vap->va_gid);
		else if (dip)
			xop->meta.gid = dip_gid;
	}

	/*
	 * Regular files and softlinks allow a small amount of data to be
	 * directly embedded in the inode.  This flag will be cleared if
	 * the size is extended past the embedded limit.
	 */
	if (xop->meta.type == HAMMER2_OBJTYPE_REGFILE ||
	    xop->meta.type == HAMMER2_OBJTYPE_SOFTLINK ||
	    xop->meta.type == HAMMER2_OBJTYPE_HARDLINK) {
		xop->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
	}
	if (name) {
		hammer2_xop_setname(&xop->head, name, name_len);
	} else {
		name_len = hammer2_xop_setname_inum(&xop->head, inum);
		KKASSERT(lhc == inum);
	}
	xop->meta.name_len = name_len;
	xop->meta.name_key = lhc;
	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);

	hammer2_xop_start(&xop->head, hammer2_inode_xop_create);

	error = hammer2_xop_collect(&xop->head, 0);
#if INODE_DEBUG
	kprintf("CREATE INODE %*.*s\n",
		(int)name_len, (int)name_len, name);
#endif

	if (error) {
		*errorp = error;
		goto done;
	}

	/*
	 * Set up the new inode if not a hardlink pointer.
	 *
	 * NOTE: *_get() integrates chain's lock into the inode lock.
	 *
	 * NOTE: Only one new inode can currently be created per
	 *	 transaction.  If the need arises we can adjust
	 *	 hammer2_trans_init() to allow more.
	 *
	 * NOTE: nipdata will have chain's blockset data.
	 */
	if (type != HAMMER2_OBJTYPE_HARDLINK) {
		nip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
		nip->comp_heuristic = 0;
	} else {
		nip = NULL;
	}

done:
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
done2:
	hammer2_inode_unlock(dip);

	return (nip);
}
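
/*
 * Illustrative sketch (not part of the original source): creating a named
 * directory inode inside dip, mirroring the argument pattern used by the
 * hidden-directory setup in hammer2_inode_install_hidden() below.  The
 * wrapper name is hypothetical and a transaction is assumed to already be
 * open.  With a NULL vap the inode type defaults to a directory.
 */
#if 0
static hammer2_inode_t *
example_mkdir(hammer2_inode_t *dip, const uint8_t *name, size_t name_len,
	      hammer2_key_t inum, int *errorp)
{
	hammer2_inode_t *nip;

	/* lhc (0 here) is recomputed from the name when name != NULL */
	nip = hammer2_inode_create(dip, NULL, NULL,
				   name, name_len, 0,
				   inum, HAMMER2_OBJTYPE_DIRECTORY, 0,
				   0, errorp);
	return nip;	/* locked inode on success, NULL on error */
}
#endif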

/*
 * Connect the disconnected inode (ip) to the directory (dip) with the
 * specified (name, name_len).  If name is NULL, (lhc) will be used as
 * the directory key and the inode's embedded name will not be modified
 * for future recovery purposes.
 *
 * dip and ip must both be locked exclusively (dip in particular to avoid
 * lhc collisions).
 */
int
hammer2_inode_connect(hammer2_inode_t *dip, hammer2_inode_t *ip,
		      const char *name, size_t name_len,
		      hammer2_key_t lhc)
{
	hammer2_xop_scanlhc_t *sxop;
	hammer2_xop_connect_t *xop;
	hammer2_inode_t *opip;
	hammer2_key_t lhcbase;
	int error;

	/*
	 * Calculate the lhc and resolve the collision space.
	 */
	if (name) {
		lhc = lhcbase = hammer2_dirhash(name, name_len);
		sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
		sxop->lhc = lhc;
		hammer2_xop_start(&sxop->head, hammer2_xop_scanlhc);
		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
			if (lhc != sxop->head.cluster.focus->bref.key)
				break;
			++lhc;
		}
		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

		if (error) {
			if (error != ENOENT)
				goto done;
			++lhc;
			error = 0;
		}
		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
			error = ENOSPC;
			goto done;
		}
	} else {
		error = 0;
	}

	/*
	 * Formally reconnect the in-memory structure.  ip must
	 * be locked exclusively to safely change ip->pip.
	 */
	if (ip->pip != dip) {
		hammer2_inode_ref(dip);
		opip = ip->pip;
		ip->pip = dip;
		if (opip)
			hammer2_inode_drop(opip);
	}

	/*
	 * Connect her up
	 */
	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
	if (name)
		hammer2_xop_setname(&xop->head, name, name_len);
	hammer2_xop_setip2(&xop->head, ip);
	xop->lhc = lhc;
	hammer2_xop_start(&xop->head, hammer2_inode_xop_connect);
	error = hammer2_xop_collect(&xop->head, 0);
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

	/*
	 * On success make the same adjustments to ip->meta or the
	 * next flush may blow up the chain.
	 */
	if (error == 0) {
		hammer2_inode_modify(ip);
		ip->meta.name_key = lhc;
		if (name)
			ip->meta.name_len = name_len;
	}
done:
	return error;
}

/*
 * Repoint ip->cluster's chains to cluster's chains and fixup the default
 * focus.  All items, valid or invalid, are repointed.  hammer2_xop_start()
 * filters out invalid or non-matching elements.
 *
 * Caller must hold the inode exclusively locked; the cluster, if not NULL,
 * must also be locked.
 *
 * Cluster may be NULL to clean out any chains in ip->cluster.
 */
void
hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_inode_t *pip,
		      hammer2_cluster_t *cluster)
{
	hammer2_chain_t *dropch[HAMMER2_MAXCLUSTER];
	hammer2_chain_t *ochain;
	hammer2_chain_t *nchain;
	hammer2_inode_t *opip;
	int i;

	bzero(dropch, sizeof(dropch));

	/*
	 * Replace chains in ip->cluster with chains from cluster and
	 * adjust the focus if necessary.
	 *
	 * NOTE: nchain and/or ochain can be NULL due to gaps
	 *	 in the cluster arrays.
	 */
	hammer2_spin_ex(&ip->cluster_spin);
	for (i = 0; cluster && i < cluster->nchains; ++i) {
		/*
		 * Do not replace elements which are the same.  Also handle
		 * element count discrepancies.
		 */
		nchain = cluster->array[i].chain;
		if (i < ip->cluster.nchains) {
			ochain = ip->cluster.array[i].chain;
			if (ochain == nchain)
				continue;
		} else {
			ochain = NULL;
		}

		/*
		 * Make adjustments
		 */
		ip->cluster.array[i].chain = nchain;
		ip->cluster.array[i].flags &= ~HAMMER2_CITEM_INVALID;
		ip->cluster.array[i].flags |= cluster->array[i].flags &
					      HAMMER2_CITEM_INVALID;
		if (nchain)
			hammer2_chain_ref(nchain);
		dropch[i] = ochain;
	}

	/*
	 * Release any left-over chains in ip->cluster.
	 */
	while (i < ip->cluster.nchains) {
		nchain = ip->cluster.array[i].chain;
		if (nchain) {
			ip->cluster.array[i].chain = NULL;
			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
		}
		dropch[i] = nchain;
		++i;
	}

	/*
	 * Fixup fields.  Note that the inode-embedded cluster is never
	 * directly locked.
	 */
	if (cluster) {
		ip->cluster.nchains = cluster->nchains;
		ip->cluster.focus = cluster->focus;
		ip->cluster.flags = cluster->flags & ~HAMMER2_CLUSTER_LOCKED;
	} else {
		ip->cluster.nchains = 0;
		ip->cluster.focus = NULL;
		ip->cluster.flags &= ~HAMMER2_CLUSTER_ZFLAGS;
	}

	/*
	 * Repoint ip->pip if requested (non-NULL pip).
	 */
	if (pip && ip->pip != pip) {
		opip = ip->pip;
		hammer2_inode_ref(pip);
		ip->pip = pip;
	} else {
		opip = NULL;
	}
	hammer2_spin_unex(&ip->cluster_spin);

	/*
	 * Cleanup outside of spinlock
	 */
	while (--i >= 0) {
		if (dropch[i])
			hammer2_chain_drop(dropch[i]);
	}
	if (opip)
		hammer2_inode_drop(opip);
}
/*
 * Repoint a single element from the cluster to the ip.  Used by the
 * synchronization threads to piecemeal update inodes.  Does not change
 * focus and requires inode to be re-locked to clean-up flags (XXX).
 */
void
hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
			  int idx)
{
	hammer2_chain_t *ochain;
	hammer2_chain_t *nchain;
	int i;

	hammer2_spin_ex(&ip->cluster_spin);
	KKASSERT(idx < cluster->nchains);
	if (idx < ip->cluster.nchains) {
		ochain = ip->cluster.array[idx].chain;
		nchain = cluster->array[idx].chain;
	} else {
		ochain = NULL;
		nchain = cluster->array[idx].chain;
		/*
		 * Zero out the gap exposed by growing the array, then
		 * extend the element count.  (The count must be bumped
		 * after the loop or the loop body never runs.)
		 */
		for (i = ip->cluster.nchains; i <= idx; ++i) {
			bzero(&ip->cluster.array[i],
			      sizeof(ip->cluster.array[i]));
			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
		}
		ip->cluster.nchains = idx + 1;
	}
	if (ochain != nchain) {
		/*
		 * Make adjustments.
		 */
		ip->cluster.array[idx].chain = nchain;
		ip->cluster.array[idx].flags &= ~HAMMER2_CITEM_INVALID;
		ip->cluster.array[idx].flags |= cluster->array[idx].flags &
						HAMMER2_CITEM_INVALID;
	}
	hammer2_spin_unex(&ip->cluster_spin);
	if (ochain != nchain) {
		if (nchain)
			hammer2_chain_ref(nchain);
		if (ochain)
			hammer2_chain_drop(ochain);
	}
}

/*
 * Called with a locked inode to finish unlinking an inode after xop_unlink
 * had been run.  This function is responsible for decrementing nlinks and
 * moving deleted inodes to the hidden directory if they are still open.
 *
 * We don't bother decrementing nlinks if the file is not open and this was
 * the last link.
 *
 * If the inode is a hardlink target its chain has not yet been deleted,
 * otherwise its chain has been deleted.
 *
 * If isopen then any prior deletion was not permanent and the inode must
 * be moved to the hidden directory.
 */
int
hammer2_inode_unlink_finisher(hammer2_inode_t *ip, int isopen)
{
	hammer2_pfs_t *pmp;
	int error;

	pmp = ip->pmp;

	/*
	 * Decrement nlinks.  If this is the last link and the file is
	 * not open, the chain has already been removed and we don't bother
	 * dirtying the inode.
	 */
	if (ip->meta.nlinks == 1) {
		atomic_set_int(&ip->flags, HAMMER2_INODE_ISUNLINKED);
		if (isopen == 0) {
			atomic_set_int(&ip->flags, HAMMER2_INODE_ISDELETED);
			return 0;
		}
	}

	hammer2_inode_modify(ip);
	--ip->meta.nlinks;
	if ((int64_t)ip->meta.nlinks < 0)
		ip->meta.nlinks = 0;	/* safety */

	/*
	 * If nlinks is not zero we are done.  However, this should only be
	 * possible with a hardlink target.  If the inode is an embedded
	 * hardlink nlinks should have dropped to zero, warn and proceed
	 * with the next step.
	 */
	if (ip->meta.nlinks) {
		if ((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0)
			return 0;
		kprintf("hammer2_inode_unlink: nlinks was not 0 (%jd)\n",
			(intmax_t)ip->meta.nlinks);
		return 0;
	}

	/*
	 * nlinks is now zero, the inode should have already been deleted.
	 * If the file is open it was deleted non-permanently and must be
	 * moved to the hidden directory.
	 *
	 * When moving to the hidden directory we force the name_key to the
	 * inode number to avoid collisions.
	 */
	if (isopen) {
		hammer2_inode_lock(pmp->ihidden, 0);
		error = hammer2_inode_connect(pmp->ihidden, ip,
					      NULL, 0, ip->meta.inum);
		hammer2_inode_unlock(pmp->ihidden);
	} else {
		error = 0;
	}
	return error;
}

/*
 * This is called from the mount code to initialize pmp->ihidden
 */
void
hammer2_inode_install_hidden(hammer2_pfs_t *pmp)
{
	int error;

	if (pmp->ihidden)
		return;

	hammer2_trans_init(pmp, 0);
	hammer2_inode_lock(pmp->iroot, 0);

	/*
	 * Find the hidden directory
	 */
	{
		hammer2_xop_lookup_t *xop;

		xop = hammer2_xop_alloc(pmp->iroot, HAMMER2_XOP_MODIFYING);
		xop->lhc = HAMMER2_INODE_HIDDENDIR;
		hammer2_xop_start(&xop->head, hammer2_xop_lookup);
		error = hammer2_xop_collect(&xop->head, 0);

		if (error == 0) {
			/*
			 * Found the hidden directory
			 */
			kprintf("PFS FOUND HIDDEN DIR\n");
			pmp->ihidden = hammer2_inode_get(pmp, pmp->iroot,
							 &xop->head.cluster,
							 -1);
			hammer2_inode_ref(pmp->ihidden);
			hammer2_inode_unlock(pmp->ihidden);
		}
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	}

	/*
	 * Create the hidden directory if it could not be found.
	 */
	if (error == ENOENT) {
		kprintf("PFS CREATE HIDDEN DIR\n");

		pmp->ihidden = hammer2_inode_create(pmp->iroot, NULL, NULL,
						    NULL, 0,
				/* lhc */	    HAMMER2_INODE_HIDDENDIR,
				/* inum */	    HAMMER2_INODE_HIDDENDIR,
				/* type */	    HAMMER2_OBJTYPE_DIRECTORY,
				/* target_type */   0,
				/* flags */	    0,
						    &error);
		if (pmp->ihidden) {
			hammer2_inode_ref(pmp->ihidden);
			hammer2_inode_unlock(pmp->ihidden);
		}
		if (error)
			kprintf("PFS CREATE ERROR %d\n", error);
	}

	/*
	 * Scan the hidden directory on-mount and destroy its contents
	 */
	if (error == 0) {
		hammer2_xop_unlinkall_t *xop;

		hammer2_inode_lock(pmp->ihidden, 0);
		xop = hammer2_xop_alloc(pmp->ihidden, HAMMER2_XOP_MODIFYING);
		xop->key_beg = HAMMER2_KEY_MIN;
		xop->key_end = HAMMER2_KEY_MAX;
		hammer2_xop_start(&xop->head, hammer2_inode_xop_unlinkall);

		while ((error = hammer2_xop_collect(&xop->head, 0)) == 0) {
			;
		}
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		hammer2_inode_unlock(pmp->ihidden);
	}

	hammer2_inode_unlock(pmp->iroot);
	hammer2_trans_done(pmp);
}

#if 0
/*
 * REMOVED - No longer applicable now that we are indexing inodes under
 *	     the iroot.
 *
 * Find the directory common to both fdip and tdip that satisfies the
 * conditions.  The common directory is not allowed to cross a XLINK
 * boundary.  If ishardlink is non-zero and we successfully find the
 * common parent, we will continue to iterate parents until we hit a
 * XLINK boundary.
 *
 * Returns a held but not locked inode.  Caller typically locks the inode,
 * and when through unlocks AND drops it.
 */
hammer2_inode_t *
hammer2_inode_common_parent(hammer2_inode_t *fdip, hammer2_inode_t *tdip,
			    int *errorp, int ishardlink)
{
	hammer2_inode_t *scan1;
	hammer2_inode_t *scan2;
	int state;

	/*
	 * We used to have a depth field but it complicated matters too
	 * much for directory renames.  So now it's ugly.  Check for
	 * simple cases before giving up and doing it the expensive way.
	 *
	 * XXX need a bottom-up topology stability lock
	 */
	if (fdip == tdip) {
		hammer2_inode_ref(fdip);
		return(fdip);
	}

	/*
	 * XXX not MPSAFE
	 *
	 * state: -1	sub-scan failed
	 *	   0
	 *	  +1	sub-scan succeeded (find xlink boundary if rename)
	 */
	for (scan1 = fdip; scan1->pmp == fdip->pmp; scan1 = scan1->pip) {
		scan2 = tdip;
		state = 0;
		while (scan2->pmp == tdip->pmp) {
			if (state == 0 && scan1 == scan2) {
				/*
				 * Found common parent, stop here on rename,
				 * continue if creating a hardlink.
				 */
				if (ishardlink == 0) {
					hammer2_inode_ref(scan1);
					return(scan1);
				}
				state = 1;
			}
			if (state == 1) {
				/*
				 * Search for XLINK boundary when hardlink.
				 */
				if ((scan2->meta.uflags &
				     (SF_XLINK | UF_XLINK)) ||
				    scan2->pip == NULL ||
				    scan2->pip->pmp != scan1->pmp) {
					hammer2_inode_ref(scan2);
					return(scan2);
				}
			}
			if (scan2->meta.uflags & (SF_XLINK | UF_XLINK))
				break;
			scan2 = scan2->pip;
			if (scan2 == NULL)
				break;
		}
		if (scan1->meta.uflags & (SF_XLINK | UF_XLINK))
			break;
	}
	*errorp = EXDEV;
	return(NULL);
}
#endif
1438
1439 /*
1440  * Mark an inode as being modified, meaning that the caller will modify
1441  * ip->meta.
1442  *
1443  * If a vnode is present we set the vnode dirty and the nominal filesystem
1444  * sync will also handle synchronizing the inode meta-data.  If no vnode
1445  * is present we must ensure that the inode is on pmp->sideq.
1446  *
1447  * NOTE: No mtid (modify_tid) is passed into this routine.  The caller is
1448  *       only modifying the in-memory inode.  A modify_tid is synchronized
1449  *       later when the inode gets flushed.
1450  */
1451 void
1452 hammer2_inode_modify(hammer2_inode_t *ip)
1453 {
1454         hammer2_pfs_t *pmp;
1455
1456         atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
1457         if (ip->vp) {
1458                 vsetisdirty(ip->vp);
1459         } else if ((pmp = ip->pmp) != NULL) {
1460                 hammer2_inode_delayed_sideq(ip);
1461         }
1462 }
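
/*
 * Typical caller pattern, sketched (the meta field chosen is purely
 * illustrative):
 *
 *	hammer2_inode_lock(ip, 0);
 *	hammer2_inode_modify(ip);
 *	ip->meta.mtime = new_mtime;	(illustrative)
 *	hammer2_inode_unlock(ip);
 */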
1463
1464 /*
1465  * Synchronize the inode's frontend state with the chain state prior
1466  * to any explicit flush of the inode or any strategy write call.
1467  *
1468  * Called with a locked inode inside a transaction.
1469  */
1470 void
1471 hammer2_inode_chain_sync(hammer2_inode_t *ip)
1472 {
1473         if (ip->flags & (HAMMER2_INODE_RESIZED | HAMMER2_INODE_MODIFIED)) {
1474                 hammer2_xop_fsync_t *xop;
1475                 int error;
1476
1477                 xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
1478                 xop->clear_directdata = 0;
1479                 if (ip->flags & HAMMER2_INODE_RESIZED) {
1480                         if ((ip->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) &&
1481                             ip->meta.size > HAMMER2_EMBEDDED_BYTES) {
1482                                 ip->meta.op_flags &= ~HAMMER2_OPFLAG_DIRECTDATA;
1483                                 xop->clear_directdata = 1;
1484                         }
1485                         xop->osize = ip->osize;
1486                 } else {
1487                         xop->osize = ip->meta.size;     /* safety */
1488                 }
1489                 xop->ipflags = ip->flags;
1490                 xop->meta = ip->meta;
1491
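                /*
                 * Clear the state bits before dispatching the xop.  On
                 * failure the disabled code below would be needed to
                 * re-set them so a later sync could retry.
                 */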
1492                 atomic_clear_int(&ip->flags, HAMMER2_INODE_RESIZED |
1493                                              HAMMER2_INODE_MODIFIED);
1494                 hammer2_xop_start(&xop->head, hammer2_inode_xop_chain_sync);
1495                 error = hammer2_xop_collect(&xop->head, 0);
1496                 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1497                 if (error == ENOENT)
1498                         error = 0;
1499                 if (error) {
1500                         kprintf("hammer2: unable to fsync inode %p\n", ip);
1501                         /*
1502                         atomic_set_int(&ip->flags,
1503                                        xop->ipflags & (HAMMER2_INODE_RESIZED |
1504                                                        HAMMER2_INODE_MODIFIED));
1505                         */
1506                         /* XXX return error somehow? */
1507                 }
1508         }
1509 }
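
/*
 * Example explicit-flush sequence, as a sketch only (assumes the
 * transaction and inode-lock wrappers used elsewhere in HAMMER2):
 *
 *	hammer2_trans_init(pmp, 0);
 *	hammer2_inode_lock(ip, 0);
 *	hammer2_inode_chain_sync(ip);
 *	hammer2_inode_unlock(ip);
 *	hammer2_trans_done(pmp);
 */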
1510
1511 /*
1512  * The normal filesystem sync no longer has visibility to an inode structure
1513  * after its vnode has been reclaimed.  In this situation an unlinked-but-open
1514  * inode or a dirty inode may require additional processing to synchronize
1515  * ip->meta to its underlying cluster nodes.
1516  *
1517  * In particular, reclaims can occur in almost any state (for example, when
1518  * doing operations on unrelated vnodes) and flushing the reclaimed inode
1519  * in the reclaim path itself is a non-starter.
1520  *
1521  * Caller must be in a transaction.
1522  */
1523 void
1524 hammer2_inode_run_sideq(hammer2_pfs_t *pmp)
1525 {
1526         hammer2_xop_destroy_t *xop;
1527         hammer2_inode_sideq_t *ipul;
1528         hammer2_inode_t *ip;
1529         int error;
1530
1531         if (TAILQ_EMPTY(&pmp->sideq))
1532                 return;
1533
1534         LOCKSTART;
1535         hammer2_spin_ex(&pmp->list_spin);
1536         while ((ipul = TAILQ_FIRST(&pmp->sideq)) != NULL) {
1537                 TAILQ_REMOVE(&pmp->sideq, ipul, entry);
1538                 ip = ipul->ip;
1539                 KKASSERT(ip->flags & HAMMER2_INODE_ONSIDEQ);
1540                 atomic_clear_int(&ip->flags, HAMMER2_INODE_ONSIDEQ);
1541                 hammer2_spin_unex(&pmp->list_spin);
1542                 kfree(ipul, pmp->minode);
1543
1544                 hammer2_inode_lock(ip, 0);
1545                 if (ip->flags & HAMMER2_INODE_ISUNLINKED) {
1546                         /*
1547                          * The inode was unlinked while open, causing H2
1548                          * to relink it to a hidden directory to allow
1549                          * cluster operations to continue until close.
1550                          *
1551                          * The inode must be deleted and destroyed.
1552                          */
1553                         xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
1554                         hammer2_xop_start(&xop->head,
1555                                           hammer2_inode_xop_destroy);
1556                         error = hammer2_xop_collect(&xop->head, 0);
1557                         hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1558
1559                         atomic_clear_int(&ip->flags, HAMMER2_INODE_ISDELETED);
1560                 } else {
1561                         /*
1562                          * The inode was dirty as-of the reclaim, requiring
1563                          * synchronization of ip->meta with its underlying
1564                          * chains.
1565                          */
1566                         hammer2_inode_chain_sync(ip);
1567                 }
1568
1569                 hammer2_inode_unlock(ip);
1570                 hammer2_inode_drop(ip);                 /* ipul ref */
1571
1572                 hammer2_spin_ex(&pmp->list_spin);
1573         }
1574         hammer2_spin_unex(&pmp->list_spin);
1575         LOCKSTOP;
1576 }
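
/*
 * Sketch of the expected caller (hypothetical; the filesystem sync
 * itself lives elsewhere):
 *
 *	hammer2_trans_init(pmp, HAMMER2_TRANS_ISFLUSH);
 *	hammer2_inode_run_sideq(pmp);
 *	...
 *	hammer2_trans_done(pmp);
 */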
1577
1578 /*
1579  * Inode create helper (threaded, backend)
1580  *
1581  * Used by ncreate, nmknod, nsymlink, nmkdir.
1582  * Used by nlink and rename to create HARDLINK pointers.
1583  *
1584  * Frontend holds the parent directory ip locked exclusively.  We
1585  * create the inode and feed the exclusively locked chain to the
1586  * frontend.
1587  */
1588 void
1589 hammer2_inode_xop_create(hammer2_xop_t *arg, int clindex)
1590 {
1591         hammer2_xop_create_t *xop = &arg->xop_create;
1592         hammer2_chain_t *parent;
1593         hammer2_chain_t *chain;
1594         hammer2_key_t key_next;
1595         int cache_index = -1;
1596         int error;
1597
1598         if (hammer2_debug & 0x0001)
1599                 kprintf("inode_create lhc %016jx clindex %d\n",
1600                         xop->lhc, clindex);
1601
1602         parent = hammer2_inode_chain(xop->head.ip1, clindex,
1603                                      HAMMER2_RESOLVE_ALWAYS);
1604         if (parent == NULL) {
1605                 error = EIO;
1606                 chain = NULL;
1607                 goto fail;
1608         }
1609         chain = hammer2_chain_lookup(&parent, &key_next,
1610                                      xop->lhc, xop->lhc,
1611                                      &cache_index, 0);
1612         if (chain) {
1613                 error = EEXIST;
1614                 goto fail;
1615         }
1616
1617         error = hammer2_chain_create(&parent, &chain,
1618                                      xop->head.ip1->pmp,
1619                                      xop->lhc, 0,
1620                                      HAMMER2_BREF_TYPE_INODE,
1621                                      HAMMER2_INODE_BYTES,
1622                                      xop->head.mtid, 0, xop->flags);
1623         if (error == 0) {
1624                 hammer2_chain_modify(chain, xop->head.mtid, 0, 0);
1625                 chain->data->ipdata.meta = xop->meta;
1626                 if (xop->head.name1) {
1627                         bcopy(xop->head.name1,
1628                               chain->data->ipdata.filename,
1629                               xop->head.name1_len);
1630                         chain->data->ipdata.meta.name_len = xop->head.name1_len;
1631                 }
1632                 chain->data->ipdata.meta.name_key = xop->lhc;
1633         }
1634 fail:
1635         if (parent) {
1636                 hammer2_chain_unlock(parent);
1637                 hammer2_chain_drop(parent);
1638         }
1639         hammer2_xop_feed(&xop->head, chain, clindex, error);
1640         if (chain) {
1641                 hammer2_chain_unlock(chain);
1642                 hammer2_chain_drop(chain);
1643         }
1644 }
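
/*
 * Frontend dispatch for this helper, sketched from the xop conventions
 * used in this file (the variable names are illustrative):
 *
 *	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
 *	hammer2_xop_setname(&xop->head, name, name_len);
 *	xop->lhc = lhc;
 *	xop->flags = flags;
 *	xop->meta = nip->meta;
 *	hammer2_xop_start(&xop->head, hammer2_inode_xop_create);
 *	error = hammer2_xop_collect(&xop->head, 0);
 *	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
 */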
1645
1646 /*
1647  * Inode delete helper (backend, threaded)
1648  *
1649  * Generally used by hammer2_inode_run_sideq()
1650  */
1651 void
1652 hammer2_inode_xop_destroy(hammer2_xop_t *arg, int clindex)
1653 {
1654         hammer2_xop_destroy_t *xop = &arg->xop_destroy;
1655         hammer2_pfs_t *pmp;
1656         hammer2_chain_t *parent;
1657         hammer2_chain_t *chain;
1658         hammer2_inode_t *ip;
1659         int error;
1660
1661         /*
1662          * We need the precise parent chain to issue the deletion.
1663          */
1664         ip = xop->head.ip1;
1665         pmp = ip->pmp;
1666         chain = NULL;
1667
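        /*
         * hammer2_chain_getparent() replaces the chain we look up with
         * its parent in-place, transferring the lock, leaving us with
         * the locked parent rather than the inode chain itself.
         */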
1668         parent = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
1669         if (parent)
1670                 hammer2_chain_getparent(&parent, HAMMER2_RESOLVE_ALWAYS);
1671         if (parent == NULL) {
1672                 error = EIO;
1673                 goto done;
1674         }
1675         chain = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
1676         if (chain == NULL) {
1677                 error = EIO;
1678                 goto done;
1679         }
1680         hammer2_chain_delete(parent, chain, xop->head.mtid, 0);
1681         error = 0;
1682 done:
1683         hammer2_xop_feed(&xop->head, NULL, clindex, error);
1684         if (parent) {
1685                 hammer2_chain_unlock(parent);
1686                 hammer2_chain_drop(parent);
1687         }
1688         if (chain) {
1689                 hammer2_chain_unlock(chain);
1690                 hammer2_chain_drop(chain);
1691         }
1692 }
1693
1694 void
1695 hammer2_inode_xop_unlinkall(hammer2_xop_t *arg, int clindex)
1696 {
1697         hammer2_xop_unlinkall_t *xop = &arg->xop_unlinkall;
1698         hammer2_chain_t *parent;
1699         hammer2_chain_t *chain;
1700         hammer2_key_t key_next;
1701         int cache_index = -1;
1702
1703         /*
1704          * We need the precise parent chain to issue the deletion.
1705          */
1706         parent = hammer2_inode_chain(xop->head.ip1, clindex,
1707                                      HAMMER2_RESOLVE_ALWAYS);
1708         chain = NULL;
1709         if (parent == NULL) {
1710                 /* XXX error */
1711                 goto done;
1712         }
1713         chain = hammer2_chain_lookup(&parent, &key_next,
1714                                      xop->key_beg, xop->key_end,
1715                                      &cache_index,
1716                                      HAMMER2_LOOKUP_ALWAYS);
1717         while (chain) {
1718                 hammer2_chain_delete(parent, chain,
1719                                      xop->head.mtid, HAMMER2_DELETE_PERMANENT);
1720                 hammer2_xop_feed(&xop->head, chain, clindex, chain->error);
1721                 /* depend on hammer2_chain_next() to unlock the old chain */
1722                 chain = hammer2_chain_next(&parent, chain, &key_next,
1723                                            key_next, xop->key_end,
1724                                            &cache_index,
1725                                            HAMMER2_LOOKUP_ALWAYS);
1726         }
1727 done:
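        /*
         * Terminating feed: ENOENT tells the frontend collector that
         * this cluster node has no more chains to report.
         */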
1728         hammer2_xop_feed(&xop->head, NULL, clindex, ENOENT);
1729         if (parent) {
1730                 hammer2_chain_unlock(parent);
1731                 hammer2_chain_drop(parent);
1732         }
1733         if (chain) {
1734                 hammer2_chain_unlock(chain);
1735                 hammer2_chain_drop(chain);
1736         }
1737 }
1738
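/*
 * Inode connect helper (backend, threaded)
 *
 * Used by the frontend to connect an inode chain under a (possibly new)
 * parent directory with a (possibly new) name, e.g. when establishing
 * hardlink targets and similar relinking operations.
 */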
1739 void
1740 hammer2_inode_xop_connect(hammer2_xop_t *arg, int clindex)
1741 {
1742         hammer2_xop_connect_t *xop = &arg->xop_connect;
1743         hammer2_inode_data_t *wipdata;
1744         hammer2_chain_t *parent;
1745         hammer2_chain_t *chain;
1746         hammer2_pfs_t *pmp;
1747         hammer2_key_t key_dummy;
1748         int cache_index = -1;
1749         int error;
1750
1751         /*
1752          * Get directory, then issue a lookup to prime the parent chain
1753          * for the create.  The lookup is expected to fail.
1754          */
1755         pmp = xop->head.ip1->pmp;
1756         parent = hammer2_inode_chain(xop->head.ip1, clindex,
1757                                      HAMMER2_RESOLVE_ALWAYS);
1758         if (parent == NULL) {
1759                 chain = NULL;
1760                 error = EIO;
1761                 goto fail;
1762         }
1763         chain = hammer2_chain_lookup(&parent, &key_dummy,
1764                                      xop->lhc, xop->lhc,
1765                                      &cache_index, 0);
1766         if (chain) {
1767                 hammer2_chain_unlock(chain);
1768                 hammer2_chain_drop(chain);
1769                 chain = NULL;
1770                 error = EEXIST;
1771                 goto fail;
1772         }
1773
1774         /*
1775          * Adjust the filename in the inode, set the name key.
1776          *
1777          * NOTE: Frontend must also adjust ip2->meta on success, we can't
1778          *       do it here.
1779          */
1780         chain = hammer2_inode_chain(xop->head.ip2, clindex,
1781                                     HAMMER2_RESOLVE_ALWAYS);
1782         hammer2_chain_modify(chain, xop->head.mtid, 0, 0);
1783         wipdata = &chain->data->ipdata;
1784
1785         hammer2_inode_modify(xop->head.ip2);
1786         if (xop->head.name1) {
1787                 bzero(wipdata->filename, sizeof(wipdata->filename));
1788                 bcopy(xop->head.name1, wipdata->filename, xop->head.name1_len);
1789                 wipdata->meta.name_len = xop->head.name1_len;
1790         }
1791         wipdata->meta.name_key = xop->lhc;
1792
1793         /*
1794          * Reconnect the chain to the new parent directory
1795          */
1796         error = hammer2_chain_create(&parent, &chain, pmp,
1797                                      xop->lhc, 0,
1798                                      HAMMER2_BREF_TYPE_INODE,
1799                                      HAMMER2_INODE_BYTES,
1800                                      xop->head.mtid, 0, 0);
1801
1802         /*
1803          * Feed result back.
1804          */
1805 fail:
1806         hammer2_xop_feed(&xop->head, NULL, clindex, error);
1807         if (parent) {
1808                 hammer2_chain_unlock(parent);
1809                 hammer2_chain_drop(parent);
1810         }
1811         if (chain) {
1812                 hammer2_chain_unlock(chain);
1813                 hammer2_chain_drop(chain);
1814         }
1815 }
1816
1817 /*
1818  * Synchronize the in-memory inode with the chain.
1819  */
1820 void
1821 hammer2_inode_xop_chain_sync(hammer2_xop_t *arg, int clindex)
1822 {
1823         hammer2_xop_fsync_t *xop = &arg->xop_fsync;
1824         hammer2_chain_t *parent;
1825         hammer2_chain_t *chain;
1826         int error;
1827
1828         parent = hammer2_inode_chain(xop->head.ip1, clindex,
1829                                      HAMMER2_RESOLVE_ALWAYS);
1830         chain = NULL;
1831         if (parent == NULL) {
1832                 error = EIO;
1833                 goto done;
1834         }
1835         if (parent->error) {
1836                 error = parent->error;
1837                 goto done;
1838         }
1839
1840         error = 0;
1841
1842         if ((xop->ipflags & HAMMER2_INODE_RESIZED) == 0) {
1843                 /* osize must be ignored */
1844         } else if (xop->meta.size < xop->osize) {
1845                 /*
1846                  * We must delete any chains beyond the EOF.  The chain
1847                  * straddling the EOF will be pending in the bioq.
1848                  */
1849                 hammer2_key_t lbase;
1850                 hammer2_key_t key_next;
1851                 int cache_index = -1;
1852
1853                 lbase = (xop->meta.size + HAMMER2_PBUFMASK64) &
1854                         ~HAMMER2_PBUFMASK64;
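                /*
                 * lbase rounds the new EOF up to the next PBUF boundary.
                 * For example, with a 64KB HAMMER2_PBUFSIZE a new size of
                 * 100000 yields lbase 131072: the buffer straddling the
                 * EOF (65536-131071) is retained and handled via the
                 * bioq, while chains at or beyond lbase are deleted in
                 * the loop below.
                 */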
1855                 chain = hammer2_chain_lookup(&parent, &key_next,
1856                                              lbase, HAMMER2_KEY_MAX,
1857                                              &cache_index,
1858                                              HAMMER2_LOOKUP_NODATA |
1859                                              HAMMER2_LOOKUP_NODIRECT);
1860                 while (chain) {
1861                         /*
1862                          * Degenerate embedded case, nothing to loop on
1863                          */
1864                         switch (chain->bref.type) {
1865                         case HAMMER2_BREF_TYPE_INODE:
1866                                 KKASSERT(0);
1867                                 break;
1868                         case HAMMER2_BREF_TYPE_DATA:
1869                                 hammer2_chain_delete(parent, chain,
1870                                                      xop->head.mtid,
1871                                                      HAMMER2_DELETE_PERMANENT);
1872                                 break;
1873                         }
1874                         chain = hammer2_chain_next(&parent, chain, &key_next,
1875                                                    key_next, HAMMER2_KEY_MAX,
1876                                                    &cache_index,
1877                                                    HAMMER2_LOOKUP_NODATA |
1878                                                    HAMMER2_LOOKUP_NODIRECT);
1879                 }
1880
1881                 /*
1882                  * Reset to point at inode for following code, if necessary.
1883                  */
1884                 if (parent->bref.type != HAMMER2_BREF_TYPE_INODE) {
1885                         hammer2_chain_unlock(parent);
1886                         hammer2_chain_drop(parent);
1887                         parent = hammer2_inode_chain(xop->head.ip1, clindex,
1888                                                      HAMMER2_RESOLVE_ALWAYS);
1889                         kprintf("hammer2: TRUNCATE RESET on '%s'\n",
1890                                 parent->data->ipdata.filename);
1891                 }
1892         }
1893
1894         /*
1895          * Sync the inode meta-data, potentially clearing the blockset area
1896          * of direct data so it can be used for blockrefs.
1897          */
1898         hammer2_chain_modify(parent, xop->head.mtid, 0, 0);
1899         parent->data->ipdata.meta = xop->meta;
1900         if (xop->clear_directdata) {
1901                 bzero(&parent->data->ipdata.u.blockset,
1902                       sizeof(parent->data->ipdata.u.blockset));
1903         }
1904 done:
1905         if (chain) {
1906                 hammer2_chain_unlock(chain);
1907                 hammer2_chain_drop(chain);
1908         }
1909         if (parent) {
1910                 hammer2_chain_unlock(parent);
1911                 hammer2_chain_drop(parent);
1912         }
1913         hammer2_xop_feed(&xop->head, NULL, clindex, error);
1914 }
1915