/*
 * Copyright (c) 2011-2014 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/uuid.h>

#include "hammer2.h"

#define INODE_DEBUG     0

RB_GENERATE2(hammer2_inode_tree, hammer2_inode, rbnode, hammer2_inode_cmp,
             hammer2_tid_t, meta.inum);

int
hammer2_inode_cmp(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
{
        if (ip1->meta.inum < ip2->meta.inum)
                return(-1);
        if (ip1->meta.inum > ip2->meta.inum)
                return(1);
        return(0);
}

static __inline
void
hammer2_knote(struct vnode *vp, int flags)
{
        if (flags)
                KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
}

/*
 * Queue an inode to the pmp's SIDEQ for delayed handling by the
 * filesystem sync.  The ONSIDEQ flag is tested twice, optimistically
 * and then again under list_spin, so a race against another thread
 * cannot queue the inode twice.  The queue entry holds a ref on the
 * inode.
 */
static
void
hammer2_inode_delayed_sideq(hammer2_inode_t *ip)
{
        hammer2_inode_sideq_t *ipul;
        hammer2_pfs_t *pmp = ip->pmp;

        if ((ip->flags & HAMMER2_INODE_ONSIDEQ) == 0) {
                ipul = kmalloc(sizeof(*ipul), pmp->minode,
                               M_WAITOK | M_ZERO);
                ipul->ip = ip;
                hammer2_spin_ex(&pmp->list_spin);
                if ((ip->flags & HAMMER2_INODE_ONSIDEQ) == 0) {
                        hammer2_inode_ref(ip);
                        atomic_set_int(&ip->flags,
                                       HAMMER2_INODE_ONSIDEQ);
                        TAILQ_INSERT_TAIL(&pmp->sideq, ipul, entry);
                        hammer2_spin_unex(&pmp->list_spin);
                } else {
                        hammer2_spin_unex(&pmp->list_spin);
                        kfree(ipul, pmp->minode);
                }
        }
}

/*
 * HAMMER2 inode locks
 *
 * HAMMER2 offers shared and exclusive locks on inodes.  Pass a mask of
 * flags for options:
 *
 *      - pass HAMMER2_RESOLVE_SHARED if a shared lock is desired.  The
 *        inode locking function will automatically set the RDONLY flag.
 *
 *      - pass HAMMER2_RESOLVE_ALWAYS if you need the inode's meta-data.
 *        Most front-end inode locks do.
 *
 *      - pass HAMMER2_RESOLVE_NEVER if you do not want to require that
 *        the inode data be resolved.  This is used by the syncthr because
 *        it can run on an unresolved/out-of-sync cluster, and also by the
 *        vnode reclamation code to avoid unnecessary I/O (particularly when
 *        disposing of hundreds of thousands of cached vnodes).
 *
 * The inode locking function locks the inode itself, resolves any stale
 * chains in the inode's cluster, and allocates a fresh copy of the
 * cluster with 1 ref and all the underlying chains locked.
 *
 * ip->cluster will be stable while the inode is locked.
 *
 * NOTE: We don't combine the inode/chain lock because putting away an
 *       inode would otherwise confuse multiple lock holders of the inode.
 *
 * NOTE: In-memory inodes always point to hardlink targets (the actual file),
 *       and never point to a hardlink pointer.
 *
 * NOTE: If caller passes HAMMER2_RESOLVE_RDONLY the exclusive locking code
 *       will feel free to reduce the chain set in the cluster as an
 *       optimization.  It will still be validated against the quorum if
 *       appropriate, but the optimization might be able to reduce data
 *       accesses to one node.  This flag is automatically set if the inode
 *       is locked with HAMMER2_RESOLVE_SHARED.
 */
void
hammer2_inode_lock(hammer2_inode_t *ip, int how)
{
        hammer2_inode_ref(ip);

        /*
         * Inode structure mutex
         */
        if (how & HAMMER2_RESOLVE_SHARED) {
                /*how |= HAMMER2_RESOLVE_RDONLY; not used */
                hammer2_mtx_sh(&ip->lock);
        } else {
                hammer2_mtx_ex(&ip->lock);
        }
}
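
/*
 * Example (a hypothetical front-end caller, sketch only): the lock/unlock
 * pair brackets any access to ip->meta.
 *
 *      hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
 *      ... read-only access to ip->meta ...
 *      hammer2_inode_unlock(ip);
 */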

/*
 * Select a chain out of an inode's cluster and lock it.
 *
 * The inode does not have to be locked.
 */
hammer2_chain_t *
hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how)
{
        hammer2_chain_t *chain;
        hammer2_cluster_t *cluster;

        hammer2_spin_sh(&ip->cluster_spin);
        cluster = &ip->cluster;
        if (clindex >= cluster->nchains)
                chain = NULL;
        else
                chain = cluster->array[clindex].chain;
        if (chain) {
                hammer2_chain_ref(chain);
                hammer2_spin_unsh(&ip->cluster_spin);
                hammer2_chain_lock(chain, how);
        } else {
                hammer2_spin_unsh(&ip->cluster_spin);
        }
        return chain;
}
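
/*
 * Sketch of a typical caller (hypothetical): the returned chain comes
 * back referenced and locked and must be released with both an unlock
 * and a drop.
 *
 *      chain = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
 *      if (chain) {
 *              ... use chain ...
 *              hammer2_chain_unlock(chain);
 *              hammer2_chain_drop(chain);
 *      }
 */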

/*
 * Select a chain out of an inode's cluster and lock both it and its
 * parent, in (parent, chain) lock order, retrying if the cluster was
 * repointed while the locks were being acquired.
 *
 * The inode does not have to be locked.
 */
hammer2_chain_t *
hammer2_inode_chain_and_parent(hammer2_inode_t *ip, int clindex,
                               hammer2_chain_t **parentp, int how)
{
        hammer2_chain_t *chain;
        hammer2_chain_t *parent;

        for (;;) {
                hammer2_spin_sh(&ip->cluster_spin);
                if (clindex >= ip->cluster.nchains)
                        chain = NULL;
                else
                        chain = ip->cluster.array[clindex].chain;
                if (chain) {
                        hammer2_chain_ref(chain);
                        hammer2_spin_unsh(&ip->cluster_spin);
                        hammer2_chain_lock(chain, how);
                } else {
                        hammer2_spin_unsh(&ip->cluster_spin);
                }

                /*
                 * Get parent, lock order must be (parent, chain).
                 */
                parent = chain->parent;
                if (parent) {
                        hammer2_chain_ref(parent);
                        hammer2_chain_unlock(chain);
                        hammer2_chain_lock(parent, how);
                        hammer2_chain_lock(chain, how);
                }
                if (ip->cluster.array[clindex].chain == chain &&
                    chain->parent == parent) {
                        break;
                }

                /*
                 * Retry
                 */
                hammer2_chain_unlock(chain);
                hammer2_chain_drop(chain);
                if (parent) {
                        hammer2_chain_unlock(parent);
                        hammer2_chain_drop(parent);
                }
        }
        *parentp = parent;

        return chain;
}
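
/*
 * Caller sketch (hypothetical): both chains come back referenced and
 * locked; the parent must be released as well.
 *
 *      chain = hammer2_inode_chain_and_parent(ip, clindex, &parent,
 *                                             HAMMER2_RESOLVE_ALWAYS);
 *      ... use parent and chain ...
 *      hammer2_chain_unlock(chain);
 *      hammer2_chain_drop(chain);
 *      if (parent) {
 *              hammer2_chain_unlock(parent);
 *              hammer2_chain_drop(parent);
 *      }
 */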

/*
 * Release an inode lock obtained via hammer2_inode_lock() and drop the
 * ref that function acquired.
 */
void
hammer2_inode_unlock(hammer2_inode_t *ip)
{
        hammer2_mtx_unlock(&ip->lock);
        hammer2_inode_drop(ip);
}

/*
 * Temporarily release a lock held shared or exclusive.  The caller must
 * hold the lock shared or exclusive on call; the lock will be released
 * on return.
 *
 * hammer2_inode_lock_temp_restore() restores a lock that was temporarily
 * released.
 */
hammer2_mtx_state_t
hammer2_inode_lock_temp_release(hammer2_inode_t *ip)
{
        return hammer2_mtx_temp_release(&ip->lock);
}

void
hammer2_inode_lock_temp_restore(hammer2_inode_t *ip, hammer2_mtx_state_t ostate)
{
        hammer2_mtx_temp_restore(&ip->lock, ostate);
}

/*
 * Upgrade a shared inode lock to exclusive and return.  If the inode lock
 * is already held exclusively this is a NOP.
 *
 * The caller MUST hold the inode lock either shared or exclusive on call
 * and will own the lock exclusively on return.
 *
 * Returns non-zero if the lock was already exclusive prior to the upgrade.
 */
int
hammer2_inode_lock_upgrade(hammer2_inode_t *ip)
{
        int wasexclusive;

        if (mtx_islocked_ex(&ip->lock)) {
                wasexclusive = 1;
        } else {
                hammer2_mtx_unlock(&ip->lock);
                hammer2_mtx_ex(&ip->lock);
                wasexclusive = 0;
        }
        return wasexclusive;
}

/*
 * Downgrade an inode lock from exclusive to shared only if the inode
 * lock was previously shared.  If the inode lock was previously exclusive,
 * this is a NOP.
 */
void
hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int wasexclusive)
{
        if (wasexclusive == 0)
                mtx_downgrade(&ip->lock);
}
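
/*
 * Sketch of the upgrade/downgrade pattern (hypothetical caller already
 * holding the inode lock shared or exclusive):
 *
 *      wasexclusive = hammer2_inode_lock_upgrade(ip);
 *      ... perform work requiring the exclusive lock ...
 *      hammer2_inode_lock_downgrade(ip, wasexclusive);
 */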

/*
 * Lookup an inode by inode number
 */
hammer2_inode_t *
hammer2_inode_lookup(hammer2_pfs_t *pmp, hammer2_tid_t inum)
{
        hammer2_inode_t *ip;

        KKASSERT(pmp);
        if (pmp->spmp_hmp) {
                ip = NULL;
        } else {
                hammer2_spin_ex(&pmp->inum_spin);
                ip = RB_LOOKUP(hammer2_inode_tree, &pmp->inum_tree, inum);
                if (ip)
                        hammer2_inode_ref(ip);
                hammer2_spin_unex(&pmp->inum_spin);
        }
        return(ip);
}
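
/*
 * Sketch (hypothetical caller): a successful lookup returns a referenced
 * but unlocked inode; the caller must eventually drop the ref.
 *
 *      ip = hammer2_inode_lookup(pmp, inum);
 *      if (ip) {
 *              ... use ip (lock it first if ip->meta is needed) ...
 *              hammer2_inode_drop(ip);
 *      }
 */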

/*
 * Adding a ref to an inode is only legal if the inode already has at least
 * one ref.
 *
 * (can be called with spinlock held)
 */
void
hammer2_inode_ref(hammer2_inode_t *ip)
{
        atomic_add_int(&ip->refs, 1);
        if (hammer2_debug & 0x80000) {
                kprintf("INODE+1 %p (%d->%d)\n", ip, ip->refs - 1, ip->refs);
                print_backtrace(8);
        }
}

/*
 * Drop an inode reference, freeing the inode when the last reference goes
 * away.
 */
void
hammer2_inode_drop(hammer2_inode_t *ip)
{
        hammer2_pfs_t *pmp;
        u_int refs;

        while (ip) {
                if (hammer2_debug & 0x80000) {
                        kprintf("INODE-1 %p (%d->%d)\n",
                                ip, ip->refs, ip->refs - 1);
                        print_backtrace(8);
                }
                refs = ip->refs;
                cpu_ccfence();
                if (refs == 1) {
                        /*
                         * Transition to zero, must interlock with
                         * the inode inumber lookup tree (if applicable).
                         * It should not be possible for anyone to race
                         * the transition to 0.
                         */
                        pmp = ip->pmp;
                        KKASSERT(pmp);
                        hammer2_spin_ex(&pmp->inum_spin);

                        if (atomic_cmpset_int(&ip->refs, 1, 0)) {
                                KKASSERT(hammer2_mtx_refs(&ip->lock) == 0);
                                if (ip->flags & HAMMER2_INODE_ONRBTREE) {
                                        atomic_clear_int(&ip->flags,
                                                     HAMMER2_INODE_ONRBTREE);
                                        RB_REMOVE(hammer2_inode_tree,
                                                  &pmp->inum_tree, ip);
                                }
                                hammer2_spin_unex(&pmp->inum_spin);

                                ip->pmp = NULL;

                                /*
                                 * Cleaning out ip->cluster isn't entirely
                                 * trivial.
                                 */
                                hammer2_inode_repoint(ip, NULL, NULL);

                                kfree(ip, pmp->minode);
                                atomic_add_long(&pmp->inmem_inodes, -1);
                                ip = NULL;      /* will terminate loop */
                        } else {
                                hammer2_spin_unex(&ip->pmp->inum_spin);
                        }
                } else {
                        /*
                         * Non-zero transition
                         */
                        if (atomic_cmpset_int(&ip->refs, refs, refs - 1))
                                break;
                }
        }
}

/*
 * Get the vnode associated with the given inode, allocating the vnode if
 * necessary.  The vnode will be returned exclusively locked.
 *
 * The caller must lock the inode (shared or exclusive).
 *
 * Great care must be taken to avoid deadlocks and vnode acquisition/reclaim
 * races.
 */
struct vnode *
hammer2_igetv(hammer2_inode_t *ip, int *errorp)
{
        hammer2_pfs_t *pmp;
        struct vnode *vp;

        pmp = ip->pmp;
        KKASSERT(pmp != NULL);
        *errorp = 0;

        for (;;) {
                /*
                 * Attempt to reuse an existing vnode assignment.  It is
                 * possible to race a reclaim so the vget() may fail.  The
                 * inode must be unlocked during the vget() to avoid a
                 * deadlock against a reclaim.
                 */
                int wasexclusive;

                vp = ip->vp;
                if (vp) {
                        /*
                         * Inode must be unlocked during the vget() to avoid
                         * possible deadlocks, but leave the ip ref intact.
                         *
                         * vnode is held to prevent destruction during the
                         * vget().  The vget() can still fail if we lost
                         * a reclaim race on the vnode.
                         */
                        hammer2_mtx_state_t ostate;

                        vhold(vp);
                        ostate = hammer2_inode_lock_temp_release(ip);
                        if (vget(vp, LK_EXCLUSIVE)) {
                                vdrop(vp);
                                hammer2_inode_lock_temp_restore(ip, ostate);
                                continue;
                        }
                        hammer2_inode_lock_temp_restore(ip, ostate);
                        vdrop(vp);
                        /* vp still locked and ref from vget */
                        if (ip->vp != vp) {
                                kprintf("hammer2: igetv race %p/%p\n",
                                        ip->vp, vp);
                                vput(vp);
                                continue;
                        }
                        *errorp = 0;
                        break;
                }

                /*
                 * No vnode exists, allocate a new vnode.  Beware of
                 * allocation races.  This function will return an
                 * exclusively locked and referenced vnode.
                 */
                *errorp = getnewvnode(VT_HAMMER2, pmp->mp, &vp, 0, 0);
                if (*errorp) {
                        kprintf("hammer2: igetv getnewvnode failed %d\n",
                                *errorp);
                        vp = NULL;
                        break;
                }

                /*
                 * Lock the inode and check for an allocation race.
                 */
                wasexclusive = hammer2_inode_lock_upgrade(ip);
                if (ip->vp != NULL) {
                        vp->v_type = VBAD;
                        vx_put(vp);
                        hammer2_inode_lock_downgrade(ip, wasexclusive);
                        continue;
                }

                switch (ip->meta.type) {
                case HAMMER2_OBJTYPE_DIRECTORY:
                        vp->v_type = VDIR;
                        break;
                case HAMMER2_OBJTYPE_REGFILE:
                        vp->v_type = VREG;
                        vinitvmio(vp, ip->meta.size,
                                  HAMMER2_LBUFSIZE,
                                  (int)ip->meta.size & HAMMER2_LBUFMASK);
                        break;
                case HAMMER2_OBJTYPE_SOFTLINK:
                        /*
                         * XXX for now we are using the generic file_read
                         * and file_write code so we need a buffer cache
                         * association.
                         */
                        vp->v_type = VLNK;
                        vinitvmio(vp, ip->meta.size,
                                  HAMMER2_LBUFSIZE,
                                  (int)ip->meta.size & HAMMER2_LBUFMASK);
                        break;
                case HAMMER2_OBJTYPE_CDEV:
                        vp->v_type = VCHR;
                        /* fall through */
                case HAMMER2_OBJTYPE_BDEV:
                        vp->v_ops = &pmp->mp->mnt_vn_spec_ops;
                        if (ip->meta.type != HAMMER2_OBJTYPE_CDEV)
                                vp->v_type = VBLK;
                        addaliasu(vp,
                                  ip->meta.rmajor,
                                  ip->meta.rminor);
                        break;
                case HAMMER2_OBJTYPE_FIFO:
                        vp->v_type = VFIFO;
                        vp->v_ops = &pmp->mp->mnt_vn_fifo_ops;
                        break;
                case HAMMER2_OBJTYPE_SOCKET:
                        vp->v_type = VSOCK;
                        break;
                default:
                        panic("hammer2: unhandled objtype %d",
                              ip->meta.type);
                        break;
                }

                if (ip == pmp->iroot)
                        vsetflags(vp, VROOT);

                vp->v_data = ip;
                ip->vp = vp;
                hammer2_inode_ref(ip);          /* vp association */
                hammer2_inode_lock_downgrade(ip, wasexclusive);
                break;
        }

        /*
         * Return non-NULL vp and *errorp == 0, or NULL vp and *errorp != 0.
         */
        if (hammer2_debug & 0x0002) {
                kprintf("igetv vp %p refs 0x%08x aux 0x%08x\n",
                        vp, vp->v_refcnt, vp->v_auxrefs);
        }
        return (vp);
}
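
/*
 * Sketch (hypothetical VNOP caller): the inode must be locked around the
 * call and the returned vnode comes back exclusively locked.
 *
 *      hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
 *      vp = hammer2_igetv(ip, &error);
 *      hammer2_inode_unlock(ip);
 *      if (vp) {
 *              ... use the exclusively locked vp ...
 *              vput(vp);
 *      }
 */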

/*
 * Returns the inode associated with the passed-in cluster, creating the
 * inode if necessary and synchronizing it to the passed-in cluster otherwise.
 * When synchronizing, if idx >= 0, only cluster index (idx) is synchronized.
 * Otherwise the whole cluster is synchronized.
 *
 * The passed-in cluster must be locked and will remain locked on return.
 * The returned inode will be locked and the caller may dispose of both
 * via hammer2_inode_unlock() + hammer2_inode_drop().  However, if the caller
 * needs to resolve a hardlink it must ref/unlock/relock/drop the inode.
 *
 * The hammer2_inode structure regulates the interface between the high level
 * kernel VNOPS API and the filesystem backend (the chains).
 *
 * On return the inode is locked with the supplied cluster.
 */
hammer2_inode_t *
hammer2_inode_get(hammer2_pfs_t *pmp, hammer2_inode_t *dip,
                  hammer2_cluster_t *cluster, int idx)
{
        hammer2_inode_t *nip;
        const hammer2_inode_data_t *iptmp;
        const hammer2_inode_data_t *nipdata;

        KKASSERT(cluster == NULL ||
                 hammer2_cluster_type(cluster) == HAMMER2_BREF_TYPE_INODE);
        KKASSERT(pmp);

        /*
         * Interlocked lookup/ref of the inode.  This code is only needed
         * when looking up inodes with nlinks != 0 (TODO: optimize out
         * otherwise and test for duplicates).
         *
         * Cluster can be NULL during the initial pfs allocation.
         */
again:
        while (cluster) {
                iptmp = &hammer2_cluster_rdata(cluster)->ipdata;
                nip = hammer2_inode_lookup(pmp, iptmp->meta.inum);
                if (nip == NULL)
                        break;

                hammer2_mtx_ex(&nip->lock);

                /*
                 * Handle SMP race (not applicable to the super-root spmp
                 * which can't index inodes due to duplicative inode numbers).
                 */
                if (pmp->spmp_hmp == NULL &&
                    (nip->flags & HAMMER2_INODE_ONRBTREE) == 0) {
                        hammer2_mtx_unlock(&nip->lock);
                        hammer2_inode_drop(nip);
                        continue;
                }
                if (idx >= 0)
                        hammer2_inode_repoint_one(nip, cluster, idx);
                else
                        hammer2_inode_repoint(nip, NULL, cluster);

                return nip;
        }

        /*
         * We couldn't find the inode number, create a new inode.
         */
        nip = kmalloc(sizeof(*nip), pmp->minode, M_WAITOK | M_ZERO);
        spin_init(&nip->cluster_spin, "h2clspin");
        atomic_add_long(&pmp->inmem_inodes, 1);
        hammer2_pfs_memory_inc(pmp);
        hammer2_pfs_memory_wakeup(pmp);
        if (pmp->spmp_hmp)
                nip->flags = HAMMER2_INODE_SROOT;

        /*
         * Initialize nip's cluster.  A cluster is provided for normal
         * inodes but typically not for the super-root or PFS inodes.
         */
        nip->cluster.refs = 1;
        nip->cluster.pmp = pmp;
        nip->cluster.flags |= HAMMER2_CLUSTER_INODE;
        if (cluster) {
                nipdata = &hammer2_cluster_rdata(cluster)->ipdata;
                nip->meta = nipdata->meta;
                atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD);
                hammer2_inode_repoint(nip, NULL, cluster);
        } else {
                nip->meta.inum = 1;             /* PFS inum is always 1 XXX */
                /* mtime will be updated when a cluster is available */
                atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD);/*XXX*/
        }

        nip->pmp = pmp;

        /*
         * ref and lock on nip gives it state compatible to after a
         * hammer2_inode_lock() call.
         */
        nip->refs = 1;
        hammer2_mtx_init(&nip->lock, "h2inode");
        hammer2_mtx_ex(&nip->lock);
        /* combination of thread lock and chain lock == inode lock */

        /*
         * Attempt to add the inode.  If it fails we raced another inode
         * get.  Undo all the work and try again.
         */
        if (pmp->spmp_hmp == NULL) {
                hammer2_spin_ex(&pmp->inum_spin);
                if (RB_INSERT(hammer2_inode_tree, &pmp->inum_tree, nip)) {
                        hammer2_spin_unex(&pmp->inum_spin);
                        hammer2_mtx_unlock(&nip->lock);
                        hammer2_inode_drop(nip);
                        goto again;
                }
                atomic_set_int(&nip->flags, HAMMER2_INODE_ONRBTREE);
                hammer2_spin_unex(&pmp->inum_spin);
        }

        return (nip);
}

/*
 * MESSY! CLEANUP!
 *
 * Create a new inode using the vattr to figure out the type.  A non-zero
 * type field overrides vattr.  We need the directory to set iparent or to
 * use when the inode is directly embedded in a directory (typically super-root
 * entries), but note that this really only applies to OBJTYPE_DIRECTORY as
 * non-directory inodes can be hardlinked.
 *
 * If no error occurs the new inode with its cluster locked is returned.
 *
 * If vap and/or cred are NULL the related fields are not set and the
 * inode type defaults to a directory.  This is used when creating PFSs
 * under the super-root, so the inode number is set to 1 in this case.
 *
 * dip is not locked on entry.
 *
 * NOTE: This function is used to create all manner of inodes, including
 *       super-root entries for snapshots and PFSs.  When used to create a
 *       snapshot the inode will be temporarily associated with the spmp.
 *
 * NOTE: When creating a normal file or directory the name/name_len/lhc
 *       is optional, but is typically specified to make debugging and
 *       recovery easier.
 */
hammer2_inode_t *
hammer2_inode_create(hammer2_inode_t *dip, hammer2_inode_t *pip,
                     struct vattr *vap, struct ucred *cred,
                     const uint8_t *name, size_t name_len, hammer2_key_t lhc,
                     hammer2_key_t inum,
                     uint8_t type, uint8_t target_type,
                     int flags, int *errorp)
{
        hammer2_xop_create_t *xop;
        hammer2_inode_t *nip;
        int error;
        uid_t xuid;
        uuid_t pip_uid;
        uuid_t pip_gid;
        uint32_t pip_mode;
        uint8_t pip_comp_algo;
        uint8_t pip_check_algo;
        hammer2_tid_t pip_inum;

        if (name)
                lhc = hammer2_dirhash(name, name_len);
        *errorp = 0;
        nip = NULL;

        /*
         * Locate the inode or indirect block to create the new
         * entry in.  At the same time check for key collisions
         * and iterate until we don't get one.
         *
         * Lock the directory exclusively for now to guarantee that
         * we can find an unused lhc for the name.  Due to collisions,
         * two different creates can end up with the same lhc so we
         * cannot depend on the OS to prevent the collision.
         */
        hammer2_inode_lock(dip, 0);

        pip_uid = pip->meta.uid;
        pip_gid = pip->meta.gid;
        pip_mode = pip->meta.mode;
        pip_comp_algo = pip->meta.comp_algo;
        pip_check_algo = pip->meta.check_algo;
        pip_inum = (pip == pip->pmp->iroot) ? 1 : pip->meta.inum;

        /*
         * If name specified, locate an unused key in the collision space.
         * Otherwise use the passed-in lhc directly.
         */
        if (name) {
                hammer2_xop_scanlhc_t *sxop;
                hammer2_key_t lhcbase;

                lhcbase = lhc;
                sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
                sxop->lhc = lhc;
                hammer2_xop_start(&sxop->head, hammer2_xop_scanlhc);
                while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
                        if (lhc != sxop->head.cluster.focus->bref.key)
                                break;
                        ++lhc;
                }
                hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

                if (error) {
                        if (error != ENOENT)
                                goto done2;
                        ++lhc;
                        error = 0;
                }
                if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
                        error = ENOSPC;
                        goto done2;
                }
        }

        /*
         * Create the inode with the lhc as the key.
         */
        xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
        xop->lhc = lhc;
        xop->flags = flags;
        bzero(&xop->meta, sizeof(xop->meta));

        if (vap) {
                xop->meta.type = hammer2_get_obj_type(vap->va_type);

                switch (xop->meta.type) {
                case HAMMER2_OBJTYPE_CDEV:
                case HAMMER2_OBJTYPE_BDEV:
                        xop->meta.rmajor = vap->va_rmajor;
                        xop->meta.rminor = vap->va_rminor;
                        break;
                default:
                        break;
                }
                type = xop->meta.type;
        } else {
                xop->meta.type = type;
                xop->meta.target_type = target_type;
        }
        xop->meta.inum = inum;
        xop->meta.iparent = pip_inum;

        /* Inherit parent's inode compression mode. */
        xop->meta.comp_algo = pip_comp_algo;
        xop->meta.check_algo = pip_check_algo;
        xop->meta.version = HAMMER2_INODE_VERSION_ONE;
        hammer2_update_time(&xop->meta.ctime);
        xop->meta.mtime = xop->meta.ctime;
        if (vap)
                xop->meta.mode = vap->va_mode;
        xop->meta.nlinks = 1;
        if (vap) {
                if (dip->pmp) {
                        xuid = hammer2_to_unix_xid(&pip_uid);
                        xuid = vop_helper_create_uid(dip->pmp->mp,
                                                     pip_mode,
                                                     xuid,
                                                     cred,
                                                     &vap->va_mode);
                } else {
                        /* super-root has no dip and/or pmp */
                        xuid = 0;
                }
                if (vap->va_vaflags & VA_UID_UUID_VALID)
                        xop->meta.uid = vap->va_uid_uuid;
                else if (vap->va_uid != (uid_t)VNOVAL)
                        hammer2_guid_to_uuid(&xop->meta.uid, vap->va_uid);
                else
                        hammer2_guid_to_uuid(&xop->meta.uid, xuid);

                if (vap->va_vaflags & VA_GID_UUID_VALID)
                        xop->meta.gid = vap->va_gid_uuid;
                else if (vap->va_gid != (gid_t)VNOVAL)
                        hammer2_guid_to_uuid(&xop->meta.gid, vap->va_gid);
                else
                        xop->meta.gid = pip_gid;
        }

        /*
         * Regular files and softlinks allow a small amount of data to be
         * directly embedded in the inode.  This flag will be cleared if
         * the size is extended past the embedded limit.
         */
        if (xop->meta.type == HAMMER2_OBJTYPE_REGFILE ||
            xop->meta.type == HAMMER2_OBJTYPE_SOFTLINK) {
                xop->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
        }
        if (name) {
                hammer2_xop_setname(&xop->head, name, name_len);
        } else {
                name_len = hammer2_xop_setname_inum(&xop->head, inum);
                KKASSERT(lhc == inum);
        }
        xop->meta.name_len = name_len;
        xop->meta.name_key = lhc;
        KKASSERT(name_len < HAMMER2_INODE_MAXNAME);

        hammer2_xop_start(&xop->head, hammer2_inode_xop_create);

        error = hammer2_xop_collect(&xop->head, 0);
#if INODE_DEBUG
        kprintf("CREATE INODE %*.*s\n",
                (int)name_len, (int)name_len, name);
#endif

        if (error) {
                *errorp = error;
                goto done;
        }

        /*
         * Set up the new inode if not a hardlink pointer.
         *
         * NOTE: *_get() integrates chain's lock into the inode lock.
         *
         * NOTE: Only one new inode can currently be created per
         *       transaction.  If the need arises we can adjust
         *       hammer2_trans_init() to allow more.
         *
         * NOTE: nipdata will have chain's blockset data.
         */
        nip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
        nip->comp_heuristic = 0;
done:
        hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
done2:
        hammer2_inode_unlock(dip);

        return (nip);
}
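
/*
 * Sketch (hypothetical nmkdir-style caller, argument values illustrative
 * only): dip is passed unlocked and the new inode is returned locked on
 * success.
 *
 *      nip = hammer2_inode_create(dip, dip, vap, cred,
 *                                 name, name_len, 0,
 *                                 inum, 0, 0, 0, &error);
 *      if (nip) {
 *              ... igetv, knotes, etc ...
 *              hammer2_inode_unlock(nip);
 *      }
 */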

/*
 * Create a directory entry under dip with the specified name, inode number,
 * and OBJTYPE (type).
 */
int
hammer2_dirent_create(hammer2_inode_t *dip, const char *name, size_t name_len,
                      hammer2_key_t inum, uint8_t type)
{
        hammer2_xop_mkdirent_t *xop;
        hammer2_key_t lhc;
        int error;

        lhc = 0;
        error = 0;

        KKASSERT(name != NULL);
        lhc = hammer2_dirhash(name, name_len);

        /*
         * Locate the inode or indirect block to create the new
         * entry in.  At the same time check for key collisions
         * and iterate until we don't get one.
         *
         * Lock the directory exclusively for now to guarantee that
         * we can find an unused lhc for the name.  Due to collisions,
         * two different creates can end up with the same lhc so we
         * cannot depend on the OS to prevent the collision.
         */
        hammer2_inode_lock(dip, 0);

        /*
         * Locate an unused key in the collision space (a name is always
         * specified here).
         */
        {
                hammer2_xop_scanlhc_t *sxop;
                hammer2_key_t lhcbase;

                lhcbase = lhc;
                sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
                sxop->lhc = lhc;
                hammer2_xop_start(&sxop->head, hammer2_xop_scanlhc);
                while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
                        if (lhc != sxop->head.cluster.focus->bref.key)
                                break;
                        ++lhc;
                }
                hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

                if (error) {
                        if (error != ENOENT)
                                goto done2;
                        ++lhc;
                        error = 0;
                }
                if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
                        error = ENOSPC;
                        goto done2;
                }
        }

        /*
         * Create the directory entry with the lhc as the key.
         */
        xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
        xop->lhc = lhc;
        bzero(&xop->dirent, sizeof(xop->dirent));
        xop->dirent.inum = inum;
        xop->dirent.type = type;
        xop->dirent.namlen = name_len;

        KKASSERT(name_len < HAMMER2_INODE_MAXNAME);
        hammer2_xop_setname(&xop->head, name, name_len);

        hammer2_xop_start(&xop->head, hammer2_inode_xop_mkdirent);

        error = hammer2_xop_collect(&xop->head, 0);

        hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
done2:
        hammer2_inode_unlock(dip);

        return error;
}
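
/*
 * Sketch (hypothetical caller): add a directory entry pointing at an
 * already-allocated inode number.
 *
 *      error = hammer2_dirent_create(dip, name, name_len,
 *                                    inum, HAMMER2_OBJTYPE_REGFILE);
 */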

/*
 * Repoint ip->cluster's chains to cluster's chains and fixup the default
 * focus.  All items, valid or invalid, are repointed.  hammer2_xop_start()
 * filters out invalid or non-matching elements.
 *
 * The caller must hold the inode exclusively locked.  The cluster, if
 * not NULL, must also be locked.
 *
 * Cluster may be NULL to clean out any chains in ip->cluster.
 */
void
hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_inode_t *pip,
                      hammer2_cluster_t *cluster)
{
        hammer2_chain_t *dropch[HAMMER2_MAXCLUSTER];
        hammer2_chain_t *ochain;
        hammer2_chain_t *nchain;
        int i;

        bzero(dropch, sizeof(dropch));

        /*
         * Replace chains in ip->cluster with chains from cluster and
         * adjust the focus if necessary.
         *
         * NOTE: nchain and/or ochain can be NULL due to gaps
         *       in the cluster arrays.
         */
        hammer2_spin_ex(&ip->cluster_spin);
        for (i = 0; cluster && i < cluster->nchains; ++i) {
                /*
                 * Do not replace elements which are the same.  Also handle
                 * element count discrepancies.
                 */
                nchain = cluster->array[i].chain;
                if (i < ip->cluster.nchains) {
                        ochain = ip->cluster.array[i].chain;
                        if (ochain == nchain)
                                continue;
                } else {
                        ochain = NULL;
                }

                /*
                 * Make adjustments
                 */
                ip->cluster.array[i].chain = nchain;
                ip->cluster.array[i].flags &= ~HAMMER2_CITEM_INVALID;
                ip->cluster.array[i].flags |= cluster->array[i].flags &
                                              HAMMER2_CITEM_INVALID;
                if (nchain)
                        hammer2_chain_ref(nchain);
                dropch[i] = ochain;
        }

        /*
         * Release any left-over chains in ip->cluster.
         */
        while (i < ip->cluster.nchains) {
                nchain = ip->cluster.array[i].chain;
                if (nchain) {
                        ip->cluster.array[i].chain = NULL;
                        ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
                }
                dropch[i] = nchain;
                ++i;
        }

        /*
         * Fixup fields.  Note that the inode-embedded cluster is never
         * directly locked.
         */
        if (cluster) {
                ip->cluster.nchains = cluster->nchains;
                ip->cluster.focus = cluster->focus;
                ip->cluster.flags = cluster->flags & ~HAMMER2_CLUSTER_LOCKED;
        } else {
                ip->cluster.nchains = 0;
                ip->cluster.focus = NULL;
                ip->cluster.flags &= ~HAMMER2_CLUSTER_ZFLAGS;
        }

        hammer2_spin_unex(&ip->cluster_spin);

        /*
         * Cleanup outside of spinlock
         */
        while (--i >= 0) {
                if (dropch[i])
                        hammer2_chain_drop(dropch[i]);
        }
}

/*
 * Repoint a single element from the cluster to the ip.  Used by the
 * synchronization threads to piecemeal update inodes.  Does not change
 * focus and requires inode to be re-locked to clean-up flags (XXX).
 */
void
hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
                          int idx)
{
        hammer2_chain_t *ochain;
        hammer2_chain_t *nchain;
        int i;

        hammer2_spin_ex(&ip->cluster_spin);
        KKASSERT(idx < cluster->nchains);
        if (idx < ip->cluster.nchains) {
                ochain = ip->cluster.array[idx].chain;
                nchain = cluster->array[idx].chain;
        } else {
                ochain = NULL;
                nchain = cluster->array[idx].chain;
                ip->cluster.nchains = idx + 1;
                for (i = ip->cluster.nchains; i <= idx; ++i) {
                        bzero(&ip->cluster.array[i],
                              sizeof(ip->cluster.array[i]));
                        ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
                }
        }
        if (ochain != nchain) {
                /*
                 * Make adjustments.
                 */
                ip->cluster.array[idx].chain = nchain;
                ip->cluster.array[idx].flags &= ~HAMMER2_CITEM_INVALID;
                ip->cluster.array[idx].flags |= cluster->array[idx].flags &
                                                HAMMER2_CITEM_INVALID;
        }
        hammer2_spin_unex(&ip->cluster_spin);
        if (ochain != nchain) {
                if (nchain)
                        hammer2_chain_ref(nchain);
                if (ochain)
                        hammer2_chain_drop(ochain);
        }
}

/*
 * Called with a locked inode to finish unlinking an inode after xop_unlink
 * had been run.  This function is responsible for decrementing nlinks.
 *
 * We don't bother decrementing nlinks if the file is not open and this was
 * the last link.
 *
 * If the inode is a hardlink target its chain has not yet been deleted,
 * otherwise its chain has been deleted.
 *
 * If isopen then any prior deletion was not permanent and the inode is
 * left intact with nlinks == 0.
 */
int
hammer2_inode_unlink_finisher(hammer2_inode_t *ip, int isopen)
{
        hammer2_pfs_t *pmp;
        int error;

        pmp = ip->pmp;

        /*
         * Decrement nlinks.  If this is the last link and the file is
         * not open we can just delete the inode and not bother dropping
         * nlinks to 0 (avoiding unnecessary block updates).
         */
        if (ip->meta.nlinks == 1) {
                atomic_set_int(&ip->flags, HAMMER2_INODE_ISUNLINKED);
                if (isopen == 0)
                        goto killit;
        }

        hammer2_inode_modify(ip);
        --ip->meta.nlinks;
        if ((int64_t)ip->meta.nlinks < 0)
                ip->meta.nlinks = 0;    /* safety */

        /*
         * If nlinks is not zero we are done.  However, this should only be
         * possible with a hardlink target.  If the inode is an embedded
         * hardlink nlinks should have dropped to zero, warn and proceed
         * with the next step.
         */
        if (ip->meta.nlinks) {
                if ((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0)
                        return 0;
                kprintf("hammer2_inode_unlink: nlinks was not 0 (%jd)\n",
                        (intmax_t)ip->meta.nlinks);
                return 0;
        }

        if (ip->vp)
                hammer2_knote(ip->vp, NOTE_DELETE);

        /*
         * nlinks is now zero, delete the inode if not open.
         */
        if (isopen == 0) {
                hammer2_xop_destroy_t *xop;

killit:
                atomic_set_int(&ip->flags, HAMMER2_INODE_ISDELETED);
                xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
                hammer2_xop_start(&xop->head, hammer2_inode_xop_destroy);
                error = hammer2_xop_collect(&xop->head, 0);
                hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
        }
        error = 0;
        return error;
}

/*
 * Mark an inode as being modified, meaning that the caller will modify
 * ip->meta.
 *
 * If a vnode is present we set the vnode dirty and the nominal filesystem
 * sync will also handle synchronizing the inode meta-data.  If no vnode
 * is present we must ensure that the inode is on pmp->sideq.
 *
 * NOTE: No mtid (modify_tid) is passed into this routine.  The caller is
 *       only modifying the in-memory inode.  A modify_tid is synchronized
 *       later when the inode gets flushed.
 */
void
hammer2_inode_modify(hammer2_inode_t *ip)
{
        hammer2_pfs_t *pmp;

        atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
        if (ip->vp) {
                vsetisdirty(ip->vp);
        } else if ((pmp = ip->pmp) != NULL) {
                hammer2_inode_delayed_sideq(ip);
        }
}
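
/*
 * Sketch of the modify/sync pattern (hypothetical caller, inside a
 * transaction with the inode locked exclusively): mark the inode
 * modified before mutating ip->meta, then sync prior to flush.
 *
 *      hammer2_inode_modify(ip);
 *      ip->meta.mtime = ...;
 *      hammer2_inode_chain_sync(ip);
 */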

/*
 * Synchronize the inode's frontend state with the chain state prior
 * to any explicit flush of the inode or any strategy write call.
 *
 * Called with a locked inode inside a transaction.
 */
void
hammer2_inode_chain_sync(hammer2_inode_t *ip)
{
        if (ip->flags & (HAMMER2_INODE_RESIZED | HAMMER2_INODE_MODIFIED)) {
                hammer2_xop_fsync_t *xop;
                int error;

                xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
                xop->clear_directdata = 0;
                if (ip->flags & HAMMER2_INODE_RESIZED) {
                        if ((ip->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) &&
                            ip->meta.size > HAMMER2_EMBEDDED_BYTES) {
                                ip->meta.op_flags &= ~HAMMER2_OPFLAG_DIRECTDATA;
                                xop->clear_directdata = 1;
                        }
                        xop->osize = ip->osize;
                } else {
                        xop->osize = ip->meta.size;     /* safety */
                }
                xop->ipflags = ip->flags;
                xop->meta = ip->meta;

                atomic_clear_int(&ip->flags, HAMMER2_INODE_RESIZED |
                                             HAMMER2_INODE_MODIFIED);
                hammer2_xop_start(&xop->head, hammer2_inode_xop_chain_sync);
                error = hammer2_xop_collect(&xop->head, 0);
                hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
                if (error == ENOENT)
                        error = 0;
                if (error) {
                        kprintf("hammer2: unable to fsync inode %p\n", ip);
                        /*
                        atomic_set_int(&ip->flags,
                                       xop->ipflags & (HAMMER2_INODE_RESIZED |
                                                       HAMMER2_INODE_MODIFIED));
                        */
                        /* XXX return error somehow? */
                }
        }
}

/*
 * The normal filesystem sync no longer has visibility to an inode structure
 * after its vnode has been reclaimed.  In this situation an unlinked-but-open
 * inode or a dirty inode may require additional processing to synchronize
 * ip->meta to its underlying cluster nodes.
 *
 * In particular, reclaims can occur in almost any state (for example, when
 * doing operations on unrelated vnodes) and flushing the reclaimed inode
 * in the reclaim path itself is a non-starter.
 *
 * Caller must be in a transaction.
 */
void
hammer2_inode_run_sideq(hammer2_pfs_t *pmp)
{
        hammer2_xop_destroy_t *xop;
        hammer2_inode_sideq_t *ipul;
        hammer2_inode_t *ip;
        int error;

        if (TAILQ_EMPTY(&pmp->sideq))
                return;

        LOCKSTART;
        hammer2_spin_ex(&pmp->list_spin);
        while ((ipul = TAILQ_FIRST(&pmp->sideq)) != NULL) {
                TAILQ_REMOVE(&pmp->sideq, ipul, entry);
                ip = ipul->ip;
                KKASSERT(ip->flags & HAMMER2_INODE_ONSIDEQ);
                atomic_clear_int(&ip->flags, HAMMER2_INODE_ONSIDEQ);
                hammer2_spin_unex(&pmp->list_spin);
                kfree(ipul, pmp->minode);

                hammer2_inode_lock(ip, 0);
                if (ip->flags & HAMMER2_INODE_ISUNLINKED) {
                        /*
                         * The inode was unlinked while open.  The inode must
                         * be deleted and destroyed.
                         */
                        xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
                        hammer2_xop_start(&xop->head,
                                          hammer2_inode_xop_destroy);
                        error = hammer2_xop_collect(&xop->head, 0);
                        hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
                } else {
                        /*
                         * The inode was dirty as-of the reclaim, requiring
                         * synchronization of ip->meta with its underlying
                         * chains.
                         */
                        hammer2_inode_chain_sync(ip);
                }

                hammer2_inode_unlock(ip);
                hammer2_inode_drop(ip);                 /* ipul ref */

                hammer2_spin_ex(&pmp->list_spin);
        }
        hammer2_spin_unex(&pmp->list_spin);
        LOCKSTOP;
}
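
/*
 * Sketch (hypothetical flush path, assuming the transaction API of this
 * era): the side queue is run from within a transaction, typically
 * during the filesystem sync.
 *
 *      hammer2_trans_init(pmp, 0);
 *      hammer2_inode_run_sideq(pmp);
 *      hammer2_trans_done(pmp);
 */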

/*
 * Helper to create a directory entry.
 */
void
hammer2_inode_xop_mkdirent(hammer2_thread_t *thr, hammer2_xop_t *arg)
{
        hammer2_xop_mkdirent_t *xop = &arg->xop_mkdirent;
        hammer2_chain_t *parent;
        hammer2_chain_t *chain;
        hammer2_key_t key_next;
        size_t data_len;
        int cache_index = -1;
        int error;

        if (hammer2_debug & 0x0001)
                kprintf("dirent_create lhc %016jx clindex %d\n",
                        xop->lhc, thr->clindex);

        parent = hammer2_inode_chain(xop->head.ip1, thr->clindex,
                                     HAMMER2_RESOLVE_ALWAYS);
        if (parent == NULL) {
                error = EIO;
                chain = NULL;
                goto fail;
        }
        chain = hammer2_chain_lookup(&parent, &key_next,
                                     xop->lhc, xop->lhc,
                                     &cache_index, 0);
        if (chain) {
                error = EEXIST;
                goto fail;
        }

        /*
         * We may be able to embed the directory entry directly in the
         * blockref.
         */
        if (xop->dirent.namlen <= sizeof(chain->bref.check.buf))
                data_len = 0;
        else
                data_len = HAMMER2_ALLOC_MIN;

        error = hammer2_chain_create(&parent, &chain,
                                     xop->head.ip1->pmp, HAMMER2_METH_DEFAULT,
                                     xop->lhc, 0,
                                     HAMMER2_BREF_TYPE_DIRENT,
                                     data_len,
                                     xop->head.mtid, 0, 0);
        if (error == 0) {
                hammer2_chain_modify(chain, xop->head.mtid, 0, 0);

                chain->bref.embed.dirent = xop->dirent;
                if (xop->dirent.namlen <= sizeof(chain->bref.check.buf))
                        bcopy(xop->head.name1, chain->bref.check.buf,
                              xop->dirent.namlen);
                else
                        bcopy(xop->head.name1, chain->data->buf,
                              xop->dirent.namlen);
        }
fail:
        if (parent) {
                hammer2_chain_unlock(parent);
                hammer2_chain_drop(parent);
        }
        hammer2_xop_feed(&xop->head, chain, thr->clindex, error);
        if (chain) {
                hammer2_chain_unlock(chain);
                hammer2_chain_drop(chain);
        }
}

/*
 * Inode create helper (threaded, backend)
 *
 * Used by ncreate, nmknod, nsymlink, nmkdir.
 * Used by nlink and rename to create HARDLINK pointers.
 *
 * Frontend holds the parent directory ip locked exclusively.  We
 * create the inode and feed the exclusively locked chain to the
 * frontend.
 */
void
hammer2_inode_xop_create(hammer2_thread_t *thr, hammer2_xop_t *arg)
{
        hammer2_xop_create_t *xop = &arg->xop_create;
        hammer2_chain_t *parent;
        hammer2_chain_t *chain;
        hammer2_key_t key_next;
        int cache_index = -1;
        int error;

        if (hammer2_debug & 0x0001)
                kprintf("inode_create lhc %016jx clindex %d\n",
                        xop->lhc, thr->clindex);

        parent = hammer2_inode_chain(xop->head.ip1, thr->clindex,
                                     HAMMER2_RESOLVE_ALWAYS);
        if (parent == NULL) {
                error = EIO;
                chain = NULL;
                goto fail;
        }
        chain = hammer2_chain_lookup(&parent, &key_next,
                                     xop->lhc, xop->lhc,
                                     &cache_index, 0);
        if (chain) {
                error = EEXIST;
                goto fail;
        }

        error = hammer2_chain_create(&parent, &chain,
                                     xop->head.ip1->pmp, HAMMER2_METH_DEFAULT,
                                     xop->lhc, 0,
                                     HAMMER2_BREF_TYPE_INODE,
                                     HAMMER2_INODE_BYTES,
                                     xop->head.mtid, 0, xop->flags);
        if (error == 0) {
                hammer2_chain_modify(chain, xop->head.mtid, 0, 0);
                chain->data->ipdata.meta = xop->meta;
                if (xop->head.name1) {
                        bcopy(xop->head.name1,
                              chain->data->ipdata.filename,
                              xop->head.name1_len);
                        chain->data->ipdata.meta.name_len = xop->head.name1_len;
                }
                chain->data->ipdata.meta.name_key = xop->lhc;
        }
fail:
        if (parent) {
                hammer2_chain_unlock(parent);
                hammer2_chain_drop(parent);
        }
        hammer2_xop_feed(&xop->head, chain, thr->clindex, error);
        if (chain) {
                hammer2_chain_unlock(chain);
                hammer2_chain_drop(chain);
        }
}

/*
 * Inode delete helper (threaded, backend)
 *
 * Generally used by hammer2_run_sideq()
 */
void
hammer2_inode_xop_destroy(hammer2_thread_t *thr, hammer2_xop_t *arg)
{
	hammer2_xop_destroy_t *xop = &arg->xop_destroy;
	hammer2_pfs_t *pmp;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_inode_t *ip;
	int error;

	/*
	 * We need the precise parent chain to issue the deletion.
	 */
	ip = xop->head.ip1;
	pmp = ip->pmp;
	chain = NULL;

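	/*
	 * The chain can be moved by concurrent operations; if the
	 * resolved chain is no longer parented by the resolved parent
	 * we must retry (see the parent-changed check below).
	 */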
again:
	parent = hammer2_inode_chain(ip, thr->clindex, HAMMER2_RESOLVE_ALWAYS);
	if (parent)
		hammer2_chain_getparent(&parent, HAMMER2_RESOLVE_ALWAYS);
	if (parent == NULL) {
		error = EIO;
		goto done;
	}
	chain = hammer2_inode_chain(ip, thr->clindex, HAMMER2_RESOLVE_ALWAYS);
	if (chain == NULL) {
		error = EIO;
		goto done;
	}
	if (chain->parent != parent) {
		kprintf("hammer2_inode_xop_destroy: "
			"parent changed %p->(%p,%p)\n",
			chain, parent, chain->parent);
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		chain = NULL;	/* avoid a stale unlock/drop at done: on retry */
		goto again;
	}

	hammer2_chain_delete(parent, chain, xop->head.mtid, 0);
	error = 0;
done:
	hammer2_xop_feed(&xop->head, NULL, thr->clindex, error);
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
}

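/*
 * Inode unlink-all helper (threaded, backend)
 *
 * Permanently deletes all chains within the caller-specified key range
 * under ip1, feeding each deleted chain back to the frontend.
 */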
void
hammer2_inode_xop_unlinkall(hammer2_thread_t *thr, hammer2_xop_t *arg)
{
	hammer2_xop_unlinkall_t *xop = &arg->xop_unlinkall;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_key_t key_next;
	int cache_index = -1;

	/*
	 * We need the precise parent chain to issue the deletion.
	 */
	parent = hammer2_inode_chain(xop->head.ip1, thr->clindex,
				     HAMMER2_RESOLVE_ALWAYS);
	chain = NULL;
	if (parent == NULL) {
		/* XXX error */
		goto done;
	}
	chain = hammer2_chain_lookup(&parent, &key_next,
				     xop->key_beg, xop->key_end,
				     &cache_index,
				     HAMMER2_LOOKUP_ALWAYS);
	while (chain) {
		hammer2_chain_delete(parent, chain,
				     xop->head.mtid, HAMMER2_DELETE_PERMANENT);
		hammer2_xop_feed(&xop->head, chain, thr->clindex, chain->error);
		/* hammer2_chain_next() unlocks and drops the old chain */
		chain = hammer2_chain_next(&parent, chain, &key_next,
					   key_next, xop->key_end,
					   &cache_index,
					   HAMMER2_LOOKUP_ALWAYS);
	}
done:
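	/* terminate the feed; ENOENT indicates the end of the scan */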
	hammer2_xop_feed(&xop->head, NULL, thr->clindex, ENOENT);
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
}

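/*
 * Inode connect helper (threaded, backend)
 *
 * Connects (or reconnects) ip2's inode chain under the directory ip1
 * at the new lhc, adjusting the embedded filename and name key.
 */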
void
hammer2_inode_xop_connect(hammer2_thread_t *thr, hammer2_xop_t *arg)
{
	hammer2_xop_connect_t *xop = &arg->xop_connect;
	hammer2_inode_data_t *wipdata;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_pfs_t *pmp;
	hammer2_key_t key_dummy;
	int cache_index = -1;
	int error;

	/*
	 * Get directory, then issue a lookup to prime the parent chain
	 * for the create.  The lookup is expected to fail.
	 */
	pmp = xop->head.ip1->pmp;
	parent = hammer2_inode_chain(xop->head.ip1, thr->clindex,
				     HAMMER2_RESOLVE_ALWAYS);
	if (parent == NULL) {
		chain = NULL;
		error = EIO;
		goto fail;
	}
	chain = hammer2_chain_lookup(&parent, &key_dummy,
				     xop->lhc, xop->lhc,
				     &cache_index, 0);
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		chain = NULL;
		error = EEXIST;
		goto fail;
	}

	/*
	 * Adjust the filename in the inode, set the name key.
	 *
	 * NOTE: Frontend must also adjust ip2->meta on success, we can't
	 *	 do it here.
	 */
	chain = hammer2_inode_chain(xop->head.ip2, thr->clindex,
				    HAMMER2_RESOLVE_ALWAYS);
	hammer2_chain_modify(chain, xop->head.mtid, 0, 0);
	wipdata = &chain->data->ipdata;

	hammer2_inode_modify(xop->head.ip2);
	if (xop->head.name1) {
		bzero(wipdata->filename, sizeof(wipdata->filename));
		bcopy(xop->head.name1, wipdata->filename, xop->head.name1_len);
		wipdata->meta.name_len = xop->head.name1_len;
	}
	wipdata->meta.name_key = xop->lhc;

	/*
	 * Reconnect the chain to the new parent directory.
	 */
	error = hammer2_chain_create(&parent, &chain,
				     pmp, HAMMER2_METH_DEFAULT,
				     xop->lhc, 0,
				     HAMMER2_BREF_TYPE_INODE,
				     HAMMER2_INODE_BYTES,
				     xop->head.mtid, 0, 0);

	/*
	 * Feed result back.
	 */
fail:
	hammer2_xop_feed(&xop->head, NULL, thr->clindex, error);
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
}

/*
 * Synchronize the in-memory inode's meta-data with its backing chain
 * (threaded, backend).
 */
void
hammer2_inode_xop_chain_sync(hammer2_thread_t *thr, hammer2_xop_t *arg)
{
	hammer2_xop_fsync_t *xop = &arg->xop_fsync;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	int error;

	parent = hammer2_inode_chain(xop->head.ip1, thr->clindex,
				     HAMMER2_RESOLVE_ALWAYS);
	chain = NULL;
	if (parent == NULL) {
		error = EIO;
		goto done;
	}
	if (parent->error) {
		error = parent->error;
		goto done;
	}

	error = 0;

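	/*
	 * If the file was truncated we must prune any data chains
	 * beyond the new EOF before syncing the meta-data.
	 */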
	if ((xop->ipflags & HAMMER2_INODE_RESIZED) == 0) {
		/* osize must be ignored */
	} else if (xop->meta.size < xop->osize) {
		/*
		 * We must delete any chains beyond the EOF.  The chain
		 * straddling the EOF will be pending in the bioq.
		 */
		hammer2_key_t lbase;
		hammer2_key_t key_next;
		int cache_index = -1;

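		/* round the new EOF up to a physical buffer boundary */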
		lbase = (xop->meta.size + HAMMER2_PBUFMASK64) &
			~HAMMER2_PBUFMASK64;
		chain = hammer2_chain_lookup(&parent, &key_next,
					     lbase, HAMMER2_KEY_MAX,
					     &cache_index,
					     HAMMER2_LOOKUP_NODATA |
					     HAMMER2_LOOKUP_NODIRECT);
		while (chain) {
			/*
			 * Degenerate embedded (direct-data) case yields
			 * nothing to loop on.  Inode and dirent brefs
			 * should never be returned here.
			 */
			switch (chain->bref.type) {
			case HAMMER2_BREF_TYPE_DIRENT:
			case HAMMER2_BREF_TYPE_INODE:
				KKASSERT(0);
				break;
			case HAMMER2_BREF_TYPE_DATA:
				hammer2_chain_delete(parent, chain,
						     xop->head.mtid,
						     HAMMER2_DELETE_PERMANENT);
				break;
			}
			chain = hammer2_chain_next(&parent, chain, &key_next,
						   key_next, HAMMER2_KEY_MAX,
						   &cache_index,
						   HAMMER2_LOOKUP_NODATA |
						   HAMMER2_LOOKUP_NODIRECT);
		}

		/*
		 * Reset to point at inode for following code, if necessary.
		 */
		if (parent->bref.type != HAMMER2_BREF_TYPE_INODE) {
			hammer2_chain_unlock(parent);
			hammer2_chain_drop(parent);
			parent = hammer2_inode_chain(xop->head.ip1,
						     thr->clindex,
						     HAMMER2_RESOLVE_ALWAYS);
			kprintf("hammer2: TRUNCATE RESET on '%s'\n",
				parent->data->ipdata.filename);
		}
	}

	/*
	 * Sync the inode meta-data, potentially clear the blockset area
	 * of direct data so it can be used for blockrefs.
	 */
	hammer2_chain_modify(parent, xop->head.mtid, 0, 0);
	parent->data->ipdata.meta = xop->meta;
	if (xop->clear_directdata) {
		bzero(&parent->data->ipdata.u.blockset,
		      sizeof(parent->data->ipdata.u.blockset));
	}
done:
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	hammer2_xop_feed(&xop->head, NULL, thr->clindex, error);
}
