/*
 * Copyright (c) 2011-2014 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/uuid.h>

#include "hammer2.h"

#define INODE_DEBUG     0

RB_GENERATE2(hammer2_inode_tree, hammer2_inode, rbnode, hammer2_inode_cmp,
             hammer2_tid_t, meta.inum);

int
hammer2_inode_cmp(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
{
        if (ip1->meta.inum < ip2->meta.inum)
                return(-1);
        if (ip1->meta.inum > ip2->meta.inum)
                return(1);
        return(0);
}
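
/*
 * Illustrative note (a sketch, not part of the original source):
 * RB_GENERATE2 emits a second lookup path keyed directly on the scalar
 * field (meta.inum), so the tree can be searched without constructing
 * a dummy inode:
 *
 *      ip = RB_LOOKUP(hammer2_inode_tree, &pmp->inum_tree, inum);
 */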

/*
 * HAMMER2 inode locks
 *
 * HAMMER2 offers shared and exclusive locks on inodes.  Pass a mask of
 * flags for options:
 *
 *      - pass HAMMER2_RESOLVE_SHARED if a shared lock is desired.  The
 *        inode locking function will automatically set the RDONLY flag.
 *
 *      - pass HAMMER2_RESOLVE_ALWAYS if you need the inode's meta-data.
 *        Most front-end inode locks do.
 *
 *      - pass HAMMER2_RESOLVE_NEVER if you do not want to require that
 *        the inode data be resolved.  This is used by the syncthr because
 *        it can run on an unresolved/out-of-sync cluster, and also by the
 *        vnode reclamation code to avoid unnecessary I/O (particularly when
 *        disposing of hundreds of thousands of cached vnodes).
 *
 * The inode locking function locks the inode itself.  The underlying chain
 * set is not locked as part of this operation; individual chains are
 * referenced and locked separately via hammer2_inode_chain() and friends.
 *
 * ip->cluster will be stable while the inode is locked.
 *
 * NOTE: We don't combine the inode/chain lock because putting away an
 *       inode would otherwise confuse multiple lock holders of the inode.
 *
 * NOTE: In-memory inodes always point to hardlink targets (the actual file),
 *       and never point to a hardlink pointer.
 *
 * NOTE: If caller passes HAMMER2_RESOLVE_RDONLY the exclusive locking code
 *       will feel free to reduce the chain set in the cluster as an
 *       optimization.  It will still be validated against the quorum if
 *       appropriate, but the optimization might be able to reduce data
 *       accesses to one node.  This flag is automatically set if the inode
 *       is locked with HAMMER2_RESOLVE_SHARED.
 */
void
hammer2_inode_lock(hammer2_inode_t *ip, int how)
{
        hammer2_inode_ref(ip);

        /*
         * Inode structure mutex
         */
        if (how & HAMMER2_RESOLVE_SHARED) {
                /*how |= HAMMER2_RESOLVE_RDONLY; not used */
                hammer2_mtx_sh(&ip->lock);
        } else {
                hammer2_mtx_ex(&ip->lock);
        }
}
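
/*
 * Usage sketch (illustrative, not part of the original source): a typical
 * front-end operation takes the inode lock, selects and locks a chain for
 * access, and unwinds in the reverse order.  "ip" and "clindex" are assumed
 * to come from the caller's context:
 *
 *      hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
 *      chain = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS |
 *                                               HAMMER2_RESOLVE_SHARED);
 *      ...access meta-data via chain...
 *      if (chain) {
 *              hammer2_chain_unlock(chain);
 *              hammer2_chain_drop(chain);
 *      }
 *      hammer2_inode_unlock(ip);
 */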

/*
 * Select a chain out of an inode's cluster and lock it.
 *
 * The inode does not have to be locked.
 */
hammer2_chain_t *
hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how)
{
        hammer2_chain_t *chain;

        hammer2_spin_sh(&ip->cluster_spin);
        if (clindex >= ip->cluster.nchains)
                chain = NULL;
        else
                chain = ip->cluster.array[clindex].chain;
        if (chain) {
                hammer2_chain_ref(chain);
                hammer2_spin_unsh(&ip->cluster_spin);
                hammer2_chain_lock(chain, how);
        } else {
                hammer2_spin_unsh(&ip->cluster_spin);
        }
        return chain;
}

hammer2_chain_t *
hammer2_inode_chain_and_parent(hammer2_inode_t *ip, int clindex,
                               hammer2_chain_t **parentp, int how)
{
        hammer2_chain_t *chain;
        hammer2_chain_t *parent;

        for (;;) {
                hammer2_spin_sh(&ip->cluster_spin);
                if (clindex >= ip->cluster.nchains)
                        chain = NULL;
                else
                        chain = ip->cluster.array[clindex].chain;
                if (chain) {
                        hammer2_chain_ref(chain);
                        hammer2_spin_unsh(&ip->cluster_spin);
                        hammer2_chain_lock(chain, how);
                } else {
                        hammer2_spin_unsh(&ip->cluster_spin);
                }

                /*
                 * Callers must pass a clindex which resolves to a chain,
                 * the chain is dereferenced unconditionally below.
                 */
                KKASSERT(chain != NULL);

                /*
                 * Get parent, lock order must be (parent, chain).
                 */
                parent = chain->parent;
                hammer2_chain_ref(parent);
                hammer2_chain_unlock(chain);
                hammer2_chain_lock(parent, how);
                hammer2_chain_lock(chain, how);
                if (ip->cluster.array[clindex].chain == chain &&
                    chain->parent == parent) {
                        break;
                }

                /*
                 * Retry
                 */
                hammer2_chain_unlock(chain);
                hammer2_chain_drop(chain);
                hammer2_chain_unlock(parent);
                hammer2_chain_drop(parent);
        }
        *parentp = parent;

        return chain;
}
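
/*
 * Usage sketch (illustrative, not part of the original source): since the
 * lock order is (parent, chain), callers unwind the chain before the
 * parent:
 *
 *      chain = hammer2_inode_chain_and_parent(ip, clindex, &parent,
 *                                             HAMMER2_RESOLVE_ALWAYS);
 *      ...operate on chain with its parent held...
 *      hammer2_chain_unlock(chain);
 *      hammer2_chain_drop(chain);
 *      hammer2_chain_unlock(parent);
 *      hammer2_chain_drop(parent);
 */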

void
hammer2_inode_unlock(hammer2_inode_t *ip)
{
        hammer2_mtx_unlock(&ip->lock);
        hammer2_inode_drop(ip);
}

/*
 * Temporarily release a lock held shared or exclusive.  Caller must
 * hold the lock shared or exclusive on call and lock will be released
 * on return.
 */
hammer2_mtx_state_t
hammer2_inode_lock_temp_release(hammer2_inode_t *ip)
{
        return hammer2_mtx_temp_release(&ip->lock);
}

/*
 * Restore a lock that was temporarily released.
 */
void
hammer2_inode_lock_temp_restore(hammer2_inode_t *ip, hammer2_mtx_state_t ostate)
{
        hammer2_mtx_temp_restore(&ip->lock, ostate);
}
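
/*
 * Usage sketch (illustrative, not part of the original source): the
 * release/restore pair brackets operations which may block and must not
 * hold ip->lock, such as the vget() in hammer2_igetv() below:
 *
 *      hammer2_mtx_state_t ostate;
 *
 *      ostate = hammer2_inode_lock_temp_release(ip);
 *      ...blocking operation...
 *      hammer2_inode_lock_temp_restore(ip, ostate);
 */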

/*
 * Upgrade a shared inode lock to exclusive and return.  If the inode lock
 * is already held exclusively this is a NOP.
 *
 * The caller MUST hold the inode lock either shared or exclusive on call
 * and will own the lock exclusively on return.
 *
 * Returns non-zero if the lock was already exclusive prior to the upgrade.
 */
int
hammer2_inode_lock_upgrade(hammer2_inode_t *ip)
{
        int wasexclusive;

        if (mtx_islocked_ex(&ip->lock)) {
                wasexclusive = 1;
        } else {
                hammer2_mtx_unlock(&ip->lock);
                hammer2_mtx_ex(&ip->lock);
                wasexclusive = 0;
        }
        return wasexclusive;
}

/*
 * Downgrade an inode lock from exclusive to shared only if the inode
 * lock was previously shared.  If the inode lock was previously exclusive,
 * this is a NOP.
 */
void
hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int wasexclusive)
{
        if (wasexclusive == 0)
                mtx_downgrade(&ip->lock);
}
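
/*
 * Usage sketch (illustrative, not part of the original source): the
 * upgrade and downgrade calls are paired via the wasexclusive return
 * value so the caller's original lock state is restored, as done in
 * hammer2_igetv() below:
 *
 *      int wasexclusive;
 *
 *      wasexclusive = hammer2_inode_lock_upgrade(ip);
 *      ...modify state requiring the exclusive lock...
 *      hammer2_inode_lock_downgrade(ip, wasexclusive);
 */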

/*
 * Lookup an inode by inode number
 */
hammer2_inode_t *
hammer2_inode_lookup(hammer2_pfs_t *pmp, hammer2_tid_t inum)
{
        hammer2_inode_t *ip;

        KKASSERT(pmp);
        if (pmp->spmp_hmp) {
                ip = NULL;
        } else {
                hammer2_spin_ex(&pmp->inum_spin);
                ip = RB_LOOKUP(hammer2_inode_tree, &pmp->inum_tree, inum);
                if (ip)
                        hammer2_inode_ref(ip);
                hammer2_spin_unex(&pmp->inum_spin);
        }
        return(ip);
}
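
/*
 * Usage sketch (illustrative, not part of the original source): the lookup
 * returns a referenced but unlocked inode, so the caller must drop the ref
 * when done:
 *
 *      ip = hammer2_inode_lookup(pmp, inum);
 *      if (ip) {
 *              ...use ip (lock it if meta-data access is needed)...
 *              hammer2_inode_drop(ip);
 *      }
 */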

/*
 * Adding a ref to an inode is only legal if the inode already has at least
 * one ref.
 *
 * (can be called with spinlock held)
 */
void
hammer2_inode_ref(hammer2_inode_t *ip)
{
        atomic_add_int(&ip->refs, 1);
        if (hammer2_debug & 0x80000) {
                kprintf("INODE+1 %p (%d->%d)\n", ip, ip->refs - 1, ip->refs);
                print_backtrace(8);
        }
}

/*
 * Drop an inode reference, freeing the inode when the last reference goes
 * away.
 */
void
hammer2_inode_drop(hammer2_inode_t *ip)
{
        hammer2_pfs_t *pmp;
        hammer2_inode_t *pip;
        u_int refs;

        while (ip) {
                if (hammer2_debug & 0x80000) {
                        kprintf("INODE-1 %p (%d->%d)\n",
                                ip, ip->refs, ip->refs - 1);
                        print_backtrace(8);
                }
                refs = ip->refs;
                cpu_ccfence();
                if (refs == 1) {
                        /*
                         * Transition to zero, must interlock with
                         * the inode inumber lookup tree (if applicable).
                         * It should not be possible for anyone to race
                         * the transition to 0.
                         */
                        pmp = ip->pmp;
                        KKASSERT(pmp);
                        hammer2_spin_ex(&pmp->inum_spin);

                        if (atomic_cmpset_int(&ip->refs, 1, 0)) {
                                KKASSERT(hammer2_mtx_refs(&ip->lock) == 0);
                                if (ip->flags & HAMMER2_INODE_ONRBTREE) {
                                        atomic_clear_int(&ip->flags,
                                                     HAMMER2_INODE_ONRBTREE);
                                        RB_REMOVE(hammer2_inode_tree,
                                                  &pmp->inum_tree, ip);
                                }
                                hammer2_spin_unex(&pmp->inum_spin);

                                pip = ip->pip;
                                ip->pip = NULL;
                                ip->pmp = NULL;

                                /*
                                 * Cleaning out ip->cluster isn't entirely
                                 * trivial.
                                 */
                                hammer2_inode_repoint(ip, NULL, NULL);

                                /*
                                 * We have to drop pip (if non-NULL) to
                                 * dispose of our implied reference from
                                 * ip->pip.  We can simply loop on it.
                                 */
                                kfree(ip, pmp->minode);
                                atomic_add_long(&pmp->inmem_inodes, -1);
                                ip = pip;
                                /* continue with pip (can be NULL) */
                        } else {
                                hammer2_spin_unex(&pmp->inum_spin);
                        }
                } else {
                        /*
                         * Non-zero transition
                         */
                        if (atomic_cmpset_int(&ip->refs, refs, refs - 1))
                                break;
                }
        }
}

/*
 * Get the vnode associated with the given inode, allocating the vnode if
 * necessary.  The vnode will be returned exclusively locked.
 *
 * The caller must lock the inode (shared or exclusive).
 *
 * Great care must be taken to avoid deadlocks and vnode acquisition/reclaim
 * races.
 */
struct vnode *
hammer2_igetv(hammer2_inode_t *ip, int *errorp)
{
        hammer2_pfs_t *pmp;
        struct vnode *vp;

        pmp = ip->pmp;
        KKASSERT(pmp != NULL);
        *errorp = 0;

        for (;;) {
                /*
                 * Attempt to reuse an existing vnode assignment.  It is
                 * possible to race a reclaim so the vget() may fail.  The
                 * inode must be unlocked during the vget() to avoid a
                 * deadlock against a reclaim.
                 */
                int wasexclusive;

                vp = ip->vp;
                if (vp) {
                        /*
                         * Inode must be unlocked during the vget() to avoid
                         * possible deadlocks, but leave the ip ref intact.
                         *
                         * vnode is held to prevent destruction during the
                         * vget().  The vget() can still fail if we lost
                         * a reclaim race on the vnode.
                         */
                        hammer2_mtx_state_t ostate;

                        vhold(vp);
                        ostate = hammer2_inode_lock_temp_release(ip);
                        if (vget(vp, LK_EXCLUSIVE)) {
                                vdrop(vp);
                                hammer2_inode_lock_temp_restore(ip, ostate);
                                continue;
                        }
                        hammer2_inode_lock_temp_restore(ip, ostate);
                        vdrop(vp);
                        /* vp still locked and ref from vget */
                        if (ip->vp != vp) {
                                kprintf("hammer2: igetv race %p/%p\n",
                                        ip->vp, vp);
                                vput(vp);
                                continue;
                        }
                        *errorp = 0;
                        break;
                }

                /*
                 * No vnode exists, allocate a new vnode.  Beware of
                 * allocation races.  This function will return an
                 * exclusively locked and referenced vnode.
                 */
                *errorp = getnewvnode(VT_HAMMER2, pmp->mp, &vp, 0, 0);
                if (*errorp) {
                        kprintf("hammer2: igetv getnewvnode failed %d\n",
                                *errorp);
                        vp = NULL;
                        break;
                }

                /*
                 * Lock the inode and check for an allocation race.
                 */
                wasexclusive = hammer2_inode_lock_upgrade(ip);
                if (ip->vp != NULL) {
                        vp->v_type = VBAD;
                        vx_put(vp);
                        hammer2_inode_lock_downgrade(ip, wasexclusive);
                        continue;
                }

                switch (ip->meta.type) {
                case HAMMER2_OBJTYPE_DIRECTORY:
                        vp->v_type = VDIR;
                        break;
                case HAMMER2_OBJTYPE_REGFILE:
                        vp->v_type = VREG;
                        vinitvmio(vp, ip->meta.size,
                                  HAMMER2_LBUFSIZE,
                                  (int)ip->meta.size & HAMMER2_LBUFMASK);
                        break;
                case HAMMER2_OBJTYPE_SOFTLINK:
                        /*
                         * XXX for now we are using the generic file_read
                         * and file_write code so we need a buffer cache
                         * association.
                         */
                        vp->v_type = VLNK;
                        vinitvmio(vp, ip->meta.size,
                                  HAMMER2_LBUFSIZE,
                                  (int)ip->meta.size & HAMMER2_LBUFMASK);
                        break;
                case HAMMER2_OBJTYPE_CDEV:
                        vp->v_type = VCHR;
                        /* fall through */
                case HAMMER2_OBJTYPE_BDEV:
                        vp->v_ops = &pmp->mp->mnt_vn_spec_ops;
                        if (ip->meta.type != HAMMER2_OBJTYPE_CDEV)
                                vp->v_type = VBLK;
                        addaliasu(vp,
                                  ip->meta.rmajor,
                                  ip->meta.rminor);
                        break;
                case HAMMER2_OBJTYPE_FIFO:
                        vp->v_type = VFIFO;
                        vp->v_ops = &pmp->mp->mnt_vn_fifo_ops;
                        break;
                case HAMMER2_OBJTYPE_SOCKET:
                        vp->v_type = VSOCK;
                        break;
                default:
                        panic("hammer2: unhandled objtype %d",
                              ip->meta.type);
                        break;
                }

                if (ip == pmp->iroot)
                        vsetflags(vp, VROOT);

                vp->v_data = ip;
                ip->vp = vp;
                hammer2_inode_ref(ip);          /* vp association */
                hammer2_inode_lock_downgrade(ip, wasexclusive);
                break;
        }

        /*
         * Return non-NULL vp and *errorp == 0, or NULL vp and *errorp != 0.
         * Note that vp can be NULL here, so only dereference it when set.
         */
        if ((hammer2_debug & 0x0002) && vp) {
                kprintf("igetv vp %p refs 0x%08x aux 0x%08x\n",
                        vp, vp->v_refcnt, vp->v_auxrefs);
        }
        return (vp);
}
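
/*
 * Usage sketch (illustrative, not part of the original source): a front-end
 * operation typically resolves an inode and then obtains its vnode:
 *
 *      hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
 *      vp = hammer2_igetv(ip, &error);
 *      hammer2_inode_unlock(ip);
 *      if (vp == NULL)
 *              return (error);
 *      ...use the exclusively locked vnode, then vput(vp)...
 */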

/*
 * Returns the inode associated with the passed-in cluster, creating the
 * inode if necessary and synchronizing it to the passed-in cluster otherwise.
 * When synchronizing, if idx >= 0, only cluster index (idx) is synchronized.
 * Otherwise the whole cluster is synchronized.
 *
 * The passed-in cluster must be locked and will remain locked on return.
 * The returned inode will be locked and the caller may dispose of both
 * via hammer2_inode_unlock() + hammer2_inode_drop().  However, if the caller
 * needs to resolve a hardlink it must ref/unlock/relock/drop the inode.
 *
 * The hammer2_inode structure regulates the interface between the high level
 * kernel VNOPS API and the filesystem backend (the chains).
 *
 * On return the inode is locked with the supplied cluster.
 */
hammer2_inode_t *
hammer2_inode_get(hammer2_pfs_t *pmp, hammer2_inode_t *dip,
                  hammer2_cluster_t *cluster, int idx)
{
        hammer2_inode_t *nip;
        const hammer2_inode_data_t *iptmp;
        const hammer2_inode_data_t *nipdata;

        KKASSERT(cluster == NULL ||
                 hammer2_cluster_type(cluster) == HAMMER2_BREF_TYPE_INODE);
        KKASSERT(pmp);

        /*
         * Interlocked lookup/ref of the inode.  This code is only needed
         * when looking up inodes with nlinks != 0 (TODO: optimize out
         * otherwise and test for duplicates).
         *
         * Cluster can be NULL during the initial pfs allocation.
         */
again:
        while (cluster) {
                iptmp = &hammer2_cluster_rdata(cluster)->ipdata;
                nip = hammer2_inode_lookup(pmp, iptmp->meta.inum);
                if (nip == NULL)
                        break;

                hammer2_mtx_ex(&nip->lock);

                /*
                 * Handle SMP race (not applicable to the super-root spmp
                 * which can't index inodes due to duplicative inode numbers).
                 */
                if (pmp->spmp_hmp == NULL &&
                    (nip->flags & HAMMER2_INODE_ONRBTREE) == 0) {
                        hammer2_mtx_unlock(&nip->lock);
                        hammer2_inode_drop(nip);
                        continue;
                }
                if (idx >= 0)
                        hammer2_inode_repoint_one(nip, cluster, idx);
                else
                        hammer2_inode_repoint(nip, NULL, cluster);

                return nip;
        }

        /*
         * We couldn't find the inode number, create a new inode.
         */
        nip = kmalloc(sizeof(*nip), pmp->minode, M_WAITOK | M_ZERO);
        spin_init(&nip->cluster_spin, "h2clspin");
        atomic_add_long(&pmp->inmem_inodes, 1);
        hammer2_pfs_memory_inc(pmp);
        hammer2_pfs_memory_wakeup(pmp);
        if (pmp->spmp_hmp)
                nip->flags = HAMMER2_INODE_SROOT;

        /*
         * Initialize nip's cluster.  A cluster is provided for normal
         * inodes but typically not for the super-root or PFS inodes.
         */
        nip->cluster.refs = 1;
        nip->cluster.pmp = pmp;
        nip->cluster.flags |= HAMMER2_CLUSTER_INODE;
        if (cluster) {
                nipdata = &hammer2_cluster_rdata(cluster)->ipdata;
                nip->meta = nipdata->meta;
                atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD);
                hammer2_inode_repoint(nip, NULL, cluster);
        } else {
                nip->meta.inum = 1;             /* PFS inum is always 1 XXX */
                /* mtime will be updated when a cluster is available */
                atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD);/*XXX*/
        }

        nip->pip = dip;                         /* can be NULL */
        if (dip)
                hammer2_inode_ref(dip); /* ref dip for nip->pip */

        nip->pmp = pmp;

        /*
         * ref and lock on nip gives it state compatible to after a
         * hammer2_inode_lock() call.
         */
        nip->refs = 1;
        hammer2_mtx_init(&nip->lock, "h2inode");
        hammer2_mtx_ex(&nip->lock);
        /* combination of thread lock and chain lock == inode lock */

        /*
         * Attempt to add the inode.  If it fails we raced another inode
         * get.  Undo all the work and try again.
         */
        if (pmp->spmp_hmp == NULL) {
                hammer2_spin_ex(&pmp->inum_spin);
                if (RB_INSERT(hammer2_inode_tree, &pmp->inum_tree, nip)) {
                        hammer2_spin_unex(&pmp->inum_spin);
                        hammer2_mtx_unlock(&nip->lock);
                        hammer2_inode_drop(nip);
                        goto again;
                }
                atomic_set_int(&nip->flags, HAMMER2_INODE_ONRBTREE);
                hammer2_spin_unex(&pmp->inum_spin);
        }

        return (nip);
}
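
/*
 * Usage sketch (illustrative, not part of the original source): the
 * typical pattern is to run a backend XOP, collect its cluster, and wrap
 * the result in an inode, as done in hammer2_inode_create() and
 * hammer2_inode_install_hidden() below:
 *
 *      error = hammer2_xop_collect(&xop->head, 0);
 *      if (error == 0)
 *              nip = hammer2_inode_get(pmp, dip, &xop->head.cluster, -1);
 */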

/*
 * Create a new inode in the specified directory using the vattr to
 * figure out the type of inode.
 *
 * If no error occurs the new inode is returned with its cluster locked,
 * otherwise NULL is returned and an error is returned in *errorp.
 *
 * If vap and/or cred are NULL the related fields are not set and the
 * inode type defaults to a directory.  This is used when creating PFSs
 * under the super-root, so the inode number is set to 1 in this case.
 *
 * dip is not locked on entry.
 *
 * NOTE: When used to create a snapshot, the inode is temporarily associated
 *       with the super-root spmp. XXX should pass new pmp for snapshot.
 */
hammer2_inode_t *
hammer2_inode_create(hammer2_inode_t *dip,
                     struct vattr *vap, struct ucred *cred,
                     const uint8_t *name, size_t name_len, hammer2_key_t lhc,
                     hammer2_key_t inum, uint8_t type, uint8_t target_type,
                     int flags, int *errorp)
{
        hammer2_xop_create_t *xop;
        hammer2_inode_t *nip;
        int error;
        uid_t xuid;
        uuid_t dip_uid;
        uuid_t dip_gid;
        uint32_t dip_mode;
        uint8_t dip_comp_algo;
        uint8_t dip_check_algo;

        if (name)
                lhc = hammer2_dirhash(name, name_len);
        *errorp = 0;
        nip = NULL;

        /*
         * Locate the inode or indirect block to create the new
         * entry in.  At the same time check for key collisions
         * and iterate until we don't get one.
         *
         * NOTE: hidden inodes do not have iterators.
         *
         * Lock the directory exclusively for now to guarantee that
         * we can find an unused lhc for the name.  Due to collisions,
         * two different creates can end up with the same lhc so we
         * cannot depend on the OS to prevent the collision.
         */
        hammer2_inode_lock(dip, 0);

        dip_uid = dip->meta.uid;
        dip_gid = dip->meta.gid;
        dip_mode = dip->meta.mode;
        dip_comp_algo = dip->meta.comp_algo;
        dip_check_algo = dip->meta.check_algo;

        /*
         * If name specified, locate an unused key in the collision space.
         * Otherwise use the passed-in lhc directly.
         */
        if (name) {
                hammer2_xop_scanlhc_t *sxop;
                hammer2_key_t lhcbase;

                lhcbase = lhc;
                sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
                sxop->lhc = lhc;
                hammer2_xop_start(&sxop->head, hammer2_xop_scanlhc);
                while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
                        if (lhc != sxop->head.cluster.focus->bref.key)
                                break;
                        ++lhc;
                }
                hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

                if (error) {
                        if (error != ENOENT)
                                goto done2;
                        ++lhc;
                        error = 0;
                }
                if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
                        error = ENOSPC;
                        goto done2;
                }
        }

        /*
         * Create the inode with the lhc as the key.
         */
        xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
        xop->lhc = lhc;
        xop->flags = flags;
        bzero(&xop->meta, sizeof(xop->meta));

        if (vap) {
                xop->meta.type = hammer2_get_obj_type(vap->va_type);

                switch (xop->meta.type) {
                case HAMMER2_OBJTYPE_CDEV:
                case HAMMER2_OBJTYPE_BDEV:
                        xop->meta.rmajor = vap->va_rmajor;
                        xop->meta.rminor = vap->va_rminor;
                        break;
                default:
                        break;
                }
                type = xop->meta.type;
        } else {
                xop->meta.type = type;
                xop->meta.target_type = target_type;
        }
        xop->meta.inum = inum;

        /* Inherit parent's inode compression mode. */
        xop->meta.comp_algo = dip_comp_algo;
        xop->meta.check_algo = dip_check_algo;
        xop->meta.version = HAMMER2_INODE_VERSION_ONE;
        hammer2_update_time(&xop->meta.ctime);
        xop->meta.mtime = xop->meta.ctime;
        if (vap)
                xop->meta.mode = vap->va_mode;
        xop->meta.nlinks = 1;
        if (vap) {
                if (dip && dip->pmp) {
                        xuid = hammer2_to_unix_xid(&dip_uid);
                        xuid = vop_helper_create_uid(dip->pmp->mp,
                                                     dip_mode,
                                                     xuid,
                                                     cred,
                                                     &vap->va_mode);
                } else {
                        /* super-root has no dip and/or pmp */
                        xuid = 0;
                }
                if (vap->va_vaflags & VA_UID_UUID_VALID)
                        xop->meta.uid = vap->va_uid_uuid;
                else if (vap->va_uid != (uid_t)VNOVAL)
                        hammer2_guid_to_uuid(&xop->meta.uid, vap->va_uid);
                else
                        hammer2_guid_to_uuid(&xop->meta.uid, xuid);

                if (vap->va_vaflags & VA_GID_UUID_VALID)
                        xop->meta.gid = vap->va_gid_uuid;
                else if (vap->va_gid != (gid_t)VNOVAL)
                        hammer2_guid_to_uuid(&xop->meta.gid, vap->va_gid);
                else if (dip)
                        xop->meta.gid = dip_gid;
        }

        /*
         * Regular files and softlinks allow a small amount of data to be
         * directly embedded in the inode.  This flag will be cleared if
         * the size is extended past the embedded limit.
         */
        if (xop->meta.type == HAMMER2_OBJTYPE_REGFILE ||
            xop->meta.type == HAMMER2_OBJTYPE_SOFTLINK ||
            xop->meta.type == HAMMER2_OBJTYPE_HARDLINK) {
                xop->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
        }
        if (name)
                hammer2_xop_setname(&xop->head, name, name_len);
        xop->meta.name_len = name_len;
        xop->meta.name_key = lhc;
        KKASSERT(name_len < HAMMER2_INODE_MAXNAME);

        hammer2_xop_start(&xop->head, hammer2_inode_xop_create);

        error = hammer2_xop_collect(&xop->head, 0);
#if INODE_DEBUG
        kprintf("CREATE INODE %*.*s\n",
                (int)name_len, (int)name_len, name);
#endif

        if (error) {
                *errorp = error;
                goto done;
        }

        /*
         * Set up the new inode if not a hardlink pointer.
         *
         * NOTE: *_get() integrates chain's lock into the inode lock.
         *
         * NOTE: Only one new inode can currently be created per
         *       transaction.  If the need arises we can adjust
         *       hammer2_trans_init() to allow more.
         *
         * NOTE: nipdata will have chain's blockset data.
         */
        if (type != HAMMER2_OBJTYPE_HARDLINK) {
                nip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
                nip->comp_heuristic = 0;
        } else {
                nip = NULL;
        }

done:
        hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
done2:
        hammer2_inode_unlock(dip);

        return (nip);
}
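
/*
 * Usage sketch (illustrative, not part of the original source): creating a
 * directory inode with no vap/cred, mirroring the call made by
 * hammer2_inode_install_hidden() below:
 *
 *      nip = hammer2_inode_create(dip, NULL, NULL, NULL, 0,
 *                                 lhc, inum, HAMMER2_OBJTYPE_DIRECTORY,
 *                                 0, 0, &error);
 *      if (nip)
 *              hammer2_inode_unlock(nip);
 */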

/*
 * Connect the disconnected inode (ip) to the directory (dip) with the
 * specified (name, name_len).  If name is NULL, (lhc) will be used as
 * the directory key and the inode's embedded name will not be modified
 * for future recovery purposes.
 *
 * dip and ip must both be locked exclusively (dip in particular to avoid
 * lhc collisions).
 */
int
hammer2_inode_connect(hammer2_inode_t *dip, hammer2_inode_t *ip,
                      const char *name, size_t name_len,
                      hammer2_key_t lhc)
{
        hammer2_xop_scanlhc_t *sxop;
        hammer2_xop_connect_t *xop;
        hammer2_inode_t *opip;
        hammer2_key_t lhcbase;
        int error;

        /*
         * Calculate the lhc and resolve the collision space.
         */
        if (name) {
                lhc = lhcbase = hammer2_dirhash(name, name_len);
                sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
                sxop->lhc = lhc;
                hammer2_xop_start(&sxop->head, hammer2_xop_scanlhc);
                while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
                        if (lhc != sxop->head.cluster.focus->bref.key)
                                break;
                        ++lhc;
                }
                hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

                if (error) {
                        if (error != ENOENT)
                                goto done;
                        ++lhc;
                        error = 0;
                }
                if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
                        error = ENOSPC;
                        goto done;
                }
        } else {
                error = 0;
        }

        /*
         * Formally reconnect the in-memory structure.  ip must
         * be locked exclusively to safely change ip->pip.
         */
        if (ip->pip != dip) {
                hammer2_inode_ref(dip);
                opip = ip->pip;
                ip->pip = dip;
                if (opip)
                        hammer2_inode_drop(opip);
        }

        /*
         * Connect her up
         */
        xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
        if (name)
                hammer2_xop_setname(&xop->head, name, name_len);
        hammer2_xop_setip2(&xop->head, ip);
        xop->lhc = lhc;
        hammer2_xop_start(&xop->head, hammer2_inode_xop_connect);
        error = hammer2_xop_collect(&xop->head, 0);
        hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

        /*
         * On success make the same adjustments to ip->meta or the
         * next flush may blow up the chain.
         */
        if (error == 0) {
                hammer2_inode_modify(ip);
                ip->meta.name_key = lhc;
                if (name)
                        ip->meta.name_len = name_len;
        }
done:
        return error;
}
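
/*
 * Usage sketch (illustrative, not part of the original source): connecting
 * an open-but-unlinked inode into the hidden directory with its inode
 * number as the key, as done by hammer2_inode_unlink_finisher() below:
 *
 *      hammer2_inode_lock(pmp->ihidden, 0);
 *      error = hammer2_inode_connect(pmp->ihidden, ip,
 *                                    NULL, 0, ip->meta.inum);
 *      hammer2_inode_unlock(pmp->ihidden);
 */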

/*
 * Repoint ip->cluster's chains to cluster's chains and fixup the default
 * focus.  All items, valid or invalid, are repointed.  hammer2_xop_start()
 * filters out invalid or non-matching elements.
 *
 * Caller must hold the inode exclusively locked; the cluster, if not NULL,
 * must also be locked.
 *
 * Cluster may be NULL to clean out any chains in ip->cluster.
 */
void
hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_inode_t *pip,
                      hammer2_cluster_t *cluster)
{
        hammer2_chain_t *dropch[HAMMER2_MAXCLUSTER];
        hammer2_chain_t *ochain;
        hammer2_chain_t *nchain;
        hammer2_inode_t *opip;
        int i;

        bzero(dropch, sizeof(dropch));

        /*
         * Replace chains in ip->cluster with chains from cluster and
         * adjust the focus if necessary.
         *
         * NOTE: nchain and/or ochain can be NULL due to gaps
         *       in the cluster arrays.
         */
        hammer2_spin_ex(&ip->cluster_spin);
        for (i = 0; cluster && i < cluster->nchains; ++i) {
                /*
                 * Do not replace elements which are the same.  Also handle
                 * element count discrepancies.
                 */
                nchain = cluster->array[i].chain;
                if (i < ip->cluster.nchains) {
                        ochain = ip->cluster.array[i].chain;
                        if (ochain == nchain)
                                continue;
                } else {
                        ochain = NULL;
                }

                /*
                 * Make adjustments
                 */
                ip->cluster.array[i].chain = nchain;
                ip->cluster.array[i].flags &= ~HAMMER2_CITEM_INVALID;
                ip->cluster.array[i].flags |= cluster->array[i].flags &
                                              HAMMER2_CITEM_INVALID;
                if (nchain)
                        hammer2_chain_ref(nchain);
                dropch[i] = ochain;
        }

        /*
         * Release any left-over chains in ip->cluster.
         */
        while (i < ip->cluster.nchains) {
                nchain = ip->cluster.array[i].chain;
                if (nchain) {
                        ip->cluster.array[i].chain = NULL;
                        ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
                }
                dropch[i] = nchain;
                ++i;
        }

        /*
         * Fixup fields.  Note that the inode-embedded cluster is never
         * directly locked.
         */
        if (cluster) {
                ip->cluster.nchains = cluster->nchains;
                ip->cluster.focus = cluster->focus;
                ip->cluster.flags = cluster->flags & ~HAMMER2_CLUSTER_LOCKED;
        } else {
                ip->cluster.nchains = 0;
                ip->cluster.focus = NULL;
                ip->cluster.flags &= ~HAMMER2_CLUSTER_ZFLAGS;
        }

        /*
         * Repoint ip->pip if requested (non-NULL pip).
         */
        if (pip && ip->pip != pip) {
                opip = ip->pip;
                hammer2_inode_ref(pip);
                ip->pip = pip;
        } else {
                opip = NULL;
        }
        hammer2_spin_unex(&ip->cluster_spin);

        /*
         * Cleanup outside of spinlock
         */
        while (--i >= 0) {
                if (dropch[i])
                        hammer2_chain_drop(dropch[i]);
        }
        if (opip)
                hammer2_inode_drop(opip);
}

/*
 * Repoint a single element from the cluster to the ip.  Used by the
 * synchronization threads to piecemeal update inodes.  Does not change
 * focus and requires inode to be re-locked to clean-up flags (XXX).
 */
void
hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
                          int idx)
{
        hammer2_chain_t *ochain;
        hammer2_chain_t *nchain;
        int i;

        hammer2_spin_ex(&ip->cluster_spin);
        KKASSERT(idx < cluster->nchains);
        if (idx < ip->cluster.nchains) {
                ochain = ip->cluster.array[idx].chain;
                nchain = cluster->array[idx].chain;
        } else {
                ochain = NULL;
                nchain = cluster->array[idx].chain;

                /*
                 * Invalidate any new intermediate elements before
                 * extending nchains to cover idx.
                 */
                for (i = ip->cluster.nchains; i <= idx; ++i) {
                        bzero(&ip->cluster.array[i],
                              sizeof(ip->cluster.array[i]));
                        ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
                }
                ip->cluster.nchains = idx + 1;
        }
        if (ochain != nchain) {
                /*
                 * Make adjustments.
                 */
                ip->cluster.array[idx].chain = nchain;
                ip->cluster.array[idx].flags &= ~HAMMER2_CITEM_INVALID;
                ip->cluster.array[idx].flags |= cluster->array[idx].flags &
                                                HAMMER2_CITEM_INVALID;
        }
        hammer2_spin_unex(&ip->cluster_spin);
        if (ochain != nchain) {
                if (nchain)
                        hammer2_chain_ref(nchain);
                if (ochain)
                        hammer2_chain_drop(ochain);
        }
}

/*
 * Called with a locked inode to finish unlinking an inode after xop_unlink
 * had been run.  This function is responsible for decrementing nlinks and
 * moving deleted inodes to the hidden directory if they are still open.
 *
 * We don't bother decrementing nlinks if the file is not open and this was
 * the last link.
 *
 * If the inode is a hardlink target its chain has not yet been deleted,
 * otherwise its chain has been deleted.
 *
 * If isopen then any prior deletion was not permanent and the inode must
 * be moved to the hidden directory.
 */
int
hammer2_inode_unlink_finisher(hammer2_inode_t *ip, int isopen)
{
        hammer2_pfs_t *pmp;
        int error;

        pmp = ip->pmp;

        /*
         * Decrement nlinks.  If this is the last link and the file is
         * not open, the chain has already been removed and we don't bother
         * dirtying the inode.
         */
        if (ip->meta.nlinks == 1) {
                atomic_set_int(&ip->flags, HAMMER2_INODE_ISUNLINKED);
                if (isopen == 0) {
                        atomic_set_int(&ip->flags, HAMMER2_INODE_ISDELETED);
                        return 0;
                }
        }

        hammer2_inode_modify(ip);
        --ip->meta.nlinks;
        if ((int64_t)ip->meta.nlinks < 0)
                ip->meta.nlinks = 0;    /* safety */

        /*
         * If nlinks is not zero we are done.  However, this should only be
         * possible with a hardlink target.  If the inode is an embedded
         * hardlink nlinks should have dropped to zero, warn and proceed
         * with the next step.
         */
        if (ip->meta.nlinks) {
                if ((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0)
                        return 0;
                kprintf("hammer2_inode_unlink: nlinks was not 0 (%jd)\n",
                        (intmax_t)ip->meta.nlinks);
                return 0;
        }

        /*
         * nlinks is now zero, the inode should have already been deleted.
         * If the file is open it was deleted non-permanently and must be
         * moved to the hidden directory.
         *
         * When moving to the hidden directory we force the name_key to the
         * inode number to avoid collisions.
         */
        if (isopen) {
                hammer2_inode_lock(pmp->ihidden, 0);
                error = hammer2_inode_connect(pmp->ihidden, ip,
                                              NULL, 0, ip->meta.inum);
                hammer2_inode_unlock(pmp->ihidden);
        } else {
                error = 0;
        }
        return error;
}

/*
 * This is called from the mount code to initialize pmp->ihidden
 */
void
hammer2_inode_install_hidden(hammer2_pfs_t *pmp)
{
        int error;

        if (pmp->ihidden)
                return;

        hammer2_trans_init(pmp, 0);
        hammer2_inode_lock(pmp->iroot, 0);

        /*
         * Find the hidden directory
         */
        {
                hammer2_xop_lookup_t *xop;

                xop = hammer2_xop_alloc(pmp->iroot, HAMMER2_XOP_MODIFYING);
                xop->lhc = HAMMER2_INODE_HIDDENDIR;
                hammer2_xop_start(&xop->head, hammer2_xop_lookup);
                error = hammer2_xop_collect(&xop->head, 0);

                if (error == 0) {
                        /*
                         * Found the hidden directory
                         */
                        kprintf("PFS FOUND HIDDEN DIR\n");
                        pmp->ihidden = hammer2_inode_get(pmp, pmp->iroot,
                                                         &xop->head.cluster,
                                                         -1);
                        hammer2_inode_ref(pmp->ihidden);
                        hammer2_inode_unlock(pmp->ihidden);
                }
                hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
        }

        /*
         * Create the hidden directory if it could not be found.
         */
        if (error == ENOENT) {
                kprintf("PFS CREATE HIDDEN DIR\n");

                pmp->ihidden = hammer2_inode_create(pmp->iroot, NULL, NULL,
                                                    NULL, 0,
                                /* lhc */           HAMMER2_INODE_HIDDENDIR,
                                /* inum */          HAMMER2_INODE_HIDDENDIR,
                                /* type */          HAMMER2_OBJTYPE_DIRECTORY,
                                /* target_type */   0,
                                /* flags */         0,
                                                    &error);
                if (pmp->ihidden) {
                        hammer2_inode_ref(pmp->ihidden);
                        hammer2_inode_unlock(pmp->ihidden);
                }
                if (error)
                        kprintf("PFS CREATE ERROR %d\n", error);
        }

        /*
         * Scan the hidden directory on-mount and destroy its contents
         */
        if (error == 0) {
                hammer2_xop_unlinkall_t *xop;

                hammer2_inode_lock(pmp->ihidden, 0);
                xop = hammer2_xop_alloc(pmp->ihidden, HAMMER2_XOP_MODIFYING);
                xop->key_beg = HAMMER2_KEY_MIN;
                xop->key_end = HAMMER2_KEY_MAX;
                hammer2_xop_start(&xop->head, hammer2_inode_xop_unlinkall);

                while ((error = hammer2_xop_collect(&xop->head, 0)) == 0) {
                        ;
                }
                hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
                hammer2_inode_unlock(pmp->ihidden);
        }

        hammer2_inode_unlock(pmp->iroot);
        hammer2_trans_done(pmp);
}

/*
 * Find the directory common to both fdip and tdip.
 *
 * Returns a held but not locked inode.  Caller typically locks the inode,
 * and when through unlocks AND drops it.
 */
hammer2_inode_t *
hammer2_inode_common_parent(hammer2_inode_t *fdip, hammer2_inode_t *tdip)
{
        hammer2_inode_t *scan1;
        hammer2_inode_t *scan2;

        /*
         * We used to have a depth field but it complicated matters too
         * much for directory renames.  So now it's ugly.  Check for
         * simple cases before giving up and doing it the expensive way.
         *
         * XXX need a bottom-up topology stability lock
         */
        if (fdip == tdip || fdip == tdip->pip) {
                hammer2_inode_ref(fdip);
                return(fdip);
        }
        if (fdip->pip == tdip) {
                hammer2_inode_ref(tdip);
                return(tdip);
        }

        /*
         * XXX not MPSAFE
         */
        for (scan1 = fdip; scan1->pmp == fdip->pmp; scan1 = scan1->pip) {
                scan2 = tdip;
                while (scan2->pmp == tdip->pmp) {
                        if (scan1 == scan2) {
                                hammer2_inode_ref(scan1);
                                return(scan1);
                        }
                        scan2 = scan2->pip;
                        if (scan2 == NULL)
                                break;
                }
        }
        panic("hammer2_inode_common_parent: no common parent %p %p\n",
              fdip, tdip);
        /* NOT REACHED */
        return(NULL);
}

/*
 * Mark an inode as being modified, meaning that the caller will modify
 * ip->meta.
 *
 * NOTE: No mtid (modify_tid) is passed into this routine.  The caller is
 *       only modifying the in-memory inode.  A modify_tid is synchronized
 *       later when the inode gets flushed.
 */
void
hammer2_inode_modify(hammer2_inode_t *ip)
{
        atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
        if (ip->vp)
                vsetisdirty(ip->vp);
}
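
/*
 * Usage sketch (illustrative, not part of the original source): callers
 * set the MODIFIED flag before touching ip->meta, and the change is pushed
 * to the backend chains later via hammer2_inode_chain_sync() with the
 * inode locked inside a transaction:
 *
 *      hammer2_inode_modify(ip);
 *      ip->meta.mtime = mtime;
 *      ...
 *      hammer2_inode_chain_sync(ip);
 */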

/*
 * Synchronize the inode's frontend state with the chain state prior
 * to any explicit flush of the inode or any strategy write call.
 *
 * Called with a locked inode inside a transaction.
 */
void
hammer2_inode_chain_sync(hammer2_inode_t *ip)
{
        if (ip->flags & (HAMMER2_INODE_RESIZED | HAMMER2_INODE_MODIFIED)) {
                hammer2_xop_fsync_t *xop;
                int error;

                xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
                xop->clear_directdata = 0;
                if (ip->flags & HAMMER2_INODE_RESIZED) {
                        if ((ip->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) &&
                            ip->meta.size > HAMMER2_EMBEDDED_BYTES) {
                                ip->meta.op_flags &= ~HAMMER2_OPFLAG_DIRECTDATA;
                                xop->clear_directdata = 1;
                        }
                        xop->osize = ip->osize;
                } else {
                        xop->osize = ip->meta.size;     /* safety */
                }
                xop->ipflags = ip->flags;
                xop->meta = ip->meta;

                atomic_clear_int(&ip->flags, HAMMER2_INODE_RESIZED |
                                             HAMMER2_INODE_MODIFIED);
                hammer2_xop_start(&xop->head, hammer2_inode_xop_chain_sync);
                error = hammer2_xop_collect(&xop->head, 0);
                hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
                if (error == ENOENT)
                        error = 0;
                if (error) {
                        kprintf("hammer2: unable to fsync inode %p\n", ip);
                        /*
                        atomic_set_int(&ip->flags,
                                       xop->ipflags & (HAMMER2_INODE_RESIZED |
                                                       HAMMER2_INODE_MODIFIED));
                        */
                        /* XXX return error somehow? */
                }
        }
}

/*
 * This handles unlinked open files after the vnode is finally dereferenced.
 * To avoid deadlocks it cannot be called from the normal vnode recycling
 * path, so we call it (1) after an unlink, rmdir, or rename, (2) on every
 * flush, and (3) on umount.
 *
 * Caller must be in a transaction.
 */
void
hammer2_inode_run_unlinkq(hammer2_pfs_t *pmp)
{
        hammer2_xop_destroy_t *xop;
        hammer2_inode_unlink_t *ipul;
        hammer2_inode_t *ip;
        int error;

        if (TAILQ_EMPTY(&pmp->unlinkq))
                return;

        LOCKSTART;
        hammer2_spin_ex(&pmp->list_spin);
        while ((ipul = TAILQ_FIRST(&pmp->unlinkq)) != NULL) {
                TAILQ_REMOVE(&pmp->unlinkq, ipul, entry);
                hammer2_spin_unex(&pmp->list_spin);
                ip = ipul->ip;
                kfree(ipul, pmp->minode);

                hammer2_inode_lock(ip, 0);
                xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
                hammer2_xop_start(&xop->head, hammer2_inode_xop_destroy);
                error = hammer2_xop_collect(&xop->head, 0);
                hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

                atomic_clear_int(&ip->flags, HAMMER2_INODE_ISDELETED);

                hammer2_inode_unlock(ip);
                hammer2_inode_drop(ip);                 /* ipul ref */

                hammer2_spin_ex(&pmp->list_spin);
        }
        hammer2_spin_unex(&pmp->list_spin);
        LOCKSTOP;
}
1387
1388 /*
1389  * Inode create helper (threaded, backend)
1390  *
1391  * Used by ncreate, nmknod, nsymlink, nmkdir.
1392  * Used by nlink and rename to create HARDLINK pointers.
1393  *
1394  * Frontend holds the parent directory ip locked exclusively.  We
1395  * create the inode and feed the exclusively locked chain to the
1396  * frontend.
1397  */
void
hammer2_inode_xop_create(hammer2_xop_t *arg, int clindex)
{
        hammer2_xop_create_t *xop = &arg->xop_create;
        hammer2_chain_t *parent;
        hammer2_chain_t *chain;
        hammer2_key_t key_next;
        int cache_index = -1;
        int error;

        if (hammer2_debug & 0x0001)
                kprintf("inode_create lhc %016jx clindex %d\n",
                        xop->lhc, clindex);

        chain = NULL;
        parent = hammer2_inode_chain(xop->head.ip1, clindex,
                                     HAMMER2_RESOLVE_ALWAYS);
        if (parent == NULL) {
                error = EIO;
                goto fail;
        }
        chain = hammer2_chain_lookup(&parent, &key_next,
                                     xop->lhc, xop->lhc,
                                     &cache_index, 0);
        if (chain) {
                hammer2_chain_unlock(chain);
                error = EEXIST;
                goto fail;
        }

        error = hammer2_chain_create(&parent, &chain,
                                     xop->head.ip1->pmp,
                                     xop->lhc, 0,
                                     HAMMER2_BREF_TYPE_INODE,
                                     HAMMER2_INODE_BYTES,
                                     xop->head.mtid, 0, xop->flags);
        if (error == 0) {
                hammer2_chain_modify(chain, xop->head.mtid, 0, 0);
                chain->data->ipdata.meta = xop->meta;
                if (xop->head.name1) {
                        bcopy(xop->head.name1,
                              chain->data->ipdata.filename,
                              xop->head.name1_len);
                        chain->data->ipdata.meta.name_len = xop->head.name1_len;
                }
                chain->data->ipdata.meta.name_key = xop->lhc;
        }
        hammer2_chain_unlock(chain);
        hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS |
                                  HAMMER2_RESOLVE_SHARED);
fail:
        if (parent) {
                hammer2_chain_unlock(parent);
                hammer2_chain_drop(parent);
        }
        hammer2_xop_feed(&xop->head, chain, clindex, error);
        if (chain)
                hammer2_chain_drop(chain);
}
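
/*
 * Frontend invocation sketch (hypothetical and simplified; it mirrors
 * the xop pattern used by hammer2_inode_run_unlinkq() above).  The
 * names dip, nip, lhc, and the use of hammer2_xop_setname() to stage
 * head.name1 are illustrative assumptions:
 *
 *      xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
 *      xop->lhc = lhc;                 (hashed, collision-adjusted key)
 *      xop->flags = 0;
 *      xop->meta = nip->meta;          (pre-initialized inode meta-data)
 *      hammer2_xop_setname(&xop->head, name, name_len);
 *      hammer2_xop_start(&xop->head, hammer2_inode_xop_create);
 *      error = hammer2_xop_collect(&xop->head, 0);
 *      hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
 */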

/*
 * Inode delete helper (backend, threaded)
 *
 * Generally used by hammer2_inode_run_unlinkq()
 */
void
hammer2_inode_xop_destroy(hammer2_xop_t *arg, int clindex)
{
        hammer2_xop_destroy_t *xop = &arg->xop_destroy;
        hammer2_pfs_t *pmp;
        hammer2_chain_t *parent;
        hammer2_chain_t *chain;
        hammer2_inode_t *ip;
        int error;

        /*
         * We need the precise parent chain to issue the deletion
         * (the deletion removes the inode chain from its parent's
         * block table), so resolve the inode chain and then step
         * up to its parent.
         */
        ip = xop->head.ip1;
        pmp = ip->pmp;
        chain = NULL;

        parent = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
        if (parent)
                hammer2_chain_getparent(&parent, HAMMER2_RESOLVE_ALWAYS);
        if (parent == NULL) {
                error = EIO;
                goto done;
        }
        chain = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
        if (chain == NULL) {
                error = EIO;
                goto done;
        }
        hammer2_chain_delete(parent, chain, xop->head.mtid, 0);
        error = 0;
done:
        hammer2_xop_feed(&xop->head, NULL, clindex, error);
        if (parent) {
                hammer2_chain_unlock(parent);
                hammer2_chain_drop(parent);
        }
        if (chain) {
                hammer2_chain_unlock(chain);
                hammer2_chain_drop(chain);
        }
}

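/*
 * Inode delete-all helper (backend, threaded)
 *
 * Deletes all chains under ip1 in the key range [key_beg, key_end],
 * feeding each deleted chain back to the frontend.  The final feed
 * with a NULL chain and ENOENT terminates the stream.
 */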
void
hammer2_inode_xop_unlinkall(hammer2_xop_t *arg, int clindex)
{
        hammer2_xop_unlinkall_t *xop = &arg->xop_unlinkall;
        hammer2_chain_t *parent;
        hammer2_chain_t *chain;
        hammer2_key_t key_next;
        int cache_index = -1;

        /*
         * We need the precise parent chain to issue the deletion.
         */
        parent = hammer2_inode_chain(xop->head.ip1, clindex,
                                     HAMMER2_RESOLVE_ALWAYS);
        chain = NULL;
        if (parent == NULL) {
                /* XXX error */
                goto done;
        }
        chain = hammer2_chain_lookup(&parent, &key_next,
                                     xop->key_beg, xop->key_end,
                                     &cache_index,
                                     HAMMER2_LOOKUP_ALWAYS);
        while (chain) {
                hammer2_chain_delete(parent, chain,
                                     xop->head.mtid, HAMMER2_DELETE_PERMANENT);
                hammer2_chain_unlock(chain);
                hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS |
                                          HAMMER2_RESOLVE_SHARED);
                hammer2_xop_feed(&xop->head, chain, clindex, chain->error);
                chain = hammer2_chain_next(&parent, chain, &key_next,
                                           key_next, xop->key_end,
                                           &cache_index,
                                           HAMMER2_LOOKUP_ALWAYS |
                                           HAMMER2_LOOKUP_NOUNLOCK);
        }
done:
        hammer2_xop_feed(&xop->head, NULL, clindex, ENOENT);
        if (parent) {
                hammer2_chain_unlock(parent);
                hammer2_chain_drop(parent);
        }
        if (chain) {
                hammer2_chain_unlock(chain);
                hammer2_chain_drop(chain);
        }
}

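/*
 * Inode connect helper (backend, threaded)
 *
 * Reconnects ip2's inode chain under the directory ip1 at key lhc,
 * rewriting the embedded filename and name key in the process.  The
 * initial lookup under ip1 is expected to fail.
 */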
void
hammer2_inode_xop_connect(hammer2_xop_t *arg, int clindex)
{
        hammer2_xop_connect_t *xop = &arg->xop_connect;
        hammer2_inode_data_t *wipdata;
        hammer2_chain_t *parent;
        hammer2_chain_t *chain;
        hammer2_pfs_t *pmp;
        hammer2_key_t key_dummy;
        int cache_index = -1;
        int error;

        /*
         * Get the directory, then issue a lookup to prime the parent
         * chain for the create.  The lookup is expected to fail.
         */
        pmp = xop->head.ip1->pmp;
        parent = hammer2_inode_chain(xop->head.ip1, clindex,
                                     HAMMER2_RESOLVE_ALWAYS);
        if (parent == NULL) {
                chain = NULL;
                error = EIO;
                goto fail;
        }
        chain = hammer2_chain_lookup(&parent, &key_dummy,
                                     xop->lhc, xop->lhc,
                                     &cache_index, 0);
        if (chain) {
                hammer2_chain_unlock(chain);
                hammer2_chain_drop(chain);
                chain = NULL;
                error = EEXIST;
                goto fail;
        }

        /*
         * Adjust the filename in the inode, set the name key.
         *
         * NOTE: Frontend must also adjust ip2->meta on success, we can't
         *       do it here.
         */
        chain = hammer2_inode_chain(xop->head.ip2, clindex,
                                    HAMMER2_RESOLVE_ALWAYS);
        hammer2_chain_modify(chain, xop->head.mtid, 0, 0);
        wipdata = &chain->data->ipdata;

        hammer2_inode_modify(xop->head.ip2);
        if (xop->head.name1) {
                bzero(wipdata->filename, sizeof(wipdata->filename));
                bcopy(xop->head.name1, wipdata->filename, xop->head.name1_len);
                wipdata->meta.name_len = xop->head.name1_len;
        }
        wipdata->meta.name_key = xop->lhc;

        /*
         * Reconnect the chain to the new parent directory.
         */
        error = hammer2_chain_create(&parent, &chain, pmp,
                                     xop->lhc, 0,
                                     HAMMER2_BREF_TYPE_INODE,
                                     HAMMER2_INODE_BYTES,
                                     xop->head.mtid, 0, 0);

        /*
         * Feed the result back to the frontend.
         */
fail:
        hammer2_xop_feed(&xop->head, NULL, clindex, error);
        if (parent) {
                hammer2_chain_unlock(parent);
                hammer2_chain_drop(parent);
        }
        if (chain) {
                hammer2_chain_unlock(chain);
                hammer2_chain_drop(chain);
        }
}

/*
 * Synchronize the in-memory inode with the chain.
 */
void
hammer2_inode_xop_chain_sync(hammer2_xop_t *arg, int clindex)
{
        hammer2_xop_fsync_t *xop = &arg->xop_fsync;
        hammer2_chain_t *parent;
        hammer2_chain_t *chain;
        int error;

        parent = hammer2_inode_chain(xop->head.ip1, clindex,
                                     HAMMER2_RESOLVE_ALWAYS);
        chain = NULL;
        if (parent == NULL) {
                error = EIO;
                goto done;
        }
        if (parent->error) {
                error = parent->error;
                goto done;
        }

        error = 0;

        if ((xop->ipflags & HAMMER2_INODE_RESIZED) == 0) {
                /* osize must be ignored */
        } else if (xop->meta.size < xop->osize) {
                /*
                 * The file was truncated.  We must delete any chains
                 * beyond the new EOF; the chain straddling the EOF will
                 * still be pending in the bioq.
                 */
                hammer2_key_t lbase;
                hammer2_key_t key_next;
                int cache_index = -1;

                lbase = (xop->meta.size + HAMMER2_PBUFMASK64) &
                        ~HAMMER2_PBUFMASK64;
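                /*
                 * Worked example (assuming HAMMER2_PBUFSIZE is 64KB):
                 * truncating to size 100000 rounds lbase up to 131072,
                 * so the buffer straddling the new EOF survives while
                 * everything at or beyond lbase is deleted below.
                 */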
                chain = hammer2_chain_lookup(&parent, &key_next,
                                             lbase, HAMMER2_KEY_MAX,
                                             &cache_index,
                                             HAMMER2_LOOKUP_NODATA |
                                             HAMMER2_LOOKUP_NODIRECT);
                while (chain) {
                        /*
                         * Degenerate embedded case, nothing to loop on.
                         */
                        switch (chain->bref.type) {
                        case HAMMER2_BREF_TYPE_INODE:
                                KKASSERT(0);
                                break;
                        case HAMMER2_BREF_TYPE_DATA:
                                hammer2_chain_delete(parent, chain,
                                                     xop->head.mtid,
                                                     HAMMER2_DELETE_PERMANENT);
                                break;
                        }
                        chain = hammer2_chain_next(&parent, chain, &key_next,
                                                   key_next, HAMMER2_KEY_MAX,
                                                   &cache_index,
                                                   HAMMER2_LOOKUP_NODATA |
                                                   HAMMER2_LOOKUP_NODIRECT);
                }

                /*
                 * Reset to point at inode for following code, if necessary.
                 */
                if (parent->bref.type != HAMMER2_BREF_TYPE_INODE) {
                        hammer2_chain_unlock(parent);
                        hammer2_chain_drop(parent);
                        parent = hammer2_inode_chain(xop->head.ip1, clindex,
                                                     HAMMER2_RESOLVE_ALWAYS);
                        kprintf("hammer2: TRUNCATE RESET on '%s'\n",
                                parent->data->ipdata.filename);
                }
        }

        /*
         * Sync the inode meta-data, potentially clearing the blockset
         * area of the direct data so it can be used for blockrefs.
         */
        hammer2_chain_modify(parent, xop->head.mtid, 0, 0);
        parent->data->ipdata.meta = xop->meta;
        if (xop->clear_directdata) {
                bzero(&parent->data->ipdata.u.blockset,
                      sizeof(parent->data->ipdata.u.blockset));
        }
done:
        if (chain) {
                hammer2_chain_unlock(chain);
                hammer2_chain_drop(chain);
        }
        if (parent) {
                hammer2_chain_unlock(parent);
                hammer2_chain_drop(parent);
        }
        hammer2_xop_feed(&xop->head, NULL, clindex, error);
}
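
/*
 * Frontend staging sketch (hypothetical and simplified): the xop
 * fields consumed above would typically be staged from the in-memory
 * inode before the xop is started.  The right-hand sides here are
 * illustrative assumptions:
 *
 *      xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
 *      xop->clear_directdata = 0;
 *      xop->ipflags = ip->flags;       (e.g. HAMMER2_INODE_RESIZED)
 *      xop->meta = ip->meta;           (new meta-data, incl. meta.size)
 *      xop->osize = ip->osize;         (prior size, for truncations)
 *      hammer2_xop_start(&xop->head, hammer2_inode_xop_chain_sync);
 *      error = hammer2_xop_collect(&xop->head, 0);
 *      hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
 */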