hammer2 - Add root mount support.
sys/vfs/hammer2/hammer2_inode.c
/*
 * Copyright (c) 2011-2014 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/uuid.h>

#include "hammer2.h"

#define INODE_DEBUG     0

RB_GENERATE2(hammer2_inode_tree, hammer2_inode, rbnode, hammer2_inode_cmp,
             hammer2_tid_t, meta.inum);

int
hammer2_inode_cmp(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
{
        if (ip1->meta.inum < ip2->meta.inum)
                return(-1);
        if (ip1->meta.inum > ip2->meta.inum)
                return(1);
        return(0);
}
58
/*
 * HAMMER2 inode locks
 *
 * HAMMER2 offers shared and exclusive locks on inodes.  Pass a mask of
 * flags for options:
 *
 *      - pass HAMMER2_RESOLVE_SHARED if a shared lock is desired.  The
 *        inode locking function will automatically set the RDONLY flag.
 *
 *      - pass HAMMER2_RESOLVE_ALWAYS if you need the inode's meta-data.
 *        Most front-end inode locks do.
 *
 *      - pass HAMMER2_RESOLVE_NEVER if you do not want to require that
 *        the inode data be resolved.  This is used by the syncthr because
 *        it can run on an unresolved/out-of-sync cluster, and also by the
 *        vnode reclamation code to avoid unnecessary I/O (particularly when
 *        disposing of hundreds of thousands of cached vnodes).
 *
 * The inode locking function locks the inode itself, resolves any stale
 * chains in the inode's cluster, and allocates a fresh copy of the
 * cluster with 1 ref and all the underlying chains locked.
 *
 * ip->cluster will be stable while the inode is locked.
 *
 * NOTE: We don't combine the inode/chain lock because putting away an
 *       inode would otherwise confuse multiple lock holders of the inode.
 *
 * NOTE: In-memory inodes always point to hardlink targets (the actual file),
 *       and never point to a hardlink pointer.
 *
 * NOTE: If caller passes HAMMER2_RESOLVE_RDONLY the exclusive locking code
 *       will feel free to reduce the chain set in the cluster as an
 *       optimization.  It will still be validated against the quorum if
 *       appropriate, but the optimization might be able to reduce data
 *       accesses to one node.  This flag is automatically set if the inode
 *       is locked with HAMMER2_RESOLVE_SHARED.
 */
void
hammer2_inode_lock(hammer2_inode_t *ip, int how)
{
        hammer2_inode_ref(ip);

        /*
         * Inode structure mutex
         */
        if (how & HAMMER2_RESOLVE_SHARED) {
                /*how |= HAMMER2_RESOLVE_RDONLY; not used */
                hammer2_mtx_sh(&ip->lock);
        } else {
                hammer2_mtx_ex(&ip->lock);
        }
}
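
/*
 * Illustrative usage (a sketch, not part of the original file): a
 * front-end operation that intends to modify the inode meta-data takes
 * an exclusive lock with resolved data, while a read-only path can use
 * the shared form:
 *
 *      hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
 *      ... modify ip->meta ...
 *      hammer2_inode_unlock(ip);
 *
 *      hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
 *      ... read-only access ...
 *      hammer2_inode_unlock(ip);
 */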

/*
 * Select a chain out of an inode's cluster and lock it.
 *
 * The inode does not have to be locked.
 */
hammer2_chain_t *
hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how)
{
        hammer2_chain_t *chain;

        hammer2_spin_sh(&ip->cluster_spin);
        if (clindex >= ip->cluster.nchains)
                chain = NULL;
        else
                chain = ip->cluster.array[clindex].chain;
        if (chain) {
                hammer2_chain_ref(chain);
                hammer2_spin_unsh(&ip->cluster_spin);
                hammer2_chain_lock(chain, how);
        } else {
                hammer2_spin_unsh(&ip->cluster_spin);
        }
        return chain;
}
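
/*
 * Illustrative caller (sketch, assuming a populated cluster element;
 * compare hammer2_inode_xop_destroy() below): the returned chain is
 * referenced and locked and must be unlocked and dropped when done.
 *
 *      chain = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
 *      if (chain) {
 *              ... inspect chain->data ...
 *              hammer2_chain_unlock(chain);
 *              hammer2_chain_drop(chain);
 *      }
 */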

hammer2_chain_t *
hammer2_inode_chain_and_parent(hammer2_inode_t *ip, int clindex,
                               hammer2_chain_t **parentp, int how)
{
        hammer2_chain_t *chain;
        hammer2_chain_t *parent;

        for (;;) {
                hammer2_spin_sh(&ip->cluster_spin);
                if (clindex >= ip->cluster.nchains)
                        chain = NULL;
                else
                        chain = ip->cluster.array[clindex].chain;
                if (chain) {
                        hammer2_chain_ref(chain);
                        hammer2_spin_unsh(&ip->cluster_spin);
                        hammer2_chain_lock(chain, how);
                } else {
                        hammer2_spin_unsh(&ip->cluster_spin);
                }

                /*
                 * Get parent, lock order must be (parent, chain).
                 *
                 * NOTE: The caller must guarantee that the requested
                 *       cluster element is populated (chain != NULL)
                 *       or the dereference below will panic.
                 */
                parent = chain->parent;
                hammer2_chain_ref(parent);
                hammer2_chain_unlock(chain);
                hammer2_chain_lock(parent, how);
                hammer2_chain_lock(chain, how);
                if (ip->cluster.array[clindex].chain == chain &&
                    chain->parent == parent) {
                        break;
                }

                /*
                 * Retry
                 */
                hammer2_chain_unlock(chain);
                hammer2_chain_drop(chain);
                hammer2_chain_unlock(parent);
                hammer2_chain_drop(parent);
        }
        *parentp = parent;

        return chain;
}

void
hammer2_inode_unlock(hammer2_inode_t *ip)
{
        hammer2_mtx_unlock(&ip->lock);
        hammer2_inode_drop(ip);
}

/*
 * Temporarily release a lock held shared or exclusive.  The caller must
 * hold the lock shared or exclusive on call and the lock will be released
 * on return.
 *
 * hammer2_inode_lock_temp_restore() restores a lock that was temporarily
 * released.
 */
hammer2_mtx_state_t
hammer2_inode_lock_temp_release(hammer2_inode_t *ip)
{
        return hammer2_mtx_temp_release(&ip->lock);
}

void
hammer2_inode_lock_temp_restore(hammer2_inode_t *ip, hammer2_mtx_state_t ostate)
{
        hammer2_mtx_temp_restore(&ip->lock, ostate);
}
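
/*
 * Sketch of the intended release/restore pairing (see the vget() dance
 * in hammer2_igetv() below for the real use): drop the inode lock across
 * an operation that could deadlock against it, then restore the prior
 * lock state.
 *
 *      hammer2_mtx_state_t ostate;
 *
 *      ostate = hammer2_inode_lock_temp_release(ip);
 *      ... blocking operation that may acquire conflicting locks ...
 *      hammer2_inode_lock_temp_restore(ip, ostate);
 */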

/*
 * Upgrade a shared inode lock to exclusive and return.  If the inode lock
 * is already held exclusively this is a NOP.
 *
 * The caller MUST hold the inode lock either shared or exclusive on call
 * and will own the lock exclusively on return.
 *
 * Returns non-zero if the lock was already exclusive prior to the upgrade.
 */
int
hammer2_inode_lock_upgrade(hammer2_inode_t *ip)
{
        int wasexclusive;

        if (mtx_islocked_ex(&ip->lock)) {
                wasexclusive = 1;
        } else {
                hammer2_mtx_unlock(&ip->lock);
                hammer2_mtx_ex(&ip->lock);
                wasexclusive = 0;
        }
        return wasexclusive;
}

/*
 * Downgrade an inode lock from exclusive to shared only if the inode
 * lock was previously shared.  If the inode lock was previously exclusive,
 * this is a NOP.
 */
void
hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int wasexclusive)
{
        if (wasexclusive == 0)
                mtx_downgrade(&ip->lock);
}
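
/*
 * Sketch of the upgrade/downgrade pairing (mirrors the vnode allocation
 * path in hammer2_igetv() below): wasexclusive records the pre-upgrade
 * state so the downgrade is a NOP when the lock was already exclusive.
 *
 *      int wasexclusive;
 *
 *      wasexclusive = hammer2_inode_lock_upgrade(ip);
 *      ... exclusive-only work ...
 *      hammer2_inode_lock_downgrade(ip, wasexclusive);
 */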

/*
 * Lookup an inode by inode number
 */
hammer2_inode_t *
hammer2_inode_lookup(hammer2_pfs_t *pmp, hammer2_tid_t inum)
{
        hammer2_inode_t *ip;

        KKASSERT(pmp);
        if (pmp->spmp_hmp) {
                ip = NULL;
        } else {
                hammer2_spin_ex(&pmp->inum_spin);
                ip = RB_LOOKUP(hammer2_inode_tree, &pmp->inum_tree, inum);
                if (ip)
                        hammer2_inode_ref(ip);
                hammer2_spin_unex(&pmp->inum_spin);
        }
        return(ip);
}
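
/*
 * Illustrative lookup (a sketch with a hypothetical caller): the inode,
 * if found, is returned referenced but not locked, so the caller pairs
 * the lookup with hammer2_inode_drop().
 *
 *      ip = hammer2_inode_lookup(pmp, inum);
 *      if (ip) {
 *              ... use ip, locking it if meta-data access is needed ...
 *              hammer2_inode_drop(ip);
 *      }
 */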

/*
 * Adding a ref to an inode is only legal if the inode already has at least
 * one ref.
 *
 * (can be called with spinlock held)
 */
void
hammer2_inode_ref(hammer2_inode_t *ip)
{
        atomic_add_int(&ip->refs, 1);
}

/*
 * Drop an inode reference, freeing the inode when the last reference goes
 * away.
 */
void
hammer2_inode_drop(hammer2_inode_t *ip)
{
        hammer2_pfs_t *pmp;
        hammer2_inode_t *pip;
        u_int refs;

        while (ip) {
                refs = ip->refs;
                cpu_ccfence();
                if (refs == 1) {
                        /*
                         * Transition to zero, must interlock with
                         * the inode inumber lookup tree (if applicable).
                         * It should not be possible for anyone to race
                         * the transition to 0.
                         */
                        pmp = ip->pmp;
                        KKASSERT(pmp);
                        hammer2_spin_ex(&pmp->inum_spin);

                        if (atomic_cmpset_int(&ip->refs, 1, 0)) {
                                KKASSERT(hammer2_mtx_refs(&ip->lock) == 0);
                                if (ip->flags & HAMMER2_INODE_ONRBTREE) {
                                        atomic_clear_int(&ip->flags,
                                                     HAMMER2_INODE_ONRBTREE);
                                        RB_REMOVE(hammer2_inode_tree,
                                                  &pmp->inum_tree, ip);
                                }
                                hammer2_spin_unex(&pmp->inum_spin);

                                pip = ip->pip;
                                ip->pip = NULL;
                                ip->pmp = NULL;

                                /*
                                 * Cleaning out ip->cluster isn't entirely
                                 * trivial.
                                 */
                                hammer2_inode_repoint(ip, NULL, NULL);

                                /*
                                 * We have to drop pip (if non-NULL) to
                                 * dispose of our implied reference from
                                 * ip->pip.  We can simply loop on it.
                                 */
                                kfree(ip, pmp->minode);
                                atomic_add_long(&pmp->inmem_inodes, -1);
                                ip = pip;
                                /* continue with pip (can be NULL) */
                        } else {
                                hammer2_spin_unex(&ip->pmp->inum_spin);
                        }
                } else {
                        /*
                         * Non-zero transition
                         */
                        if (atomic_cmpset_int(&ip->refs, refs, refs - 1))
                                break;
                }
        }
}
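
/*
 * Illustrative ref/drop pairing (sketch): a ref may only be added to an
 * inode that already has one, e.g. to hand the inode to another context.
 *
 *      hammer2_inode_ref(ip);          ... ip must already be held ...
 *      ... pass ip to another thread or queue ...
 *      hammer2_inode_drop(ip);         ... the other context disposes ...
 */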

/*
 * Get the vnode associated with the given inode, allocating the vnode if
 * necessary.  The vnode will be returned exclusively locked.
 *
 * The caller must lock the inode (shared or exclusive).
 *
 * Great care must be taken to avoid deadlocks and vnode acquisition/reclaim
 * races.
 */
struct vnode *
hammer2_igetv(hammer2_inode_t *ip, int *errorp)
{
        hammer2_pfs_t *pmp;
        struct vnode *vp;

        pmp = ip->pmp;
        KKASSERT(pmp != NULL);
        *errorp = 0;

        for (;;) {
                /*
                 * Attempt to reuse an existing vnode assignment.  It is
                 * possible to race a reclaim so the vget() may fail.  The
                 * inode must be unlocked during the vget() to avoid a
                 * deadlock against a reclaim.
                 */
                int wasexclusive;

                vp = ip->vp;
                if (vp) {
                        /*
                         * Inode must be unlocked during the vget() to avoid
                         * possible deadlocks, but leave the ip ref intact.
                         *
                         * vnode is held to prevent destruction during the
                         * vget().  The vget() can still fail if we lost
                         * a reclaim race on the vnode.
                         */
                        hammer2_mtx_state_t ostate;

                        vhold(vp);
                        ostate = hammer2_inode_lock_temp_release(ip);
                        if (vget(vp, LK_EXCLUSIVE)) {
                                vdrop(vp);
                                hammer2_inode_lock_temp_restore(ip, ostate);
                                continue;
                        }
                        hammer2_inode_lock_temp_restore(ip, ostate);
                        vdrop(vp);
                        /* vp still locked and ref from vget */
                        if (ip->vp != vp) {
                                kprintf("hammer2: igetv race %p/%p\n",
                                        ip->vp, vp);
                                vput(vp);
                                continue;
                        }
                        *errorp = 0;
                        break;
                }

                /*
                 * No vnode exists, allocate a new vnode.  Beware of
                 * allocation races.  This function will return an
                 * exclusively locked and referenced vnode.
                 */
                *errorp = getnewvnode(VT_HAMMER2, pmp->mp, &vp, 0, 0);
                if (*errorp) {
                        kprintf("hammer2: igetv getnewvnode failed %d\n",
                                *errorp);
                        vp = NULL;
                        break;
                }

                /*
                 * Lock the inode and check for an allocation race.
                 */
                wasexclusive = hammer2_inode_lock_upgrade(ip);
                if (ip->vp != NULL) {
                        vp->v_type = VBAD;
                        vx_put(vp);
                        hammer2_inode_lock_downgrade(ip, wasexclusive);
                        continue;
                }

                switch (ip->meta.type) {
                case HAMMER2_OBJTYPE_DIRECTORY:
                        vp->v_type = VDIR;
                        break;
                case HAMMER2_OBJTYPE_REGFILE:
                        vp->v_type = VREG;
                        vinitvmio(vp, ip->meta.size,
                                  HAMMER2_LBUFSIZE,
                                  (int)ip->meta.size & HAMMER2_LBUFMASK);
                        break;
                case HAMMER2_OBJTYPE_SOFTLINK:
                        /*
                         * XXX for now we are using the generic file_read
                         * and file_write code so we need a buffer cache
                         * association.
                         */
                        vp->v_type = VLNK;
                        vinitvmio(vp, ip->meta.size,
                                  HAMMER2_LBUFSIZE,
                                  (int)ip->meta.size & HAMMER2_LBUFMASK);
                        break;
                case HAMMER2_OBJTYPE_CDEV:
                        vp->v_type = VCHR;
                        /* fall through */
                case HAMMER2_OBJTYPE_BDEV:
                        vp->v_ops = &pmp->mp->mnt_vn_spec_ops;
                        if (ip->meta.type != HAMMER2_OBJTYPE_CDEV)
                                vp->v_type = VBLK;
                        addaliasu(vp,
                                  ip->meta.rmajor,
                                  ip->meta.rminor);
                        break;
                case HAMMER2_OBJTYPE_FIFO:
                        vp->v_type = VFIFO;
                        vp->v_ops = &pmp->mp->mnt_vn_fifo_ops;
                        break;
                case HAMMER2_OBJTYPE_SOCKET:
                        vp->v_type = VSOCK;
                        break;
                default:
                        panic("hammer2: unhandled objtype %d",
                              ip->meta.type);
                        break;
                }

                if (ip == pmp->iroot)
                        vsetflags(vp, VROOT);

                vp->v_data = ip;
                ip->vp = vp;
                hammer2_inode_ref(ip);          /* vp association */
                hammer2_inode_lock_downgrade(ip, wasexclusive);
                break;
        }

        /*
         * Return non-NULL vp and *errorp == 0, or NULL vp and *errorp != 0.
         */
        if (hammer2_debug & 0x0002) {
                kprintf("igetv vp %p refs 0x%08x aux 0x%08x\n",
                        vp, vp->v_refcnt, vp->v_auxrefs);
        }
        return (vp);
}
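
/*
 * Typical caller sketch (assumed, matching the contract above): the
 * inode is locked around the call and the vnode comes back exclusively
 * locked with a reference from vget()/getnewvnode().
 *
 *      hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
 *      vp = hammer2_igetv(ip, &error);
 *      hammer2_inode_unlock(ip);
 *      if (vp == NULL)
 *              ... propagate error ...
 */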

/*
 * Returns the inode associated with the passed-in cluster, creating the
 * inode if necessary and synchronizing it to the passed-in cluster otherwise.
 * When synchronizing, if idx >= 0, only cluster index (idx) is synchronized.
 * Otherwise the whole cluster is synchronized.
 *
 * The passed-in cluster must be locked and will remain locked on return.
 * The returned inode will be locked and the caller may dispose of both
 * via hammer2_inode_unlock() + hammer2_inode_drop().  However, if the caller
 * needs to resolve a hardlink it must ref/unlock/relock/drop the inode.
 *
 * The hammer2_inode structure regulates the interface between the high level
 * kernel VNOPS API and the filesystem backend (the chains).
 *
 * On return the inode is locked with the supplied cluster.
 */
hammer2_inode_t *
hammer2_inode_get(hammer2_pfs_t *pmp, hammer2_inode_t *dip,
                  hammer2_cluster_t *cluster, int idx)
{
        hammer2_inode_t *nip;
        const hammer2_inode_data_t *iptmp;
        const hammer2_inode_data_t *nipdata;

        KKASSERT(cluster == NULL ||
                 hammer2_cluster_type(cluster) == HAMMER2_BREF_TYPE_INODE);
        KKASSERT(pmp);

        /*
         * Interlocked lookup/ref of the inode.  This code is only needed
         * when looking up inodes with nlinks != 0 (TODO: optimize out
         * otherwise and test for duplicates).
         *
         * Cluster can be NULL during the initial pfs allocation.
         */
again:
        while (cluster) {
                iptmp = &hammer2_cluster_rdata(cluster)->ipdata;
                nip = hammer2_inode_lookup(pmp, iptmp->meta.inum);
                if (nip == NULL)
                        break;

                hammer2_mtx_ex(&nip->lock);

                /*
                 * Handle SMP race (not applicable to the super-root spmp
                 * which can't index inodes due to duplicative inode numbers).
                 */
                if (pmp->spmp_hmp == NULL &&
                    (nip->flags & HAMMER2_INODE_ONRBTREE) == 0) {
                        hammer2_mtx_unlock(&nip->lock);
                        hammer2_inode_drop(nip);
                        continue;
                }
                if (idx >= 0)
                        hammer2_inode_repoint_one(nip, cluster, idx);
                else
                        hammer2_inode_repoint(nip, NULL, cluster);

                return nip;
        }

        /*
         * We couldn't find the inode number, create a new inode.
         */
        nip = kmalloc(sizeof(*nip), pmp->minode, M_WAITOK | M_ZERO);
        spin_init(&nip->cluster_spin, "h2clspin");
        atomic_add_long(&pmp->inmem_inodes, 1);
        hammer2_pfs_memory_inc(pmp);
        hammer2_pfs_memory_wakeup(pmp);
        if (pmp->spmp_hmp)
                nip->flags = HAMMER2_INODE_SROOT;

        /*
         * Initialize nip's cluster.  A cluster is provided for normal
         * inodes but typically not for the super-root or PFS inodes.
         */
        nip->cluster.refs = 1;
        nip->cluster.pmp = pmp;
        nip->cluster.flags |= HAMMER2_CLUSTER_INODE;
        if (cluster) {
                nipdata = &hammer2_cluster_rdata(cluster)->ipdata;
                nip->meta = nipdata->meta;
                atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD);
                hammer2_inode_repoint(nip, NULL, cluster);
        } else {
                nip->meta.inum = 1;             /* PFS inum is always 1 XXX */
                /* mtime will be updated when a cluster is available */
                atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD);/*XXX*/
        }

        nip->pip = dip;                         /* can be NULL */
        if (dip)
                hammer2_inode_ref(dip); /* ref dip for nip->pip */

        nip->pmp = pmp;

        /*
         * ref and lock on nip gives it state compatible to after a
         * hammer2_inode_lock() call.
         */
        nip->refs = 1;
        hammer2_mtx_init(&nip->lock, "h2inode");
        hammer2_mtx_ex(&nip->lock);
        /* combination of thread lock and chain lock == inode lock */

        /*
         * Attempt to add the inode.  If it fails we raced another inode
         * get.  Undo all the work and try again.
         */
        if (pmp->spmp_hmp == NULL) {
                hammer2_spin_ex(&pmp->inum_spin);
                if (RB_INSERT(hammer2_inode_tree, &pmp->inum_tree, nip)) {
                        hammer2_spin_unex(&pmp->inum_spin);
                        hammer2_mtx_unlock(&nip->lock);
                        hammer2_inode_drop(nip);
                        goto again;
                }
                atomic_set_int(&nip->flags, HAMMER2_INODE_ONRBTREE);
                hammer2_spin_unex(&pmp->inum_spin);
        }

        return (nip);
}
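
/*
 * Illustrative caller (sketch; compare hammer2_inode_install_hidden()
 * below): the cluster stays locked by the caller, the returned inode is
 * locked, and a caller that wants to cache the inode beyond the unlock
 * takes its own ref first because hammer2_inode_unlock() also drops.
 *
 *      nip = hammer2_inode_get(pmp, dip, &xop->head.cluster, -1);
 *      hammer2_inode_ref(nip);         ... keep nip beyond the unlock ...
 *      hammer2_inode_unlock(nip);      ... consumes the ref from _get() ...
 */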

/*
 * Create a new inode in the specified directory using the vattr to
 * figure out the type of inode.
 *
 * If no error occurs the new inode is returned with its cluster locked;
 * otherwise NULL is returned and *errorp is set.
 *
 * If vap and/or cred are NULL the related fields are not set and the
 * inode type defaults to a directory.  This is used when creating PFSs
 * under the super-root, so the inode number is set to 1 in this case.
 *
 * dip is not locked on entry.
 *
 * NOTE: When used to create a snapshot, the inode is temporarily associated
 *       with the super-root spmp. XXX should pass new pmp for snapshot.
 */
hammer2_inode_t *
hammer2_inode_create(hammer2_inode_t *dip,
                     struct vattr *vap, struct ucred *cred,
                     const uint8_t *name, size_t name_len, hammer2_key_t lhc,
                     hammer2_key_t inum, uint8_t type, uint8_t target_type,
                     int flags, int *errorp)
{
        hammer2_xop_create_t *xop;
        hammer2_inode_t *nip;
        int error;
        uid_t xuid;
        uuid_t dip_uid;
        uuid_t dip_gid;
        uint32_t dip_mode;
        uint8_t dip_comp_algo;
        uint8_t dip_check_algo;

        if (name)
                lhc = hammer2_dirhash(name, name_len);
        *errorp = 0;
        nip = NULL;

        /*
         * Locate the inode or indirect block to create the new
         * entry in.  At the same time check for key collisions
         * and iterate until we don't get one.
         *
         * NOTE: hidden inodes do not have iterators.
         *
         * Lock the directory exclusively for now to guarantee that
         * we can find an unused lhc for the name.  Due to collisions,
         * two different creates can end up with the same lhc so we
         * cannot depend on the OS to prevent the collision.
         */
        hammer2_inode_lock(dip, 0);

        dip_uid = dip->meta.uid;
        dip_gid = dip->meta.gid;
        dip_mode = dip->meta.mode;
        dip_comp_algo = dip->meta.comp_algo;
        dip_check_algo = dip->meta.check_algo;

        /*
         * If name specified, locate an unused key in the collision space.
         * Otherwise use the passed-in lhc directly.
         */
        if (name) {
                hammer2_xop_scanlhc_t *sxop;
                hammer2_key_t lhcbase;

                lhcbase = lhc;
                sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
                sxop->lhc = lhc;
                hammer2_xop_start(&sxop->head, hammer2_xop_scanlhc);
                while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
                        if (lhc != sxop->head.cluster.focus->bref.key)
                                break;
                        ++lhc;
                }
                hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

                if (error) {
                        if (error != ENOENT)
                                goto done2;
                        ++lhc;
                        error = 0;
                }
                if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
                        error = ENOSPC;
                        goto done2;
                }
        }

        /*
         * Create the inode with the lhc as the key.
         */
        xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
        xop->lhc = lhc;
        xop->flags = flags;
        bzero(&xop->meta, sizeof(xop->meta));

        if (vap) {
                xop->meta.type = hammer2_get_obj_type(vap->va_type);

                switch (xop->meta.type) {
                case HAMMER2_OBJTYPE_CDEV:
                case HAMMER2_OBJTYPE_BDEV:
                        xop->meta.rmajor = vap->va_rmajor;
                        xop->meta.rminor = vap->va_rminor;
                        break;
                default:
                        break;
                }
                type = xop->meta.type;
        } else {
                xop->meta.type = type;
                xop->meta.target_type = target_type;
        }
        xop->meta.inum = inum;
        /* Inherit parent's inode compression mode. */
        xop->meta.comp_algo = dip_comp_algo;
        xop->meta.check_algo = dip_check_algo;
        xop->meta.version = HAMMER2_INODE_VERSION_ONE;
        hammer2_update_time(&xop->meta.ctime);
        xop->meta.mtime = xop->meta.ctime;
        if (vap)
                xop->meta.mode = vap->va_mode;
        xop->meta.nlinks = 1;
        if (vap) {
                if (dip && dip->pmp) {
                        xuid = hammer2_to_unix_xid(&dip_uid);
                        xuid = vop_helper_create_uid(dip->pmp->mp,
                                                     dip_mode,
                                                     xuid,
                                                     cred,
                                                     &vap->va_mode);
                } else {
                        /* super-root has no dip and/or pmp */
                        xuid = 0;
                }
                if (vap->va_vaflags & VA_UID_UUID_VALID)
                        xop->meta.uid = vap->va_uid_uuid;
                else if (vap->va_uid != (uid_t)VNOVAL)
                        hammer2_guid_to_uuid(&xop->meta.uid, vap->va_uid);
                else
                        hammer2_guid_to_uuid(&xop->meta.uid, xuid);

                if (vap->va_vaflags & VA_GID_UUID_VALID)
                        xop->meta.gid = vap->va_gid_uuid;
                else if (vap->va_gid != (gid_t)VNOVAL)
                        hammer2_guid_to_uuid(&xop->meta.gid, vap->va_gid);
                else if (dip)
                        xop->meta.gid = dip_gid;
        }

        /*
         * Regular files and softlinks allow a small amount of data to be
         * directly embedded in the inode.  This flag will be cleared if
         * the size is extended past the embedded limit.
         */
        if (xop->meta.type == HAMMER2_OBJTYPE_REGFILE ||
            xop->meta.type == HAMMER2_OBJTYPE_SOFTLINK ||
            xop->meta.type == HAMMER2_OBJTYPE_HARDLINK) {
                xop->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
        }
        if (name)
                hammer2_xop_setname(&xop->head, name, name_len);
        xop->meta.name_len = name_len;
        xop->meta.name_key = lhc;
        KKASSERT(name_len < HAMMER2_INODE_MAXNAME);

        hammer2_xop_start(&xop->head, hammer2_inode_xop_create);

        error = hammer2_xop_collect(&xop->head, 0);
#if INODE_DEBUG
        kprintf("CREATE INODE %*.*s\n",
                (int)name_len, (int)name_len, name);
#endif

        if (error) {
                *errorp = error;
                goto done;
        }

        /*
         * Set up the new inode if not a hardlink pointer.
         *
         * NOTE: *_get() integrates chain's lock into the inode lock.
         *
         * NOTE: Only one new inode can currently be created per
         *       transaction.  If the need arises we can adjust
         *       hammer2_trans_init() to allow more.
         *
         * NOTE: nipdata will have chain's blockset data.
         */
        if (type != HAMMER2_OBJTYPE_HARDLINK) {
                nip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
                nip->comp_heuristic = 0;
        } else {
                nip = NULL;
        }

done:
        hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
done2:
        hammer2_inode_unlock(dip);

        return (nip);
}

/*
 * Connect the disconnected inode (ip) to the directory (dip) with the
 * specified (name, name_len).  If name is NULL, (lhc) will be used as
 * the directory key and the inode's embedded name will not be modified
 * for future recovery purposes.
 *
 * dip and ip must both be locked exclusively (dip in particular to avoid
 * lhc collisions).
 */
int
hammer2_inode_connect(hammer2_inode_t *dip, hammer2_inode_t *ip,
                      const char *name, size_t name_len,
                      hammer2_key_t lhc)
{
        hammer2_xop_scanlhc_t *sxop;
        hammer2_xop_connect_t *xop;
        hammer2_inode_t *opip;
        hammer2_key_t lhcbase;
        int error;

        /*
         * Calculate the lhc and resolve the collision space.
         */
        if (name) {
                lhc = lhcbase = hammer2_dirhash(name, name_len);
                sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
                sxop->lhc = lhc;
                hammer2_xop_start(&sxop->head, hammer2_xop_scanlhc);
                while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
                        if (lhc != sxop->head.cluster.focus->bref.key)
                                break;
                        ++lhc;
                }
                hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

                if (error) {
                        if (error != ENOENT)
                                goto done;
                        ++lhc;
                        error = 0;
                }
                if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
                        error = ENOSPC;
                        goto done;
                }
        } else {
                error = 0;
        }

        /*
         * Formally reconnect the in-memory structure.  ip must
         * be locked exclusively to safely change ip->pip.
         */
        if (ip->pip != dip) {
                hammer2_inode_ref(dip);
                opip = ip->pip;
                ip->pip = dip;
                if (opip)
                        hammer2_inode_drop(opip);
        }

        /*
         * Connect her up
         */
        xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
        if (name)
                hammer2_xop_setname(&xop->head, name, name_len);
        hammer2_xop_setip2(&xop->head, ip);
        xop->lhc = lhc;
        hammer2_xop_start(&xop->head, hammer2_inode_xop_connect);
        error = hammer2_xop_collect(&xop->head, 0);
        hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

        /*
         * On success make the same adjustments to ip->meta or the
         * next flush may blow up the chain.
         */
        if (error == 0) {
                hammer2_inode_modify(ip);
                ip->meta.name_key = lhc;
                if (name)
                        ip->meta.name_len = name_len;
        }
done:
        return error;
}

/*
 * Repoint ip->cluster's chains to cluster's chains and fixup the default
 * focus.  All items, valid or invalid, are repointed.  hammer2_xop_start()
 * filters out invalid or non-matching elements.
 *
 * The caller must hold the inode exclusively locked; the cluster, if not
 * NULL, must also be locked.
 *
 * Cluster may be NULL to clean out any chains in ip->cluster.
 */
void
hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_inode_t *pip,
                      hammer2_cluster_t *cluster)
{
        hammer2_chain_t *dropch[HAMMER2_MAXCLUSTER];
        hammer2_chain_t *ochain;
        hammer2_chain_t *nchain;
        hammer2_inode_t *opip;
        int i;

        bzero(dropch, sizeof(dropch));

        /*
         * Replace chains in ip->cluster with chains from cluster and
         * adjust the focus if necessary.
         *
         * NOTE: nchain and/or ochain can be NULL due to gaps
         *       in the cluster arrays.
         */
        hammer2_spin_ex(&ip->cluster_spin);
        for (i = 0; cluster && i < cluster->nchains; ++i) {
                /*
                 * Do not replace elements which are the same.  Also handle
                 * element count discrepancies.
                 */
                nchain = cluster->array[i].chain;
                if (i < ip->cluster.nchains) {
                        ochain = ip->cluster.array[i].chain;
                        if (ochain == nchain)
                                continue;
                } else {
                        ochain = NULL;
                }

                /*
                 * Make adjustments
                 */
                ip->cluster.array[i].chain = nchain;
                ip->cluster.array[i].flags &= ~HAMMER2_CITEM_INVALID;
                ip->cluster.array[i].flags |= cluster->array[i].flags &
                                              HAMMER2_CITEM_INVALID;
                if (nchain)
                        hammer2_chain_ref(nchain);
                dropch[i] = ochain;
        }

        /*
         * Release any left-over chains in ip->cluster.
         */
        while (i < ip->cluster.nchains) {
                nchain = ip->cluster.array[i].chain;
                if (nchain) {
                        ip->cluster.array[i].chain = NULL;
                        ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
                }
                dropch[i] = nchain;
                ++i;
        }

        /*
         * Fixup fields.  Note that the inode-embedded cluster is never
         * directly locked.
         */
        if (cluster) {
                ip->cluster.nchains = cluster->nchains;
                ip->cluster.focus = cluster->focus;
                ip->cluster.flags = cluster->flags & ~HAMMER2_CLUSTER_LOCKED;
        } else {
                ip->cluster.nchains = 0;
                ip->cluster.focus = NULL;
                ip->cluster.flags &= ~HAMMER2_CLUSTER_ZFLAGS;
        }

        /*
         * Repoint ip->pip if requested (non-NULL pip).
         */
        if (pip && ip->pip != pip) {
                opip = ip->pip;
                hammer2_inode_ref(pip);
                ip->pip = pip;
        } else {
                opip = NULL;
        }
        hammer2_spin_unex(&ip->cluster_spin);

        /*
         * Cleanup outside of spinlock
         */
        while (--i >= 0) {
                if (dropch[i])
                        hammer2_chain_drop(dropch[i]);
        }
        if (opip)
                hammer2_inode_drop(opip);
}

/*
 * Repoint a single element from the cluster to the ip.  Used by the
 * synchronization threads to piecemeal update inodes.  Does not change
 * the focus and requires the inode to be re-locked to clean up flags (XXX).
 */
void
hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
                          int idx)
{
        hammer2_chain_t *ochain;
        hammer2_chain_t *nchain;
        int i;

        hammer2_spin_ex(&ip->cluster_spin);
        KKASSERT(idx < cluster->nchains);
        if (idx < ip->cluster.nchains) {
                ochain = ip->cluster.array[idx].chain;
                nchain = cluster->array[idx].chain;
        } else {
                ochain = NULL;
                nchain = cluster->array[idx].chain;

                /*
                 * Invalidate any intermediate elements before extending
                 * the array.  The element count must be bumped after the
                 * loop or the loop would never execute.
                 */
                for (i = ip->cluster.nchains; i <= idx; ++i) {
                        bzero(&ip->cluster.array[i],
                              sizeof(ip->cluster.array[i]));
                        ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
                }
                ip->cluster.nchains = idx + 1;
        }
        if (ochain != nchain) {
                /*
                 * Make adjustments.
                 */
                ip->cluster.array[idx].chain = nchain;
                ip->cluster.array[idx].flags &= ~HAMMER2_CITEM_INVALID;
                ip->cluster.array[idx].flags |= cluster->array[idx].flags &
                                                HAMMER2_CITEM_INVALID;
        }
        hammer2_spin_unex(&ip->cluster_spin);
        if (ochain != nchain) {
                if (nchain)
                        hammer2_chain_ref(nchain);
                if (ochain)
                        hammer2_chain_drop(ochain);
        }
}

/*
 * Called with a locked inode to finish unlinking an inode after xop_unlink
 * had been run.  This function is responsible for decrementing nlinks and
 * moving deleted inodes to the hidden directory if they are still open.
 *
 * We don't bother decrementing nlinks if the file is not open and this was
 * the last link.
 *
 * If the inode is a hardlink target its chain has not yet been deleted,
 * otherwise its chain has been deleted.
 *
 * If isopen then any prior deletion was not permanent and the inode must
 * be moved to the hidden directory.
 */
int
hammer2_inode_unlink_finisher(hammer2_inode_t *ip, int isopen)
{
        hammer2_pfs_t *pmp;
        int error;

        pmp = ip->pmp;

        /*
         * Decrement nlinks.  If this is the last link and the file is
         * not open, the chain has already been removed and we don't bother
         * dirtying the inode.
         */
        if (ip->meta.nlinks == 1) {
                atomic_set_int(&ip->flags, HAMMER2_INODE_ISUNLINKED);
                if (isopen == 0) {
                        atomic_set_int(&ip->flags, HAMMER2_INODE_ISDELETED);
                        return 0;
                }
        }

        hammer2_inode_modify(ip);
        --ip->meta.nlinks;
        if ((int64_t)ip->meta.nlinks < 0)
                ip->meta.nlinks = 0;    /* safety */

        /*
         * If nlinks is not zero we are done.  However, this should only be
         * possible with a hardlink target.  If the inode is an embedded
         * hardlink nlinks should have dropped to zero, warn and proceed
         * with the next step.
         */
        if (ip->meta.nlinks) {
                if ((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0)
                        return 0;
                kprintf("hammer2_inode_unlink: nlinks was not 0 (%jd)\n",
                        (intmax_t)ip->meta.nlinks);
                return 0;
        }

        /*
         * nlinks is now zero, the inode should have already been deleted.
         * If the file is open it was deleted non-permanently and must be
         * moved to the hidden directory.
         *
         * When moving to the hidden directory we force the name_key to the
         * inode number to avoid collisions.
         */
        if (isopen) {
                hammer2_inode_lock(pmp->ihidden, 0);
                error = hammer2_inode_connect(pmp->ihidden, ip,
                                              NULL, 0, ip->meta.inum);
                hammer2_inode_unlock(pmp->ihidden);
        } else {
                error = 0;
        }
        return error;
}

/*
 * This is called from the mount code to initialize pmp->ihidden
 */
void
hammer2_inode_install_hidden(hammer2_pfs_t *pmp)
{
        int error;

        if (pmp->ihidden)
                return;

        hammer2_trans_init(pmp, 0);
        hammer2_inode_lock(pmp->iroot, 0);

        /*
         * Find the hidden directory
         */
        {
                hammer2_xop_lookup_t *xop;

                xop = hammer2_xop_alloc(pmp->iroot, HAMMER2_XOP_MODIFYING);
                xop->lhc = HAMMER2_INODE_HIDDENDIR;
                hammer2_xop_start(&xop->head, hammer2_xop_lookup);
                error = hammer2_xop_collect(&xop->head, 0);

                if (error == 0) {
                        /*
                         * Found the hidden directory
                         */
                        kprintf("PFS FOUND HIDDEN DIR\n");
                        pmp->ihidden = hammer2_inode_get(pmp, pmp->iroot,
                                                         &xop->head.cluster,
                                                         -1);
                        hammer2_inode_ref(pmp->ihidden);
                        hammer2_inode_unlock(pmp->ihidden);
                }
                hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
        }

        /*
         * Create the hidden directory if it could not be found.
         */
        if (error == ENOENT) {
                kprintf("PFS CREATE HIDDEN DIR\n");

                pmp->ihidden = hammer2_inode_create(pmp->iroot, NULL, NULL,
                                                    NULL, 0,
                                /* lhc */           HAMMER2_INODE_HIDDENDIR,
                                /* inum */          HAMMER2_INODE_HIDDENDIR,
                                /* type */          HAMMER2_OBJTYPE_DIRECTORY,
                                /* target_type */   0,
                                /* flags */         0,
                                                    &error);
                if (pmp->ihidden) {
                        hammer2_inode_ref(pmp->ihidden);
                        hammer2_inode_unlock(pmp->ihidden);
                }
                if (error)
                        kprintf("PFS CREATE ERROR %d\n", error);
        }

        /*
         * Scan the hidden directory on-mount and destroy its contents
         */
        if (error == 0) {
                hammer2_xop_unlinkall_t *xop;

                hammer2_inode_lock(pmp->ihidden, 0);
                xop = hammer2_xop_alloc(pmp->ihidden, HAMMER2_XOP_MODIFYING);
                xop->key_beg = HAMMER2_KEY_MIN;
                xop->key_end = HAMMER2_KEY_MAX;
                hammer2_xop_start(&xop->head, hammer2_inode_xop_unlinkall);

                while ((error = hammer2_xop_collect(&xop->head, 0)) == 0) {
                        ;
                }
                hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
                hammer2_inode_unlock(pmp->ihidden);
        }

        hammer2_inode_unlock(pmp->iroot);
        hammer2_trans_done(pmp);
}

/*
 * Find the directory common to both fdip and tdip.
 *
 * Returns a held but not locked inode.  The caller typically locks the
 * inode, and when through unlocks AND drops it.
 */
hammer2_inode_t *
hammer2_inode_common_parent(hammer2_inode_t *fdip, hammer2_inode_t *tdip)
{
        hammer2_inode_t *scan1;
        hammer2_inode_t *scan2;

        /*
         * We used to have a depth field but it complicated matters too
         * much for directory renames.  So now it's ugly.  Check for
         * simple cases before giving up and doing it the expensive way.
         *
         * XXX need a bottom-up topology stability lock
         */
        if (fdip == tdip || fdip == tdip->pip) {
                hammer2_inode_ref(fdip);
                return(fdip);
        }
        if (fdip->pip == tdip) {
                hammer2_inode_ref(tdip);
                return(tdip);
        }

        /*
         * XXX not MPSAFE
         */
        for (scan1 = fdip; scan1->pmp == fdip->pmp; scan1 = scan1->pip) {
                scan2 = tdip;
                while (scan2->pmp == tdip->pmp) {
                        if (scan1 == scan2) {
                                hammer2_inode_ref(scan1);
                                return(scan1);
                        }
                        scan2 = scan2->pip;
                        if (scan2 == NULL)
                                break;
                }
        }
        panic("hammer2_inode_common_parent: no common parent %p %p\n",
              fdip, tdip);
        /* NOT REACHED */
        return(NULL);
}

/*
 * Mark an inode as being modified, meaning that the caller will modify
 * ip->meta.
 *
 * NOTE: No mtid (modify_tid) is passed into this routine.  The caller is
 *       only modifying the in-memory inode.  A modify_tid is synchronized
 *       later when the inode gets flushed.
 */
void
hammer2_inode_modify(hammer2_inode_t *ip)
{
        atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
        if (ip->vp)
                vsetisdirty(ip->vp);
}
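
/*
 * Illustrative modification pattern (sketch, consistent with the NOTE
 * above): the caller holds the inode exclusively, marks it modified,
 * then adjusts the in-memory meta-data.  The chains are synchronized
 * later by hammer2_inode_fsync().
 *
 *      hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
 *      hammer2_inode_modify(ip);
 *      ... update ip->meta fields ...
 *      hammer2_inode_unlock(ip);
 */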

/*
 * Synchronize the inode's frontend state with the chain state prior
 * to any explicit flush of the inode or any strategy write call.
 *
 * Called with a locked inode inside a transaction.
 */
void
hammer2_inode_fsync(hammer2_inode_t *ip)
{
        if (ip->flags & (HAMMER2_INODE_RESIZED | HAMMER2_INODE_MODIFIED)) {
                hammer2_xop_fsync_t *xop;
                int error;

                xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
                xop->clear_directdata = 0;
                if (ip->flags & HAMMER2_INODE_RESIZED) {
                        if ((ip->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) &&
                            ip->meta.size > HAMMER2_EMBEDDED_BYTES) {
                                ip->meta.op_flags &= ~HAMMER2_OPFLAG_DIRECTDATA;
                                xop->clear_directdata = 1;
                        }
                        xop->osize = ip->osize;
                } else {
                        xop->osize = ip->meta.size;     /* safety */
                }
                xop->ipflags = ip->flags;
                xop->meta = ip->meta;

                atomic_clear_int(&ip->flags, HAMMER2_INODE_RESIZED |
                                             HAMMER2_INODE_MODIFIED);
                hammer2_xop_start(&xop->head, hammer2_inode_xop_fsync);
                error = hammer2_xop_collect(&xop->head, 0);
                hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
                if (error == ENOENT)
                        error = 0;
                if (error) {
                        kprintf("hammer2: unable to fsync inode %p\n", ip);
                        /*
                        atomic_set_int(&ip->flags,
                                       xop->ipflags & (HAMMER2_INODE_RESIZED |
                                                       HAMMER2_INODE_MODIFIED));
                        */
                        /* XXX return error somehow? */
                }
        }
}
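
/*
 * Sketch of the fsync calling convention (assumed, per the comment
 * above): the call runs with the inode locked inside a transaction.
 *
 *      hammer2_trans_init(pmp, 0);
 *      hammer2_inode_lock(ip, 0);
 *      hammer2_inode_fsync(ip);
 *      hammer2_inode_unlock(ip);
 *      hammer2_trans_done(pmp);
 */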

/*
 * This handles unlinked open files after the vnode is finally dereferenced.
 * To avoid deadlocks it cannot be called from the normal vnode recycling
 * path, so we call it (1) after an unlink, rmdir, or rename, (2) on every
 * flush, and (3) on umount.
 *
 * Caller must be in a transaction.
 */
void
hammer2_inode_run_unlinkq(hammer2_pfs_t *pmp)
{
        hammer2_xop_destroy_t *xop;
        hammer2_inode_unlink_t *ipul;
        hammer2_inode_t *ip;
        int error;

        if (TAILQ_EMPTY(&pmp->unlinkq))
                return;

        LOCKSTART;
        hammer2_spin_ex(&pmp->list_spin);
        while ((ipul = TAILQ_FIRST(&pmp->unlinkq)) != NULL) {
                TAILQ_REMOVE(&pmp->unlinkq, ipul, entry);
                hammer2_spin_unex(&pmp->list_spin);
                ip = ipul->ip;
                kfree(ipul, pmp->minode);

                hammer2_inode_lock(ip, 0);
                xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
                hammer2_xop_start(&xop->head, hammer2_inode_xop_destroy);
                error = hammer2_xop_collect(&xop->head, 0);
                hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

                atomic_clear_int(&ip->flags, HAMMER2_INODE_ISDELETED);

                hammer2_inode_unlock(ip);
                hammer2_inode_drop(ip);                 /* ipul ref */

                hammer2_spin_ex(&pmp->list_spin);
        }
        hammer2_spin_unex(&pmp->list_spin);
        LOCKSTOP;
}
1378
1379 /*
1380  * Inode create helper (threaded, backend)
1381  *
1382  * Used by ncreate, nmknod, nsymlink, nmkdir.
1383  * Used by nlink and rename to create HARDLINK pointers.
1384  *
1385  * Frontend holds the parent directory ip locked exclusively.  We
1386  * create the inode and feed the exclusively locked chain to the
1387  * frontend.
1388  */
void
hammer2_inode_xop_create(hammer2_xop_t *arg, int clindex)
{
        hammer2_xop_create_t *xop = &arg->xop_create;
        hammer2_chain_t *parent;
        hammer2_chain_t *chain;
        hammer2_key_t key_next;
        int cache_index = -1;
        int error;

        if (hammer2_debug & 0x0001)
                kprintf("inode_create lhc %016jx clindex %d\n",
                        xop->lhc, clindex);

        chain = NULL;
        parent = hammer2_inode_chain(xop->head.ip, clindex,
                                     HAMMER2_RESOLVE_ALWAYS);
        if (parent == NULL) {
                error = EIO;
                goto fail;
        }
        chain = hammer2_chain_lookup(&parent, &key_next,
                                     xop->lhc, xop->lhc,
                                     &cache_index, 0);
        if (chain) {
                hammer2_chain_unlock(chain);
                error = EEXIST;
                goto fail;
        }

        error = hammer2_chain_create(&parent, &chain,
                                     xop->head.ip->pmp,
                                     xop->lhc, 0,
                                     HAMMER2_BREF_TYPE_INODE,
                                     HAMMER2_INODE_BYTES,
                                     xop->head.mtid, xop->flags);
        if (error == 0) {
                hammer2_chain_modify(chain, xop->head.mtid, 0);
                chain->data->ipdata.meta = xop->meta;
                if (xop->head.name) {
                        bcopy(xop->head.name,
                              chain->data->ipdata.filename,
                              xop->head.name_len);
                        chain->data->ipdata.meta.name_len = xop->head.name_len;
                }
                chain->data->ipdata.meta.name_key = xop->lhc;
        }
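        /*
         * The chain is fed back relocked shared; collection by the
         * frontend does not require an exclusive lock.
         */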
        hammer2_chain_unlock(chain);
        hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS |
                                  HAMMER2_RESOLVE_SHARED);
fail:
        if (parent) {
                hammer2_chain_unlock(parent);
                hammer2_chain_drop(parent);
        }
        hammer2_xop_feed(&xop->head, chain, clindex, error);
        if (chain)
                hammer2_chain_drop(chain);
}

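/*
 * Frontend-side sketch (illustrative only, not part of the original
 * file), following the same xop pattern hammer2_inode_run_unlinkq()
 * uses above:
 *
 *      xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
 *      xop->lhc = lhc;                         (directory hash key)
 *      xop->meta = ...;                        (initial inode meta-data)
 *      hammer2_xop_start(&xop->head, hammer2_inode_xop_create);
 *      error = hammer2_xop_collect(&xop->head, 0);
 *      ...wrap the collected cluster in an in-memory inode...
 *      hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
 *
 * Any field or variable not referenced by the backend above is an
 * assumption.
 */
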
/*
 * Inode delete helper (backend, threaded)
 *
 * Generally used by hammer2_inode_run_unlinkq()
 */
void
hammer2_inode_xop_destroy(hammer2_xop_t *arg, int clindex)
{
        hammer2_xop_destroy_t *xop = &arg->xop_destroy;
        hammer2_pfs_t *pmp;
        hammer2_chain_t *parent;
        hammer2_chain_t *chain;
        hammer2_inode_t *ip;
        int error;

        /*
         * We need the precise parent chain to issue the deletion.
         */
        ip = xop->head.ip;
        pmp = ip->pmp;
        chain = NULL;

        parent = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
        if (parent)
                hammer2_chain_getparent(&parent, HAMMER2_RESOLVE_ALWAYS);
        if (parent == NULL) {
                error = EIO;
                goto done;
        }
        chain = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
        if (chain == NULL) {
                error = EIO;
                goto done;
        }
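        /*
         * Delete the inode chain from the live topology.  The media
         * storage backing it is not recovered here; that is handled
         * separately (by the bulkfree pass in HAMMER2's design).
         */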
        hammer2_chain_delete(parent, chain, xop->head.mtid, 0);
        error = 0;
done:
        hammer2_xop_feed(&xop->head, NULL, clindex, error);
        if (parent) {
                hammer2_chain_unlock(parent);
                hammer2_chain_drop(parent);
        }
        if (chain) {
                hammer2_chain_unlock(chain);
                hammer2_chain_drop(chain);
        }
}

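/*
 * Inode delete-all helper (backend, threaded)
 *
 * Permanently deletes all chains under the target inode within the key
 * range [key_beg, key_end], feeding each deleted chain back to the
 * frontend, then terminates the feed with ENOENT.
 */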
void
hammer2_inode_xop_unlinkall(hammer2_xop_t *arg, int clindex)
{
        hammer2_xop_unlinkall_t *xop = &arg->xop_unlinkall;
        hammer2_chain_t *parent;
        hammer2_chain_t *chain;
        hammer2_key_t key_next;
        int cache_index = -1;

        /*
         * We need the precise parent chain to issue the deletion.
         */
        parent = hammer2_inode_chain(xop->head.ip, clindex,
                                     HAMMER2_RESOLVE_ALWAYS);
        chain = NULL;
        if (parent == NULL) {
                /* XXX error */
                goto done;
        }
        chain = hammer2_chain_lookup(&parent, &key_next,
                                     xop->key_beg, xop->key_end,
                                     &cache_index,
                                     HAMMER2_LOOKUP_ALWAYS);
        while (chain) {
                hammer2_chain_delete(parent, chain,
                                     xop->head.mtid, HAMMER2_DELETE_PERMANENT);
                hammer2_chain_unlock(chain);
                hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS |
                                          HAMMER2_RESOLVE_SHARED);
                hammer2_xop_feed(&xop->head, chain, clindex, chain->error);
                chain = hammer2_chain_next(&parent, chain, &key_next,
                                           key_next, xop->key_end,
                                           &cache_index,
                                           HAMMER2_LOOKUP_ALWAYS |
                                           HAMMER2_LOOKUP_NOUNLOCK);
        }
done:
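        /*
         * A terminal feed with ENOENT tells the frontend collector that
         * this node's iteration is complete.
         */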
        hammer2_xop_feed(&xop->head, NULL, clindex, ENOENT);
        if (parent) {
                hammer2_chain_unlock(parent);
                hammer2_chain_drop(parent);
        }
        if (chain) {
                hammer2_chain_unlock(chain);
                hammer2_chain_drop(chain);
        }
}

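/*
 * Inode connect helper (backend, threaded)
 *
 * Connects the target inode chain (ip2) under a new directory (ip) at
 * key lhc, updating the embedded filename and name_key to match.  Used
 * when an inode must be relinked under a different directory key.
 */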
void
hammer2_inode_xop_connect(hammer2_xop_t *arg, int clindex)
{
        hammer2_xop_connect_t *xop = &arg->xop_connect;
        hammer2_inode_data_t *wipdata;
        hammer2_chain_t *parent;
        hammer2_chain_t *chain;
        hammer2_pfs_t *pmp;
        hammer2_key_t key_dummy;
        int cache_index = -1;
        int error;

        /*
         * Get the directory, then issue a lookup to prime the parent
         * chain for the create.  The lookup is expected to fail.
         */
        pmp = xop->head.ip->pmp;
        parent = hammer2_inode_chain(xop->head.ip, clindex,
                                     HAMMER2_RESOLVE_ALWAYS);
        if (parent == NULL) {
                chain = NULL;
                error = EIO;
                goto fail;
        }
        chain = hammer2_chain_lookup(&parent, &key_dummy,
                                     xop->lhc, xop->lhc,
                                     &cache_index, 0);
        if (chain) {
                hammer2_chain_unlock(chain);
                hammer2_chain_drop(chain);
                chain = NULL;
                error = EEXIST;
                goto fail;
        }

        /*
         * Adjust the filename in the inode and set the name key.
         *
         * NOTE: The frontend must also adjust ip2->meta on success; we
         *       cannot do it here.
         */
        chain = hammer2_inode_chain(xop->head.ip2, clindex,
                                    HAMMER2_RESOLVE_ALWAYS);
        hammer2_chain_modify(chain, xop->head.mtid, 0);
        wipdata = &chain->data->ipdata;

        hammer2_inode_modify(xop->head.ip2);
        if (xop->head.name) {
                bzero(wipdata->filename, sizeof(wipdata->filename));
                bcopy(xop->head.name, wipdata->filename, xop->head.name_len);
                wipdata->meta.name_len = xop->head.name_len;
        }
        wipdata->meta.name_key = xop->lhc;

        /*
         * Reconnect the chain to the new parent directory.
         */
        error = hammer2_chain_create(&parent, &chain, pmp,
                                     xop->lhc, 0,
                                     HAMMER2_BREF_TYPE_INODE,
                                     HAMMER2_INODE_BYTES,
                                     xop->head.mtid, 0);

        /*
         * Feed the result back.
         */
fail:
        hammer2_xop_feed(&xop->head, NULL, clindex, error);
        if (parent) {
                hammer2_chain_unlock(parent);
                hammer2_chain_drop(parent);
        }
        if (chain) {
                hammer2_chain_unlock(chain);
                hammer2_chain_drop(chain);
        }
}

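/*
 * Inode meta-data fsync helper (backend, threaded)
 *
 * Flushes the frontend's inode meta-data (xop->meta) into the backend
 * inode chain.  If the file was truncated, data chains lying entirely
 * beyond the new EOF are deleted first.
 */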
void
hammer2_inode_xop_fsync(hammer2_xop_t *arg, int clindex)
{
        hammer2_xop_fsync_t *xop = &arg->xop_fsync;
        hammer2_chain_t *parent;
        hammer2_chain_t *chain;
        int error;

        parent = hammer2_inode_chain(xop->head.ip, clindex,
                                     HAMMER2_RESOLVE_ALWAYS);
        chain = NULL;
        if (parent == NULL) {
                error = EIO;
                goto done;
        }
        if (parent->error) {
                error = parent->error;
                goto done;
        }

        error = 0;

        if ((xop->ipflags & HAMMER2_INODE_RESIZED) == 0) {
                /* osize must be ignored */
        } else if (xop->meta.size < xop->osize) {
                /*
                 * We must delete any chains beyond the EOF.  The chain
                 * straddling the EOF will be pending in the bioq.
                 */
                hammer2_key_t lbase;
                hammer2_key_t key_next;
                int cache_index = -1;

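                /*
                 * Round the new EOF up to the next physical buffer
                 * boundary.  Chains at or beyond lbase lie entirely
                 * past EOF and can be deleted.
                 */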
                lbase = (xop->meta.size + HAMMER2_PBUFMASK64) &
                        ~HAMMER2_PBUFMASK64;
                chain = hammer2_chain_lookup(&parent, &key_next,
                                             lbase, HAMMER2_KEY_MAX,
                                             &cache_index,
                                             HAMMER2_LOOKUP_NODATA |
                                             HAMMER2_LOOKUP_NODIRECT);
                while (chain) {
                        /*
                         * Degenerate embedded case, nothing to loop on.
                         */
                        switch (chain->bref.type) {
                        case HAMMER2_BREF_TYPE_INODE:
                                KKASSERT(0);
                                break;
                        case HAMMER2_BREF_TYPE_DATA:
                                hammer2_chain_delete(parent, chain,
                                                     xop->head.mtid,
                                                     HAMMER2_DELETE_PERMANENT);
                                break;
                        }
                        chain = hammer2_chain_next(&parent, chain, &key_next,
                                                   key_next, HAMMER2_KEY_MAX,
                                                   &cache_index,
                                                   HAMMER2_LOOKUP_NODATA |
                                                   HAMMER2_LOOKUP_NODIRECT);
                }
        }

        /*
         * Sync the inode meta-data, potentially clearing the blockset
         * area of direct data so it can be used for blockrefs.
         */
        hammer2_chain_modify(parent, xop->head.mtid, 0);
        parent->data->ipdata.meta = xop->meta;
        if (xop->clear_directdata) {
                bzero(&parent->data->ipdata.u.blockset,
                      sizeof(parent->data->ipdata.u.blockset));
        }
done:
        if (chain) {
                hammer2_chain_unlock(chain);
                hammer2_chain_drop(chain);
        }
        if (parent) {
                hammer2_chain_unlock(parent);
                hammer2_chain_drop(parent);
        }
        hammer2_xop_feed(&xop->head, NULL, clindex, error);
}
