hammer2 - Refactor frontend part 14/many
sys/vfs/hammer2/hammer2_inode.c
/*
 * Copyright (c) 2011-2014 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/uuid.h>

#include "hammer2.h"

#define INODE_DEBUG     0

RB_GENERATE2(hammer2_inode_tree, hammer2_inode, rbnode, hammer2_inode_cmp,
             hammer2_tid_t, meta.inum);

int
hammer2_inode_cmp(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
{
        if (ip1->meta.inum < ip2->meta.inum)
                return(-1);
        if (ip1->meta.inum > ip2->meta.inum)
                return(1);
        return(0);
}

/*
 * HAMMER2 inode locks
 *
 * HAMMER2 offers shared and exclusive locks on inodes.  Pass a mask of
 * flags for options:
 *
 *      - pass HAMMER2_RESOLVE_SHARED if a shared lock is desired.  The
 *        inode locking function will automatically set the RDONLY flag.
 *
 *      - pass HAMMER2_RESOLVE_ALWAYS if you need the inode's meta-data.
 *        Most front-end inode locks do.
 *
 *      - pass HAMMER2_RESOLVE_NEVER if you do not want to require that
 *        the inode data be resolved.  This is used by the syncthr because
 *        it can run on an unresolved/out-of-sync cluster, and also by the
 *        vnode reclamation code to avoid unnecessary I/O (particularly when
 *        disposing of hundreds of thousands of cached vnodes).
 *
 * The inode locking function locks the inode itself, resolves any stale
 * chains in the inode's cluster, and allocates a fresh copy of the
 * cluster with 1 ref and all the underlying chains locked.
 *
 * ip->cluster will be stable while the inode is locked.
 *
 * NOTE: We don't combine the inode/chain lock because putting away an
 *       inode would otherwise confuse multiple lock holders of the inode.
 *
 * NOTE: In-memory inodes always point to hardlink targets (the actual file),
 *       and never point to a hardlink pointer.
 *
 * NOTE: If caller passes HAMMER2_RESOLVE_RDONLY the exclusive locking code
 *       will feel free to reduce the chain set in the cluster as an
 *       optimization.  It will still be validated against the quorum if
 *       appropriate, but the optimization might be able to reduce data
 *       accesses to one node.  This flag is automatically set if the inode
 *       is locked with HAMMER2_RESOLVE_SHARED.
 */
void
hammer2_inode_lock(hammer2_inode_t *ip, int how)
{
        hammer2_inode_ref(ip);

        /*
         * Inode structure mutex
         */
        if (how & HAMMER2_RESOLVE_SHARED) {
                /*how |= HAMMER2_RESOLVE_RDONLY; not used */
                hammer2_mtx_sh(&ip->lock);
        } else {
                hammer2_mtx_ex(&ip->lock);
        }
}

/*
 * Create a locked copy of ip->cluster.  Note that the copy will have a
 * ref on the cluster AND its chains and we don't want a second ref to
 * either when we lock it.
 *
 * Exclusive inode locks set the template focus chain in (ip)
 * as a hint.  Cluster locks can ALWAYS replace the focus in the
 * working copy if the hint does not work out, so beware.
 */
hammer2_cluster_t *
hammer2_inode_cluster(hammer2_inode_t *ip, int how)
{
        hammer2_cluster_t *cluster;

        cluster = hammer2_cluster_copy(&ip->cluster);
        hammer2_cluster_lock(cluster, how);
        hammer2_cluster_resolve(cluster);

        /*
         * cluster->focus will be set if resolving RESOLVE_ALWAYS, but
         * only update the cached focus in the inode structure when taking
         * out an exclusive lock.
         */
        if ((how & HAMMER2_RESOLVE_SHARED) == 0)
                ip->cluster.focus = cluster->focus;

        return cluster;
}

/*
 * Select a chain out of an inode's cluster and lock it.
 *
 * The inode does not have to be locked.
 */
hammer2_chain_t *
hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how)
{
        hammer2_chain_t *chain;

        hammer2_spin_sh(&ip->cluster_spin);
        if (clindex >= ip->cluster.nchains)
                chain = NULL;
        else
                chain = ip->cluster.array[clindex].chain;
        if (chain) {
                hammer2_chain_ref(chain);
                hammer2_spin_unsh(&ip->cluster_spin);
                hammer2_chain_lock(chain, how);
        } else {
                hammer2_spin_unsh(&ip->cluster_spin);
        }
        return chain;
}

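/*
 * Illustrative usage (a sketch, not part of the original source): a
 * per-node backend typically accesses one element of the cluster this
 * way, dropping both the lock and the ref when done, as the xop
 * backends at the end of this file do:
 *
 *      chain = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
 *      if (chain) {
 *              ... operate on this node's view of the inode ...
 *              hammer2_chain_unlock(chain);
 *              hammer2_chain_drop(chain);
 *      }
 */
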
void
hammer2_inode_unlock(hammer2_inode_t *ip, hammer2_cluster_t *cluster)
{
        if (cluster) {
                hammer2_cluster_unlock(cluster);
                hammer2_cluster_drop(cluster);
        }
        hammer2_mtx_unlock(&ip->lock);
        hammer2_inode_drop(ip);
}

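/*
 * Illustrative usage (a sketch, not part of the original source): the
 * typical frontend pattern pairs the inode lock and cluster acquisition
 * with a single unlock which disposes of both:
 *
 *      hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
 *      cluster = hammer2_inode_cluster(ip, HAMMER2_RESOLVE_ALWAYS);
 *      ... access hammer2_cluster_rdata(cluster)->ipdata ...
 *      hammer2_inode_unlock(ip, cluster);
 */
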
/*
 * Temporarily release a lock held shared or exclusive.  Caller must
 * hold the lock shared or exclusive on call and lock will be released
 * on return.
 *
 * Restore a lock that was temporarily released.
 */
hammer2_mtx_state_t
hammer2_inode_lock_temp_release(hammer2_inode_t *ip)
{
        return hammer2_mtx_temp_release(&ip->lock);
}

void
hammer2_inode_lock_temp_restore(hammer2_inode_t *ip, hammer2_mtx_state_t ostate)
{
        hammer2_mtx_temp_restore(&ip->lock, ostate);
}

/*
 * Upgrade a shared inode lock to exclusive and return.  If the inode lock
 * is already held exclusively this is a NOP.
 *
 * The caller MUST hold the inode lock either shared or exclusive on call
 * and will own the lock exclusively on return.
 *
 * Returns non-zero if the lock was already exclusive prior to the upgrade.
 */
int
hammer2_inode_lock_upgrade(hammer2_inode_t *ip)
{
        int wasexclusive;

        if (mtx_islocked_ex(&ip->lock)) {
                wasexclusive = 1;
        } else {
                hammer2_mtx_unlock(&ip->lock);
                hammer2_mtx_ex(&ip->lock);
                wasexclusive = 0;
        }
        return wasexclusive;
}

/*
 * Downgrade an inode lock from exclusive to shared only if the inode
 * lock was previously shared.  If the inode lock was previously exclusive,
 * this is a NOP.
 */
void
hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int wasexclusive)
{
        if (wasexclusive == 0)
                mtx_downgrade(&ip->lock);
}

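/*
 * Illustrative usage (a sketch, not part of the original source): the
 * upgrade/downgrade pair brackets code which must hold the lock
 * exclusively but may be entered with it held shared, as hammer2_igetv()
 * does below:
 *
 *      wasexclusive = hammer2_inode_lock_upgrade(ip);
 *      ... code requiring the exclusive lock ...
 *      hammer2_inode_lock_downgrade(ip, wasexclusive);
 */
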
/*
 * Lookup an inode by inode number
 */
hammer2_inode_t *
hammer2_inode_lookup(hammer2_pfs_t *pmp, hammer2_tid_t inum)
{
        hammer2_inode_t *ip;

        KKASSERT(pmp);
        if (pmp->spmp_hmp) {
                ip = NULL;
        } else {
                hammer2_spin_ex(&pmp->inum_spin);
                ip = RB_LOOKUP(hammer2_inode_tree, &pmp->inum_tree, inum);
                if (ip)
                        hammer2_inode_ref(ip);
                hammer2_spin_unex(&pmp->inum_spin);
        }
        return(ip);
}

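/*
 * Illustrative usage (a sketch, not part of the original source): the
 * lookup returns a referenced but unlocked inode, so a caller normally
 * locks it and later issues a separate drop for the lookup ref:
 *
 *      ip = hammer2_inode_lookup(pmp, inum);
 *      if (ip) {
 *              hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
 *              ...
 *              hammer2_inode_unlock(ip, NULL);    (drops the lock ref)
 *              hammer2_inode_drop(ip);            (drops the lookup ref)
 *      }
 */
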
/*
 * Adding a ref to an inode is only legal if the inode already has at least
 * one ref.
 *
 * (can be called with spinlock held)
 */
void
hammer2_inode_ref(hammer2_inode_t *ip)
{
        atomic_add_int(&ip->refs, 1);
}

/*
 * Drop an inode reference, freeing the inode when the last reference goes
 * away.
 */
void
hammer2_inode_drop(hammer2_inode_t *ip)
{
        hammer2_pfs_t *pmp;
        hammer2_inode_t *pip;
        u_int refs;

        while (ip) {
                refs = ip->refs;
                cpu_ccfence();
                if (refs == 1) {
                        /*
                         * Transition to zero, must interlock with
                         * the inode inumber lookup tree (if applicable).
                         * It should not be possible for anyone to race
                         * the transition to 0.
                         */
                        pmp = ip->pmp;
                        KKASSERT(pmp);
                        hammer2_spin_ex(&pmp->inum_spin);

                        if (atomic_cmpset_int(&ip->refs, 1, 0)) {
                                KKASSERT(hammer2_mtx_refs(&ip->lock) == 0);
                                if (ip->flags & HAMMER2_INODE_ONRBTREE) {
                                        atomic_clear_int(&ip->flags,
                                                     HAMMER2_INODE_ONRBTREE);
                                        RB_REMOVE(hammer2_inode_tree,
                                                  &pmp->inum_tree, ip);
                                }
                                hammer2_spin_unex(&pmp->inum_spin);

                                pip = ip->pip;
                                ip->pip = NULL;
                                ip->pmp = NULL;

                                /*
                                 * Cleaning out ip->cluster isn't entirely
                                 * trivial.
                                 */
                                hammer2_inode_repoint(ip, NULL, NULL);

                                /*
                                 * We have to drop pip (if non-NULL) to
                                 * dispose of our implied reference from
                                 * ip->pip.  We can simply loop on it.
                                 */
                                kfree(ip, pmp->minode);
                                atomic_add_long(&pmp->inmem_inodes, -1);
                                ip = pip;
                                /* continue with pip (can be NULL) */
                        } else {
                                hammer2_spin_unex(&ip->pmp->inum_spin);
                        }
                } else {
                        /*
                         * Non-zero transition
                         */
                        if (atomic_cmpset_int(&ip->refs, refs, refs - 1))
                                break;
                }
        }
}

/*
 * Get the vnode associated with the given inode, allocating the vnode if
 * necessary.  The vnode will be returned exclusively locked.
 *
 * The caller must lock the inode (shared or exclusive).
 *
 * Great care must be taken to avoid deadlocks and vnode acquisition/reclaim
 * races.
 */
struct vnode *
hammer2_igetv(hammer2_inode_t *ip, int *errorp)
{
        hammer2_pfs_t *pmp;
        struct vnode *vp;

        pmp = ip->pmp;
        KKASSERT(pmp != NULL);
        *errorp = 0;

        for (;;) {
                /*
                 * Attempt to reuse an existing vnode assignment.  It is
                 * possible to race a reclaim so the vget() may fail.  The
                 * inode must be unlocked during the vget() to avoid a
                 * deadlock against a reclaim.
                 */
                int wasexclusive;

                vp = ip->vp;
                if (vp) {
                        /*
                         * Inode must be unlocked during the vget() to avoid
                         * possible deadlocks, but leave the ip ref intact.
                         *
                         * vnode is held to prevent destruction during the
                         * vget().  The vget() can still fail if we lost
                         * a reclaim race on the vnode.
                         */
                        hammer2_mtx_state_t ostate;

                        vhold(vp);
                        ostate = hammer2_inode_lock_temp_release(ip);
                        if (vget(vp, LK_EXCLUSIVE)) {
                                vdrop(vp);
                                hammer2_inode_lock_temp_restore(ip, ostate);
                                continue;
                        }
                        hammer2_inode_lock_temp_restore(ip, ostate);
                        vdrop(vp);
                        /* vp still locked and ref from vget */
                        if (ip->vp != vp) {
                                kprintf("hammer2: igetv race %p/%p\n",
                                        ip->vp, vp);
                                vput(vp);
                                continue;
                        }
                        *errorp = 0;
                        break;
                }

                /*
                 * No vnode exists, allocate a new vnode.  Beware of
                 * allocation races.  This function will return an
                 * exclusively locked and referenced vnode.
                 */
                *errorp = getnewvnode(VT_HAMMER2, pmp->mp, &vp, 0, 0);
                if (*errorp) {
                        kprintf("hammer2: igetv getnewvnode failed %d\n",
                                *errorp);
                        vp = NULL;
                        break;
                }

                /*
                 * Lock the inode and check for an allocation race.
                 */
                wasexclusive = hammer2_inode_lock_upgrade(ip);
                if (ip->vp != NULL) {
                        vp->v_type = VBAD;
                        vx_put(vp);
                        hammer2_inode_lock_downgrade(ip, wasexclusive);
                        continue;
                }

                switch (ip->meta.type) {
                case HAMMER2_OBJTYPE_DIRECTORY:
                        vp->v_type = VDIR;
                        break;
                case HAMMER2_OBJTYPE_REGFILE:
                        vp->v_type = VREG;
                        vinitvmio(vp, ip->meta.size,
                                  HAMMER2_LBUFSIZE,
                                  (int)ip->meta.size & HAMMER2_LBUFMASK);
                        break;
                case HAMMER2_OBJTYPE_SOFTLINK:
                        /*
                         * XXX for now we are using the generic file_read
                         * and file_write code so we need a buffer cache
                         * association.
                         */
                        vp->v_type = VLNK;
                        vinitvmio(vp, ip->meta.size,
                                  HAMMER2_LBUFSIZE,
                                  (int)ip->meta.size & HAMMER2_LBUFMASK);
                        break;
                case HAMMER2_OBJTYPE_CDEV:
                        vp->v_type = VCHR;
                        /* fall through */
                case HAMMER2_OBJTYPE_BDEV:
                        vp->v_ops = &pmp->mp->mnt_vn_spec_ops;
                        if (ip->meta.type != HAMMER2_OBJTYPE_CDEV)
                                vp->v_type = VBLK;
                        addaliasu(vp,
                                  ip->meta.rmajor,
                                  ip->meta.rminor);
                        break;
                case HAMMER2_OBJTYPE_FIFO:
                        vp->v_type = VFIFO;
                        vp->v_ops = &pmp->mp->mnt_vn_fifo_ops;
                        break;
                default:
                        panic("hammer2: unhandled objtype %d",
                              ip->meta.type);
                        break;
                }

                if (ip == pmp->iroot)
                        vsetflags(vp, VROOT);

                vp->v_data = ip;
                ip->vp = vp;
                hammer2_inode_ref(ip);          /* vp association */
                hammer2_inode_lock_downgrade(ip, wasexclusive);
                break;
        }

        /*
         * Return non-NULL vp and *errorp == 0, or NULL vp and *errorp != 0.
         */
        if (hammer2_debug & 0x0002) {
                kprintf("igetv vp %p refs 0x%08x aux 0x%08x\n",
                        vp, vp->v_refcnt, vp->v_auxrefs);
        }
        return (vp);
}

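/*
 * Illustrative usage (a sketch, not part of the original source): a VOP
 * handler holding the inode lock resolves the vnode like this; vp comes
 * back exclusively locked and referenced, or NULL with the error set:
 *
 *      hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
 *      vp = hammer2_igetv(ip, &error);
 *      hammer2_inode_unlock(ip, NULL);
 */
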
/*
 * Returns the inode associated with the passed-in cluster, creating the
 * inode if necessary and synchronizing it to the passed-in cluster otherwise.
 *
 * The passed-in cluster must be locked and will remain locked on return.
 * The returned inode will be locked and the caller may dispose of both
 * via hammer2_inode_unlock() + hammer2_inode_drop().  However, if the caller
 * needs to resolve a hardlink it must ref/unlock/relock/drop the inode.
 *
 * The hammer2_inode structure regulates the interface between the high level
 * kernel VNOPS API and the filesystem backend (the chains).
 *
 * On return the inode is locked with the supplied cluster.
 */
hammer2_inode_t *
hammer2_inode_get(hammer2_pfs_t *pmp, hammer2_inode_t *dip,
                  hammer2_cluster_t *cluster)
{
        hammer2_inode_t *nip;
        const hammer2_inode_data_t *iptmp;
        const hammer2_inode_data_t *nipdata;

        KKASSERT(cluster == NULL ||
                 hammer2_cluster_type(cluster) == HAMMER2_BREF_TYPE_INODE);
        KKASSERT(pmp);

        /*
         * Interlocked lookup/ref of the inode.  This code is only needed
         * when looking up inodes with nlinks != 0 (TODO: optimize out
         * otherwise and test for duplicates).
         *
         * Cluster can be NULL during the initial pfs allocation.
         */
again:
        while (cluster) {
                iptmp = &hammer2_cluster_rdata(cluster)->ipdata;
                nip = hammer2_inode_lookup(pmp, iptmp->meta.inum);
                if (nip == NULL)
                        break;

                hammer2_mtx_ex(&nip->lock);

                /*
                 * Handle SMP race (not applicable to the super-root spmp
                 * which can't index inodes due to duplicative inode numbers).
                 */
                if (pmp->spmp_hmp == NULL &&
                    (nip->flags & HAMMER2_INODE_ONRBTREE) == 0) {
                        hammer2_mtx_unlock(&nip->lock);
                        hammer2_inode_drop(nip);
                        continue;
                }
                hammer2_inode_repoint(nip, NULL, cluster);

                return nip;
        }

        /*
         * We couldn't find the inode number, create a new inode.
         */
        nip = kmalloc(sizeof(*nip), pmp->minode, M_WAITOK | M_ZERO);
        spin_init(&nip->cluster_spin, "h2clspin");
        atomic_add_long(&pmp->inmem_inodes, 1);
        hammer2_pfs_memory_inc(pmp);
        hammer2_pfs_memory_wakeup(pmp);
        if (pmp->spmp_hmp)
                nip->flags = HAMMER2_INODE_SROOT;

        /*
         * Initialize nip's cluster.  A cluster is provided for normal
         * inodes but typically not for the super-root or PFS inodes.
         */
        nip->cluster.refs = 1;
        nip->cluster.pmp = pmp;
        nip->cluster.flags |= HAMMER2_CLUSTER_INODE;
        if (cluster) {
                nipdata = &hammer2_cluster_rdata(cluster)->ipdata;
                nip->meta = nipdata->meta;
                hammer2_cluster_bref(cluster, &nip->bref);
                atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD);
                hammer2_inode_repoint(nip, NULL, cluster);
        } else {
                nip->meta.inum = 1;             /* PFS inum is always 1 XXX */
                /* mtime will be updated when a cluster is available */
                atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD);/*XXX*/
        }

        nip->pip = dip;                         /* can be NULL */
        if (dip)
                hammer2_inode_ref(dip); /* ref dip for nip->pip */

        nip->pmp = pmp;

        /*
         * ref and lock on nip gives it state compatible to after a
         * hammer2_inode_lock() call.
         */
        nip->refs = 1;
        hammer2_mtx_init(&nip->lock, "h2inode");
        hammer2_mtx_ex(&nip->lock);
        /* combination of thread lock and chain lock == inode lock */

        /*
         * Attempt to add the inode.  If it fails we raced another inode
         * get.  Undo all the work and try again.
         */
        if (pmp->spmp_hmp == NULL) {
                hammer2_spin_ex(&pmp->inum_spin);
                if (RB_INSERT(hammer2_inode_tree, &pmp->inum_tree, nip)) {
                        hammer2_spin_unex(&pmp->inum_spin);
                        hammer2_mtx_unlock(&nip->lock);
                        hammer2_inode_drop(nip);
                        goto again;
                }
                atomic_set_int(&nip->flags, HAMMER2_INODE_ONRBTREE);
                hammer2_spin_unex(&pmp->inum_spin);
        }

        return (nip);
}

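/*
 * Illustrative usage (a sketch, not part of the original source): the
 * hidden-directory setup below wraps a looked-up inode cluster in an
 * in-memory inode this way:
 *
 *      cluster = hammer2_cluster_lookup(cparent, &key_dummy,
 *                                       inum, inum, 0);
 *      if (cluster) {
 *              ip = hammer2_inode_get(pmp, dip, cluster);
 *              ... ip is returned locked along with the cluster ...
 *              hammer2_inode_unlock(ip, cluster);
 *      }
 */
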
/*
 * Create a new inode in the specified directory using the vattr to
 * figure out the type of inode.
 *
 * If no error occurs the new inode is returned with its cluster locked,
 * otherwise NULL is returned and *errorp is set.
 *
 * If vap and/or cred are NULL the related fields are not set and the
 * inode type defaults to a directory.  This is used when creating PFSs
 * under the super-root, so the inode number is set to 1 in this case.
 *
 * dip is not locked on entry.
 *
 * NOTE: When used to create a snapshot, the inode is temporarily associated
 *       with the super-root spmp. XXX should pass new pmp for snapshot.
 */
hammer2_inode_t *
hammer2_inode_create(hammer2_inode_t *dip,
                     struct vattr *vap, struct ucred *cred,
                     const uint8_t *name, size_t name_len,
                     hammer2_key_t inum, uint8_t type, uint8_t target_type,
                     int flags, int *errorp)
{
        hammer2_xop_scanlhc_t *sxop;
        hammer2_xop_create_t *xop;
        hammer2_inode_t *nip;
        hammer2_key_t lhcbase;
        hammer2_key_t lhc;
        int error;
        uid_t xuid;
        uuid_t dip_uid;
        uuid_t dip_gid;
        uint32_t dip_mode;
        uint8_t dip_comp_algo;
        uint8_t dip_check_algo;

        lhc = hammer2_dirhash(name, name_len);
        *errorp = 0;
        nip = NULL;

        /*
         * Locate the inode or indirect block to create the new
         * entry in.  At the same time check for key collisions
         * and iterate until we don't get one.
         *
         * NOTE: hidden inodes do not have iterators.
         *
         * Lock the directory exclusively for now to guarantee that
         * we can find an unused lhc for the name.  Due to collisions,
         * two different creates can end up with the same lhc so we
         * cannot depend on the OS to prevent the collision.
         */
        hammer2_inode_lock(dip, HAMMER2_RESOLVE_ALWAYS);

        dip_uid = dip->meta.uid;
        dip_gid = dip->meta.gid;
        dip_mode = dip->meta.mode;
        dip_comp_algo = dip->meta.comp_algo;
        dip_check_algo = dip->meta.check_algo;

        /*
         * Locate an unused key in the collision space.
         */
        lhcbase = lhc;
        sxop = &hammer2_xop_alloc(dip)->xop_scanlhc;
        sxop->lhc = lhc;
        hammer2_xop_start(&sxop->head, hammer2_inode_xop_scanlhc);
        while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
                if (lhc != sxop->head.cluster.focus->bref.key)
                        break;
                ++lhc;
        }
        hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

        if (error) {
                if (error != ENOENT)
                        goto done2;
                ++lhc;
                error = 0;
        }
        if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
                error = ENOSPC;
                goto done2;
        }

        /*
         * Create the inode with the lhc as the key.
         */
        xop = &hammer2_xop_alloc(dip)->xop_create;
        xop->lhc = lhc;
        xop->flags = flags;
        bzero(&xop->meta, sizeof(xop->meta));

        if (vap) {
                xop->meta.type = hammer2_get_obj_type(vap->va_type);

                switch (xop->meta.type) {
                case HAMMER2_OBJTYPE_CDEV:
                case HAMMER2_OBJTYPE_BDEV:
                        xop->meta.rmajor = vap->va_rmajor;
                        xop->meta.rminor = vap->va_rminor;
                        break;
                default:
                        break;
                }
                type = xop->meta.type;
        } else {
                xop->meta.type = type;
                xop->meta.target_type = target_type;
        }
        xop->meta.inum = inum;

        /* Inherit parent's inode compression mode. */
        xop->meta.comp_algo = dip_comp_algo;
        xop->meta.check_algo = dip_check_algo;
        xop->meta.version = HAMMER2_INODE_VERSION_ONE;
        hammer2_update_time(&xop->meta.ctime);
        xop->meta.mtime = xop->meta.ctime;
        if (vap)
                xop->meta.mode = vap->va_mode;
        xop->meta.nlinks = 1;
        if (vap) {
                if (dip && dip->pmp) {
                        xuid = hammer2_to_unix_xid(&dip_uid);
                        xuid = vop_helper_create_uid(dip->pmp->mp,
                                                     dip_mode,
                                                     xuid,
                                                     cred,
                                                     &vap->va_mode);
                } else {
                        /* super-root has no dip and/or pmp */
                        xuid = 0;
                }
                if (vap->va_vaflags & VA_UID_UUID_VALID)
                        xop->meta.uid = vap->va_uid_uuid;
                else if (vap->va_uid != (uid_t)VNOVAL)
                        hammer2_guid_to_uuid(&xop->meta.uid, vap->va_uid);
                else
                        hammer2_guid_to_uuid(&xop->meta.uid, xuid);

                if (vap->va_vaflags & VA_GID_UUID_VALID)
                        xop->meta.gid = vap->va_gid_uuid;
                else if (vap->va_gid != (gid_t)VNOVAL)
                        hammer2_guid_to_uuid(&xop->meta.gid, vap->va_gid);
                else if (dip)
                        xop->meta.gid = dip_gid;
        }

        /*
         * Regular files and softlinks allow a small amount of data to be
         * directly embedded in the inode.  This flag will be cleared if
         * the size is extended past the embedded limit.
         */
        if (xop->meta.type == HAMMER2_OBJTYPE_REGFILE ||
            xop->meta.type == HAMMER2_OBJTYPE_SOFTLINK ||
            xop->meta.type == HAMMER2_OBJTYPE_HARDLINK) {
                xop->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
        }
        hammer2_xop_setname(&xop->head, name, name_len);
        xop->meta.name_len = name_len;
        xop->meta.name_key = lhc;
        KKASSERT(name_len < HAMMER2_INODE_MAXNAME);

        hammer2_xop_start(&xop->head, hammer2_inode_xop_create);

        error = hammer2_xop_collect(&xop->head, 0);
#if INODE_DEBUG
        kprintf("CREATE INODE %*.*s\n",
                (int)name_len, (int)name_len, name);
#endif

        if (error) {
                *errorp = error;
                goto done;
        }

        /*
         * Set up the new inode if not a hardlink pointer.
         *
         * NOTE: *_get() integrates chain's lock into the inode lock.
         *
         * NOTE: Only one new inode can currently be created per
         *       transaction.  If the need arises we can adjust
         *       hammer2_trans_init() to allow more.
         *
         * NOTE: nipdata will have chain's blockset data.
         */
        if (type != HAMMER2_OBJTYPE_HARDLINK) {
                nip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster);
                nip->comp_heuristic = 0;
        } else {
                nip = NULL;
        }

done:
        hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
done2:
        hammer2_inode_unlock(dip, NULL);

        return (nip);
}

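/*
 * Illustrative summary (a sketch, not part of the original source):
 * hammer2_inode_create() above follows the general frontend XOP
 * lifecycle used throughout this refactor:
 *
 *      xop = &hammer2_xop_alloc(dip)->xop_<kind>;
 *      ... fill in xop fields (lhc, meta, name, ...) ...
 *      hammer2_xop_start(&xop->head, hammer2_inode_xop_<backend>);
 *      error = hammer2_xop_collect(&xop->head, 0);
 *      ... consume results via xop->head.cluster ...
 *      hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
 */
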
/*
 * Connect the disconnected inode (ip) to the directory (dip) with the
 * specified (name, name_len).  If name is NULL, (lhc) will be used as
 * the directory key and the inode's embedded name will not be modified
 * for future recovery purposes.
 *
 * dip and ip must both be locked exclusively (dip in particular to avoid
 * lhc collisions).
 */
int
hammer2_inode_connect_simple(hammer2_inode_t *dip, hammer2_inode_t *ip,
                             const char *name, size_t name_len,
                             hammer2_key_t lhc)
{
        hammer2_xop_scanlhc_t *sxop;
        hammer2_xop_connect_t *xop;
        hammer2_inode_t *opip;
        hammer2_key_t lhcbase;
        int error;

        /*
         * Calculate the lhc and resolve the collision space.
         */
        if (name) {
                lhc = lhcbase = hammer2_dirhash(name, name_len);
                sxop = &hammer2_xop_alloc(dip)->xop_scanlhc;
                sxop->lhc = lhc;
                hammer2_xop_start(&sxop->head, hammer2_inode_xop_scanlhc);
                while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
                        if (lhc != sxop->head.cluster.focus->bref.key)
                                break;
                        ++lhc;
                }
                hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

                if (error) {
                        if (error != ENOENT)
                                goto done;
                        ++lhc;
                        error = 0;
                }
                if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
                        error = ENOSPC;
                        goto done;
                }
        } else {
                error = 0;
        }

        /*
         * Formally reconnect the in-memory structure.  ip must
         * be locked exclusively to safely change ip->pip.
         */
        if (ip->pip != dip) {
                hammer2_inode_ref(dip);
                opip = ip->pip;
                ip->pip = dip;
                if (opip)
                        hammer2_inode_drop(opip);
        }

        /*
         * Connect her up
         */
        xop = &hammer2_xop_alloc(dip)->xop_connect;
        if (name)
                hammer2_xop_setname(&xop->head, name, name_len);
        hammer2_xop_setip2(&xop->head, ip);
        xop->lhc = lhc;
        hammer2_xop_start(&xop->head, hammer2_inode_xop_connect);
        error = hammer2_xop_collect(&xop->head, 0);
        hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

        /*
         * On success make the same adjustments to ip->meta or the
         * next flush may blow up the chain.
         */
        if (error == 0) {
                hammer2_inode_modify(ip);
                ip->meta.name_key = lhc;
                if (name)
                        ip->meta.name_len = name_len;
        }
done:
        return error;
}

/*
 * Repoint ip->cluster's chains to cluster's chains and fixup the default
 * focus.  Only valid elements are repointed.  Invalid elements have to be
 * adjusted by the appropriate slave sync threads.
 *
 * Caller must hold the inode exclusively locked.  The cluster, if not
 * NULL, must also be locked.
 *
 * Cluster may be NULL to clean out any chains in ip->cluster.
 */
void
hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_inode_t *pip,
                      hammer2_cluster_t *cluster)
{
        hammer2_chain_t *dropch[HAMMER2_MAXCLUSTER];
        hammer2_chain_t *ochain;
        hammer2_chain_t *nchain;
        hammer2_inode_t *opip;
        int i;

        bzero(dropch, sizeof(dropch));

        /*
         * Replace chains in ip->cluster with chains from cluster and
         * adjust the focus if necessary.
         *
         * NOTE: nchain and/or ochain can be NULL due to gaps
         *       in the cluster arrays.
         */
        hammer2_spin_ex(&ip->cluster_spin);
        for (i = 0; cluster && i < cluster->nchains; ++i) {
                /*
                 * Do not replace invalid elements as this might race
                 * syncthr replacements.
                 */
                if (cluster->array[i].flags & HAMMER2_CITEM_INVALID)
                        continue;

                /*
                 * Do not replace elements which are the same.  Also handle
                 * element count discrepancies.
                 */
                nchain = cluster->array[i].chain;
                if (i < ip->cluster.nchains) {
                        ochain = ip->cluster.array[i].chain;
                        if (ochain == nchain)
                                continue;
                } else {
                        ochain = NULL;
                }

                /*
                 * Make adjustments
                 */
                ip->cluster.array[i].chain = nchain;
                ip->cluster.array[i].flags &= ~HAMMER2_CITEM_INVALID;
                ip->cluster.array[i].flags |= cluster->array[i].flags &
                                              HAMMER2_CITEM_INVALID;
                if (nchain)
                        hammer2_chain_ref(nchain);
                dropch[i] = ochain;
        }

        /*
         * Release any left-over chains in ip->cluster.
         */
        while (i < ip->cluster.nchains) {
                nchain = ip->cluster.array[i].chain;
                if (nchain) {
                        ip->cluster.array[i].chain = NULL;
                        ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
                }
                dropch[i] = nchain;
                ++i;
        }

        /*
         * Fixup fields.  Note that the inode-embedded cluster is never
         * directly locked.
         */
        if (cluster) {
                ip->cluster.nchains = cluster->nchains;
                ip->cluster.focus = cluster->focus;
                ip->cluster.flags = cluster->flags & ~HAMMER2_CLUSTER_LOCKED;
        } else {
                ip->cluster.nchains = 0;
                ip->cluster.focus = NULL;
                ip->cluster.flags &= ~HAMMER2_CLUSTER_ZFLAGS;
        }

        /*
         * Repoint ip->pip if requested (non-NULL pip).
         */
        if (pip && ip->pip != pip) {
                opip = ip->pip;
                hammer2_inode_ref(pip);
                ip->pip = pip;
        } else {
                opip = NULL;
        }
        hammer2_spin_unex(&ip->cluster_spin);

        /*
         * Cleanup outside of spinlock
         */
        while (--i >= 0) {
                if (dropch[i])
                        hammer2_chain_drop(dropch[i]);
        }
        if (opip)
                hammer2_inode_drop(opip);
}

/*
 * Repoint a single element from the cluster to the ip.  Used by the
 * synchronization threads to piecemeal update inodes.  Does not change
 * focus and requires inode to be re-locked to clean-up flags (XXX).
 */
void
hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
                          int idx)
{
        hammer2_chain_t *ochain;
        hammer2_chain_t *nchain;
        int i;

        hammer2_spin_ex(&ip->cluster_spin);
        KKASSERT(idx < cluster->nchains);
        if (idx < ip->cluster.nchains) {
                ochain = ip->cluster.array[idx].chain;
                nchain = cluster->array[idx].chain;
        } else {
                ochain = NULL;
                nchain = cluster->array[idx].chain;

                /*
                 * Growing the array, zero and invalidate any gap
                 * elements before extending nchains to cover idx.
                 */
                for (i = ip->cluster.nchains; i <= idx; ++i) {
                        bzero(&ip->cluster.array[i],
                              sizeof(ip->cluster.array[i]));
                        ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
                }
                ip->cluster.nchains = idx + 1;
        }
        if (ochain != nchain) {
                /*
                 * Make adjustments.
                 */
                ip->cluster.array[idx].chain = nchain;
                ip->cluster.array[idx].flags &= ~HAMMER2_CITEM_INVALID;
                ip->cluster.array[idx].flags |= cluster->array[idx].flags &
                                                HAMMER2_CITEM_INVALID;
        }
        hammer2_spin_unex(&ip->cluster_spin);
        if (ochain != nchain) {
                if (nchain)
                        hammer2_chain_ref(nchain);
                if (ochain)
                        hammer2_chain_drop(ochain);
        }
}

/*
 * Called with a locked inode to finish unlinking an inode after xop_unlink
 * had been run.  This function is responsible for decrementing nlinks and
 * moving deleted inodes to the hidden directory if they are still open.
 *
 * We don't bother decrementing nlinks if the file is not open and this was
 * the last link.
 *
 * If the inode is a hardlink target its chain has not yet been deleted,
 * otherwise its chain has been deleted.
 *
 * If isopen then any prior deletion was not permanent and the inode must
 * be moved to the hidden directory.
 */
int
hammer2_inode_unlink_finisher(hammer2_inode_t *ip, int isopen)
{
        hammer2_pfs_t *pmp;
        int error;

        pmp = ip->pmp;

        /*
         * Decrement nlinks.  If this is the last link and the file is
         * not open, the chain has already been removed and we don't bother
         * dirtying the inode.
         */
        if (ip->meta.nlinks == 1) {
                atomic_set_int(&ip->flags, HAMMER2_INODE_ISUNLINKED);
                if (isopen == 0)
                        return 0;
        }

        hammer2_inode_modify(ip);
        --ip->meta.nlinks;
        if ((int64_t)ip->meta.nlinks < 0)
                ip->meta.nlinks = 0;    /* safety */

        /*
         * If nlinks is not zero we are done.  However, this should only be
         * possible with a hardlink target.  If the inode is an embedded
         * hardlink nlinks should have dropped to zero, warn and proceed
         * with the next step.
         */
        if (ip->meta.nlinks) {
                if ((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0)
                        return 0;
                kprintf("hammer2_inode_unlink: nlinks was not 0 (%jd)\n",
                        (intmax_t)ip->meta.nlinks);
                return 0;
        }

        /*
         * nlinks is now zero, the inode should have already been deleted.
         * If the file is open it was deleted non-permanently and must be
         * moved to the hidden directory.
         *
         * When moving to the hidden directory we force the name_key to the
         * inode number to avoid collisions.
         */
        if (isopen) {
                hammer2_inode_lock(pmp->ihidden, HAMMER2_RESOLVE_ALWAYS);
                error = hammer2_inode_connect_simple(pmp->ihidden, ip,
                                                     NULL, 0, ip->meta.inum);
                hammer2_inode_unlock(pmp->ihidden, NULL);
        } else {
                error = 0;
        }
        return error;
}

/*
 * This is called from the mount code to initialize pmp->ihidden
 */
void
hammer2_inode_install_hidden(hammer2_pfs_t *pmp)
{
        hammer2_cluster_t *cparent;
        hammer2_cluster_t *cluster;
        hammer2_cluster_t *scan;
        const hammer2_inode_data_t *ripdata;
        hammer2_inode_data_t *wipdata;
        hammer2_key_t key_dummy;
        hammer2_key_t key_next;
        int error;
        int count;
        int dip_check_algo;
        int dip_comp_algo;

        if (pmp->ihidden)
                return;

        /*
         * Find the hidden directory
         */
        bzero(&key_dummy, sizeof(key_dummy));
        hammer2_trans_init(pmp, 0);

        /*
         * Setup for lookup, retrieve iroot's check and compression
         * algorithm request which was likely generated by newfs_hammer2.
         *
         * The check/comp fields will probably never be used since inodes
         * are renamed into the hidden directory and not created relative to
         * the hidden directory, chain creation inherits from bref.methods,
         * and data chains inherit from their respective file inode *_algo
         * fields.
         */
        hammer2_inode_lock(pmp->iroot, HAMMER2_RESOLVE_ALWAYS);
        cparent = hammer2_inode_cluster(pmp->iroot, HAMMER2_RESOLVE_ALWAYS);
        ripdata = &hammer2_cluster_rdata(cparent)->ipdata;
        dip_check_algo = ripdata->meta.check_algo;
        dip_comp_algo = ripdata->meta.comp_algo;
        ripdata = NULL;

        cluster = hammer2_cluster_lookup(cparent, &key_dummy,
                                         HAMMER2_INODE_HIDDENDIR,
                                         HAMMER2_INODE_HIDDENDIR,
                                         0);
        if (cluster) {
                pmp->ihidden = hammer2_inode_get(pmp, pmp->iroot, cluster);
                hammer2_inode_ref(pmp->ihidden);

                /*
                 * Remove any unlinked files which were left open as-of
                 * any system crash.
                 *
                 * Don't pass NODATA, we need the inode data so the delete
                 * can do proper statistics updates.
                 */
                count = 0;
                scan = hammer2_cluster_lookup(cluster, &key_next,
                                              0, HAMMER2_TID_MAX, 0);
                while (scan) {
                        if (hammer2_cluster_type(scan) ==
                            HAMMER2_BREF_TYPE_INODE) {
                                hammer2_cluster_delete(cluster, scan,
                                                   HAMMER2_DELETE_PERMANENT);
                                ++count;
                        }
                        scan = hammer2_cluster_next(cluster, scan, &key_next,
                                                    0, HAMMER2_TID_MAX, 0);
                }

                hammer2_inode_unlock(pmp->ihidden, cluster);
                hammer2_inode_unlock(pmp->iroot, cparent);
                hammer2_trans_done(pmp);
                kprintf("hammer2: PFS loaded hidden dir, "
                        "removed %d dead entries\n", count);
                return;
        }

        /*
         * Create the hidden directory
         */
        error = hammer2_cluster_create(pmp, cparent, &cluster,
                                       HAMMER2_INODE_HIDDENDIR, 0,
                                       HAMMER2_BREF_TYPE_INODE,
                                       HAMMER2_INODE_BYTES,
                                       0);
        hammer2_inode_unlock(pmp->iroot, cparent);

        hammer2_cluster_modify(cluster, 0);
        wipdata = &hammer2_cluster_wdata(cluster)->ipdata;
        wipdata->meta.type = HAMMER2_OBJTYPE_DIRECTORY;
        wipdata->meta.inum = HAMMER2_INODE_HIDDENDIR;
        wipdata->meta.nlinks = 1;
        wipdata->meta.comp_algo = dip_comp_algo;
        wipdata->meta.check_algo = dip_check_algo;
        hammer2_cluster_modsync(cluster);
        kprintf("hammer2: PFS root missing hidden directory, creating\n");

        pmp->ihidden = hammer2_inode_get(pmp, pmp->iroot, cluster);
        hammer2_inode_ref(pmp->ihidden);
        hammer2_inode_unlock(pmp->ihidden, cluster);
        hammer2_trans_done(pmp);
}

#if 0
/*
 * If an open file is unlinked H2 needs to retain the file in the topology
 * to ensure that its backing store is not recovered by the bulk free scan.
 * This also allows us to avoid having to special-case the CHAIN_DELETED flag.
 *
 * To do this the file is moved to a hidden directory in the PFS root and
 * renamed.  The hidden directory must be created if it does not exist.
 */
static
void
hammer2_inode_move_to_hidden(hammer2_cluster_t **cparentp,
                             hammer2_cluster_t **clusterp,
                             hammer2_tid_t inum)
{
        hammer2_cluster_t *dcluster;
        hammer2_pfs_t *pmp;
        int error;

        pmp = (*clusterp)->pmp;
        KKASSERT(pmp != NULL);
        KKASSERT(pmp->ihidden != NULL);

        hammer2_cluster_delete(*cparentp, *clusterp, 0);
        hammer2_inode_lock(pmp->ihidden, HAMMER2_RESOLVE_ALWAYS);
        dcluster = hammer2_inode_cluster(pmp->ihidden, HAMMER2_RESOLVE_ALWAYS);
        error = hammer2_inode_connect(NULL/*XXX*/, clusterp, 0,
                                      pmp->ihidden, dcluster,
                                      NULL, 0, inum);
        hammer2_inode_unlock(pmp->ihidden, dcluster);
        KKASSERT(error == 0);
}
#endif

/*
 * Find the directory common to both fdip and tdip.
 *
 * Returns a held but not locked inode.  Caller typically locks the inode,
 * and when through unlocks AND drops it.
 */
hammer2_inode_t *
hammer2_inode_common_parent(hammer2_inode_t *fdip, hammer2_inode_t *tdip)
{
        hammer2_inode_t *scan1;
        hammer2_inode_t *scan2;

        /*
         * We used to have a depth field but it complicated matters too
         * much for directory renames.  So now it's ugly.  Check for
         * simple cases before giving up and doing it the expensive way.
         *
         * XXX need a bottom-up topology stability lock
         */
        if (fdip == tdip || fdip == tdip->pip) {
                hammer2_inode_ref(fdip);
                return(fdip);
        }
        if (fdip->pip == tdip) {
                hammer2_inode_ref(tdip);
                return(tdip);
        }

        /*
         * XXX not MPSAFE
         */
        for (scan1 = fdip; scan1->pmp == fdip->pmp; scan1 = scan1->pip) {
                scan2 = tdip;
                while (scan2->pmp == tdip->pmp) {
                        if (scan1 == scan2) {
                                hammer2_inode_ref(scan1);
                                return(scan1);
                        }
                        scan2 = scan2->pip;
                        if (scan2 == NULL)
                                break;
                }
        }
        panic("hammer2_inode_common_parent: no common parent %p %p\n",
              fdip, tdip);
        /* NOT REACHED */
        return(NULL);
}

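/*
 * Illustrative usage (a sketch, not part of the original source): a
 * rename-style caller locks the common parent and, when through,
 * unlocks AND drops it (the drop releases the ref acquired above):
 *
 *      cdip = hammer2_inode_common_parent(fdip, tdip);
 *      hammer2_inode_lock(cdip, HAMMER2_RESOLVE_ALWAYS);
 *      ...
 *      hammer2_inode_unlock(cdip, NULL);
 *      hammer2_inode_drop(cdip);
 */
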
/*
 * Mark an inode modified.  This sets HAMMER2_INODE_MODIFIED and dirties
 * the inode's vnode so the kernel knows to flush it.  The cached meta-data
 * in (ip) is synchronized back to the inode's chains by a later
 * hammer2_inode_fsync() prior to any explicit flush or strategy write.
 */
void
hammer2_inode_modify(hammer2_inode_t *ip)
{
        atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
        if (ip->vp)
                vsetisdirty(ip->vp);
}

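/*
 * Illustrative usage (a sketch, not part of the original source):
 * frontend code updates the cached meta-data under the exclusive inode
 * lock after latching the modification, as hammer2_inode_connect_simple()
 * does above:
 *
 *      hammer2_inode_modify(ip);
 *      ip->meta.name_key = lhc;
 *      ip->meta.name_len = name_len;
 */
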
/*
 * Synchronize the inode's frontend state with the chain state prior
 * to any explicit flush of the inode or any strategy write call.
 *
 * Called with a locked inode.
 */
void
hammer2_inode_fsync(hammer2_inode_t *ip, hammer2_cluster_t *cparent)
{
        int clear_directdata = 0;

        /* temporary hack, allow cparent to be NULL */
        if (cparent == NULL) {
                cparent = hammer2_inode_cluster(ip, HAMMER2_RESOLVE_ALWAYS);
                hammer2_inode_fsync(ip, cparent);
                hammer2_cluster_unlock(cparent);
                hammer2_cluster_drop(cparent);
                return;
        }

        if ((ip->flags & HAMMER2_INODE_RESIZED) == 0) {
                /* do nothing */
        } else if (ip->meta.size < ip->osize) {
                /*
                 * We must delete any chains beyond the EOF.  The chain
                 * straddling the EOF will be pending in the bioq.
                 */
                hammer2_cluster_t *dparent;
                hammer2_cluster_t *cluster;
                hammer2_key_t lbase;
                hammer2_key_t key_next;

                lbase = (ip->meta.size + HAMMER2_PBUFMASK64) &
                        ~HAMMER2_PBUFMASK64;
                dparent = hammer2_cluster_lookup_init(&ip->cluster, 0);
                cluster = hammer2_cluster_lookup(dparent, &key_next,
                                                 lbase, (hammer2_key_t)-1,
                                                 HAMMER2_LOOKUP_NODATA);
                while (cluster) {
                        /*
                         * Degenerate embedded case, nothing to loop on
                         */
                        switch (hammer2_cluster_type(cluster)) {
                        case HAMMER2_BREF_TYPE_INODE:
                                hammer2_cluster_unlock(cluster);
                                hammer2_cluster_drop(cluster);
                                cluster = NULL;
                                break;
                        case HAMMER2_BREF_TYPE_DATA:
                                hammer2_cluster_delete(dparent, cluster,
                                                   HAMMER2_DELETE_PERMANENT);
                                /* fall through */
                        default:
                                cluster = hammer2_cluster_next(dparent, cluster,
                                                   &key_next,
                                                   key_next, (hammer2_key_t)-1,
                                                   HAMMER2_LOOKUP_NODATA);
                                break;
                        }
                }
                hammer2_cluster_lookup_done(dparent);
                atomic_clear_int(&ip->flags, HAMMER2_INODE_RESIZED);
                KKASSERT(ip->flags & HAMMER2_INODE_MODIFIED);
        } else if (ip->meta.size > ip->osize) {
                /*
                 * When resizing larger we may not have any direct-data
                 * available.
                 */
                if ((ip->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) &&
                    ip->meta.size > HAMMER2_EMBEDDED_BYTES) {
                        ip->meta.op_flags &= ~HAMMER2_OPFLAG_DIRECTDATA;
                        clear_directdata = 1;
                }
                atomic_clear_int(&ip->flags, HAMMER2_INODE_RESIZED);
                KKASSERT(ip->flags & HAMMER2_INODE_MODIFIED);
        } else {
                /*
                 * RESIZED was set but size didn't change.
                 */
                atomic_clear_int(&ip->flags, HAMMER2_INODE_RESIZED);
                KKASSERT(ip->flags & HAMMER2_INODE_MODIFIED);
        }

        /*
         * Sync inode meta-data
         */
        if (ip->flags & HAMMER2_INODE_MODIFIED) {
                hammer2_inode_data_t *wipdata;

                atomic_clear_int(&ip->flags, HAMMER2_INODE_MODIFIED);
                hammer2_cluster_modify(cparent, 0);
                hammer2_inode_repoint(ip, NULL, cparent);

                wipdata = &hammer2_cluster_wdata(cparent)->ipdata;
                wipdata->meta = ip->meta;
                if (clear_directdata) {
                        bzero(&wipdata->u.blockset,
                              sizeof(wipdata->u.blockset));
                }
                hammer2_cluster_modsync(cparent);
        }
}

/*
 * Inode create helper (threaded, backend)
 *
 * Used by ncreate, nmknod, nsymlink, nmkdir.
 * Used by nlink and rename to create HARDLINK pointers.
 *
 * Frontend holds the parent directory ip locked exclusively.  We
 * create the inode and feed the exclusively locked chain to the
 * frontend.
 */
void
hammer2_inode_xop_create(hammer2_xop_t *arg, int clindex)
{
        hammer2_xop_create_t *xop = &arg->xop_create;
        hammer2_chain_t *parent;
        hammer2_chain_t *chain;
        hammer2_key_t key_next;
        int cache_index = -1;
        int error;

        chain = NULL;
        parent = hammer2_inode_chain(xop->head.ip, clindex,
                                     HAMMER2_RESOLVE_ALWAYS);
        if (parent == NULL) {
                error = EIO;
                goto fail;
        }
        chain = hammer2_chain_lookup(&parent, &key_next,
                                     xop->lhc, xop->lhc,
                                     &cache_index, 0);
        if (chain) {
                hammer2_chain_unlock(chain);
                error = EEXIST;
                goto fail;
        }

        error = hammer2_chain_create(&parent, &chain,
                                     xop->head.ip->pmp,
                                     xop->lhc, 0,
                                     HAMMER2_BREF_TYPE_INODE,
                                     HAMMER2_INODE_BYTES,
                                     xop->flags);
        if (error == 0) {
                hammer2_chain_modify(chain, 0);
                chain->data->ipdata.meta = xop->meta;
                bcopy(xop->head.name, chain->data->ipdata.filename,
                      xop->head.name_len);
        }
        hammer2_chain_unlock(chain);
        hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS |
                                  HAMMER2_RESOLVE_SHARED);
fail:
        if (parent) {
                hammer2_chain_unlock(parent);
                hammer2_chain_drop(parent);
        }
        error = hammer2_xop_feed(&xop->head, chain, clindex, error);
        if (chain)
                hammer2_chain_drop(chain);
}

/*
 * Inode delete helper (backend, threaded)
 */
void
hammer2_inode_xop_destroy(hammer2_xop_t *arg, int clindex)
{
        /*hammer2_xop_inode_t *xop = &arg->xop_inode;*/
}

void
hammer2_inode_xop_connect(hammer2_xop_t *arg, int clindex)
{
        hammer2_xop_connect_t *xop = &arg->xop_connect;
        hammer2_inode_data_t *wipdata;
        hammer2_chain_t *parent;
        hammer2_chain_t *chain;
        hammer2_pfs_t *pmp;
        hammer2_key_t key_dummy;
        int cache_index = -1;
        int error;

        /*
         * Get directory, then issue a lookup to prime the parent chain
         * for the create.  The lookup is expected to fail.
         */
        pmp = xop->head.ip->pmp;
        parent = hammer2_inode_chain(xop->head.ip, clindex,
                                     HAMMER2_RESOLVE_ALWAYS);
        if (parent == NULL) {
                chain = NULL;
                error = EIO;
                goto fail;
        }
        chain = hammer2_chain_lookup(&parent, &key_dummy,
                                     xop->lhc, xop->lhc,
                                     &cache_index, 0);
        if (chain) {
                hammer2_chain_unlock(chain);
                hammer2_chain_drop(chain);
                chain = NULL;
                error = EEXIST;
                goto fail;
        }

        /*
         * Adjust the filename in the inode, set the name key.
         *
         * NOTE: Frontend must also adjust ip2->meta on success, we can't
         *       do it here.
         */
        chain = hammer2_inode_chain(xop->head.ip2, clindex,
                                    HAMMER2_RESOLVE_ALWAYS);
        hammer2_chain_modify(chain, 0);
        wipdata = &chain->data->ipdata;

        hammer2_inode_modify(xop->head.ip2);
        if (xop->head.name) {
                bzero(wipdata->filename, sizeof(wipdata->filename));
                bcopy(xop->head.name, wipdata->filename, xop->head.name_len);
                wipdata->meta.name_len = xop->head.name_len;
        }
        wipdata->meta.name_key = xop->lhc;

        /*
         * Reconnect the chain to the new parent directory
         */
        error = hammer2_chain_create(&parent, &chain, pmp,
                                     xop->lhc, 0,
                                     HAMMER2_BREF_TYPE_INODE,
                                     HAMMER2_INODE_BYTES,
                                     0);

        /*
         * Feed result back.
         */
fail:
        hammer2_xop_feed(&xop->head, NULL, clindex, error);
        if (parent) {
                hammer2_chain_unlock(parent);
                hammer2_chain_drop(parent);
        }
        if (chain) {
                hammer2_chain_unlock(chain);
                hammer2_chain_drop(chain);
        }
}