hammer2 - Refactor frontend part 14/many
[dragonfly.git] / sys/vfs/hammer2/hammer2_inode.c
/*
 * Copyright (c) 2011-2014 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/uuid.h>

#include "hammer2.h"

#define INODE_DEBUG     0

RB_GENERATE2(hammer2_inode_tree, hammer2_inode, rbnode, hammer2_inode_cmp,
             hammer2_tid_t, meta.inum);

int
hammer2_inode_cmp(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
{
        if (ip1->meta.inum < ip2->meta.inum)
                return(-1);
        if (ip1->meta.inum > ip2->meta.inum)
                return(1);
        return(0);
}

/*
 * HAMMER2 inode locks
 *
 * HAMMER2 offers shared and exclusive locks on inodes.  Pass a mask of
 * flags for options:
 *
 *	- pass HAMMER2_RESOLVE_SHARED if a shared lock is desired.  The
 *	  inode locking function will automatically set the RDONLY flag.
 *
 *	- pass HAMMER2_RESOLVE_ALWAYS if you need the inode's meta-data.
 *	  Most front-end inode locks do.
 *
 *	- pass HAMMER2_RESOLVE_NEVER if you do not want to require that
 *	  the inode data be resolved.  This is used by the syncthr because
 *	  it can run on an unresolved/out-of-sync cluster, and also by the
 *	  vnode reclamation code to avoid unnecessary I/O (particularly when
 *	  disposing of hundreds of thousands of cached vnodes).
 *
 * The inode locking function locks the inode itself, resolves any stale
 * chains in the inode's cluster, and allocates a fresh copy of the
 * cluster with 1 ref and all the underlying chains locked.
 *
 * ip->cluster will be stable while the inode is locked.
 *
 * NOTE: We don't combine the inode/chain lock because putting away an
 *       inode would otherwise confuse multiple lock holders of the inode.
 *
 * NOTE: In-memory inodes always point to hardlink targets (the actual file),
 *       and never point to a hardlink pointer.
 *
 * NOTE: If caller passes HAMMER2_RESOLVE_RDONLY the exclusive locking code
 *       will feel free to reduce the chain set in the cluster as an
 *       optimization.  It will still be validated against the quorum if
 *       appropriate, but the optimization might be able to reduce data
 *       accesses to one node.  This flag is automatically set if the inode
 *       is locked with HAMMER2_RESOLVE_SHARED.
 */
void
hammer2_inode_lock(hammer2_inode_t *ip, int how)
{
        hammer2_inode_ref(ip);

        /*
         * Inode structure mutex
         */
        if (how & HAMMER2_RESOLVE_SHARED) {
                /*how |= HAMMER2_RESOLVE_RDONLY; not used */
                hammer2_mtx_sh(&ip->lock);
        } else {
                hammer2_mtx_ex(&ip->lock);
        }
}

/*
 * Create a locked copy of ip->cluster.  Note that the copy will have a
 * ref on the cluster AND its chains and we don't want a second ref to
 * either when we lock it.
 *
 * Exclusive inode locks set the template focus chain in (ip)
 * as a hint.  Cluster locks can ALWAYS replace the focus in the
 * working copy if the hint does not work out, so beware.
 */
hammer2_cluster_t *
hammer2_inode_cluster(hammer2_inode_t *ip, int how)
{
        hammer2_cluster_t *cluster;

        cluster = hammer2_cluster_copy(&ip->cluster);
        hammer2_cluster_lock(cluster, how);
        hammer2_cluster_resolve(cluster);

        /*
         * cluster->focus will be set if resolving RESOLVE_ALWAYS, but
         * only update the cached focus in the inode structure when taking
         * out an exclusive lock.
         */
        if ((how & HAMMER2_RESOLVE_SHARED) == 0)
                ip->cluster.focus = cluster->focus;

        return cluster;
}

/*
 * Select a chain out of an inode's cluster and lock it.
 *
 * The inode does not have to be locked.
 */
hammer2_chain_t *
hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how)
{
        hammer2_chain_t *chain;

        hammer2_spin_sh(&ip->cluster_spin);
        if (clindex >= ip->cluster.nchains)
                chain = NULL;
        else
                chain = ip->cluster.array[clindex].chain;
        if (chain) {
                hammer2_chain_ref(chain);
                hammer2_spin_unsh(&ip->cluster_spin);
                hammer2_chain_lock(chain, how);
        } else {
                hammer2_spin_unsh(&ip->cluster_spin);
        }
        return chain;
}

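/*
 * Usage sketch (typical caller pattern; compare the XOP helpers at the
 * end of this file, which select their per-cluster-element chain this
 * way).  The returned chain carries both a ref and a lock, so the pair
 * must be put away with an unlock and a drop:
 *
 *	chain = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
 *	if (chain) {
 *		...
 *		hammer2_chain_unlock(chain);
 *		hammer2_chain_drop(chain);
 *	}
 */
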
void
hammer2_inode_unlock(hammer2_inode_t *ip, hammer2_cluster_t *cluster)
{
        if (cluster) {
                hammer2_cluster_unlock(cluster);
                hammer2_cluster_drop(cluster);
        }
        hammer2_mtx_unlock(&ip->lock);
        hammer2_inode_drop(ip);
}

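/*
 * Usage sketch: the common front-end pattern pairs hammer2_inode_lock()
 * with hammer2_inode_cluster() and puts both away in one call, as in
 * hammer2_inode_install_hidden() below (pass NULL for the cluster
 * argument if no cluster copy was taken):
 *
 *	hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
 *	cluster = hammer2_inode_cluster(ip, HAMMER2_RESOLVE_ALWAYS);
 *	...
 *	hammer2_inode_unlock(ip, cluster);
 */
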
/*
 * Temporarily release a lock held shared or exclusive.  Caller must
 * hold the lock shared or exclusive on call and lock will be released
 * on return.
 *
 * Restore a lock that was temporarily released.
 */
hammer2_mtx_state_t
hammer2_inode_lock_temp_release(hammer2_inode_t *ip)
{
        return hammer2_mtx_temp_release(&ip->lock);
}

void
hammer2_inode_lock_temp_restore(hammer2_inode_t *ip, hammer2_mtx_state_t ostate)
{
        hammer2_mtx_temp_restore(&ip->lock, ostate);
}

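/*
 * Usage sketch, mirroring hammer2_igetv() below, which must not hold
 * the inode lock across a blocking vget():
 *
 *	hammer2_mtx_state_t ostate;
 *
 *	ostate = hammer2_inode_lock_temp_release(ip);
 *	...blocking operation that must not hold the inode lock...
 *	hammer2_inode_lock_temp_restore(ip, ostate);
 */
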
/*
 * Upgrade a shared inode lock to exclusive and return.  If the inode lock
 * is already held exclusively this is a NOP.
 *
 * The caller MUST hold the inode lock either shared or exclusive on call
 * and will own the lock exclusively on return.
 *
 * Returns non-zero if the lock was already exclusive prior to the upgrade.
 */
int
hammer2_inode_lock_upgrade(hammer2_inode_t *ip)
{
        int wasexclusive;

        if (mtx_islocked_ex(&ip->lock)) {
                wasexclusive = 1;
        } else {
                hammer2_mtx_unlock(&ip->lock);
                hammer2_mtx_ex(&ip->lock);
                wasexclusive = 0;
        }
        return wasexclusive;
}

/*
 * Downgrade an inode lock from exclusive to shared only if the inode
 * lock was previously shared.  If the inode lock was previously exclusive,
 * this is a NOP.
 */
void
hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int wasexclusive)
{
        if (wasexclusive == 0)
                mtx_downgrade(&ip->lock);
}

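/*
 * Usage sketch, mirroring hammer2_igetv() below:
 *
 *	int wasexclusive;
 *
 *	wasexclusive = hammer2_inode_lock_upgrade(ip);
 *	...adjustments requiring the exclusive lock...
 *	hammer2_inode_lock_downgrade(ip, wasexclusive);
 */
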
/*
 * Lookup an inode by inode number
 */
hammer2_inode_t *
hammer2_inode_lookup(hammer2_pfs_t *pmp, hammer2_tid_t inum)
{
        hammer2_inode_t *ip;

        KKASSERT(pmp);
        if (pmp->spmp_hmp) {
                ip = NULL;
        } else {
                hammer2_spin_ex(&pmp->inum_spin);
                ip = RB_LOOKUP(hammer2_inode_tree, &pmp->inum_tree, inum);
                if (ip)
                        hammer2_inode_ref(ip);
                hammer2_spin_unex(&pmp->inum_spin);
        }
        return(ip);
}

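/*
 * Usage sketch: the lookup returns a referenced but unlocked inode
 * (or NULL), so the caller is responsible for the matching drop:
 *
 *	ip = hammer2_inode_lookup(pmp, inum);
 *	if (ip) {
 *		...
 *		hammer2_inode_drop(ip);
 *	}
 */
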
/*
 * Adding a ref to an inode is only legal if the inode already has at least
 * one ref.
 *
 * (can be called with spinlock held)
 */
void
hammer2_inode_ref(hammer2_inode_t *ip)
{
        atomic_add_int(&ip->refs, 1);
}

/*
 * Drop an inode reference, freeing the inode when the last reference goes
 * away.
 */
void
hammer2_inode_drop(hammer2_inode_t *ip)
{
        hammer2_pfs_t *pmp;
        hammer2_inode_t *pip;
        u_int refs;

        while (ip) {
                refs = ip->refs;
                cpu_ccfence();
                if (refs == 1) {
                        /*
                         * Transition to zero, must interlock with
                         * the inode inumber lookup tree (if applicable).
                         * It should not be possible for anyone to race
                         * the transition to 0.
                         */
                        pmp = ip->pmp;
                        KKASSERT(pmp);
                        hammer2_spin_ex(&pmp->inum_spin);

                        if (atomic_cmpset_int(&ip->refs, 1, 0)) {
                                KKASSERT(hammer2_mtx_refs(&ip->lock) == 0);
                                if (ip->flags & HAMMER2_INODE_ONRBTREE) {
                                        atomic_clear_int(&ip->flags,
                                                     HAMMER2_INODE_ONRBTREE);
                                        RB_REMOVE(hammer2_inode_tree,
                                                  &pmp->inum_tree, ip);
                                }
                                hammer2_spin_unex(&pmp->inum_spin);

                                pip = ip->pip;
                                ip->pip = NULL;
                                ip->pmp = NULL;

                                /*
                                 * Cleaning out ip->cluster isn't entirely
                                 * trivial.
                                 */
                                hammer2_inode_repoint(ip, NULL, NULL);

                                /*
                                 * We have to drop pip (if non-NULL) to
                                 * dispose of our implied reference from
                                 * ip->pip.  We can simply loop on it.
                                 */
                                kfree(ip, pmp->minode);
                                atomic_add_long(&pmp->inmem_inodes, -1);
                                ip = pip;
                                /* continue with pip (can be NULL) */
                        } else {
                                hammer2_spin_unex(&ip->pmp->inum_spin);
                        }
                } else {
                        /*
                         * Non-zero transition
                         */
                        if (atomic_cmpset_int(&ip->refs, refs, refs - 1))
                                break;
                }
        }
}

/*
 * Get the vnode associated with the given inode, allocating the vnode if
 * necessary.  The vnode will be returned exclusively locked.
 *
 * The caller must lock the inode (shared or exclusive).
 *
 * Great care must be taken to avoid deadlocks and vnode acquisition/reclaim
 * races.
 */
struct vnode *
hammer2_igetv(hammer2_inode_t *ip, int *errorp)
{
        hammer2_pfs_t *pmp;
        struct vnode *vp;

        pmp = ip->pmp;
        KKASSERT(pmp != NULL);
        *errorp = 0;

        for (;;) {
                /*
                 * Attempt to reuse an existing vnode assignment.  It is
                 * possible to race a reclaim so the vget() may fail.  The
                 * inode must be unlocked during the vget() to avoid a
                 * deadlock against a reclaim.
                 */
                int wasexclusive;

                vp = ip->vp;
                if (vp) {
                        /*
                         * Inode must be unlocked during the vget() to avoid
                         * possible deadlocks, but leave the ip ref intact.
                         *
                         * vnode is held to prevent destruction during the
                         * vget().  The vget() can still fail if we lost
                         * a reclaim race on the vnode.
                         */
                        hammer2_mtx_state_t ostate;

                        vhold(vp);
                        ostate = hammer2_inode_lock_temp_release(ip);
                        if (vget(vp, LK_EXCLUSIVE)) {
                                vdrop(vp);
                                hammer2_inode_lock_temp_restore(ip, ostate);
                                continue;
                        }
                        hammer2_inode_lock_temp_restore(ip, ostate);
                        vdrop(vp);
                        /* vp still locked and ref from vget */
                        if (ip->vp != vp) {
                                kprintf("hammer2: igetv race %p/%p\n",
                                        ip->vp, vp);
                                vput(vp);
                                continue;
                        }
                        *errorp = 0;
                        break;
                }

                /*
                 * No vnode exists, allocate a new vnode.  Beware of
                 * allocation races.  This function will return an
                 * exclusively locked and referenced vnode.
                 */
                *errorp = getnewvnode(VT_HAMMER2, pmp->mp, &vp, 0, 0);
                if (*errorp) {
                        kprintf("hammer2: igetv getnewvnode failed %d\n",
                                *errorp);
                        vp = NULL;
                        break;
                }

                /*
                 * Lock the inode and check for an allocation race.
                 */
                wasexclusive = hammer2_inode_lock_upgrade(ip);
                if (ip->vp != NULL) {
                        vp->v_type = VBAD;
                        vx_put(vp);
                        hammer2_inode_lock_downgrade(ip, wasexclusive);
                        continue;
                }

                switch (ip->meta.type) {
                case HAMMER2_OBJTYPE_DIRECTORY:
                        vp->v_type = VDIR;
                        break;
                case HAMMER2_OBJTYPE_REGFILE:
                        vp->v_type = VREG;
                        vinitvmio(vp, ip->meta.size,
                                  HAMMER2_LBUFSIZE,
                                  (int)ip->meta.size & HAMMER2_LBUFMASK);
                        break;
                case HAMMER2_OBJTYPE_SOFTLINK:
                        /*
                         * XXX for now we are using the generic file_read
                         * and file_write code so we need a buffer cache
                         * association.
                         */
                        vp->v_type = VLNK;
                        vinitvmio(vp, ip->meta.size,
                                  HAMMER2_LBUFSIZE,
                                  (int)ip->meta.size & HAMMER2_LBUFMASK);
                        break;
                case HAMMER2_OBJTYPE_CDEV:
                        vp->v_type = VCHR;
                        /* fall through */
                case HAMMER2_OBJTYPE_BDEV:
                        vp->v_ops = &pmp->mp->mnt_vn_spec_ops;
                        if (ip->meta.type != HAMMER2_OBJTYPE_CDEV)
                                vp->v_type = VBLK;
                        addaliasu(vp,
                                  ip->meta.rmajor,
                                  ip->meta.rminor);
                        break;
                case HAMMER2_OBJTYPE_FIFO:
                        vp->v_type = VFIFO;
                        vp->v_ops = &pmp->mp->mnt_vn_fifo_ops;
                        break;
                default:
                        panic("hammer2: unhandled objtype %d",
                              ip->meta.type);
                        break;
                }

                if (ip == pmp->iroot)
                        vsetflags(vp, VROOT);

                vp->v_data = ip;
                ip->vp = vp;
                hammer2_inode_ref(ip);          /* vp association */
                hammer2_inode_lock_downgrade(ip, wasexclusive);
                break;
        }

        /*
         * Return non-NULL vp and *errorp == 0, or NULL vp and *errorp != 0.
         */
        if (hammer2_debug & 0x0002) {
                kprintf("igetv vp %p refs 0x%08x aux 0x%08x\n",
                        vp, vp->v_refcnt, vp->v_auxrefs);
        }
        return (vp);
}

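/*
 * Usage sketch (illustrative only; the actual callers live in the vnops
 * code, not in this file).  The inode must be locked across the call and
 * the vnode comes back exclusively locked and referenced:
 *
 *	hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
 *	vp = hammer2_igetv(ip, &error);
 *	hammer2_inode_unlock(ip, NULL);
 */
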
/*
 * Returns the inode associated with the passed-in cluster, creating the
 * inode if necessary and synchronizing it to the passed-in cluster otherwise.
 *
 * The passed-in cluster must be locked and will remain locked on return.
 * The returned inode will be locked and the caller may dispose of both
 * via hammer2_inode_unlock() + hammer2_inode_drop().  However, if the caller
 * needs to resolve a hardlink it must ref/unlock/relock/drop the inode.
 *
 * The hammer2_inode structure regulates the interface between the high level
 * kernel VNOPS API and the filesystem backend (the chains).
 *
 * On return the inode is locked with the supplied cluster.
 */
hammer2_inode_t *
hammer2_inode_get(hammer2_pfs_t *pmp, hammer2_inode_t *dip,
                  hammer2_cluster_t *cluster)
{
        hammer2_inode_t *nip;
        const hammer2_inode_data_t *iptmp;
        const hammer2_inode_data_t *nipdata;

        KKASSERT(cluster == NULL ||
                 hammer2_cluster_type(cluster) == HAMMER2_BREF_TYPE_INODE);
        KKASSERT(pmp);

        /*
         * Interlocked lookup/ref of the inode.  This code is only needed
         * when looking up inodes with nlinks != 0 (TODO: optimize out
         * otherwise and test for duplicates).
         *
         * Cluster can be NULL during the initial pfs allocation.
         */
again:
        while (cluster) {
                iptmp = &hammer2_cluster_rdata(cluster)->ipdata;
                nip = hammer2_inode_lookup(pmp, iptmp->meta.inum);
                if (nip == NULL)
                        break;

                hammer2_mtx_ex(&nip->lock);

                /*
                 * Handle SMP race (not applicable to the super-root spmp
                 * which can't index inodes due to duplicative inode numbers).
                 */
                if (pmp->spmp_hmp == NULL &&
                    (nip->flags & HAMMER2_INODE_ONRBTREE) == 0) {
                        hammer2_mtx_unlock(&nip->lock);
                        hammer2_inode_drop(nip);
                        continue;
                }
                hammer2_inode_repoint(nip, NULL, cluster);

                return nip;
        }

        /*
         * We couldn't find the inode number, create a new inode.
         */
        nip = kmalloc(sizeof(*nip), pmp->minode, M_WAITOK | M_ZERO);
        spin_init(&nip->cluster_spin, "h2clspin");
        atomic_add_long(&pmp->inmem_inodes, 1);
        hammer2_pfs_memory_inc(pmp);
        hammer2_pfs_memory_wakeup(pmp);
        if (pmp->spmp_hmp)
                nip->flags = HAMMER2_INODE_SROOT;

        /*
         * Initialize nip's cluster.  A cluster is provided for normal
         * inodes but typically not for the super-root or PFS inodes.
         */
        nip->cluster.refs = 1;
        nip->cluster.pmp = pmp;
        nip->cluster.flags |= HAMMER2_CLUSTER_INODE;
        if (cluster) {
                nipdata = &hammer2_cluster_rdata(cluster)->ipdata;
                nip->meta = nipdata->meta;
                hammer2_cluster_bref(cluster, &nip->bref);
                atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD);
                hammer2_inode_repoint(nip, NULL, cluster);
        } else {
                nip->meta.inum = 1;             /* PFS inum is always 1 XXX */
                /* mtime will be updated when a cluster is available */
                atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD); /*XXX*/
        }

        nip->pip = dip;                         /* can be NULL */
        if (dip)
                hammer2_inode_ref(dip); /* ref dip for nip->pip */

        nip->pmp = pmp;

        /*
         * ref and lock on nip gives it state compatible to after a
         * hammer2_inode_lock() call.
         */
        nip->refs = 1;
        hammer2_mtx_init(&nip->lock, "h2inode");
        hammer2_mtx_ex(&nip->lock);
        /* combination of thread lock and chain lock == inode lock */

        /*
         * Attempt to add the inode.  If it fails we raced another inode
         * get.  Undo all the work and try again.
         */
        if (pmp->spmp_hmp == NULL) {
                hammer2_spin_ex(&pmp->inum_spin);
                if (RB_INSERT(hammer2_inode_tree, &pmp->inum_tree, nip)) {
                        hammer2_spin_unex(&pmp->inum_spin);
                        hammer2_mtx_unlock(&nip->lock);
                        hammer2_inode_drop(nip);
                        goto again;
                }
                atomic_set_int(&nip->flags, HAMMER2_INODE_ONRBTREE);
                hammer2_spin_unex(&pmp->inum_spin);
        }

        return (nip);
}

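/*
 * Usage sketch, mirroring hammer2_inode_install_hidden() below.  The
 * returned inode is locked, so the caller eventually disposes of the
 * inode and the cluster together (take an extra ref first if keeping
 * the inode past the unlock):
 *
 *	nip = hammer2_inode_get(pmp, dip, cluster);
 *	hammer2_inode_ref(nip);
 *	...
 *	hammer2_inode_unlock(nip, cluster);
 */
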
/*
 * Create a new inode in the specified directory using the vattr to
 * figure out the type of inode.
 *
 * If no error occurs the new inode is returned with its cluster locked,
 * otherwise NULL is returned and *errorp is set.
 *
 * If vap and/or cred are NULL the related fields are not set and the
 * inode type defaults to a directory.  This is used when creating PFSs
 * under the super-root, so the inode number is set to 1 in this case.
 *
 * dip is not locked on entry.
 *
 * NOTE: When used to create a snapshot, the inode is temporarily associated
 *       with the super-root spmp.  XXX should pass new pmp for snapshot.
 */
hammer2_inode_t *
hammer2_inode_create(hammer2_inode_t *dip,
                     struct vattr *vap, struct ucred *cred,
                     const uint8_t *name, size_t name_len,
                     hammer2_key_t inum, uint8_t type, uint8_t target_type,
                     int flags, int *errorp)
{
        hammer2_xop_scanlhc_t *sxop;
        hammer2_xop_create_t *xop;
        hammer2_inode_t *nip;
        hammer2_key_t lhcbase;
        hammer2_key_t lhc;
        int error;
        uid_t xuid;
        uuid_t dip_uid;
        uuid_t dip_gid;
        uint32_t dip_mode;
        uint8_t dip_comp_algo;
        uint8_t dip_check_algo;

        lhc = hammer2_dirhash(name, name_len);
        *errorp = 0;
        nip = NULL;

        /*
         * Locate the inode or indirect block to create the new
         * entry in.  At the same time check for key collisions
         * and iterate until we don't get one.
         *
         * NOTE: hidden inodes do not have iterators.
         *
         * Lock the directory exclusively for now to guarantee that
         * we can find an unused lhc for the name.  Due to collisions,
         * two different creates can end up with the same lhc so we
         * cannot depend on the OS to prevent the collision.
         */
        hammer2_inode_lock(dip, HAMMER2_RESOLVE_ALWAYS);

        dip_uid = dip->meta.uid;
        dip_gid = dip->meta.gid;
        dip_mode = dip->meta.mode;
        dip_comp_algo = dip->meta.comp_algo;
        dip_check_algo = dip->meta.check_algo;

        /*
         * Locate an unused key in the collision space.
         */
        lhcbase = lhc;
        sxop = &hammer2_xop_alloc(dip)->xop_scanlhc;
        sxop->lhc = lhc;
        hammer2_xop_start(&sxop->head, hammer2_inode_xop_scanlhc);
        while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
                if (lhc != sxop->head.cluster.focus->bref.key)
                        break;
                ++lhc;
        }
        hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

        if (error) {
                if (error != ENOENT)
                        goto done2;
                ++lhc;
                error = 0;
        }
        if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
                error = ENOSPC;
                goto done2;
        }

        /*
         * Create the inode with the lhc as the key.
         */
        xop = &hammer2_xop_alloc(dip)->xop_create;
        xop->lhc = lhc;
        xop->flags = flags;
        bzero(&xop->meta, sizeof(xop->meta));

        if (vap) {
                xop->meta.type = hammer2_get_obj_type(vap->va_type);

                switch (xop->meta.type) {
                case HAMMER2_OBJTYPE_CDEV:
                case HAMMER2_OBJTYPE_BDEV:
                        xop->meta.rmajor = vap->va_rmajor;
                        xop->meta.rminor = vap->va_rminor;
                        break;
                default:
                        break;
                }
                type = xop->meta.type;
        } else {
                xop->meta.type = type;
                xop->meta.target_type = target_type;
        }
        xop->meta.inum = inum;

        /* Inherit parent's inode compression mode. */
        xop->meta.comp_algo = dip_comp_algo;
        xop->meta.check_algo = dip_check_algo;
        xop->meta.version = HAMMER2_INODE_VERSION_ONE;
        hammer2_update_time(&xop->meta.ctime);
        xop->meta.mtime = xop->meta.ctime;
        if (vap)
                xop->meta.mode = vap->va_mode;
        xop->meta.nlinks = 1;
        if (vap) {
                if (dip && dip->pmp) {
                        xuid = hammer2_to_unix_xid(&dip_uid);
                        xuid = vop_helper_create_uid(dip->pmp->mp,
                                                     dip_mode,
                                                     xuid,
                                                     cred,
                                                     &vap->va_mode);
                } else {
                        /* super-root has no dip and/or pmp */
                        xuid = 0;
                }
                if (vap->va_vaflags & VA_UID_UUID_VALID)
                        xop->meta.uid = vap->va_uid_uuid;
                else if (vap->va_uid != (uid_t)VNOVAL)
                        hammer2_guid_to_uuid(&xop->meta.uid, vap->va_uid);
                else
                        hammer2_guid_to_uuid(&xop->meta.uid, xuid);

                if (vap->va_vaflags & VA_GID_UUID_VALID)
                        xop->meta.gid = vap->va_gid_uuid;
                else if (vap->va_gid != (gid_t)VNOVAL)
                        hammer2_guid_to_uuid(&xop->meta.gid, vap->va_gid);
                else if (dip)
                        xop->meta.gid = dip_gid;
        }

        /*
         * Regular files and softlinks allow a small amount of data to be
         * directly embedded in the inode.  This flag will be cleared if
         * the size is extended past the embedded limit.
         */
        if (xop->meta.type == HAMMER2_OBJTYPE_REGFILE ||
            xop->meta.type == HAMMER2_OBJTYPE_SOFTLINK ||
            xop->meta.type == HAMMER2_OBJTYPE_HARDLINK) {
                xop->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
        }
        hammer2_xop_setname(&xop->head, name, name_len);
        xop->meta.name_len = name_len;
        xop->meta.name_key = lhc;
        KKASSERT(name_len < HAMMER2_INODE_MAXNAME);

        hammer2_xop_start(&xop->head, hammer2_inode_xop_create);

        error = hammer2_xop_collect(&xop->head, 0);
#if INODE_DEBUG
        kprintf("CREATE INODE %*.*s\n",
                (int)name_len, (int)name_len, name);
#endif

        if (error) {
                *errorp = error;
                goto done;
        }

        /*
         * Set up the new inode if not a hardlink pointer.
         *
         * NOTE: *_get() integrates chain's lock into the inode lock.
         *
         * NOTE: Only one new inode can currently be created per
         *       transaction.  If the need arises we can adjust
         *       hammer2_trans_init() to allow more.
         *
         * NOTE: nipdata will have chain's blockset data.
         */
        if (type != HAMMER2_OBJTYPE_HARDLINK) {
                nip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster);
                nip->comp_heuristic = 0;
        } else {
                nip = NULL;
        }

done:
        hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
done2:
        *errorp = error;
        hammer2_inode_unlock(dip, NULL);

        return (nip);
}

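/*
 * Usage sketch (illustrative only; exact arguments vary by caller and
 * the vnops callers are not in this file, error handling elided).  A
 * ncreate-style caller runs the create inside a transaction and then
 * typically converts the returned (locked) inode into a vnode:
 *
 *	hammer2_trans_init(dip->pmp, 0);
 *	nip = hammer2_inode_create(dip, vap, cred, name, name_len,
 *				   inum, type, target_type, 0, &error);
 *	if (nip) {
 *		vp = hammer2_igetv(nip, &error);
 *		hammer2_inode_unlock(nip, NULL);
 *	}
 *	hammer2_trans_done(dip->pmp);
 */
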
/*
 * Connect the disconnected inode (ip) to the directory (dip) with the
 * specified (name, name_len).  If name is NULL, (lhc) will be used as
 * the directory key and the inode's embedded name will not be modified
 * for future recovery purposes.
 *
 * dip and ip must both be locked exclusively (dip in particular to avoid
 * lhc collisions).
 */
int
hammer2_inode_connect_simple(hammer2_inode_t *dip, hammer2_inode_t *ip,
                             const char *name, size_t name_len,
                             hammer2_key_t lhc)
{
        hammer2_xop_scanlhc_t *sxop;
        hammer2_xop_connect_t *xop;
        hammer2_inode_t *opip;
        hammer2_key_t lhcbase;
        int error;

        /*
         * Calculate the lhc and resolve the collision space.
         */
        if (name) {
                lhc = lhcbase = hammer2_dirhash(name, name_len);
                sxop = &hammer2_xop_alloc(dip)->xop_scanlhc;
                sxop->lhc = lhc;
                hammer2_xop_start(&sxop->head, hammer2_inode_xop_scanlhc);
                while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
                        if (lhc != sxop->head.cluster.focus->bref.key)
                                break;
                        ++lhc;
                }
                hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

                if (error) {
                        if (error != ENOENT)
                                goto done;
                        ++lhc;
                        error = 0;
                }
                if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
                        error = ENOSPC;
                        goto done;
                }
        } else {
                error = 0;
        }

        /*
         * Formally reconnect the in-memory structure.  ip must
         * be locked exclusively to safely change ip->pip.
         */
        if (ip->pip != dip) {
                hammer2_inode_ref(dip);
                opip = ip->pip;
                ip->pip = dip;
                if (opip)
                        hammer2_inode_drop(opip);
        }

        /*
         * Connect her up
         */
        xop = &hammer2_xop_alloc(dip)->xop_connect;
        if (name)
                hammer2_xop_setname(&xop->head, name, name_len);
        hammer2_xop_setip2(&xop->head, ip);
        xop->lhc = lhc;
        hammer2_xop_start(&xop->head, hammer2_inode_xop_connect);
        error = hammer2_xop_collect(&xop->head, 0);
        hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

        /*
         * On success make the same adjustments to ip->meta or the
         * next flush may blow up the chain.
         */
        if (error == 0) {
                hammer2_inode_modify(ip);
                ip->meta.name_key = lhc;
                if (name)
                        ip->meta.name_len = name_len;
        }
done:
        return error;
}

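/*
 * Usage sketch, mirroring hammer2_inode_unlink_finisher() below, which
 * passes a NULL name to key the entry by inode number when moving a
 * still-open unlinked file into the hidden directory:
 *
 *	hammer2_inode_lock(dip, HAMMER2_RESOLVE_ALWAYS);
 *	error = hammer2_inode_connect_simple(dip, ip, NULL, 0,
 *					     ip->meta.inum);
 *	hammer2_inode_unlock(dip, NULL);
 */
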
/*
 * Repoint ip->cluster's chains to cluster's chains and fixup the default
 * focus.  Only valid elements are repointed.  Invalid elements have to be
 * adjusted by the appropriate slave sync threads.
 *
 * Caller must hold the inode exclusively locked; the cluster, if not NULL,
 * must also be locked.
 *
 * Cluster may be NULL to clean out any chains in ip->cluster.
 */
void
hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_inode_t *pip,
                      hammer2_cluster_t *cluster)
{
        hammer2_chain_t *dropch[HAMMER2_MAXCLUSTER];
        hammer2_chain_t *ochain;
        hammer2_chain_t *nchain;
        hammer2_inode_t *opip;
        int i;

        bzero(dropch, sizeof(dropch));

        /*
         * Replace chains in ip->cluster with chains from cluster and
         * adjust the focus if necessary.
         *
         * NOTE: nchain and/or ochain can be NULL due to gaps
         *       in the cluster arrays.
         */
        hammer2_spin_ex(&ip->cluster_spin);
        for (i = 0; cluster && i < cluster->nchains; ++i) {
                /*
                 * Do not replace invalid elements as this might race
                 * syncthr replacements.
                 */
                if (cluster->array[i].flags & HAMMER2_CITEM_INVALID)
                        continue;

                /*
                 * Do not replace elements which are the same.  Also handle
                 * element count discrepancies.
                 */
                nchain = cluster->array[i].chain;
                if (i < ip->cluster.nchains) {
                        ochain = ip->cluster.array[i].chain;
                        if (ochain == nchain)
                                continue;
                } else {
                        ochain = NULL;
                }

                /*
                 * Make adjustments
                 */
                ip->cluster.array[i].chain = nchain;
                ip->cluster.array[i].flags &= ~HAMMER2_CITEM_INVALID;
                ip->cluster.array[i].flags |= cluster->array[i].flags &
                                              HAMMER2_CITEM_INVALID;
                if (nchain)
                        hammer2_chain_ref(nchain);
                dropch[i] = ochain;
        }

        /*
         * Release any left-over chains in ip->cluster.
         */
        while (i < ip->cluster.nchains) {
                nchain = ip->cluster.array[i].chain;
                if (nchain) {
                        ip->cluster.array[i].chain = NULL;
                        ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
                }
                dropch[i] = nchain;
                ++i;
        }

        /*
         * Fixup fields.  Note that the inode-embedded cluster is never
         * directly locked.
         */
        if (cluster) {
                ip->cluster.nchains = cluster->nchains;
                ip->cluster.focus = cluster->focus;
                ip->cluster.flags = cluster->flags & ~HAMMER2_CLUSTER_LOCKED;
        } else {
                ip->cluster.nchains = 0;
                ip->cluster.focus = NULL;
                ip->cluster.flags &= ~HAMMER2_CLUSTER_ZFLAGS;
        }

        /*
         * Repoint ip->pip if requested (non-NULL pip).
         */
        if (pip && ip->pip != pip) {
                opip = ip->pip;
                hammer2_inode_ref(pip);
                ip->pip = pip;
        } else {
                opip = NULL;
        }
        hammer2_spin_unex(&ip->cluster_spin);

        /*
         * Cleanup outside of spinlock
         */
        while (--i >= 0) {
                if (dropch[i])
                        hammer2_chain_drop(dropch[i]);
        }
        if (opip)
                hammer2_inode_drop(opip);
}

/*
 * Repoint a single element from the cluster to the ip.  Used by the
 * synchronization threads to piecemeal update inodes.  Does not change
 * focus and requires inode to be re-locked to clean-up flags (XXX).
 */
void
hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
                          int idx)
{
        hammer2_chain_t *ochain;
        hammer2_chain_t *nchain;
        int i;

        hammer2_spin_ex(&ip->cluster_spin);
        KKASSERT(idx < cluster->nchains);
        if (idx < ip->cluster.nchains) {
                ochain = ip->cluster.array[idx].chain;
                nchain = cluster->array[idx].chain;
        } else {
                ochain = NULL;
                nchain = cluster->array[idx].chain;
                ip->cluster.nchains = idx + 1;
                for (i = ip->cluster.nchains; i <= idx; ++i) {
                        bzero(&ip->cluster.array[i],
                              sizeof(ip->cluster.array[i]));
                        ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
                }
        }
        if (ochain != nchain) {
                /*
                 * Make adjustments.
                 */
                ip->cluster.array[idx].chain = nchain;
                ip->cluster.array[idx].flags &= ~HAMMER2_CITEM_INVALID;
                ip->cluster.array[idx].flags |= cluster->array[idx].flags &
                                                HAMMER2_CITEM_INVALID;
        }
        hammer2_spin_unex(&ip->cluster_spin);
        if (ochain != nchain) {
                if (nchain)
                        hammer2_chain_ref(nchain);
                if (ochain)
                        hammer2_chain_drop(ochain);
        }
}

/*
 * Called with a locked inode to finish unlinking an inode after xop_unlink
 * had been run.  This function is responsible for decrementing nlinks and
 * moving deleted inodes to the hidden directory if they are still open.
 *
 * We don't bother decrementing nlinks if the file is not open and this was
 * the last link.
 *
 * If the inode is a hardlink target its chain has not yet been deleted,
 * otherwise its chain has been deleted.
 *
 * If isopen then any prior deletion was not permanent and the inode must
 * be moved to the hidden directory.
 */
int
hammer2_inode_unlink_finisher(hammer2_inode_t *ip, int isopen)
{
        hammer2_pfs_t *pmp;
        int error;

        pmp = ip->pmp;

        /*
         * Decrement nlinks.  If this is the last link and the file is
         * not open, the chain has already been removed and we don't bother
         * dirtying the inode.
         */
        if (ip->meta.nlinks == 1) {
                atomic_set_int(&ip->flags, HAMMER2_INODE_ISUNLINKED);
                if (isopen == 0)
                        return 0;
        }

        hammer2_inode_modify(ip);
        --ip->meta.nlinks;
        if ((int64_t)ip->meta.nlinks < 0)
                ip->meta.nlinks = 0;    /* safety */

        /*
         * If nlinks is not zero we are done.  However, this should only be
         * possible with a hardlink target.  If the inode is an embedded
         * hardlink nlinks should have dropped to zero, warn and proceed
         * with the next step.
         */
        if (ip->meta.nlinks) {
                if ((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0)
                        return 0;
                kprintf("hammer2_inode_unlink: nlinks was not 0 (%jd)\n",
                        (intmax_t)ip->meta.nlinks);
                return 0;
        }

        /*
         * nlinks is now zero, the inode should have already been deleted.
         * If the file is open it was deleted non-permanently and must be
         * moved to the hidden directory.
         *
         * When moving to the hidden directory we force the name_key to the
         * inode number to avoid collisions.
         */
        if (isopen) {
                hammer2_inode_lock(pmp->ihidden, HAMMER2_RESOLVE_ALWAYS);
                error = hammer2_inode_connect_simple(pmp->ihidden, ip,
                                                     NULL, 0, ip->meta.inum);
                hammer2_inode_unlock(pmp->ihidden, NULL);
        } else {
                error = 0;
        }
        return error;
}

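/*
 * Usage sketch (illustrative; the unlink vnops are not in this file).
 * The front-end is assumed to have already run the unlink XOP and to
 * know whether the file is still open:
 *
 *	hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
 *	error = hammer2_inode_unlink_finisher(ip, isopen);
 *	hammer2_inode_unlock(ip, NULL);
 */
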
/*
 * This is called from the mount code to initialize pmp->ihidden
 */
void
hammer2_inode_install_hidden(hammer2_pfs_t *pmp)
{
        hammer2_cluster_t *cparent;
        hammer2_cluster_t *cluster;
        hammer2_cluster_t *scan;
        const hammer2_inode_data_t *ripdata;
        hammer2_inode_data_t *wipdata;
        hammer2_key_t key_dummy;
        hammer2_key_t key_next;
        int error;
        int count;
        int dip_check_algo;
        int dip_comp_algo;

        if (pmp->ihidden)
                return;

        /*
         * Find the hidden directory
         */
        bzero(&key_dummy, sizeof(key_dummy));
        hammer2_trans_init(pmp, 0);

        /*
         * Setup for lookup, retrieve iroot's check and compression
         * algorithm request which was likely generated by newfs_hammer2.
         *
         * The check/comp fields will probably never be used since inodes
         * are renamed into the hidden directory and not created relative to
         * the hidden directory, chain creation inherits from bref.methods,
         * and data chains inherit from their respective file inode *_algo
         * fields.
         */
        hammer2_inode_lock(pmp->iroot, HAMMER2_RESOLVE_ALWAYS);
        cparent = hammer2_inode_cluster(pmp->iroot, HAMMER2_RESOLVE_ALWAYS);
        ripdata = &hammer2_cluster_rdata(cparent)->ipdata;
        dip_check_algo = ripdata->meta.check_algo;
        dip_comp_algo = ripdata->meta.comp_algo;
        ripdata = NULL;

        cluster = hammer2_cluster_lookup(cparent, &key_dummy,
                                         HAMMER2_INODE_HIDDENDIR,
                                         HAMMER2_INODE_HIDDENDIR,
                                         0);
        if (cluster) {
                pmp->ihidden = hammer2_inode_get(pmp, pmp->iroot, cluster);
                hammer2_inode_ref(pmp->ihidden);

                /*
                 * Remove any unlinked files which were left open as-of
                 * any system crash.
                 *
                 * Don't pass NODATA, we need the inode data so the delete
                 * can do proper statistics updates.
                 */
                count = 0;
                scan = hammer2_cluster_lookup(cluster, &key_next,
                                              0, HAMMER2_TID_MAX, 0);
                while (scan) {
                        if (hammer2_cluster_type(scan) ==
                            HAMMER2_BREF_TYPE_INODE) {
                                hammer2_cluster_delete(cluster, scan,
                                                   HAMMER2_DELETE_PERMANENT);
                                ++count;
                        }
                        scan = hammer2_cluster_next(cluster, scan, &key_next,
                                                    0, HAMMER2_TID_MAX, 0);
                }

                hammer2_inode_unlock(pmp->ihidden, cluster);
                hammer2_inode_unlock(pmp->iroot, cparent);
                hammer2_trans_done(pmp);
                kprintf("hammer2: PFS loaded hidden dir, "
                        "removed %d dead entries\n", count);
                return;
        }

        /*
         * Create the hidden directory
         */
        error = hammer2_cluster_create(pmp, cparent, &cluster,
                                       HAMMER2_INODE_HIDDENDIR, 0,
                                       HAMMER2_BREF_TYPE_INODE,
                                       HAMMER2_INODE_BYTES,
                                       0);
        hammer2_inode_unlock(pmp->iroot, cparent);

        hammer2_cluster_modify(cluster, 0);
        wipdata = &hammer2_cluster_wdata(cluster)->ipdata;
        wipdata->meta.type = HAMMER2_OBJTYPE_DIRECTORY;
        wipdata->meta.inum = HAMMER2_INODE_HIDDENDIR;
        wipdata->meta.nlinks = 1;
        wipdata->meta.comp_algo = dip_comp_algo;
        wipdata->meta.check_algo = dip_check_algo;
        hammer2_cluster_modsync(cluster);
        kprintf("hammer2: PFS root missing hidden directory, creating\n");

        pmp->ihidden = hammer2_inode_get(pmp, pmp->iroot, cluster);
        hammer2_inode_ref(pmp->ihidden);
        hammer2_inode_unlock(pmp->ihidden, cluster);
        hammer2_trans_done(pmp);
}

#if 0
/*
 * If an open file is unlinked H2 needs to retain the file in the topology
 * to ensure that its backing store is not recovered by the bulk free scan.
 * This also allows us to avoid having to special-case the CHAIN_DELETED flag.
 *
 * To do this the file is moved to a hidden directory in the PFS root and
 * renamed.  The hidden directory must be created if it does not exist.
 */
static
void
hammer2_inode_move_to_hidden(hammer2_cluster_t **cparentp,
                             hammer2_cluster_t **clusterp,
                             hammer2_tid_t inum)
{
        hammer2_cluster_t *dcluster;
        hammer2_pfs_t *pmp;
        int error;

        pmp = (*clusterp)->pmp;
        KKASSERT(pmp != NULL);
        KKASSERT(pmp->ihidden != NULL);

        hammer2_cluster_delete(*cparentp, *clusterp, 0);
        hammer2_inode_lock(pmp->ihidden, HAMMER2_RESOLVE_ALWAYS);
        dcluster = hammer2_inode_cluster(pmp->ihidden, HAMMER2_RESOLVE_ALWAYS);
        error = hammer2_inode_connect(NULL/*XXX*/, clusterp, 0,
                                      pmp->ihidden, dcluster,
                                      NULL, 0, inum);
        hammer2_inode_unlock(pmp->ihidden, dcluster);
        KKASSERT(error == 0);
}
#endif

/*
 * Find the directory common to both fdip and tdip.
 *
 * Returns a held but not locked inode.  Caller typically locks the inode,
 * and when through unlocks AND drops it.
 */
hammer2_inode_t *
hammer2_inode_common_parent(hammer2_inode_t *fdip, hammer2_inode_t *tdip)
{
        hammer2_inode_t *scan1;
        hammer2_inode_t *scan2;

        /*
         * We used to have a depth field but it complicated matters too
         * much for directory renames.  So now it's ugly.  Check for
         * simple cases before giving up and doing it the expensive way.
         *
         * XXX need a bottom-up topology stability lock
         */
        if (fdip == tdip || fdip == tdip->pip) {
                hammer2_inode_ref(fdip);
                return(fdip);
        }
        if (fdip->pip == tdip) {
                hammer2_inode_ref(tdip);
                return(tdip);
        }

        /*
         * XXX not MPSAFE
         */
        for (scan1 = fdip; scan1->pmp == fdip->pmp; scan1 = scan1->pip) {
                scan2 = tdip;
                while (scan2->pmp == tdip->pmp) {
                        if (scan1 == scan2) {
                                hammer2_inode_ref(scan1);
                                return(scan1);
                        }
                        scan2 = scan2->pip;
                        if (scan2 == NULL)
                                break;
                }
        }
        panic("hammer2_inode_common_parent: no common parent %p %p\n",
              fdip, tdip);
        /* NOT REACHED */
        return(NULL);
}

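/*
 * Usage sketch, following the contract in the comment above (held but
 * not locked on return; the caller unlocks AND drops when through):
 *
 *	cdip = hammer2_inode_common_parent(fdip, tdip);
 *	hammer2_inode_lock(cdip, HAMMER2_RESOLVE_ALWAYS);
 *	...
 *	hammer2_inode_unlock(cdip, NULL);
 *	hammer2_inode_drop(cdip);
 */
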
/*
 * Mark an inode modified.  This sets HAMMER2_INODE_MODIFIED and dirties
 * the vnode so the kernel schedules a flush; the front-end copy of the
 * meta-data (ip->meta) is synchronized back to the inode's chains later
 * by hammer2_inode_fsync().
 */
void
hammer2_inode_modify(hammer2_inode_t *ip)
{
        atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
        if (ip->vp)
                vsetisdirty(ip->vp);
}

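/*
 * Usage sketch, as in hammer2_inode_connect_simple() above: flag the
 * inode modified first, then adjust the front-end copy of the meta-data:
 *
 *	hammer2_inode_modify(ip);
 *	ip->meta.name_key = lhc;
 */
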
/*
 * Synchronize the inode's frontend state with the chain state prior
 * to any explicit flush of the inode or any strategy write call.
 *
 * Called with a locked inode.
 */
void
hammer2_inode_fsync(hammer2_inode_t *ip, hammer2_cluster_t *cparent)
{
        int clear_directdata = 0;

        /* temporary hack, allow cparent to be NULL */
        if (cparent == NULL) {
                cparent = hammer2_inode_cluster(ip, HAMMER2_RESOLVE_ALWAYS);
                hammer2_inode_fsync(ip, cparent);
                hammer2_cluster_unlock(cparent);
                hammer2_cluster_drop(cparent);
                return;
        }

        if ((ip->flags & HAMMER2_INODE_RESIZED) == 0) {
                /* do nothing */
        } else if (ip->meta.size < ip->osize) {
                /*
                 * We must delete any chains beyond the EOF.  The chain
                 * straddling the EOF will be pending in the bioq.
                 */
                hammer2_cluster_t *dparent;
                hammer2_cluster_t *cluster;
                hammer2_key_t lbase;
                hammer2_key_t key_next;

                lbase = (ip->meta.size + HAMMER2_PBUFMASK64) &
                        ~HAMMER2_PBUFMASK64;
                dparent = hammer2_cluster_lookup_init(&ip->cluster, 0);
                cluster = hammer2_cluster_lookup(dparent, &key_next,
                                                 lbase, (hammer2_key_t)-1,
                                                 HAMMER2_LOOKUP_NODATA);
                while (cluster) {
                        /*
                         * Degenerate embedded case, nothing to loop on
                         */
                        switch (hammer2_cluster_type(cluster)) {
                        case HAMMER2_BREF_TYPE_INODE:
                                hammer2_cluster_unlock(cluster);
                                hammer2_cluster_drop(cluster);
                                cluster = NULL;
                                break;
                        case HAMMER2_BREF_TYPE_DATA:
                                hammer2_cluster_delete(dparent, cluster,
                                                   HAMMER2_DELETE_PERMANENT);
                                /* fall through */
                        default:
                                cluster = hammer2_cluster_next(dparent, cluster,
                                                   &key_next,
                                                   key_next, (hammer2_key_t)-1,
                                                   HAMMER2_LOOKUP_NODATA);
                                break;
                        }
                }
                hammer2_cluster_lookup_done(dparent);
                atomic_clear_int(&ip->flags, HAMMER2_INODE_RESIZED);
                KKASSERT(ip->flags & HAMMER2_INODE_MODIFIED);
        } else if (ip->meta.size > ip->osize) {
                /*
                 * When resizing larger we may not have any direct-data
                 * available.
                 */
                if ((ip->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) &&
                    ip->meta.size > HAMMER2_EMBEDDED_BYTES) {
                        ip->meta.op_flags &= ~HAMMER2_OPFLAG_DIRECTDATA;
                        clear_directdata = 1;
                }
                atomic_clear_int(&ip->flags, HAMMER2_INODE_RESIZED);
                KKASSERT(ip->flags & HAMMER2_INODE_MODIFIED);
        } else {
                /*
                 * RESIZED was set but size didn't change.
                 */
                atomic_clear_int(&ip->flags, HAMMER2_INODE_RESIZED);
                KKASSERT(ip->flags & HAMMER2_INODE_MODIFIED);
        }

        /*
         * Sync inode meta-data
         */
        if (ip->flags & HAMMER2_INODE_MODIFIED) {
                hammer2_inode_data_t *wipdata;

                atomic_clear_int(&ip->flags, HAMMER2_INODE_MODIFIED);
                hammer2_cluster_modify(cparent, 0);
                hammer2_inode_repoint(ip, NULL, cparent);

                wipdata = &hammer2_cluster_wdata(cparent)->ipdata;
                wipdata->meta = ip->meta;
                if (clear_directdata) {
                        bzero(&wipdata->u.blockset,
                              sizeof(wipdata->u.blockset));
                }
                hammer2_cluster_modsync(cparent);
        }
}

/*
 * Inode create helper (threaded, backend)
 *
 * Used by ncreate, nmknod, nsymlink, nmkdir.
 * Used by nlink and rename to create HARDLINK pointers.
 *
 * Frontend holds the parent directory ip locked exclusively.  We
 * create the inode and feed the exclusively locked chain to the
 * frontend.
 */
void
hammer2_inode_xop_create(hammer2_xop_t *arg, int clindex)
{
        hammer2_xop_create_t *xop = &arg->xop_create;
        hammer2_chain_t *parent;
        hammer2_chain_t *chain;
        hammer2_key_t key_next;
        int cache_index = -1;
        int error;

        chain = NULL;
        parent = hammer2_inode_chain(xop->head.ip, clindex,
                                     HAMMER2_RESOLVE_ALWAYS);
        if (parent == NULL) {
                error = EIO;
                goto fail;
        }
        chain = hammer2_chain_lookup(&parent, &key_next,
                                     xop->lhc, xop->lhc,
                                     &cache_index, 0);
        if (chain) {
                hammer2_chain_unlock(chain);
                error = EEXIST;
                goto fail;
        }

        error = hammer2_chain_create(&parent, &chain,
                                     xop->head.ip->pmp,
                                     xop->lhc, 0,
                                     HAMMER2_BREF_TYPE_INODE,
                                     HAMMER2_INODE_BYTES,
                                     xop->flags);
        if (error == 0) {
                hammer2_chain_modify(chain, 0);
                chain->data->ipdata.meta = xop->meta;
                bcopy(xop->head.name, chain->data->ipdata.filename,
                      xop->head.name_len);
        }
        if (chain) {
                hammer2_chain_unlock(chain);
                hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS |
                                          HAMMER2_RESOLVE_SHARED);
        }
fail:
        if (parent) {
                hammer2_chain_unlock(parent);
                hammer2_chain_drop(parent);
        }
        error = hammer2_xop_feed(&xop->head, chain, clindex, error);
        if (chain)
                hammer2_chain_drop(chain);
}

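/*
 * Front-end dispatch sketch for these backends, as used by
 * hammer2_inode_create() above: allocate the XOP, start it on the
 * cluster, collect the result, then retire:
 *
 *	xop = &hammer2_xop_alloc(dip)->xop_create;
 *	xop->lhc = lhc;
 *	hammer2_xop_start(&xop->head, hammer2_inode_xop_create);
 *	error = hammer2_xop_collect(&xop->head, 0);
 *	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
 */
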
/*
 * Inode delete helper (backend, threaded)
 */
void
hammer2_inode_xop_destroy(hammer2_xop_t *arg, int clindex)
{
        /*hammer2_xop_inode_t *xop = &arg->xop_inode;*/
}

void
hammer2_inode_xop_connect(hammer2_xop_t *arg, int clindex)
{
        hammer2_xop_connect_t *xop = &arg->xop_connect;
        hammer2_inode_data_t *wipdata;
        hammer2_chain_t *parent;
        hammer2_chain_t *chain;
        hammer2_pfs_t *pmp;
        hammer2_key_t key_dummy;
        int cache_index = -1;
        int error;

        /*
         * Get directory, then issue a lookup to prime the parent chain
         * for the create.  The lookup is expected to fail.
         */
        pmp = xop->head.ip->pmp;
        parent = hammer2_inode_chain(xop->head.ip, clindex,
                                     HAMMER2_RESOLVE_ALWAYS);
        if (parent == NULL) {
                chain = NULL;
                error = EIO;
                goto fail;
        }
        chain = hammer2_chain_lookup(&parent, &key_dummy,
                                     xop->lhc, xop->lhc,
                                     &cache_index, 0);
        if (chain) {
                hammer2_chain_unlock(chain);
                hammer2_chain_drop(chain);
                chain = NULL;
                error = EEXIST;
                goto fail;
        }

        /*
         * Adjust the filename in the inode, set the name key.
         *
         * NOTE: Frontend must also adjust ip2->meta on success, we can't
         *       do it here.
         */
        chain = hammer2_inode_chain(xop->head.ip2, clindex,
                                    HAMMER2_RESOLVE_ALWAYS);
        hammer2_chain_modify(chain, 0);
        wipdata = &chain->data->ipdata;

        hammer2_inode_modify(xop->head.ip2);
        if (xop->head.name) {
                bzero(wipdata->filename, sizeof(wipdata->filename));
                bcopy(xop->head.name, wipdata->filename, xop->head.name_len);
                wipdata->meta.name_len = xop->head.name_len;
        }
        wipdata->meta.name_key = xop->lhc;

        /*
         * Reconnect the chain to the new parent directory
         */
        error = hammer2_chain_create(&parent, &chain, pmp,
                                     xop->lhc, 0,
                                     HAMMER2_BREF_TYPE_INODE,
                                     HAMMER2_INODE_BYTES,
                                     0);

        /*
         * Feed result back.
         */
fail:
        hammer2_xop_feed(&xop->head, NULL, clindex, error);
        if (parent) {
                hammer2_chain_unlock(parent);
                hammer2_chain_drop(parent);
        }
        if (chain) {
                hammer2_chain_unlock(chain);
                hammer2_chain_drop(chain);
        }
}