2 * Copyright (c) 2011-2015 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
6 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
18 * 3. Neither the name of The DragonFly Project nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific, prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/nlookup.h>
39 #include <sys/vnode.h>
40 #include <sys/mount.h>
41 #include <sys/fcntl.h>
44 #include <sys/vfsops.h>
45 #include <sys/sysctl.h>
46 #include <sys/socket.h>
47 #include <sys/objcache.h>
50 #include <sys/namei.h>
51 #include <sys/mountctl.h>
52 #include <sys/dirent.h>
55 #include <sys/mutex.h>
56 #include <sys/mutex2.h>
59 #include "hammer2_disk.h"
60 #include "hammer2_mount.h"
61 #include "hammer2_lz4.h"
63 #include "zlib/hammer2_zlib.h"
65 #define REPORT_REFS_ERRORS 1 /* XXX remove me */
67 MALLOC_DEFINE(M_OBJCACHE, "objcache", "Object Cache");
69 struct hammer2_sync_info {
74 TAILQ_HEAD(hammer2_mntlist, hammer2_dev);
75 TAILQ_HEAD(hammer2_pfslist, hammer2_pfs);
76 static struct hammer2_mntlist hammer2_mntlist;
77 static struct hammer2_pfslist hammer2_pfslist;
78 static struct lock hammer2_mntlk;
81 int hammer2_cluster_enable = 1;
82 int hammer2_hardlink_enable = 1;
83 int hammer2_flush_pipe = 100;
84 int hammer2_synchronous_flush = 1;
85 int hammer2_dio_count;
86 long hammer2_limit_dirty_chains;
87 long hammer2_iod_file_read;
88 long hammer2_iod_meta_read;
89 long hammer2_iod_indr_read;
90 long hammer2_iod_fmap_read;
91 long hammer2_iod_volu_read;
92 long hammer2_iod_file_write;
93 long hammer2_iod_meta_write;
94 long hammer2_iod_indr_write;
95 long hammer2_iod_fmap_write;
96 long hammer2_iod_volu_write;
97 long hammer2_ioa_file_read;
98 long hammer2_ioa_meta_read;
99 long hammer2_ioa_indr_read;
100 long hammer2_ioa_fmap_read;
101 long hammer2_ioa_volu_read;
102 long hammer2_ioa_fmap_write;
103 long hammer2_ioa_file_write;
104 long hammer2_ioa_meta_write;
105 long hammer2_ioa_indr_write;
106 long hammer2_ioa_volu_write;
108 MALLOC_DECLARE(M_HAMMER2_CBUFFER);
109 MALLOC_DEFINE(M_HAMMER2_CBUFFER, "HAMMER2-compbuffer",
110 "Buffer used for compression.");
112 MALLOC_DECLARE(M_HAMMER2_DEBUFFER);
113 MALLOC_DEFINE(M_HAMMER2_DEBUFFER, "HAMMER2-decompbuffer",
114 "Buffer used for decompression.");
116 SYSCTL_NODE(_vfs, OID_AUTO, hammer2, CTLFLAG_RW, 0, "HAMMER2 filesystem");
118 SYSCTL_INT(_vfs_hammer2, OID_AUTO, debug, CTLFLAG_RW,
119 &hammer2_debug, 0, "");
120 SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_enable, CTLFLAG_RW,
121 &hammer2_cluster_enable, 0, "");
122 SYSCTL_INT(_vfs_hammer2, OID_AUTO, hardlink_enable, CTLFLAG_RW,
123 &hammer2_hardlink_enable, 0, "");
124 SYSCTL_INT(_vfs_hammer2, OID_AUTO, flush_pipe, CTLFLAG_RW,
125 &hammer2_flush_pipe, 0, "");
126 SYSCTL_INT(_vfs_hammer2, OID_AUTO, synchronous_flush, CTLFLAG_RW,
127 &hammer2_synchronous_flush, 0, "");
128 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, limit_dirty_chains, CTLFLAG_RW,
129 &hammer2_limit_dirty_chains, 0, "");
130 SYSCTL_INT(_vfs_hammer2, OID_AUTO, dio_count, CTLFLAG_RD,
131 &hammer2_dio_count, 0, "");
133 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_read, CTLFLAG_RW,
134 &hammer2_iod_file_read, 0, "");
135 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_read, CTLFLAG_RW,
136 &hammer2_iod_meta_read, 0, "");
137 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_read, CTLFLAG_RW,
138 &hammer2_iod_indr_read, 0, "");
139 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_fmap_read, CTLFLAG_RW,
140 &hammer2_iod_fmap_read, 0, "");
141 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_read, CTLFLAG_RW,
142 &hammer2_iod_volu_read, 0, "");
144 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_write, CTLFLAG_RW,
145 &hammer2_iod_file_write, 0, "");
146 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_write, CTLFLAG_RW,
147 &hammer2_iod_meta_write, 0, "");
148 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_write, CTLFLAG_RW,
149 &hammer2_iod_indr_write, 0, "");
150 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_fmap_write, CTLFLAG_RW,
151 &hammer2_iod_fmap_write, 0, "");
152 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_write, CTLFLAG_RW,
153 &hammer2_iod_volu_write, 0, "");
155 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_file_read, CTLFLAG_RW,
156 &hammer2_ioa_file_read, 0, "");
157 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_meta_read, CTLFLAG_RW,
158 &hammer2_ioa_meta_read, 0, "");
159 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_indr_read, CTLFLAG_RW,
160 &hammer2_ioa_indr_read, 0, "");
161 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_fmap_read, CTLFLAG_RW,
162 &hammer2_ioa_fmap_read, 0, "");
163 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_volu_read, CTLFLAG_RW,
164 &hammer2_ioa_volu_read, 0, "");
166 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_file_write, CTLFLAG_RW,
167 &hammer2_ioa_file_write, 0, "");
168 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_meta_write, CTLFLAG_RW,
169 &hammer2_ioa_meta_write, 0, "");
170 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_indr_write, CTLFLAG_RW,
171 &hammer2_ioa_indr_write, 0, "");
172 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_fmap_write, CTLFLAG_RW,
173 &hammer2_ioa_fmap_write, 0, "");
174 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_volu_write, CTLFLAG_RW,
175 &hammer2_ioa_volu_write, 0, "");
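/*
 * The knobs and counters above are exported under the vfs.hammer2
 * sysctl tree created by the SYSCTL_NODE() above.  A hypothetical
 * userland session might inspect or tune them like this:
 *
 *	sysctl vfs.hammer2.iod_file_read	(cumulative file read I/O)
 *	sysctl vfs.hammer2.dio_count		(current DIO structures)
 *	sysctl vfs.hammer2.flush_pipe=200	(widen write hysteresis)
 */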
177 static int hammer2_vfs_init(struct vfsconf *conf);
178 static int hammer2_vfs_uninit(struct vfsconf *vfsp);
179 static int hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
181 static int hammer2_remount(hammer2_dev_t *, struct mount *, char *,
182 struct vnode *, struct ucred *);
183 static int hammer2_recovery(hammer2_dev_t *hmp);
184 static int hammer2_vfs_unmount(struct mount *mp, int mntflags);
185 static int hammer2_vfs_root(struct mount *mp, struct vnode **vpp);
186 static int hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp,
188 static int hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
190 static int hammer2_vfs_vget(struct mount *mp, struct vnode *dvp,
191 ino_t ino, struct vnode **vpp);
192 static int hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
193 struct fid *fhp, struct vnode **vpp);
194 static int hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp);
195 static int hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
196 int *exflagsp, struct ucred **credanonp);
198 static int hammer2_install_volume_header(hammer2_dev_t *hmp);
199 static int hammer2_sync_scan2(struct mount *mp, struct vnode *vp, void *data);
201 static void hammer2_update_pmps(hammer2_dev_t *hmp);
203 static void hammer2_mount_helper(struct mount *mp, hammer2_pfs_t *pmp);
204 static void hammer2_unmount_helper(struct mount *mp, hammer2_pfs_t *pmp,
208 * HAMMER2 vfs operations.
210 static struct vfsops hammer2_vfsops = {
211 .vfs_init = hammer2_vfs_init,
212 .vfs_uninit = hammer2_vfs_uninit,
213 .vfs_sync = hammer2_vfs_sync,
214 .vfs_mount = hammer2_vfs_mount,
215 .vfs_unmount = hammer2_vfs_unmount,
216 .vfs_root = hammer2_vfs_root,
217 .vfs_statfs = hammer2_vfs_statfs,
218 .vfs_statvfs = hammer2_vfs_statvfs,
219 .vfs_vget = hammer2_vfs_vget,
220 .vfs_vptofh = hammer2_vfs_vptofh,
221 .vfs_fhtovp = hammer2_vfs_fhtovp,
222 .vfs_checkexp = hammer2_vfs_checkexp
225 MALLOC_DEFINE(M_HAMMER2, "HAMMER2-mount", "");
227 VFS_SET(hammer2_vfsops, hammer2, 0);
228 MODULE_VERSION(hammer2, 1);
232 hammer2_vfs_init(struct vfsconf *conf)
234 static struct objcache_malloc_args margs_read;
235 static struct objcache_malloc_args margs_write;
236 static struct objcache_malloc_args margs_vop;
242 if (HAMMER2_BLOCKREF_BYTES != sizeof(struct hammer2_blockref))
244 if (HAMMER2_INODE_BYTES != sizeof(struct hammer2_inode_data))
246 if (HAMMER2_VOLUME_BYTES != sizeof(struct hammer2_volume_data))
250 kprintf("HAMMER2 structure size mismatch; cannot continue.\n");
252 margs_read.objsize = 65536;
253 margs_read.mtype = M_HAMMER2_DEBUFFER;
255 margs_write.objsize = 32768;
256 margs_write.mtype = M_HAMMER2_CBUFFER;
258 margs_vop.objsize = sizeof(hammer2_xop_t);
259 margs_vop.mtype = M_HAMMER2;
262 * Note that for the XOPS cache we want backing store allocations
263 * to use M_ZERO. This is not allowed in objcache_get() (to avoid
264 * confusion), so use the backing store function that does it. This
265 * means that initial XOPS objects are zeroed but REUSED objects are
266 * not. So we are responsible for cleaning the object up sufficiently
267 * for our needs before objcache_put()ing it back (typically just the
270 cache_buffer_read = objcache_create(margs_read.mtype->ks_shortdesc,
271 0, 1, NULL, NULL, NULL,
272 objcache_malloc_alloc,
273 objcache_malloc_free,
275 cache_buffer_write = objcache_create(margs_write.mtype->ks_shortdesc,
276 0, 1, NULL, NULL, NULL,
277 objcache_malloc_alloc,
278 objcache_malloc_free,
280 cache_xops = objcache_create(margs_vop.mtype->ks_shortdesc,
281 0, 1, NULL, NULL, NULL,
282 objcache_malloc_alloc_zero,
283 objcache_malloc_free,
287 lockinit(&hammer2_mntlk, "mntlk", 0, 0);
288 TAILQ_INIT(&hammer2_mntlist);
289 TAILQ_INIT(&hammer2_pfslist);
291 hammer2_limit_dirty_chains = desiredvnodes / 10;
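/*
 * Illustrative sketch (not part of the driver) of the objcache
 * discipline described above.  A fresh backing allocation for
 * cache_xops arrives zeroed, but an object recycled through the cache
 * does not, so the consumer must scrub whatever fields it dirtied
 * before returning the object:
 *
 *	hammer2_xop_t *xop;
 *
 *	xop = objcache_get(cache_xops, M_WAITOK);
 *	// ... use xop; only a brand-new backing allocation is zeroed ...
 *	// reset the fields that were dirtied (caller-specific)
 *	objcache_put(cache_xops, xop);
 */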
298 hammer2_vfs_uninit(struct vfsconf *vfsp __unused)
300 objcache_destroy(cache_buffer_read);
301 objcache_destroy(cache_buffer_write);
302 objcache_destroy(cache_xops);
307 * Core PFS allocator. Used to allocate the pmp structure for PFS cluster
308 * mounts and the spmp structure for media (hmp) structures.
310 * pmp->modify_tid tracks new modify_tid transaction ids for front-end
311 * transactions. Note that synchronization does not use this field.
312 * (typically frontend operations and synchronization cannot run on the
313 * same PFS node at the same time).
318 hammer2_pfsalloc(hammer2_chain_t *chain, const hammer2_inode_data_t *ripdata,
319 hammer2_tid_t modify_tid)
321 hammer2_inode_t *iroot;
328 * Locate or create the PFS based on the cluster id. If ripdata
329 * is NULL this is a spmp which is unique and is always allocated.
332 TAILQ_FOREACH(pmp, &hammer2_pfslist, mntentry) {
333 if (bcmp(&pmp->pfs_clid, &ripdata->meta.pfs_clid,
334 sizeof(pmp->pfs_clid)) == 0) {
343 pmp = kmalloc(sizeof(*pmp), M_HAMMER2, M_WAITOK | M_ZERO);
344 hammer2_trans_manage_init(pmp);
345 kmalloc_create(&pmp->minode, "HAMMER2-inodes");
346 kmalloc_create(&pmp->mmsg, "HAMMER2-pfsmsg");
347 lockinit(&pmp->lock, "pfslk", 0, 0);
348 spin_init(&pmp->inum_spin, "hm2pfsalloc_inum");
349 RB_INIT(&pmp->inum_tree);
350 TAILQ_INIT(&pmp->unlinkq);
351 spin_init(&pmp->list_spin, "hm2pfsalloc_list");
353 for (j = 0; j < HAMMER2_XOPGROUPS; ++j)
354 hammer2_xop_group_init(pmp, &pmp->xop_groups[j]);
357 * Save the last media transaction id for the flusher. Set
361 pmp->pfs_clid = ripdata->meta.pfs_clid;
362 TAILQ_INSERT_TAIL(&hammer2_pfslist, pmp, mntentry);
365 * The synchronization thread may start too early; make
366 * sure it stays frozen until we are ready to let it go.
370 pmp->primary_thr.flags = HAMMER2_THREAD_FROZEN |
371 HAMMER2_THREAD_REMASTER;
376 * Create the PFS's root inode.
378 if ((iroot = pmp->iroot) == NULL) {
379 iroot = hammer2_inode_get(pmp, NULL, NULL, -1);
381 hammer2_inode_ref(iroot);
382 hammer2_inode_unlock(iroot);
386 * Stop here if no chain is passed in.
392 * When a chain is passed in we must add it to the PFS's root
393 * inode, update pmp->pfs_types[], and update the synchronization
396 * At the moment empty spots can develop due to removals or failures.
397 * Ultimately we want to re-fill these spots but doing so might
398 * confuse running code. XXX
400 hammer2_inode_ref(iroot);
401 hammer2_mtx_ex(&iroot->lock);
402 j = iroot->cluster.nchains;
404 kprintf("add PFS to pmp %p[%d]\n", pmp, j);
406 if (j == HAMMER2_MAXCLUSTER) {
407 kprintf("hammer2_mount: cluster full!\n");
408 /* XXX fatal error? */
410 KKASSERT(chain->pmp == NULL);
412 hammer2_chain_ref(chain);
413 iroot->cluster.array[j].chain = chain;
414 pmp->pfs_types[j] = ripdata->meta.pfs_type;
415 pmp->pfs_names[j] = kstrdup(ripdata->filename, M_HAMMER2);
416 pmp->pfs_hmps[j] = chain->hmp;
419 * If the PFS is already mounted we must account
420 * for the mount_count here.
423 ++chain->hmp->mount_count;
426 * May have to fixup dirty chain tracking. Previous
427 * pmp was NULL so nothing to undo.
429 if (chain->flags & HAMMER2_CHAIN_MODIFIED)
430 hammer2_pfs_memory_inc(pmp);
433 iroot->cluster.nchains = j;
436 * Update nmasters from any PFS inode which is part of the cluster.
437 * It is possible that this will result in a value which is too
438 * high. MASTER PFSs are authoritative for pfs_nmasters and will
439 * override this value later on.
441 * (This informs us of masters that might not currently be
442 * discoverable by this mount).
444 if (ripdata && pmp->pfs_nmasters < ripdata->meta.pfs_nmasters) {
445 pmp->pfs_nmasters = ripdata->meta.pfs_nmasters;
449 * Count visible masters. Masters are usually added with
450 * ripdata->meta.pfs_nmasters set to 1. This detects when there
451 * are more (XXX and must update the master inodes).
454 for (i = 0; i < iroot->cluster.nchains; ++i) {
455 if (pmp->pfs_types[i] == HAMMER2_PFSTYPE_MASTER)
458 if (pmp->pfs_nmasters < count)
459 pmp->pfs_nmasters = count;
462 * Create missing synchronization and support threads.
464 * Single-node masters (including snapshots) have nothing to
465 * synchronize and do not require this thread.
467 * Multi-node masters or any number of soft masters, slaves, copy,
468 * or other PFS types need the thread.
470 * Each thread is responsible for its particular cluster index.
471 * We use independent threads so stalls or mismatches related to
472 * any given target do not affect other targets.
474 for (i = 0; i < iroot->cluster.nchains; ++i) {
476 * Single-node masters (including snapshots) have nothing
477 * to synchronize and will make direct xops support calls,
478 * thus they do not require this thread.
480 * Note that there can be thousands of snapshots. We do not
481 * want to create thousands of threads.
483 if (pmp->pfs_nmasters <= 1 &&
484 pmp->pfs_types[i] == HAMMER2_PFSTYPE_MASTER) {
489 * Sync support thread
491 if (pmp->sync_thrs[i].td == NULL) {
492 hammer2_thr_create(&pmp->sync_thrs[i], pmp,
494 hammer2_primary_sync_thread);
499 * Create missing Xop threads
502 hammer2_xop_helper_create(pmp);
504 hammer2_mtx_unlock(&iroot->lock);
505 hammer2_inode_drop(iroot);
511 * Destroy a PFS, typically only occurs after the last mount on a device
515 hammer2_pfsfree(hammer2_pfs_t *pmp)
517 hammer2_inode_t *iroot;
522 * Cleanup our reference on iroot. iroot is (should) not be needed
525 TAILQ_REMOVE(&hammer2_pfslist, pmp, mntentry);
529 for (i = 0; i < iroot->cluster.nchains; ++i) {
530 hammer2_thr_delete(&pmp->sync_thrs[i]);
531 for (j = 0; j < HAMMER2_XOPGROUPS; ++j)
532 hammer2_thr_delete(&pmp->xop_groups[j].thrs[i]);
534 #if REPORT_REFS_ERRORS
535 if (pmp->iroot->refs != 1)
536 kprintf("PMP->IROOT %p REFS WRONG %d\n",
537 pmp->iroot, pmp->iroot->refs);
539 KKASSERT(pmp->iroot->refs == 1);
541 /* ref for pmp->iroot */
542 hammer2_inode_drop(pmp->iroot);
546 kmalloc_destroy(&pmp->mmsg);
547 kmalloc_destroy(&pmp->minode);
549 kfree(pmp, M_HAMMER2);
553 * Remove all references to hmp from the pfs list. Any PFS which becomes
554 * empty is terminated and freed.
559 hammer2_pfsfree_scan(hammer2_dev_t *hmp)
562 hammer2_inode_t *iroot;
563 hammer2_chain_t *rchain;
569 TAILQ_FOREACH(pmp, &hammer2_pfslist, mntentry) {
570 if ((iroot = pmp->iroot) == NULL)
572 if (hmp->spmp == pmp) {
573 kprintf("unmount hmp %p remove spmp %p\n",
579 * Determine if this PFS is affected. If it is we must
580 * freeze all management threads and lock its iroot.
582 * Freezing a management thread forces it idle; operations
583 * in-progress will be aborted and it will have to start
584 * over again when unfrozen, or exit if told to exit.
586 for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
587 if (pmp->pfs_hmps[i] == hmp)
590 if (i != HAMMER2_MAXCLUSTER) {
592 * Make sure all synchronization threads are locked
595 for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
596 if (pmp->pfs_hmps[i] == NULL)
598 hammer2_thr_freeze_async(&pmp->sync_thrs[i]);
599 for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
600 hammer2_thr_freeze_async(
601 &pmp->xop_groups[j].thrs[i]);
604 for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
605 if (pmp->pfs_hmps[i] == NULL)
607 hammer2_thr_freeze(&pmp->sync_thrs[i]);
608 for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
610 &pmp->xop_groups[j].thrs[i]);
615 * Lock the inode and clean out matching chains.
616 * Note that we cannot use hammer2_inode_lock_*()
617 * here because that would attempt to validate the
618 * cluster that we are in the middle of ripping
621 * WARNING! We are working directly on the inodes
624 hammer2_mtx_ex(&iroot->lock);
627 * Remove the chain from matching elements of the PFS.
629 for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
630 if (pmp->pfs_hmps[i] != hmp)
632 hammer2_thr_delete(&pmp->sync_thrs[i]);
633 for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
635 &pmp->xop_groups[j].thrs[i]);
637 rchain = iroot->cluster.array[i].chain;
638 iroot->cluster.array[i].chain = NULL;
639 pmp->pfs_types[i] = 0;
640 if (pmp->pfs_names[i]) {
641 kfree(pmp->pfs_names[i], M_HAMMER2);
642 pmp->pfs_names[i] = NULL;
645 hammer2_chain_drop(rchain);
647 if (iroot->cluster.focus == rchain)
648 iroot->cluster.focus = NULL;
650 pmp->pfs_hmps[i] = NULL;
652 hammer2_mtx_unlock(&iroot->lock);
653 didfreeze = 1; /* remaster, unfreeze down below */
659 * Cleanup trailing chains. Gaps may remain.
661 for (i = HAMMER2_MAXCLUSTER - 1; i >= 0; --i) {
662 if (pmp->pfs_hmps[i])
665 iroot->cluster.nchains = i + 1;
668 * If the PMP has no elements remaining we can destroy it.
669 * (this will transition management threads from frozen->exit).
671 if (iroot->cluster.nchains == 0) {
672 kprintf("unmount hmp %p last ref to PMP=%p\n",
674 hammer2_pfsfree(pmp);
679 * If elements still remain we need to set the REMASTER
680 * flag and unfreeze it.
683 for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
684 if (pmp->pfs_hmps[i] == NULL)
686 hammer2_thr_remaster(&pmp->sync_thrs[i]);
687 hammer2_thr_unfreeze(&pmp->sync_thrs[i]);
688 for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
689 hammer2_thr_remaster(
690 &pmp->xop_groups[j].thrs[i]);
691 hammer2_thr_unfreeze(
692 &pmp->xop_groups[j].thrs[i]);
700 * Mount or remount HAMMER2 filesystem from physical media
703 * mp mount point structure
709 * mp mount point structure
710 * path path to mount point
711 * data pointer to argument structure in user space
712 * volume volume path (device@LABEL form)
713 * hflags user mount flags
714 * cred user credentials
721 hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
724 struct hammer2_mount_info info;
728 hammer2_key_t key_next;
729 hammer2_key_t key_dummy;
732 struct nlookupdata nd;
733 hammer2_chain_t *parent;
734 hammer2_chain_t *chain;
735 hammer2_cluster_t *cluster;
736 const hammer2_inode_data_t *ripdata;
737 hammer2_blockref_t bref;
739 char devstr[MNAMELEN];
756 kprintf("hammer2_mount\n");
762 bzero(&info, sizeof(info));
763 info.cluster_fd = -1;
767 * Non-root mount or updating a mount
769 error = copyin(data, &info, sizeof(info));
773 error = copyinstr(info.volume, devstr, MNAMELEN - 1, &done);
777 /* Extract device and label */
779 label = strchr(devstr, '@');
781 ((label + 1) - dev) > done) {
789 if (mp->mnt_flag & MNT_UPDATE) {
791 * Update mount. Note that pmp->iroot->cluster is
792 * an inode-embedded cluster and thus cannot be
795 * XXX HAMMER2 needs to implement NFS export via
799 cluster = &pmp->iroot->cluster;
800 for (i = 0; i < cluster->nchains; ++i) {
801 if (cluster->array[i].chain == NULL)
803 hmp = cluster->array[i].chain->hmp;
805 error = hammer2_remount(hmp, mp, path,
818 * Lookup name and verify it refers to a block device.
820 error = nlookup_init(&nd, dev, UIO_SYSSPACE, NLC_FOLLOW);
822 error = nlookup(&nd);
824 error = cache_vref(&nd.nl_nch, nd.nl_cred, &devvp);
828 if (vn_isdisk(devvp, &error))
829 error = vfs_mountedon(devvp);
833 * Determine if the device has already been mounted. After this
834 * check, hmp will be non-NULL if this is a second or subsequent
835 * hammer2 mount from the same device.
837 lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);
838 TAILQ_FOREACH(hmp, &hammer2_mntlist, mntentry) {
839 if (hmp->devvp == devvp)
844 * Open the device if this isn't a secondary mount and construct
845 * the H2 device mount (hmp).
848 hammer2_chain_t *schain;
851 if (error == 0 && vcount(devvp) > 0)
855 * Now open the device
858 ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);
859 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
860 error = vinvalbuf(devvp, V_SAVE, 0, 0);
862 error = VOP_OPEN(devvp,
863 ronly ? FREAD : FREAD | FWRITE,
868 if (error && devvp) {
873 lockmgr(&hammer2_mntlk, LK_RELEASE);
876 hmp = kmalloc(sizeof(*hmp), M_HAMMER2, M_WAITOK | M_ZERO);
877 ksnprintf(hmp->devrepname, sizeof(hmp->devrepname), "%s", dev);
880 kmalloc_create(&hmp->mchain, "HAMMER2-chains");
881 TAILQ_INSERT_TAIL(&hammer2_mntlist, hmp, mntentry);
882 RB_INIT(&hmp->iotree);
883 spin_init(&hmp->io_spin, "hm2mount_io");
884 spin_init(&hmp->list_spin, "hm2mount_list");
885 TAILQ_INIT(&hmp->flushq);
887 lockinit(&hmp->vollk, "h2vol", 0, 0);
890 * vchain setup. vchain.data is embedded.
891 * vchain.refs is initialized and will never drop to 0.
893 * NOTE! voldata is not yet loaded.
895 hmp->vchain.hmp = hmp;
896 hmp->vchain.refs = 1;
897 hmp->vchain.data = (void *)&hmp->voldata;
898 hmp->vchain.bref.type = HAMMER2_BREF_TYPE_VOLUME;
899 hmp->vchain.bref.data_off = 0 | HAMMER2_PBUFRADIX;
900 hmp->vchain.bref.mirror_tid = hmp->voldata.mirror_tid;
902 hammer2_chain_core_init(&hmp->vchain);
903 /* hmp->vchain.u.xxx is left NULL */
906 * fchain setup. fchain.data is embedded.
907 * fchain.refs is initialized and will never drop to 0.
909 * The data is not used but needs to be initialized to
910 * pass assertion muster. We use this chain primarily
911 * as a placeholder for the freemap's top-level RBTREE
912 * so it does not interfere with the volume's topology
915 hmp->fchain.hmp = hmp;
916 hmp->fchain.refs = 1;
917 hmp->fchain.data = (void *)&hmp->voldata.freemap_blockset;
918 hmp->fchain.bref.type = HAMMER2_BREF_TYPE_FREEMAP;
919 hmp->fchain.bref.data_off = 0 | HAMMER2_PBUFRADIX;
920 hmp->fchain.bref.mirror_tid = hmp->voldata.freemap_tid;
921 hmp->fchain.bref.methods =
922 HAMMER2_ENC_CHECK(HAMMER2_CHECK_FREEMAP) |
923 HAMMER2_ENC_COMP(HAMMER2_COMP_NONE);
925 hammer2_chain_core_init(&hmp->fchain);
926 /* hmp->fchain.u.xxx is left NULL */
929 * Install the volume header and initialize fields from
932 error = hammer2_install_volume_header(hmp);
934 hammer2_unmount_helper(mp, NULL, hmp);
935 lockmgr(&hammer2_mntlk, LK_RELEASE);
936 hammer2_vfs_unmount(mp, MNT_FORCE);
941 * Really important to get these right or flush will get
944 hmp->spmp = hammer2_pfsalloc(NULL, NULL, 0);
945 kprintf("alloc spmp %p tid %016jx\n",
946 hmp->spmp, hmp->voldata.mirror_tid);
950 * Dummy-up vchain and fchain's modify_tid. mirror_tid
951 * is inherited from the volume header.
954 hmp->vchain.bref.mirror_tid = hmp->voldata.mirror_tid;
955 hmp->vchain.bref.modify_tid = hmp->vchain.bref.mirror_tid;
956 hmp->vchain.pmp = spmp;
957 hmp->fchain.bref.mirror_tid = hmp->voldata.freemap_tid;
958 hmp->fchain.bref.modify_tid = hmp->fchain.bref.mirror_tid;
959 hmp->fchain.pmp = spmp;
962 * First locate the super-root inode, which is key 0
963 * relative to the volume header's blockset.
965 * Then locate the root inode by scanning the directory keyspace
966 * represented by the label.
968 parent = hammer2_chain_lookup_init(&hmp->vchain, 0);
969 schain = hammer2_chain_lookup(&parent, &key_dummy,
970 HAMMER2_SROOT_KEY, HAMMER2_SROOT_KEY,
972 hammer2_chain_lookup_done(parent);
973 if (schain == NULL) {
974 kprintf("hammer2_mount: invalid super-root\n");
975 hammer2_unmount_helper(mp, NULL, hmp);
976 lockmgr(&hammer2_mntlk, LK_RELEASE);
977 hammer2_vfs_unmount(mp, MNT_FORCE);
981 kprintf("hammer2_mount: error %s reading super-root\n",
982 hammer2_error_str(schain->error));
983 hammer2_chain_unlock(schain);
984 hammer2_chain_drop(schain);
986 hammer2_unmount_helper(mp, NULL, hmp);
987 lockmgr(&hammer2_mntlk, LK_RELEASE);
988 hammer2_vfs_unmount(mp, MNT_FORCE);
993 * The super-root always uses an inode_tid of 1 when
997 spmp->modify_tid = schain->bref.modify_tid + 1;
1000 * Sanity-check schain's pmp and finish initialization.
1001 * Any chain belonging to the super-root topology should
1002 * have a NULL pmp (not even set to spmp).
1004 ripdata = &hammer2_chain_rdata(schain)->ipdata;
1005 KKASSERT(schain->pmp == NULL);
1006 spmp->pfs_clid = ripdata->meta.pfs_clid;
1009 * Replace the dummy spmp->iroot with a real one. It's
1010 * easier to just do a wholesale replacement than to try
1011 * to update the chain and fixup the iroot fields.
1013 * The returned inode is locked with the supplied cluster.
1015 cluster = hammer2_cluster_from_chain(schain);
1016 hammer2_inode_drop(spmp->iroot);
1018 spmp->iroot = hammer2_inode_get(spmp, NULL, cluster, -1);
1019 spmp->spmp_hmp = hmp;
1020 spmp->pfs_types[0] = ripdata->meta.pfs_type;
1021 spmp->pfs_hmps[0] = hmp;
1022 hammer2_inode_ref(spmp->iroot);
1023 hammer2_inode_unlock(spmp->iroot);
1024 hammer2_cluster_unlock(cluster);
1025 hammer2_cluster_drop(cluster);
1027 /* leave spmp->iroot with one ref */
1029 if ((mp->mnt_flag & MNT_RDONLY) == 0) {
1030 error = hammer2_recovery(hmp);
1031 /* XXX do something with error */
1033 hammer2_update_pmps(hmp);
1034 hammer2_iocom_init(hmp);
1037 * Ref the cluster management messaging descriptor. The mount
1038 * program deals with the other end of the communications pipe.
1040 fp = holdfp(curproc->p_fd, info.cluster_fd, -1);
1042 hammer2_cluster_reconnect(hmp, fp);
1044 kprintf("hammer2_mount: bad cluster_fd!\n");
1051 * Lookup the mount point under the media-localized super-root.
1052 * Scanning hammer2_pfslist doesn't help us because it represents
1053 * PFS cluster ids which can aggregate several named PFSs together.
1055 * cluster->pmp will incorrectly point to spmp and must be fixed
1058 hammer2_inode_lock(spmp->iroot, 0);
1059 parent = hammer2_inode_chain(spmp->iroot, 0, HAMMER2_RESOLVE_ALWAYS);
1060 lhc = hammer2_dirhash(label, strlen(label));
1061 chain = hammer2_chain_lookup(&parent, &key_next,
1062 lhc, lhc + HAMMER2_DIRHASH_LOMASK,
1065 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE &&
1066 strcmp(label, chain->data->ipdata.filename) == 0) {
1069 chain = hammer2_chain_next(&parent, chain, &key_next,
1071 lhc + HAMMER2_DIRHASH_LOMASK,
1075 hammer2_chain_unlock(parent);
1076 hammer2_chain_drop(parent);
1078 hammer2_inode_unlock(spmp->iroot);
1081 * PFS could not be found?
1083 if (chain == NULL) {
1084 kprintf("hammer2_mount: PFS label not found\n");
1085 hammer2_unmount_helper(mp, NULL, hmp);
1086 lockmgr(&hammer2_mntlk, LK_RELEASE);
1087 hammer2_vfs_unmount(mp, MNT_FORCE);
1093 * Acquire the pmp structure (it should have already been allocated
1094 * via hammer2_update_pmps() so do not pass cluster in to add to
1095 * available chains).
1097 * Check if the cluster has already been mounted. A cluster can
1098 * only be mounted once; use null mounts to mount additional copies.
1100 ripdata = &chain->data->ipdata;
1102 pmp = hammer2_pfsalloc(NULL, ripdata, bref.modify_tid);
1103 hammer2_chain_unlock(chain);
1104 hammer2_chain_drop(chain);
1107 kprintf("hammer2_mount: PFS already mounted!\n");
1108 hammer2_unmount_helper(mp, NULL, hmp);
1109 lockmgr(&hammer2_mntlk, LK_RELEASE);
1110 hammer2_vfs_unmount(mp, MNT_FORCE);
1118 kprintf("hammer2_mount hmp=%p pmp=%p\n", hmp, pmp);
1120 mp->mnt_flag = MNT_LOCAL;
1121 mp->mnt_kern_flag |= MNTK_ALL_MPSAFE; /* all entry pts are SMP */
1122 mp->mnt_kern_flag |= MNTK_THR_SYNC; /* new vsyncscan semantics */
1125 * required mount structure initializations
1127 mp->mnt_stat.f_iosize = HAMMER2_PBUFSIZE;
1128 mp->mnt_stat.f_bsize = HAMMER2_PBUFSIZE;
1130 mp->mnt_vstat.f_frsize = HAMMER2_PBUFSIZE;
1131 mp->mnt_vstat.f_bsize = HAMMER2_PBUFSIZE;
1136 mp->mnt_iosize_max = MAXPHYS;
1139 * Connect up mount pointers.
1141 hammer2_mount_helper(mp, pmp);
1143 lockmgr(&hammer2_mntlk, LK_RELEASE);
1149 vfs_add_vnodeops(mp, &hammer2_vnode_vops, &mp->mnt_vn_norm_ops);
1150 vfs_add_vnodeops(mp, &hammer2_spec_vops, &mp->mnt_vn_spec_ops);
1151 vfs_add_vnodeops(mp, &hammer2_fifo_vops, &mp->mnt_vn_fifo_ops);
1153 copyinstr(info.volume, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, &size);
1154 bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
1155 bzero(mp->mnt_stat.f_mntonname, sizeof(mp->mnt_stat.f_mntonname));
1156 copyinstr(path, mp->mnt_stat.f_mntonname,
1157 sizeof(mp->mnt_stat.f_mntonname) - 1,
1161 * Initial statfs to prime mnt_stat.
1163 hammer2_vfs_statfs(mp, &mp->mnt_stat, cred);
1169 * Scan PFSs under the super-root and create hammer2_pfs structures.
1173 hammer2_update_pmps(hammer2_dev_t *hmp)
1175 const hammer2_inode_data_t *ripdata;
1176 hammer2_chain_t *parent;
1177 hammer2_chain_t *chain;
1178 hammer2_blockref_t bref;
1179 hammer2_pfs_t *spmp;
1181 hammer2_key_t key_next;
1182 int cache_index = -1;
1185 * Lookup mount point under the media-localized super-root.
1187 * cluster->pmp will incorrectly point to spmp and must be fixed
1191 hammer2_inode_lock(spmp->iroot, 0);
1192 parent = hammer2_inode_chain(spmp->iroot, 0, HAMMER2_RESOLVE_ALWAYS);
1193 chain = hammer2_chain_lookup(&parent, &key_next,
1194 HAMMER2_KEY_MIN, HAMMER2_KEY_MAX,
1197 if (chain->bref.type != HAMMER2_BREF_TYPE_INODE)
1199 ripdata = &chain->data->ipdata;
1201 kprintf("ADD LOCAL PFS: %s\n", ripdata->filename);
1203 pmp = hammer2_pfsalloc(chain, ripdata, bref.modify_tid);
1204 chain = hammer2_chain_next(&parent, chain, &key_next,
1205 key_next, HAMMER2_KEY_MAX,
1209 hammer2_chain_unlock(parent);
1210 hammer2_chain_drop(parent);
1212 hammer2_inode_unlock(spmp->iroot);
1217 hammer2_remount(hammer2_dev_t *hmp, struct mount *mp, char *path,
1218 struct vnode *devvp, struct ucred *cred)
1222 if (hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
1223 error = hammer2_recovery(hmp);
1232 hammer2_vfs_unmount(struct mount *mp, int mntflags)
1243 lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);
1246 * If mount initialization proceeded far enough we must flush
1247 * its vnodes and sync the underlying mount points. Three syncs
1248 * are required to fully flush the filesystem (freemap updates lag
1249 * by one flush, and one extra for safety).
1251 if (mntflags & MNT_FORCE)
1256 error = vflush(mp, 0, flags);
1259 hammer2_vfs_sync(mp, MNT_WAIT);
1260 hammer2_vfs_sync(mp, MNT_WAIT);
1261 hammer2_vfs_sync(mp, MNT_WAIT);
1265 * Cleanup the frontend support XOPS threads
1267 hammer2_xop_helper_cleanup(pmp);
1270 * Cleanup our reference on ihidden.
1273 hammer2_inode_drop(pmp->ihidden);
1274 pmp->ihidden = NULL;
1277 hammer2_unmount_helper(mp, pmp, NULL);
1281 lockmgr(&hammer2_mntlk, LK_RELEASE);
1287 * Mount helper, hook the system mount into our PFS.
1288 * The mount lock is held.
1290 * We must bump the mount_count on related devices for any
1295 hammer2_mount_helper(struct mount *mp, hammer2_pfs_t *pmp)
1297 hammer2_cluster_t *cluster;
1298 hammer2_chain_t *rchain;
1301 mp->mnt_data = (qaddr_t)pmp;
1305 * After pmp->mp is set we have to adjust hmp->mount_count.
1307 cluster = &pmp->iroot->cluster;
1308 for (i = 0; i < cluster->nchains; ++i) {
1309 rchain = cluster->array[i].chain;
1312 ++rchain->hmp->mount_count;
1313 kprintf("hammer2_mount hmp=%p ++mount_count=%d\n",
1314 rchain->hmp, rchain->hmp->mount_count);
1318 * Create missing Xop threads
1320 hammer2_xop_helper_create(pmp);
1324 * Mount helper, unhook the system mount from our PFS.
1325 * The mount lock is held.
1327 * If hmp is supplied a mount responsible for being the first to open
1328 * the block device failed and the block device and all PFSs using the
1329 * block device must be cleaned up.
1331 * If pmp is supplied multiple devices might be backing the PFS and each
1332 * must be disconnected. This might not be the last PFS using some of the
1333 * underlying devices. Also, we have to adjust our hmp->mount_count
1334 * accounting for the devices backing the pmp which is now undergoing an
1339 hammer2_unmount_helper(struct mount *mp, hammer2_pfs_t *pmp, hammer2_dev_t *hmp)
1341 hammer2_cluster_t *cluster;
1342 hammer2_chain_t *rchain;
1343 struct vnode *devvp;
1349 * If no device is supplied this is a high-level unmount and we have
1350 * to disconnect the mount, adjust mount_count, and locate devices
1351 * that might now have no mounts.
1354 KKASSERT(hmp == NULL);
1355 KKASSERT((void *)(intptr_t)mp->mnt_data == pmp);
1357 mp->mnt_data = NULL;
1360 * After pmp->mp is cleared we have to account for
1363 cluster = &pmp->iroot->cluster;
1364 for (i = 0; i < cluster->nchains; ++i) {
1365 rchain = cluster->array[i].chain;
1368 --rchain->hmp->mount_count;
1369 kprintf("hammer2_unmount hmp=%p --mount_count=%d\n",
1370 rchain->hmp, rchain->hmp->mount_count);
1371 /* scrapping hmp now may invalidate the pmp */
1374 TAILQ_FOREACH(hmp, &hammer2_mntlist, mntentry) {
1375 if (hmp->mount_count == 0) {
1376 hammer2_unmount_helper(NULL, NULL, hmp);
1384 * Try to terminate the block device. We can't terminate it if
1385 * there are still PFSs referencing it.
1387 kprintf("hammer2_unmount hmp=%p mount_count=%d\n",
1388 hmp, hmp->mount_count);
1389 if (hmp->mount_count)
1392 hammer2_pfsfree_scan(hmp);
1393 hammer2_dev_exlock(hmp); /* XXX order */
1396 * Cycle the volume data lock as a safety (probably not needed any
1397 * more). To ensure everything is out we need to flush at least
1398 * three times. (1) The running of the unlinkq can dirty the
1399 * filesystem, (2) A normal flush can dirty the freemap, and
1400 * (3) ensure that the freemap is fully synchronized.
1402 * The next mount's recovery scan can clean everything up but we want
1403 * to leave the filesystem in a 100% clean state on a normal unmount.
1406 hammer2_voldata_lock(hmp);
1407 hammer2_voldata_unlock(hmp);
1409 hammer2_iocom_uninit(hmp);
1411 if ((hmp->vchain.flags | hmp->fchain.flags) &
1412 HAMMER2_CHAIN_FLUSH_MASK) {
1413 kprintf("hammer2_unmount: chains left over "
1414 "after final sync\n");
1415 kprintf(" vchain %08x\n", hmp->vchain.flags);
1416 kprintf(" fchain %08x\n", hmp->fchain.flags);
1418 if (hammer2_debug & 0x0010)
1419 Debugger("entered debugger");
1422 KKASSERT(hmp->spmp == NULL);
1425 * Finish up with the device vnode
1427 if ((devvp = hmp->devvp) != NULL) {
1428 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
1429 vinvalbuf(devvp, (ronly ? 0 : V_SAVE), 0, 0);
1431 VOP_CLOSE(devvp, (ronly ? FREAD : FREAD|FWRITE), NULL);
1438 * Clear vchain/fchain flags that might prevent final cleanup
1441 if (hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED) {
1442 atomic_clear_int(&hmp->vchain.flags,
1443 HAMMER2_CHAIN_MODIFIED);
1444 hammer2_pfs_memory_wakeup(hmp->vchain.pmp);
1445 hammer2_chain_drop(&hmp->vchain);
1447 if (hmp->vchain.flags & HAMMER2_CHAIN_UPDATE) {
1448 atomic_clear_int(&hmp->vchain.flags,
1449 HAMMER2_CHAIN_UPDATE);
1450 hammer2_chain_drop(&hmp->vchain);
1453 if (hmp->fchain.flags & HAMMER2_CHAIN_MODIFIED) {
1454 atomic_clear_int(&hmp->fchain.flags,
1455 HAMMER2_CHAIN_MODIFIED);
1456 hammer2_pfs_memory_wakeup(hmp->fchain.pmp);
1457 hammer2_chain_drop(&hmp->fchain);
1459 if (hmp->fchain.flags & HAMMER2_CHAIN_UPDATE) {
1460 atomic_clear_int(&hmp->fchain.flags,
1461 HAMMER2_CHAIN_UPDATE);
1462 hammer2_chain_drop(&hmp->fchain);
1466 * Final drop of embedded freemap root chain to
1467 * clean up fchain.core (fchain structure is not
1468 * flagged ALLOCATED so it is cleaned out and then
1471 hammer2_chain_drop(&hmp->fchain);
1474 * Final drop of embedded volume root chain to clean
1475 * up vchain.core (vchain structure is not flagged
1476 * ALLOCATED so it is cleaned out and then left to
1480 hammer2_dump_chain(&hmp->vchain, 0, &dumpcnt, 'v');
1482 hammer2_dump_chain(&hmp->fchain, 0, &dumpcnt, 'f');
1483 hammer2_dev_unlock(hmp);
1484 hammer2_chain_drop(&hmp->vchain);
1486 hammer2_io_cleanup(hmp, &hmp->iotree);
1487 if (hmp->iofree_count) {
1488 kprintf("io_cleanup: %d I/O's left hanging\n",
1492 TAILQ_REMOVE(&hammer2_mntlist, hmp, mntentry);
1493 kmalloc_destroy(&hmp->mchain);
1494 kfree(hmp, M_HAMMER2);
1499 hammer2_vfs_vget(struct mount *mp, struct vnode *dvp,
1500 ino_t ino, struct vnode **vpp)
1502 kprintf("hammer2_vget\n");
1503 return (EOPNOTSUPP);
1508 hammer2_vfs_root(struct mount *mp, struct vnode **vpp)
1515 if (pmp->iroot == NULL) {
1521 hammer2_inode_lock(pmp->iroot, HAMMER2_RESOLVE_SHARED);
1523 while (pmp->inode_tid == 0) {
1524 hammer2_xop_ipcluster_t *xop;
1525 hammer2_inode_meta_t *meta;
1527 xop = hammer2_xop_alloc(pmp->iroot, HAMMER2_XOP_MODIFYING);
1528 hammer2_xop_start(&xop->head, hammer2_xop_ipcluster);
1529 error = hammer2_xop_collect(&xop->head, 0);
1532 meta = &xop->head.cluster.focus->data->ipdata.meta;
1533 pmp->iroot->meta = *meta;
1534 pmp->inode_tid = meta->pfs_inum + 1;
1535 if (pmp->inode_tid < HAMMER2_INODE_START)
1536 pmp->inode_tid = HAMMER2_INODE_START;
1538 xop->head.cluster.focus->bref.modify_tid + 1;
1539 kprintf("PFS: Starting inode %jd\n",
1540 (intmax_t)pmp->inode_tid);
1541 kprintf("PMP focus good set nextino=%ld mod=%016jx\n",
1542 pmp->inode_tid, pmp->modify_tid);
1543 wakeup(&pmp->iroot);
1545 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1548 * Prime the mount info.
1550 hammer2_vfs_statfs(mp, &mp->mnt_stat, NULL);
1553 * With the cluster operational, check for and
1554 * install ihidden if needed. The install_hidden
1555 * code needs to get a transaction so we must unlock
1558 * This is only applicable to PFS mounts; there is no
1559 * hidden directory in the spmp.
1561 hammer2_inode_unlock(pmp->iroot);
1562 hammer2_inode_install_hidden(pmp);
1563 hammer2_inode_lock(pmp->iroot, HAMMER2_RESOLVE_SHARED);
1571 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1572 hammer2_inode_unlock(pmp->iroot);
1573 error = tsleep(&pmp->iroot, PCATCH, "h2root", hz);
1574 hammer2_inode_lock(pmp->iroot, HAMMER2_RESOLVE_SHARED);
1580 hammer2_inode_unlock(pmp->iroot);
1583 vp = hammer2_igetv(pmp->iroot, &error);
1584 hammer2_inode_unlock(pmp->iroot);
1594 * XXX incorporate ipdata->meta.inode_quota and data_quota
1598 hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
1602 hammer2_blockref_t bref;
1606 * NOTE: iroot might not have validated the cluster yet.
1610 mp->mnt_stat.f_files = 0;
1611 mp->mnt_stat.f_ffree = 0;
1612 mp->mnt_stat.f_blocks = 0;
1613 mp->mnt_stat.f_bfree = 0;
1614 mp->mnt_stat.f_bavail = 0;
1616 for (i = 0; i < pmp->iroot->cluster.nchains; ++i) {
1617 hmp = pmp->pfs_hmps[i];
1620 if (pmp->iroot->cluster.array[i].chain)
1621 bref = pmp->iroot->cluster.array[i].chain->bref;
1623 bzero(&bref, sizeof(bref));
1625 mp->mnt_stat.f_files = bref.inode_count;
1626 mp->mnt_stat.f_ffree = 0;
1627 mp->mnt_stat.f_blocks = (bref.data_count +
1628 hmp->voldata.allocator_free) /
1629 mp->mnt_vstat.f_bsize;
1630 mp->mnt_stat.f_bfree = hmp->voldata.allocator_free /
1631 mp->mnt_vstat.f_bsize;
1632 mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree;
1634 *sbp = mp->mnt_stat;
1641 hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
1645 hammer2_blockref_t bref;
1649 * NOTE: iroot might not have validated the cluster yet.
1653 mp->mnt_vstat.f_bsize = 0;
1654 mp->mnt_vstat.f_files = 0;
1655 mp->mnt_vstat.f_ffree = 0;
1656 mp->mnt_vstat.f_blocks = 0;
1657 mp->mnt_vstat.f_bfree = 0;
1658 mp->mnt_vstat.f_bavail = 0;
1660 for (i = 0; i < pmp->iroot->cluster.nchains; ++i) {
1661 hmp = pmp->pfs_hmps[i];
1664 if (pmp->iroot->cluster.array[i].chain)
1665 bref = pmp->iroot->cluster.array[i].chain->bref;
1667 bzero(&bref, sizeof(bref));
1669 mp->mnt_vstat.f_bsize = HAMMER2_PBUFSIZE;
1670 mp->mnt_vstat.f_files = bref.inode_count;
1671 mp->mnt_vstat.f_ffree = 0;
1672 mp->mnt_vstat.f_blocks = (bref.data_count +
1673 hmp->voldata.allocator_free) /
1674 mp->mnt_vstat.f_bsize;
1675 mp->mnt_vstat.f_bfree = hmp->voldata.allocator_free /
1676 mp->mnt_vstat.f_bsize;
1677 mp->mnt_vstat.f_bavail = mp->mnt_vstat.f_bfree;
1679 *sbp = mp->mnt_vstat;
1685 * Mount-time recovery (RW mounts)
1687 * Updates to the free block table are allowed to lag flushes by one
1688 * transaction. If a crash occurred, then on a fresh mount we must do an
1689 * incremental scan of the last committed transaction id and make sure that
1690 * all related blocks have been marked allocated.
1692 * The super-root topology and each PFS has its own transaction id domain,
1693 * so we must track PFS boundary transitions.
1695 struct hammer2_recovery_elm {
1696 TAILQ_ENTRY(hammer2_recovery_elm) entry;
1697 hammer2_chain_t *chain;
1698 hammer2_tid_t sync_tid;
1701 TAILQ_HEAD(hammer2_recovery_list, hammer2_recovery_elm);
1703 struct hammer2_recovery_info {
1704 struct hammer2_recovery_list list;
1709 static int hammer2_recovery_scan(hammer2_dev_t *hmp,
1710 hammer2_chain_t *parent,
1711 struct hammer2_recovery_info *info,
1712 hammer2_tid_t sync_tid);
1714 #define HAMMER2_RECOVERY_MAXDEPTH 10
1718 hammer2_recovery(hammer2_dev_t *hmp)
1720 struct hammer2_recovery_info info;
1721 struct hammer2_recovery_elm *elm;
1722 hammer2_chain_t *parent;
1723 hammer2_tid_t sync_tid;
1724 hammer2_tid_t mirror_tid;
1726 int cumulative_error = 0;
1728 hammer2_trans_init(hmp->spmp, 0);
1730 sync_tid = hmp->voldata.freemap_tid;
1731 mirror_tid = hmp->voldata.mirror_tid;
1733 kprintf("hammer2 mount \"%s\": ", hmp->devrepname);
1734 if (sync_tid >= mirror_tid) {
1735 kprintf(" no recovery needed\n");
1737 kprintf(" freemap recovery %016jx-%016jx\n",
1738 sync_tid + 1, mirror_tid);
1741 TAILQ_INIT(&info.list);
1743 parent = hammer2_chain_lookup_init(&hmp->vchain, 0);
1744 cumulative_error = hammer2_recovery_scan(hmp, parent,
1746 hammer2_chain_lookup_done(parent);
1748 while ((elm = TAILQ_FIRST(&info.list)) != NULL) {
1749 TAILQ_REMOVE(&info.list, elm, entry);
1750 parent = elm->chain;
1751 sync_tid = elm->sync_tid;
1752 kfree(elm, M_HAMMER2);
1754 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
1755 error = hammer2_recovery_scan(hmp, parent,
1756 &info, hmp->voldata.freemap_tid);
1757 hammer2_chain_unlock(parent);
1758 hammer2_chain_drop(parent); /* drop elm->chain ref */
1760 cumulative_error = error;
1762 hammer2_trans_done(hmp->spmp);
1764 return cumulative_error;
1769 hammer2_recovery_scan(hammer2_dev_t *hmp, hammer2_chain_t *parent,
1770 struct hammer2_recovery_info *info,
1771 hammer2_tid_t sync_tid)
1773 const hammer2_inode_data_t *ripdata;
1774 hammer2_chain_t *chain;
1776 int cumulative_error = 0;
1780 * Adjust freemap to ensure that the block(s) are marked allocated.
1782 if (parent->bref.type != HAMMER2_BREF_TYPE_VOLUME) {
1783 hammer2_freemap_adjust(hmp, &parent->bref,
1784 HAMMER2_FREEMAP_DORECOVER);
1788 * Check type for recursive scan
1790 switch(parent->bref.type) {
1791 case HAMMER2_BREF_TYPE_VOLUME:
1792 /* data already instantiated */
1794 case HAMMER2_BREF_TYPE_INODE:
1796 * Must instantiate data for DIRECTDATA test and also
1799 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
1800 ripdata = &hammer2_chain_rdata(parent)->ipdata;
1801 if (ripdata->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) {
1802 /* not applicable to recovery scan */
1803 hammer2_chain_unlock(parent);
1806 hammer2_chain_unlock(parent);
1808 case HAMMER2_BREF_TYPE_INDIRECT:
1810 * Must instantiate data for recursion
1812 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
1813 hammer2_chain_unlock(parent);
1815 case HAMMER2_BREF_TYPE_DATA:
1816 case HAMMER2_BREF_TYPE_FREEMAP:
1817 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
1818 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
1819 /* not applicable to recovery scan */
1827 * Defer operation if depth limit reached or if we are crossing a
1830 if (info->depth >= HAMMER2_RECOVERY_MAXDEPTH) {
1831 struct hammer2_recovery_elm *elm;
1833 elm = kmalloc(sizeof(*elm), M_HAMMER2, M_ZERO | M_WAITOK);
1834 elm->chain = parent;
1835 elm->sync_tid = sync_tid;
1836 hammer2_chain_ref(parent);
1837 TAILQ_INSERT_TAIL(&info->list, elm, entry);
1838 /* unlocked by caller */
1845 * Recursive scan of the last flushed transaction only. We are
1846 * doing this without pmp assignments so don't leave the chains
1847 * hanging around after we are done with them.
1850 chain = hammer2_chain_scan(parent, NULL, &cache_index,
1851 HAMMER2_LOOKUP_NODATA);
1853 atomic_set_int(&chain->flags, HAMMER2_CHAIN_RELEASE);
1854 if (chain->bref.mirror_tid > sync_tid) {
1856 error = hammer2_recovery_scan(hmp, chain,
1860 cumulative_error = error;
1864 * Flush the recovery at the PFS boundary to stage it for
1865 * the final flush of the super-root topology.
1867 if ((chain->bref.flags & HAMMER2_BREF_FLAG_PFSROOT) &&
1868 (chain->flags & HAMMER2_CHAIN_ONFLUSH)) {
1869 hammer2_flush(chain, info->mtid, 1);
1871 chain = hammer2_chain_scan(parent, chain, &cache_index,
1872 HAMMER2_LOOKUP_NODATA);
1875 return cumulative_error;
1879 * Sync a mount point; this is called on a per-mount basis from the
1880 * filesystem syncer process periodically and whenever a user issues
1884 hammer2_vfs_sync(struct mount *mp, int waitfor)
1886 hammer2_xop_flush_t *xop;
1887 struct hammer2_sync_info info;
1888 hammer2_inode_t *iroot;
1896 KKASSERT(iroot->pmp == pmp);
1899 * We can't acquire locks on existing vnodes while in a transaction
1900 * without risking a deadlock. This assumes that vfsync() can be
1901 * called without the vnode locked (which it can in DragonFly).
1902 * Otherwise we'd have to implement a multi-pass or flag the lock
1903 * failures and retry.
1905 * The reclamation code interlocks with the sync list's token
1906 * (by removing the vnode from the scan list) before unlocking
1907 * the inode, giving us time to ref the inode.
1909 /*flags = VMSC_GETVP;*/
1911 if (waitfor & MNT_LAZY)
1912 flags |= VMSC_ONEPASS;
1916 * Preflush the vnodes using a normal transaction before interlocking
1917 * with a flush transaction.
1919 hammer2_trans_init(pmp, 0);
1921 info.waitfor = MNT_NOWAIT;
1922 vsyncscan(mp, flags | VMSC_NOWAIT, hammer2_sync_scan2, &info);
1923 hammer2_trans_done(pmp);
1927 * Start our flush transaction. This does not return until all
1928 * concurrent transactions have completed and will prevent any
1929 * new transactions from running concurrently, except for the
1930 * buffer cache transactions.
1932 * For efficiency do an async pass before making sure with a
1933 * synchronous pass on all related buffer cache buffers. It
1934 * should theoretically not be possible for any new file buffers
1935 * to be instantiated during this sequence.
1937 hammer2_trans_init(pmp, HAMMER2_TRANS_ISFLUSH |
1938 HAMMER2_TRANS_PREFLUSH);
1939 hammer2_inode_run_unlinkq(pmp);
1942 info.waitfor = MNT_NOWAIT;
1943 vsyncscan(mp, flags | VMSC_NOWAIT, hammer2_sync_scan2, &info);
1944 info.waitfor = MNT_WAIT;
1945 vsyncscan(mp, flags, hammer2_sync_scan2, &info);
1948 * Clear PREFLUSH. This prevents (or asserts on) any new logical
1949 * buffer cache flushes which occur during the flush. Device buffers
1952 hammer2_bioq_sync(pmp);
1953 hammer2_trans_clear_preflush(pmp);
1956 * Use the XOP interface to concurrently flush all nodes to
1957 * synchronize the PFSROOT subtopology to the media. A standard
1958 * end-of-scan ENOENT error indicates cluster sufficiency.
1960 * Note that this flush will not be visible on crash recovery until
1961 * we flush the super-root topology in the next loop.
1963 * XXX For now wait for all flushes to complete.
1966 xop = hammer2_xop_alloc(iroot, HAMMER2_XOP_MODIFYING);
1967 hammer2_xop_start(&xop->head, hammer2_inode_xop_flush);
1968 error = hammer2_xop_collect(&xop->head,
1969 HAMMER2_XOP_COLLECT_WAITALL);
1970 if (error == ENOENT)
1975 hammer2_trans_done(pmp);
1983 * Note that we ignore the transaction mtid we got above. Instead,
1984 * each vfsync below will ultimately get its own via TRANS_BUFCACHE
1988 hammer2_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
1990 struct hammer2_sync_info *info = data;
1991 hammer2_inode_t *ip;
1995 * Degenerate cases. Note that ip == NULL typically means the
1996 * syncer vnode itself and we don't want to vclrisdirty() in that
2003 if (vp->v_type == VNON || vp->v_type == VBAD) {
2009 * VOP_FSYNC will start a new transaction so replicate some code
2010 * here to do it inline (see hammer2_vop_fsync()).
2012 * WARNING: The vfsync interacts with the buffer cache and might
2013 * block; we can't hold the inode lock at that time.
2014 * However, we MUST ref ip before blocking to ensure that
2015 * it isn't ripped out from under us (since we do not
2016 * hold a lock on the vnode).
2018 hammer2_inode_ref(ip);
2019 if ((ip->flags & HAMMER2_INODE_MODIFIED) ||
2020 !RB_EMPTY(&vp->v_rbdirty_tree)) {
2021 vfsync(vp, info->waitfor, 1, NULL, NULL);
2022 hammer2_inode_fsync(ip);
2024 if ((ip->flags & HAMMER2_INODE_MODIFIED) == 0 &&
2025 RB_EMPTY(&vp->v_rbdirty_tree)) {
2029 hammer2_inode_drop(ip);
2033 info->error = error;
2040 hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp)
2047 hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
2048 struct fid *fhp, struct vnode **vpp)
2055 hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
2056 int *exflagsp, struct ucred **credanonp)
2062 * Support code for hammer2_vfs_mount(). Read, verify, and install the volume
2063 * header into the HMP
2065 * XXX read four volhdrs and use the one with the highest TID whose CRC
2070 * XXX For filesystems with fewer than 4 volhdrs, make sure not to write to
2071 * nonexistent locations.
2073 * XXX Record selected volhdr and ring updates to each of 4 volhdrs
2077 hammer2_install_volume_header(hammer2_dev_t *hmp)
2079 hammer2_volume_data_t *vd;
2081 hammer2_crc32_t crc0, crc, bcrc0, bcrc;
2093 * There are up to 4 copies of the volume header (syncs iterate
2094 * between them so there is no single master). We don't trust the
2095 * volu_size field so we don't know precisely how large the filesystem
2096 * is, so depend on the OS to return an error if we go beyond the
2097 * block device's EOF.
2099 for (i = 0; i < HAMMER2_NUM_VOLHDRS; i++) {
2100 error = bread(hmp->devvp, i * HAMMER2_ZONE_BYTES64,
2101 HAMMER2_VOLUME_BYTES, &bp);
2108 vd = (struct hammer2_volume_data *) bp->b_data;
2109 if ((vd->magic != HAMMER2_VOLUME_ID_HBO) &&
2110 (vd->magic != HAMMER2_VOLUME_ID_ABO)) {
2116 if (vd->magic == HAMMER2_VOLUME_ID_ABO) {
2117 /* XXX: Reversed-endianness filesystem */
2118 kprintf("hammer2: reverse-endian filesystem detected\n");
2124 crc = vd->icrc_sects[HAMMER2_VOL_ICRC_SECT0];
2125 crc0 = hammer2_icrc32(bp->b_data + HAMMER2_VOLUME_ICRC0_OFF,
2126 HAMMER2_VOLUME_ICRC0_SIZE);
2127 bcrc = vd->icrc_sects[HAMMER2_VOL_ICRC_SECT1];
2128 bcrc0 = hammer2_icrc32(bp->b_data + HAMMER2_VOLUME_ICRC1_OFF,
2129 HAMMER2_VOLUME_ICRC1_SIZE);
2130 if ((crc0 != crc) || (bcrc0 != bcrc)) {
2131 kprintf("hammer2 volume header crc "
2132 "mismatch copy #%d %08x/%08x\n",
2139 if (valid == 0 || hmp->voldata.mirror_tid < vd->mirror_tid) {
2148 hmp->volsync = hmp->voldata;
2150 if (error_reported || bootverbose || 1) { /* 1/DEBUG */
2151 kprintf("hammer2: using volume header #%d\n",
2156 kprintf("hammer2: no valid volume headers found!\n");
2162 * This handles hysteresis on regular file flushes. Because the BIOs are
2163 * routed to a thread it is possible for an excessive number to build up
2164 * and cause long front-end stalls long before the runningbuffspace limit
2165 * is hit, so we implement hammer2_flush_pipe to control the
2168 * This is a particular problem when compression is used.
2171 hammer2_lwinprog_ref(hammer2_pfs_t *pmp)
2173 atomic_add_int(&pmp->count_lwinprog, 1);
2177 hammer2_lwinprog_drop(hammer2_pfs_t *pmp)
2181 lwinprog = atomic_fetchadd_int(&pmp->count_lwinprog, -1);
2182 if ((lwinprog & HAMMER2_LWINPROG_WAITING) &&
2183 (lwinprog & HAMMER2_LWINPROG_MASK) <= hammer2_flush_pipe * 2 / 3) {
2184 atomic_clear_int(&pmp->count_lwinprog,
2185 HAMMER2_LWINPROG_WAITING);
2186 wakeup(&pmp->count_lwinprog);
2188 if ((lwinprog & HAMMER2_LWINPROG_WAITING0) &&
2189 (lwinprog & HAMMER2_LWINPROG_MASK) <= 0) {
2190 atomic_clear_int(&pmp->count_lwinprog,
2191 HAMMER2_LWINPROG_WAITING0);
2192 wakeup(&pmp->count_lwinprog);
2197 hammer2_lwinprog_wait(hammer2_pfs_t *pmp, int flush_pipe)
2200 int lwflag = (flush_pipe) ? HAMMER2_LWINPROG_WAITING :
2201 HAMMER2_LWINPROG_WAITING0;
2204 lwinprog = pmp->count_lwinprog;
2206 if ((lwinprog & HAMMER2_LWINPROG_MASK) <= flush_pipe)
2208 tsleep_interlock(&pmp->count_lwinprog, 0);
2209 atomic_set_int(&pmp->count_lwinprog, lwflag);
2210 lwinprog = pmp->count_lwinprog;
2211 if ((lwinprog & HAMMER2_LWINPROG_MASK) <= flush_pipe)
2213 tsleep(&pmp->count_lwinprog, PINTERLOCKED, "h2wpipe", hz);
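/*
 * Illustrative sketch of how the hysteresis above is intended to be
 * used (assumed from these primitives, not copied from the write path):
 * the front end refs the counter for each logical write BIO handed to
 * the backend, throttles itself against hammer2_flush_pipe, and the
 * backend drops the ref when the BIO completes.
 *
 *	hammer2_lwinprog_ref(pmp);
 *	// ... queue the logical write BIO to the backend ...
 *	hammer2_lwinprog_wait(pmp, hammer2_flush_pipe);
 *
 *	// backend / completion side:
 *	hammer2_lwinprog_drop(pmp);
 */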
2218 * Manage excessive memory resource use for chain and related
2222 hammer2_pfs_memory_wait(hammer2_pfs_t *pmp)
2232 * Atomic check condition and wait. Also do an early speedup of
2233 * the syncer to try to avoid hitting the wait.
2236 waiting = pmp->inmem_dirty_chains;
2238 count = waiting & HAMMER2_DIRTYCHAIN_MASK;
2240 limit = pmp->mp->mnt_nvnodelistsize / 10;
2241 if (limit < hammer2_limit_dirty_chains)
2242 limit = hammer2_limit_dirty_chains;
2247 if ((int)(ticks - zzticks) > hz) {
2249 kprintf("count %ld %ld\n", count, limit);
2254 * Block if there are too many dirty chains present, wait
2255 * for the flush to clean some out.
2257 if (count > limit) {
2258 tsleep_interlock(&pmp->inmem_dirty_chains, 0);
2259 if (atomic_cmpset_int(&pmp->inmem_dirty_chains,
2261 waiting | HAMMER2_DIRTYCHAIN_WAITING)) {
2262 speedup_syncer(pmp->mp);
2263 tsleep(&pmp->inmem_dirty_chains, PINTERLOCKED,
2266 continue; /* loop on success or fail */
2270 * Try to start an early flush before we are forced to block.
2272 if (count > limit * 7 / 10)
2273 speedup_syncer(pmp->mp);
2279 hammer2_pfs_memory_inc(hammer2_pfs_t *pmp)
2282 atomic_add_int(&pmp->inmem_dirty_chains, 1);
2287 hammer2_pfs_memory_wakeup(hammer2_pfs_t *pmp)
2295 waiting = pmp->inmem_dirty_chains;
2297 if (atomic_cmpset_int(&pmp->inmem_dirty_chains,
2300 ~HAMMER2_DIRTYCHAIN_WAITING)) {
2305 if (waiting & HAMMER2_DIRTYCHAIN_WAITING)
2306 wakeup(&pmp->inmem_dirty_chains);
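/*
 * Summary of the dirty-chain accounting protocol as used elsewhere in
 * this file (see hammer2_pfsalloc() and the unmount path above):
 *
 *	hammer2_pfs_memory_wait(pmp);	// throttle before dirtying
 *	// ... a chain transitions to HAMMER2_CHAIN_MODIFIED ...
 *	hammer2_pfs_memory_inc(pmp);
 *	// ... later, when the chain is cleaned or retired ...
 *	hammer2_pfs_memory_wakeup(pmp);	// decrement + wake waiters
 */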
2313 hammer2_dump_chain(hammer2_chain_t *chain, int tab, int *countp, char pfx)
2315 hammer2_chain_t *scan;
2316 hammer2_chain_t *parent;
2320 kprintf("%*.*s...\n", tab, tab, "");
2325 kprintf("%*.*s%c-chain %p.%d %016jx/%d mir=%016jx\n",
2327 chain, chain->bref.type,
2328 chain->bref.key, chain->bref.keybits,
2329 chain->bref.mirror_tid);
2331 kprintf("%*.*s [%08x] (%s) refs=%d",
2334 ((chain->bref.type == HAMMER2_BREF_TYPE_INODE &&
2335 chain->data) ? (char *)chain->data->ipdata.filename : "?"),
2338 parent = chain->parent;
2340 kprintf("\n%*.*s p=%p [pflags %08x prefs %d",
2342 parent, parent->flags, parent->refs);
2343 if (RB_EMPTY(&chain->core.rbtree)) {
2347 RB_FOREACH(scan, hammer2_chain_tree, &chain->core.rbtree)
2348 hammer2_dump_chain(scan, tab + 4, countp, 'a');
2349 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE && chain->data)
2350 kprintf("%*.*s}(%s)\n", tab, tab, "",
2351 chain->data->ipdata.filename);
2353 kprintf("%*.*s}\n", tab, tab, "");