2 * Copyright (c) 2011-2015 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
6 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
18 * 3. Neither the name of The DragonFly Project nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific, prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/nlookup.h>
39 #include <sys/vnode.h>
40 #include <sys/mount.h>
41 #include <sys/fcntl.h>
44 #include <sys/vfsops.h>
45 #include <sys/sysctl.h>
46 #include <sys/socket.h>
47 #include <sys/objcache.h>
50 #include <sys/namei.h>
51 #include <sys/mountctl.h>
52 #include <sys/dirent.h>
55 #include <sys/mutex.h>
56 #include <sys/mutex2.h>
59 #include "hammer2_disk.h"
60 #include "hammer2_mount.h"
61 #include "hammer2_lz4.h"
63 #include "zlib/hammer2_zlib.h"
65 #define REPORT_REFS_ERRORS 1 /* XXX remove me */
67 MALLOC_DEFINE(M_OBJCACHE, "objcache", "Object Cache");
69 struct hammer2_sync_info {
74 TAILQ_HEAD(hammer2_mntlist, hammer2_dev);
75 TAILQ_HEAD(hammer2_pfslist, hammer2_pfs);
76 static struct hammer2_mntlist hammer2_mntlist;
77 static struct hammer2_pfslist hammer2_pfslist;
78 static struct lock hammer2_mntlk;
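/*
 * hammer2_mntlist tracks hammer2_dev (media) mounts and hammer2_pfslist
 * tracks hammer2_pfs (PFS cluster) structures.  Both lists appear to be
 * protected by hammer2_mntlk, which the mount and unmount paths below
 * take exclusively before walking or modifying them.
 */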
81 int hammer2_cluster_enable = 1;
82 int hammer2_hardlink_enable = 1;
83 int hammer2_flush_pipe = 100;
84 int hammer2_synchronous_flush = 1;
85 int hammer2_dio_count;
86 long hammer2_limit_dirty_chains;
87 long hammer2_iod_file_read;
88 long hammer2_iod_meta_read;
89 long hammer2_iod_indr_read;
90 long hammer2_iod_fmap_read;
91 long hammer2_iod_volu_read;
92 long hammer2_iod_file_write;
93 long hammer2_iod_meta_write;
94 long hammer2_iod_indr_write;
95 long hammer2_iod_fmap_write;
96 long hammer2_iod_volu_write;
97 long hammer2_ioa_file_read;
98 long hammer2_ioa_meta_read;
99 long hammer2_ioa_indr_read;
100 long hammer2_ioa_fmap_read;
101 long hammer2_ioa_volu_read;
102 long hammer2_ioa_fmap_write;
103 long hammer2_ioa_file_write;
104 long hammer2_ioa_meta_write;
105 long hammer2_ioa_indr_write;
106 long hammer2_ioa_volu_write;
108 MALLOC_DECLARE(M_HAMMER2_CBUFFER);
109 MALLOC_DEFINE(M_HAMMER2_CBUFFER, "HAMMER2-compbuffer",
110 "Buffer used for compression.");
112 MALLOC_DECLARE(M_HAMMER2_DEBUFFER);
113 MALLOC_DEFINE(M_HAMMER2_DEBUFFER, "HAMMER2-decompbuffer",
114 "Buffer used for decompression.");
116 SYSCTL_NODE(_vfs, OID_AUTO, hammer2, CTLFLAG_RW, 0, "HAMMER2 filesystem");
118 SYSCTL_INT(_vfs_hammer2, OID_AUTO, debug, CTLFLAG_RW,
119 &hammer2_debug, 0, "");
120 SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_enable, CTLFLAG_RW,
121 &hammer2_cluster_enable, 0, "");
122 SYSCTL_INT(_vfs_hammer2, OID_AUTO, hardlink_enable, CTLFLAG_RW,
123 &hammer2_hardlink_enable, 0, "");
124 SYSCTL_INT(_vfs_hammer2, OID_AUTO, flush_pipe, CTLFLAG_RW,
125 &hammer2_flush_pipe, 0, "");
126 SYSCTL_INT(_vfs_hammer2, OID_AUTO, synchronous_flush, CTLFLAG_RW,
127 &hammer2_synchronous_flush, 0, "");
128 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, limit_dirty_chains, CTLFLAG_RW,
129 &hammer2_limit_dirty_chains, 0, "");
130 SYSCTL_INT(_vfs_hammer2, OID_AUTO, dio_count, CTLFLAG_RD,
131 &hammer2_dio_count, 0, "");
133 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_read, CTLFLAG_RW,
134 &hammer2_iod_file_read, 0, "");
135 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_read, CTLFLAG_RW,
136 &hammer2_iod_meta_read, 0, "");
137 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_read, CTLFLAG_RW,
138 &hammer2_iod_indr_read, 0, "");
139 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_fmap_read, CTLFLAG_RW,
140 &hammer2_iod_fmap_read, 0, "");
141 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_read, CTLFLAG_RW,
142 &hammer2_iod_volu_read, 0, "");
144 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_write, CTLFLAG_RW,
145 &hammer2_iod_file_write, 0, "");
146 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_write, CTLFLAG_RW,
147 &hammer2_iod_meta_write, 0, "");
148 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_write, CTLFLAG_RW,
149 &hammer2_iod_indr_write, 0, "");
150 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_fmap_write, CTLFLAG_RW,
151 &hammer2_iod_fmap_write, 0, "");
152 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_write, CTLFLAG_RW,
153 &hammer2_iod_volu_write, 0, "");
155 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_file_read, CTLFLAG_RW,
156 &hammer2_ioa_file_read, 0, "");
157 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_meta_read, CTLFLAG_RW,
158 &hammer2_ioa_meta_read, 0, "");
159 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_indr_read, CTLFLAG_RW,
160 &hammer2_ioa_indr_read, 0, "");
161 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_fmap_read, CTLFLAG_RW,
162 &hammer2_ioa_fmap_read, 0, "");
163 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_volu_read, CTLFLAG_RW,
164 &hammer2_ioa_volu_read, 0, "");
166 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_file_write, CTLFLAG_RW,
167 &hammer2_ioa_file_write, 0, "");
168 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_meta_write, CTLFLAG_RW,
169 &hammer2_ioa_meta_write, 0, "");
170 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_indr_write, CTLFLAG_RW,
171 &hammer2_ioa_indr_write, 0, "");
172 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_fmap_write, CTLFLAG_RW,
173 &hammer2_ioa_fmap_write, 0, "");
174 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_volu_write, CTLFLAG_RW,
175 &hammer2_ioa_volu_write, 0, "");
177 static int hammer2_vfs_init(struct vfsconf *conf);
178 static int hammer2_vfs_uninit(struct vfsconf *vfsp);
179 static int hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
181 static int hammer2_remount(hammer2_dev_t *, struct mount *, char *,
182 struct vnode *, struct ucred *);
183 static int hammer2_recovery(hammer2_dev_t *hmp);
184 static int hammer2_vfs_unmount(struct mount *mp, int mntflags);
185 static int hammer2_vfs_root(struct mount *mp, struct vnode **vpp);
186 static int hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp,
188 static int hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
190 static int hammer2_vfs_vget(struct mount *mp, struct vnode *dvp,
191 ino_t ino, struct vnode **vpp);
192 static int hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
193 struct fid *fhp, struct vnode **vpp);
194 static int hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp);
195 static int hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
196 int *exflagsp, struct ucred **credanonp);
198 static int hammer2_install_volume_header(hammer2_dev_t *hmp);
199 static int hammer2_sync_scan2(struct mount *mp, struct vnode *vp, void *data);
201 static void hammer2_update_pmps(hammer2_dev_t *hmp);
203 static void hammer2_mount_helper(struct mount *mp, hammer2_pfs_t *pmp);
204 static void hammer2_unmount_helper(struct mount *mp, hammer2_pfs_t *pmp,
208 * HAMMER2 vfs operations.
210 static struct vfsops hammer2_vfsops = {
211 .vfs_init = hammer2_vfs_init,
212 .vfs_uninit = hammer2_vfs_uninit,
213 .vfs_sync = hammer2_vfs_sync,
214 .vfs_mount = hammer2_vfs_mount,
215 .vfs_unmount = hammer2_vfs_unmount,
216 .vfs_root = hammer2_vfs_root,
217 .vfs_statfs = hammer2_vfs_statfs,
218 .vfs_statvfs = hammer2_vfs_statvfs,
219 .vfs_vget = hammer2_vfs_vget,
220 .vfs_vptofh = hammer2_vfs_vptofh,
221 .vfs_fhtovp = hammer2_vfs_fhtovp,
222 .vfs_checkexp = hammer2_vfs_checkexp
225 MALLOC_DEFINE(M_HAMMER2, "HAMMER2-mount", "");
227 VFS_SET(hammer2_vfsops, hammer2, 0);
228 MODULE_VERSION(hammer2, 1);
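/*
 * hammer2_vfs_init() below sanity-checks the on-media structure sizes
 * against their compile-time constants, creates the object caches backing
 * the compression/decompression buffers and XOP requests, and initializes
 * the global mount/PFS lists, their lock, and the default dirty-chain
 * limit (10% of desiredvnodes).
 */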
232 hammer2_vfs_init(struct vfsconf *conf)
234 static struct objcache_malloc_args margs_read;
235 static struct objcache_malloc_args margs_write;
236 static struct objcache_malloc_args margs_vop;
242 if (HAMMER2_BLOCKREF_BYTES != sizeof(struct hammer2_blockref))
244 if (HAMMER2_INODE_BYTES != sizeof(struct hammer2_inode_data))
246 if (HAMMER2_VOLUME_BYTES != sizeof(struct hammer2_volume_data))
250 kprintf("HAMMER2 structure size mismatch; cannot continue.\n");
252 margs_read.objsize = 65536;
253 margs_read.mtype = M_HAMMER2_DEBUFFER;
255 margs_write.objsize = 32768;
256 margs_write.mtype = M_HAMMER2_CBUFFER;
258 margs_vop.objsize = sizeof(hammer2_xop_t);
259 margs_vop.mtype = M_HAMMER2;
262 * Note that for the XOPS cache we want backing store allocations
263 * to use M_ZERO. This is not allowed in objcache_get() (to avoid
264 * confusion), so use the backing store function that does it. This
265 * means that initial XOPS objects are zeroed but REUSED objects are
266 * not. So we are responsible for cleaning the object up sufficiently
267 * for our needs before objcache_put()ing it back (typically just the
270 cache_buffer_read = objcache_create(margs_read.mtype->ks_shortdesc,
271 0, 1, NULL, NULL, NULL,
272 objcache_malloc_alloc,
273 objcache_malloc_free,
275 cache_buffer_write = objcache_create(margs_write.mtype->ks_shortdesc,
276 0, 1, NULL, NULL, NULL,
277 objcache_malloc_alloc,
278 objcache_malloc_free,
280 cache_xops = objcache_create(margs_vop.mtype->ks_shortdesc,
281 0, 1, NULL, NULL, NULL,
282 objcache_malloc_alloc_zero,
283 objcache_malloc_free,
287 lockinit(&hammer2_mntlk, "mntlk", 0, 0);
288 TAILQ_INIT(&hammer2_mntlist);
289 TAILQ_INIT(&hammer2_pfslist);
291 hammer2_limit_dirty_chains = desiredvnodes / 10;
298 hammer2_vfs_uninit(struct vfsconf *vfsp __unused)
300 objcache_destroy(cache_buffer_read);
301 objcache_destroy(cache_buffer_write);
302 objcache_destroy(cache_xops);
307 * Core PFS allocator. Used to allocate the pmp structure for PFS cluster
308 * mounts and the spmp structure for media (hmp) structures.
310 * pmp->modify_tid tracks new modify_tid transaction ids for front-end
311 * transactions. Note that synchronization does not use this field.
312 * (typically frontend operations and synchronization cannot run on the
313 * same PFS node at the same time).
318 hammer2_pfsalloc(hammer2_cluster_t *cluster,
319 const hammer2_inode_data_t *ripdata,
320 hammer2_tid_t modify_tid)
322 hammer2_chain_t *rchain;
323 hammer2_inode_t *iroot;
330 * Locate or create the PFS based on the cluster id. If ripdata
331 * is NULL this is a spmp which is unique and is always allocated.
334 TAILQ_FOREACH(pmp, &hammer2_pfslist, mntentry) {
335 if (bcmp(&pmp->pfs_clid, &ripdata->meta.pfs_clid,
336 sizeof(pmp->pfs_clid)) == 0) {
345 pmp = kmalloc(sizeof(*pmp), M_HAMMER2, M_WAITOK | M_ZERO);
346 hammer2_trans_manage_init(pmp);
347 kmalloc_create(&pmp->minode, "HAMMER2-inodes");
348 kmalloc_create(&pmp->mmsg, "HAMMER2-pfsmsg");
349 lockinit(&pmp->lock, "pfslk", 0, 0);
350 spin_init(&pmp->inum_spin, "hm2pfsalloc_inum");
351 RB_INIT(&pmp->inum_tree);
352 TAILQ_INIT(&pmp->unlinkq);
353 spin_init(&pmp->list_spin, "hm2pfsalloc_list");
355 for (j = 0; j < HAMMER2_XOPGROUPS; ++j)
356 hammer2_xop_group_init(pmp, &pmp->xop_groups[j]);
359 * Save the last media transaction id for the flusher. Set
363 pmp->pfs_clid = ripdata->meta.pfs_clid;
364 TAILQ_INSERT_TAIL(&hammer2_pfslist, pmp, mntentry);
367 * The synchronization thread may start too early; make
368 * sure it stays frozen until we are ready to let it go.
372 pmp->primary_thr.flags = HAMMER2_THREAD_FROZEN |
373 HAMMER2_THREAD_REMASTER;
378 * Create the PFS's root inode.
380 if ((iroot = pmp->iroot) == NULL) {
381 iroot = hammer2_inode_get(pmp, NULL, NULL);
383 hammer2_inode_ref(iroot);
384 hammer2_inode_unlock(iroot, NULL);
388 * Stop here if no cluster is passed in.
394 * When a cluster is passed in we must add the cluster's chains
395 * to the PFS's root inode, update pmp->pfs_types[], and update
396 * the synchronization threads.
398 * At the moment empty spots can develop due to removals or failures.
399 * Ultimately we want to re-fill these spots but doing so might
400 * confuse running code. XXX
402 hammer2_inode_ref(iroot);
403 hammer2_mtx_ex(&iroot->lock);
404 j = iroot->cluster.nchains;
406 kprintf("add PFS to pmp %p[%d]\n", pmp, j);
408 for (i = 0; i < cluster->nchains; ++i) {
409 if (j == HAMMER2_MAXCLUSTER)
411 rchain = cluster->array[i].chain;
412 KKASSERT(rchain->pmp == NULL);
414 hammer2_chain_ref(rchain);
415 iroot->cluster.array[j].chain = rchain;
416 pmp->pfs_types[j] = ripdata->meta.pfs_type;
417 pmp->pfs_names[j] = kstrdup(ripdata->filename, M_HAMMER2);
420 * If the PFS is already mounted we must account
421 * for the mount_count here.
424 ++rchain->hmp->mount_count;
427 * May have to fixup dirty chain tracking. Previous
428 * pmp was NULL so nothing to undo.
430 if (rchain->flags & HAMMER2_CHAIN_MODIFIED)
431 hammer2_pfs_memory_inc(pmp);
434 iroot->cluster.nchains = j;
436 if (i != cluster->nchains) {
437 kprintf("hammer2_mount: cluster full!\n");
438 /* XXX fatal error? */
442 * Update nmasters from any PFS inode which is part of the cluster.
443 * It is possible that this will result in a value which is too
444 * high. MASTER PFSs are authoritative for pfs_nmasters and will
445 * override this value later on.
447 * (This informs us of masters that might not currently be
448 * discoverable by this mount).
450 if (ripdata && pmp->pfs_nmasters < ripdata->meta.pfs_nmasters) {
451 pmp->pfs_nmasters = ripdata->meta.pfs_nmasters;
455 * Count visible masters. Masters are usually added with
456 * ripdata->meta.pfs_nmasters set to 1. This detects when there
457 * are more (XXX and must update the master inodes).
460 for (i = 0; i < iroot->cluster.nchains; ++i) {
461 if (pmp->pfs_types[i] == HAMMER2_PFSTYPE_MASTER)
464 if (pmp->pfs_nmasters < count)
465 pmp->pfs_nmasters = count;
468 * Create missing synchronization and support threads.
470 * Single-node masters (including snapshots) have nothing to
471 * synchronize and do not require this thread.
473 * Multi-node masters or any number of soft masters, slaves, copy,
474 * or other PFS types need the thread.
476 * Each thread is responsible for its particular cluster index.
477 * We use independent threads so stalls or mismatches related to
478 * any given target do not affect other targets.
480 for (i = 0; i < iroot->cluster.nchains; ++i) {
482 * Single-node masters (including snapshots) have nothing
483 * to synchronize and will make direct xops support calls,
484 * thus they do not require this thread.
486 * Note that there can be thousands of snapshots. We do not
487 * want to create thousands of threads.
489 if (pmp->pfs_nmasters <= 1 &&
490 pmp->pfs_types[i] == HAMMER2_PFSTYPE_MASTER) {
495 * Sync support thread
497 if (pmp->sync_thrs[i].td == NULL) {
498 hammer2_thr_create(&pmp->sync_thrs[i], pmp,
500 hammer2_primary_sync_thread);
505 * Create missing Xop threads
508 hammer2_xop_helper_create(pmp);
510 hammer2_mtx_unlock(&iroot->lock);
511 hammer2_inode_drop(iroot);
517 * Destroy a PFS. This typically only occurs after the last mount on a device
521 hammer2_pfsfree(hammer2_pfs_t *pmp)
523 hammer2_inode_t *iroot;
528 * Cleanup our reference on iroot. iroot is not (should not be) needed
531 TAILQ_REMOVE(&hammer2_pfslist, pmp, mntentry);
535 for (i = 0; i < iroot->cluster.nchains; ++i) {
536 hammer2_thr_delete(&pmp->sync_thrs[i]);
537 for (j = 0; j < HAMMER2_XOPGROUPS; ++j)
538 hammer2_thr_delete(&pmp->xop_groups[j].thrs[i]);
540 #if REPORT_REFS_ERRORS
541 if (pmp->iroot->refs != 1)
542 kprintf("PMP->IROOT %p REFS WRONG %d\n",
543 pmp->iroot, pmp->iroot->refs);
545 KKASSERT(pmp->iroot->refs == 1);
547 /* ref for pmp->iroot */
548 hammer2_inode_drop(pmp->iroot);
552 kmalloc_destroy(&pmp->mmsg);
553 kmalloc_destroy(&pmp->minode);
555 kfree(pmp, M_HAMMER2);
559 * Remove all references to hmp from the pfs list. Any PFS which becomes
560 * empty is terminated and freed.
565 hammer2_pfsfree_scan(hammer2_dev_t *hmp)
568 hammer2_inode_t *iroot;
569 hammer2_cluster_t *cluster;
570 hammer2_chain_t *rchain;
576 TAILQ_FOREACH(pmp, &hammer2_pfslist, mntentry) {
577 if ((iroot = pmp->iroot) == NULL)
579 if (hmp->spmp == pmp) {
580 kprintf("unmount hmp %p remove spmp %p\n",
586 * Determine if this PFS is affected. If it is we must
587 * freeze all management threads and lock its iroot.
589 * Freezing a management thread forces it idle; operations
590 * in progress will be aborted and it will have to start
591 * over again when unfrozen, or exit if told to exit.
593 cluster = &iroot->cluster;
594 for (i = 0; i < cluster->nchains; ++i) {
595 rchain = cluster->array[i].chain;
596 if (rchain == NULL || rchain->hmp != hmp)
600 if (i != cluster->nchains) {
602 * Make sure all synchronization threads are locked
605 for (i = 0; i < iroot->cluster.nchains; ++i) {
606 hammer2_thr_freeze_async(&pmp->sync_thrs[i]);
607 for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
608 hammer2_thr_freeze_async(
609 &pmp->xop_groups[j].thrs[i]);
612 for (i = 0; i < iroot->cluster.nchains; ++i) {
613 hammer2_thr_freeze(&pmp->sync_thrs[i]);
614 for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
616 &pmp->xop_groups[j].thrs[i]);
621 * Lock the inode and clean out matching chains.
622 * Note that we cannot use hammer2_inode_lock_*()
623 * here because that would attempt to validate the
624 * cluster that we are in the middle of ripping
627 * WARNING! We are working directly on the inodes
630 hammer2_mtx_ex(&iroot->lock);
633 * Remove the chain from matching elements of the PFS.
635 for (i = 0; i < cluster->nchains; ++i) {
636 rchain = cluster->array[i].chain;
637 if (rchain == NULL || rchain->hmp != hmp)
639 hammer2_thr_delete(&pmp->sync_thrs[i]);
640 for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
642 &pmp->xop_groups[j].thrs[i]);
644 rchain = cluster->array[i].chain;
645 cluster->array[i].chain = NULL;
646 pmp->pfs_types[i] = 0;
647 if (pmp->pfs_names[i]) {
648 kfree(pmp->pfs_names[i], M_HAMMER2);
649 pmp->pfs_names[i] = NULL;
651 hammer2_chain_drop(rchain);
654 if (cluster->focus == rchain)
655 cluster->focus = NULL;
657 hammer2_mtx_unlock(&iroot->lock);
658 didfreeze = 1; /* remaster, unfreeze down below */
664 * Cleanup trailing chains. Do not reorder chains (for now).
665 * XXX might remove more than we intended.
668 if (cluster->array[i - 1].chain)
672 cluster->nchains = i;
675 * If the PMP has no elements remaining we can destroy it.
676 * (this will transition management threads from frozen->exit).
678 if (cluster->nchains == 0) {
679 kprintf("unmount hmp %p last ref to PMP=%p\n",
681 hammer2_pfsfree(pmp);
686 * If elements still remain we need to set the REMASTER
687 * flag and unfreeze it.
690 for (i = 0; i < iroot->cluster.nchains; ++i) {
691 hammer2_thr_remaster(&pmp->sync_thrs[i]);
692 hammer2_thr_unfreeze(&pmp->sync_thrs[i]);
693 for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
694 hammer2_thr_remaster(
695 &pmp->xop_groups[j].thrs[i]);
696 hammer2_thr_unfreeze(
697 &pmp->xop_groups[j].thrs[i]);
705 * Mount or remount HAMMER2 filesystem from physical media
708 * mp mount point structure
714 * mp mount point structure
715 * path path to mount point
716 * data pointer to argument structure in user space
717 * volume volume path (device@LABEL form)
718 * hflags user mount flags
719 * cred user credentials
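/*
 * Illustrative invocation (a sketch; the userland utility name and device
 * path are assumptions, not defined in this file):
 *
 *	mount_hammer2 /dev/ad0s1a@ROOT /mnt
 */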
726 hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
729 struct hammer2_mount_info info;
733 hammer2_key_t key_next;
734 hammer2_key_t key_dummy;
737 struct nlookupdata nd;
738 hammer2_chain_t *parent;
739 hammer2_cluster_t *cluster;
740 hammer2_cluster_t *cparent;
741 const hammer2_inode_data_t *ripdata;
742 hammer2_blockref_t bref;
744 char devstr[MNAMELEN];
761 kprintf("hammer2_mount\n");
767 bzero(&info, sizeof(info));
768 info.cluster_fd = -1;
772 * Non-root mount or updating a mount
774 error = copyin(data, &info, sizeof(info));
778 error = copyinstr(info.volume, devstr, MNAMELEN - 1, &done);
782 /* Extract device and label */
784 label = strchr(devstr, '@');
786 ((label + 1) - dev) > done) {
794 if (mp->mnt_flag & MNT_UPDATE) {
796 * Update mount. Note that pmp->iroot->cluster is
797 * an inode-embedded cluster and thus cannot be
800 * XXX HAMMER2 needs to implement NFS export via
804 cluster = &pmp->iroot->cluster;
805 for (i = 0; i < cluster->nchains; ++i) {
806 if (cluster->array[i].chain == NULL)
808 hmp = cluster->array[i].chain->hmp;
810 error = hammer2_remount(hmp, mp, path,
815 /*hammer2_inode_install_hidden(pmp);*/
824 * Lookup name and verify it refers to a block device.
826 error = nlookup_init(&nd, dev, UIO_SYSSPACE, NLC_FOLLOW);
828 error = nlookup(&nd);
830 error = cache_vref(&nd.nl_nch, nd.nl_cred, &devvp);
834 if (vn_isdisk(devvp, &error))
835 error = vfs_mountedon(devvp);
839 * Determine if the device has already been mounted. After this
840 * check hmp will be non-NULL if we are doing a second or
841 * subsequent hammer2 mount from the same device.
843 lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);
844 TAILQ_FOREACH(hmp, &hammer2_mntlist, mntentry) {
845 if (hmp->devvp == devvp)
850 * Open the device if this isn't a secondary mount and construct
851 * the H2 device mount (hmp).
854 hammer2_chain_t *schain;
857 if (error == 0 && vcount(devvp) > 0)
861 * Now open the device
864 ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);
865 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
866 error = vinvalbuf(devvp, V_SAVE, 0, 0);
868 error = VOP_OPEN(devvp,
869 ronly ? FREAD : FREAD | FWRITE,
874 if (error && devvp) {
879 lockmgr(&hammer2_mntlk, LK_RELEASE);
882 hmp = kmalloc(sizeof(*hmp), M_HAMMER2, M_WAITOK | M_ZERO);
883 ksnprintf(hmp->devrepname, sizeof(hmp->devrepname), "%s", dev);
886 kmalloc_create(&hmp->mchain, "HAMMER2-chains");
887 TAILQ_INSERT_TAIL(&hammer2_mntlist, hmp, mntentry);
888 RB_INIT(&hmp->iotree);
889 spin_init(&hmp->io_spin, "hm2mount_io");
890 spin_init(&hmp->list_spin, "hm2mount_list");
891 TAILQ_INIT(&hmp->flushq);
893 lockinit(&hmp->vollk, "h2vol", 0, 0);
896 * vchain setup. vchain.data is embedded.
897 * vchain.refs is initialized and will never drop to 0.
899 * NOTE! voldata is not yet loaded.
901 hmp->vchain.hmp = hmp;
902 hmp->vchain.refs = 1;
903 hmp->vchain.data = (void *)&hmp->voldata;
904 hmp->vchain.bref.type = HAMMER2_BREF_TYPE_VOLUME;
905 hmp->vchain.bref.data_off = 0 | HAMMER2_PBUFRADIX;
906 hmp->vchain.bref.mirror_tid = hmp->voldata.mirror_tid;
908 hammer2_chain_core_init(&hmp->vchain);
909 /* hmp->vchain.u.xxx is left NULL */
912 * fchain setup. fchain.data is embedded.
913 * fchain.refs is initialized and will never drop to 0.
915 * The data is not used but needs to be initialized to
916 * pass assertion muster. We use this chain primarily
917 * as a placeholder for the freemap's top-level RBTREE
918 * so it does not interfere with the volume's topology
921 hmp->fchain.hmp = hmp;
922 hmp->fchain.refs = 1;
923 hmp->fchain.data = (void *)&hmp->voldata.freemap_blockset;
924 hmp->fchain.bref.type = HAMMER2_BREF_TYPE_FREEMAP;
925 hmp->fchain.bref.data_off = 0 | HAMMER2_PBUFRADIX;
926 hmp->fchain.bref.mirror_tid = hmp->voldata.freemap_tid;
927 hmp->fchain.bref.methods =
928 HAMMER2_ENC_CHECK(HAMMER2_CHECK_FREEMAP) |
929 HAMMER2_ENC_COMP(HAMMER2_COMP_NONE);
931 hammer2_chain_core_init(&hmp->fchain);
932 /* hmp->fchain.u.xxx is left NULL */
935 * Install the volume header and initialize fields from
938 error = hammer2_install_volume_header(hmp);
940 hammer2_unmount_helper(mp, NULL, hmp);
941 lockmgr(&hammer2_mntlk, LK_RELEASE);
942 hammer2_vfs_unmount(mp, MNT_FORCE);
947 * Really important to get these right or flush will get
950 hmp->spmp = hammer2_pfsalloc(NULL, NULL, 0);
951 kprintf("alloc spmp %p tid %016jx\n",
952 hmp->spmp, hmp->voldata.mirror_tid);
956 * Dummy-up vchain and fchain's modify_tid. mirror_tid
957 * is inherited from the volume header.
960 hmp->vchain.bref.mirror_tid = hmp->voldata.mirror_tid;
961 hmp->vchain.bref.modify_tid = hmp->vchain.bref.mirror_tid;
962 hmp->vchain.pmp = spmp;
963 hmp->fchain.bref.mirror_tid = hmp->voldata.freemap_tid;
964 hmp->fchain.bref.modify_tid = hmp->fchain.bref.mirror_tid;
965 hmp->fchain.pmp = spmp;
968 * First locate the super-root inode, which is key 0
969 * relative to the volume header's blockset.
971 * Then locate the root inode by scanning the directory keyspace
972 * represented by the label.
974 parent = hammer2_chain_lookup_init(&hmp->vchain, 0);
975 schain = hammer2_chain_lookup(&parent, &key_dummy,
976 HAMMER2_SROOT_KEY, HAMMER2_SROOT_KEY,
978 hammer2_chain_lookup_done(parent);
979 if (schain == NULL) {
980 kprintf("hammer2_mount: invalid super-root\n");
981 hammer2_unmount_helper(mp, NULL, hmp);
982 lockmgr(&hammer2_mntlk, LK_RELEASE);
983 hammer2_vfs_unmount(mp, MNT_FORCE);
987 kprintf("hammer2_mount: error %s reading super-root\n",
988 hammer2_error_str(schain->error));
989 hammer2_chain_unlock(schain);
990 hammer2_chain_drop(schain);
992 hammer2_unmount_helper(mp, NULL, hmp);
993 lockmgr(&hammer2_mntlk, LK_RELEASE);
994 hammer2_vfs_unmount(mp, MNT_FORCE);
999 * The super-root always uses an inode_tid of 1 when
1002 spmp->inode_tid = 1;
1003 spmp->modify_tid = schain->bref.modify_tid + 1;
1006 * Sanity-check schain's pmp and finish initialization.
1007 * Any chain belonging to the super-root topology should
1008 * have a NULL pmp (not even set to spmp).
1010 ripdata = &hammer2_chain_rdata(schain)->ipdata;
1011 KKASSERT(schain->pmp == NULL);
1012 spmp->pfs_clid = ripdata->meta.pfs_clid;
1015 * Replace the dummy spmp->iroot with a real one. It's
1016 * easier to just do a wholesale replacement than to try
1017 * to update the chain and fixup the iroot fields.
1019 * The returned inode is locked with the supplied cluster.
1021 cluster = hammer2_cluster_from_chain(schain);
1022 hammer2_inode_drop(spmp->iroot);
1024 spmp->iroot = hammer2_inode_get(spmp, NULL, cluster);
1025 spmp->spmp_hmp = hmp;
1026 spmp->pfs_types[0] = ripdata->meta.pfs_type;
1027 hammer2_inode_ref(spmp->iroot);
1028 hammer2_inode_unlock(spmp->iroot, cluster);
1030 /* leave spmp->iroot with one ref */
1032 if ((mp->mnt_flag & MNT_RDONLY) == 0) {
1033 error = hammer2_recovery(hmp);
1034 /* XXX do something with error */
1036 hammer2_update_pmps(hmp);
1037 hammer2_iocom_init(hmp);
1040 * Ref the cluster management messaging descriptor. The mount
1041 * program deals with the other end of the communications pipe.
1043 fp = holdfp(curproc->p_fd, info.cluster_fd, -1);
1045 hammer2_cluster_reconnect(hmp, fp);
1047 kprintf("hammer2_mount: bad cluster_fd!\n");
1054 * Lookup the mount point under the media-localized super-root.
1055 * Scanning hammer2_pfslist doesn't help us because it represents
1056 * PFS cluster ids which can aggregate several named PFSs together.
1058 * cluster->pmp will incorrectly point to spmp and must be fixed
1061 hammer2_inode_lock(spmp->iroot, HAMMER2_RESOLVE_ALWAYS);
1062 cparent = hammer2_inode_cluster(spmp->iroot, HAMMER2_RESOLVE_ALWAYS);
1063 lhc = hammer2_dirhash(label, strlen(label));
1064 cluster = hammer2_cluster_lookup(cparent, &key_next,
1065 lhc, lhc + HAMMER2_DIRHASH_LOMASK,
1068 if (hammer2_cluster_type(cluster) == HAMMER2_BREF_TYPE_INODE &&
1070 hammer2_cluster_rdata(cluster)->ipdata.filename) == 0) {
1073 cluster = hammer2_cluster_next(cparent, cluster, &key_next,
1075 lhc + HAMMER2_DIRHASH_LOMASK, 0);
1077 hammer2_inode_unlock(spmp->iroot, cparent);
1080 * PFS could not be found?
1082 if (cluster == NULL) {
1083 kprintf("hammer2_mount: PFS label not found\n");
1084 hammer2_unmount_helper(mp, NULL, hmp);
1085 lockmgr(&hammer2_mntlk, LK_RELEASE);
1086 hammer2_vfs_unmount(mp, MNT_FORCE);
1092 * Acquire the pmp structure (it should have already been allocated
1093 * via hammer2_update_pmps() so do not pass cluster in to add to
1094 * available chains).
1096 * Check if the cluster has already been mounted. A cluster can
1097 * only be mounted once; use null mounts to mount additional copies.
1099 ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
1100 hammer2_cluster_bref(cluster, &bref);
1101 pmp = hammer2_pfsalloc(NULL, ripdata, bref.modify_tid);
1102 hammer2_cluster_unlock(cluster);
1103 hammer2_cluster_drop(cluster);
1106 kprintf("hammer2_mount: PFS already mounted!\n");
1107 hammer2_unmount_helper(mp, NULL, hmp);
1108 lockmgr(&hammer2_mntlk, LK_RELEASE);
1109 hammer2_vfs_unmount(mp, MNT_FORCE);
1117 kprintf("hammer2_mount hmp=%p pmp=%p\n", hmp, pmp);
1119 mp->mnt_flag = MNT_LOCAL;
1120 mp->mnt_kern_flag |= MNTK_ALL_MPSAFE; /* all entry pts are SMP */
1121 mp->mnt_kern_flag |= MNTK_THR_SYNC; /* new vsyncscan semantics */
1124 * required mount structure initializations
1126 mp->mnt_stat.f_iosize = HAMMER2_PBUFSIZE;
1127 mp->mnt_stat.f_bsize = HAMMER2_PBUFSIZE;
1129 mp->mnt_vstat.f_frsize = HAMMER2_PBUFSIZE;
1130 mp->mnt_vstat.f_bsize = HAMMER2_PBUFSIZE;
1135 mp->mnt_iosize_max = MAXPHYS;
1138 * Connect up mount pointers.
1140 hammer2_mount_helper(mp, pmp);
1142 lockmgr(&hammer2_mntlk, LK_RELEASE);
1145 * With the cluster operational install ihidden.
1146 * (only applicable to pfs mounts, not applicable to spmp)
1148 hammer2_inode_install_hidden(pmp);
1154 vfs_add_vnodeops(mp, &hammer2_vnode_vops, &mp->mnt_vn_norm_ops);
1155 vfs_add_vnodeops(mp, &hammer2_spec_vops, &mp->mnt_vn_spec_ops);
1156 vfs_add_vnodeops(mp, &hammer2_fifo_vops, &mp->mnt_vn_fifo_ops);
1158 copyinstr(info.volume, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, &size);
1159 bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
1160 bzero(mp->mnt_stat.f_mntonname, sizeof(mp->mnt_stat.f_mntonname));
1161 copyinstr(path, mp->mnt_stat.f_mntonname,
1162 sizeof(mp->mnt_stat.f_mntonname) - 1,
1166 * Initial statfs to prime mnt_stat.
1168 hammer2_vfs_statfs(mp, &mp->mnt_stat, cred);
1174 * Scan PFSs under the super-root and create hammer2_pfs structures.
1178 hammer2_update_pmps(hammer2_dev_t *hmp)
1180 const hammer2_inode_data_t *ripdata;
1181 hammer2_cluster_t *cparent;
1182 hammer2_cluster_t *cluster;
1183 hammer2_blockref_t bref;
1184 hammer2_pfs_t *spmp;
1186 hammer2_key_t key_next;
1189 * Lookup mount point under the media-localized super-root.
1191 * cluster->pmp will incorrectly point to spmp and must be fixed
1195 hammer2_inode_lock(spmp->iroot, HAMMER2_RESOLVE_ALWAYS);
1196 cparent = hammer2_inode_cluster(spmp->iroot, HAMMER2_RESOLVE_ALWAYS);
1197 cluster = hammer2_cluster_lookup(cparent, &key_next,
1202 if (hammer2_cluster_type(cluster) != HAMMER2_BREF_TYPE_INODE)
1204 ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
1205 hammer2_cluster_bref(cluster, &bref);
1206 kprintf("ADD LOCAL PFS: %s\n", ripdata->filename);
1208 pmp = hammer2_pfsalloc(cluster, ripdata, bref.modify_tid);
1209 cluster = hammer2_cluster_next(cparent, cluster,
1215 hammer2_inode_unlock(spmp->iroot, cparent);
1220 hammer2_remount(hammer2_dev_t *hmp, struct mount *mp, char *path,
1221 struct vnode *devvp, struct ucred *cred)
1225 if (hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
1226 error = hammer2_recovery(hmp);
1235 hammer2_vfs_unmount(struct mount *mp, int mntflags)
1246 lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);
1249 * If mount initialization proceeded far enough we must flush
1250 * its vnodes and sync the underlying mount points. Three syncs
1251 * are required to fully flush the filesystem (freemap updates lag
1252 * by one flush, and one extra for safety).
1254 if (mntflags & MNT_FORCE)
1259 error = vflush(mp, 0, flags);
1262 hammer2_vfs_sync(mp, MNT_WAIT);
1263 hammer2_vfs_sync(mp, MNT_WAIT);
1264 hammer2_vfs_sync(mp, MNT_WAIT);
1268 * Cleanup the frontend support XOPS threads
1270 hammer2_xop_helper_cleanup(pmp);
1273 * Cleanup our reference on ihidden.
1276 hammer2_inode_drop(pmp->ihidden);
1277 pmp->ihidden = NULL;
1280 hammer2_unmount_helper(mp, pmp, NULL);
1284 lockmgr(&hammer2_mntlk, LK_RELEASE);
1290 * Mount helper, hook the system mount into our PFS.
1291 * The mount lock is held.
1293 * We must bump the mount_count on related devices for any
1298 hammer2_mount_helper(struct mount *mp, hammer2_pfs_t *pmp)
1300 hammer2_cluster_t *cluster;
1301 hammer2_chain_t *rchain;
1304 mp->mnt_data = (qaddr_t)pmp;
1308 * After pmp->mp is set we have to adjust hmp->mount_count.
1310 cluster = &pmp->iroot->cluster;
1311 for (i = 0; i < cluster->nchains; ++i) {
1312 rchain = cluster->array[i].chain;
1315 ++rchain->hmp->mount_count;
1316 kprintf("hammer2_mount hmp=%p ++mount_count=%d\n",
1317 rchain->hmp, rchain->hmp->mount_count);
1321 * Create missing Xop threads
1323 hammer2_xop_helper_create(pmp);
1327 * Mount helper, unhook the system mount from our PFS.
1328 * The mount lock is held.
1330 * If hmp is supplied a mount responsible for being the first to open
1331 * the block device failed and the block device and all PFSs using the
1332 * block device must be cleaned up.
1334 * If pmp is supplied multiple devices might be backing the PFS and each
1335 * must be disconnected. This might not be the last PFS using some of the
1336 * underlying devices. Also, we have to adjust our hmp->mount_count
1337 * accounting for the devices backing the pmp which is now undergoing an
1342 hammer2_unmount_helper(struct mount *mp, hammer2_pfs_t *pmp, hammer2_dev_t *hmp)
1344 hammer2_cluster_t *cluster;
1345 hammer2_chain_t *rchain;
1346 struct vnode *devvp;
1352 * If no device is supplied this is a high-level unmount and we have
1353 * to disconnect the mount, adjust mount_count, and locate devices
1354 * that might now have no mounts.
1357 KKASSERT(hmp == NULL);
1358 KKASSERT((void *)(intptr_t)mp->mnt_data == pmp);
1360 mp->mnt_data = NULL;
1363 * After pmp->mp is cleared we have to account for
1366 cluster = &pmp->iroot->cluster;
1367 for (i = 0; i < cluster->nchains; ++i) {
1368 rchain = cluster->array[i].chain;
1371 --rchain->hmp->mount_count;
1372 kprintf("hammer2_unmount hmp=%p --mount_count=%d\n",
1373 rchain->hmp, rchain->hmp->mount_count);
1374 /* scrapping hmp now may invalidate the pmp */
1377 TAILQ_FOREACH(hmp, &hammer2_mntlist, mntentry) {
1378 if (hmp->mount_count == 0) {
1379 hammer2_unmount_helper(NULL, NULL, hmp);
1387 * Try to terminate the block device. We can't terminate it if
1388 * there are still PFSs referencing it.
1390 kprintf("hammer2_unmount hmp=%p mount_count=%d\n",
1391 hmp, hmp->mount_count);
1392 if (hmp->mount_count)
1395 hammer2_pfsfree_scan(hmp);
1396 hammer2_dev_exlock(hmp); /* XXX order */
1399 * Cycle the volume data lock as a safety (probably not needed any
1400 * more). To ensure everything is out we need to flush at least
1401 * three times. (1) The running of the unlinkq can dirty the
1402 * filesystem, (2) A normal flush can dirty the freemap, and
1403 * (3) a final flush ensures that the freemap is fully synchronized.
1405 * The next mount's recovery scan can clean everything up but we want
1406 * to leave the filesystem in a 100% clean state on a normal unmount.
1409 hammer2_voldata_lock(hmp);
1410 hammer2_voldata_unlock(hmp);
1412 hammer2_iocom_uninit(hmp);
1414 if ((hmp->vchain.flags | hmp->fchain.flags) &
1415 HAMMER2_CHAIN_FLUSH_MASK) {
1416 kprintf("hammer2_unmount: chains left over "
1417 "after final sync\n");
1418 kprintf(" vchain %08x\n", hmp->vchain.flags);
1419 kprintf(" fchain %08x\n", hmp->fchain.flags);
1421 if (hammer2_debug & 0x0010)
1422 Debugger("entered debugger");
1425 KKASSERT(hmp->spmp == NULL);
1428 * Finish up with the device vnode
1430 if ((devvp = hmp->devvp) != NULL) {
1431 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
1432 vinvalbuf(devvp, (ronly ? 0 : V_SAVE), 0, 0);
1434 VOP_CLOSE(devvp, (ronly ? FREAD : FREAD|FWRITE), NULL);
1441 * Clear vchain/fchain flags that might prevent final cleanup
1444 if (hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED) {
1445 atomic_clear_int(&hmp->vchain.flags,
1446 HAMMER2_CHAIN_MODIFIED);
1447 hammer2_pfs_memory_wakeup(hmp->vchain.pmp);
1448 hammer2_chain_drop(&hmp->vchain);
1450 if (hmp->vchain.flags & HAMMER2_CHAIN_UPDATE) {
1451 atomic_clear_int(&hmp->vchain.flags,
1452 HAMMER2_CHAIN_UPDATE);
1453 hammer2_chain_drop(&hmp->vchain);
1456 if (hmp->fchain.flags & HAMMER2_CHAIN_MODIFIED) {
1457 atomic_clear_int(&hmp->fchain.flags,
1458 HAMMER2_CHAIN_MODIFIED);
1459 hammer2_pfs_memory_wakeup(hmp->fchain.pmp);
1460 hammer2_chain_drop(&hmp->fchain);
1462 if (hmp->fchain.flags & HAMMER2_CHAIN_UPDATE) {
1463 atomic_clear_int(&hmp->fchain.flags,
1464 HAMMER2_CHAIN_UPDATE);
1465 hammer2_chain_drop(&hmp->fchain);
1469 * Final drop of embedded freemap root chain to
1470 * clean up fchain.core (fchain structure is not
1471 * flagged ALLOCATED so it is cleaned out and then
1474 hammer2_chain_drop(&hmp->fchain);
1477 * Final drop of embedded volume root chain to clean
1478 * up vchain.core (vchain structure is not flagged
1479 * ALLOCATED so it is cleaned out and then left to
1483 hammer2_dump_chain(&hmp->vchain, 0, &dumpcnt, 'v');
1485 hammer2_dump_chain(&hmp->fchain, 0, &dumpcnt, 'f');
1486 hammer2_dev_unlock(hmp);
1487 hammer2_chain_drop(&hmp->vchain);
1489 hammer2_io_cleanup(hmp, &hmp->iotree);
1490 if (hmp->iofree_count) {
1491 kprintf("io_cleanup: %d I/O's left hanging\n",
1495 TAILQ_REMOVE(&hammer2_mntlist, hmp, mntentry);
1496 kmalloc_destroy(&hmp->mchain);
1497 kfree(hmp, M_HAMMER2);
1502 hammer2_vfs_vget(struct mount *mp, struct vnode *dvp,
1503 ino_t ino, struct vnode **vpp)
1505 kprintf("hammer2_vget\n");
1506 return (EOPNOTSUPP);
1511 hammer2_vfs_root(struct mount *mp, struct vnode **vpp)
1514 hammer2_cluster_t *cparent;
1519 if (pmp->iroot == NULL) {
1523 hammer2_inode_lock(pmp->iroot, HAMMER2_RESOLVE_ALWAYS |
1524 HAMMER2_RESOLVE_SHARED);
1525 cparent = hammer2_inode_cluster(pmp->iroot,
1526 HAMMER2_RESOLVE_ALWAYS |
1527 HAMMER2_RESOLVE_SHARED);
1530 * Initialize pmp->inode_tid and pmp->modify_tid on first access
1531 * to the root of the mount that resolves successfully.
1532 * XXX probably not the best place for this.
1534 if (pmp->inode_tid == 0 &&
1535 cparent->error == 0 && cparent->focus) {
1536 const hammer2_inode_data_t *ripdata;
1537 hammer2_blockref_t bref;
1539 ripdata = &hammer2_cluster_rdata(cparent)->ipdata;
1540 hammer2_cluster_bref(cparent, &bref);
1541 pmp->inode_tid = ripdata->meta.pfs_inum + 1;
1542 if (pmp->inode_tid < HAMMER2_INODE_START)
1543 pmp->inode_tid = HAMMER2_INODE_START;
1544 pmp->modify_tid = bref.modify_tid + 1;
1545 pmp->iroot->meta = ripdata->meta;
1546 hammer2_cluster_bref(cparent, &pmp->iroot->bref);
1547 kprintf("PMP focus good set nextino=%ld mod=%016jx\n",
1548 pmp->inode_tid, pmp->modify_tid);
1551 vp = hammer2_igetv(pmp->iroot, &error);
1552 hammer2_inode_unlock(pmp->iroot, cparent);
1555 kprintf("vnodefail\n");
1564 * XXX incorporate ipdata->meta.inode_quota and data_quota
1568 hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
1572 hammer2_blockref_t bref;
1575 KKASSERT(pmp->iroot->cluster.nchains >= 1);
1576 hmp = pmp->iroot->cluster.focus->hmp; /* iroot retains focus */
1577 bref = pmp->iroot->cluster.focus->bref; /* no lock */
1579 mp->mnt_stat.f_files = bref.inode_count;
1580 mp->mnt_stat.f_ffree = 0;
1581 mp->mnt_stat.f_blocks = (bref.data_count +
1582 hmp->voldata.allocator_free) /
1583 mp->mnt_vstat.f_bsize;
1584 mp->mnt_stat.f_bfree = hmp->voldata.allocator_free /
1585 mp->mnt_vstat.f_bsize;
1586 mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree;
1588 *sbp = mp->mnt_stat;
1594 hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
1598 hammer2_blockref_t bref;
1601 KKASSERT(pmp->iroot->cluster.nchains >= 1);
1602 hmp = pmp->iroot->cluster.focus->hmp; /* iroot retains focus */
1603 bref = pmp->iroot->cluster.focus->bref; /* no lock */
1605 mp->mnt_vstat.f_bsize = HAMMER2_PBUFSIZE;
1606 mp->mnt_vstat.f_files = bref.inode_count;
1607 mp->mnt_vstat.f_ffree = 0;
1608 mp->mnt_vstat.f_blocks = (bref.data_count +
1609 hmp->voldata.allocator_free) /
1610 mp->mnt_vstat.f_bsize;
1611 mp->mnt_vstat.f_bfree = hmp->voldata.allocator_free /
1612 mp->mnt_vstat.f_bsize;
1613 mp->mnt_vstat.f_bavail = mp->mnt_vstat.f_bfree;
1615 *sbp = mp->mnt_vstat;
1620 * Mount-time recovery (RW mounts)
1622 * Updates to the free block table are allowed to lag flushes by one
1623 * transaction. After a crash, on the next fresh mount we must do an
1624 * incremental scan of the last committed transaction id and make sure that
1625 * all related blocks have been marked allocated.
1627 * The super-root topology and each PFS has its own transaction id domain,
1628 * so we must track PFS boundary transitions.
1630 struct hammer2_recovery_elm {
1631 TAILQ_ENTRY(hammer2_recovery_elm) entry;
1632 hammer2_chain_t *chain;
1633 hammer2_tid_t sync_tid;
1636 TAILQ_HEAD(hammer2_recovery_list, hammer2_recovery_elm);
1638 struct hammer2_recovery_info {
1639 struct hammer2_recovery_list list;
1643 static int hammer2_recovery_scan(hammer2_dev_t *hmp,
1644 hammer2_chain_t *parent,
1645 struct hammer2_recovery_info *info,
1646 hammer2_tid_t sync_tid);
1648 #define HAMMER2_RECOVERY_MAXDEPTH 10
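/*
 * hammer2_recovery() runs a transaction against the super-root PFS and,
 * when the freemap's last synchronized transaction id (freemap_tid) lags
 * the volume's mirror_tid, rescans the topology from vchain and re-marks
 * any block reference newer than freemap_tid as allocated in the freemap.
 * Scans that exceed HAMMER2_RECOVERY_MAXDEPTH are queued on info.list and
 * processed iteratively rather than recursively.
 */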
1652 hammer2_recovery(hammer2_dev_t *hmp)
1654 struct hammer2_recovery_info info;
1655 struct hammer2_recovery_elm *elm;
1656 hammer2_chain_t *parent;
1657 hammer2_tid_t sync_tid;
1658 hammer2_tid_t mirror_tid;
1660 int cumulative_error = 0;
1662 hammer2_trans_init(hmp->spmp, 0);
1664 sync_tid = hmp->voldata.freemap_tid;
1665 mirror_tid = hmp->voldata.mirror_tid;
1667 kprintf("hammer2 mount \"%s\": ", hmp->devrepname);
1668 if (sync_tid >= mirror_tid) {
1669 kprintf(" no recovery needed\n");
1671 kprintf(" freemap recovery %016jx-%016jx\n",
1672 sync_tid + 1, mirror_tid);
1675 TAILQ_INIT(&info.list);
1677 parent = hammer2_chain_lookup_init(&hmp->vchain, 0);
1678 cumulative_error = hammer2_recovery_scan(hmp, parent,
1680 hammer2_chain_lookup_done(parent);
1682 while ((elm = TAILQ_FIRST(&info.list)) != NULL) {
1683 TAILQ_REMOVE(&info.list, elm, entry);
1684 parent = elm->chain;
1685 sync_tid = elm->sync_tid;
1686 kfree(elm, M_HAMMER2);
1688 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
1689 error = hammer2_recovery_scan(hmp, parent,
1690 &info, hmp->voldata.freemap_tid);
1691 hammer2_chain_unlock(parent);
1692 hammer2_chain_drop(parent); /* drop elm->chain ref */
1694 cumulative_error = error;
1696 hammer2_trans_done(hmp->spmp);
1698 return cumulative_error;
1703 hammer2_recovery_scan(hammer2_dev_t *hmp, hammer2_chain_t *parent,
1704 struct hammer2_recovery_info *info,
1705 hammer2_tid_t sync_tid)
1707 const hammer2_inode_data_t *ripdata;
1708 hammer2_chain_t *chain;
1710 int cumulative_error = 0;
1714 * Adjust freemap to ensure that the block(s) are marked allocated.
1716 if (parent->bref.type != HAMMER2_BREF_TYPE_VOLUME) {
1717 hammer2_freemap_adjust(hmp, &parent->bref,
1718 HAMMER2_FREEMAP_DORECOVER);
1722 * Check type for recursive scan
1724 switch(parent->bref.type) {
1725 case HAMMER2_BREF_TYPE_VOLUME:
1726 /* data already instantiated */
1728 case HAMMER2_BREF_TYPE_INODE:
1730 * Must instantiate data for DIRECTDATA test and also
1733 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
1734 ripdata = &hammer2_chain_rdata(parent)->ipdata;
1735 if (ripdata->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) {
1736 /* not applicable to recovery scan */
1737 hammer2_chain_unlock(parent);
1740 hammer2_chain_unlock(parent);
1742 case HAMMER2_BREF_TYPE_INDIRECT:
1744 * Must instantiate data for recursion
1746 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
1747 hammer2_chain_unlock(parent);
1749 case HAMMER2_BREF_TYPE_DATA:
1750 case HAMMER2_BREF_TYPE_FREEMAP:
1751 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
1752 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
1753 /* not applicable to recovery scan */
1761 * Defer operation if depth limit reached or if we are crossing a
1764 if (info->depth >= HAMMER2_RECOVERY_MAXDEPTH) {
1765 struct hammer2_recovery_elm *elm;
1767 elm = kmalloc(sizeof(*elm), M_HAMMER2, M_ZERO | M_WAITOK);
1768 elm->chain = parent;
1769 elm->sync_tid = sync_tid;
1770 hammer2_chain_ref(parent);
1771 TAILQ_INSERT_TAIL(&info->list, elm, entry);
1772 /* unlocked by caller */
1779 * Recursive scan of the last flushed transaction only. We are
1780 * doing this without pmp assignments so don't leave the chains
1781 * hanging around after we are done with them.
1784 chain = hammer2_chain_scan(parent, NULL, &cache_index,
1785 HAMMER2_LOOKUP_NODATA);
1787 atomic_set_int(&chain->flags, HAMMER2_CHAIN_RELEASE);
1788 if (chain->bref.mirror_tid > sync_tid) {
1790 error = hammer2_recovery_scan(hmp, chain,
1794 cumulative_error = error;
1798 * Flush the recovery at the PFS boundary to stage it for
1799 * the final flush of the super-root topology.
1801 if ((chain->bref.flags & HAMMER2_BREF_FLAG_PFSROOT) &&
1802 (chain->flags & HAMMER2_CHAIN_ONFLUSH)) {
1803 hammer2_flush(chain, 1);
1805 chain = hammer2_chain_scan(parent, chain, &cache_index,
1806 HAMMER2_LOOKUP_NODATA);
1809 return cumulative_error;
1813 * Sync a mount point; this is called on a per-mount basis from the
1814 * filesystem syncer process periodically and whenever a user issues
1818 hammer2_vfs_sync(struct mount *mp, int waitfor)
1820 hammer2_xop_flush_t *xop;
1821 struct hammer2_sync_info info;
1822 hammer2_inode_t *iroot;
1830 KKASSERT(iroot->pmp == pmp);
1833 * We can't acquire locks on existing vnodes while in a transaction
1834 * without risking a deadlock. This assumes that vfsync() can be
1835 * called without the vnode locked (which it can in DragonFly).
1836 * Otherwise we'd have to implement a multi-pass or flag the lock
1837 * failures and retry.
1839 * The reclamation code interlocks with the sync list's token
1840 * (by removing the vnode from the scan list) before unlocking
1841 * the inode, giving us time to ref the inode.
1843 /*flags = VMSC_GETVP;*/
1845 if (waitfor & MNT_LAZY)
1846 flags |= VMSC_ONEPASS;
1850 * Preflush the vnodes using a normal transaction before interlocking
1851 * with a flush transaction.
1853 hammer2_trans_init(pmp, 0);
1855 info.waitfor = MNT_NOWAIT;
1856 vsyncscan(mp, flags | VMSC_NOWAIT, hammer2_sync_scan2, &info);
1857 hammer2_trans_done(pmp);
1861 * Start our flush transaction. This does not return until all
1862 * concurrent transactions have completed and will prevent any
1863 * new transactions from running concurrently, except for the
1864 * buffer cache transactions.
1866 * For efficiency do an async pass before making sure with a
1867 * synchronous pass on all related buffer cache buffers. It
1868 * should theoretically not be possible for any new file buffers
1869 * to be instantiated during this sequence.
1871 hammer2_trans_init(pmp, HAMMER2_TRANS_ISFLUSH |
1872 HAMMER2_TRANS_PREFLUSH);
1873 hammer2_run_unlinkq(pmp);
1876 info.waitfor = MNT_NOWAIT;
1877 vsyncscan(mp, flags | VMSC_NOWAIT, hammer2_sync_scan2, &info);
1878 info.waitfor = MNT_WAIT;
1879 vsyncscan(mp, flags, hammer2_sync_scan2, &info);
1882 * Clear PREFLUSH. This prevents (or asserts on) any new logical
1883 * buffer cache flushes which occur during the flush. Device buffers
1886 hammer2_bioq_sync(pmp);
1887 atomic_clear_int(&pmp->trans.flags, HAMMER2_TRANS_PREFLUSH);
1890 * Use the XOP interface to concurrently flush all nodes to
1891 * synchronize the PFSROOT subtopology to the media. A standard
1892 * end-of-scan ENOENT error indicates cluster sufficiency.
1894 * Note that this flush will not be visible on crash recovery until
1895 * we flush the super-root topology in the next loop.
1897 * XXX For now wait for all flushes to complete.
1900 xop = &hammer2_xop_alloc(iroot)->xop_flush;
1901 hammer2_xop_start(&xop->head, hammer2_inode_xop_flush);
1902 error = hammer2_xop_collect(&xop->head,
1903 HAMMER2_XOP_COLLECT_WAITALL);
1904 if (error == ENOENT)
1909 hammer2_trans_done(pmp);
1918 hammer2_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
1920 struct hammer2_sync_info *info = data;
1921 hammer2_inode_t *ip;
1925 * Degenerate cases. Note that ip == NULL typically means the
1926 * syncer vnode itself and we don't want to vclrisdirty() in that
1933 if (vp->v_type == VNON || vp->v_type == VBAD) {
1939 * VOP_FSYNC will start a new transaction so replicate some code
1940 * here to do it inline (see hammer2_vop_fsync()).
1942 * WARNING: The vfsync interacts with the buffer cache and might
1943 * block; we can't hold the inode lock at that time.
1944 * However, we MUST ref ip before blocking to ensure that
1945 * it isn't ripped out from under us (since we do not
1946 * hold a lock on the vnode).
1948 hammer2_inode_ref(ip);
1949 if ((ip->flags & HAMMER2_INODE_MODIFIED) ||
1950 !RB_EMPTY(&vp->v_rbdirty_tree)) {
1951 vfsync(vp, info->waitfor, 1, NULL, NULL);
1952 hammer2_inode_fsync(ip, NULL);
1954 if ((ip->flags & HAMMER2_INODE_MODIFIED) == 0 &&
1955 RB_EMPTY(&vp->v_rbdirty_tree)) {
1959 hammer2_inode_drop(ip);
1963 info->error = error;
1970 hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp)
1977 hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
1978 struct fid *fhp, struct vnode **vpp)
1985 hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
1986 int *exflagsp, struct ucred **credanonp)
1992 * Support code for hammer2_vfs_mount(). Read, verify, and install the volume
1993 * header into the HMP
1995 * XXX read four volhdrs and use the one with the highest TID whose CRC
2000 * XXX For filesystems w/ less than 4 volhdrs, make sure to not write to
2001 * nonexistent locations.
2003 * XXX Record selected volhdr and ring updates to each of 4 volhdrs
2007 hammer2_install_volume_header(hammer2_dev_t *hmp)
2009 hammer2_volume_data_t *vd;
2011 hammer2_crc32_t crc0, crc, bcrc0, bcrc;
2023 * There are up to 4 copies of the volume header (syncs iterate
2024 * between them so there is no single master). We don't trust the
2025 * volu_size field so we don't know precisely how large the filesystem
2026 * is, so we depend on the OS to return an error if we go beyond the
2027 * block device's EOF.
2029 for (i = 0; i < HAMMER2_NUM_VOLHDRS; i++) {
2030 error = bread(hmp->devvp, i * HAMMER2_ZONE_BYTES64,
2031 HAMMER2_VOLUME_BYTES, &bp);
2038 vd = (struct hammer2_volume_data *) bp->b_data;
2039 if ((vd->magic != HAMMER2_VOLUME_ID_HBO) &&
2040 (vd->magic != HAMMER2_VOLUME_ID_ABO)) {
2046 if (vd->magic == HAMMER2_VOLUME_ID_ABO) {
2047 /* XXX: Reversed-endianness filesystem */
2048 kprintf("hammer2: reverse-endian filesystem detected");
2054 crc = vd->icrc_sects[HAMMER2_VOL_ICRC_SECT0];
2055 crc0 = hammer2_icrc32(bp->b_data + HAMMER2_VOLUME_ICRC0_OFF,
2056 HAMMER2_VOLUME_ICRC0_SIZE);
2057 bcrc = vd->icrc_sects[HAMMER2_VOL_ICRC_SECT1];
2058 bcrc0 = hammer2_icrc32(bp->b_data + HAMMER2_VOLUME_ICRC1_OFF,
2059 HAMMER2_VOLUME_ICRC1_SIZE);
2060 if ((crc0 != crc) || (bcrc0 != bcrc)) {
2061 kprintf("hammer2 volume header crc "
2062 "mismatch copy #%d %08x/%08x\n",
2069 if (valid == 0 || hmp->voldata.mirror_tid < vd->mirror_tid) {
2078 hmp->volsync = hmp->voldata;
2080 if (error_reported || bootverbose || 1) { /* 1/DEBUG */
2081 kprintf("hammer2: using volume header #%d\n",
2086 kprintf("hammer2: no valid volume headers found!\n");
2092 * This handles hysteresis on regular file flushes. Because the BIOs are
2093 * routed to a thread it is possible for an excessive number to build up
2094 * and cause long front-end stalls well before the runningbuffspace limit
2095 * is hit, so we implement hammer2_flush_pipe to control the
2098 * This is a particular problem when compression is used.
2101 hammer2_lwinprog_ref(hammer2_pfs_t *pmp)
2103 atomic_add_int(&pmp->count_lwinprog, 1);
2107 hammer2_lwinprog_drop(hammer2_pfs_t *pmp)
2111 lwinprog = atomic_fetchadd_int(&pmp->count_lwinprog, -1);
2112 if ((lwinprog & HAMMER2_LWINPROG_WAITING) &&
2113 (lwinprog & HAMMER2_LWINPROG_MASK) <= hammer2_flush_pipe * 2 / 3) {
2114 atomic_clear_int(&pmp->count_lwinprog,
2115 HAMMER2_LWINPROG_WAITING);
2116 wakeup(&pmp->count_lwinprog);
2118 if ((lwinprog & HAMMER2_LWINPROG_WAITING0) &&
2119 (lwinprog & HAMMER2_LWINPROG_MASK) <= 0) {
2120 atomic_clear_int(&pmp->count_lwinprog,
2121 HAMMER2_LWINPROG_WAITING0);
2122 wakeup(&pmp->count_lwinprog);
2127 hammer2_lwinprog_wait(hammer2_pfs_t *pmp, int flush_pipe)
2130 int lwflag = (flush_pipe) ? HAMMER2_LWINPROG_WAITING :
2131 HAMMER2_LWINPROG_WAITING0;
2134 lwinprog = pmp->count_lwinprog;
2136 if ((lwinprog & HAMMER2_LWINPROG_MASK) <= flush_pipe)
2138 tsleep_interlock(&pmp->count_lwinprog, 0);
2139 atomic_set_int(&pmp->count_lwinprog, lwflag);
2140 lwinprog = pmp->count_lwinprog;
2141 if ((lwinprog & HAMMER2_LWINPROG_MASK) <= flush_pipe)
2143 tsleep(&pmp->count_lwinprog, PINTERLOCKED, "h2wpipe", hz);
2148 * Manage excessive memory resource use for chain and related
2152 hammer2_pfs_memory_wait(hammer2_pfs_t *pmp)
2162 * Atomically check the condition and wait. Also do an early speedup of
2163 * the syncer to try to avoid hitting the wait.
2166 waiting = pmp->inmem_dirty_chains;
2168 count = waiting & HAMMER2_DIRTYCHAIN_MASK;
2170 limit = pmp->mp->mnt_nvnodelistsize / 10;
2171 if (limit < hammer2_limit_dirty_chains)
2172 limit = hammer2_limit_dirty_chains;
2177 if ((int)(ticks - zzticks) > hz) {
2179 kprintf("count %ld %ld\n", count, limit);
2184 * Block if there are too many dirty chains present and wait
2185 * for the flush to clean some out.
2187 if (count > limit) {
2188 tsleep_interlock(&pmp->inmem_dirty_chains, 0);
2189 if (atomic_cmpset_int(&pmp->inmem_dirty_chains,
2191 waiting | HAMMER2_DIRTYCHAIN_WAITING)) {
2192 speedup_syncer(pmp->mp);
2193 tsleep(&pmp->inmem_dirty_chains, PINTERLOCKED,
2196 continue; /* loop on success or fail */
2200 * Try to start an early flush before we are forced to block.
2202 if (count > limit * 7 / 10)
2203 speedup_syncer(pmp->mp);
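/*
 * hammer2_pfs_memory_inc() bumps the dirty-chain count that
 * hammer2_pfs_memory_wait() above checks against its limit, and
 * hammer2_pfs_memory_wakeup() clears the WAITING flag and wakes any
 * blocked waiter.
 */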
2209 hammer2_pfs_memory_inc(hammer2_pfs_t *pmp)
2212 atomic_add_int(&pmp->inmem_dirty_chains, 1);
2217 hammer2_pfs_memory_wakeup(hammer2_pfs_t *pmp)
2225 waiting = pmp->inmem_dirty_chains;
2227 if (atomic_cmpset_int(&pmp->inmem_dirty_chains,
2230 ~HAMMER2_DIRTYCHAIN_WAITING)) {
2235 if (waiting & HAMMER2_DIRTYCHAIN_WAITING)
2236 wakeup(&pmp->inmem_dirty_chains);
2243 hammer2_dump_chain(hammer2_chain_t *chain, int tab, int *countp, char pfx)
2245 hammer2_chain_t *scan;
2246 hammer2_chain_t *parent;
2250 kprintf("%*.*s...\n", tab, tab, "");
2255 kprintf("%*.*s%c-chain %p.%d %016jx/%d mir=%016jx\n",
2257 chain, chain->bref.type,
2258 chain->bref.key, chain->bref.keybits,
2259 chain->bref.mirror_tid);
2261 kprintf("%*.*s [%08x] (%s) refs=%d",
2264 ((chain->bref.type == HAMMER2_BREF_TYPE_INODE &&
2265 chain->data) ? (char *)chain->data->ipdata.filename : "?"),
2268 parent = chain->parent;
2270 kprintf("\n%*.*s p=%p [pflags %08x prefs %d",
2272 parent, parent->flags, parent->refs);
2273 if (RB_EMPTY(&chain->core.rbtree)) {
2277 RB_FOREACH(scan, hammer2_chain_tree, &chain->core.rbtree)
2278 hammer2_dump_chain(scan, tab + 4, countp, 'a');
2279 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE && chain->data)
2280 kprintf("%*.*s}(%s)\n", tab, tab, "",
2281 chain->data->ipdata.filename);
2283 kprintf("%*.*s}\n", tab, tab, "");