/*
 * Copyright (c) 2011-2015 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/fcntl.h>
#include <sys/vfsops.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/objcache.h>
#include <sys/namei.h>
#include <sys/mountctl.h>
#include <sys/dirent.h>
#include <sys/mutex.h>
#include <sys/mutex2.h>

#include "hammer2_disk.h"
#include "hammer2_mount.h"
#include "hammer2_lz4.h"

#include "zlib/hammer2_zlib.h"

#define REPORT_REFS_ERRORS	1	/* XXX remove me */

MALLOC_DEFINE(M_OBJCACHE, "objcache", "Object Cache");

struct hammer2_sync_info {
	hammer2_trans_t trans;
	int error;
	int waitfor;
};

TAILQ_HEAD(hammer2_mntlist, hammer2_dev);
TAILQ_HEAD(hammer2_pfslist, hammer2_pfs);
static struct hammer2_mntlist hammer2_mntlist;
static struct hammer2_pfslist hammer2_pfslist;
static struct lock hammer2_mntlk;
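
/*
 * hammer2_mntlist tracks one hammer2_dev per opened block device, while
 * hammer2_pfslist tracks PFS clusters (which can aggregate chains from
 * several devices).  Both lists are manipulated under hammer2_mntlk.
 */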

int hammer2_debug;
int hammer2_cluster_enable = 1;
int hammer2_hardlink_enable = 1;
int hammer2_flush_pipe = 100;
int hammer2_synchronous_flush = 1;
int hammer2_dio_count;
long hammer2_limit_dirty_chains;
long hammer2_iod_file_read;
long hammer2_iod_meta_read;
long hammer2_iod_indr_read;
long hammer2_iod_fmap_read;
long hammer2_iod_volu_read;
long hammer2_iod_file_write;
long hammer2_iod_meta_write;
long hammer2_iod_indr_write;
long hammer2_iod_fmap_write;
long hammer2_iod_volu_write;
long hammer2_ioa_file_read;
long hammer2_ioa_meta_read;
long hammer2_ioa_indr_read;
long hammer2_ioa_fmap_read;
long hammer2_ioa_volu_read;
long hammer2_ioa_fmap_write;
long hammer2_ioa_file_write;
long hammer2_ioa_meta_write;
long hammer2_ioa_indr_write;
long hammer2_ioa_volu_write;

MALLOC_DECLARE(M_HAMMER2_CBUFFER);
MALLOC_DEFINE(M_HAMMER2_CBUFFER, "HAMMER2-compbuffer",
		"Buffer used for compression.");

MALLOC_DECLARE(M_HAMMER2_DEBUFFER);
MALLOC_DEFINE(M_HAMMER2_DEBUFFER, "HAMMER2-decompbuffer",
		"Buffer used for decompression.");

SYSCTL_NODE(_vfs, OID_AUTO, hammer2, CTLFLAG_RW, 0, "HAMMER2 filesystem");

SYSCTL_INT(_vfs_hammer2, OID_AUTO, debug, CTLFLAG_RW,
	   &hammer2_debug, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_enable, CTLFLAG_RW,
	   &hammer2_cluster_enable, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, hardlink_enable, CTLFLAG_RW,
	   &hammer2_hardlink_enable, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, flush_pipe, CTLFLAG_RW,
	   &hammer2_flush_pipe, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, synchronous_flush, CTLFLAG_RW,
	   &hammer2_synchronous_flush, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, limit_dirty_chains, CTLFLAG_RW,
	   &hammer2_limit_dirty_chains, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, dio_count, CTLFLAG_RD,
	   &hammer2_dio_count, 0, "");

SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_read, CTLFLAG_RW,
	    &hammer2_iod_file_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_read, CTLFLAG_RW,
	    &hammer2_iod_meta_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_read, CTLFLAG_RW,
	    &hammer2_iod_indr_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_fmap_read, CTLFLAG_RW,
	    &hammer2_iod_fmap_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_read, CTLFLAG_RW,
	    &hammer2_iod_volu_read, 0, "");

SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_write, CTLFLAG_RW,
	    &hammer2_iod_file_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_write, CTLFLAG_RW,
	    &hammer2_iod_meta_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_write, CTLFLAG_RW,
	    &hammer2_iod_indr_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_fmap_write, CTLFLAG_RW,
	    &hammer2_iod_fmap_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_write, CTLFLAG_RW,
	    &hammer2_iod_volu_write, 0, "");

SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_file_read, CTLFLAG_RW,
	    &hammer2_ioa_file_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_meta_read, CTLFLAG_RW,
	    &hammer2_ioa_meta_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_indr_read, CTLFLAG_RW,
	    &hammer2_ioa_indr_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_fmap_read, CTLFLAG_RW,
	    &hammer2_ioa_fmap_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_volu_read, CTLFLAG_RW,
	    &hammer2_ioa_volu_read, 0, "");

SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_file_write, CTLFLAG_RW,
	    &hammer2_ioa_file_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_meta_write, CTLFLAG_RW,
	    &hammer2_ioa_meta_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_indr_write, CTLFLAG_RW,
	    &hammer2_ioa_indr_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_fmap_write, CTLFLAG_RW,
	    &hammer2_ioa_fmap_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_volu_write, CTLFLAG_RW,
	    &hammer2_ioa_volu_write, 0, "");
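
/*
 * All of the knobs and counters above are exported under the vfs.hammer2
 * sysctl tree, e.g. (assumed usage from a shell):
 *
 *	sysctl vfs.hammer2.flush_pipe=100
 *	sysctl vfs.hammer2.dio_count
 */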

static int hammer2_vfs_init(struct vfsconf *conf);
static int hammer2_vfs_uninit(struct vfsconf *vfsp);
static int hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
				struct ucred *cred);
static int hammer2_remount(hammer2_dev_t *, struct mount *, char *,
				struct vnode *, struct ucred *);
static int hammer2_recovery(hammer2_dev_t *hmp);
static int hammer2_vfs_unmount(struct mount *mp, int mntflags);
static int hammer2_vfs_root(struct mount *mp, struct vnode **vpp);
static int hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp,
				struct ucred *cred);
static int hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
				struct ucred *cred);
static int hammer2_vfs_vget(struct mount *mp, struct vnode *dvp,
				ino_t ino, struct vnode **vpp);
static int hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
				struct fid *fhp, struct vnode **vpp);
static int hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp);
static int hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
				int *exflagsp, struct ucred **credanonp);

static int hammer2_install_volume_header(hammer2_dev_t *hmp);
static int hammer2_sync_scan2(struct mount *mp, struct vnode *vp, void *data);

static void hammer2_update_pmps(hammer2_dev_t *hmp);

static void hammer2_mount_helper(struct mount *mp, hammer2_pfs_t *pmp);
static void hammer2_unmount_helper(struct mount *mp, hammer2_pfs_t *pmp,
				hammer2_dev_t *hmp);

/*
 * HAMMER2 vfs operations.
 */
static struct vfsops hammer2_vfsops = {
	.vfs_init	= hammer2_vfs_init,
	.vfs_uninit	= hammer2_vfs_uninit,
	.vfs_sync	= hammer2_vfs_sync,
	.vfs_mount	= hammer2_vfs_mount,
	.vfs_unmount	= hammer2_vfs_unmount,
	.vfs_root	= hammer2_vfs_root,
	.vfs_statfs	= hammer2_vfs_statfs,
	.vfs_statvfs	= hammer2_vfs_statvfs,
	.vfs_vget	= hammer2_vfs_vget,
	.vfs_vptofh	= hammer2_vfs_vptofh,
	.vfs_fhtovp	= hammer2_vfs_fhtovp,
	.vfs_checkexp	= hammer2_vfs_checkexp
};

MALLOC_DEFINE(M_HAMMER2, "HAMMER2-mount", "");

VFS_SET(hammer2_vfsops, hammer2, 0);
MODULE_VERSION(hammer2, 1);
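
/*
 * VFS_SET() registers this vfsops vector under the name "hammer2"; a PFS
 * is then mounted in device@LABEL form, e.g. (assumed usage):
 *
 *	mount_hammer2 /dev/ad0s1a@LOCAL /mnt
 */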

static
int
hammer2_vfs_init(struct vfsconf *conf)
{
	static struct objcache_malloc_args margs_read;
	static struct objcache_malloc_args margs_write;
	static struct objcache_malloc_args margs_vop;
	int error;

	error = 0;

	if (HAMMER2_BLOCKREF_BYTES != sizeof(struct hammer2_blockref))
		error = EINVAL;
	if (HAMMER2_INODE_BYTES != sizeof(struct hammer2_inode_data))
		error = EINVAL;
	if (HAMMER2_VOLUME_BYTES != sizeof(struct hammer2_volume_data))
		error = EINVAL;
	if (error)
		kprintf("HAMMER2 structure size mismatch; cannot continue.\n");

	margs_read.objsize = 65536;
	margs_read.mtype = M_HAMMER2_DEBUFFER;

	margs_write.objsize = 32768;
	margs_write.mtype = M_HAMMER2_CBUFFER;

	margs_vop.objsize = sizeof(hammer2_xop_t);
	margs_vop.mtype = M_HAMMER2;

	/*
	 * Note that for the XOPS cache we want backing store allocations
	 * to use M_ZERO.  This is not allowed in objcache_get() (to avoid
	 * confusion), so use the backing store function that does it.  This
	 * means that initial XOPS objects are zeroed but REUSED objects are
	 * not.  So we are responsible for cleaning the object up sufficiently
	 * for our needs before objcache_put()ing it back (typically just the
	 * xop->head.cluster structure).
	 */
	cache_buffer_read = objcache_create(margs_read.mtype->ks_shortdesc,
				0, 1, NULL, NULL, NULL,
				objcache_malloc_alloc,
				objcache_malloc_free,
				&margs_read);
	cache_buffer_write = objcache_create(margs_write.mtype->ks_shortdesc,
				0, 1, NULL, NULL, NULL,
				objcache_malloc_alloc,
				objcache_malloc_free,
				&margs_write);
	cache_xops = objcache_create(margs_vop.mtype->ks_shortdesc,
				0, 1, NULL, NULL, NULL,
				objcache_malloc_alloc_zero,
				objcache_malloc_free,
				&margs_vop);

	lockinit(&hammer2_mntlk, "mntlk", 0, 0);
	TAILQ_INIT(&hammer2_mntlist);
	TAILQ_INIT(&hammer2_pfslist);

	hammer2_limit_dirty_chains = desiredvnodes / 10;

	return (error);
}
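
/*
 * Note: hammer2_limit_dirty_chains seeds the dirty-chain throttle used
 * by hammer2_pfs_memory_wait() below; it can also be tuned at runtime
 * via the vfs.hammer2.limit_dirty_chains sysctl.
 */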

static
int
hammer2_vfs_uninit(struct vfsconf *vfsp __unused)
{
	objcache_destroy(cache_buffer_read);
	objcache_destroy(cache_buffer_write);
	objcache_destroy(cache_xops);
	return (0);
}

/*
 * Core PFS allocator.  Used to allocate the pmp structure for PFS cluster
 * mounts and the spmp structure for media (hmp) structures.
 *
 * pmp->modify_tid tracks new modify_tid transaction ids for front-end
 * transactions.  Note that synchronization does not use this field.
 * (typically frontend operations and synchronization cannot run on the
 * same PFS node at the same time).
 */
hammer2_pfs_t *
hammer2_pfsalloc(hammer2_cluster_t *cluster,
		 const hammer2_inode_data_t *ripdata,
		 hammer2_tid_t modify_tid)
{
	hammer2_chain_t *rchain;
	hammer2_inode_t *iroot;
	hammer2_pfs_t *pmp;
	int count;
	int i;
	int j;

	/*
	 * Locate or create the PFS based on the cluster id.  If ripdata
	 * is NULL this is a spmp which is unique and is always allocated.
	 */
	pmp = NULL;
	if (ripdata) {
		TAILQ_FOREACH(pmp, &hammer2_pfslist, mntentry) {
			if (bcmp(&pmp->pfs_clid, &ripdata->meta.pfs_clid,
				 sizeof(pmp->pfs_clid)) == 0) {
				break;
			}
		}
	}

	if (pmp == NULL) {
		pmp = kmalloc(sizeof(*pmp), M_HAMMER2, M_WAITOK | M_ZERO);
		hammer2_trans_manage_init(&pmp->tmanage);
		kmalloc_create(&pmp->minode, "HAMMER2-inodes");
		kmalloc_create(&pmp->mmsg, "HAMMER2-pfsmsg");
		lockinit(&pmp->lock, "pfslk", 0, 0);
		spin_init(&pmp->inum_spin, "hm2pfsalloc_inum");
		RB_INIT(&pmp->inum_tree);
		TAILQ_INIT(&pmp->unlinkq);
		spin_init(&pmp->list_spin, "hm2pfsalloc_list");

		for (j = 0; j < HAMMER2_XOPGROUPS; ++j)
			hammer2_xop_group_init(pmp, &pmp->xop_groups[j]);

		/*
		 * Save the last media transaction id for the flusher.
		 */
		if (ripdata)
			pmp->pfs_clid = ripdata->meta.pfs_clid;
		hammer2_mtx_init(&pmp->wthread_mtx, "h2wthr");
		bioq_init(&pmp->wthread_bioq);
		TAILQ_INSERT_TAIL(&hammer2_pfslist, pmp, mntentry);

		/*
		 * The synchronization thread may start too early, make
		 * sure it stays frozen until we are ready to let it go.
		 * XXX
		 */
		/*
		pmp->primary_thr.flags = HAMMER2_THREAD_FROZEN |
					 HAMMER2_THREAD_REMASTER;
		*/
	}

	/*
	 * Create the PFS's root inode.
	 */
	if ((iroot = pmp->iroot) == NULL) {
		iroot = hammer2_inode_get(pmp, NULL, NULL);
		pmp->iroot = iroot;
		hammer2_inode_ref(iroot);
		hammer2_inode_unlock(iroot, NULL);
	}

	/*
	 * Stop here if no cluster is passed in.
	 */
	if (cluster == NULL)
		goto done;

	/*
	 * When a cluster is passed in we must add the cluster's chains
	 * to the PFS's root inode, update pmp->pfs_types[], and update
	 * the synchronization threads.
	 *
	 * At the moment empty spots can develop due to removals or failures.
	 * Ultimately we want to re-fill these spots but doing so might
	 * confuse running code.  XXX
	 */
	hammer2_inode_ref(iroot);
	hammer2_mtx_ex(&iroot->lock);
	j = iroot->cluster.nchains;

	kprintf("add PFS to pmp %p[%d]\n", pmp, j);

	for (i = 0; i < cluster->nchains; ++i) {
		if (j == HAMMER2_MAXCLUSTER)
			break;
		rchain = cluster->array[i].chain;
		KKASSERT(rchain->pmp == NULL);
		rchain->pmp = pmp;
		hammer2_chain_ref(rchain);
		iroot->cluster.array[j].chain = rchain;
		pmp->pfs_types[j] = ripdata->meta.pfs_type;
		pmp->pfs_names[j] = kstrdup(ripdata->filename, M_HAMMER2);

		/*
		 * If the PFS is already mounted we must account
		 * for the mount_count here.
		 */
		if (pmp->mp)
			++rchain->hmp->mount_count;

		/*
		 * May have to fixup dirty chain tracking.  Previous
		 * pmp was NULL so nothing to undo.
		 */
		if (rchain->flags & HAMMER2_CHAIN_MODIFIED)
			hammer2_pfs_memory_inc(pmp);
		++j;
	}
	iroot->cluster.nchains = j;

	if (i != cluster->nchains) {
		kprintf("hammer2_mount: cluster full!\n");
		/* XXX fatal error? */
	}

	/*
	 * Update nmasters from any PFS inode which is part of the cluster.
	 * It is possible that this will result in a value which is too
	 * high.  MASTER PFSs are authoritative for pfs_nmasters and will
	 * override this value later on.
	 *
	 * (This informs us of masters that might not currently be
	 * discoverable by this mount).
	 */
	if (ripdata && pmp->pfs_nmasters < ripdata->meta.pfs_nmasters) {
		pmp->pfs_nmasters = ripdata->meta.pfs_nmasters;
	}

	/*
	 * Count visible masters.  Masters are usually added with
	 * ripdata->meta.pfs_nmasters set to 1.  This detects when there
	 * are more (XXX and must update the master inodes).
	 */
	count = 0;
	for (i = 0; i < iroot->cluster.nchains; ++i) {
		if (pmp->pfs_types[i] == HAMMER2_PFSTYPE_MASTER)
			++count;
	}
	if (pmp->pfs_nmasters < count)
		pmp->pfs_nmasters = count;

	/*
	 * Create missing synchronization and support threads.
	 *
	 * Single-node masters (including snapshots) have nothing to
	 * synchronize and do not require this thread.
	 *
	 * Multi-node masters or any number of soft masters, slaves, copy,
	 * or other PFS types need the thread.
	 *
	 * Each thread is responsible for its particular cluster index.
	 * We use independent threads so stalls or mismatches related to
	 * any given target do not affect other targets.
	 */
	for (i = 0; i < iroot->cluster.nchains; ++i) {
		/*
		 * Single-node masters (including snapshots) have nothing
		 * to synchronize and will make direct xops support calls,
		 * thus they do not require this thread.
		 *
		 * Note that there can be thousands of snapshots.  We do not
		 * want to create thousands of threads.
		 */
		if (pmp->pfs_nmasters <= 1 &&
		    pmp->pfs_types[i] == HAMMER2_PFSTYPE_MASTER) {
			continue;
		}

		/*
		 * Sync support thread
		 */
		if (pmp->sync_thrs[i].td == NULL) {
			hammer2_thr_create(&pmp->sync_thrs[i], pmp,
					   "h2nod", i, -1,
					   hammer2_primary_sync_thread);
		}
	}

	/*
	 * Create missing Xop threads
	 */
	if (pmp->mp)
		hammer2_xop_helper_create(pmp);

	hammer2_mtx_unlock(&iroot->lock);
	hammer2_inode_drop(iroot);
done:
	return pmp;
}

/*
 * Destroy a PFS, typically only occurs after the last mount on a device
 * has gone away.
 */
static void
hammer2_pfsfree(hammer2_pfs_t *pmp)
{
	hammer2_inode_t *iroot;
	int i;
	int j;

	/*
	 * Cleanup our reference on iroot.  iroot is (should) not be needed
	 * by the flush code.
	 */
	TAILQ_REMOVE(&hammer2_pfslist, pmp, mntentry);

	iroot = pmp->iroot;
	if (iroot) {
		for (i = 0; i < iroot->cluster.nchains; ++i) {
			hammer2_thr_delete(&pmp->sync_thrs[i]);
			for (j = 0; j < HAMMER2_XOPGROUPS; ++j)
				hammer2_thr_delete(&pmp->xop_groups[j].thrs[i]);
		}
#if REPORT_REFS_ERRORS
		if (pmp->iroot->refs != 1)
			kprintf("PMP->IROOT %p REFS WRONG %d\n",
				pmp->iroot, pmp->iroot->refs);
#else
		KKASSERT(pmp->iroot->refs == 1);
#endif
		/* ref for pmp->iroot */
		hammer2_inode_drop(pmp->iroot);
		pmp->iroot = NULL;
	}

	kmalloc_destroy(&pmp->mmsg);
	kmalloc_destroy(&pmp->minode);

	kfree(pmp, M_HAMMER2);
}

/*
 * Remove all references to hmp from the pfs list.  Any PFS which becomes
 * empty is terminated and freed.
 */
static void
hammer2_pfsfree_scan(hammer2_dev_t *hmp)
{
	hammer2_pfs_t *pmp;
	hammer2_inode_t *iroot;
	hammer2_cluster_t *cluster;
	hammer2_chain_t *rchain;
	int didfreeze;
	int i;
	int j;

again:
	TAILQ_FOREACH(pmp, &hammer2_pfslist, mntentry) {
		if ((iroot = pmp->iroot) == NULL)
			continue;
		if (hmp->spmp == pmp) {
			kprintf("unmount hmp %p remove spmp %p\n",
				hmp, pmp);
			hmp->spmp = NULL;
			continue;
		}

		/*
		 * Determine if this PFS is affected.  If it is we must
		 * freeze all management threads and lock its iroot.
		 *
		 * Freezing a management thread forces it idle, operations
		 * in-progress will be aborted and it will have to start
		 * over again when unfrozen, or exit if told to exit.
		 */
		cluster = &iroot->cluster;
		for (i = 0; i < cluster->nchains; ++i) {
			rchain = cluster->array[i].chain;
			if (rchain == NULL || rchain->hmp != hmp)
				continue;
			break;
		}
		if (i != cluster->nchains) {
			/*
			 * Make sure all synchronization threads are locked
			 * down.
			 */
			for (i = 0; i < iroot->cluster.nchains; ++i) {
				hammer2_thr_freeze_async(&pmp->sync_thrs[i]);
				for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
					hammer2_thr_freeze_async(
						&pmp->xop_groups[j].thrs[i]);
				}
			}
			for (i = 0; i < iroot->cluster.nchains; ++i) {
				hammer2_thr_freeze(&pmp->sync_thrs[i]);
				for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
					hammer2_thr_freeze(
						&pmp->xop_groups[j].thrs[i]);
				}
			}

			/*
			 * Lock the inode and clean out matching chains.
			 * Note that we cannot use hammer2_inode_lock_*()
			 * here because that would attempt to validate the
			 * cluster that we are in the middle of ripping
			 * apart.
			 *
			 * WARNING! We are working directly on the inodes
			 *	    embedded cluster.
			 */
			hammer2_mtx_ex(&iroot->lock);

			/*
			 * Remove the chain from matching elements of the PFS.
			 */
			for (i = 0; i < cluster->nchains; ++i) {
				rchain = cluster->array[i].chain;
				if (rchain == NULL || rchain->hmp != hmp)
					continue;
				hammer2_thr_delete(&pmp->sync_thrs[i]);
				for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
					hammer2_thr_delete(
						&pmp->xop_groups[j].thrs[i]);
				}
				rchain = cluster->array[i].chain;
				cluster->array[i].chain = NULL;
				pmp->pfs_types[i] = 0;
				if (pmp->pfs_names[i]) {
					kfree(pmp->pfs_names[i], M_HAMMER2);
					pmp->pfs_names[i] = NULL;
				}
				hammer2_chain_drop(rchain);

				if (cluster->focus == rchain)
					cluster->focus = NULL;
			}
			hammer2_mtx_unlock(&iroot->lock);
			didfreeze = 1;	/* remaster, unfreeze down below */
		} else {
			didfreeze = 0;
		}

		/*
		 * Cleanup trailing chains.  Do not reorder chains (for now).
		 * XXX might remove more than we intended.
		 */
		while (i > 0) {
			if (cluster->array[i - 1].chain)
				break;
			--i;
		}
		cluster->nchains = i;

		/*
		 * If the PMP has no elements remaining we can destroy it.
		 * (this will transition management threads from frozen->exit).
		 */
		if (cluster->nchains == 0) {
			kprintf("unmount hmp %p last ref to PMP=%p\n",
				hmp, pmp);
			hammer2_pfsfree(pmp);
			goto again;
		}

		/*
		 * If elements still remain we need to set the REMASTER
		 * flag and unfreeze it.
		 */
		if (didfreeze) {
			for (i = 0; i < iroot->cluster.nchains; ++i) {
				hammer2_thr_remaster(&pmp->sync_thrs[i]);
				hammer2_thr_unfreeze(&pmp->sync_thrs[i]);
				for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
					hammer2_thr_remaster(
						&pmp->xop_groups[j].thrs[i]);
					hammer2_thr_unfreeze(
						&pmp->xop_groups[j].thrs[i]);
				}
			}
		}
	}
}

/*
 * Mount or remount HAMMER2 filesystem from physical media
 *
 *	mountroot
 *		mp		mount point structure
 *		path		NULL
 *		data		<unused>
 *		cred		<unused>
 *
 *	mount
 *		mp		mount point structure
 *		path		path to mount point
 *		data		pointer to argument structure in user space
 *			volume	volume path (device@LABEL form)
 *			hflags	user mount flags
 *		cred		user credentials
 *
 * RETURNS:	0	Success
 *		!0	error number
 */
static
int
hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
		  struct ucred *cred)
{
	struct hammer2_mount_info info;
	hammer2_pfs_t *pmp;
	hammer2_pfs_t *spmp;
	hammer2_dev_t *hmp;
	hammer2_key_t key_next;
	hammer2_key_t key_dummy;
	hammer2_key_t lhc;
	struct vnode *devvp;
	struct nlookupdata nd;
	hammer2_chain_t *parent;
	hammer2_cluster_t *cluster;
	hammer2_cluster_t *cparent;
	const hammer2_inode_data_t *ripdata;
	hammer2_blockref_t bref;
	struct file *fp;
	char devstr[MNAMELEN];
	size_t size;
	size_t done;
	char *dev;
	char *label;
	int ronly = 1;
	int error;
	int cache_index;
	int i;

	hmp = NULL;
	pmp = NULL;
	dev = NULL;
	label = NULL;
	devvp = NULL;
	cache_index = -1;

	kprintf("hammer2_mount\n");

	if (path == NULL) {
		/*
		 * Root mount
		 */
		bzero(&info, sizeof(info));
		info.cluster_fd = -1;
		return (EOPNOTSUPP);
	}

	/*
	 * Non-root mount or updating a mount
	 */
	error = copyin(data, &info, sizeof(info));
	if (error)
		return (error);

	error = copyinstr(info.volume, devstr, MNAMELEN - 1, &done);
	if (error)
		return (error);

	/* Extract device and label */
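	/* (illustrative form: "/dev/ad0s1a@LOCAL"; assumed example) */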
	dev = devstr;
	label = strchr(devstr, '@');
	if (label == NULL ||
	    ((label + 1) - dev) > done) {
		return (EINVAL);
	}
	*label = '\0';
	label++;
	if (*label == '\0')
		return (EINVAL);

	if (mp->mnt_flag & MNT_UPDATE) {
		/*
		 * Update mount.  Note that pmp->iroot->cluster is
		 * an inode-embedded cluster and thus cannot be
		 * directly locked.
		 *
		 * XXX HAMMER2 needs to implement NFS export via
		 *     mountctl.
		 */
		pmp = MPTOPMP(mp);
		cluster = &pmp->iroot->cluster;
		for (i = 0; i < cluster->nchains; ++i) {
			if (cluster->array[i].chain == NULL)
				continue;
			hmp = cluster->array[i].chain->hmp;
			devvp = hmp->devvp;
			error = hammer2_remount(hmp, mp, path,
						devvp, cred);
			break;
		}
		/*hammer2_inode_install_hidden(pmp);*/

		return error;
	}

	/*
	 * Lookup name and verify it refers to a block device.
	 */
	error = nlookup_init(&nd, dev, UIO_SYSSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = cache_vref(&nd.nl_nch, nd.nl_cred, &devvp);
	nlookup_done(&nd);

	if (error == 0) {
		if (vn_isdisk(devvp, &error))
			error = vfs_mountedon(devvp);
	}

	/*
	 * Determine if the device has already been mounted.  After this
	 * check hmp will be non-NULL if we are doing the second or more
	 * hammer2 mounts from the same device.
	 */
	lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);
	TAILQ_FOREACH(hmp, &hammer2_mntlist, mntentry) {
		if (hmp->devvp == devvp)
			break;
	}

	/*
	 * Open the device if this isn't a secondary mount and construct
	 * the H2 device mount (hmp).
	 */
	if (hmp == NULL) {
		hammer2_chain_t *schain;

		if (error == 0 && vcount(devvp) > 0)
			error = EBUSY;

		/*
		 * Now open the device
		 */
		if (error == 0) {
			ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);
			vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
			error = vinvalbuf(devvp, V_SAVE, 0, 0);
			if (error == 0) {
				error = VOP_OPEN(devvp,
						 ronly ? FREAD : FREAD | FWRITE,
						 FSCRED, NULL);
			}
			vn_unlock(devvp);
		}
		if (error && devvp) {
			vrele(devvp);
			devvp = NULL;
		}
		if (error) {
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			return error;
		}
		hmp = kmalloc(sizeof(*hmp), M_HAMMER2, M_WAITOK | M_ZERO);
		ksnprintf(hmp->devrepname, sizeof(hmp->devrepname), "%s", dev);
		hmp->ronly = ronly;
		hmp->devvp = devvp;
		kmalloc_create(&hmp->mchain, "HAMMER2-chains");
		TAILQ_INSERT_TAIL(&hammer2_mntlist, hmp, mntentry);
		RB_INIT(&hmp->iotree);
		spin_init(&hmp->io_spin, "hm2mount_io");
		spin_init(&hmp->list_spin, "hm2mount_list");
		TAILQ_INIT(&hmp->flushq);

		lockinit(&hmp->vollk, "h2vol", 0, 0);

		/*
		 * vchain setup.  vchain.data is embedded.
		 * vchain.refs is initialized and will never drop to 0.
		 *
		 * NOTE! voldata is not yet loaded.
		 */
		hmp->vchain.hmp = hmp;
		hmp->vchain.refs = 1;
		hmp->vchain.data = (void *)&hmp->voldata;
		hmp->vchain.bref.type = HAMMER2_BREF_TYPE_VOLUME;
		hmp->vchain.bref.data_off = 0 | HAMMER2_PBUFRADIX;
		hmp->vchain.bref.mirror_tid = hmp->voldata.mirror_tid;

		hammer2_chain_core_init(&hmp->vchain);
		/* hmp->vchain.u.xxx is left NULL */

		/*
		 * fchain setup.  fchain.data is embedded.
		 * fchain.refs is initialized and will never drop to 0.
		 *
		 * The data is not used but needs to be initialized to
		 * pass assertion muster.  We use this chain primarily
		 * as a placeholder for the freemap's top-level RBTREE
		 * so it does not interfere with the volume's topology
		 * RBTREE.
		 */
		hmp->fchain.hmp = hmp;
		hmp->fchain.refs = 1;
		hmp->fchain.data = (void *)&hmp->voldata.freemap_blockset;
		hmp->fchain.bref.type = HAMMER2_BREF_TYPE_FREEMAP;
		hmp->fchain.bref.data_off = 0 | HAMMER2_PBUFRADIX;
		hmp->fchain.bref.mirror_tid = hmp->voldata.freemap_tid;
		hmp->fchain.bref.methods =
			HAMMER2_ENC_CHECK(HAMMER2_CHECK_FREEMAP) |
			HAMMER2_ENC_COMP(HAMMER2_COMP_NONE);

		hammer2_chain_core_init(&hmp->fchain);
		/* hmp->fchain.u.xxx is left NULL */

		/*
		 * Install the volume header and initialize fields from
		 * voldata.
		 */
		error = hammer2_install_volume_header(hmp);
		if (error) {
			hammer2_unmount_helper(mp, NULL, hmp);
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			hammer2_vfs_unmount(mp, MNT_FORCE);
			return error;
		}

		/*
		 * Really important to get these right or flush will get
		 * confused.
		 */
		hmp->spmp = hammer2_pfsalloc(NULL, NULL, 0);
		kprintf("alloc spmp %p tid %016jx\n",
			hmp->spmp, hmp->voldata.mirror_tid);
		spmp = hmp->spmp;

		/*
		 * Dummy-up vchain and fchain's modify_tid.  mirror_tid
		 * is inherited from the volume header.
		 */
		hmp->vchain.bref.mirror_tid = hmp->voldata.mirror_tid;
		hmp->vchain.bref.modify_tid = hmp->vchain.bref.mirror_tid;
		hmp->vchain.pmp = spmp;
		hmp->fchain.bref.mirror_tid = hmp->voldata.freemap_tid;
		hmp->fchain.bref.modify_tid = hmp->fchain.bref.mirror_tid;
		hmp->fchain.pmp = spmp;

		/*
		 * First locate the super-root inode, which is key 0
		 * relative to the volume header's blockset.
		 *
		 * Then locate the root inode by scanning the directory keyspace
		 * represented by the label.
		 */
		parent = hammer2_chain_lookup_init(&hmp->vchain, 0);
		schain = hammer2_chain_lookup(&parent, &key_dummy,
				      HAMMER2_SROOT_KEY, HAMMER2_SROOT_KEY,
				      &cache_index, 0);
		hammer2_chain_lookup_done(parent);
		if (schain == NULL) {
			kprintf("hammer2_mount: invalid super-root\n");
			hammer2_unmount_helper(mp, NULL, hmp);
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			hammer2_vfs_unmount(mp, MNT_FORCE);
			return EINVAL;
		}
		if (schain->error) {
			kprintf("hammer2_mount: error %s reading super-root\n",
				hammer2_error_str(schain->error));
			hammer2_chain_unlock(schain);
			hammer2_chain_drop(schain);
			hammer2_unmount_helper(mp, NULL, hmp);
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			hammer2_vfs_unmount(mp, MNT_FORCE);
			return EINVAL;
		}

		/*
		 * The super-root always uses an inode_tid of 1 when
		 * creating PFSs.
		 */
		spmp->inode_tid = 1;
		spmp->modify_tid = schain->bref.modify_tid;

		/*
		 * Sanity-check schain's pmp and finish initialization.
		 * Any chain belonging to the super-root topology should
		 * have a NULL pmp (not even set to spmp).
		 */
		ripdata = &hammer2_chain_rdata(schain)->ipdata;
		KKASSERT(schain->pmp == NULL);
		spmp->pfs_clid = ripdata->meta.pfs_clid;

		/*
		 * Replace the dummy spmp->iroot with a real one.  It's
		 * easier to just do a wholesale replacement than to try
		 * to update the chain and fixup the iroot fields.
		 *
		 * The returned inode is locked with the supplied cluster.
		 */
		cluster = hammer2_cluster_from_chain(schain);
		hammer2_inode_drop(spmp->iroot);
		spmp->iroot = hammer2_inode_get(spmp, NULL, cluster);
		spmp->spmp_hmp = hmp;
		spmp->pfs_types[0] = ripdata->meta.pfs_type;
		hammer2_inode_ref(spmp->iroot);
		hammer2_inode_unlock(spmp->iroot, cluster);
		/* leave spmp->iroot with one ref */

		if ((mp->mnt_flag & MNT_RDONLY) == 0) {
			error = hammer2_recovery(hmp);
			/* XXX do something with error */
		}
		hammer2_update_pmps(hmp);
		hammer2_iocom_init(hmp);

		/*
		 * Ref the cluster management messaging descriptor.  The mount
		 * program deals with the other end of the communications pipe.
		 */
		fp = holdfp(curproc->p_fd, info.cluster_fd, -1);
		if (fp) {
			hammer2_cluster_reconnect(hmp, fp);
		} else {
			kprintf("hammer2_mount: bad cluster_fd!\n");
		}
	} else {
		spmp = hmp->spmp;
	}

	/*
	 * Lookup the mount point under the media-localized super-root.
	 * Scanning hammer2_pfslist doesn't help us because it represents
	 * PFS cluster ids which can aggregate several named PFSs together.
	 *
	 * cluster->pmp will incorrectly point to spmp and must be fixed
	 * up later on.
	 */
	hammer2_inode_lock(spmp->iroot, HAMMER2_RESOLVE_ALWAYS);
	cparent = hammer2_inode_cluster(spmp->iroot, HAMMER2_RESOLVE_ALWAYS);
	lhc = hammer2_dirhash(label, strlen(label));
	cluster = hammer2_cluster_lookup(cparent, &key_next,
				      lhc, lhc + HAMMER2_DIRHASH_LOMASK,
				      0);
	while (cluster) {
		if (hammer2_cluster_type(cluster) == HAMMER2_BREF_TYPE_INODE &&
		    strcmp(label,
			   hammer2_cluster_rdata(cluster)->ipdata.filename) == 0) {
			break;
		}
		cluster = hammer2_cluster_next(cparent, cluster, &key_next,
					       key_next,
					       lhc + HAMMER2_DIRHASH_LOMASK, 0);
	}
	hammer2_inode_unlock(spmp->iroot, cparent);

	/*
	 * PFS could not be found?
	 */
	if (cluster == NULL) {
		kprintf("hammer2_mount: PFS label not found\n");
		hammer2_unmount_helper(mp, NULL, hmp);
		lockmgr(&hammer2_mntlk, LK_RELEASE);
		hammer2_vfs_unmount(mp, MNT_FORCE);

		return EINVAL;
	}

	/*
	 * Acquire the pmp structure (it should have already been allocated
	 * via hammer2_update_pmps() so do not pass cluster in to add to
	 * available chains).
	 *
	 * Check if the cluster has already been mounted.  A cluster can
	 * only be mounted once, use null mounts to mount additional copies.
	 */
	ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
	hammer2_cluster_bref(cluster, &bref);
	pmp = hammer2_pfsalloc(NULL, ripdata, bref.modify_tid);
	hammer2_cluster_unlock(cluster);
	hammer2_cluster_drop(cluster);

	if (pmp->mp) {
		kprintf("hammer2_mount: PFS already mounted!\n");
		hammer2_unmount_helper(mp, NULL, hmp);
		lockmgr(&hammer2_mntlk, LK_RELEASE);
		hammer2_vfs_unmount(mp, MNT_FORCE);

		return EBUSY;
	}

	kprintf("hammer2_mount hmp=%p pmp=%p\n", hmp, pmp);

	mp->mnt_flag = MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;	/* all entry pts are SMP */
	mp->mnt_kern_flag |= MNTK_THR_SYNC;	/* new vsyncscan semantics */

	/*
	 * required mount structure initializations
	 */
	mp->mnt_stat.f_iosize = HAMMER2_PBUFSIZE;
	mp->mnt_stat.f_bsize = HAMMER2_PBUFSIZE;

	mp->mnt_vstat.f_frsize = HAMMER2_PBUFSIZE;
	mp->mnt_vstat.f_bsize = HAMMER2_PBUFSIZE;

	/*
	 * Optional fields
	 */
	mp->mnt_iosize_max = MAXPHYS;

	/*
	 * Connect up mount pointers.
	 */
	hammer2_mount_helper(mp, pmp);

	lockmgr(&hammer2_mntlk, LK_RELEASE);

	/*
	 * A mounted PFS needs a write thread for logical buffers and
	 * a hidden directory for deletions of open files.  These features
	 * are not used by unmounted PFSs.
	 *
	 * The logical file buffer bio write thread handles things like
	 * physical block assignment and compression.
	 */
	pmp->wthread_destroy = 0;
	lwkt_create(hammer2_write_thread, pmp,
		    &pmp->wthread_td, NULL, 0, -1, "h2pfs-%s", label);

	/*
	 * With the cluster operational install ihidden.
	 * (only applicable to pfs mounts, not applicable to spmp)
	 */
	hammer2_inode_install_hidden(pmp);

	/*
	 * Finish setup
	 */
	vfs_getnewfsid(mp);
	vfs_add_vnodeops(mp, &hammer2_vnode_vops, &mp->mnt_vn_norm_ops);
	vfs_add_vnodeops(mp, &hammer2_spec_vops, &mp->mnt_vn_spec_ops);
	vfs_add_vnodeops(mp, &hammer2_fifo_vops, &mp->mnt_vn_fifo_ops);

	copyinstr(info.volume, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, &size);
	bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	bzero(mp->mnt_stat.f_mntonname, sizeof(mp->mnt_stat.f_mntonname));
	copyinstr(path, mp->mnt_stat.f_mntonname,
		  sizeof(mp->mnt_stat.f_mntonname) - 1,
		  &size);

	/*
	 * Initial statfs to prime mnt_stat.
	 */
	hammer2_vfs_statfs(mp, &mp->mnt_stat, cred);

	return 0;
}

/*
 * Scan PFSs under the super-root and create hammer2_pfs structures.
 */
static
void
hammer2_update_pmps(hammer2_dev_t *hmp)
{
	const hammer2_inode_data_t *ripdata;
	hammer2_cluster_t *cparent;
	hammer2_cluster_t *cluster;
	hammer2_blockref_t bref;
	hammer2_pfs_t *spmp;
	hammer2_pfs_t *pmp;
	hammer2_key_t key_next;

	/*
	 * Lookup mount point under the media-localized super-root.
	 *
	 * cluster->pmp will incorrectly point to spmp and must be fixed
	 * up later on.
	 */
	spmp = hmp->spmp;
	hammer2_inode_lock(spmp->iroot, HAMMER2_RESOLVE_ALWAYS);
	cparent = hammer2_inode_cluster(spmp->iroot, HAMMER2_RESOLVE_ALWAYS);
	cluster = hammer2_cluster_lookup(cparent, &key_next,
					 HAMMER2_KEY_MIN, HAMMER2_KEY_MAX,
					 0);
	while (cluster) {
		if (hammer2_cluster_type(cluster) == HAMMER2_BREF_TYPE_INODE) {
			ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
			hammer2_cluster_bref(cluster, &bref);
			kprintf("ADD LOCAL PFS: %s\n", ripdata->filename);

			pmp = hammer2_pfsalloc(cluster, ripdata,
					       bref.modify_tid);
		}
		cluster = hammer2_cluster_next(cparent, cluster,
					       &key_next,
					       key_next,
					       HAMMER2_KEY_MAX,
					       0);
	}
	hammer2_inode_unlock(spmp->iroot, cparent);
}

static
int
hammer2_remount(hammer2_dev_t *hmp, struct mount *mp, char *path,
		struct vnode *devvp, struct ucred *cred)
{
	int error;

	if (hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
		error = hammer2_recovery(hmp);
	} else {
		error = 0;
	}
	return error;
}

static
int
hammer2_vfs_unmount(struct mount *mp, int mntflags)
{
	hammer2_pfs_t *pmp;
	int flags;
	int error = 0;

	pmp = MPTOPMP(mp);

	if (pmp == NULL)
		return(0);

	lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);

	/*
	 * If mount initialization proceeded far enough we must flush
	 * its vnodes and sync the underlying mount points.  Three syncs
	 * are required to fully flush the filesystem (freemap updates lag
	 * by one flush, and one extra for safety).
	 */
	if (mntflags & MNT_FORCE)
		flags = FORCECLOSE;
	else
		flags = 0;
	error = vflush(mp, 0, flags);

	hammer2_vfs_sync(mp, MNT_WAIT);
	hammer2_vfs_sync(mp, MNT_WAIT);
	hammer2_vfs_sync(mp, MNT_WAIT);

	if (pmp->wthread_td) {
		hammer2_mtx_ex(&pmp->wthread_mtx);
		pmp->wthread_destroy = 1;
		wakeup(&pmp->wthread_bioq);
		while (pmp->wthread_destroy != -1) {
			mtxsleep(&pmp->wthread_destroy,
				 &pmp->wthread_mtx, 0,
				 "umount", 0);
		}
		hammer2_mtx_unlock(&pmp->wthread_mtx);
		pmp->wthread_td = NULL;
	}

	/*
	 * Cleanup the frontend support XOPS threads
	 */
	hammer2_xop_helper_cleanup(pmp);

	/*
	 * Cleanup our reference on ihidden.
	 */
	if (pmp->ihidden) {
		hammer2_inode_drop(pmp->ihidden);
		pmp->ihidden = NULL;
	}

	hammer2_unmount_helper(mp, pmp, NULL);

	lockmgr(&hammer2_mntlk, LK_RELEASE);

	return (error);
}

/*
 * Mount helper, hook the system mount into our PFS.
 * The mount lock is held.
 *
 * We must bump the mount_count on related devices for any
 * mounted PFSs.
 */
static
void
hammer2_mount_helper(struct mount *mp, hammer2_pfs_t *pmp)
{
	hammer2_cluster_t *cluster;
	hammer2_chain_t *rchain;
	int i;

	mp->mnt_data = (qaddr_t)pmp;
	pmp->mp = mp;

	/*
	 * After pmp->mp is set we have to adjust hmp->mount_count.
	 */
	cluster = &pmp->iroot->cluster;
	for (i = 0; i < cluster->nchains; ++i) {
		rchain = cluster->array[i].chain;
		if (rchain == NULL)
			continue;
		++rchain->hmp->mount_count;
		kprintf("hammer2_mount hmp=%p ++mount_count=%d\n",
			rchain->hmp, rchain->hmp->mount_count);
	}

	/*
	 * Create missing Xop threads
	 */
	hammer2_xop_helper_create(pmp);
}

/*
 * Mount helper, unhook the system mount from our PFS.
 * The mount lock is held.
 *
 * If hmp is supplied a mount responsible for being the first to open
 * the block device failed and the block device and all PFSs using the
 * block device must be cleaned up.
 *
 * If pmp is supplied multiple devices might be backing the PFS and each
 * must be disconnected.  This might not be the last PFS using some of the
 * underlying devices.  Also, we have to adjust our hmp->mount_count
 * accounting for the devices backing the pmp which is now undergoing an
 * unmount.
 */
static
void
hammer2_unmount_helper(struct mount *mp, hammer2_pfs_t *pmp, hammer2_dev_t *hmp)
{
	hammer2_cluster_t *cluster;
	hammer2_chain_t *rchain;
	struct vnode *devvp;
	int dumpcnt;
	int ronly = 0;
	int i;

	/*
	 * If no device supplied this is a high-level unmount and we have to
	 * disconnect the mount, adjust mount_count, and locate devices
	 * that might now have no mounts.
	 */
	if (pmp) {
		KKASSERT(hmp == NULL);
		KKASSERT((void *)(intptr_t)mp->mnt_data == pmp);
		pmp->mp = NULL;
		mp->mnt_data = NULL;

		/*
		 * After pmp->mp is cleared we have to account for
		 * mount_count.
		 */
		cluster = &pmp->iroot->cluster;
		for (i = 0; i < cluster->nchains; ++i) {
			rchain = cluster->array[i].chain;
			if (rchain == NULL)
				continue;
			--rchain->hmp->mount_count;
			kprintf("hammer2_unmount hmp=%p --mount_count=%d\n",
				rchain->hmp, rchain->hmp->mount_count);
			/* scrapping hmp now may invalidate the pmp */
		}
again:
		TAILQ_FOREACH(hmp, &hammer2_mntlist, mntentry) {
			if (hmp->mount_count == 0) {
				hammer2_unmount_helper(NULL, NULL, hmp);
				goto again;
			}
		}
		return;
	}

	/*
	 * Try to terminate the block device.  We can't terminate it if
	 * there are still PFSs referencing it.
	 */
	kprintf("hammer2_unmount hmp=%p mount_count=%d\n",
		hmp, hmp->mount_count);
	if (hmp->mount_count)
		return;

	hammer2_pfsfree_scan(hmp);
	hammer2_dev_exlock(hmp);	/* XXX order */

	/*
	 * Cycle the volume data lock as a safety (probably not needed any
	 * more).  To ensure everything is out we need to flush at least
	 * three times.  (1) The running of the unlinkq can dirty the
	 * filesystem, (2) A normal flush can dirty the freemap, and
	 * (3) ensure that the freemap is fully synchronized.
	 *
	 * The next mount's recovery scan can clean everything up but we want
	 * to leave the filesystem in a 100% clean state on a normal unmount.
	 */
	hammer2_voldata_lock(hmp);
	hammer2_voldata_unlock(hmp);

	hammer2_iocom_uninit(hmp);

	if ((hmp->vchain.flags | hmp->fchain.flags) &
	    HAMMER2_CHAIN_FLUSH_MASK) {
		kprintf("hammer2_unmount: chains left over "
			"after final sync\n");
		kprintf("    vchain %08x\n", hmp->vchain.flags);
		kprintf("    fchain %08x\n", hmp->fchain.flags);

		if (hammer2_debug & 0x0010)
			Debugger("entered debugger");
	}

	KKASSERT(hmp->spmp == NULL);

	/*
	 * Finish up with the device vnode
	 */
	if ((devvp = hmp->devvp) != NULL) {
		ronly = hmp->ronly;
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		vinvalbuf(devvp, (ronly ? 0 : V_SAVE), 0, 0);
		hmp->devvp = NULL;
		VOP_CLOSE(devvp, (ronly ? FREAD : FREAD|FWRITE), NULL);
		vn_unlock(devvp);
		vrele(devvp);
		devvp = NULL;
	}

	/*
	 * Clear vchain/fchain flags that might prevent final cleanup
	 * of these chains.
	 */
	if (hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED) {
		atomic_clear_int(&hmp->vchain.flags,
				 HAMMER2_CHAIN_MODIFIED);
		hammer2_pfs_memory_wakeup(hmp->vchain.pmp);
		hammer2_chain_drop(&hmp->vchain);
	}
	if (hmp->vchain.flags & HAMMER2_CHAIN_UPDATE) {
		atomic_clear_int(&hmp->vchain.flags,
				 HAMMER2_CHAIN_UPDATE);
		hammer2_chain_drop(&hmp->vchain);
	}

	if (hmp->fchain.flags & HAMMER2_CHAIN_MODIFIED) {
		atomic_clear_int(&hmp->fchain.flags,
				 HAMMER2_CHAIN_MODIFIED);
		hammer2_pfs_memory_wakeup(hmp->fchain.pmp);
		hammer2_chain_drop(&hmp->fchain);
	}
	if (hmp->fchain.flags & HAMMER2_CHAIN_UPDATE) {
		atomic_clear_int(&hmp->fchain.flags,
				 HAMMER2_CHAIN_UPDATE);
		hammer2_chain_drop(&hmp->fchain);
	}

	/*
	 * Final drop of embedded freemap root chain to
	 * clean up fchain.core (fchain structure is not
	 * flagged ALLOCATED so it is cleaned out and then
	 * left to rot).
	 */
	hammer2_chain_drop(&hmp->fchain);

	/*
	 * Final drop of embedded volume root chain to clean
	 * up vchain.core (vchain structure is not flagged
	 * ALLOCATED so it is cleaned out and then left to
	 * rot).
	 */
	dumpcnt = 50;
	hammer2_dump_chain(&hmp->vchain, 0, &dumpcnt, 'v');
	dumpcnt = 50;
	hammer2_dump_chain(&hmp->fchain, 0, &dumpcnt, 'f');
	hammer2_dev_unlock(hmp);
	hammer2_chain_drop(&hmp->vchain);

	hammer2_io_cleanup(hmp, &hmp->iotree);
	if (hmp->iofree_count) {
		kprintf("io_cleanup: %d I/O's left hanging\n",
			hmp->iofree_count);
	}

	TAILQ_REMOVE(&hammer2_mntlist, hmp, mntentry);
	kmalloc_destroy(&hmp->mchain);
	kfree(hmp, M_HAMMER2);
}

static
int
hammer2_vfs_vget(struct mount *mp, struct vnode *dvp,
		 ino_t ino, struct vnode **vpp)
{
	kprintf("hammer2_vget\n");
	return (EOPNOTSUPP);
}

static
int
hammer2_vfs_root(struct mount *mp, struct vnode **vpp)
{
	hammer2_pfs_t *pmp;
	hammer2_cluster_t *cparent;
	struct vnode *vp;
	int error;

	pmp = MPTOPMP(mp);
	if (pmp->iroot == NULL) {
		*vpp = NULL;
		return EINVAL;
	}

	hammer2_inode_lock(pmp->iroot, HAMMER2_RESOLVE_ALWAYS |
				       HAMMER2_RESOLVE_SHARED);
	cparent = hammer2_inode_cluster(pmp->iroot,
					HAMMER2_RESOLVE_ALWAYS |
					HAMMER2_RESOLVE_SHARED);

	/*
	 * Initialize pmp->inode_tid and pmp->modify_tid on first access
	 * to the root of mount that resolves good.
	 * XXX probably not the best place for this.
	 */
	if (pmp->inode_tid == 0 &&
	    cparent->error == 0 && cparent->focus) {
		const hammer2_inode_data_t *ripdata;
		hammer2_blockref_t bref;

		ripdata = &hammer2_cluster_rdata(cparent)->ipdata;
		hammer2_cluster_bref(cparent, &bref);
		pmp->inode_tid = ripdata->meta.pfs_inum + 1;
		pmp->modify_tid = bref.modify_tid;
		pmp->iroot->meta = ripdata->meta;
		hammer2_cluster_bref(cparent, &pmp->iroot->bref);
		kprintf("PMP focus good set nextino=%ld mod=%016jx\n",
			pmp->inode_tid, pmp->modify_tid);
	}

	vp = hammer2_igetv(pmp->iroot, cparent, &error);
	hammer2_inode_unlock(pmp->iroot, cparent);
	*vpp = vp;
	if (vp == NULL)
		kprintf("vnodefail\n");

	return (error);
}

/*
 * Filesystem status
 *
 * XXX incorporate ipdata->meta.inode_quota and data_quota
 */
static
int
hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
{
	hammer2_pfs_t *pmp;
	hammer2_dev_t *hmp;
	hammer2_blockref_t bref;

	pmp = MPTOPMP(mp);
	KKASSERT(pmp->iroot->cluster.nchains >= 1);
	hmp = pmp->iroot->cluster.focus->hmp;	/* iroot retains focus */
	bref = pmp->iroot->cluster.focus->bref;	/* no lock */

	mp->mnt_stat.f_files = bref.inode_count;
	mp->mnt_stat.f_ffree = 0;
	mp->mnt_stat.f_blocks = (bref.data_count +
				 hmp->voldata.allocator_free) /
				mp->mnt_vstat.f_bsize;
	mp->mnt_stat.f_bfree = hmp->voldata.allocator_free /
				mp->mnt_vstat.f_bsize;
	mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree;

	*sbp = mp->mnt_stat;
	return (0);
}

static
int
hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
{
	hammer2_pfs_t *pmp;
	hammer2_dev_t *hmp;
	hammer2_blockref_t bref;

	pmp = MPTOPMP(mp);
	KKASSERT(pmp->iroot->cluster.nchains >= 1);
	hmp = pmp->iroot->cluster.focus->hmp;	/* iroot retains focus */
	bref = pmp->iroot->cluster.focus->bref;	/* no lock */

	mp->mnt_vstat.f_bsize = HAMMER2_PBUFSIZE;
	mp->mnt_vstat.f_files = bref.inode_count;
	mp->mnt_vstat.f_ffree = 0;
	mp->mnt_vstat.f_blocks = (bref.data_count +
				  hmp->voldata.allocator_free) /
				 mp->mnt_vstat.f_bsize;
	mp->mnt_vstat.f_bfree = hmp->voldata.allocator_free /
				 mp->mnt_vstat.f_bsize;
	mp->mnt_vstat.f_bavail = mp->mnt_vstat.f_bfree;

	*sbp = mp->mnt_vstat;
	return (0);
}

/*
 * Mount-time recovery (RW mounts)
 *
 * Updates to the free block table are allowed to lag flushes by one
 * transaction.  In case of a crash, then on a fresh mount we must do an
 * incremental scan of the last committed transaction id and make sure that
 * all related blocks have been marked allocated.
 *
 * The super-root topology and each PFS has its own transaction id domain,
 * so we must track PFS boundary transitions.
 */
struct hammer2_recovery_elm {
	TAILQ_ENTRY(hammer2_recovery_elm) entry;
	hammer2_chain_t *chain;
	hammer2_tid_t sync_tid;
};

TAILQ_HEAD(hammer2_recovery_list, hammer2_recovery_elm);

struct hammer2_recovery_info {
	struct hammer2_recovery_list list;
	int depth;
};

static int hammer2_recovery_scan(hammer2_trans_t *trans, hammer2_dev_t *hmp,
			hammer2_chain_t *parent,
			struct hammer2_recovery_info *info,
			hammer2_tid_t sync_tid);

#define HAMMER2_RECOVERY_MAXDEPTH	10
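
/*
 * (The depth cap causes hammer2_recovery_scan() to queue deeper subtrees
 * on info->list rather than recursing further, bounding kernel stack use;
 * hammer2_recovery() then drains the list iteratively.)
 */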

static
int
hammer2_recovery(hammer2_dev_t *hmp)
{
	hammer2_trans_t trans;
	struct hammer2_recovery_info info;
	struct hammer2_recovery_elm *elm;
	hammer2_chain_t *parent;
	hammer2_tid_t sync_tid;
	hammer2_tid_t mirror_tid;
	int error;
	int cumulative_error = 0;

	hammer2_trans_init(&trans, hmp->spmp, 0);

	sync_tid = hmp->voldata.freemap_tid;
	mirror_tid = hmp->voldata.mirror_tid;

	kprintf("hammer2 mount \"%s\": ", hmp->devrepname);
	if (sync_tid >= mirror_tid) {
		kprintf(" no recovery needed\n");
	} else {
		kprintf(" freemap recovery %016jx-%016jx\n",
			sync_tid + 1, mirror_tid);
	}

	TAILQ_INIT(&info.list);
	info.depth = 0;
	parent = hammer2_chain_lookup_init(&hmp->vchain, 0);
	cumulative_error = hammer2_recovery_scan(&trans, hmp, parent,
						 &info, sync_tid);
	hammer2_chain_lookup_done(parent);

	while ((elm = TAILQ_FIRST(&info.list)) != NULL) {
		TAILQ_REMOVE(&info.list, elm, entry);
		parent = elm->chain;
		sync_tid = elm->sync_tid;
		kfree(elm, M_HAMMER2);

		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
		error = hammer2_recovery_scan(&trans, hmp, parent,
					      &info,
					      hmp->voldata.freemap_tid);
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);	/* drop elm->chain ref */
		if (error)
			cumulative_error = error;
	}
	hammer2_trans_done(&trans);

	return cumulative_error;
}

static
int
hammer2_recovery_scan(hammer2_trans_t *trans, hammer2_dev_t *hmp,
		      hammer2_chain_t *parent,
		      struct hammer2_recovery_info *info,
		      hammer2_tid_t sync_tid)
{
	const hammer2_inode_data_t *ripdata;
	hammer2_chain_t *chain;
	int cache_index;
	int cumulative_error = 0;
	int error;

	/*
	 * Adjust freemap to ensure that the block(s) are marked allocated.
	 */
	if (parent->bref.type != HAMMER2_BREF_TYPE_VOLUME) {
		hammer2_freemap_adjust(trans, hmp, &parent->bref,
				       HAMMER2_FREEMAP_DORECOVER);
	}

	/*
	 * Check type for recursive scan
	 */
	switch(parent->bref.type) {
	case HAMMER2_BREF_TYPE_VOLUME:
		/* data already instantiated */
		break;
	case HAMMER2_BREF_TYPE_INODE:
		/*
		 * Must instantiate data for DIRECTDATA test and also
		 * for recursion.
		 */
		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
		ripdata = &hammer2_chain_rdata(parent)->ipdata;
		if (ripdata->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) {
			/* not applicable to recovery scan */
			hammer2_chain_unlock(parent);
			return 0;
		}
		hammer2_chain_unlock(parent);
		break;
	case HAMMER2_BREF_TYPE_INDIRECT:
		/*
		 * Must instantiate data for recursion
		 */
		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
		hammer2_chain_unlock(parent);
		break;
	case HAMMER2_BREF_TYPE_DATA:
	case HAMMER2_BREF_TYPE_FREEMAP:
	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
	case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
		/* not applicable to recovery scan */
		return 0;
	default:
		return EDOM;
	}

	/*
	 * Defer operation if depth limit reached or if we are crossing a
	 * PFS boundary.
	 */
	if (info->depth >= HAMMER2_RECOVERY_MAXDEPTH) {
		struct hammer2_recovery_elm *elm;

		elm = kmalloc(sizeof(*elm), M_HAMMER2, M_ZERO | M_WAITOK);
		elm->chain = parent;
		elm->sync_tid = sync_tid;
		hammer2_chain_ref(parent);
		TAILQ_INSERT_TAIL(&info->list, elm, entry);
		/* unlocked by caller */

		return(0);
	}

	/*
	 * Recursive scan of the last flushed transaction only.  We are
	 * doing this without pmp assignments so don't leave the chains
	 * hanging around after we are done with them.
	 */
	cache_index = 0;
	chain = hammer2_chain_scan(parent, NULL, &cache_index,
				   HAMMER2_LOOKUP_NODATA);
	while (chain) {
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_RELEASE);
		if (chain->bref.mirror_tid > sync_tid) {
			++info->depth;
			error = hammer2_recovery_scan(trans, hmp, chain,
						      info, sync_tid);
			--info->depth;
			if (error)
				cumulative_error = error;
		}

		/*
		 * Flush the recovery at the PFS boundary to stage it for
		 * the final flush of the super-root topology.
		 */
		if ((chain->bref.flags & HAMMER2_BREF_FLAG_PFSROOT) &&
		    (chain->flags & HAMMER2_CHAIN_ONFLUSH)) {
			hammer2_flush(trans, chain, 1);
		}
		chain = hammer2_chain_scan(parent, chain, &cache_index,
					   HAMMER2_LOOKUP_NODATA);
	}

	return cumulative_error;
}

/*
 * Sync a mount point; this is called on a per-mount basis from the
 * filesystem syncer process periodically and whenever a user issues
 * a sync.
 */
int
hammer2_vfs_sync(struct mount *mp, int waitfor)
{
	struct hammer2_sync_info info;
	hammer2_inode_t *iroot;
	hammer2_chain_t *chain;
	hammer2_chain_t *parent;
	hammer2_pfs_t *pmp;
	hammer2_dev_t *hmp;
	int flags;
	int error;
	int total_error;
	int i;
	int j;

	pmp = MPTOPMP(mp);
	iroot = pmp->iroot;
	KKASSERT(iroot);
	KKASSERT(iroot->pmp == pmp);

	/*
	 * We can't acquire locks on existing vnodes while in a transaction
	 * without risking a deadlock.  This assumes that vfsync() can be
	 * called without the vnode locked (which it can in DragonFly).
	 * Otherwise we'd have to implement a multi-pass or flag the lock
	 * failures and retry.
	 *
	 * The reclamation code interlocks with the sync list's token
	 * (by removing the vnode from the scan list) before unlocking
	 * the inode, giving us time to ref the inode.
	 */
	/*flags = VMSC_GETVP;*/
	flags = VMSC_GETVX;
	if (waitfor & MNT_LAZY)
		flags |= VMSC_ONEPASS;

	/*
	 * Preflush the vnodes using a normal transaction before interlocking
	 * with a flush transaction.
	 */
	hammer2_trans_init(&info.trans, pmp, 0);
	info.error = 0;
	info.waitfor = MNT_NOWAIT;
	vsyncscan(mp, flags | VMSC_NOWAIT, hammer2_sync_scan2, &info);
	hammer2_trans_done(&info.trans);

	/*
	 * Start our flush transaction.  This does not return until all
	 * concurrent transactions have completed and will prevent any
	 * new transactions from running concurrently, except for the
	 * buffer cache transactions.
	 *
	 * For efficiency do an async pass before making sure with a
	 * synchronous pass on all related buffer cache buffers.  It
	 * should theoretically not be possible for any new file buffers
	 * to be instantiated during this sequence.
	 */
	hammer2_trans_init(&info.trans, pmp, HAMMER2_TRANS_ISFLUSH |
					     HAMMER2_TRANS_PREFLUSH);
	hammer2_run_unlinkq(&info.trans, pmp);

	info.error = 0;
	info.waitfor = MNT_NOWAIT;
	vsyncscan(mp, flags | VMSC_NOWAIT, hammer2_sync_scan2, &info);
	info.waitfor = MNT_WAIT;
	vsyncscan(mp, flags, hammer2_sync_scan2, &info);

	/*
	 * Clear PREFLUSH.  This prevents (or asserts on) any new logical
	 * buffer cache flushes which occur during the flush.  Device buffers
	 * are not affected.
	 */
	hammer2_bioq_sync(info.trans.pmp);
	atomic_clear_int(&info.trans.flags, HAMMER2_TRANS_PREFLUSH);

	total_error = 0;

	/*
	 * Flush all nodes to synchronize the PFSROOT subtopology to the media.
	 *
	 * Note that this flush will not be visible on crash recovery until
	 * we flush the super-root topology in the next loop.
	 */
	for (i = 0; iroot && i < iroot->cluster.nchains; ++i) {
		chain = iroot->cluster.array[i].chain;
		if (chain == NULL)
			continue;

		hammer2_chain_ref(chain);
		hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS);
		if (chain->flags & HAMMER2_CHAIN_FLUSH_MASK) {
			hammer2_flush(&info.trans, chain, 1);
			parent = chain->parent;
			KKASSERT(chain->pmp != parent->pmp);
			hammer2_chain_setflush(&info.trans, parent);
		}
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
	hammer2_trans_done(&info.trans);

	/*
	 * Flush all volume roots to synchronize PFS flushes with the
	 * storage media volume header.  This will flush the freemap and
	 * the superroot topology but stops when it reaches a PFSROOT
	 * (which we already flushed above).
	 *
	 * This is the last step which connects the volume root to the
	 * PFSROOT dirs flushed above.
	 *
	 * Each spmp (representing the hmp's super-root) requires its own
	 * transaction.
	 */
	for (i = 0; iroot && i < iroot->cluster.nchains; ++i) {
		hammer2_chain_t *tmp;

		chain = iroot->cluster.array[i].chain;
		if (chain == NULL)
			continue;

		hmp = chain->hmp;

		/*
		 * We only have to flush each hmp once
		 */
		for (j = i - 1; j >= 0; --j) {
			if ((tmp = iroot->cluster.array[j].chain) != NULL) {
				if (tmp->hmp == hmp)
					break;
			}
		}
		if (j >= 0)
			continue;

		/*
		 * spmp transaction.  The super-root is never directly
		 * mounted so there shouldn't be any vnodes, let alone any
		 * dirty vnodes associated with it.
		 */
		hammer2_trans_init(&info.trans, hmp->spmp,
				   HAMMER2_TRANS_ISFLUSH);

		/*
		 * Media mounts have two 'roots', vchain for the topology
		 * and fchain for the free block table.  Flush both.
		 *
		 * Note that the topology and free block table are handled
		 * independently, so the free block table can wind up being
		 * ahead of the topology.  We depend on the bulk free scan
		 * code to deal with any loose ends.
		 */
		hammer2_chain_ref(&hmp->vchain);
		hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
		hammer2_chain_ref(&hmp->fchain);
		hammer2_chain_lock(&hmp->fchain, HAMMER2_RESOLVE_ALWAYS);
		if (hmp->fchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
			/*
			 * This will also modify vchain as a side effect,
			 * mark vchain as modified now.
			 */
			hammer2_voldata_modify(hmp);
			chain = &hmp->fchain;
			hammer2_flush(&info.trans, chain, 1);
			KKASSERT(chain == &hmp->fchain);
		}
		hammer2_chain_unlock(&hmp->fchain);
		hammer2_chain_unlock(&hmp->vchain);
		hammer2_chain_drop(&hmp->fchain);
		/* vchain dropped down below */

		hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
		if (hmp->vchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
			chain = &hmp->vchain;
			hammer2_flush(&info.trans, chain, 1);
			KKASSERT(chain == &hmp->vchain);
		}
		hammer2_chain_unlock(&hmp->vchain);
		hammer2_chain_drop(&hmp->vchain);

		/*
		 * We can't safely flush the volume header until we have
		 * flushed any device buffers which have built up.
		 *
		 * XXX this isn't being incremental
		 */
		vn_lock(hmp->devvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(hmp->devvp, MNT_WAIT, 0);
		vn_unlock(hmp->devvp);

		/*
		 * The flush code sets CHAIN_VOLUMESYNC to indicate that the
		 * volume header needs synchronization via hmp->volsync.
		 *
		 * XXX synchronize the flag & data with only this flush XXX
		 */
		if (error == 0 &&
		    (hmp->vchain.flags & HAMMER2_CHAIN_VOLUMESYNC)) {
			struct buf *bp;

			/*
			 * Synchronize the disk before flushing the volume
			 * header.
			 */
			bp = getpbuf(NULL);
			bp->b_bio1.bio_offset = 0;
			bp->b_bufsize = 0;
			bp->b_bcount = 0;
			bp->b_cmd = BUF_CMD_FLUSH;
			bp->b_bio1.bio_done = biodone_sync;
			bp->b_bio1.bio_flags |= BIO_SYNC;
			vn_strategy(hmp->devvp, &bp->b_bio1);
			biowait(&bp->b_bio1, "h2vol");
			relpbuf(bp, NULL);

			/*
			 * Then we can safely flush the version of the
			 * volume header synchronized by the flush code.
			 */
			i = hmp->volhdrno + 1;
			if (i >= HAMMER2_NUM_VOLHDRS)
				i = 0;
			if (i * HAMMER2_ZONE_BYTES64 + HAMMER2_SEGSIZE >
			    hmp->volsync.volu_size) {
				i = 0;
			}
			kprintf("sync volhdr %d %jd\n",
				i, (intmax_t)hmp->volsync.volu_size);
			bp = getblk(hmp->devvp, i * HAMMER2_ZONE_BYTES64,
				    HAMMER2_PBUFSIZE, 0, 0);
			atomic_clear_int(&hmp->vchain.flags,
					 HAMMER2_CHAIN_VOLUMESYNC);
			bcopy(&hmp->volsync, bp->b_data, HAMMER2_PBUFSIZE);
			bawrite(bp);
			hmp->volhdrno = i;
		}
		if (error)
			total_error = error;

		hammer2_trans_done(&info.trans);	/* spmp trans */
	}
	return (total_error);
}

static
int
hammer2_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer2_sync_info *info = data;
	hammer2_inode_t *ip;
	int error = 0;

	/*
	 * Degenerate cases.  Note that ip == NULL typically means the
	 * syncer vnode itself and we don't want to vclrisdirty() in that
	 * situation.
	 */
	ip = VTOI(vp);
	if (ip == NULL)
		return(0);
	if (vp->v_type == VNON || vp->v_type == VBAD) {
		vclrisdirty(vp);
		return(0);
	}

	/*
	 * VOP_FSYNC will start a new transaction so replicate some code
	 * here to do it inline (see hammer2_vop_fsync()).
	 *
	 * WARNING: The vfsync interacts with the buffer cache and might
	 *          block, we can't hold the inode lock at that time.
	 *          However, we MUST ref ip before blocking to ensure that
	 *          it isn't ripped out from under us (since we do not
	 *          hold a lock on the vnode).
	 */
	hammer2_inode_ref(ip);
	if ((ip->flags & HAMMER2_INODE_MODIFIED) ||
	    !RB_EMPTY(&vp->v_rbdirty_tree)) {
		vfsync(vp, info->waitfor, 1, NULL, NULL);
		hammer2_inode_fsync(&info->trans, ip, NULL);
	}
	if ((ip->flags & HAMMER2_INODE_MODIFIED) == 0 &&
	    RB_EMPTY(&vp->v_rbdirty_tree)) {
		vclrisdirty(vp);
	}

	hammer2_inode_drop(ip);

	if (error)
		info->error = error;
	return(0);
}

static
int
hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp)
{
	return (EOPNOTSUPP);
}

static
int
hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
		   struct fid *fhp, struct vnode **vpp)
{
	return (EOPNOTSUPP);
}

static
int
hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
		     int *exflagsp, struct ucred **credanonp)
{
	return (EOPNOTSUPP);
}

/*
 * Support code for hammer2_vfs_mount().  Read, verify, and install the volume
 * header into the HMP.
 *
 * XXX read four volhdrs and use the one with the highest TID whose CRC
 *     matches.
 *
 * XXX For filesystems w/ less than 4 volhdrs, make sure to not write to
 *     nonexistent locations.
 *
 * XXX Record selected volhdr and ring updates to each of 4 volhdrs
 */
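
/*
 * (The header copies are laid out at HAMMER2_ZONE_BYTES64 intervals from
 * offset 0; the loop below breads each copy, CRC-checks it, and keeps the
 * one with the highest mirror_tid.)
 */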
static
int
hammer2_install_volume_header(hammer2_dev_t *hmp)
{
	hammer2_volume_data_t *vd;
	struct buf *bp;
	hammer2_crc32_t crc0, crc, bcrc0, bcrc;
	int error_reported;
	int error;
	int valid;
	int i;

	error_reported = 0;
	error = 0;
	valid = 0;
	bp = NULL;

	/*
	 * There are up to 4 copies of the volume header (syncs iterate
	 * between them so there is no single master).  We don't trust the
	 * volu_size field so we don't know precisely how large the filesystem
	 * is, so depend on the OS to return an error if we go beyond the
	 * block device's EOF.
	 */
	for (i = 0; i < HAMMER2_NUM_VOLHDRS; i++) {
		error = bread(hmp->devvp, i * HAMMER2_ZONE_BYTES64,
			      HAMMER2_VOLUME_BYTES, &bp);
		if (error) {
			brelse(bp);
			bp = NULL;
			continue;
		}

		vd = (struct hammer2_volume_data *) bp->b_data;
		if ((vd->magic != HAMMER2_VOLUME_ID_HBO) &&
		    (vd->magic != HAMMER2_VOLUME_ID_ABO)) {
			brelse(bp);
			bp = NULL;
			continue;
		}

		if (vd->magic == HAMMER2_VOLUME_ID_ABO) {
			/* XXX: Reversed-endianness filesystem */
			kprintf("hammer2: reverse-endian filesystem detected");
			brelse(bp);
			bp = NULL;
			continue;
		}

		crc = vd->icrc_sects[HAMMER2_VOL_ICRC_SECT0];
		crc0 = hammer2_icrc32(bp->b_data + HAMMER2_VOLUME_ICRC0_OFF,
				      HAMMER2_VOLUME_ICRC0_SIZE);
		bcrc = vd->icrc_sects[HAMMER2_VOL_ICRC_SECT1];
		bcrc0 = hammer2_icrc32(bp->b_data + HAMMER2_VOLUME_ICRC1_OFF,
				       HAMMER2_VOLUME_ICRC1_SIZE);
		if ((crc0 != crc) || (bcrc0 != bcrc)) {
			kprintf("hammer2 volume header crc "
				"mismatch copy #%d %08x/%08x\n",
				i, crc0, crc);
			error_reported = 1;
			brelse(bp);
			bp = NULL;
			continue;
		}
		if (valid == 0 || hmp->voldata.mirror_tid < vd->mirror_tid) {
			valid = 1;
			hmp->voldata = *vd;
			hmp->volhdrno = i;
		}
		brelse(bp);
		bp = NULL;
	}
	if (valid) {
		hmp->volsync = hmp->voldata;
		error = 0;
		if (error_reported || bootverbose || 1) { /* 1/DEBUG */
			kprintf("hammer2: using volume header #%d\n",
				hmp->volhdrno);
		}
	} else {
		error = EINVAL;
		kprintf("hammer2: no valid volume headers found!\n");
	}
	return error;
}

/*
 * This handles hysteresis on regular file flushes.  Because the BIOs are
 * routed to a thread it is possible for an excessive number to build up
 * and cause long front-end stalls long before the runningbuffspace limit
 * is hit, so we implement hammer2_flush_pipe to control the
 * hysteresis.
 *
 * This is a particular problem when compression is used.
 */
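
/*
 * (Writers block in hammer2_lwinprog_wait() once hammer2_flush_pipe BIOs
 * are in flight and are released by hammer2_lwinprog_drop() when the
 * backlog drains below 2/3 of the pipe, giving the mechanism hysteresis.)
 */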
void
hammer2_lwinprog_ref(hammer2_pfs_t *pmp)
{
	atomic_add_int(&pmp->count_lwinprog, 1);
}

void
hammer2_lwinprog_drop(hammer2_pfs_t *pmp)
{
	int lwinprog;

	lwinprog = atomic_fetchadd_int(&pmp->count_lwinprog, -1);
	if ((lwinprog & HAMMER2_LWINPROG_WAITING) &&
	    (lwinprog & HAMMER2_LWINPROG_MASK) <= hammer2_flush_pipe * 2 / 3) {
		atomic_clear_int(&pmp->count_lwinprog,
				 HAMMER2_LWINPROG_WAITING);
		wakeup(&pmp->count_lwinprog);
	}
}

void
hammer2_lwinprog_wait(hammer2_pfs_t *pmp)
{
	int lwinprog;

	for (;;) {
		lwinprog = pmp->count_lwinprog;
		cpu_ccfence();
		if ((lwinprog & HAMMER2_LWINPROG_MASK) < hammer2_flush_pipe)
			break;
		tsleep_interlock(&pmp->count_lwinprog, 0);
		atomic_set_int(&pmp->count_lwinprog, HAMMER2_LWINPROG_WAITING);
		lwinprog = pmp->count_lwinprog;
		if ((lwinprog & HAMMER2_LWINPROG_MASK) < hammer2_flush_pipe)
			break;
		tsleep(&pmp->count_lwinprog, PINTERLOCKED, "h2wpipe", hz);
	}
}

/*
 * Manage excessive memory resource use for chain and related
 * structures.
 */
void
hammer2_pfs_memory_wait(hammer2_pfs_t *pmp)
{
	uint32_t waiting;
	long count;
	long limit;
	static int zzticks;

	/*
	 * Atomic check condition and wait.  Also do an early speedup of
	 * the syncer to try to avoid hitting the wait.
	 */
	for (;;) {
		waiting = pmp->inmem_dirty_chains;
		cpu_ccfence();
		count = waiting & HAMMER2_DIRTYCHAIN_MASK;

		limit = pmp->mp->mnt_nvnodelistsize / 10;
		if (limit < hammer2_limit_dirty_chains)
			limit = hammer2_limit_dirty_chains;

		if ((int)(ticks - zzticks) > hz) {
			zzticks = ticks;
			kprintf("count %ld %ld\n", count, limit);
		}

		/*
		 * Block if there are too many dirty chains present, wait
		 * for the flush to clean some out.
		 */
		if (count > limit) {
			tsleep_interlock(&pmp->inmem_dirty_chains, 0);
			if (atomic_cmpset_int(&pmp->inmem_dirty_chains,
				       waiting,
				       waiting | HAMMER2_DIRTYCHAIN_WAITING)) {
				speedup_syncer(pmp->mp);
				tsleep(&pmp->inmem_dirty_chains, PINTERLOCKED,
				       "chnmem", hz);
			}
			continue;	/* loop on success or fail */
		}

		/*
		 * Try to start an early flush before we are forced to block.
		 */
		if (count > limit * 7 / 10)
			speedup_syncer(pmp->mp);
		break;
	}
}

void
hammer2_pfs_memory_inc(hammer2_pfs_t *pmp)
{
	if (pmp)
		atomic_add_int(&pmp->inmem_dirty_chains, 1);
}

void
hammer2_pfs_memory_wakeup(hammer2_pfs_t *pmp)
{
	uint32_t waiting;

	if (pmp == NULL)
		return;

	for (;;) {
		waiting = pmp->inmem_dirty_chains;
		cpu_ccfence();
		if (atomic_cmpset_int(&pmp->inmem_dirty_chains,
				      waiting,
				      (waiting - 1) &
				       ~HAMMER2_DIRTYCHAIN_WAITING)) {
			break;
		}
	}

	if (waiting & HAMMER2_DIRTYCHAIN_WAITING)
		wakeup(&pmp->inmem_dirty_chains);
}
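
/*
 * (hammer2_pfs_memory_inc() is called when a chain becomes MODIFIED;
 * hammer2_pfs_memory_wakeup() reverses the accounting and releases any
 * waiters blocked in hammer2_pfs_memory_wait() above.)
 */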
void
hammer2_dump_chain(hammer2_chain_t *chain, int tab, int *countp, char pfx)
{
	hammer2_chain_t *scan;
	hammer2_chain_t *parent;

	--*countp;
	if (*countp == 0) {
		kprintf("%*.*s...\n", tab, tab, "");
		return;
	}
	if (*countp < 0)
		return;
	kprintf("%*.*s%c-chain %p.%d %016jx/%d mir=%016jx\n",
		tab, tab, "", pfx,
		chain, chain->bref.type,
		chain->bref.key, chain->bref.keybits,
		chain->bref.mirror_tid);

	kprintf("%*.*s      [%08x] (%s) refs=%d",
		tab, tab, "",
		chain->flags,
		((chain->bref.type == HAMMER2_BREF_TYPE_INODE &&
		chain->data) ? (char *)chain->data->ipdata.filename : "?"),
		chain->refs);

	parent = chain->parent;
	if (parent)
		kprintf("\n%*.*s      p=%p [pflags %08x prefs %d",
			tab, tab, "",
			parent, parent->flags, parent->refs);
	if (RB_EMPTY(&chain->core.rbtree)) {
		kprintf("\n");
	} else {
		kprintf(" {\n");
		RB_FOREACH(scan, hammer2_chain_tree, &chain->core.rbtree)
			hammer2_dump_chain(scan, tab + 4, countp, 'a');
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE && chain->data)
			kprintf("%*.*s}(%s)\n", tab, tab, "",
				chain->data->ipdata.filename);
		else
			kprintf("%*.*s}\n", tab, tab, "");
	}
}