/*
 * Copyright (c) 2011-2015 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/fcntl.h>
#include <sys/vfsops.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/objcache.h>
#include <sys/namei.h>
#include <sys/mountctl.h>
#include <sys/dirent.h>
#include <sys/mutex.h>
#include <sys/mutex2.h>

#include "hammer2_disk.h"
#include "hammer2_mount.h"
#include "hammer2_lz4.h"

#include "zlib/hammer2_zlib.h"
#define REPORT_REFS_ERRORS	1	/* XXX remove me */

MALLOC_DEFINE(M_OBJCACHE, "objcache", "Object Cache");

struct hammer2_sync_info {
	int error;
	int waitfor;
};

TAILQ_HEAD(hammer2_mntlist, hammer2_dev);
TAILQ_HEAD(hammer2_pfslist, hammer2_pfs);
static struct hammer2_mntlist hammer2_mntlist;
static struct hammer2_pfslist hammer2_pfslist;
static struct lock hammer2_mntlk;
int hammer2_debug;
int hammer2_cluster_read = 4;		/* physical read-ahead */
int hammer2_cluster_write = 0;		/* bdwrite() so later inval works */
int hammer2_hardlink_enable = 1;
int hammer2_flush_pipe = 100;
int hammer2_synchronous_flush = 1;
int hammer2_dio_count;
long hammer2_chain_allocs;
long hammer2_chain_frees;
long hammer2_limit_dirty_chains;
long hammer2_count_modified_chains;
long hammer2_iod_file_read;
long hammer2_iod_meta_read;
long hammer2_iod_indr_read;
long hammer2_iod_fmap_read;
long hammer2_iod_volu_read;
long hammer2_iod_file_write;
long hammer2_iod_file_wembed;
long hammer2_iod_file_wzero;
long hammer2_iod_file_wdedup;
long hammer2_iod_meta_write;
long hammer2_iod_indr_write;
long hammer2_iod_fmap_write;
long hammer2_iod_volu_write;
long hammer2_ioa_file_read;
long hammer2_ioa_meta_read;
long hammer2_ioa_indr_read;
long hammer2_ioa_fmap_read;
long hammer2_ioa_volu_read;
long hammer2_ioa_fmap_write;
long hammer2_ioa_file_write;
long hammer2_ioa_meta_write;
long hammer2_ioa_indr_write;
long hammer2_ioa_volu_write;
MALLOC_DECLARE(M_HAMMER2_CBUFFER);
MALLOC_DEFINE(M_HAMMER2_CBUFFER, "HAMMER2-compbuffer",
		"Buffer used for compression.");

MALLOC_DECLARE(M_HAMMER2_DEBUFFER);
MALLOC_DEFINE(M_HAMMER2_DEBUFFER, "HAMMER2-decompbuffer",
		"Buffer used for decompression.");
SYSCTL_NODE(_vfs, OID_AUTO, hammer2, CTLFLAG_RW, 0, "HAMMER2 filesystem");

SYSCTL_INT(_vfs_hammer2, OID_AUTO, debug, CTLFLAG_RW,
	   &hammer2_debug, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_read, CTLFLAG_RW,
	   &hammer2_cluster_read, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_write, CTLFLAG_RW,
	   &hammer2_cluster_write, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, hardlink_enable, CTLFLAG_RW,
	   &hammer2_hardlink_enable, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, flush_pipe, CTLFLAG_RW,
	   &hammer2_flush_pipe, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, synchronous_flush, CTLFLAG_RW,
	   &hammer2_synchronous_flush, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, chain_allocs, CTLFLAG_RW,
	   &hammer2_chain_allocs, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, chain_frees, CTLFLAG_RW,
	   &hammer2_chain_frees, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, limit_dirty_chains, CTLFLAG_RW,
	   &hammer2_limit_dirty_chains, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, count_modified_chains, CTLFLAG_RW,
	   &hammer2_count_modified_chains, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, dio_count, CTLFLAG_RD,
	   &hammer2_dio_count, 0, "");

SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_read, CTLFLAG_RW,
	   &hammer2_iod_file_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_read, CTLFLAG_RW,
	   &hammer2_iod_meta_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_read, CTLFLAG_RW,
	   &hammer2_iod_indr_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_fmap_read, CTLFLAG_RW,
	   &hammer2_iod_fmap_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_read, CTLFLAG_RW,
	   &hammer2_iod_volu_read, 0, "");

SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_write, CTLFLAG_RW,
	   &hammer2_iod_file_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_wembed, CTLFLAG_RW,
	   &hammer2_iod_file_wembed, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_wzero, CTLFLAG_RW,
	   &hammer2_iod_file_wzero, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_wdedup, CTLFLAG_RW,
	   &hammer2_iod_file_wdedup, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_write, CTLFLAG_RW,
	   &hammer2_iod_meta_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_write, CTLFLAG_RW,
	   &hammer2_iod_indr_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_fmap_write, CTLFLAG_RW,
	   &hammer2_iod_fmap_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_write, CTLFLAG_RW,
	   &hammer2_iod_volu_write, 0, "");

SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_file_read, CTLFLAG_RW,
	   &hammer2_ioa_file_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_meta_read, CTLFLAG_RW,
	   &hammer2_ioa_meta_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_indr_read, CTLFLAG_RW,
	   &hammer2_ioa_indr_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_fmap_read, CTLFLAG_RW,
	   &hammer2_ioa_fmap_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_volu_read, CTLFLAG_RW,
	   &hammer2_ioa_volu_read, 0, "");

SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_file_write, CTLFLAG_RW,
	   &hammer2_ioa_file_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_meta_write, CTLFLAG_RW,
	   &hammer2_ioa_meta_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_indr_write, CTLFLAG_RW,
	   &hammer2_ioa_indr_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_fmap_write, CTLFLAG_RW,
	   &hammer2_ioa_fmap_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_volu_write, CTLFLAG_RW,
	   &hammer2_ioa_volu_write, 0, "");

long hammer2_check_icrc32;
long hammer2_check_xxhash64;
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, check_icrc32, CTLFLAG_RW,
	   &hammer2_check_icrc32, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, check_xxhash64, CTLFLAG_RW,
	   &hammer2_check_xxhash64, 0, "");
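/*
 * Editorial usage sketch (not code from this file): the nodes above live
 * under the vfs.hammer2 sysctl tree, so the CTLFLAG_RW knobs can be
 * inspected and tuned at runtime from a shell, e.g.:
 *
 *	sysctl vfs.hammer2.flush_pipe		# inspect current pipe depth
 *	sysctl vfs.hammer2.flush_pipe=200	# hypothetical new value
 *
 * CTLFLAG_RD entries such as vfs.hammer2.dio_count are read-only counters.
 */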
static int hammer2_vfs_init(struct vfsconf *conf);
static int hammer2_vfs_uninit(struct vfsconf *vfsp);
static int hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
				struct ucred *cred);
static int hammer2_remount(hammer2_dev_t *, struct mount *, char *,
				struct vnode *, struct ucred *);
static int hammer2_recovery(hammer2_dev_t *hmp);
static int hammer2_vfs_unmount(struct mount *mp, int mntflags);
static int hammer2_vfs_root(struct mount *mp, struct vnode **vpp);
static int hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp,
				struct ucred *cred);
static int hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
				struct ucred *cred);
static int hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
				struct fid *fhp, struct vnode **vpp);
static int hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp);
static int hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
				int *exflagsp, struct ucred **credanonp);

static int hammer2_install_volume_header(hammer2_dev_t *hmp);
static int hammer2_sync_scan2(struct mount *mp, struct vnode *vp, void *data);

static void hammer2_update_pmps(hammer2_dev_t *hmp);

static void hammer2_mount_helper(struct mount *mp, hammer2_pfs_t *pmp);
static void hammer2_unmount_helper(struct mount *mp, hammer2_pfs_t *pmp,
				hammer2_dev_t *hmp);
/*
 * HAMMER2 vfs operations.
 */
static struct vfsops hammer2_vfsops = {
	.vfs_init	= hammer2_vfs_init,
	.vfs_uninit	= hammer2_vfs_uninit,
	.vfs_sync	= hammer2_vfs_sync,
	.vfs_mount	= hammer2_vfs_mount,
	.vfs_unmount	= hammer2_vfs_unmount,
	.vfs_root	= hammer2_vfs_root,
	.vfs_statfs	= hammer2_vfs_statfs,
	.vfs_statvfs	= hammer2_vfs_statvfs,
	.vfs_vget	= hammer2_vfs_vget,
	.vfs_vptofh	= hammer2_vfs_vptofh,
	.vfs_fhtovp	= hammer2_vfs_fhtovp,
	.vfs_checkexp	= hammer2_vfs_checkexp
};

MALLOC_DEFINE(M_HAMMER2, "HAMMER2-mount", "");

VFS_SET(hammer2_vfsops, hammer2, 0);
MODULE_VERSION(hammer2, 1);
static int
hammer2_vfs_init(struct vfsconf *conf)
{
	static struct objcache_malloc_args margs_read;
	static struct objcache_malloc_args margs_write;
	static struct objcache_malloc_args margs_vop;
	int error;

	error = 0;

	if (HAMMER2_BLOCKREF_BYTES != sizeof(struct hammer2_blockref))
		error = EINVAL;
	if (HAMMER2_INODE_BYTES != sizeof(struct hammer2_inode_data))
		error = EINVAL;
	if (HAMMER2_VOLUME_BYTES != sizeof(struct hammer2_volume_data))
		error = EINVAL;

	if (error)
		kprintf("HAMMER2 structure size mismatch; cannot continue.\n");

	margs_read.objsize = 65536;
	margs_read.mtype = M_HAMMER2_DEBUFFER;

	margs_write.objsize = 32768;
	margs_write.mtype = M_HAMMER2_CBUFFER;

	margs_vop.objsize = sizeof(hammer2_xop_t);
	margs_vop.mtype = M_HAMMER2;

	/*
	 * Note that for the XOPS cache we want backing store allocations
	 * to use M_ZERO.  This is not allowed in objcache_get() (to avoid
	 * confusion), so use the backing store function that does it.  This
	 * means that initial XOPS objects are zeroed but REUSED objects are
	 * not.  So we are responsible for cleaning the object up sufficiently
	 * for our needs before objcache_put()ing it back (typically just the
	 * xop->head.cluster structure).
	 */
	cache_buffer_read = objcache_create(margs_read.mtype->ks_shortdesc,
				0, 1, NULL, NULL, NULL,
				objcache_malloc_alloc,
				objcache_malloc_free,
				&margs_read);
	cache_buffer_write = objcache_create(margs_write.mtype->ks_shortdesc,
				0, 1, NULL, NULL, NULL,
				objcache_malloc_alloc,
				objcache_malloc_free,
				&margs_write);
	cache_xops = objcache_create(margs_vop.mtype->ks_shortdesc,
				0, 1, NULL, NULL, NULL,
				objcache_malloc_alloc_zero,
				objcache_malloc_free,
				&margs_vop);

	lockinit(&hammer2_mntlk, "mntlk", 0, 0);
	TAILQ_INIT(&hammer2_mntlist);
	TAILQ_INIT(&hammer2_pfslist);

	hammer2_limit_dirty_chains = desiredvnodes / 10;
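	/*
	 * Editorial note with illustrative arithmetic (not from the
	 * original source): if desiredvnodes were, say, 100000, the default
	 * ceiling would be 10000 dirty chains.  hammer2_pfs_memory_wait()
	 * later uses this value as a floor when throttling writers, and it
	 * can be raised at runtime via vfs.hammer2.limit_dirty_chains.
	 */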
	return (error);
}

static int
hammer2_vfs_uninit(struct vfsconf *vfsp __unused)
{
	objcache_destroy(cache_buffer_read);
	objcache_destroy(cache_buffer_write);
	objcache_destroy(cache_xops);
	return 0;
}
/*
 * Core PFS allocator.  Used to allocate the pmp structure for PFS cluster
 * mounts and the spmp structure for media (hmp) structures.
 *
 * pmp->modify_tid tracks new modify_tid transaction ids for front-end
 * transactions.  Note that synchronization does not use this field.
 * (typically frontend operations and synchronization cannot run on the
 * same PFS node at the same time).
 */
hammer2_pfs_t *
hammer2_pfsalloc(hammer2_chain_t *chain, const hammer2_inode_data_t *ripdata,
		 hammer2_tid_t modify_tid, hammer2_dev_t *force_local)
{
	hammer2_inode_t *iroot;
	hammer2_pfs_t *pmp;
	int count;
	int i;
	int j;

	pmp = NULL;

	/*
	 * Locate or create the PFS based on the cluster id.  If ripdata
	 * is NULL this is a spmp which is unique and is always allocated.
	 *
	 * If the device is mounted in local mode all PFSs are considered
	 * independent and not part of any cluster (for debugging only).
	 */
	if (ripdata) {
		TAILQ_FOREACH(pmp, &hammer2_pfslist, mntentry) {
			if (force_local != pmp->force_local)
				continue;
			if (force_local == NULL &&
			    bcmp(&pmp->pfs_clid, &ripdata->meta.pfs_clid,
				 sizeof(pmp->pfs_clid)) == 0) {
				break;
			} else if (force_local && pmp->pfs_names[0] &&
			    strcmp(pmp->pfs_names[0], ripdata->filename) == 0) {
				break;
			}
		}
	}

	if (pmp == NULL) {
		pmp = kmalloc(sizeof(*pmp), M_HAMMER2, M_WAITOK | M_ZERO);
		pmp->force_local = force_local;
		hammer2_trans_manage_init(pmp);
		kmalloc_create(&pmp->minode, "HAMMER2-inodes");
		kmalloc_create(&pmp->mmsg, "HAMMER2-pfsmsg");
		lockinit(&pmp->lock, "pfslk", 0, 0);
		lockinit(&pmp->lock_nlink, "h2nlink", 0, 0);
		spin_init(&pmp->inum_spin, "hm2pfsalloc_inum");
		spin_init(&pmp->xop_spin, "h2xop");
		spin_init(&pmp->lru_spin, "h2lru");
		RB_INIT(&pmp->inum_tree);
		TAILQ_INIT(&pmp->sideq);
		TAILQ_INIT(&pmp->lru_list);
		spin_init(&pmp->list_spin, "hm2pfsalloc_list");

		/*
		 * Distribute backend operations to threads
		 */
		for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
			for (j = 0; j < HAMMER2_XOPGROUPS +
					HAMMER2_SPECTHREADS; ++j) {
				TAILQ_INIT(&pmp->xopq[i][j]);
			}
		}
		for (i = 0; i < HAMMER2_XOPGROUPS; ++i)
			hammer2_xop_group_init(pmp, &pmp->xop_groups[i]);
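		/*
		 * Editorial sketch (indexing restated from the loops above,
		 * not original text): pmp->xopq is a 2-D array of queues,
		 * indexed first by cluster node [0..HAMMER2_MAXCLUSTER-1]
		 * and then by thread slot [0..HAMMER2_XOPGROUPS +
		 * HAMMER2_SPECTHREADS - 1], so a XOP queued on
		 * pmp->xopq[i][j] feeds exactly one backend thread and
		 * targets exactly one cluster node.
		 */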
		/*
		 * Save the last media transaction id for the flusher.  Set
		 * the initial PFS cluster id when ripdata is available.
		 */
		if (ripdata)
			pmp->pfs_clid = ripdata->meta.pfs_clid;
		TAILQ_INSERT_TAIL(&hammer2_pfslist, pmp, mntentry);

		/*
		 * The synchronization thread may start too early, make
		 * sure it stays frozen until we are ready to let it go.
		 */
		pmp->primary_thr.flags = HAMMER2_THREAD_FROZEN |
					 HAMMER2_THREAD_REMASTER;
	}

	/*
	 * Create the PFS's root inode.
	 */
	if ((iroot = pmp->iroot) == NULL) {
		iroot = hammer2_inode_get(pmp, NULL, NULL, -1);
		pmp->iroot = iroot;
		hammer2_inode_ref(iroot);
		hammer2_inode_unlock(iroot);
	}

	/*
	 * Stop here if no chain is passed in.
	 */
	if (chain == NULL)
		goto done;

	/*
	 * When a chain is passed in we must add it to the PFS's root
	 * inode, update pmp->pfs_types[], and update the synchronization
	 * threads.
	 *
	 * When forcing local mode, mark the PFS as a MASTER regardless.
	 *
	 * At the moment empty spots can develop due to removals or failures.
	 * Ultimately we want to re-fill these spots but doing so might
	 * confuse running code.  XXX
	 */
	hammer2_inode_ref(iroot);
	hammer2_mtx_ex(&iroot->lock);
	j = iroot->cluster.nchains;

	kprintf("add PFS to pmp %p[%d]\n", pmp, j);

	if (j == HAMMER2_MAXCLUSTER) {
		kprintf("hammer2_mount: cluster full!\n");
		/* XXX fatal error? */
	} else {
		KKASSERT(chain->pmp == NULL);
		chain->pmp = pmp;
		hammer2_chain_ref(chain);
		iroot->cluster.array[j].chain = chain;
		if (force_local)
			pmp->pfs_types[j] = HAMMER2_PFSTYPE_MASTER;
		else
			pmp->pfs_types[j] = ripdata->meta.pfs_type;
		pmp->pfs_names[j] = kstrdup(ripdata->filename, M_HAMMER2);
		pmp->pfs_hmps[j] = chain->hmp;

		/*
		 * If the PFS is already mounted we must account
		 * for the mount_count here.
		 */
		if (pmp->mp)
			++chain->hmp->mount_count;

		/*
		 * May have to fixup dirty chain tracking.  Previous
		 * pmp was NULL so nothing to undo.
		 */
		if (chain->flags & HAMMER2_CHAIN_MODIFIED)
			hammer2_pfs_memory_inc(pmp);
		++j;
	}
	iroot->cluster.nchains = j;

	/*
	 * Update nmasters from any PFS inode which is part of the cluster.
	 * It is possible that this will result in a value which is too
	 * high.  MASTER PFSs are authoritative for pfs_nmasters and will
	 * override this value later on.
	 *
	 * (This informs us of masters that might not currently be
	 * discoverable by this mount).
	 */
	if (ripdata && pmp->pfs_nmasters < ripdata->meta.pfs_nmasters) {
		pmp->pfs_nmasters = ripdata->meta.pfs_nmasters;
	}

	/*
	 * Count visible masters.  Masters are usually added with
	 * ripdata->meta.pfs_nmasters set to 1.  This detects when there
	 * are more (XXX and must update the master inodes).
	 */
	count = 0;
	for (i = 0; i < iroot->cluster.nchains; ++i) {
		if (pmp->pfs_types[i] == HAMMER2_PFSTYPE_MASTER)
			++count;
	}
	if (pmp->pfs_nmasters < count)
		pmp->pfs_nmasters = count;

	/*
	 * Create missing synchronization and support threads.
	 *
	 * Single-node masters (including snapshots) have nothing to
	 * synchronize and do not require this thread.
	 *
	 * Multi-node masters or any number of soft masters, slaves, copy,
	 * or other PFS types need the thread.
	 *
	 * Each thread is responsible for its particular cluster index.
	 * We use independent threads so stalls or mismatches related to
	 * any given target do not affect other targets.
	 */
	for (i = 0; i < iroot->cluster.nchains; ++i) {
		/*
		 * Single-node masters (including snapshots) have nothing
		 * to synchronize and will make direct xops support calls,
		 * thus they do not require this thread.
		 *
		 * Note that there can be thousands of snapshots.  We do not
		 * want to create thousands of threads.
		 */
		if (pmp->pfs_nmasters <= 1 &&
		    pmp->pfs_types[i] == HAMMER2_PFSTYPE_MASTER) {
			continue;
		}

		/*
		 * Sync support thread
		 */
		if (pmp->sync_thrs[i].td == NULL) {
			hammer2_thr_create(&pmp->sync_thrs[i], pmp,
					   "h2nod", i, -1,
					   hammer2_primary_sync_thread);
		}
	}

	/*
	 * Create missing Xop threads
	 */
	if (pmp->mp)
		hammer2_xop_helper_create(pmp);

	hammer2_mtx_unlock(&iroot->lock);
	hammer2_inode_drop(iroot);
done:
	return pmp;
}
/*
 * Destroy a PFS, typically only occurs after the last mount on a device
 * has gone away.
 */
static void
hammer2_pfsfree(hammer2_pfs_t *pmp)
{
	hammer2_inode_t *iroot;
	hammer2_chain_t *chain;
	int i;
	int j;

	/*
	 * Cleanup our reference on iroot.  iroot is (should) not be needed
	 * for anything else.
	 */
	TAILQ_REMOVE(&hammer2_pfslist, pmp, mntentry);

	if ((iroot = pmp->iroot) != NULL) {
		for (i = 0; i < iroot->cluster.nchains; ++i) {
			hammer2_thr_delete(&pmp->sync_thrs[i]);
			for (j = 0; j < HAMMER2_XOPGROUPS; ++j)
				hammer2_thr_delete(&pmp->xop_groups[j].thrs[i]);
		}
#if REPORT_REFS_ERRORS
		if (pmp->iroot->refs != 1)
			kprintf("PMP->IROOT %p REFS WRONG %d\n",
				pmp->iroot, pmp->iroot->refs);
#else
		KKASSERT(pmp->iroot->refs == 1);
#endif
		/* ref for pmp->iroot */
		hammer2_inode_drop(pmp->iroot);
		pmp->iroot = NULL;
	}

	/*
	 * Cleanup chains remaining on LRU list.
	 */
	kprintf("pfsfree: %p lrucount=%d\n", pmp, pmp->lru_count);
	while ((chain = TAILQ_FIRST(&pmp->lru_list)) != NULL) {
		hammer2_chain_ref(chain);
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_RELEASE);
		hammer2_chain_drop(chain);
	}

	/*
	 * Free remaining pmp resources
	 */
	kmalloc_destroy(&pmp->mmsg);
	kmalloc_destroy(&pmp->minode);

	kfree(pmp, M_HAMMER2);
}
/*
 * Remove all references to hmp from the pfs list.  Any PFS which becomes
 * empty is terminated and freed.
 */
static void
hammer2_pfsfree_scan(hammer2_dev_t *hmp)
{
	hammer2_pfs_t *pmp;
	hammer2_inode_t *iroot;
	hammer2_chain_t *rchain;
	int didfreeze;
	int i;
	int j;

again:
	TAILQ_FOREACH(pmp, &hammer2_pfslist, mntentry) {
		if ((iroot = pmp->iroot) == NULL)
			continue;
		if (hmp->spmp == pmp) {
			kprintf("unmount hmp %p remove spmp %p\n",
				hmp, pmp);
			hmp->spmp = NULL;
		}

		/*
		 * Determine if this PFS is affected.  If it is we must
		 * freeze all management threads and lock its iroot.
		 *
		 * Freezing a management thread forces it idle, operations
		 * in-progress will be aborted and it will have to start
		 * over again when unfrozen, or exit if told to exit.
		 */
		for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
			if (pmp->pfs_hmps[i] == hmp)
				break;
		}
		if (i != HAMMER2_MAXCLUSTER) {
			/*
			 * Make sure all synchronization threads are locked
			 * down.
			 */
			for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
				if (pmp->pfs_hmps[i] == NULL)
					continue;
				hammer2_thr_freeze_async(&pmp->sync_thrs[i]);
				for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
					hammer2_thr_freeze_async(
						&pmp->xop_groups[j].thrs[i]);
				}
			}
			for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
				if (pmp->pfs_hmps[i] == NULL)
					continue;
				hammer2_thr_freeze(&pmp->sync_thrs[i]);
				for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
					hammer2_thr_freeze(
						&pmp->xop_groups[j].thrs[i]);
				}
			}

			/*
			 * Lock the inode and clean out matching chains.
			 * Note that we cannot use hammer2_inode_lock_*()
			 * here because that would attempt to validate the
			 * cluster that we are in the middle of ripping
			 * apart.
			 *
			 * WARNING! We are working directly on the inode's
			 *	    embedded cluster.
			 */
			hammer2_mtx_ex(&iroot->lock);

			/*
			 * Remove the chain from matching elements of the PFS.
			 */
			for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
				if (pmp->pfs_hmps[i] != hmp)
					continue;
				hammer2_thr_delete(&pmp->sync_thrs[i]);
				for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
					hammer2_thr_delete(
						&pmp->xop_groups[j].thrs[i]);
				}
				rchain = iroot->cluster.array[i].chain;
				iroot->cluster.array[i].chain = NULL;
				pmp->pfs_types[i] = 0;
				if (pmp->pfs_names[i]) {
					kfree(pmp->pfs_names[i], M_HAMMER2);
					pmp->pfs_names[i] = NULL;
				}
				if (rchain) {
					hammer2_chain_drop(rchain);
					if (iroot->cluster.focus == rchain)
						iroot->cluster.focus = NULL;
				}
				pmp->pfs_hmps[i] = NULL;
			}
			hammer2_mtx_unlock(&iroot->lock);
			didfreeze = 1;	/* remaster, unfreeze down below */
		} else {
			didfreeze = 0;
		}

		/*
		 * Cleanup trailing chains.  Gaps may remain.
		 */
		for (i = HAMMER2_MAXCLUSTER - 1; i >= 0; --i) {
			if (pmp->pfs_hmps[i])
				break;
		}
		iroot->cluster.nchains = i + 1;

		/*
		 * If the PMP has no elements remaining we can destroy it.
		 * (this will transition management threads from frozen->exit).
		 */
		if (iroot->cluster.nchains == 0) {
			kprintf("unmount hmp %p last ref to PMP=%p\n",
				hmp, pmp);
			hammer2_pfsfree(pmp);
			goto again;
		}

		/*
		 * If elements still remain we need to set the REMASTER
		 * flag and unfreeze it.
		 */
		if (didfreeze) {
			for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
				if (pmp->pfs_hmps[i] == NULL)
					continue;
				hammer2_thr_remaster(&pmp->sync_thrs[i]);
				hammer2_thr_unfreeze(&pmp->sync_thrs[i]);
				for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
					hammer2_thr_remaster(
						&pmp->xop_groups[j].thrs[i]);
					hammer2_thr_unfreeze(
						&pmp->xop_groups[j].thrs[i]);
				}
			}
		}
	}
}
/*
 * Mount or remount HAMMER2 filesystem from physical media
 *
 *	mountroot
 *		mp		mount point structure
 *		path		NULL
 *
 *	mount
 *		mp		mount point structure
 *		path		path to mount point
 *		data		pointer to argument structure in user space
 *			volume	volume path (device@LABEL form)
 *			hflags	user mount flags
 *		cred		user credentials
 */
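/*
 * Editorial usage sketch (not from the original source; the device and
 * mount point names are hypothetical).  The device@LABEL convention above
 * means a typical non-root mount looks like:
 *
 *	mount_hammer2 /dev/da0s1a@DATA /mnt/data
 *
 * where DATA selects the PFS label looked up under the super-root later
 * in this function.
 */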
static int
hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
		  struct ucred *cred)
{
	struct hammer2_mount_info info;
	hammer2_pfs_t *pmp;
	hammer2_pfs_t *spmp;
	hammer2_dev_t *hmp;
	hammer2_dev_t *force_local;
	hammer2_key_t key_next;
	hammer2_key_t key_dummy;
	hammer2_key_t lhc;
	struct vnode *devvp;
	struct nlookupdata nd;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_cluster_t *cluster;
	const hammer2_inode_data_t *ripdata;
	hammer2_blockref_t bref;
	struct file *fp;
	char devstr[MNAMELEN];
	size_t size;
	size_t done;
	char *dev;
	char *label;
	int ronly = 1;
	int error;
	int cache_index = -1;
	int i;

	hmp = NULL;
	pmp = NULL;
	dev = NULL;
	label = NULL;
	devvp = NULL;

	kprintf("hammer2_mount\n");

	if (path == NULL) {
		/*
		 * Root mount
		 */
		bzero(&info, sizeof(info));
		info.cluster_fd = -1;
		ksnprintf(devstr, sizeof(devstr), "%s",
			  mp->mnt_stat.f_mntfromname);
		kprintf("hammer2_mount: root '%s'\n", devstr);
	} else {
		/*
		 * Non-root mount or updating a mount
		 */
		error = copyin(data, &info, sizeof(info));
		if (error)
			return (error);
		error = copyinstr(info.volume, devstr, MNAMELEN - 1, &done);
		if (error)
			return (error);
	}

	/* Extract device and label */
	dev = devstr;
	label = strchr(devstr, '@');
	if (label == NULL ||
	    ((label + 1) - dev) > done) {
		return (EINVAL);
	}
	*label = '\0';
	label++;

	if (mp->mnt_flag & MNT_UPDATE) {
		/*
		 * Update mount.  Note that pmp->iroot->cluster is
		 * an inode-embedded cluster and thus cannot be
		 * directly locked.
		 *
		 * XXX HAMMER2 needs to implement NFS export via
		 *     mountctl.
		 */
		pmp = MPTOPMP(mp);
		pmp->hflags = info.hflags;
		cluster = &pmp->iroot->cluster;
		for (i = 0; i < cluster->nchains; ++i) {
			if (cluster->array[i].chain == NULL)
				continue;
			hmp = cluster->array[i].chain->hmp;
			devvp = hmp->devvp;
			error = hammer2_remount(hmp, mp, path,
						devvp, cred);
			if (error)
				break;
		}

		return error;
	}

	/*
	 * Lookup name and verify it refers to a block device.
	 */
	if (path) {
		error = nlookup_init(&nd, dev, UIO_SYSSPACE, NLC_FOLLOW);
		if (error == 0)
			error = nlookup(&nd);
		if (error == 0)
			error = cache_vref(&nd.nl_nch, nd.nl_cred, &devvp);
		nlookup_done(&nd);
	} else {
		/* root mount */
		cdev_t cdev = kgetdiskbyname(dev);
		error = bdevvp(cdev, &devvp);
		if (error)
			kprintf("hammer2: cannot find '%s'\n", dev);
	}
	if (error == 0) {
		if (vn_isdisk(devvp, &error))
			error = vfs_mountedon(devvp);
	}

	/*
	 * Determine if the device has already been mounted.  After this
	 * check hmp will be non-NULL if we are doing the second or more
	 * hammer2 mounts from the same device.
	 */
	lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);
	if (devvp) {
		TAILQ_FOREACH(hmp, &hammer2_mntlist, mntentry) {
			if (hmp->devvp == devvp)
				break;
		}
	}

	/*
	 * Open the device if this isn't a secondary mount and construct
	 * the H2 device mount (hmp).
	 */
	if (hmp == NULL) {
		hammer2_chain_t *schain;

		if (error == 0 && vcount(devvp) > 0)
			error = EBUSY;

		/*
		 * Now open the device
		 */
		if (error == 0) {
			ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);
			vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
			error = vinvalbuf(devvp, V_SAVE, 0, 0);
			if (error == 0) {
				error = VOP_OPEN(devvp,
					     ronly ? FREAD : FREAD | FWRITE,
					     FSCRED, NULL);
			}
			vn_unlock(devvp);
		}
		if (error && devvp) {
			vrele(devvp);
			devvp = NULL;
		}
		if (error) {
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			return error;
		}
		hmp = kmalloc(sizeof(*hmp), M_HAMMER2, M_WAITOK | M_ZERO);
		ksnprintf(hmp->devrepname, sizeof(hmp->devrepname), "%s", dev);
		hmp->ronly = ronly;
		hmp->devvp = devvp;
		hmp->hflags = info.hflags & HMNT2_DEVFLAGS;
		kmalloc_create(&hmp->mchain, "HAMMER2-chains");
		TAILQ_INSERT_TAIL(&hammer2_mntlist, hmp, mntentry);
		RB_INIT(&hmp->iotree);
		spin_init(&hmp->io_spin, "hm2mount_io");
		spin_init(&hmp->list_spin, "hm2mount_list");
		TAILQ_INIT(&hmp->flushq);

		lockinit(&hmp->vollk, "h2vol", 0, 0);
		lockinit(&hmp->bulklk, "h2bulk", 0, 0);

		/*
		 * vchain setup. vchain.data is embedded.
		 * vchain.refs is initialized and will never drop to 0.
		 *
		 * NOTE! voldata is not yet loaded.
		 */
		hmp->vchain.hmp = hmp;
		hmp->vchain.refs = 1;
		hmp->vchain.data = (void *)&hmp->voldata;
		hmp->vchain.bref.type = HAMMER2_BREF_TYPE_VOLUME;
		hmp->vchain.bref.data_off = 0 | HAMMER2_PBUFRADIX;
		hmp->vchain.bref.mirror_tid = hmp->voldata.mirror_tid;

		hammer2_chain_core_init(&hmp->vchain);
		/* hmp->vchain.u.xxx is left NULL */
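		/*
		 * Editorial note (interpretation, not original text): the
		 * low bits of bref.data_off encode a size radix, so
		 * "0 | HAMMER2_PBUFRADIX" describes an element at media
		 * offset 0 whose size is 1 << HAMMER2_PBUFRADIX bytes,
		 * i.e. HAMMER2_PBUFSIZE (64KB if the radix is 16).
		 */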
		/*
		 * fchain setup.  fchain.data is embedded.
		 * fchain.refs is initialized and will never drop to 0.
		 *
		 * The data is not used but needs to be initialized to
		 * pass assertion muster.  We use this chain primarily
		 * as a placeholder for the freemap's top-level RBTREE
		 * so it does not interfere with the volume's topology
		 * RBTREE.
		 */
		hmp->fchain.hmp = hmp;
		hmp->fchain.refs = 1;
		hmp->fchain.data = (void *)&hmp->voldata.freemap_blockset;
		hmp->fchain.bref.type = HAMMER2_BREF_TYPE_FREEMAP;
		hmp->fchain.bref.data_off = 0 | HAMMER2_PBUFRADIX;
		hmp->fchain.bref.mirror_tid = hmp->voldata.freemap_tid;
		hmp->fchain.bref.methods =
			HAMMER2_ENC_CHECK(HAMMER2_CHECK_FREEMAP) |
			HAMMER2_ENC_COMP(HAMMER2_COMP_NONE);

		hammer2_chain_core_init(&hmp->fchain);
		/* hmp->fchain.u.xxx is left NULL */

		/*
		 * Install the volume header and initialize fields from
		 * voldata.
		 */
		error = hammer2_install_volume_header(hmp);
		if (error) {
			hammer2_unmount_helper(mp, NULL, hmp);
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			hammer2_vfs_unmount(mp, MNT_FORCE);
			return error;
		}

		/*
		 * Really important to get these right or flush will get
		 * confused.
		 */
		hmp->spmp = hammer2_pfsalloc(NULL, NULL, 0, NULL);
		kprintf("alloc spmp %p tid %016jx\n",
			hmp->spmp, hmp->voldata.mirror_tid);
		spmp = hmp->spmp;

		/*
		 * Dummy-up vchain and fchain's modify_tid.  mirror_tid
		 * is inherited from the volume header.
		 */
		hmp->vchain.bref.mirror_tid = hmp->voldata.mirror_tid;
		hmp->vchain.bref.modify_tid = hmp->vchain.bref.mirror_tid;
		hmp->vchain.pmp = spmp;
		hmp->fchain.bref.mirror_tid = hmp->voldata.freemap_tid;
		hmp->fchain.bref.modify_tid = hmp->fchain.bref.mirror_tid;
		hmp->fchain.pmp = spmp;

		/*
		 * First locate the super-root inode, which is key 0
		 * relative to the volume header's blockset.
		 *
		 * Then locate the root inode by scanning the directory keyspace
		 * represented by the label.
		 */
		parent = hammer2_chain_lookup_init(&hmp->vchain, 0);
		schain = hammer2_chain_lookup(&parent, &key_dummy,
				      HAMMER2_SROOT_KEY, HAMMER2_SROOT_KEY,
				      &cache_index, 0);
		hammer2_chain_lookup_done(parent);
		if (schain == NULL) {
			kprintf("hammer2_mount: invalid super-root\n");
			hammer2_unmount_helper(mp, NULL, hmp);
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			hammer2_vfs_unmount(mp, MNT_FORCE);
			return EINVAL;
		}
		if (schain->error) {
			kprintf("hammer2_mount: error %s reading super-root\n",
				hammer2_error_str(schain->error));
			hammer2_chain_unlock(schain);
			hammer2_chain_drop(schain);
			schain = NULL;
			hammer2_unmount_helper(mp, NULL, hmp);
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			hammer2_vfs_unmount(mp, MNT_FORCE);
			return EINVAL;
		}

		/*
		 * The super-root always uses an inode_tid of 1 when
		 * creating PFSs.
		 */
		spmp->inode_tid = 1;
		spmp->modify_tid = schain->bref.modify_tid + 1;

		/*
		 * Sanity-check schain's pmp and finish initialization.
		 * Any chain belonging to the super-root topology should
		 * have a NULL pmp (not even set to spmp).
		 */
		ripdata = &hammer2_chain_rdata(schain)->ipdata;
		KKASSERT(schain->pmp == NULL);
		spmp->pfs_clid = ripdata->meta.pfs_clid;

		/*
		 * Replace the dummy spmp->iroot with a real one.  It's
		 * easier to just do a wholesale replacement than to try
		 * to update the chain and fixup the iroot fields.
		 *
		 * The returned inode is locked with the supplied cluster.
		 */
		cluster = hammer2_cluster_from_chain(schain);
		hammer2_inode_drop(spmp->iroot);
		spmp->iroot = hammer2_inode_get(spmp, NULL, cluster, -1);
		spmp->spmp_hmp = hmp;
		spmp->pfs_types[0] = ripdata->meta.pfs_type;
		spmp->pfs_hmps[0] = hmp;
		hammer2_inode_ref(spmp->iroot);
		hammer2_inode_unlock(spmp->iroot);
		hammer2_cluster_unlock(cluster);
		hammer2_cluster_drop(cluster);
		/* leave spmp->iroot with one ref */

		if ((mp->mnt_flag & MNT_RDONLY) == 0) {
			error = hammer2_recovery(hmp);
			/* XXX do something with error */
		}
		hammer2_update_pmps(hmp);
		hammer2_iocom_init(hmp);

		/*
		 * Ref the cluster management messaging descriptor.  The mount
		 * program deals with the other end of the communications pipe.
		 *
		 * Root mounts typically do not supply one.
		 */
		if (info.cluster_fd >= 0) {
			fp = holdfp(curproc->p_fd, info.cluster_fd, -1);
			if (fp) {
				hammer2_cluster_reconnect(hmp, fp);
			} else {
				kprintf("hammer2_mount: bad cluster_fd!\n");
			}
		}
	} else {
		spmp = hmp->spmp;
		if (info.hflags & HMNT2_DEVFLAGS) {
			kprintf("hammer2: Warning: mount flags pertaining "
				"to the whole device may only be specified "
				"on the first mount of the device: %08x\n",
				info.hflags & HMNT2_DEVFLAGS);
		}
	}

	/*
	 * Force local mount (disassociate all PFSs from their clusters).
	 * Used primarily for debugging.
	 */
	force_local = (hmp->hflags & HMNT2_LOCAL) ? hmp : NULL;

	/*
	 * Lookup the mount point under the media-localized super-root.
	 * Scanning hammer2_pfslist doesn't help us because it represents
	 * PFS cluster ids which can aggregate several named PFSs together.
	 *
	 * cluster->pmp will incorrectly point to spmp and must be fixed
	 * up later on.
	 */
	hammer2_inode_lock(spmp->iroot, 0);
	parent = hammer2_inode_chain(spmp->iroot, 0, HAMMER2_RESOLVE_ALWAYS);
	lhc = hammer2_dirhash(label, strlen(label));
	chain = hammer2_chain_lookup(&parent, &key_next,
				     lhc, lhc + HAMMER2_DIRHASH_LOMASK,
				     &cache_index, 0);
	while (chain) {
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE &&
		    strcmp(label, chain->data->ipdata.filename) == 0) {
			break;
		}
		chain = hammer2_chain_next(&parent, chain, &key_next,
					   key_next,
					   lhc + HAMMER2_DIRHASH_LOMASK,
					   &cache_index, 0);
	}
	hammer2_chain_unlock(parent);
	hammer2_chain_drop(parent);
	hammer2_inode_unlock(spmp->iroot);
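	/*
	 * Editorial note (explanation of the scan above, not original
	 * text): hammer2_dirhash() only narrows the search to the keyspace
	 * [lhc, lhc + HAMMER2_DIRHASH_LOMASK], so several directory entries
	 * can land in that hash range.  The strcmp() against
	 * ipdata.filename is what disambiguates genuine hash collisions,
	 * e.g. two PFS labels hashing into the same bucket.
	 */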
	/*
	 * PFS could not be found?
	 */
	if (chain == NULL) {
		kprintf("hammer2_mount: PFS label not found\n");
		hammer2_unmount_helper(mp, NULL, hmp);
		lockmgr(&hammer2_mntlk, LK_RELEASE);
		hammer2_vfs_unmount(mp, MNT_FORCE);

		return ENOENT;
	}

	/*
	 * Acquire the pmp structure (it should have already been allocated
	 * via hammer2_update_pmps() so do not pass cluster in to add to
	 * available chains).
	 *
	 * Check if the cluster has already been mounted.  A cluster can
	 * only be mounted once, use null mounts to mount additional copies.
	 */
	ripdata = &chain->data->ipdata;
	bref = chain->bref;
	pmp = hammer2_pfsalloc(NULL, ripdata, bref.modify_tid, force_local);
	hammer2_chain_unlock(chain);
	hammer2_chain_drop(chain);

	if (pmp->mp) {
		kprintf("hammer2_mount: PFS already mounted!\n");
		hammer2_unmount_helper(mp, NULL, hmp);
		lockmgr(&hammer2_mntlk, LK_RELEASE);
		hammer2_vfs_unmount(mp, MNT_FORCE);

		return EBUSY;
	}

	kprintf("hammer2_mount hmp=%p pmp=%p\n", hmp, pmp);

	pmp->hflags = info.hflags;
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;	/* all entry pts are SMP */
	mp->mnt_kern_flag |= MNTK_THR_SYNC;	/* new vsyncscan semantics */

	/*
	 * required mount structure initializations
	 */
	mp->mnt_stat.f_iosize = HAMMER2_PBUFSIZE;
	mp->mnt_stat.f_bsize = HAMMER2_PBUFSIZE;

	mp->mnt_vstat.f_frsize = HAMMER2_PBUFSIZE;
	mp->mnt_vstat.f_bsize = HAMMER2_PBUFSIZE;

	mp->mnt_iosize_max = MAXPHYS;

	/*
	 * Connect up mount pointers.
	 */
	hammer2_mount_helper(mp, pmp);

	lockmgr(&hammer2_mntlk, LK_RELEASE);

	vfs_add_vnodeops(mp, &hammer2_vnode_vops, &mp->mnt_vn_norm_ops);
	vfs_add_vnodeops(mp, &hammer2_spec_vops, &mp->mnt_vn_spec_ops);
	vfs_add_vnodeops(mp, &hammer2_fifo_vops, &mp->mnt_vn_fifo_ops);

	if (path) {
		copyinstr(info.volume, mp->mnt_stat.f_mntfromname,
			  MNAMELEN - 1, &size);
		bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	} /* else root mount, already in there */

	bzero(mp->mnt_stat.f_mntonname, sizeof(mp->mnt_stat.f_mntonname));
	if (path) {
		copyinstr(path, mp->mnt_stat.f_mntonname,
			  sizeof(mp->mnt_stat.f_mntonname) - 1,
			  &size);
	} else {
		/* root mount */
		mp->mnt_stat.f_mntonname[0] = '/';
	}

	/*
	 * Initial statfs to prime mnt_stat.
	 */
	hammer2_vfs_statfs(mp, &mp->mnt_stat, cred);

	return 0;
}
/*
 * Scan PFSs under the super-root and create hammer2_pfs structures.
 */
static void
hammer2_update_pmps(hammer2_dev_t *hmp)
{
	const hammer2_inode_data_t *ripdata;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_blockref_t bref;
	hammer2_dev_t *force_local;
	hammer2_pfs_t *spmp;
	hammer2_pfs_t *pmp;
	hammer2_key_t key_next;
	int cache_index = -1;

	/*
	 * Force local mount (disassociate all PFSs from their clusters).
	 * Used primarily for debugging.
	 */
	force_local = (hmp->hflags & HMNT2_LOCAL) ? hmp : NULL;

	/*
	 * Lookup mount point under the media-localized super-root.
	 *
	 * cluster->pmp will incorrectly point to spmp and must be fixed
	 * up later on.
	 */
	spmp = hmp->spmp;
	hammer2_inode_lock(spmp->iroot, 0);
	parent = hammer2_inode_chain(spmp->iroot, 0, HAMMER2_RESOLVE_ALWAYS);
	chain = hammer2_chain_lookup(&parent, &key_next,
				     HAMMER2_KEY_MIN, HAMMER2_KEY_MAX,
				     &cache_index, 0);
	while (chain) {
		if (chain->bref.type != HAMMER2_BREF_TYPE_INODE)
			continue;
		ripdata = &chain->data->ipdata;
		bref = chain->bref;
		kprintf("ADD LOCAL PFS: %s\n", ripdata->filename);

		pmp = hammer2_pfsalloc(chain, ripdata,
				       bref.modify_tid, force_local);
		chain = hammer2_chain_next(&parent, chain, &key_next,
					   key_next, HAMMER2_KEY_MAX,
					   &cache_index, 0);
	}
	hammer2_chain_unlock(parent);
	hammer2_chain_drop(parent);
	hammer2_inode_unlock(spmp->iroot);
}
static int
hammer2_remount(hammer2_dev_t *hmp, struct mount *mp, char *path __unused,
		struct vnode *devvp, struct ucred *cred)
{
	int error = 0;

	if (hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
		error = hammer2_recovery(hmp);
	}

	return error;
}
static int
hammer2_vfs_unmount(struct mount *mp, int mntflags)
{
	hammer2_pfs_t *pmp;
	int flags;
	int error = 0;

	pmp = MPTOPMP(mp);

	if (pmp == NULL)
		return (0);

	lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);

	/*
	 * If mount initialization proceeded far enough we must flush
	 * its vnodes and sync the underlying mount points.  Three syncs
	 * are required to fully flush the filesystem (freemap updates lag
	 * by one flush, and one extra for safety).
	 */
	if (mntflags & MNT_FORCE)
		flags = FORCECLOSE;
	else
		flags = 0;
	if (pmp->iroot) {
		error = vflush(mp, 0, flags);
		if (error)
			goto failed;
		hammer2_vfs_sync(mp, MNT_WAIT);
		hammer2_vfs_sync(mp, MNT_WAIT);
		hammer2_vfs_sync(mp, MNT_WAIT);
	}

	/*
	 * Cleanup the frontend support XOPS threads
	 */
	hammer2_xop_helper_cleanup(pmp);

	/*
	 * Cleanup our reference on ihidden.
	 */
	if (pmp->ihidden) {
		hammer2_inode_drop(pmp->ihidden);
		pmp->ihidden = NULL;
	}

	hammer2_unmount_helper(mp, pmp, NULL);

failed:
	lockmgr(&hammer2_mntlk, LK_RELEASE);

	return (error);
}
/*
 * Mount helper, hook the system mount into our PFS.
 * The mount lock is held.
 *
 * We must bump the mount_count on related devices for any
 * mounted PFSs.
 */
static void
hammer2_mount_helper(struct mount *mp, hammer2_pfs_t *pmp)
{
	hammer2_cluster_t *cluster;
	hammer2_chain_t *rchain;
	int i;

	mp->mnt_data = (qaddr_t)pmp;
	pmp->mp = mp;

	/*
	 * After pmp->mp is set we have to adjust hmp->mount_count.
	 */
	cluster = &pmp->iroot->cluster;
	for (i = 0; i < cluster->nchains; ++i) {
		rchain = cluster->array[i].chain;
		if (rchain == NULL)
			continue;
		++rchain->hmp->mount_count;
		kprintf("hammer2_mount hmp=%p ++mount_count=%d\n",
			rchain->hmp, rchain->hmp->mount_count);
	}

	/*
	 * Create missing Xop threads
	 */
	hammer2_xop_helper_create(pmp);
}
/*
 * Mount helper, unhook the system mount from our PFS.
 * The mount lock is held.
 *
 * If hmp is supplied a mount responsible for being the first to open
 * the block device failed and the block device and all PFSs using the
 * block device must be cleaned up.
 *
 * If pmp is supplied multiple devices might be backing the PFS and each
 * must be disconnected.  This might not be the last PFS using some of the
 * underlying devices.  Also, we have to adjust our hmp->mount_count
 * accounting for the devices backing the pmp which is now undergoing an
 * unmount.
 */
static void
hammer2_unmount_helper(struct mount *mp, hammer2_pfs_t *pmp, hammer2_dev_t *hmp)
{
	hammer2_cluster_t *cluster;
	hammer2_chain_t *rchain;
	struct vnode *devvp;
	int dumpcnt;
	int ronly;
	int i;

	/*
	 * If no device supplied this is a high-level unmount and we have
	 * to disconnect the mount, adjust mount_count, and locate devices
	 * that might now have no mounts.
	 */
	if (pmp) {
		KKASSERT(hmp == NULL);
		KKASSERT((void *)(intptr_t)mp->mnt_data == pmp);
		pmp->mp = NULL;
		mp->mnt_data = NULL;

		/*
		 * After pmp->mp is cleared we have to account for
		 * mount_count.
		 */
		cluster = &pmp->iroot->cluster;
		for (i = 0; i < cluster->nchains; ++i) {
			rchain = cluster->array[i].chain;
			if (rchain == NULL)
				continue;
			--rchain->hmp->mount_count;
			kprintf("hammer2_unmount hmp=%p --mount_count=%d\n",
				rchain->hmp, rchain->hmp->mount_count);
			/* scrapping hmp now may invalidate the pmp */
		}
again:
		TAILQ_FOREACH(hmp, &hammer2_mntlist, mntentry) {
			if (hmp->mount_count == 0) {
				hammer2_unmount_helper(NULL, NULL, hmp);
				goto again;
			}
		}
		return;
	}

	/*
	 * Try to terminate the block device.  We can't terminate it if
	 * there are still PFSs referencing it.
	 */
	kprintf("hammer2_unmount hmp=%p mount_count=%d\n",
		hmp, hmp->mount_count);
	if (hmp->mount_count)
		return;

	hammer2_pfsfree_scan(hmp);
	hammer2_dev_exlock(hmp);	/* XXX order */

	/*
	 * Cycle the volume data lock as a safety (probably not needed any
	 * more).  To ensure everything is out we need to flush at least
	 * three times.  (1) The running of the sideq can dirty the
	 * filesystem, (2) A normal flush can dirty the freemap, and
	 * (3) ensure that the freemap is fully synchronized.
	 *
	 * The next mount's recovery scan can clean everything up but we want
	 * to leave the filesystem in a 100% clean state on a normal unmount.
	 */
	hammer2_voldata_lock(hmp);
	hammer2_voldata_unlock(hmp);

	hammer2_iocom_uninit(hmp);

	if ((hmp->vchain.flags | hmp->fchain.flags) &
	    HAMMER2_CHAIN_FLUSH_MASK) {
		kprintf("hammer2_unmount: chains left over "
			"after final sync\n");
		kprintf("    vchain %08x\n", hmp->vchain.flags);
		kprintf("    fchain %08x\n", hmp->fchain.flags);

		if (hammer2_debug & 0x0010)
			Debugger("entered debugger");
	}

	KKASSERT(hmp->spmp == NULL);

	/*
	 * Finish up with the device vnode
	 */
	if ((devvp = hmp->devvp) != NULL) {
		ronly = hmp->ronly;
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		vinvalbuf(devvp, (ronly ? 0 : V_SAVE), 0, 0);
		hmp->devvp = NULL;
		VOP_CLOSE(devvp, (ronly ? FREAD : FREAD|FWRITE), NULL);
		vn_unlock(devvp);
		vrele(devvp);
		devvp = NULL;
	}

	/*
	 * Clear vchain/fchain flags that might prevent final cleanup
	 * of these chains.
	 */
	if (hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED) {
		atomic_add_long(&hammer2_count_modified_chains, -1);
		atomic_clear_int(&hmp->vchain.flags, HAMMER2_CHAIN_MODIFIED);
		hammer2_pfs_memory_wakeup(hmp->vchain.pmp);
	}
	if (hmp->vchain.flags & HAMMER2_CHAIN_UPDATE) {
		atomic_clear_int(&hmp->vchain.flags, HAMMER2_CHAIN_UPDATE);
	}

	if (hmp->fchain.flags & HAMMER2_CHAIN_MODIFIED) {
		atomic_add_long(&hammer2_count_modified_chains, -1);
		atomic_clear_int(&hmp->fchain.flags, HAMMER2_CHAIN_MODIFIED);
		hammer2_pfs_memory_wakeup(hmp->fchain.pmp);
	}
	if (hmp->fchain.flags & HAMMER2_CHAIN_UPDATE) {
		atomic_clear_int(&hmp->fchain.flags, HAMMER2_CHAIN_UPDATE);
	}

	/*
	 * Final drop of embedded freemap root chain to
	 * clean up fchain.core (fchain structure is not
	 * flagged ALLOCATED so it is cleaned out and then
	 * left to rot).
	 */
	hammer2_chain_drop(&hmp->fchain);

	/*
	 * Final drop of embedded volume root chain to clean
	 * up vchain.core (vchain structure is not flagged
	 * ALLOCATED so it is cleaned out and then left to
	 * rot).
	 */
	dumpcnt = 50;
	hammer2_dump_chain(&hmp->vchain, 0, &dumpcnt, 'v');
	dumpcnt = 50;
	hammer2_dump_chain(&hmp->fchain, 0, &dumpcnt, 'f');
	hammer2_dev_unlock(hmp);
	hammer2_chain_drop(&hmp->vchain);

	hammer2_io_cleanup(hmp, &hmp->iotree);
	if (hmp->iofree_count) {
		kprintf("io_cleanup: %d I/O's left hanging\n",
			hmp->iofree_count);
	}

	TAILQ_REMOVE(&hammer2_mntlist, hmp, mntentry);
	kmalloc_destroy(&hmp->mchain);
	kfree(hmp, M_HAMMER2);
}
int
hammer2_vfs_vget(struct mount *mp, struct vnode *dvp,
		 ino_t ino, struct vnode **vpp)
{
	hammer2_xop_lookup_t *xop;
	hammer2_pfs_t *pmp;
	hammer2_inode_t *ip;
	hammer2_tid_t inum;
	int error;

	inum = (hammer2_tid_t)ino & HAMMER2_DIRHASH_USERMSK;

	error = 0;
	pmp = MPTOPMP(mp);

	/*
	 * Easy if we already have it cached
	 */
	ip = hammer2_inode_lookup(pmp, inum);
	if (ip) {
		hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
		*vpp = hammer2_igetv(ip, &error);
		hammer2_inode_unlock(ip);
		hammer2_inode_drop(ip);		/* from lookup */

		return error;
	}

	/*
	 * Otherwise we have to find the inode
	 */
	xop = hammer2_xop_alloc(pmp->iroot, 0);
	xop->lhc = inum;
	hammer2_xop_start(&xop->head, hammer2_xop_lookup);
	error = hammer2_xop_collect(&xop->head, 0);

	if (error == 0) {
		if (hammer2_cluster_rdata(&xop->head.cluster) == NULL) {
			kprintf("vget: no collect error but also no rdata\n");
			kprintf("xop %p\n", xop);
			while ((hammer2_debug & 0x80000) == 0) {
				tsleep(xop, PCATCH, "wait", hz * 10);
			}
			/* not reached */
		}
		ip = hammer2_inode_get(pmp, NULL, &xop->head.cluster, -1);
	}
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

	if (error == 0) {
		*vpp = hammer2_igetv(ip, &error);
		hammer2_inode_unlock(ip);
	} else {
		*vpp = NULL;
	}

	return (error);
}
static int
hammer2_vfs_root(struct mount *mp, struct vnode **vpp)
{
	hammer2_pfs_t *pmp;
	struct vnode *vp;
	int error = 0;

	pmp = MPTOPMP(mp);

	if (pmp->iroot == NULL) {
		*vpp = NULL;
		error = EINVAL;
	} else {
		hammer2_inode_lock(pmp->iroot, HAMMER2_RESOLVE_SHARED);

		while (pmp->inode_tid == 0) {
			hammer2_xop_ipcluster_t *xop;
			hammer2_inode_meta_t *meta;

			xop = hammer2_xop_alloc(pmp->iroot,
						HAMMER2_XOP_MODIFYING);
			hammer2_xop_start(&xop->head, hammer2_xop_ipcluster);
			error = hammer2_xop_collect(&xop->head, 0);

			if (error == 0) {
				meta = &xop->head.cluster.focus->
						data->ipdata.meta;
				pmp->iroot->meta = *meta;
				pmp->inode_tid = meta->pfs_inum + 1;
				if (pmp->inode_tid < HAMMER2_INODE_START)
					pmp->inode_tid = HAMMER2_INODE_START;
				pmp->modify_tid = xop->head.cluster.focus->
						bref.modify_tid + 1;
				kprintf("PFS: Starting inode %jd\n",
					(intmax_t)pmp->inode_tid);
				kprintf("PMP focus good set nextino=%ld "
					"mod=%016jx\n",
					pmp->inode_tid, pmp->modify_tid);
				wakeup(&pmp->iroot);

				hammer2_xop_retire(&xop->head,
						   HAMMER2_XOPMASK_VOP);

				/*
				 * Prime the mount info.
				 */
				hammer2_vfs_statfs(mp, &mp->mnt_stat, NULL);

				/*
				 * With the cluster operational, check for and
				 * install ihidden if needed.  The install_hidden
				 * code needs to get a transaction so we must unlock
				 * iroot around the call.
				 *
				 * This is only applicable to PFS mounts, there is no
				 * hidden directory in the spmp.
				 */
				hammer2_inode_unlock(pmp->iroot);
				hammer2_inode_install_hidden(pmp);
				hammer2_inode_lock(pmp->iroot,
						   HAMMER2_RESOLVE_SHARED);
			} else {
				hammer2_xop_retire(&xop->head,
						   HAMMER2_XOPMASK_VOP);
				hammer2_inode_unlock(pmp->iroot);
				error = tsleep(&pmp->iroot, PCATCH,
					       "h2root", hz);
				hammer2_inode_lock(pmp->iroot,
						   HAMMER2_RESOLVE_SHARED);
			}
		}

		if (error) {
			hammer2_inode_unlock(pmp->iroot);
			*vpp = NULL;
		} else {
			vp = hammer2_igetv(pmp->iroot, &error);
			hammer2_inode_unlock(pmp->iroot);
			*vpp = vp;
		}
	}

	return (error);
}
/*
 * XXX incorporate ipdata->meta.inode_quota and data_quota
 */
static int
hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
{
	hammer2_pfs_t *pmp;
	hammer2_dev_t *hmp;
	hammer2_blockref_t bref;
	int i;

	/*
	 * NOTE: iroot might not have validated the cluster yet.
	 */
	pmp = MPTOPMP(mp);

	mp->mnt_stat.f_files = 0;
	mp->mnt_stat.f_ffree = 0;
	mp->mnt_stat.f_blocks = 0;
	mp->mnt_stat.f_bfree = 0;
	mp->mnt_stat.f_bavail = 0;

	for (i = 0; i < pmp->iroot->cluster.nchains; ++i) {
		hmp = pmp->pfs_hmps[i];
		if (hmp == NULL)
			continue;
		if (pmp->iroot->cluster.array[i].chain)
			bref = pmp->iroot->cluster.array[i].chain->bref;
		else
			bzero(&bref, sizeof(bref));

		mp->mnt_stat.f_files = bref.inode_count;
		mp->mnt_stat.f_ffree = 0;
		mp->mnt_stat.f_blocks = (bref.data_count +
					 hmp->voldata.allocator_free) /
					mp->mnt_vstat.f_bsize;
		mp->mnt_stat.f_bfree = hmp->voldata.allocator_free /
				       mp->mnt_vstat.f_bsize;
		mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree;

		*sbp = mp->mnt_stat;
	}
	return (0);
}
static int
hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
{
	hammer2_pfs_t *pmp;
	hammer2_dev_t *hmp;
	hammer2_blockref_t bref;
	int i;

	/*
	 * NOTE: iroot might not have validated the cluster yet.
	 */
	pmp = MPTOPMP(mp);

	mp->mnt_vstat.f_bsize = 0;
	mp->mnt_vstat.f_files = 0;
	mp->mnt_vstat.f_ffree = 0;
	mp->mnt_vstat.f_blocks = 0;
	mp->mnt_vstat.f_bfree = 0;
	mp->mnt_vstat.f_bavail = 0;

	for (i = 0; i < pmp->iroot->cluster.nchains; ++i) {
		hmp = pmp->pfs_hmps[i];
		if (hmp == NULL)
			continue;
		if (pmp->iroot->cluster.array[i].chain)
			bref = pmp->iroot->cluster.array[i].chain->bref;
		else
			bzero(&bref, sizeof(bref));

		mp->mnt_vstat.f_bsize = HAMMER2_PBUFSIZE;
		mp->mnt_vstat.f_files = bref.inode_count;
		mp->mnt_vstat.f_ffree = 0;
		mp->mnt_vstat.f_blocks = (bref.data_count +
					  hmp->voldata.allocator_free) /
					 mp->mnt_vstat.f_bsize;
		mp->mnt_vstat.f_bfree = hmp->voldata.allocator_free /
					mp->mnt_vstat.f_bsize;
		mp->mnt_vstat.f_bavail = mp->mnt_vstat.f_bfree;

		*sbp = mp->mnt_vstat;
	}
	return (0);
}
/*
 * Mount-time recovery (RW mounts)
 *
 * Updates to the free block table are allowed to lag flushes by one
 * transaction.  In case of a crash, then on a fresh mount we must do an
 * incremental scan of the last committed transaction id and make sure that
 * all related blocks have been marked allocated.
 *
 * The super-root topology and each PFS has its own transaction id domain,
 * so we must track PFS boundary transitions.
 */
struct hammer2_recovery_elm {
	TAILQ_ENTRY(hammer2_recovery_elm) entry;
	hammer2_chain_t *chain;
	hammer2_tid_t sync_tid;
};

TAILQ_HEAD(hammer2_recovery_list, hammer2_recovery_elm);

struct hammer2_recovery_info {
	struct hammer2_recovery_list list;
	int	depth;
};

static int hammer2_recovery_scan(hammer2_dev_t *hmp,
			hammer2_chain_t *parent,
			struct hammer2_recovery_info *info,
			hammer2_tid_t sync_tid);

#define HAMMER2_RECOVERY_MAXDEPTH	10
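/*
 * Editorial sketch of the control flow (restating the code below, not
 * original text): hammer2_recovery() seeds a scan at the volume root and
 * hammer2_recovery_scan() recurses through indirect blocks.  Once
 * info->depth reaches HAMMER2_RECOVERY_MAXDEPTH (10), a subtree is not
 * recursed into immediately; instead the chain is referenced and queued
 * on info->list, and the loop in hammer2_recovery() pops each element
 * and restarts the scan from that chain at depth 0, bounding kernel
 * stack usage.
 */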
static int
hammer2_recovery(hammer2_dev_t *hmp)
{
	struct hammer2_recovery_info info;
	struct hammer2_recovery_elm *elm;
	hammer2_chain_t *parent;
	hammer2_tid_t sync_tid;
	hammer2_tid_t mirror_tid;
	int error;
	int cumulative_error = 0;

	hammer2_trans_init(hmp->spmp, 0);

	sync_tid = hmp->voldata.freemap_tid;
	mirror_tid = hmp->voldata.mirror_tid;

	kprintf("hammer2 mount \"%s\": ", hmp->devrepname);
	if (sync_tid >= mirror_tid) {
		kprintf(" no recovery needed\n");
	} else {
		kprintf(" freemap recovery %016jx-%016jx\n",
			sync_tid + 1, mirror_tid);
	}

	TAILQ_INIT(&info.list);
	info.depth = 0;
	parent = hammer2_chain_lookup_init(&hmp->vchain, 0);
	cumulative_error = hammer2_recovery_scan(hmp, parent, &info, sync_tid);
	hammer2_chain_lookup_done(parent);

	while ((elm = TAILQ_FIRST(&info.list)) != NULL) {
		TAILQ_REMOVE(&info.list, elm, entry);
		parent = elm->chain;
		sync_tid = elm->sync_tid;
		kfree(elm, M_HAMMER2);

		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
		error = hammer2_recovery_scan(hmp, parent, &info,
					      hmp->voldata.freemap_tid);
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);	/* drop elm->chain ref */
		if (error)
			cumulative_error = error;
	}
	hammer2_trans_done(hmp->spmp);

	return cumulative_error;
}
static int
hammer2_recovery_scan(hammer2_dev_t *hmp, hammer2_chain_t *parent,
		      struct hammer2_recovery_info *info,
		      hammer2_tid_t sync_tid)
{
	const hammer2_inode_data_t *ripdata;
	hammer2_chain_t *chain;
	hammer2_blockref_t bref;
	int cache_index;
	int cumulative_error = 0;
	int first;
	int error;

	/*
	 * Adjust freemap to ensure that the block(s) are marked allocated.
	 */
	if (parent->bref.type != HAMMER2_BREF_TYPE_VOLUME) {
		hammer2_freemap_adjust(hmp, &parent->bref,
				       HAMMER2_FREEMAP_DORECOVER);
	}

	/*
	 * Check type for recursive scan
	 */
	switch(parent->bref.type) {
	case HAMMER2_BREF_TYPE_VOLUME:
		/* data already instantiated */
		break;
	case HAMMER2_BREF_TYPE_INODE:
		/*
		 * Must instantiate data for DIRECTDATA test and also
		 * for recursion.
		 */
		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
		ripdata = &hammer2_chain_rdata(parent)->ipdata;
		if (ripdata->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) {
			/* not applicable to recovery scan */
			hammer2_chain_unlock(parent);
			return 0;
		}
		hammer2_chain_unlock(parent);
		break;
	case HAMMER2_BREF_TYPE_INDIRECT:
		/*
		 * Must instantiate data for recursion
		 */
		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
		hammer2_chain_unlock(parent);
		break;
	case HAMMER2_BREF_TYPE_DATA:
	case HAMMER2_BREF_TYPE_FREEMAP:
	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
	case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
		/* not applicable to recovery scan */
		return 0;
	default:
		return EDOM;
	}

	/*
	 * Defer operation if depth limit reached or if we are crossing a
	 * PFS boundary.
	 */
	if (info->depth >= HAMMER2_RECOVERY_MAXDEPTH) {
		struct hammer2_recovery_elm *elm;

		elm = kmalloc(sizeof(*elm), M_HAMMER2, M_ZERO | M_WAITOK);
		elm->chain = parent;
		elm->sync_tid = sync_tid;
		hammer2_chain_ref(parent);
		TAILQ_INSERT_TAIL(&info->list, elm, entry);
		/* unlocked by caller */

		return (0);
	}

	/*
	 * Recursive scan of the last flushed transaction only.  We are
	 * doing this without pmp assignments so don't leave the chains
	 * hanging around after we are done with them.
	 */
	cache_index = 0;
	chain = NULL;
	first = 1;

	while (hammer2_chain_scan(parent, &chain, &bref,
				  &first, &cache_index,
				  HAMMER2_LOOKUP_NODATA) != NULL) {
		/*
		 * Leaf element with no chain instantiated.
		 */
		if (chain == NULL) {
			if (bref.mirror_tid > sync_tid) {
				hammer2_freemap_adjust(hmp, &bref,
						     HAMMER2_FREEMAP_DORECOVER);
			}
			continue;
		}

		/*
		 * This may or may not be a recursive node.
		 */
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_RELEASE);
		if (bref.mirror_tid > sync_tid) {
			++info->depth;
			error = hammer2_recovery_scan(hmp, chain,
						      info, bref.mirror_tid);
			--info->depth;
			if (error)
				cumulative_error = error;
		}

		/*
		 * Flush the recovery at the PFS boundary to stage it for
		 * the final flush of the super-root topology.
		 */
		if ((bref.flags & HAMMER2_BREF_FLAG_PFSROOT) &&
		    (chain->flags & HAMMER2_CHAIN_ONFLUSH)) {
			hammer2_flush(chain, HAMMER2_FLUSH_TOP);
		}
	}

	return cumulative_error;
}
/*
 * Sync a mount point; this is called on a per-mount basis from the
 * filesystem syncer process periodically and whenever a user issues
 * a sync.
 */
int
hammer2_vfs_sync(struct mount *mp, int waitfor)
{
	hammer2_xop_flush_t *xop;
	struct hammer2_sync_info info;
	hammer2_inode_t *iroot;
	hammer2_pfs_t *pmp;
	int flags;
	int error;

	pmp = MPTOPMP(mp);
	iroot = pmp->iroot;
	KKASSERT(iroot);
	KKASSERT(iroot->pmp == pmp);

	/*
	 * We can't acquire locks on existing vnodes while in a transaction
	 * without risking a deadlock.  This assumes that vfsync() can be
	 * called without the vnode locked (which it can in DragonFly).
	 * Otherwise we'd have to implement a multi-pass or flag the lock
	 * failures and retry.
	 *
	 * The reclamation code interlocks with the sync list's token
	 * (by removing the vnode from the scan list) before unlocking
	 * the inode, giving us time to ref the inode.
	 */
	/*flags = VMSC_GETVP;*/
	flags = 0;
	if (waitfor & MNT_LAZY)
		flags |= VMSC_ONEPASS;

	/*
	 * Preflush the vnodes using a normal transaction before interlocking
	 * with a flush transaction.
	 */
	hammer2_trans_init(pmp, 0);
	info.error = 0;
	info.waitfor = MNT_NOWAIT;
	vsyncscan(mp, flags | VMSC_NOWAIT, hammer2_sync_scan2, &info);
	hammer2_trans_done(pmp);

	/*
	 * Start our flush transaction.  This does not return until all
	 * concurrent transactions have completed and will prevent any
	 * new transactions from running concurrently, except for the
	 * buffer cache transactions.
	 *
	 * For efficiency do an async pass before making sure with a
	 * synchronous pass on all related buffer cache buffers.  It
	 * should theoretically not be possible for any new file buffers
	 * to be instantiated during this sequence.
	 */
	hammer2_trans_init(pmp, HAMMER2_TRANS_ISFLUSH |
				HAMMER2_TRANS_PREFLUSH);
	hammer2_inode_run_sideq(pmp);

	info.error = 0;
	info.waitfor = MNT_NOWAIT;
	vsyncscan(mp, flags | VMSC_NOWAIT, hammer2_sync_scan2, &info);
	info.waitfor = MNT_WAIT;
	vsyncscan(mp, flags, hammer2_sync_scan2, &info);

	/*
	 * Clear PREFLUSH.  This prevents (or asserts on) any new logical
	 * buffer cache flushes which occur during the flush.  Device buffers
	 * are not affected.
	 */
	hammer2_bioq_sync(pmp);
	hammer2_trans_clear_preflush(pmp);

	/*
	 * Use the XOP interface to concurrently flush all nodes to
	 * synchronize the PFSROOT subtopology to the media.  A standard
	 * end-of-scan ENOENT error indicates cluster sufficiency.
	 *
	 * Note that this flush will not be visible on crash recovery until
	 * we flush the super-root topology in the next loop.
	 *
	 * XXX For now wait for all flushes to complete.
	 */
	xop = hammer2_xop_alloc(iroot, HAMMER2_XOP_MODIFYING);
	hammer2_xop_start(&xop->head, hammer2_inode_xop_flush);
	error = hammer2_xop_collect(&xop->head,
				    HAMMER2_XOP_COLLECT_WAITALL);
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	if (error == ENOENT)
		error = 0;

	hammer2_trans_done(pmp);

	return (error);
}
/*
 * Note that we ignore the transaction mtid we got above.  Instead,
 * each vfsync below will ultimately get its own via TRANS_BUFCACHE
 * transactions.
 */
static int
hammer2_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer2_sync_info *info = data;
	hammer2_inode_t *ip;
	int error = 0;

	/*
	 * Degenerate cases.  Note that ip == NULL typically means the
	 * syncer vnode itself and we don't want to vclrisdirty() in that
	 * situation.
	 */
	ip = VTOI(vp);
	if (ip == NULL)
		return (0);
	if (vp->v_type == VNON || vp->v_type == VBAD) {
		vclrisdirty(vp);
		return (0);
	}

	/*
	 * VOP_FSYNC will start a new transaction so replicate some code
	 * here to do it inline (see hammer2_vop_fsync()).
	 *
	 * WARNING: The vfsync interacts with the buffer cache and might
	 *          block, we can't hold the inode lock at that time.
	 *          However, we MUST ref ip before blocking to ensure that
	 *          it isn't ripped out from under us (since we do not
	 *          hold a lock on the vnode).
	 */
	hammer2_inode_ref(ip);
	if ((ip->flags & HAMMER2_INODE_MODIFIED) ||
	    !RB_EMPTY(&vp->v_rbdirty_tree)) {
		vfsync(vp, info->waitfor, 1, NULL, NULL);
		if (ip->flags & (HAMMER2_INODE_RESIZED |
				 HAMMER2_INODE_MODIFIED)) {
			hammer2_inode_lock(ip, 0);
			hammer2_inode_chain_sync(ip);
			hammer2_inode_unlock(ip);
		}
	}
	if ((ip->flags & HAMMER2_INODE_MODIFIED) == 0 &&
	    RB_EMPTY(&vp->v_rbdirty_tree)) {
		vclrisdirty(vp);
	}

	hammer2_inode_drop(ip);
	if (error)
		info->error = error;
	return (0);
}
static int
hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp)
{
	hammer2_inode_t *ip;

	KKASSERT(MAXFIDSZ >= 16);
	ip = VTOI(vp);
	fhp->fid_len = offsetof(struct fid, fid_data[16]);

	((hammer2_tid_t *)fhp->fid_data)[0] = ip->meta.inum;
	((hammer2_tid_t *)fhp->fid_data)[1] = 0;

	return 0;
}
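/*
 * Editorial sketch of the resulting NFS file handle layout (derived from
 * the two stores above, not original text): fid_data is treated as two
 * 64-bit words, so a 16-byte handle looks like
 *
 *	fid_data[0..7]	= ip->meta.inum		(inode number)
 *	fid_data[8..15]	= 0			(reserved)
 *
 * hammer2_vfs_fhtovp() below reverses this by masking the first word
 * with HAMMER2_DIRHASH_USERMSK.
 */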
static int
hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
		   struct fid *fhp, struct vnode **vpp)
{
	hammer2_tid_t inum;
	int error;

	inum = ((hammer2_tid_t *)fhp->fid_data)[0] & HAMMER2_DIRHASH_USERMSK;
	if (vpp) {
		if (inum == 1)
			error = hammer2_vfs_root(mp, vpp);
		else
			error = hammer2_vfs_vget(mp, NULL, inum, vpp);
	} else {
		error = 0;
	}
	if (error)
		kprintf("fhtovp: %016jx -> %p, %d\n", inum, *vpp, error);
	return error;
}
static int
hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
		     int *exflagsp, struct ucred **credanonp)
{
	hammer2_pfs_t *pmp;
	struct netcred *np;
	int error;

	pmp = MPTOPMP(mp);
	np = vfs_export_lookup(mp, &pmp->export, nam);
	if (np) {
		*exflagsp = np->netc_exflags;
		*credanonp = &np->netc_anon;
		error = 0;
	} else {
		error = EACCES;
	}
	return error;
}
/*
 * Support code for hammer2_vfs_mount().  Read, verify, and install the volume
 * header into the HMP.
 *
 * XXX read four volhdrs and use the one with the highest TID whose CRC
 *     matches.
 *
 * XXX For filesystems w/ less than 4 volhdrs, make sure to not write to
 *     nonexistent locations.
 *
 * XXX Record selected volhdr and ring updates to each of 4 volhdrs
 */
static int
hammer2_install_volume_header(hammer2_dev_t *hmp)
{
	hammer2_volume_data_t *vd;
	struct buf *bp;
	hammer2_crc32_t crc0, crc, bcrc0, bcrc;
	int error_reported = 0;
	int error = 0;
	int valid = 0;
	int i;

	bp = NULL;

	/*
	 * There are up to 4 copies of the volume header (syncs iterate
	 * between them so there is no single master).  We don't trust the
	 * volu_size field so we don't know precisely how large the filesystem
	 * is, so depend on the OS to return an error if we go beyond the
	 * block device's EOF.
	 */
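	/*
	 * Editorial arithmetic sketch (assumes HAMMER2_ZONE_BYTES64 is the
	 * 2GB zone size used by the on-disk format; not original text):
	 * the loop below probes copy #i at byte offset i * 2GB, i.e.
	 * offsets 0, 2G, 4G and 6G for HAMMER2_NUM_VOLHDRS == 4, which is
	 * why small filesystems may legitimately hit EOF on later copies.
	 */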
	for (i = 0; i < HAMMER2_NUM_VOLHDRS; i++) {
		error = bread(hmp->devvp, i * HAMMER2_ZONE_BYTES64,
			      HAMMER2_VOLUME_BYTES, &bp);
		if (error) {
			brelse(bp);
			bp = NULL;
			continue;
		}

		vd = (struct hammer2_volume_data *) bp->b_data;
		if ((vd->magic != HAMMER2_VOLUME_ID_HBO) &&
		    (vd->magic != HAMMER2_VOLUME_ID_ABO)) {
			brelse(bp);
			bp = NULL;
			continue;
		}

		if (vd->magic == HAMMER2_VOLUME_ID_ABO) {
			/* XXX: Reversed-endianness filesystem */
			kprintf("hammer2: reverse-endian filesystem detected");
			brelse(bp);
			bp = NULL;
			continue;
		}

		crc = vd->icrc_sects[HAMMER2_VOL_ICRC_SECT0];
		crc0 = hammer2_icrc32(bp->b_data + HAMMER2_VOLUME_ICRC0_OFF,
				      HAMMER2_VOLUME_ICRC0_SIZE);
		bcrc = vd->icrc_sects[HAMMER2_VOL_ICRC_SECT1];
		bcrc0 = hammer2_icrc32(bp->b_data + HAMMER2_VOLUME_ICRC1_OFF,
				       HAMMER2_VOLUME_ICRC1_SIZE);
		if ((crc0 != crc) || (bcrc0 != bcrc)) {
			kprintf("hammer2 volume header crc "
				"mismatch copy #%d %08x/%08x\n",
				i, crc0, crc);
			error_reported = 1;
			brelse(bp);
			bp = NULL;
			continue;
		}
		if (valid == 0 || hmp->voldata.mirror_tid < vd->mirror_tid) {
			valid = 1;
			hmp->voldata = *vd;
			hmp->volhdrno = i;
		}
		brelse(bp);
		bp = NULL;
	}
	if (valid) {
		hmp->volsync = hmp->voldata;
		error = 0;
		if (error_reported || bootverbose || 1) { /* 1/DEBUG */
			kprintf("hammer2: using volume header #%d\n",
				hmp->volhdrno);
		}
	} else {
		error = EINVAL;
		kprintf("hammer2: no valid volume headers found!\n");
	}
	return error;
}
/*
 * This handles hysteresis on regular file flushes.  Because the BIOs are
 * routed to a thread it is possible for an excessive number to build up
 * and cause long front-end stalls long before the runningbuffspace limit
 * is hit, so we implement hammer2_flush_pipe to control the
 * hysteresis.
 *
 * This is a particular problem when compression is used.
 */
void
hammer2_lwinprog_ref(hammer2_pfs_t *pmp)
{
	atomic_add_int(&pmp->count_lwinprog, 1);
}

void
hammer2_lwinprog_drop(hammer2_pfs_t *pmp)
{
	int lwinprog;

	lwinprog = atomic_fetchadd_int(&pmp->count_lwinprog, -1);
	if ((lwinprog & HAMMER2_LWINPROG_WAITING) &&
	    (lwinprog & HAMMER2_LWINPROG_MASK) <= hammer2_flush_pipe * 2 / 3) {
		atomic_clear_int(&pmp->count_lwinprog,
				 HAMMER2_LWINPROG_WAITING);
		wakeup(&pmp->count_lwinprog);
	}
	if ((lwinprog & HAMMER2_LWINPROG_WAITING0) &&
	    (lwinprog & HAMMER2_LWINPROG_MASK) <= 0) {
		atomic_clear_int(&pmp->count_lwinprog,
				 HAMMER2_LWINPROG_WAITING0);
		wakeup(&pmp->count_lwinprog);
	}
}
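/*
 * Editorial worked example (derived from the thresholds above, not
 * original text): with the default hammer2_flush_pipe of 100, a writer
 * blocked on HAMMER2_LWINPROG_WAITING is woken once the in-progress
 * count drops to 100 * 2 / 3 = 66, while a WAITING0 waiter only wakes
 * when the pipe fully drains to 0.
 */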
void
hammer2_lwinprog_wait(hammer2_pfs_t *pmp, int flush_pipe)
{
	int lwinprog;
	int lwflag = (flush_pipe) ? HAMMER2_LWINPROG_WAITING :
				    HAMMER2_LWINPROG_WAITING0;

	for (;;) {
		lwinprog = pmp->count_lwinprog;
		if ((lwinprog & HAMMER2_LWINPROG_MASK) <= flush_pipe)
			break;
		tsleep_interlock(&pmp->count_lwinprog, 0);
		atomic_set_int(&pmp->count_lwinprog, lwflag);
		lwinprog = pmp->count_lwinprog;
		if ((lwinprog & HAMMER2_LWINPROG_MASK) <= flush_pipe)
			break;
		tsleep(&pmp->count_lwinprog, PINTERLOCKED, "h2wpipe", hz);
	}
}
/*
 * Manage excessive memory resource use for chain and related
 * structures.
 */
void
hammer2_pfs_memory_wait(hammer2_pfs_t *pmp)
{
	uint32_t waiting;
	long count;
	long limit;
#if 0
	static int zzticks;
#endif

	/*
	 * Atomic check condition and wait.  Also do an early speedup of
	 * the syncer to try to avoid hitting the wait.
	 */
	for (;;) {
		waiting = pmp->inmem_dirty_chains;
		count = waiting & HAMMER2_DIRTYCHAIN_MASK;

		limit = pmp->mp->mnt_nvnodelistsize / 10;
		if (limit < hammer2_limit_dirty_chains)
			limit = hammer2_limit_dirty_chains;

#if 0
		if ((int)(ticks - zzticks) > hz) {
			zzticks = ticks;
			kprintf("count %ld %ld\n", count, limit);
		}
#endif

		/*
		 * Block if there are too many dirty chains present, wait
		 * for the flush to clean some out.
		 */
		if (count > limit) {
			tsleep_interlock(&pmp->inmem_dirty_chains, 0);
			if (atomic_cmpset_int(&pmp->inmem_dirty_chains,
				       waiting,
				       waiting | HAMMER2_DIRTYCHAIN_WAITING)) {
				speedup_syncer(pmp->mp);
				tsleep(&pmp->inmem_dirty_chains, PINTERLOCKED,
				       "h2memw", hz);
			}
			continue;	/* loop on success or fail */
		}

		/*
		 * Try to start an early flush before we are forced to block.
		 */
		if (count > limit * 7 / 10)
			speedup_syncer(pmp->mp);
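		/*
		 * Editorial worked example (restating the thresholds above,
		 * not original text): with limit = 10000 dirty chains, the
		 * syncer is nudged early once count exceeds 7000
		 * (limit * 7 / 10) and writers block outright only past
		 * 10000, giving the flusher a head start.
		 */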
		break;
	}
}

void
hammer2_pfs_memory_inc(hammer2_pfs_t *pmp)
{
	if (pmp)
		atomic_add_int(&pmp->inmem_dirty_chains, 1);
}

void
hammer2_pfs_memory_wakeup(hammer2_pfs_t *pmp)
{
	uint32_t waiting;

	if (pmp == NULL)
		return;

	for (;;) {
		waiting = pmp->inmem_dirty_chains;
		if (atomic_cmpset_int(&pmp->inmem_dirty_chains,
				      waiting,
				      (waiting - 1) &
				      ~HAMMER2_DIRTYCHAIN_WAITING)) {
			break;
		}
	}

	if (waiting & HAMMER2_DIRTYCHAIN_WAITING)
		wakeup(&pmp->inmem_dirty_chains);
}
void
hammer2_dump_chain(hammer2_chain_t *chain, int tab, int *countp, char pfx)
{
	hammer2_chain_t *scan;
	hammer2_chain_t *parent;

	--*countp;
	if (*countp == 0) {
		kprintf("%*.*s...\n", tab, tab, "");
		return;
	}
	if (*countp < 0)
		return;
	kprintf("%*.*s%c-chain %p.%d %016jx/%d mir=%016jx\n",
		tab, tab, "", pfx,
		chain, chain->bref.type,
		chain->bref.key, chain->bref.keybits,
		chain->bref.mirror_tid);

	kprintf("%*.*s      [%08x] (%s) refs=%d",
		tab, tab, "",
		chain->flags,
		((chain->bref.type == HAMMER2_BREF_TYPE_INODE &&
		chain->data) ?  (char *)chain->data->ipdata.filename : "?"),
		chain->refs);

	parent = chain->parent;
	if (parent)
		kprintf("\n%*.*s      p=%p [pflags %08x prefs %d",
			tab, tab, "",
			parent, parent->flags, parent->refs);
	if (RB_EMPTY(&chain->core.rbtree)) {
		kprintf("\n");
	} else {
		kprintf(" {\n");
		RB_FOREACH(scan, hammer2_chain_tree, &chain->core.rbtree)
			hammer2_dump_chain(scan, tab + 4, countp, 'a');
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE && chain->data)
			kprintf("%*.*s}(%s)\n", tab, tab, "",
				chain->data->ipdata.filename);
		else
			kprintf("%*.*s}\n", tab, tab, "");
	}
}