/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/nlookup.h>

#include "hammer.h"

/*
 * NOTE! Global statistics may not be MPSAFE so HAMMER never uses them
 *	 in conditionals.
 */
int hammer_supported_version = HAMMER_VOL_VERSION_DEFAULT;
int hammer_debug_io;
int hammer_debug_general;
int hammer_debug_debug = 1;		/* medium-error panics */
int hammer_debug_inode;
int hammer_debug_locks;
int hammer_debug_btree;
int hammer_debug_tid;
int hammer_debug_recover;		/* -1 will disable, +1 will force */
int hammer_debug_recover_faults;
int hammer_debug_critical;		/* non-zero enter debugger on error */
int hammer_cluster_enable = 1;		/* enable read clustering by default */
int hammer_live_dedup = 0;
int hammer_tdmux_ticks;
int hammer_count_fsyncs;
int hammer_count_inodes;
int hammer_count_iqueued;
int hammer_count_reclaims;
int hammer_count_records;
int hammer_count_record_datas;
int hammer_count_volumes;
int hammer_count_buffers;
int hammer_count_nodes;
int64_t hammer_count_extra_space_used;
int64_t hammer_stats_btree_lookups;
int64_t hammer_stats_btree_searches;
int64_t hammer_stats_btree_inserts;
int64_t hammer_stats_btree_deletes;
int64_t hammer_stats_btree_elements;
int64_t hammer_stats_btree_splits;
int64_t hammer_stats_btree_iterations;
int64_t hammer_stats_btree_root_iterations;
int64_t hammer_stats_record_iterations;

int64_t hammer_stats_file_read;
int64_t hammer_stats_file_write;
int64_t hammer_stats_file_iopsr;
int64_t hammer_stats_file_iopsw;
int64_t hammer_stats_disk_read;
int64_t hammer_stats_disk_write;
int64_t hammer_stats_inode_flushes;
int64_t hammer_stats_commits;
int64_t hammer_stats_undo;
int64_t hammer_stats_redo;

long hammer_count_dirtybufspace;	/* global */
int hammer_count_refedbufs;		/* global */
int hammer_count_reservations;
long hammer_count_io_running_read;
long hammer_count_io_running_write;
int hammer_count_io_locked;
long hammer_limit_dirtybufspace;	/* per-mount */
int hammer_limit_recs;			/* as a whole XXX */
int hammer_limit_inode_recs = 2048;	/* per inode */
int hammer_limit_reclaims;
int hammer_live_dedup_cache_size = DEDUP_CACHE_SIZE;
int hammer_limit_redo = 4096 * 1024;	/* per inode */
int hammer_autoflush = 500;		/* auto flush (typ on reclaim) */
int hammer_bio_count;
int hammer_verify_zone;
int hammer_verify_data = 1;
int hammer_write_mode;
int hammer_double_buffer;
int hammer_btree_full_undo = 1;
int hammer_yield_check = 16;
int hammer_fsync_mode = 3;
int64_t hammer_contention_count;
int64_t hammer_zone_limit;

/*
 * Live dedup debug counters (sysctls are writable so that counters
 * can be reset from userspace).
 */
int64_t hammer_live_dedup_vnode_bcmps = 0;
int64_t hammer_live_dedup_device_bcmps = 0;
int64_t hammer_live_dedup_findblk_failures = 0;
int64_t hammer_live_dedup_bmap_saves = 0;
SYSCTL_NODE(_vfs, OID_AUTO, hammer, CTLFLAG_RW, 0, "HAMMER filesystem");

SYSCTL_INT(_vfs_hammer, OID_AUTO, supported_version, CTLFLAG_RD,
	   &hammer_supported_version, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_general, CTLFLAG_RW,
	   &hammer_debug_general, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_io, CTLFLAG_RW,
	   &hammer_debug_io, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_debug, CTLFLAG_RW,
	   &hammer_debug_debug, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_inode, CTLFLAG_RW,
	   &hammer_debug_inode, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_locks, CTLFLAG_RW,
	   &hammer_debug_locks, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_btree, CTLFLAG_RW,
	   &hammer_debug_btree, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_tid, CTLFLAG_RW,
	   &hammer_debug_tid, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover, CTLFLAG_RW,
	   &hammer_debug_recover, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover_faults, CTLFLAG_RW,
	   &hammer_debug_recover_faults, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_critical, CTLFLAG_RW,
	   &hammer_debug_critical, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, cluster_enable, CTLFLAG_RW,
	   &hammer_cluster_enable, 0, "");
/*
 * 0 - live dedup is disabled
 * 1 - dedup cache is populated on reads only
 * 2 - dedup cache is populated on both reads and writes
 *
 * LIVE_DEDUP IS DISABLED PERMANENTLY!  This feature appears to cause
 * blockmap corruption over time so we've turned it off permanently.
 */
SYSCTL_INT(_vfs_hammer, OID_AUTO, live_dedup, CTLFLAG_RD,
	   &hammer_live_dedup, 0, "Enable live dedup (experimental)");
SYSCTL_INT(_vfs_hammer, OID_AUTO, tdmux_ticks, CTLFLAG_RW,
	   &hammer_tdmux_ticks, 0, "Hammer tdmux ticks");

SYSCTL_LONG(_vfs_hammer, OID_AUTO, limit_dirtybufspace, CTLFLAG_RW,
	   &hammer_limit_dirtybufspace, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_recs, CTLFLAG_RW,
	   &hammer_limit_recs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_inode_recs, CTLFLAG_RW,
	   &hammer_limit_inode_recs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_reclaims, CTLFLAG_RW,
	   &hammer_limit_reclaims, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, live_dedup_cache_size, CTLFLAG_RW,
	   &hammer_live_dedup_cache_size, 0,
	   "Number of cache entries");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_redo, CTLFLAG_RW,
	   &hammer_limit_redo, 0, "");

SYSCTL_INT(_vfs_hammer, OID_AUTO, count_fsyncs, CTLFLAG_RD,
	   &hammer_count_fsyncs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_inodes, CTLFLAG_RD,
	   &hammer_count_inodes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_iqueued, CTLFLAG_RD,
	   &hammer_count_iqueued, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reclaims, CTLFLAG_RD,
	   &hammer_count_reclaims, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_records, CTLFLAG_RD,
	   &hammer_count_records, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_record_datas, CTLFLAG_RD,
	   &hammer_count_record_datas, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_volumes, CTLFLAG_RD,
	   &hammer_count_volumes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_buffers, CTLFLAG_RD,
	   &hammer_count_buffers, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_nodes, CTLFLAG_RD,
	   &hammer_count_nodes, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, count_extra_space_used, CTLFLAG_RD,
	   &hammer_count_extra_space_used, 0, "");

SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_searches, CTLFLAG_RD,
	   &hammer_stats_btree_searches, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_lookups, CTLFLAG_RD,
	   &hammer_stats_btree_lookups, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_inserts, CTLFLAG_RD,
	   &hammer_stats_btree_inserts, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_deletes, CTLFLAG_RD,
	   &hammer_stats_btree_deletes, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_elements, CTLFLAG_RD,
	   &hammer_stats_btree_elements, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_splits, CTLFLAG_RD,
	   &hammer_stats_btree_splits, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_iterations, CTLFLAG_RD,
	   &hammer_stats_btree_iterations, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_root_iterations, CTLFLAG_RD,
	   &hammer_stats_btree_root_iterations, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_record_iterations, CTLFLAG_RD,
	   &hammer_stats_record_iterations, 0, "");

SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_read, CTLFLAG_RD,
	   &hammer_stats_file_read, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_write, CTLFLAG_RD,
	   &hammer_stats_file_write, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_iopsr, CTLFLAG_RD,
	   &hammer_stats_file_iopsr, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_iopsw, CTLFLAG_RD,
	   &hammer_stats_file_iopsw, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_disk_read, CTLFLAG_RD,
	   &hammer_stats_disk_read, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_disk_write, CTLFLAG_RD,
	   &hammer_stats_disk_write, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_inode_flushes, CTLFLAG_RD,
	   &hammer_stats_inode_flushes, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_commits, CTLFLAG_RD,
	   &hammer_stats_commits, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_undo, CTLFLAG_RD,
	   &hammer_stats_undo, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_redo, CTLFLAG_RD,
	   &hammer_stats_redo, 0, "");

SYSCTL_QUAD(_vfs_hammer, OID_AUTO, live_dedup_vnode_bcmps, CTLFLAG_RW,
	   &hammer_live_dedup_vnode_bcmps, 0,
	   "successful vnode buffer comparisons");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, live_dedup_device_bcmps, CTLFLAG_RW,
	   &hammer_live_dedup_device_bcmps, 0,
	   "successful device buffer comparisons");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, live_dedup_findblk_failures, CTLFLAG_RW,
	   &hammer_live_dedup_findblk_failures, 0,
	   "block lookup failures for comparison");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, live_dedup_bmap_saves, CTLFLAG_RW,
	   &hammer_live_dedup_bmap_saves, 0,
	   "useful physical block lookups");

SYSCTL_LONG(_vfs_hammer, OID_AUTO, count_dirtybufspace, CTLFLAG_RD,
	   &hammer_count_dirtybufspace, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_refedbufs, CTLFLAG_RD,
	   &hammer_count_refedbufs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reservations, CTLFLAG_RD,
	   &hammer_count_reservations, 0, "");
SYSCTL_LONG(_vfs_hammer, OID_AUTO, count_io_running_read, CTLFLAG_RD,
	   &hammer_count_io_running_read, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_locked, CTLFLAG_RD,
	   &hammer_count_io_locked, 0, "");
SYSCTL_LONG(_vfs_hammer, OID_AUTO, count_io_running_write, CTLFLAG_RD,
	   &hammer_count_io_running_write, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, zone_limit, CTLFLAG_RW,
	   &hammer_zone_limit, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, contention_count, CTLFLAG_RW,
	   &hammer_contention_count, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, autoflush, CTLFLAG_RW,
	   &hammer_autoflush, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, verify_zone, CTLFLAG_RW,
	   &hammer_verify_zone, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, verify_data, CTLFLAG_RW,
	   &hammer_verify_data, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, write_mode, CTLFLAG_RW,
	   &hammer_write_mode, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, double_buffer, CTLFLAG_RW,
	   &hammer_double_buffer, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, btree_full_undo, CTLFLAG_RW,
	   &hammer_btree_full_undo, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, yield_check, CTLFLAG_RW,
	   &hammer_yield_check, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, fsync_mode, CTLFLAG_RW,
	   &hammer_fsync_mode, 0, "");
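/*
 * Illustrative userland usage: the knobs above appear under the
 * vfs.hammer sysctl tree, e.g.
 *
 *	sysctl vfs.hammer.stats_disk_write		(read a RD statistic)
 *	sysctl vfs.hammer.fsync_mode=3			(tune a RW knob)
 *	sysctl vfs.hammer.live_dedup_bmap_saves=0	(reset a RW counter)
 */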
/* KTR_INFO_MASTER(hammer); */

static void	hammer_free_hmp(struct mount *mp);

static int	hammer_vfs_mount(struct mount *mp, char *path, caddr_t data,
				struct ucred *cred);
static int	hammer_vfs_unmount(struct mount *mp, int mntflags);
static int	hammer_vfs_root(struct mount *mp, struct vnode **vpp);
static int	hammer_vfs_statfs(struct mount *mp, struct statfs *sbp,
				struct ucred *cred);
static int	hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
				struct ucred *cred);
static int	hammer_vfs_sync(struct mount *mp, int waitfor);
static int	hammer_vfs_vget(struct mount *mp, struct vnode *dvp,
				ino_t ino, struct vnode **vpp);
static int	hammer_vfs_init(struct vfsconf *conf);
static int	hammer_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
				struct fid *fhp, struct vnode **vpp);
static int	hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp);
static int	hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
				int *exflagsp, struct ucred **credanonp);

static struct vfsops hammer_vfsops = {
	.vfs_mount = hammer_vfs_mount,
	.vfs_unmount = hammer_vfs_unmount,
	.vfs_root = hammer_vfs_root,
	.vfs_statfs = hammer_vfs_statfs,
	.vfs_statvfs = hammer_vfs_statvfs,
	.vfs_sync = hammer_vfs_sync,
	.vfs_vget = hammer_vfs_vget,
	.vfs_init = hammer_vfs_init,
	.vfs_vptofh = hammer_vfs_vptofh,
	.vfs_fhtovp = hammer_vfs_fhtovp,
	.vfs_checkexp = hammer_vfs_checkexp
};

MALLOC_DEFINE(M_HAMMER, "HAMMER-mount", "");

VFS_SET(hammer_vfsops, hammer, 0);
MODULE_VERSION(hammer, 1);
static int
hammer_vfs_init(struct vfsconf *conf)
{
	long n;

	/*
	 * Wait up to this long for an exclusive deadlock to clear
	 * before acquiring a new shared lock on the ip.  The deadlock
	 * may have occurred on a b-tree node related to the ip.
	 */
	if (hammer_tdmux_ticks == 0)
		hammer_tdmux_ticks = hz / 5;
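	/*
	 * (hz / 5 is 200ms worth of clock ticks regardless of the
	 * configured hz value.)
	 */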
	/*
	 * Autosize, but be careful because a hammer filesystem's
	 * reserve is partially calculated based on dirtybufspace,
	 * so we simply cannot allow it to get too large.
	 */
	if (hammer_limit_recs == 0) {
		n = nbuf * 25;
		if (n > kmalloc_limit(M_HAMMER) / 512)
			n = kmalloc_limit(M_HAMMER) / 512;
		if (n > 2 * 1024 * 1024)
			n = 2 * 1024 * 1024;
		hammer_limit_recs = (int)n;
	}
	if (hammer_limit_dirtybufspace == 0) {
		hammer_limit_dirtybufspace = hidirtybufspace / 2;
		if (hammer_limit_dirtybufspace < 1L * 1024 * 1024)
			hammer_limit_dirtybufspace = 1024L * 1024;
		if (hammer_limit_dirtybufspace > 1024L * 1024 * 1024)
			hammer_limit_dirtybufspace = 1024L * 1024 * 1024;
	}
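	/*
	 * Net effect of the clamps above: hammer_limit_dirtybufspace
	 * defaults to half of hidirtybufspace, bounded to [1MB, 1GB].
	 */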
	/*
	 * The hammer_inode structure detaches from the vnode on reclaim.
	 * This limits the number of inodes in this state to prevent a
	 * memory pool blowout.
	 */
	if (hammer_limit_reclaims == 0)
		hammer_limit_reclaims = desiredvnodes / 10;
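	/*
	 * For example (illustrative numbers): with desiredvnodes at
	 * 100000 this allows up to 10000 reclaimed-but-cached inodes.
	 */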
	return (0);
}

static int
hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data,
		 struct ucred *cred)
{
	struct hammer_mount_info info;
	hammer_mount_t hmp;
	hammer_volume_t rootvol;
	struct vnode *rootvp;
	struct vnode *devvp = NULL;
	const char *upath;	/* volume name in userspace */
	char *path;		/* volume name in system space */
	int error;
	int i;
	int master_id;
	int nvolumes;
	char *next_volume_ptr = NULL;

	/*
	 * Accept hammer_mount_info.  mntpt is NULL for root mounts at boot.
	 */
	bzero(&info, sizeof(info));
	if (mntpt == NULL) {
		/*
		 * Root mount: the volume list comes from f_mntfromname
		 * and no copyin is needed.
		 */
		info.nvolumes = 1;
		next_volume_ptr = mp->mnt_stat.f_mntfromname;

		/* Count number of volumes separated by ':' */
		for (char *p = next_volume_ptr; *p != '\0'; ++p) {
			if (*p == ':')
				++info.nvolumes;
		}
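		/*
		 * e.g. an f_mntfromname of "/dev/da0s1a:/dev/da1s1a"
		 * (illustrative) yields info.nvolumes == 2.
		 */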
		mp->mnt_flag &= ~MNT_RDONLY; /* mount R/W */
	} else {
		if ((error = copyin(data, &info, sizeof(info))) != 0)
			return (error);
	}

	/*
	 * updating or new mount
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		hmp = (void *)mp->mnt_data;
		KKASSERT(hmp != NULL);
	} else {
		if (info.nvolumes <= 0 || info.nvolumes > HAMMER_MAX_VOLUMES)
			return (EINVAL);
		hmp = NULL;
	}

	/*
	 * master-id validation.  The master id may not be changed by a
	 * mount update.
	 */
	if (info.hflags & HMNT_MASTERID) {
		if (hmp && hmp->master_id != info.master_id) {
			kprintf("HAMMER: cannot change master id "
				"with mount update\n");
			return (EINVAL);
		}
		master_id = info.master_id;
		if (master_id < -1 || master_id >= HAMMER_MAX_MASTERS)
			return (EINVAL);
	} else {
		if (hmp)
			master_id = hmp->master_id;
		else
			master_id = 0;
	}

	/*
	 * Internal mount data structure
	 */
	if (hmp == NULL) {
		hmp = kmalloc(sizeof(*hmp), M_HAMMER, M_WAITOK | M_ZERO);
		mp->mnt_data = (qaddr_t)hmp;
		hmp->mp = mp;

		/*
		 * Make sure kmalloc type limits are set appropriately.
		 *
		 * Our inode kmalloc group is sized based on maxvnodes
		 * (controlled by the system, not us).
		 */
		kmalloc_create(&hmp->m_misc, "HAMMER-others");
		kmalloc_create(&hmp->m_inodes, "HAMMER-inodes");

		kmalloc_raise_limit(hmp->m_inodes, 0);	/* unlimited */
		hmp->root_btree_beg.localization = 0x00000000U;
		hmp->root_btree_beg.obj_id = -0x8000000000000000LL;
		hmp->root_btree_beg.key = -0x8000000000000000LL;
		hmp->root_btree_beg.create_tid = 1;
		hmp->root_btree_beg.delete_tid = 1;
		hmp->root_btree_beg.rec_type = 0;
		hmp->root_btree_beg.obj_type = 0;
		hmp->root_btree_beg.btype = HAMMER_BTREE_TYPE_NONE;

		hmp->root_btree_end.localization = 0xFFFFFFFFU;
		hmp->root_btree_end.obj_id = 0x7FFFFFFFFFFFFFFFLL;
		hmp->root_btree_end.key = 0x7FFFFFFFFFFFFFFFLL;
		hmp->root_btree_end.create_tid = 0xFFFFFFFFFFFFFFFFULL;
		hmp->root_btree_end.delete_tid = 0;	/* special case */
		hmp->root_btree_end.rec_type = 0xFFFFU;
		hmp->root_btree_end.obj_type = 0;
		hmp->root_btree_end.btype = HAMMER_BTREE_TYPE_NONE;
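		/*
		 * root_btree_beg/end span the entire B-Tree key space
		 * (smallest to largest possible localization/obj_id/key),
		 * so iterations bounded by them cover every record.
		 */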
		hmp->krate.freq = 1;	/* maximum reporting rate (hz) */
		hmp->krate.count = -16;	/* initial burst */
		hmp->kdiag.freq = 1;	/* maximum reporting rate (hz) */
		hmp->kdiag.count = -16;	/* initial burst */

		hmp->sync_lock.refs = 1;
		hmp->free_lock.refs = 1;
		hmp->undo_lock.refs = 1;
		hmp->blkmap_lock.refs = 1;
		hmp->snapshot_lock.refs = 1;
		hmp->volume_lock.refs = 1;

		TAILQ_INIT(&hmp->delay_list);
		TAILQ_INIT(&hmp->flush_group_list);
		TAILQ_INIT(&hmp->objid_cache_list);
		TAILQ_INIT(&hmp->undo_lru_list);
		TAILQ_INIT(&hmp->reclaim_list);

		RB_INIT(&hmp->rb_dedup_crc_root);
		RB_INIT(&hmp->rb_dedup_off_root);
		TAILQ_INIT(&hmp->dedup_lru_list);
	}
	hmp->hflags &= ~HMNT_USERFLAGS;
	hmp->hflags |= info.hflags & HMNT_USERFLAGS;

	hmp->master_id = master_id;

	if (info.asof) {
		mp->mnt_flag |= MNT_RDONLY;
		hmp->asof = info.asof;
	} else {
		hmp->asof = HAMMER_MAX_TID;
	}

	hmp->volume_to_remove = -1;
	/*
	 * Re-open read-write if originally read-only, or vice-versa.
	 *
	 * When going from read-only to read-write execute the stage2
	 * recovery if it has not already been run.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		lwkt_gettoken(&hmp->fs_token);
		error = 0;
		if (hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
			kprintf("HAMMER: read-only -> read-write\n");
			hmp->ronly = 0;
			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
				hammer_adjust_volume_mode, NULL);
			rootvol = hammer_get_root_volume(hmp, &error);
			if (rootvol) {
				hammer_recover_flush_buffers(hmp, rootvol, 1);
				error = hammer_recover_stage2(hmp, rootvol);
				bcopy(rootvol->ondisk->vol0_blockmap,
				      hmp->blockmap,
				      sizeof(hmp->blockmap));
				hammer_rel_volume(rootvol, 0);
			}
			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
				hammer_reload_inode, NULL);
			/* kernel clears MNT_RDONLY */
		} else if (hmp->ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			kprintf("HAMMER: read-write -> read-only\n");
			hmp->ronly = 1;	/* messy */
			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
				hammer_reload_inode, NULL);
			hmp->ronly = 0;
			hammer_flusher_sync(hmp);
			hammer_flusher_sync(hmp);
			hammer_flusher_sync(hmp);
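			/*
			 * (The triple sync appears intended to flush work
			 * queued by each preceding pass before the volumes
			 * are demoted to read-only below.)
			 */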
			hmp->ronly = 1;
			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
				hammer_adjust_volume_mode, NULL);
		}
		lwkt_reltoken(&hmp->fs_token);
		return (error);
	}

	RB_INIT(&hmp->rb_vols_root);
	RB_INIT(&hmp->rb_inos_root);
	RB_INIT(&hmp->rb_redo_root);
	RB_INIT(&hmp->rb_nods_root);
	RB_INIT(&hmp->rb_undo_root);
	RB_INIT(&hmp->rb_resv_root);
	RB_INIT(&hmp->rb_bufs_root);
	RB_INIT(&hmp->rb_pfsm_root);

	hmp->ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);

	RB_INIT(&hmp->volu_root);
	RB_INIT(&hmp->undo_root);
	RB_INIT(&hmp->data_root);
	RB_INIT(&hmp->meta_root);
	RB_INIT(&hmp->lose_root);
	TAILQ_INIT(&hmp->iorun_list);

	lwkt_token_init(&hmp->fs_token, "hammerfs");
	lwkt_token_init(&hmp->io_token, "hammerio");

	lwkt_gettoken(&hmp->fs_token);
	/*
	 * Load volumes
	 */
	path = objcache_get(namei_oc, M_WAITOK);
	for (i = 0; i < info.nvolumes; ++i) {
		if (mntpt == NULL) {
			/*
			 * Root mount: parse the next ':'-separated
			 * volume name in-kernel; no copyin is needed.
			 */
			KKASSERT(next_volume_ptr != NULL);
			if (*next_volume_ptr != '/') {
				/* relative path */
				strcpy(path, "/dev/");
			} else {
				path[0] = 0;
			}
			int k;
			for (k = strlen(path); k < MAXPATHLEN-1; ++k) {
				if (*next_volume_ptr == '\0') {
					break;
				} else if (*next_volume_ptr == ':') {
					++next_volume_ptr;
					break;
				}
				path[k] = *next_volume_ptr;
				++next_volume_ptr;
			}
			path[k] = 0;

			cdev_t dev = kgetdiskbyname(path);
			error = bdevvp(dev, &devvp);
			if (error) {
				kprintf("hammer_mount: can't find devvp\n");
			}
		} else {
			error = copyin(&info.volumes[i], &upath,
				       sizeof(char *));
			if (error == 0)
				error = copyinstr(upath, path,
						  MAXPATHLEN, NULL);
		}
		if (error == 0)
			error = hammer_install_volume(hmp, path, devvp, NULL);
		if (error)
			break;
	}
	objcache_put(namei_oc, path);
	/*
	 * Make sure we found a root volume
	 */
	if (hmp->rootvol == NULL) {
		kprintf("hammer_mount: No root volume found!\n");
		error = EINVAL;
		goto done;
	}

	/*
	 * Check that all required volumes are available
	 */
	if (error == 0 && hammer_mountcheck_volumes(hmp)) {
		kprintf("hammer_mount: Missing volumes, cannot mount!\n");
		error = EINVAL;
		goto done;
	}

	if (error) {
		kprintf("hammer_mount: Failed to load volumes!\n");
		goto done;
	}

	nvolumes = hammer_get_installed_volumes(hmp);
	if (hmp->nvolumes != nvolumes) {
		kprintf("hammer_mount: volume header says %d volumes, "
			"but %d installed\n",
			hmp->nvolumes, nvolumes);
		error = EINVAL;
		goto done;
	}
	/*
	 * No errors, setup enough of the mount point so we can lookup the
	 * root vnode.
	 */
	mp->mnt_iosize_max = MAXPHYS;
	mp->mnt_kern_flag |= MNTK_FSMID;
	mp->mnt_kern_flag |= MNTK_THR_SYNC;	/* new vsyncscan semantics */

	/*
	 * MPSAFE code.  Note that VOPs and VFSops which are not MPSAFE
	 * will acquire a per-mount token prior to entry and release it
	 * on return.
	 */
	mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;

	/*
	 * note: f_iosize is used by vnode_pager_haspage() when constructing
	 * its VOP_BMAP requests.
	 */
	mp->mnt_stat.f_iosize = HAMMER_BUFSIZE;
	mp->mnt_stat.f_bsize = HAMMER_BUFSIZE;

	mp->mnt_vstat.f_frsize = HAMMER_BUFSIZE;
	mp->mnt_vstat.f_bsize = HAMMER_BUFSIZE;

	mp->mnt_maxsymlinklen = 255;
	mp->mnt_flag |= MNT_LOCAL;

	vfs_add_vnodeops(mp, &hammer_vnode_vops, &mp->mnt_vn_norm_ops);
	vfs_add_vnodeops(mp, &hammer_spec_vops, &mp->mnt_vn_spec_ops);
	vfs_add_vnodeops(mp, &hammer_fifo_vops, &mp->mnt_vn_fifo_ops);
	/*
	 * The root volume's ondisk pointer is only valid if we hold a
	 * reference to it.
	 */
	rootvol = hammer_get_root_volume(hmp, &error);
	if (error)
		goto done;

	/*
	 * Perform any necessary UNDO operations.  The recovery code does
	 * call hammer_undo_lookup() so we have to pre-cache the blockmap,
	 * and then re-copy it again after recovery is complete.
	 *
	 * If this is a read-only mount the UNDO information is retained
	 * in memory in the form of dirty buffer cache buffers, and not
	 * written back to the media.
	 */
	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
	      sizeof(hmp->blockmap));

	/*
	 * Check filesystem version
	 */
	hmp->version = rootvol->ondisk->vol_version;
	if (hmp->version < HAMMER_VOL_VERSION_MIN ||
	    hmp->version > HAMMER_VOL_VERSION_MAX) {
		kprintf("HAMMER: mount unsupported fs version %d\n",
			hmp->version);
		error = ERANGE;
		goto done;
	}

	/*
	 * The undo_rec_limit limits the size of flush groups to avoid
	 * blowing out the UNDO FIFO.  This calculation is typically in
	 * the tens of thousands and matters primarily for small
	 * HAMMER filesystems.
	 */
	hmp->undo_rec_limit = hammer_undo_max(hmp) / 8192 + 100;
	if (hammer_debug_general & 0x0001)
		kprintf("HAMMER: undo_rec_limit %d\n", hmp->undo_rec_limit);
	/*
	 * NOTE: Recover stage1 not only handles meta-data recovery, it
	 *	 also sets hmp->undo_seqno for HAMMER VERSION 4+ filesystems.
	 */
	error = hammer_recover_stage1(hmp, rootvol);
	if (error) {
		kprintf("Failed to recover HAMMER filesystem on mount\n");
		goto done;
	}

	/*
	 * Finish setup now that we have a good root volume.
	 *
	 * The top 16 bits of fsid.val[1] is a pfs id.
	 */
	ksnprintf(mp->mnt_stat.f_mntfromname,
		  sizeof(mp->mnt_stat.f_mntfromname), "%s",
		  rootvol->ondisk->vol_name);
	mp->mnt_stat.f_fsid.val[0] =
		crc32((char *)&rootvol->ondisk->vol_fsid + 0, 8);
	mp->mnt_stat.f_fsid.val[1] =
		crc32((char *)&rootvol->ondisk->vol_fsid + 8, 8);
	mp->mnt_stat.f_fsid.val[1] &= 0x0000FFFF;
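	/*
	 * i.e. the two fsid words are CRCs of the two 8-byte halves of
	 * vol_fsid; masking val[1] to its low 16 bits frees the top 16
	 * bits for the pfs id noted above.
	 */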
	mp->mnt_vstat.f_fsid_uuid = rootvol->ondisk->vol_fsid;
	mp->mnt_vstat.f_fsid = crc32(&mp->mnt_vstat.f_fsid_uuid,
				     sizeof(mp->mnt_vstat.f_fsid_uuid));

	/*
	 * Certain often-modified fields in the root volume are cached in
	 * the hammer_mount structure so we do not have to generate lots
	 * of little UNDO structures for them.
	 *
	 * Recopy after recovery.  This also has the side effect of
	 * setting our cached undo FIFO's first_offset, which serves to
	 * placemark the FIFO start for the NEXT flush cycle while the
	 * on-disk first_offset represents the LAST flush cycle.
	 */
	hmp->next_tid = rootvol->ondisk->vol0_next_tid;
	hmp->flush_tid1 = hmp->next_tid;
	hmp->flush_tid2 = hmp->next_tid;
	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
	      sizeof(hmp->blockmap));
	hmp->copy_stat_freebigblocks = rootvol->ondisk->vol0_stat_freebigblocks;

	hammer_flusher_create(hmp);
	/*
	 * Locate the root directory with an obj_id of 1.
	 */
	error = hammer_vfs_vget(mp, NULL, HAMMER_OBJID_ROOT, &rootvp);
	if (error)
		goto done;
	vput(rootvp);
	if (hmp->ronly == 0)
		error = hammer_recover_stage2(hmp, rootvol);

	/*
	 * If the stage2 recovery fails be sure to clean out all cached
	 * vnodes before throwing away the mount structure or bad things
	 * will happen.
	 */
	if (error)
		vflush(mp, 0, 0);

	if ((mp->mnt_flag & MNT_UPDATE) == 0) {
		/* New mount */

		/* Populate info for mount point (NULL pad) */
		bzero(mp->mnt_stat.f_mntonname, MNAMELEN);
		size_t size;
		if (mntpt) {
			copyinstr(mntpt, mp->mnt_stat.f_mntonname,
				  MNAMELEN - 1, &size);
		} else { /* Root mount */
			mp->mnt_stat.f_mntonname[0] = '/';
		}
	}
	(void)VFS_STATFS(mp, &mp->mnt_stat, cred);
	hammer_rel_volume(rootvol, 0);

	/*
	 * Cleanup and return.
	 */
done:
	if (error) {
		/* called with fs_token held */
		hammer_free_hmp(mp);
	} else {
		lwkt_reltoken(&hmp->fs_token);
	}
	return (error);
}
/*
 * Unmount the filesystem.
 */
static int
hammer_vfs_unmount(struct mount *mp, int mntflags)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	int flags;
	int error;

	/*
	 * Clean out the vnodes
	 */
	lwkt_gettoken(&hmp->fs_token);
	flags = 0;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	error = vflush(mp, 0, flags);

	/*
	 * Clean up the internal mount structure and related entities.  This
	 * may issue I/O.
	 */
	if (error == 0) {
		/* called with fs_token held */
		hammer_free_hmp(mp);
	} else {
		lwkt_reltoken(&hmp->fs_token);
	}
	return (error);
}
/*
 * Clean up the internal mount structure and disassociate it from the mount.
 * This may issue I/O.
 *
 * Called with fs_token held.
 */
static void
hammer_free_hmp(struct mount *mp)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	hammer_flush_group_t flg;

	/*
	 * Flush anything dirty.  This won't even run if the
	 * filesystem errored-out.
	 */
	hammer_flush_dirty(hmp, 30);

	/*
	 * If the mount had a critical error we have to destroy any
	 * remaining inodes before we can finish cleaning up the flusher.
	 */
	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) {
		RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
			hammer_destroy_inode_callback, NULL);
	}

	/*
	 * There shouldn't be any inodes left now and any left over
	 * flush groups should now be empty.
	 */
	KKASSERT(RB_EMPTY(&hmp->rb_inos_root));
	while ((flg = TAILQ_FIRST(&hmp->flush_group_list)) != NULL) {
		TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
		KKASSERT(RB_EMPTY(&flg->flush_tree));
		if (flg->refs) {
			kprintf("HAMMER: Warning, flush_group %p was "
				"not empty on umount!\n", flg);
		}
		kfree(flg, hmp->m_misc);
	}
	/*
	 * We can finally destroy the flusher
	 */
	hammer_flusher_destroy(hmp);

	/*
	 * We may have held recovered buffers due to a read-only mount.
	 * These must be discarded.
	 */
	if (hmp->ronly)
		hammer_recover_flush_buffers(hmp, NULL, -1);

	/*
	 * Unload buffers and then volumes
	 */
	RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL,
		hammer_unload_buffer, NULL);
	RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
		hammer_unload_volume, NULL);

	mp->mnt_data = NULL;
	mp->mnt_flag &= ~MNT_LOCAL;
	hmp->mp = NULL;
	hammer_destroy_objid_cache(hmp);
	hammer_destroy_dedup_cache(hmp);
	if (hmp->dedup_free_cache != NULL) {
		kfree(hmp->dedup_free_cache, hmp->m_misc);
		hmp->dedup_free_cache = NULL;
	}
	kmalloc_destroy(&hmp->m_misc);
	kmalloc_destroy(&hmp->m_inodes);
	lwkt_reltoken(&hmp->fs_token);
	kfree(hmp, M_HAMMER);
}
/*
 * Report critical errors.  ip may be NULL.
 */
void
hammer_critical_error(hammer_mount_t hmp, hammer_inode_t ip,
		      int error, const char *msg)
{
	hmp->flags |= HAMMER_MOUNT_CRITICAL_ERROR;

	krateprintf(&hmp->krate,
		    "HAMMER(%s): Critical error inode=%jd error=%d %s\n",
		    hmp->mp->mnt_stat.f_mntfromname,
		    (intmax_t)(ip ? ip->obj_id : -1),
		    error, msg);

	if (hmp->ronly == 0) {
		hmp->ronly = 2;		/* special errored read-only mode */
		hmp->mp->mnt_flag |= MNT_RDONLY;
		RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
			hammer_adjust_volume_mode, NULL);
		kprintf("HAMMER(%s): Forcing read-only mode\n",
			hmp->mp->mnt_stat.f_mntfromname);
	}
	hmp->error = error;
	if (hammer_debug_critical)
		Debugger("Entering debugger");
}
/*
 * Obtain a vnode for the specified inode number.  An exclusively locked
 * vnode is returned.
 */
int
hammer_vfs_vget(struct mount *mp, struct vnode *dvp,
		ino_t ino, struct vnode **vpp)
{
	struct hammer_transaction trans;
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	struct hammer_inode *ip;
	int error;
	u_int32_t localization;

	lwkt_gettoken(&hmp->fs_token);
	hammer_simple_transaction(&trans, hmp);

	/*
	 * If a directory vnode is supplied (mainly NFS) then we can acquire
	 * the PFS domain from it.  Otherwise we would only be able to vget
	 * inodes in the root PFS.
	 */
	if (dvp) {
		localization = HAMMER_DEF_LOCALIZATION +
			       VTOI(dvp)->obj_localization;
	} else {
		localization = HAMMER_DEF_LOCALIZATION;
	}

	/*
	 * Lookup the requested HAMMER inode.  The structure must be
	 * left unlocked while we manipulate the related vnode to avoid
	 * a deadlock.
	 */
	ip = hammer_get_inode(&trans, NULL, ino,
			      hmp->asof, localization,
			      0, &error);
	if (ip == NULL) {
		*vpp = NULL;
	} else {
		error = hammer_get_vnode(ip, vpp);
		hammer_rel_inode(ip, 0);
	}
	hammer_done_transaction(&trans);
	lwkt_reltoken(&hmp->fs_token);
	return (error);
}
/*
 * Return the root vnode for the filesystem.
 *
 * HAMMER stores the root vnode in the hammer_mount structure so
 * getting it is easy.
 */
static int
hammer_vfs_root(struct mount *mp, struct vnode **vpp)
{
	int error;

	error = hammer_vfs_vget(mp, NULL, HAMMER_OBJID_ROOT, vpp);
	return (error);
}
static int
hammer_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	hammer_volume_t volume;
	hammer_volume_ondisk_t ondisk;
	int error;
	int64_t bfree;
	int64_t breserved;

	lwkt_gettoken(&hmp->fs_token);
	volume = hammer_get_root_volume(hmp, &error);
	if (error) {
		lwkt_reltoken(&hmp->fs_token);
		return (error);
	}
	ondisk = volume->ondisk;

	/*
	 * Basic stats
	 */
	_hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE, &breserved);
	mp->mnt_stat.f_files = ondisk->vol0_stat_inodes;
	bfree = ondisk->vol0_stat_freebigblocks * HAMMER_BIGBLOCK_SIZE;
	hammer_rel_volume(volume, 0);
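	/*
	 * Free space is tracked in big-blocks (HAMMER_BIGBLOCK_SIZE bytes
	 * each); after subtracting the write reserve it is converted to
	 * f_bsize (HAMMER_BUFSIZE) units for statfs below.
	 */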
	mp->mnt_stat.f_bfree = (bfree - breserved) / HAMMER_BUFSIZE;
	mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree;
	if (mp->mnt_stat.f_files < 0)
		mp->mnt_stat.f_files = 0;

	*sbp = mp->mnt_stat;
	lwkt_reltoken(&hmp->fs_token);
	return (0);
}
static int
hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	hammer_volume_t volume;
	hammer_volume_ondisk_t ondisk;
	int error;
	int64_t bfree;
	int64_t breserved;

	lwkt_gettoken(&hmp->fs_token);
	volume = hammer_get_root_volume(hmp, &error);
	if (error) {
		lwkt_reltoken(&hmp->fs_token);
		return (error);
	}
	ondisk = volume->ondisk;

	/*
	 * Basic stats
	 */
	_hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE, &breserved);
	mp->mnt_vstat.f_files = ondisk->vol0_stat_inodes;
	bfree = ondisk->vol0_stat_freebigblocks * HAMMER_BIGBLOCK_SIZE;
	hammer_rel_volume(volume, 0);

	mp->mnt_vstat.f_bfree = (bfree - breserved) / HAMMER_BUFSIZE;
	mp->mnt_vstat.f_bavail = mp->mnt_vstat.f_bfree;
	if (mp->mnt_vstat.f_files < 0)
		mp->mnt_vstat.f_files = 0;
	*sbp = mp->mnt_vstat;
	lwkt_reltoken(&hmp->fs_token);
	return (0);
}
/*
 * Sync the filesystem.  Currently we have to run it twice; the second
 * pass advances the undo start index to the end index, so if a crash
 * occurs no undos will be run on mount.
 *
 * We do not sync the filesystem if we are called from a panic.  If we did
 * we might end up blowing up a sync that was already in progress.
 */
static int
hammer_vfs_sync(struct mount *mp, int waitfor)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	int error;

	lwkt_gettoken(&hmp->fs_token);
	if (panicstr == NULL) {
		error = hammer_sync_hmp(hmp, waitfor);
	} else {
		error = EIO;
	}
	lwkt_reltoken(&hmp->fs_token);
	return (error);
}
/*
 * Convert a vnode to a file handle.
 *
 * Accesses read-only fields on already-referenced structures so
 * no token is needed.
 */
static int
hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp)
{
	hammer_inode_t ip;

	KKASSERT(MAXFIDSZ >= 16);
	ip = VTOI(vp);
	fhp->fid_len = offsetof(struct fid, fid_data[16]);
	fhp->fid_ext = ip->obj_localization >> 16;
	bcopy(&ip->obj_id, fhp->fid_data + 0, sizeof(ip->obj_id));
	bcopy(&ip->obj_asof, fhp->fid_data + 8, sizeof(ip->obj_asof));
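	/*
	 * Resulting 16-byte fid_data layout: bytes 0-7 hold obj_id,
	 * bytes 8-15 hold obj_asof; the upper 16 bits of the inode's
	 * localization travel separately in fid_ext.
	 */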
	return (0);
}

/*
 * Convert a file handle back to a vnode.
 *
 * Use rootvp to enforce PFS isolation when a PFS is exported via a
 * null mount.
 */
static int
hammer_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
		  struct fid *fhp, struct vnode **vpp)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	struct hammer_transaction trans;
	struct hammer_inode *ip;
	struct hammer_inode_info info;
	int error;
	u_int32_t localization;

	bcopy(fhp->fid_data + 0, &info.obj_id, sizeof(info.obj_id));
	bcopy(fhp->fid_data + 8, &info.obj_asof, sizeof(info.obj_asof));
	if (rootvp)
		localization = VTOI(rootvp)->obj_localization;
	else
		localization = (u_int32_t)fhp->fid_ext << 16;
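	/*
	 * i.e. the PFS id is normally recovered from the file handle's
	 * fid_ext (see hammer_vfs_vptofh() above); when a specific PFS
	 * root was exported, its localization wins instead.
	 */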
	lwkt_gettoken(&hmp->fs_token);
	hammer_simple_transaction(&trans, hmp);

	/*
	 * Get/allocate the hammer_inode structure.  The structure must be
	 * unlocked while we manipulate the related vnode to avoid a
	 * deadlock.
	 */
	ip = hammer_get_inode(&trans, NULL, info.obj_id,
			      info.obj_asof, localization, 0, &error);
	if (ip) {
		error = hammer_get_vnode(ip, vpp);
		hammer_rel_inode(ip, 0);
	} else {
		*vpp = NULL;
	}
	hammer_done_transaction(&trans);
	lwkt_reltoken(&hmp->fs_token);
	return (error);
}
static int
hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
		    int *exflagsp, struct ucred **credanonp)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	struct netcred *np;
	int error;

	lwkt_gettoken(&hmp->fs_token);
	np = vfs_export_lookup(mp, &hmp->export, nam);
	if (np) {
		*exflagsp = np->netc_exflags;
		*credanonp = &np->netc_anon;
		error = 0;
	} else {
		error = EACCES;
	}
	lwkt_reltoken(&hmp->fs_token);
	return (error);
}
int
hammer_vfs_export(struct mount *mp, int op, const struct export_args *export)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	int error;

	lwkt_gettoken(&hmp->fs_token);

	switch (op) {
	case MOUNTCTL_SET_EXPORT:
		error = vfs_export(mp, &hmp->export, export);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	lwkt_reltoken(&hmp->fs_token);

	return (error);
}