2 * Copyright (c) 2007 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * $DragonFly: src/sys/vfs/hammer/hammer_vfsops.c,v 1.16 2008/01/25 10:36:04 dillon Exp $
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/vnode.h>
41 #include <sys/mount.h>
42 #include <sys/malloc.h>
43 #include <sys/nlookup.h>
44 #include <sys/fcntl.h>
45 #include <sys/sysctl.h>
/*
 * Global debug knobs and resource-accounting counters for the HAMMER
 * filesystem, exported through the vfs.hammer sysctl tree below.
 *
 * NOTE(review): the embedded original line numbers are non-contiguous
 * (line 52 is absent), so at least one declaration — presumably
 * hammer_debug_tid, which the sysctl block below references — is
 * missing from this listing.
 */
50 int hammer_debug_general;
51 int hammer_debug_btree;
53 int hammer_debug_recover; /* -1 will disable, +1 will force */
54 int hammer_debug_recover_faults;
55 int hammer_count_inodes;
56 int hammer_count_records;
57 int hammer_count_record_datas;
58 int hammer_count_volumes;
59 int hammer_count_supercls;
60 int hammer_count_clusters;
61 int hammer_count_buffers;
62 int hammer_count_nodes;
63 int hammer_count_spikes;
/*
 * sysctl tree: vfs.hammer.*
 *
 * The debug_* knobs are read-write tunables (CTLFLAG_RW); the count_*
 * statistics are exported read-only (CTLFLAG_RD).
 *
 * NOTE(review): hammer_debug_tid is exported here but its declaration
 * is not visible in this listing — confirm it exists earlier in the
 * file (the embedded line numbering shows line 52 is missing).
 */
65 SYSCTL_NODE(_vfs, OID_AUTO, hammer, CTLFLAG_RW, 0, "HAMMER filesystem");
66 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_general, CTLFLAG_RW,
67 &hammer_debug_general, 0, "");
68 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_btree, CTLFLAG_RW,
69 &hammer_debug_btree, 0, "");
70 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_tid, CTLFLAG_RW,
71 &hammer_debug_tid, 0, "");
72 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover, CTLFLAG_RW,
73 &hammer_debug_recover, 0, "");
74 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover_faults, CTLFLAG_RW,
75 &hammer_debug_recover_faults, 0, "");
76 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_inodes, CTLFLAG_RD,
77 &hammer_count_inodes, 0, "");
78 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_records, CTLFLAG_RD,
79 &hammer_count_records, 0, "");
80 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_record_datas, CTLFLAG_RD,
81 &hammer_count_record_datas, 0, "");
82 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_volumes, CTLFLAG_RD,
83 &hammer_count_volumes, 0, "");
84 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_supercls, CTLFLAG_RD,
85 &hammer_count_supercls, 0, "");
86 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_clusters, CTLFLAG_RD,
87 &hammer_count_clusters, 0, "");
88 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_buffers, CTLFLAG_RD,
89 &hammer_count_buffers, 0, "");
90 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_nodes, CTLFLAG_RD,
91 &hammer_count_nodes, 0, "");
92 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_spikes, CTLFLAG_RD,
93 &hammer_count_spikes, 0, "");
/*
 * Forward declarations for the VFS entry points registered in
 * hammer_vfsops below, plus the internal teardown helper.
 *
 * NOTE(review): the continuation lines of the hammer_vfs_mount and
 * hammer_vfs_statfs prototypes (original lines 101 and 105) are
 * missing from this listing.
 */
98 static void hammer_free_hmp(struct mount *mp);
100 static int hammer_vfs_mount(struct mount *mp, char *path, caddr_t data,
102 static int hammer_vfs_unmount(struct mount *mp, int mntflags);
103 static int hammer_vfs_root(struct mount *mp, struct vnode **vpp);
104 static int hammer_vfs_statfs(struct mount *mp, struct statfs *sbp,
106 static int hammer_vfs_sync(struct mount *mp, int waitfor);
107 static int hammer_vfs_init(struct vfsconf *conf);
/*
 * The HAMMER VFS operations vector, followed by the module glue:
 * the M_HAMMER malloc type, vfsconf registration, and module version.
 *
 * NOTE(review): the closing "};" of the initializer (original lines
 * 117-118) is not visible in this listing.
 */
109 static struct vfsops hammer_vfsops = {
110 .vfs_mount = hammer_vfs_mount,
111 .vfs_unmount = hammer_vfs_unmount,
112 .vfs_root = hammer_vfs_root,
113 .vfs_statfs = hammer_vfs_statfs,
114 .vfs_sync = hammer_vfs_sync,
115 .vfs_vget = hammer_vfs_vget,
116 .vfs_init = hammer_vfs_init
119 MALLOC_DEFINE(M_HAMMER, "hammer-mount", "hammer mount");
121 VFS_SET(hammer_vfsops, hammer, 0);
122 MODULE_VERSION(hammer, 1);
/*
 * hammer_vfs_init() - one-time VFS initialization hook; sets up the
 * A-list configuration templates via hammer_init_alist_config().
 *
 * NOTE(review): the function's braces and return (original lines
 * 126 and 128-129) are missing from this listing.
 */
125 hammer_vfs_init(struct vfsconf *conf)
127 hammer_init_alist_config();
/*
 * hammer_vfs_mount() - mount or update-mount (MNT_UPDATE) a HAMMER
 * filesystem.
 *
 * Copies the hammer_mount_info structure in from userspace, validates
 * the volume count, allocates (or reuses, on MNT_UPDATE) the in-memory
 * hammer_mount, installs each listed volume, verifies a root
 * volume/cluster was found, fills in the statfs and vnodeops fields of
 * the mount point, and finally resolves the root directory vnode
 * (obj_id 1) via hammer_vfs_vget().
 *
 * NOTE(review): many original lines are missing from this listing
 * (error branches, braces, returns — the gaps in the embedded line
 * numbering mark where); do not treat this excerpt as compilable.
 */
132 hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data,
135 struct hammer_mount_info info;
137 hammer_volume_t rootvol;
138 struct vnode *rootvp;
139 const char *upath; /* volume name in userspace */
140 char *path; /* volume name in system space */
144 if ((error = copyin(data, &info, sizeof(info))) != 0)
146 if (info.nvolumes <= 0 || info.nvolumes >= 32768)
150 * Internal mount data structure
152 if (mp->mnt_flag & MNT_UPDATE) {
153 hmp = (void *)mp->mnt_data;
154 KKASSERT(hmp != NULL);
156 hmp = kmalloc(sizeof(*hmp), M_HAMMER, M_WAITOK | M_ZERO);
157 mp->mnt_data = (qaddr_t)hmp;
159 hmp->zbuf = kmalloc(HAMMER_BUFSIZE, M_HAMMER, M_WAITOK|M_ZERO);
160 hmp->namekey_iterator = mycpu->gd_time_seconds;
161 /*TAILQ_INIT(&hmp->recycle_list);*/
163 hmp->hflags = info.hflags;
/* as-of mounts are forced read-only; otherwise asof is "latest" */
165 mp->mnt_flag |= MNT_RDONLY;
166 hmp->asof = info.asof;
168 hmp->asof = HAMMER_MAX_TID;
172 * Re-open read-write if originally read-only, or vice-versa XXX
174 if (mp->mnt_flag & MNT_UPDATE) {
175 if (hmp->ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
176 kprintf("HAMMER read-write -> read-only XXX\n");
178 } else if (hmp->ronly && (mp->mnt_flag & MNT_RDONLY) == 0) {
179 kprintf("HAMMER read-only -> read-write XXX\n");
185 RB_INIT(&hmp->rb_vols_root);
186 RB_INIT(&hmp->rb_inos_root);
187 hmp->ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);
/*
 * Copy each userspace volume path into a kernel buffer and install
 * the volume. NOTE(review): the loop's error-break lines are missing.
 */
192 path = objcache_get(namei_oc, M_WAITOK);
193 hmp->nvolumes = info.nvolumes;
194 for (i = 0; i < info.nvolumes; ++i) {
195 error = copyin(&info.volumes[i], &upath, sizeof(char *));
197 error = copyinstr(upath, path, MAXPATHLEN, NULL);
199 error = hammer_install_volume(hmp, path);
203 objcache_put(namei_oc, path);
206 * Make sure we found a root volume
208 if (error == 0 && hmp->rootvol == NULL) {
209 kprintf("hammer_mount: No root volume found!\n");
212 if (error == 0 && hmp->rootcl == NULL) {
213 kprintf("hammer_mount: No root cluster found!\n");
222 * No errors, setup enough of the mount point so we can lookup the
225 mp->mnt_iosize_max = MAXPHYS;
226 mp->mnt_kern_flag |= MNTK_FSMID;
227 mp->mnt_stat.f_fsid.val[0] = 0; /* XXX */
228 mp->mnt_stat.f_fsid.val[1] = 0; /* XXX */
231 * note: f_iosize is used by vnode_pager_haspage() when constructing
234 mp->mnt_stat.f_iosize = HAMMER_BUFSIZE;
235 mp->mnt_stat.f_bsize = HAMMER_BUFSIZE;
236 vfs_getnewfsid(mp); /* XXX */
237 mp->mnt_maxsymlinklen = 255;
238 mp->mnt_flag |= MNT_LOCAL;
240 vfs_add_vnodeops(mp, &hammer_vnode_vops, &mp->mnt_vn_norm_ops);
241 vfs_add_vnodeops(mp, &hammer_spec_vops, &mp->mnt_vn_spec_ops);
242 vfs_add_vnodeops(mp, &hammer_fifo_vops, &mp->mnt_vn_fifo_ops);
245 * The root volume's ondisk pointer is only valid if we hold a
/* Copy the root volume's name into f_mntfromname, then drop the ref */
248 rootvol = hammer_get_root_volume(hmp, &error);
251 ksnprintf(mp->mnt_stat.f_mntfromname,
252 sizeof(mp->mnt_stat.f_mntfromname), "%s",
253 rootvol->ondisk->vol_name);
254 hammer_rel_volume(rootvol, 0);
257 * Locate the root directory using the root cluster's B-Tree as a
258 * starting point. The root directory uses an obj_id of 1.
260 * FUTURE: Leave the root directory cached referenced but unlocked
261 * in hmp->rootvp (need to flush it on unmount).
263 error = hammer_vfs_vget(mp, 1, &rootvp);
267 /*vn_unlock(hmp->rootvp);*/
271 * Cleanup and return.
/*
 * hammer_vfs_unmount() - unmount a HAMMER filesystem: flush the vnodes
 * (forcibly when MNT_FORCE is set) and then tear down the internal
 * mount structure.
 *
 * NOTE(review): lines are missing from this listing — the
 * initialization of `flags`, the forced-flush flag assignment, the
 * error return, and the call into the teardown path are not visible.
 */
279 hammer_vfs_unmount(struct mount *mp, int mntflags)
282 struct hammer_mount *hmp = (void *)mp->mnt_data;
288 * Clean out the vnodes
291 if (mntflags & MNT_FORCE)
293 if ((error = vflush(mp, 0, flags)) != 0)
297 * Clean up the internal mount structure and related entities. This
/*
 * NOTE(review): the embedded line numbering shows gaps inside this
 * function (e.g. the root-vnode cleanup body at original lines
 * 316-323); the excerpt below is not the complete function.
 */
305 * Clean up the internal mount structure and disassociate it from the mount.
306 * This may issue I/O.
309 hammer_free_hmp(struct mount *mp)
311 struct hammer_mount *hmp = (void *)mp->mnt_data;
315 * Clean up the root vnode
324 * Unload & flush inodes
/* MNT_WAIT is smuggled through the RB_SCAN callback's data pointer */
326 RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
327 hammer_unload_inode, (void *)MNT_WAIT);
330 * Unload & flush volumes
332 RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
333 hammer_unload_volume, NULL);
/* Disassociate from the mount point and free our memory */
336 mp->mnt_flag &= ~MNT_LOCAL;
338 kfree(hmp->zbuf, M_HAMMER);
339 kfree(hmp, M_HAMMER);
/*
 * NOTE(review): lines are missing from this listing (e.g. the error
 * return after the rootcl NULL check and the function's return path);
 * the dead "FUTURE - cached root vnode" tail below appears to be
 * inside an #if 0 or similar in the original — confirm against the
 * full file.
 */
343 * Return the root vnode for the filesystem.
345 * HAMMER stores the root vnode in the hammer_mount structure so
346 * getting it is easy.
349 hammer_vfs_root(struct mount *mp, struct vnode **vpp)
351 struct hammer_mount *hmp = (void *)mp->mnt_data;
/* No root cluster means the mount never completed; fail the lookup */
354 if (hmp->rootcl == NULL)
357 error = hammer_vfs_vget(mp, 1, vpp);
360 /* FUTURE - cached root vnode */
361 if ((vp = hmp->rootvp) != NULL) {
363 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
/*
 * hammer_vfs_statfs() - report filesystem statistics.
 *
 * Derives the free-block count by subtracting the root volume's
 * on-disk index/record/data buffer usage counters from the total
 * block count, clamping negatives to zero, and reports the on-disk
 * inode count. The root volume reference is released before return.
 *
 * NOTE(review): lines are missing from this listing (the error check
 * after hammer_get_root_volume, the f_blocks assignment at original
 * line 386, and the return path).
 */
374 hammer_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
376 struct hammer_mount *hmp = (void *)mp->mnt_data;
377 hammer_volume_t volume;
378 hammer_volume_ondisk_t ondisk;
381 volume = hammer_get_root_volume(hmp, &error);
385 ondisk = volume->ondisk;
387 mp->mnt_stat.f_bfree = mp->mnt_stat.f_blocks -
388 ondisk->vol0_stat_idx_bufs -
389 ondisk->vol0_stat_rec_bufs -
390 ondisk->vol0_stat_data_bufs;
391 if (mp->mnt_stat.f_bfree < 0)
392 mp->mnt_stat.f_bfree = 0;
393 mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree;
394 mp->mnt_stat.f_files = ondisk->vol0_stat_inodes;
395 if (mp->mnt_stat.f_files < 0)
396 mp->mnt_stat.f_files = 0;
398 hammer_rel_volume(volume, 0);
/*
 * hammer_vfs_sync() - sync the filesystem; delegates entirely to
 * hammer_sync_hmp() on the internal mount structure.
 *
 * NOTE(review): the function's braces and return statement are missing
 * from this listing (gaps at original lines 405, 407-408, 410-411).
 */
404 hammer_vfs_sync(struct mount *mp, int waitfor)
406 struct hammer_mount *hmp = (void *)mp->mnt_data;
409 error = hammer_sync_hmp(hmp, waitfor);