/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_vfsops.c,v 1.14 2008/01/24 02:14:45 dillon Exp $
 */
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/vnode.h>
41 #include <sys/mount.h>
42 #include <sys/malloc.h>
43 #include <sys/nlookup.h>
44 #include <sys/fcntl.h>
45 #include <sys/sysctl.h>
/*
 * Debug tunables and live object-count statistics.  All of these are
 * exported through the vfs.hammer.* sysctl tree declared below: the
 * debug_* knobs are writable, the count_* statistics are read-only.
 *
 * Fixes in this block: stripped the stray line-number artifacts that
 * broke compilation, and restored the missing definition of
 * hammer_debug_tid, which the sysctl glue below takes the address of.
 */
int hammer_debug_btree;
int hammer_debug_tid;		/* restored: &hammer_debug_tid used below */
int hammer_debug_recover = -1;	/* temporarily disabled */
int hammer_count_inodes;
int hammer_count_records;
int hammer_count_record_datas;
int hammer_count_volumes;
int hammer_count_supercls;
int hammer_count_clusters;
int hammer_count_buffers;
int hammer_count_nodes;
int hammer_count_spikes;
63 SYSCTL_NODE(_vfs, OID_AUTO, hammer, CTLFLAG_RW, 0, "HAMMER filesystem");
64 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_btree, CTLFLAG_RW,
65 &hammer_debug_btree, 0, "");
66 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_tid, CTLFLAG_RW,
67 &hammer_debug_tid, 0, "");
68 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover, CTLFLAG_RW,
69 &hammer_debug_recover, 0, "");
70 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_inodes, CTLFLAG_RD,
71 &hammer_count_inodes, 0, "");
72 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_records, CTLFLAG_RD,
73 &hammer_count_records, 0, "");
74 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_record_datas, CTLFLAG_RD,
75 &hammer_count_record_datas, 0, "");
76 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_volumes, CTLFLAG_RD,
77 &hammer_count_volumes, 0, "");
78 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_supercls, CTLFLAG_RD,
79 &hammer_count_supercls, 0, "");
80 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_clusters, CTLFLAG_RD,
81 &hammer_count_clusters, 0, "");
82 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_buffers, CTLFLAG_RD,
83 &hammer_count_buffers, 0, "");
84 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_nodes, CTLFLAG_RD,
85 &hammer_count_nodes, 0, "");
86 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_spikes, CTLFLAG_RD,
87 &hammer_count_spikes, 0, "");
92 static void hammer_free_hmp(struct mount *mp);
94 static int hammer_vfs_mount(struct mount *mp, char *path, caddr_t data,
96 static int hammer_vfs_unmount(struct mount *mp, int mntflags);
97 static int hammer_vfs_root(struct mount *mp, struct vnode **vpp);
98 static int hammer_vfs_statfs(struct mount *mp, struct statfs *sbp,
100 static int hammer_vfs_sync(struct mount *mp, int waitfor);
101 static int hammer_vfs_init(struct vfsconf *conf);
103 static struct vfsops hammer_vfsops = {
104 .vfs_mount = hammer_vfs_mount,
105 .vfs_unmount = hammer_vfs_unmount,
106 .vfs_root = hammer_vfs_root,
107 .vfs_statfs = hammer_vfs_statfs,
108 .vfs_sync = hammer_vfs_sync,
109 .vfs_vget = hammer_vfs_vget,
110 .vfs_init = hammer_vfs_init
113 MALLOC_DEFINE(M_HAMMER, "hammer-mount", "hammer mount");
115 VFS_SET(hammer_vfsops, hammer, 0);
116 MODULE_VERSION(hammer, 1);
/*
 * One-time VFS initialization, run when the HAMMER module is loaded.
 * Only sets up the static A-list configurations used by the allocator.
 *
 * Fixes in this block: stripped stray line-number artifacts and
 * restored the trivially-implied wrapper lines (the "static int"
 * storage class matching the prototype above, braces, and the
 * success return).
 */
static int
hammer_vfs_init(struct vfsconf *conf)
{
	hammer_init_alist_config();
	return(0);
}
/*
 * hammer_vfs_mount() - mount a HAMMER filesystem consisting of one or
 * more volumes enumerated by the userland hammer_mount_info argument.
 *
 * NOTE(review): this region is a lossy extraction of the original file.
 * The stray leading numbers are the original file's line numbers, and
 * many lines (braces, returns, error paths) are missing.  Code is left
 * byte-identical; only comments were added or repaired.
 */
126 hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data,
129 struct hammer_mount_info info;
131 hammer_volume_t rootvol;
132 struct vnode *rootvp;
133 const char *upath; /* volume name in userspace */
134 char *path; /* volume name in system space */
/*
 * Copy the mount information in from userland and sanity check the
 * volume count before using it.
 */
138 if ((error = copyin(data, &info, sizeof(info))) != 0)
140 if (info.nvolumes <= 0 || info.nvolumes >= 32768)
/*
144 * Internal mount data structure
 */
146 if (mp->mnt_flag & MNT_UPDATE) {
147 hmp = (void *)mp->mnt_data;
148 KKASSERT(hmp != NULL);
150 hmp = kmalloc(sizeof(*hmp), M_HAMMER, M_WAITOK | M_ZERO);
151 mp->mnt_data = (qaddr_t)hmp;
/* zbuf is a zeroed scratch buffer; namekey_iterator is clock-seeded */
153 hmp->zbuf = kmalloc(HAMMER_BUFSIZE, M_HAMMER, M_WAITOK|M_ZERO);
154 hmp->namekey_iterator = mycpu->gd_time_seconds;
156 hmp->hflags = info.hflags;
/*
 * NOTE(review): an as-of mount forces MNT_RDONLY and records the
 * requested transaction id; otherwise asof is HAMMER_MAX_TID ("now").
 * The conditional test line itself is missing from this view -- confirm
 * against the full file.
 */
158 mp->mnt_flag |= MNT_RDONLY;
159 hmp->asof = info.asof;
161 hmp->asof = HAMMER_MAX_TID;
/*
165 * Re-open read-write if originally read-only, or vice-versa XXX
 */
167 if (mp->mnt_flag & MNT_UPDATE) {
168 if (hmp->ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
169 kprintf("HAMMER read-write -> read-only XXX\n");
171 } else if (hmp->ronly && (mp->mnt_flag & MNT_RDONLY) == 0) {
172 kprintf("HAMMER read-only -> read-write XXX\n");
/* Fresh mount: initialize the per-mount volume and inode RB trees. */
178 RB_INIT(&hmp->rb_vols_root);
179 RB_INIT(&hmp->rb_inos_root);
180 hmp->ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);
/*
 * Copy each volume pathname in from userland and install the volume.
 * The kernel-side pathname buffer is borrowed from the namei object
 * cache and returned below.
 */
185 path = objcache_get(namei_oc, M_WAITOK);
186 hmp->nvolumes = info.nvolumes;
187 for (i = 0; i < info.nvolumes; ++i) {
188 error = copyin(&info.volumes[i], &upath, sizeof(char *));
190 error = copyinstr(upath, path, MAXPATHLEN, NULL);
192 error = hammer_install_volume(hmp, path);
196 objcache_put(namei_oc, path);
/*
199 * Make sure we found a root volume
 */
201 if (error == 0 && hmp->rootvol == NULL) {
202 kprintf("hammer_mount: No root volume found!\n");
205 if (error == 0 && hmp->rootcl == NULL) {
206 kprintf("hammer_mount: No root cluster found!\n");
/*
215 * No errors, setup enough of the mount point so we can lookup the
 */
218 mp->mnt_iosize_max = MAXPHYS;
219 mp->mnt_kern_flag |= MNTK_FSMID;
220 mp->mnt_stat.f_fsid.val[0] = 0; /* XXX */
221 mp->mnt_stat.f_fsid.val[1] = 0; /* XXX */
/*
224 * note: f_iosize is used by vnode_pager_haspage() when constructing
 */
227 mp->mnt_stat.f_iosize = HAMMER_BUFSIZE;
228 mp->mnt_stat.f_bsize = HAMMER_BUFSIZE;
229 vfs_getnewfsid(mp); /* XXX */
230 mp->mnt_maxsymlinklen = 255;
231 mp->mnt_flag |= MNT_LOCAL;
/* Install HAMMER's vnode ops for normal, special and fifo vnodes. */
233 vfs_add_vnodeops(mp, &hammer_vnode_vops, &mp->mnt_vn_norm_ops);
234 vfs_add_vnodeops(mp, &hammer_spec_vops, &mp->mnt_vn_spec_ops);
235 vfs_add_vnodeops(mp, &hammer_fifo_vops, &mp->mnt_vn_fifo_ops);
/*
238 * The root volume's ondisk pointer is only valid if we hold a
 */
241 rootvol = hammer_get_root_volume(hmp, &error);
/* f_mntfromname is taken from the root volume's on-disk label. */
244 ksnprintf(mp->mnt_stat.f_mntfromname,
245 sizeof(mp->mnt_stat.f_mntfromname), "%s",
246 rootvol->ondisk->vol_name);
247 hammer_rel_volume(rootvol, 0);
/*
250 * Locate the root directory using the root cluster's B-Tree as a
251 * starting point. The root directory uses an obj_id of 1.
253 * FUTURE: Leave the root directory cached referenced but unlocked
254 * in hmp->rootvp (need to flush it on unmount).
 */
256 error = hammer_vfs_vget(mp, 1, &rootvp);
260 /*vn_unlock(hmp->rootvp);*/
/*
264 * Cleanup and return.
 */
/*
 * hammer_vfs_unmount() - unmount a HAMMER filesystem: flush/reclaim the
 * vnodes on the mount, then tear down the per-mount state (the teardown
 * itself is done by hammer_free_hmp(), per the comment fragment below).
 *
 * NOTE(review): lossy extraction -- braces, returns and several
 * statements (e.g. the initialization of 'flags' before the MNT_FORCE
 * test) are missing from this view.  Code left byte-identical.
 */
272 hammer_vfs_unmount(struct mount *mp, int mntflags)
275 struct hammer_mount *hmp = (void *)mp->mnt_data;
/*
281 * Clean out the vnodes
 */
/* MNT_FORCE presumably adds FORCECLOSE to the vflush flags -- confirm */
284 if (mntflags & MNT_FORCE)
286 if ((error = vflush(mp, 0, flags)) != 0)
/*
290 * Clean up the internal mount structure and related entities. This
 */
/*
298 * Clean up the internal mount structure and disassociate it from the mount.
299 * This may issue I/O.
 *
 * NOTE(review): lossy extraction -- function header, braces and some
 * statements are missing from this view.  Code left byte-identical.
 */
302 hammer_free_hmp(struct mount *mp)
304 struct hammer_mount *hmp = (void *)mp->mnt_data;
/*
308 * Clean up the root vnode
 */
/*
317 * Unload & flush inodes
 */
319 RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
320 hammer_unload_inode, (void *)MNT_WAIT);
/*
323 * Unload & flush volumes
 */
325 RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
326 hammer_unload_volume, NULL);
329 mp->mnt_flag &= ~MNT_LOCAL;
/* Free the zero buffer and the hammer_mount allocated in vfs_mount. */
331 kfree(hmp->zbuf, M_HAMMER);
332 kfree(hmp, M_HAMMER);
/*
336 * Return the root vnode for the filesystem.
338 * HAMMER stores the root vnode in the hammer_mount structure so
339 * getting it is easy.
 *
 * NOTE(review): lossy extraction -- braces, returns and surrounding
 * statements are missing.  Code left byte-identical.  Note that the
 * live path below actually re-looks the root up via hammer_vfs_vget();
 * the hmp->rootvp caching described above appears to be the not-yet-
 * enabled "FUTURE" path -- confirm against the full file.
 */
342 hammer_vfs_root(struct mount *mp, struct vnode **vpp)
344 struct hammer_mount *hmp = (void *)mp->mnt_data;
/* No root cluster yet (mount incomplete): no root vnode available. */
347 if (hmp->rootcl == NULL)
/* Look up the root directory, obj_id 1, on every call. */
350 error = hammer_vfs_vget(mp, 1, vpp);
353 /* FUTURE - cached root vnode */
354 if ((vp = hmp->rootvp) != NULL) {
356 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
/*
 * hammer_vfs_statfs() - report filesystem statistics derived from the
 * root volume's on-disk vol0 statistics fields.
 *
 * NOTE(review): lossy extraction -- braces, returns and error checks
 * are missing.  Code left byte-identical.  Values are written into
 * mp->mnt_stat; presumably copied out to sbp by code missing from this
 * view -- confirm against the full file.
 */
367 hammer_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
369 struct hammer_mount *hmp = (void *)mp->mnt_data;
370 hammer_volume_t volume;
371 hammer_volume_ondisk_t ondisk;
/* Reference the root volume; ondisk is only valid while referenced. */
374 volume = hammer_get_root_volume(hmp, &error);
378 ondisk = volume->ondisk;
/*
 * Free space = total blocks minus the buffers consumed by the B-Tree
 * index, records and data, clamped at zero.
 */
380 mp->mnt_stat.f_bfree = mp->mnt_stat.f_blocks -
381 ondisk->vol0_stat_idx_bufs -
382 ondisk->vol0_stat_rec_bufs -
383 ondisk->vol0_stat_data_bufs;
384 if (mp->mnt_stat.f_bfree < 0)
385 mp->mnt_stat.f_bfree = 0;
386 mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree;
387 mp->mnt_stat.f_files = ondisk->vol0_stat_inodes;
388 if (mp->mnt_stat.f_files < 0)
389 mp->mnt_stat.f_files = 0;
/* Drop the root-volume reference acquired above. */
391 hammer_rel_volume(volume, 0);
/*
 * hammer_vfs_sync() - sync the filesystem by delegating to
 * hammer_sync_hmp() with the caller's waitfor policy.
 *
 * NOTE(review): the definition continues past the end of this view
 * (the return/closing brace are not visible); code left byte-identical.
 */
397 hammer_vfs_sync(struct mount *mp, int waitfor)
399 struct hammer_mount *hmp = (void *)mp->mnt_data;
402 error = hammer_sync_hmp(hmp, waitfor);