/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_vfsops.c,v 1.47 2008/06/13 00:25:33 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/nlookup.h>
#include <sys/fcntl.h>
#include <sys/sysctl.h>

#include "hammer.h"
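
/*
 * Debug knobs, statistics counters and resource limits for HAMMER.
 * They are exported to userland via the vfs.hammer sysctl tree declared
 * below.
 */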
int hammer_debug_io;
int hammer_debug_general;
int hammer_debug_debug;
int hammer_debug_inode;
int hammer_debug_locks;
int hammer_debug_btree;
int hammer_debug_tid;
int hammer_debug_recover;		/* -1 will disable, +1 will force */
int hammer_debug_recover_faults;
int hammer_debug_cluster_enable = 1;	/* enable read clustering by default */
int hammer_count_inodes;
int hammer_count_iqueued;
int hammer_count_reclaiming;
int hammer_count_records;
int hammer_count_record_datas;
int hammer_count_volumes;
int hammer_count_buffers;
int hammer_count_nodes;
int hammer_count_dirtybufs;		/* global */
int hammer_count_refedbufs;		/* global */
int hammer_count_reservations;
int hammer_count_io_running_read;
int hammer_count_io_running_write;
int hammer_count_io_locked;
int hammer_stats_btree_iterations;
int hammer_stats_record_iterations;
int hammer_limit_dirtybufs;		/* per-mount */
int hammer_limit_irecs;			/* per-inode */
int hammer_limit_recs;			/* as a whole XXX */
int hammer_limit_iqueued;		/* per-mount */
int64_t hammer_contention_count;
int64_t hammer_zone_limit;

SYSCTL_NODE(_vfs, OID_AUTO, hammer, CTLFLAG_RW, 0, "HAMMER filesystem");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_general, CTLFLAG_RW,
	   &hammer_debug_general, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_io, CTLFLAG_RW,
	   &hammer_debug_io, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_debug, CTLFLAG_RW,
	   &hammer_debug_debug, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_inode, CTLFLAG_RW,
	   &hammer_debug_inode, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_locks, CTLFLAG_RW,
	   &hammer_debug_locks, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_btree, CTLFLAG_RW,
	   &hammer_debug_btree, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_tid, CTLFLAG_RW,
	   &hammer_debug_tid, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover, CTLFLAG_RW,
	   &hammer_debug_recover, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover_faults, CTLFLAG_RW,
	   &hammer_debug_recover_faults, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_cluster_enable, CTLFLAG_RW,
	   &hammer_debug_cluster_enable, 0, "");

SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_dirtybufs, CTLFLAG_RW,
	   &hammer_limit_dirtybufs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_irecs, CTLFLAG_RW,
	   &hammer_limit_irecs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_recs, CTLFLAG_RW,
	   &hammer_limit_recs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_iqueued, CTLFLAG_RW,
	   &hammer_limit_iqueued, 0, "");

SYSCTL_INT(_vfs_hammer, OID_AUTO, count_inodes, CTLFLAG_RD,
	   &hammer_count_inodes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_iqueued, CTLFLAG_RD,
	   &hammer_count_iqueued, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reclaiming, CTLFLAG_RD,
	   &hammer_count_reclaiming, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_records, CTLFLAG_RD,
	   &hammer_count_records, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_record_datas, CTLFLAG_RD,
	   &hammer_count_record_datas, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_volumes, CTLFLAG_RD,
	   &hammer_count_volumes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_buffers, CTLFLAG_RD,
	   &hammer_count_buffers, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_nodes, CTLFLAG_RD,
	   &hammer_count_nodes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_dirtybufs, CTLFLAG_RD,
	   &hammer_count_dirtybufs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_refedbufs, CTLFLAG_RD,
	   &hammer_count_refedbufs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reservations, CTLFLAG_RD,
	   &hammer_count_reservations, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_running_read, CTLFLAG_RD,
	   &hammer_count_io_running_read, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_locked, CTLFLAG_RD,
	   &hammer_count_io_locked, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_running_write, CTLFLAG_RD,
	   &hammer_count_io_running_write, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, zone_limit, CTLFLAG_RW,
	   &hammer_zone_limit, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, contention_count, CTLFLAG_RW,
	   &hammer_contention_count, 0, "");
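
/*
 * Illustrative usage (not part of this file): the variables above appear
 * in userland under vfs.hammer, e.g.
 *
 *	sysctl vfs.hammer.debug_recover=1
 *	sysctl vfs.hammer.count_dirtybufs
 *
 * CTLFLAG_RD counters are read-only; CTLFLAG_RW knobs and limits may be
 * tuned at runtime.
 */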

static void	hammer_free_hmp(struct mount *mp);

static int	hammer_vfs_mount(struct mount *mp, char *path, caddr_t data,
				struct ucred *cred);
static int	hammer_vfs_unmount(struct mount *mp, int mntflags);
static int	hammer_vfs_root(struct mount *mp, struct vnode **vpp);
static int	hammer_vfs_statfs(struct mount *mp, struct statfs *sbp,
				struct ucred *cred);
static int	hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
				struct ucred *cred);
static int	hammer_vfs_sync(struct mount *mp, int waitfor);
static int	hammer_vfs_vget(struct mount *mp, ino_t ino,
				struct vnode **vpp);
static int	hammer_vfs_init(struct vfsconf *conf);
static int	hammer_vfs_fhtovp(struct mount *mp, struct fid *fhp,
				struct vnode **vpp);
static int	hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp);
static int	hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
				int *exflagsp, struct ucred **credanonp);

static struct vfsops hammer_vfsops = {
	.vfs_mount	= hammer_vfs_mount,
	.vfs_unmount	= hammer_vfs_unmount,
	.vfs_root	= hammer_vfs_root,
	.vfs_statfs	= hammer_vfs_statfs,
	.vfs_statvfs	= hammer_vfs_statvfs,
	.vfs_sync	= hammer_vfs_sync,
	.vfs_vget	= hammer_vfs_vget,
	.vfs_init	= hammer_vfs_init,
	.vfs_vptofh	= hammer_vfs_vptofh,
	.vfs_fhtovp	= hammer_vfs_fhtovp,
	.vfs_checkexp	= hammer_vfs_checkexp
};

MALLOC_DEFINE(M_HAMMER, "hammer-mount", "hammer mount");

VFS_SET(hammer_vfsops, hammer, 0);
MODULE_VERSION(hammer, 1);
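
/*
 * hammer_vfs_init() runs once when the VFS is registered via VFS_SET()
 * above.  It derives defaults for the hammer_limit_* tunables from the
 * system's buffer cache and vnode resources when they have not already
 * been set via sysctl.
 */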
static int
hammer_vfs_init(struct vfsconf *conf)
{
	if (hammer_limit_irecs == 0)
		hammer_limit_irecs = nbuf * 8;
	if (hammer_limit_recs == 0)		/* XXX TODO */
		hammer_limit_recs = nbuf * 25;
	if (hammer_limit_dirtybufs == 0) {
		hammer_limit_dirtybufs = hidirtybuffers / 2;
		if (hammer_limit_dirtybufs < 100)
			hammer_limit_dirtybufs = 100;
	}
	if (hammer_limit_iqueued == 0)
		hammer_limit_iqueued = desiredvnodes / 5;
	return(0);
}
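
/*
 * Mount or remount (MNT_UPDATE) a HAMMER filesystem.  The mount data
 * handed in from userland (struct hammer_mount_info) names the volumes
 * that make up the filesystem; each is installed before the root volume
 * is located and recovery is run.
 */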
static int
hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data,
		 struct ucred *cred)
{
	struct hammer_mount_info info;
	struct hammer_mount *hmp;
	hammer_volume_t rootvol;
	struct vnode *rootvp;
	const char *upath;	/* volume name in userspace */
	char *path;		/* volume name in system space */
	int error;
	int i;

	if ((error = copyin(data, &info, sizeof(info))) != 0)
		return (error);
	if ((mp->mnt_flag & MNT_UPDATE) == 0) {
		if (info.nvolumes <= 0 || info.nvolumes >= 32768)
			return (EINVAL);
	}

	/*
	 * Internal mount data structure
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		hmp = (void *)mp->mnt_data;
		KKASSERT(hmp != NULL);
	} else {
		hmp = kmalloc(sizeof(*hmp), M_HAMMER, M_WAITOK | M_ZERO);
		mp->mnt_data = (qaddr_t)hmp;

		hmp->zbuf = kmalloc(HAMMER_BUFSIZE, M_HAMMER, M_WAITOK|M_ZERO);
		hmp->namekey_iterator = mycpu->gd_time_seconds;
		/*TAILQ_INIT(&hmp->recycle_list);*/
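
		/*
		 * root_btree_beg and root_btree_end bracket the entire
		 * B-Tree key space (minimum to maximum localization,
		 * obj_id and key), giving the mount a pair of catch-all
		 * boundary elements for root B-Tree scans.
		 */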
		hmp->root_btree_beg.localization = HAMMER_MIN_LOCALIZATION;
		hmp->root_btree_beg.obj_id = -0x8000000000000000LL;
		hmp->root_btree_beg.key = -0x8000000000000000LL;
		hmp->root_btree_beg.create_tid = 1;
		hmp->root_btree_beg.delete_tid = 1;
		hmp->root_btree_beg.rec_type = 0;
		hmp->root_btree_beg.obj_type = 0;

		hmp->root_btree_end.localization = HAMMER_MAX_LOCALIZATION;
		hmp->root_btree_end.obj_id = 0x7FFFFFFFFFFFFFFFLL;
		hmp->root_btree_end.key = 0x7FFFFFFFFFFFFFFFLL;
		hmp->root_btree_end.create_tid = 0xFFFFFFFFFFFFFFFFULL;
		hmp->root_btree_end.delete_tid = 0;	/* special case */
		hmp->root_btree_end.rec_type = 0xFFFFU;
		hmp->root_btree_end.obj_type = 0;

		hmp->sync_lock.refs = 1;
		hmp->free_lock.refs = 1;
		hmp->undo_lock.refs = 1;
		hmp->blkmap_lock.refs = 1;

		TAILQ_INIT(&hmp->flush_list);
		TAILQ_INIT(&hmp->delay_list);
		TAILQ_INIT(&hmp->objid_cache_list);
		TAILQ_INIT(&hmp->undo_lru_list);
		TAILQ_INIT(&hmp->reclaim_list);

		/*
		 * Set default zone limits.  This value can be reduced
		 * further by the zone limit specified in the root volume.
		 *
		 * The sysctl can force a small zone limit for debugging
		 * purposes.
		 */
		for (i = 0; i < HAMMER_MAX_ZONES; ++i) {
			hmp->zone_limits[i] =
				HAMMER_ZONE_ENCODE(i, HAMMER_ZONE_LIMIT);
			if (hammer_zone_limit) {
				hmp->zone_limits[i] =
				    HAMMER_ZONE_ENCODE(i, hammer_zone_limit);
			}
			hammer_init_holes(hmp, &hmp->holes[i]);
		}
	}
	hmp->hflags &= ~HMNT_USERFLAGS;
	hmp->hflags |= info.hflags & HMNT_USERFLAGS;
	if (info.asof) {
		mp->mnt_flag |= MNT_RDONLY;
		hmp->asof = info.asof;
	} else {
		hmp->asof = HAMMER_MAX_TID;
	}

	/*
	 * Re-open read-write if originally read-only, or vice-versa.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		if (hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
			kprintf("HAMMER read-only -> read-write\n");
			hmp->ronly = 0;
			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
				hammer_adjust_volume_mode, NULL);
			rootvol = hammer_get_root_volume(hmp, &error);
			if (rootvol) {
				hammer_recover_flush_buffers(hmp, rootvol);
				bcopy(rootvol->ondisk->vol0_blockmap,
				      hmp->blockmap,
				      sizeof(hmp->blockmap));
				hammer_rel_volume(rootvol, 0);
			}
			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
				hammer_reload_inode, NULL);
			/* kernel clears MNT_RDONLY */
		} else if (hmp->ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			kprintf("HAMMER read-write -> read-only\n");
			hmp->ronly = 1;	/* messy */
			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
				hammer_reload_inode, NULL);
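
			/*
			 * Sync the flusher several times; the assumption
			 * here is that each pass can queue additional work
			 * (e.g. from the reloaded inodes) that the next
			 * pass must flush before the volumes are switched
			 * to read-only.
			 */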
			hammer_flusher_sync(hmp);
			hammer_flusher_sync(hmp);
			hammer_flusher_sync(hmp);
			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
				hammer_adjust_volume_mode, NULL);
		}
		return (error);
	}

	RB_INIT(&hmp->rb_vols_root);
	RB_INIT(&hmp->rb_inos_root);
	RB_INIT(&hmp->rb_nods_root);
	RB_INIT(&hmp->rb_undo_root);
	RB_INIT(&hmp->rb_resv_root);
	RB_INIT(&hmp->rb_bufs_root);

	hmp->ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);

	TAILQ_INIT(&hmp->volu_list);
	TAILQ_INIT(&hmp->undo_list);
	TAILQ_INIT(&hmp->data_list);
	TAILQ_INIT(&hmp->meta_list);
	TAILQ_INIT(&hmp->lose_list);
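
	/*
	 * Load the volumes named in the mount information.  Each userland
	 * path is copied into kernel space and handed to
	 * hammer_install_volume(); the root volume is recognized while the
	 * volumes are installed.
	 */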
	path = objcache_get(namei_oc, M_WAITOK);
	hmp->nvolumes = info.nvolumes;
	for (i = 0; i < info.nvolumes; ++i) {
		error = copyin(&info.volumes[i], &upath, sizeof(char *));
		if (error == 0)
			error = copyinstr(upath, path, MAXPATHLEN, NULL);
		if (error == 0)
			error = hammer_install_volume(hmp, path);
		if (error)
			break;
	}
	objcache_put(namei_oc, path);

	/*
	 * Make sure we found a root volume
	 */
	if (error == 0 && hmp->rootvol == NULL) {
		kprintf("hammer_mount: No root volume found!\n");
		error = EINVAL;
	}
	if (error) {
		hammer_free_hmp(mp);
		return (error);
	}

	/*
	 * No errors, setup enough of the mount point so we can lookup the
	 * root vnode.
	 */
	mp->mnt_iosize_max = MAXPHYS;
	mp->mnt_kern_flag |= MNTK_FSMID;

	/*
	 * note: f_iosize is used by vnode_pager_haspage() when constructing
	 * its VOP_BMAP call.
	 */
	mp->mnt_stat.f_iosize = HAMMER_BUFSIZE;
	mp->mnt_stat.f_bsize = HAMMER_BUFSIZE;

	mp->mnt_vstat.f_frsize = HAMMER_BUFSIZE;
	mp->mnt_vstat.f_bsize = HAMMER_BUFSIZE;

	mp->mnt_maxsymlinklen = 255;
	mp->mnt_flag |= MNT_LOCAL;

	vfs_add_vnodeops(mp, &hammer_vnode_vops, &mp->mnt_vn_norm_ops);
	vfs_add_vnodeops(mp, &hammer_spec_vops, &mp->mnt_vn_spec_ops);
	vfs_add_vnodeops(mp, &hammer_fifo_vops, &mp->mnt_vn_fifo_ops);

	/*
	 * The root volume's ondisk pointer is only valid if we hold a
	 * reference to it.
	 */
	rootvol = hammer_get_root_volume(hmp, &error);
	if (error)
		goto failed;

	/*
	 * Perform any necessary UNDO operations.  The recovery code does
	 * call hammer_undo_lookup() so we have to pre-cache the blockmap,
	 * and then re-copy it again after recovery is complete.
	 *
	 * If this is a read-only mount the UNDO information is retained
	 * in memory in the form of dirty buffer cache buffers, and not
	 * written back to the media.
	 */
	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
	      sizeof(hmp->blockmap));

	error = hammer_recover(hmp, rootvol);
	if (error) {
		kprintf("Failed to recover HAMMER filesystem on mount\n");
		goto done;
	}

	/*
	 * Finish setup now that we have a good root volume
	 */
	ksnprintf(mp->mnt_stat.f_mntfromname,
		  sizeof(mp->mnt_stat.f_mntfromname), "%s",
		  rootvol->ondisk->vol_name);
	mp->mnt_stat.f_fsid.val[0] =
		crc32((char *)&rootvol->ondisk->vol_fsid + 0, 8);
	mp->mnt_stat.f_fsid.val[1] =
		crc32((char *)&rootvol->ondisk->vol_fsid + 8, 8);

	mp->mnt_vstat.f_fsid_uuid = rootvol->ondisk->vol_fsid;
	mp->mnt_vstat.f_fsid = crc32(&mp->mnt_vstat.f_fsid_uuid,
				     sizeof(mp->mnt_vstat.f_fsid_uuid));
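
	/*
	 * Note: the statfs fsid above is built from CRCs of the two 8-byte
	 * halves of the volume fsid UUID, while the statvfs side carries
	 * the full UUID along with a CRC of it.
	 */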

	/*
	 * Certain often-modified fields in the root volume are cached in
	 * the hammer_mount structure so we do not have to generate lots
	 * of little UNDO structures for them.
	 *
	 * Recopy after recovery.  This also has the side effect of
	 * setting our cached undo FIFO's first_offset, which serves to
	 * placemark the FIFO start for the NEXT flush cycle while the
	 * on-disk first_offset represents the LAST flush cycle.
	 */
	hmp->next_tid = rootvol->ondisk->vol0_next_tid;
	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
	      sizeof(hmp->blockmap));
	hmp->copy_stat_freebigblocks = rootvol->ondisk->vol0_stat_freebigblocks;

	/*
	 * Use the zone limit set by newfs_hammer, or the zone limit set by
	 * sysctl (for debugging), whichever is smaller.
	 */
	if (rootvol->ondisk->vol0_zone_limit) {
		hammer_off_t vol0_zone_limit;

		vol0_zone_limit = rootvol->ondisk->vol0_zone_limit;
		for (i = 0; i < HAMMER_MAX_ZONES; ++i) {
			if (hmp->zone_limits[i] > vol0_zone_limit)
				hmp->zone_limits[i] = vol0_zone_limit;
		}
	}

	hammer_flusher_create(hmp);

	/*
	 * Locate the root directory using the root cluster's B-Tree as a
	 * starting point.  The root directory uses an obj_id of 1.
	 *
	 * FUTURE: Leave the root directory cached referenced but unlocked
	 * in hmp->rootvp (need to flush it on unmount).
	 */
	error = hammer_vfs_vget(mp, 1, &rootvp);
	if (error == 0)
		vput(rootvp);
	/*vn_unlock(hmp->rootvp);*/

done:
	hammer_rel_volume(rootvol, 0);
failed:
	/*
	 * Cleanup and return.
	 */
	if (error)
		hammer_free_hmp(mp);
	return (error);
}

static int
hammer_vfs_unmount(struct mount *mp, int mntflags)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	int flags;
	int error;

	/*
	 * Clean out the vnodes
	 */
	flags = 0;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	if ((error = vflush(mp, 0, flags)) != 0)
		return (error);

	/*
	 * Clean up the internal mount structure and related entities.  This
	 * may issue I/O.
	 */
	hammer_free_hmp(mp);
	return(0);
}

/*
 * Clean up the internal mount structure and disassociate it from the mount.
 * This may issue I/O.
 */
static void
hammer_free_hmp(struct mount *mp)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	int i;

	/*
	 * Clean up the root vnode
	 */
	if (hmp->rootvp) {
		vrele(hmp->rootvp);
		hmp->rootvp = NULL;
	}

	hammer_flusher_sync(hmp);
	hammer_flusher_sync(hmp);
	hammer_flusher_destroy(hmp);

	KKASSERT(RB_EMPTY(&hmp->rb_inos_root));

	/*
	 * Unload & flush inodes
	 *
	 * XXX illegal to call this from here, it can only be done from
	 * the flusher.
	 */
	RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
		hammer_unload_inode, (void *)MNT_WAIT);

	/*
	 * Unload & flush volumes
	 *
	 * Unload buffers and then volumes
	 */
	RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL,
		hammer_unload_buffer, NULL);

	RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
		hammer_unload_volume, NULL);

	mp->mnt_data = NULL;
	mp->mnt_flag &= ~MNT_LOCAL;

	hammer_destroy_objid_cache(hmp);

	kfree(hmp->zbuf, M_HAMMER);

	for (i = 0; i < HAMMER_MAX_ZONES; ++i)
		hammer_free_holes(hmp, &hmp->holes[i]);

	kfree(hmp, M_HAMMER);
}

/*
 * Obtain a vnode for the specified inode number.  An exclusively locked
 * vnode is returned.
 */
static int
hammer_vfs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	struct hammer_transaction trans;
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	struct hammer_inode *ip;
	int error;

	hammer_simple_transaction(&trans, hmp);

	/*
	 * Lookup the requested HAMMER inode.  The structure must be
	 * left unlocked while we manipulate the related vnode to avoid
	 * a deadlock.
	 */
	ip = hammer_get_inode(&trans, NULL, ino, hmp->asof, 0, &error);
	if (ip == NULL) {
		*vpp = NULL;
		return (error);
	}
	error = hammer_get_vnode(ip, vpp);
	hammer_rel_inode(ip, 0);
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * Return the root vnode for the filesystem.
 *
 * HAMMER stores the root vnode in the hammer_mount structure so
 * getting it is easy.
 */
static int
hammer_vfs_root(struct mount *mp, struct vnode **vpp)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	int error;

	error = hammer_vfs_vget(mp, 1, vpp);
	return (error);
}

static int
hammer_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	hammer_volume_t volume;
	hammer_volume_ondisk_t ondisk;
	int error;
	int64_t bfree;

	volume = hammer_get_root_volume(hmp, &error);
	if (error)
		return (error);
	ondisk = volume->ondisk;

	mp->mnt_stat.f_files = ondisk->vol0_stat_inodes;
	bfree = ondisk->vol0_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE;
	hammer_rel_volume(volume, 0);

	mp->mnt_stat.f_bfree = bfree / HAMMER_BUFSIZE;
	mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree;
	if (mp->mnt_stat.f_files < 0)
		mp->mnt_stat.f_files = 0;

	*sbp = mp->mnt_stat;
	return(0);
}
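
/*
 * Note on the space accounting above and in hammer_vfs_statvfs() below:
 * free space is tracked on-media in big-blocks (HAMMER_LARGEBLOCK_SIZE),
 * so it is converted to f_bsize (HAMMER_BUFSIZE) units before being
 * reported, and f_bavail simply mirrors f_bfree.
 */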

static int
hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	hammer_volume_t volume;
	hammer_volume_ondisk_t ondisk;
	int error;
	int64_t bfree;

	volume = hammer_get_root_volume(hmp, &error);
	if (error)
		return (error);
	ondisk = volume->ondisk;

	mp->mnt_vstat.f_files = ondisk->vol0_stat_inodes;
	bfree = ondisk->vol0_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE;
	hammer_rel_volume(volume, 0);

	mp->mnt_vstat.f_bfree = bfree / HAMMER_BUFSIZE;
	mp->mnt_vstat.f_bavail = mp->mnt_vstat.f_bfree;
	if (mp->mnt_vstat.f_files < 0)
		mp->mnt_vstat.f_files = 0;
	*sbp = mp->mnt_vstat;

/*
 * Sync the filesystem.  Currently we have to run it twice, the second
 * one will advance the undo start index to the end index, so if a crash
 * occurs no undos will be run on mount.
 *
 * We do not sync the filesystem if we are called from a panic.  If we did
 * we might end up blowing up a sync that was already in progress.
 */
static int
hammer_vfs_sync(struct mount *mp, int waitfor)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	int error;

	if (panicstr == NULL) {
		error = hammer_sync_hmp(hmp, waitfor);
		if (error == 0)
			error = hammer_sync_hmp(hmp, waitfor);
	} else {
		error = EIO;
	}
	return (error);
}

/*
 * Convert a vnode to a file handle.
 */
static int
hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp)
{
	hammer_inode_t ip;

	KKASSERT(MAXFIDSZ >= 16);
	ip = VTOI(vp);
	fhp->fid_len = offsetof(struct fid, fid_data[16]);
	fhp->fid_reserved = 0;
	bcopy(&ip->obj_id, fhp->fid_data + 0, sizeof(ip->obj_id));
	bcopy(&ip->obj_asof, fhp->fid_data + 8, sizeof(ip->obj_asof));
	return(0);
}
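
/*
 * The 16 data bytes of the file handle hold the inode's obj_id in bytes
 * 0-7 and its obj_asof transaction id in bytes 8-15; hammer_vfs_fhtovp()
 * below unpacks them in the same order.
 */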

/*
 * Convert a file handle back to a vnode.
 */
static int
hammer_vfs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
{
	struct hammer_transaction trans;
	struct hammer_inode *ip;
	struct hammer_inode_info info;
	int error;

	bcopy(fhp->fid_data + 0, &info.obj_id, sizeof(info.obj_id));
	bcopy(fhp->fid_data + 8, &info.obj_asof, sizeof(info.obj_asof));

	hammer_simple_transaction(&trans, (void *)mp->mnt_data);

	/*
	 * Get/allocate the hammer_inode structure.  The structure must be
	 * unlocked while we manipulate the related vnode to avoid a
	 * deadlock.
	 */
	ip = hammer_get_inode(&trans, NULL, info.obj_id, info.obj_asof,
			      0, &error);
	if (ip == NULL) {
		*vpp = NULL;
		return (error);
	}
	error = hammer_get_vnode(ip, vpp);
	hammer_rel_inode(ip, 0);
	hammer_done_transaction(&trans);
	return (error);
}

static int
hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
		    int *exflagsp, struct ucred **credanonp)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	struct netcred *np;
	int error;

	np = vfs_export_lookup(mp, &hmp->export, nam);
	if (np) {
		*exflagsp = np->netc_exflags;
		*credanonp = &np->netc_anon;
		error = 0;
	} else {
		error = EACCES;
	}
	return (error);
}

int
hammer_vfs_export(struct mount *mp, int op, const struct export_args *export)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	int error;

	switch(op) {
	case MOUNTCTL_SET_EXPORT:
		error = vfs_export(mp, &hmp->export, export);