/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_vfsops.c,v 1.46 2008/06/12 00:16:10 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/nlookup.h>
#include <sys/fcntl.h>
#include <sys/sysctl.h>

#include "hammer.h"
int hammer_debug_io;
int hammer_debug_general;
int hammer_debug_debug;
int hammer_debug_inode;
int hammer_debug_locks;
int hammer_debug_btree;
int hammer_debug_tid;
int hammer_debug_recover;		/* -1 will disable, +1 will force */
int hammer_debug_recover_faults;
int hammer_debug_write_release;		/* if 1 release buffer on strategy */
int hammer_debug_cluster_enable = 1;	/* enable read clustering by default */
int hammer_count_inodes;
int hammer_count_iqueued;
int hammer_count_reclaiming;
int hammer_count_records;
int hammer_count_record_datas;
int hammer_count_volumes;
int hammer_count_buffers;
int hammer_count_nodes;
int hammer_count_dirtybufs;		/* global */
int hammer_count_refedbufs;		/* global */
int hammer_count_reservations;
int hammer_count_io_running_read;
int hammer_count_io_running_write;
int hammer_count_io_locked;
int hammer_stats_btree_iterations;
int hammer_stats_record_iterations;
int hammer_limit_dirtybufs;		/* per-mount */
int hammer_limit_irecs;			/* per-inode */
int hammer_limit_recs;			/* as a whole XXX */
int hammer_limit_iqueued;		/* per-mount */
int64_t hammer_contention_count;
int64_t hammer_zone_limit;
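/*
 * All of the tunables and counters above are exported under the
 * vfs.hammer sysctl tree.  The counters are read-only (CTLFLAG_RD);
 * the limits and debug knobs are read-write (CTLFLAG_RW).
 */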
SYSCTL_NODE(_vfs, OID_AUTO, hammer, CTLFLAG_RW, 0, "HAMMER filesystem");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_general, CTLFLAG_RW,
           &hammer_debug_general, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_io, CTLFLAG_RW,
           &hammer_debug_io, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_debug, CTLFLAG_RW,
           &hammer_debug_debug, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_inode, CTLFLAG_RW,
           &hammer_debug_inode, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_locks, CTLFLAG_RW,
           &hammer_debug_locks, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_btree, CTLFLAG_RW,
           &hammer_debug_btree, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_tid, CTLFLAG_RW,
           &hammer_debug_tid, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover, CTLFLAG_RW,
           &hammer_debug_recover, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover_faults, CTLFLAG_RW,
           &hammer_debug_recover_faults, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_write_release, CTLFLAG_RW,
           &hammer_debug_write_release, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_cluster_enable, CTLFLAG_RW,
           &hammer_debug_cluster_enable, 0, "");

SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_dirtybufs, CTLFLAG_RW,
           &hammer_limit_dirtybufs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_irecs, CTLFLAG_RW,
           &hammer_limit_irecs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_recs, CTLFLAG_RW,
           &hammer_limit_recs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_iqueued, CTLFLAG_RW,
           &hammer_limit_iqueued, 0, "");

SYSCTL_INT(_vfs_hammer, OID_AUTO, count_inodes, CTLFLAG_RD,
           &hammer_count_inodes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_iqueued, CTLFLAG_RD,
           &hammer_count_iqueued, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reclaiming, CTLFLAG_RD,
           &hammer_count_reclaiming, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_records, CTLFLAG_RD,
           &hammer_count_records, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_record_datas, CTLFLAG_RD,
           &hammer_count_record_datas, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_volumes, CTLFLAG_RD,
           &hammer_count_volumes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_buffers, CTLFLAG_RD,
           &hammer_count_buffers, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_nodes, CTLFLAG_RD,
           &hammer_count_nodes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_dirtybufs, CTLFLAG_RD,
           &hammer_count_dirtybufs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_refedbufs, CTLFLAG_RD,
           &hammer_count_refedbufs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reservations, CTLFLAG_RD,
           &hammer_count_reservations, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_running_read, CTLFLAG_RD,
           &hammer_count_io_running_read, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_locked, CTLFLAG_RD,
           &hammer_count_io_locked, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_running_write, CTLFLAG_RD,
           &hammer_count_io_running_write, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, zone_limit, CTLFLAG_RW,
           &hammer_zone_limit, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, contention_count, CTLFLAG_RW,
           &hammer_contention_count, 0, "");
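/*
 * VFS interface entry points, wired into the kernel via the
 * hammer_vfsops table below.
 */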
static void hammer_free_hmp(struct mount *mp);

static int hammer_vfs_mount(struct mount *mp, char *path, caddr_t data,
                            struct ucred *cred);
static int hammer_vfs_unmount(struct mount *mp, int mntflags);
static int hammer_vfs_root(struct mount *mp, struct vnode **vpp);
static int hammer_vfs_statfs(struct mount *mp, struct statfs *sbp,
                             struct ucred *cred);
static int hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
                              struct ucred *cred);
static int hammer_vfs_sync(struct mount *mp, int waitfor);
static int hammer_vfs_vget(struct mount *mp, ino_t ino,
                           struct vnode **vpp);
static int hammer_vfs_init(struct vfsconf *conf);
static int hammer_vfs_fhtovp(struct mount *mp, struct fid *fhp,
                             struct vnode **vpp);
static int hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp);
static int hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
                               int *exflagsp, struct ucred **credanonp);
static struct vfsops hammer_vfsops = {
        .vfs_mount = hammer_vfs_mount,
        .vfs_unmount = hammer_vfs_unmount,
        .vfs_root = hammer_vfs_root,
        .vfs_statfs = hammer_vfs_statfs,
        .vfs_statvfs = hammer_vfs_statvfs,
        .vfs_sync = hammer_vfs_sync,
        .vfs_vget = hammer_vfs_vget,
        .vfs_init = hammer_vfs_init,
        .vfs_vptofh = hammer_vfs_vptofh,
        .vfs_fhtovp = hammer_vfs_fhtovp,
        .vfs_checkexp = hammer_vfs_checkexp
};

MALLOC_DEFINE(M_HAMMER, "hammer-mount", "hammer mount");

VFS_SET(hammer_vfsops, hammer, 0);
MODULE_VERSION(hammer, 1);
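/*
 * Called when the VFS module is initialized.  Derive default record,
 * dirty-buffer, and queued-inode limits from the system buffer and
 * vnode parameters (nbuf, hidirtybuffers, desiredvnodes) unless the
 * administrator has already set them via sysctl.
 */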
static int
hammer_vfs_init(struct vfsconf *conf)
{
        if (hammer_limit_irecs == 0)
                hammer_limit_irecs = nbuf * 8;
        if (hammer_limit_recs == 0)             /* XXX TODO */
                hammer_limit_recs = nbuf * 25;
        if (hammer_limit_dirtybufs == 0) {
                hammer_limit_dirtybufs = hidirtybuffers / 2;
                if (hammer_limit_dirtybufs < 100)
                        hammer_limit_dirtybufs = 100;
        }
        if (hammer_limit_iqueued == 0)
                hammer_limit_iqueued = desiredvnodes / 5;
        return (0);
}
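/*
 * Mount or update a HAMMER filesystem.  The hammer_mount_info structure
 * copied in from userland names the volumes that make up the filesystem;
 * the root volume supplies the blockmap and other cached fields.
 */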
static int
hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data,
                 struct ucred *cred)
{
        struct hammer_mount_info info;
        hammer_mount_t hmp;
        hammer_volume_t rootvol;
        struct vnode *rootvp;
        const char *upath;      /* volume name in userspace */
        char *path;             /* volume name in system space */
        int error;
        int i;

        if ((error = copyin(data, &info, sizeof(info))) != 0)
                return (error);
        if ((mp->mnt_flag & MNT_UPDATE) == 0) {
                if (info.nvolumes <= 0 || info.nvolumes >= 32768)
                        return (EINVAL);
        }

        /*
         * Internal mount data structure
         */
        if (mp->mnt_flag & MNT_UPDATE) {
                hmp = (void *)mp->mnt_data;
                KKASSERT(hmp != NULL);
        } else {
                hmp = kmalloc(sizeof(*hmp), M_HAMMER, M_WAITOK | M_ZERO);
                mp->mnt_data = (qaddr_t)hmp;
                hmp->mp = mp;
                hmp->zbuf = kmalloc(HAMMER_BUFSIZE, M_HAMMER, M_WAITOK|M_ZERO);
                hmp->namekey_iterator = mycpu->gd_time_seconds;
                /*TAILQ_INIT(&hmp->recycle_list);*/
                hmp->root_btree_beg.localization = HAMMER_MIN_LOCALIZATION;
                hmp->root_btree_beg.obj_id = -0x8000000000000000LL;
                hmp->root_btree_beg.key = -0x8000000000000000LL;
                hmp->root_btree_beg.create_tid = 1;
                hmp->root_btree_beg.delete_tid = 1;
                hmp->root_btree_beg.rec_type = 0;
                hmp->root_btree_beg.obj_type = 0;

                hmp->root_btree_end.localization = HAMMER_MAX_LOCALIZATION;
                hmp->root_btree_end.obj_id = 0x7FFFFFFFFFFFFFFFLL;
                hmp->root_btree_end.key = 0x7FFFFFFFFFFFFFFFLL;
                hmp->root_btree_end.create_tid = 0xFFFFFFFFFFFFFFFFULL;
                hmp->root_btree_end.delete_tid = 0;   /* special case */
                hmp->root_btree_end.rec_type = 0xFFFFU;
                hmp->root_btree_end.obj_type = 0;
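                /*
                 * The beg/end keys above cover the entire legal B-Tree key
                 * space for this mount, from the minimum to the maximum
                 * localization, obj_id and key values.
                 */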
                hmp->sync_lock.refs = 1;
                hmp->free_lock.refs = 1;
                hmp->undo_lock.refs = 1;
                hmp->blkmap_lock.refs = 1;

                TAILQ_INIT(&hmp->flush_list);
                TAILQ_INIT(&hmp->delay_list);
                TAILQ_INIT(&hmp->objid_cache_list);
                TAILQ_INIT(&hmp->undo_lru_list);
                /*
                 * Set default zone limits.  This value can be reduced
                 * further by the zone limit specified in the root volume.
                 *
                 * The sysctl can force a small zone limit for debugging
                 * purposes.
                 */
                for (i = 0; i < HAMMER_MAX_ZONES; ++i) {
                        hmp->zone_limits[i] =
                                HAMMER_ZONE_ENCODE(i, HAMMER_ZONE_LIMIT);
                        if (hammer_zone_limit) {
                                hmp->zone_limits[i] =
                                    HAMMER_ZONE_ENCODE(i, hammer_zone_limit);
                        }
                        hammer_init_holes(hmp, &hmp->holes[i]);
                }
        }
        hmp->hflags &= ~HMNT_USERFLAGS;
        hmp->hflags |= info.hflags & HMNT_USERFLAGS;

        if (info.asof) {
                mp->mnt_flag |= MNT_RDONLY;
                hmp->asof = info.asof;
        } else {
                hmp->asof = HAMMER_MAX_TID;
        }
        /*
         * Re-open read-write if originally read-only, or vice versa.
         */
        if (mp->mnt_flag & MNT_UPDATE) {
                error = 0;
                if (hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
                        kprintf("HAMMER read-only -> read-write\n");
                        hmp->ronly = 0;
                        RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
                                hammer_adjust_volume_mode, NULL);
                        rootvol = hammer_get_root_volume(hmp, &error);
                        if (rootvol) {
                                hammer_recover_flush_buffers(hmp, rootvol);
                                bcopy(rootvol->ondisk->vol0_blockmap,
                                      hmp->blockmap,
                                      sizeof(hmp->blockmap));
                                hammer_rel_volume(rootvol, 0);
                        }
                        RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
                                hammer_reload_inode, NULL);
                        /* kernel clears MNT_RDONLY */
                } else if (hmp->ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
                        kprintf("HAMMER read-write -> read-only\n");
                        hmp->ronly = 1; /* messy */
                        RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
                                hammer_reload_inode, NULL);
                        hmp->ronly = 0;
                        hammer_flusher_sync(hmp);
                        hammer_flusher_sync(hmp);
                        hammer_flusher_sync(hmp);
                        hmp->ronly = 1;
                        RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
                                hammer_adjust_volume_mode, NULL);
                }
                return (error);
        }
        RB_INIT(&hmp->rb_vols_root);
        RB_INIT(&hmp->rb_inos_root);
        RB_INIT(&hmp->rb_nods_root);
        RB_INIT(&hmp->rb_undo_root);
        RB_INIT(&hmp->rb_resv_root);
        RB_INIT(&hmp->rb_bufs_root);

        hmp->ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);

        TAILQ_INIT(&hmp->volu_list);
        TAILQ_INIT(&hmp->undo_list);
        TAILQ_INIT(&hmp->data_list);
        TAILQ_INIT(&hmp->meta_list);
        TAILQ_INIT(&hmp->lose_list);
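        /*
         * Load and install the volumes named in the mount information
         * copied in from userland.  Any volume that fails to install
         * aborts the loop and the mount.
         */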
        path = objcache_get(namei_oc, M_WAITOK);
        hmp->nvolumes = info.nvolumes;
        for (i = 0; i < info.nvolumes; ++i) {
                error = copyin(&info.volumes[i], &upath, sizeof(char *));
                if (error == 0)
                        error = copyinstr(upath, path, MAXPATHLEN, NULL);
                if (error == 0)
                        error = hammer_install_volume(hmp, path);
                if (error)
                        break;
        }
        objcache_put(namei_oc, path);
        /*
         * Make sure we found a root volume
         */
        if (error == 0 && hmp->rootvol == NULL) {
                kprintf("hammer_mount: No root volume found!\n");
                error = EINVAL;
        }
        if (error) {
                hammer_free_hmp(mp);
                return (error);
        }
        /*
         * No errors, setup enough of the mount point so we can lookup the
         * root vnode.
         */
        mp->mnt_iosize_max = MAXPHYS;
        mp->mnt_kern_flag |= MNTK_FSMID;
        /*
         * note: f_iosize is used by vnode_pager_haspage() when constructing
         * its VOP_BMAP call.
         */
        mp->mnt_stat.f_iosize = HAMMER_BUFSIZE;
        mp->mnt_stat.f_bsize = HAMMER_BUFSIZE;

        mp->mnt_vstat.f_frsize = HAMMER_BUFSIZE;
        mp->mnt_vstat.f_bsize = HAMMER_BUFSIZE;

        mp->mnt_maxsymlinklen = 255;
        mp->mnt_flag |= MNT_LOCAL;
        vfs_add_vnodeops(mp, &hammer_vnode_vops, &mp->mnt_vn_norm_ops);
        vfs_add_vnodeops(mp, &hammer_spec_vops, &mp->mnt_vn_spec_ops);
        vfs_add_vnodeops(mp, &hammer_fifo_vops, &mp->mnt_vn_fifo_ops);
        /*
         * The root volume's ondisk pointer is only valid if we hold a
         * reference to it.
         */
        rootvol = hammer_get_root_volume(hmp, &error);
        if (error)
                goto failed;
        /*
         * Perform any necessary UNDO operations.  The recovery code does
         * call hammer_undo_lookup() so we have to pre-cache the blockmap,
         * and then re-copy it again after recovery is complete.
         *
         * If this is a read-only mount the UNDO information is retained
         * in memory in the form of dirty buffer cache buffers, and not
         * written back to the media.
         */
        bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
              sizeof(hmp->blockmap));

        error = hammer_recover(hmp, rootvol);
        if (error) {
                kprintf("Failed to recover HAMMER filesystem on mount\n");
                goto done;
        }
        /*
         * Finish setup now that we have a good root volume
         */
        ksnprintf(mp->mnt_stat.f_mntfromname,
                  sizeof(mp->mnt_stat.f_mntfromname), "%s",
                  rootvol->ondisk->vol_name);
        mp->mnt_stat.f_fsid.val[0] =
                crc32((char *)&rootvol->ondisk->vol_fsid + 0, 8);
        mp->mnt_stat.f_fsid.val[1] =
                crc32((char *)&rootvol->ondisk->vol_fsid + 8, 8);

        mp->mnt_vstat.f_fsid_uuid = rootvol->ondisk->vol_fsid;
        mp->mnt_vstat.f_fsid = crc32(&mp->mnt_vstat.f_fsid_uuid,
                                     sizeof(mp->mnt_vstat.f_fsid_uuid));
        /*
         * Certain often-modified fields in the root volume are cached in
         * the hammer_mount structure so we do not have to generate lots
         * of little UNDO structures for them.
         *
         * Recopy after recovery.  This also has the side effect of
         * setting our cached undo FIFO's first_offset, which serves to
         * placemark the FIFO start for the NEXT flush cycle while the
         * on-disk first_offset represents the LAST flush cycle.
         */
        hmp->next_tid = rootvol->ondisk->vol0_next_tid;
        bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
              sizeof(hmp->blockmap));
        hmp->copy_stat_freebigblocks = rootvol->ondisk->vol0_stat_freebigblocks;
        /*
         * Use the zone limit set by newfs_hammer, or the zone limit set by
         * sysctl (for debugging), whichever is smaller.
         */
        if (rootvol->ondisk->vol0_zone_limit) {
                hammer_off_t vol0_zone_limit;

                vol0_zone_limit = rootvol->ondisk->vol0_zone_limit;
                for (i = 0; i < HAMMER_MAX_ZONES; ++i) {
                        if (hmp->zone_limits[i] > vol0_zone_limit)
                                hmp->zone_limits[i] = vol0_zone_limit;
                }
        }
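        /*
         * Start the flusher, which handles the background meta-data
         * flushes for this mount, before looking up the root directory.
         */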
        hammer_flusher_create(hmp);
        /*
         * Locate the root directory using the root cluster's B-Tree as a
         * starting point.  The root directory uses an obj_id of 1.
         *
         * FUTURE: Leave the root directory cached referenced but unlocked
         * in hmp->rootvp (need to flush it on unmount).
         */
        error = hammer_vfs_vget(mp, 1, &rootvp);
        if (error)
                goto done;
        vput(rootvp);
        /*vn_unlock(hmp->rootvp);*/

done:
        hammer_rel_volume(rootvol, 0);
failed:
        /*
         * Cleanup and return.
         */
        if (error)
                hammer_free_hmp(mp);
        return (error);
}
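/*
 * Unmount the filesystem: flush out any remaining vnodes and then tear
 * down the internal hammer_mount structure via hammer_free_hmp().
 */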
static int
hammer_vfs_unmount(struct mount *mp, int mntflags)
{
        struct hammer_mount *hmp = (void *)mp->mnt_data;
        int flags;
        int error;

        /*
         * Clean out the vnodes
         */
        flags = 0;
        if (mntflags & MNT_FORCE)
                flags |= FORCECLOSE;
        if ((error = vflush(mp, 0, flags)) != 0)
                return (error);

        /*
         * Clean up the internal mount structure and related entities.  This
         * may issue I/O.
         */
        hammer_free_hmp(mp);
        return (0);
}
/*
 * Clean up the internal mount structure and disassociate it from the mount.
 * This may issue I/O.
 */
static void
hammer_free_hmp(struct mount *mp)
{
        struct hammer_mount *hmp = (void *)mp->mnt_data;
        int i;

        /*
         * Clean up the root vnode
         */
        hammer_flusher_sync(hmp);
        hammer_flusher_sync(hmp);
        hammer_flusher_destroy(hmp);

        KKASSERT(RB_EMPTY(&hmp->rb_inos_root));
        /*
         * Unload & flush inodes
         *
         * XXX illegal to call this from here, it can only be done from
         * the flusher.
         */
        RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
                hammer_unload_inode, (void *)MNT_WAIT);

        /*
         * Unload & flush volumes
         */

        /*
         * Unload buffers and then volumes
         */
        RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL,
                hammer_unload_buffer, NULL);
        RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
                hammer_unload_volume, NULL);
        mp->mnt_data = NULL;
        mp->mnt_flag &= ~MNT_LOCAL;
        hmp->mp = NULL;
        hammer_destroy_objid_cache(hmp);
        kfree(hmp->zbuf, M_HAMMER);
        hmp->zbuf = NULL;

        for (i = 0; i < HAMMER_MAX_ZONES; ++i)
                hammer_free_holes(hmp, &hmp->holes[i]);

        kfree(hmp, M_HAMMER);
}
/*
 * Obtain a vnode for the specified inode number.  An exclusively locked
 * vnode is returned.
 */
static int
hammer_vfs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
        struct hammer_transaction trans;
        struct hammer_mount *hmp = (void *)mp->mnt_data;
        struct hammer_inode *ip;
        int error;

        hammer_simple_transaction(&trans, hmp);

        /*
         * Lookup the requested HAMMER inode.  The structure must be
         * left unlocked while we manipulate the related vnode to avoid
         * a deadlock.
         */
        ip = hammer_get_inode(&trans, NULL, ino, hmp->asof, 0, &error);
        if (ip == NULL) {
                *vpp = NULL;
                return (error);
        }
        error = hammer_get_vnode(ip, vpp);
        hammer_rel_inode(ip, 0);
        hammer_done_transaction(&trans);
        return (error);
}
/*
 * Return the root vnode for the filesystem.
 *
 * HAMMER stores the root vnode in the hammer_mount structure so
 * getting it is easy.
 */
static int
hammer_vfs_root(struct mount *mp, struct vnode **vpp)
{
        struct hammer_mount *hmp = (void *)mp->mnt_data;
        int error;

        error = hammer_vfs_vget(mp, 1, vpp);
        return (error);
}
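/*
 * Report filesystem statistics.  The free space reported is derived
 * from the free big-block count kept in the root volume header
 * (vol0_stat_freebigblocks), converted to f_bsize-sized blocks.
 */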
static int
hammer_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
{
        struct hammer_mount *hmp = (void *)mp->mnt_data;
        hammer_volume_t volume;
        hammer_volume_ondisk_t ondisk;
        int error;
        int64_t bfree;

        volume = hammer_get_root_volume(hmp, &error);
        if (error)
                return (error);
        ondisk = volume->ondisk;

        mp->mnt_stat.f_files = ondisk->vol0_stat_inodes;
        bfree = ondisk->vol0_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE;
        hammer_rel_volume(volume, 0);

        mp->mnt_stat.f_bfree = bfree / HAMMER_BUFSIZE;
        mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree;
        if (mp->mnt_stat.f_files < 0)
                mp->mnt_stat.f_files = 0;

        *sbp = mp->mnt_stat;
        return (0);
}
static int
hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
{
        struct hammer_mount *hmp = (void *)mp->mnt_data;
        hammer_volume_t volume;
        hammer_volume_ondisk_t ondisk;
        int error;
        int64_t bfree;

        volume = hammer_get_root_volume(hmp, &error);
        if (error)
                return (error);
        ondisk = volume->ondisk;

        mp->mnt_vstat.f_files = ondisk->vol0_stat_inodes;
        bfree = ondisk->vol0_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE;
        hammer_rel_volume(volume, 0);

        mp->mnt_vstat.f_bfree = bfree / HAMMER_BUFSIZE;
        mp->mnt_vstat.f_bavail = mp->mnt_vstat.f_bfree;
        if (mp->mnt_vstat.f_files < 0)
                mp->mnt_vstat.f_files = 0;
        *sbp = mp->mnt_vstat;
        return (0);
}
/*
 * Sync the filesystem.  Currently we have to run it twice, the second
 * one will advance the undo start index to the end index, so if a crash
 * occurs no undos will be run on mount.
 *
 * We do not sync the filesystem if we are called from a panic.  If we did
 * we might end up blowing up a sync that was already in progress.
 */
static int
hammer_vfs_sync(struct mount *mp, int waitfor)
{
        struct hammer_mount *hmp = (void *)mp->mnt_data;
        int error;

        if (panicstr == NULL) {
                error = hammer_sync_hmp(hmp, waitfor);
                if (error == 0)
                        error = hammer_sync_hmp(hmp, waitfor);
        } else {
                error = EIO;
        }
        return (error);
}
/*
 * Convert a vnode to a file handle.
 */
static int
hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp)
{
        hammer_inode_t ip;

        KKASSERT(MAXFIDSZ >= 16);
        ip = VTOI(vp);
        fhp->fid_len = offsetof(struct fid, fid_data[16]);
        fhp->fid_reserved = 0;
        bcopy(&ip->obj_id, fhp->fid_data + 0, sizeof(ip->obj_id));
        bcopy(&ip->obj_asof, fhp->fid_data + 8, sizeof(ip->obj_asof));
        return (0);
}
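/*
 * The 16-byte fid_data constructed above holds the 64-bit object id at
 * offset 0 and the 64-bit as-of TID at offset 8; hammer_vfs_fhtovp()
 * below unpacks the handle in the same order.
 */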
/*
 * Convert a file handle back to a vnode.
 */
static int
hammer_vfs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
{
        struct hammer_transaction trans;
        struct hammer_inode *ip;
        struct hammer_inode_info info;
        int error;

        bcopy(fhp->fid_data + 0, &info.obj_id, sizeof(info.obj_id));
        bcopy(fhp->fid_data + 8, &info.obj_asof, sizeof(info.obj_asof));

        hammer_simple_transaction(&trans, (void *)mp->mnt_data);

        /*
         * Get/allocate the hammer_inode structure.  The structure must be
         * unlocked while we manipulate the related vnode to avoid a
         * deadlock.
         */
        ip = hammer_get_inode(&trans, NULL, info.obj_id, info.obj_asof,
                              0, &error);
        if (ip == NULL) {
                *vpp = NULL;
                return (error);
        }
        error = hammer_get_vnode(ip, vpp);
        hammer_rel_inode(ip, 0);
        hammer_done_transaction(&trans);
        return (error);
}
static int
hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
                    int *exflagsp, struct ucred **credanonp)
{
        hammer_mount_t hmp = (void *)mp->mnt_data;
        struct netcred *np;
        int error;

        np = vfs_export_lookup(mp, &hmp->export, nam);
        if (np) {
                *exflagsp = np->netc_exflags;
                *credanonp = &np->netc_anon;
                error = 0;
        } else {
                error = EACCES;
        }
        return (error);
}
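/*
 * Support MOUNTCTL_SET_EXPORT by installing a new export list for the
 * mount; other export operations are rejected.
 */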
static int
hammer_vfs_export(struct mount *mp, int op, const struct export_args *export)
{
        hammer_mount_t hmp = (void *)mp->mnt_data;
        int error;

        switch(op) {
        case MOUNTCTL_SET_EXPORT:
                error = vfs_export(mp, &hmp->export, export);
                break;
        default:
                error = EOPNOTSUPP;
                break;
        }
        return (error);
}