2 * Copyright (c) 2011, 2012 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/nlookup.h>
38 #include <sys/vnode.h>
39 #include <sys/mount.h>
40 #include <sys/fcntl.h>
43 #include <sys/vfsops.h>
44 #include <sys/sysctl.h>
45 #include <sys/socket.h>
48 #include "hammer2_disk.h"
49 #include "hammer2_mount.h"
50 #include "hammer2_network.h"
/*
 * Per-sync bookkeeping handed to the vnode scan callbacks
 * (hammer2_sync_scan1/scan2).
 * NOTE(review): the struct body and closing brace are not visible in this
 * excerpt (embedded line numbering jumps 52 -> 57); members presumably
 * include waitfor and error, per the uses in hammer2_vfs_sync -- confirm.
 */
52 struct hammer2_sync_info {
/* Global list of active per-device hammer2_mount structures. */
57 TAILQ_HEAD(hammer2_mntlist, hammer2_mount);
58 static struct hammer2_mntlist hammer2_mntlist;
/* Lock guarding hammer2_mntlist (taken LK_EXCLUSIVE in mount/unmount). */
59 static struct lock hammer2_mntlk;
/*
 * Tunables and I/O statistics counters, exported read-write through the
 * vfs.hammer2 sysctl tree below.
 * NOTE(review): the iod_* vs ioa_* split is not explained by visible code;
 * presumably "device" vs "logical/aggregate" I/O counters -- confirm
 * against the accounting sites elsewhere in the filesystem.
 */
62 int hammer2_cluster_enable = 1;
63 int hammer2_hardlink_enable = 1;
64 long hammer2_iod_file_read;
65 long hammer2_iod_meta_read;
66 long hammer2_iod_indr_read;
67 long hammer2_iod_file_write;
68 long hammer2_iod_meta_write;
69 long hammer2_iod_indr_write;
70 long hammer2_iod_volu_write;
71 long hammer2_ioa_file_read;
72 long hammer2_ioa_meta_read;
73 long hammer2_ioa_indr_read;
74 long hammer2_ioa_file_write;
75 long hammer2_ioa_meta_write;
76 long hammer2_ioa_indr_write;
77 long hammer2_ioa_volu_write;
/*
 * sysctl glue: publish the tunables and counters above under vfs.hammer2.
 * All nodes are CTLFLAG_RW so the counters can be zeroed from userland.
 */
79 SYSCTL_NODE(_vfs, OID_AUTO, hammer2, CTLFLAG_RW, 0, "HAMMER2 filesystem");
81 SYSCTL_INT(_vfs_hammer2, OID_AUTO, debug, CTLFLAG_RW,
82 &hammer2_debug, 0, "");
83 SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_enable, CTLFLAG_RW,
84 &hammer2_cluster_enable, 0, "");
85 SYSCTL_INT(_vfs_hammer2, OID_AUTO, hardlink_enable, CTLFLAG_RW,
86 &hammer2_hardlink_enable, 0, "");
87 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_read, CTLFLAG_RW,
88 &hammer2_iod_file_read, 0, "");
89 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_read, CTLFLAG_RW,
90 &hammer2_iod_meta_read, 0, "");
91 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_read, CTLFLAG_RW,
92 &hammer2_iod_indr_read, 0, "");
93 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_write, CTLFLAG_RW,
94 &hammer2_iod_file_write, 0, "");
95 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_write, CTLFLAG_RW,
96 &hammer2_iod_meta_write, 0, "");
97 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_write, CTLFLAG_RW,
98 &hammer2_iod_indr_write, 0, "");
99 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_write, CTLFLAG_RW,
100 &hammer2_iod_volu_write, 0, "");
101 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_file_read, CTLFLAG_RW,
102 &hammer2_ioa_file_read, 0, "");
103 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_meta_read, CTLFLAG_RW,
104 &hammer2_ioa_meta_read, 0, "");
105 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_indr_read, CTLFLAG_RW,
106 &hammer2_ioa_indr_read, 0, "");
107 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_file_write, CTLFLAG_RW,
108 &hammer2_ioa_file_write, 0, "");
109 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_meta_write, CTLFLAG_RW,
110 &hammer2_ioa_meta_write, 0, "");
111 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_indr_write, CTLFLAG_RW,
112 &hammer2_ioa_indr_write, 0, "");
113 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_volu_write, CTLFLAG_RW,
114 &hammer2_ioa_volu_write, 0, "");
/*
 * Forward declarations for the VFS entry points and the cluster-messaging
 * helpers defined later in this file. All are file-local (static).
 * NOTE(review): some prototypes are split across lines not visible in this
 * excerpt (e.g. the tail of hammer2_vfs_mount's parameter list).
 */
116 static int hammer2_vfs_init(struct vfsconf *conf);
117 static int hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
119 static int hammer2_remount(struct mount *, char *, struct vnode *,
121 static int hammer2_vfs_unmount(struct mount *mp, int mntflags);
122 static int hammer2_vfs_root(struct mount *mp, struct vnode **vpp);
123 static int hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp,
125 static int hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
127 static int hammer2_vfs_sync(struct mount *mp, int waitfor);
128 static int hammer2_vfs_vget(struct mount *mp, struct vnode *dvp,
129 ino_t ino, struct vnode **vpp);
130 static int hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
131 struct fid *fhp, struct vnode **vpp);
132 static int hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp);
133 static int hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
134 int *exflagsp, struct ucred **credanonp);
136 static int hammer2_install_volume_header(hammer2_mount_t *hmp);
137 static int hammer2_sync_scan1(struct mount *mp, struct vnode *vp, void *data);
138 static int hammer2_sync_scan2(struct mount *mp, struct vnode *vp, void *data);
140 static void hammer2_cluster_thread_rd(void *arg);
141 static void hammer2_cluster_thread_wr(void *arg);
142 static int hammer2_msg_span_reply(hammer2_pfsmount_t *pmp, hammer2_msg_t *msg);
145 * HAMMER2 vfs operations.
/*
 * VFS operations vector wiring the entry points above into the kernel,
 * plus the malloc zone and module registration glue.
 * NOTE(review): the closing "};" of the initializer is not visible in this
 * excerpt (embedded numbering jumps 158 -> 161).
 */
147 static struct vfsops hammer2_vfsops = {
148 .vfs_init = hammer2_vfs_init,
149 .vfs_sync = hammer2_vfs_sync,
150 .vfs_mount = hammer2_vfs_mount,
151 .vfs_unmount = hammer2_vfs_unmount,
152 .vfs_root = hammer2_vfs_root,
153 .vfs_statfs = hammer2_vfs_statfs,
154 .vfs_statvfs = hammer2_vfs_statvfs,
155 .vfs_vget = hammer2_vfs_vget,
156 .vfs_vptofh = hammer2_vfs_vptofh,
157 .vfs_fhtovp = hammer2_vfs_fhtovp,
158 .vfs_checkexp = hammer2_vfs_checkexp
/* Malloc type used for pmp/hmp allocations throughout this file. */
161 MALLOC_DEFINE(M_HAMMER2, "HAMMER2-mount", "");
163 VFS_SET(hammer2_vfsops, hammer2, 0);
164 MODULE_VERSION(hammer2, 1);
/*
 * One-time VFS initialization: sanity-check that the compiled structure
 * sizes match the on-media layout constants, then initialize the global
 * mount list and its lock.
 * NOTE(review): return type line, braces, and the error-return paths are
 * not visible in this excerpt; code below is kept byte-identical.
 */
168 hammer2_vfs_init(struct vfsconf *conf)
/* Each check pairs an on-media size constant with its C struct. */
174 if (HAMMER2_BLOCKREF_BYTES != sizeof(struct hammer2_blockref))
176 if (HAMMER2_INODE_BYTES != sizeof(struct hammer2_inode_data))
178 if (HAMMER2_ALLOCREF_BYTES != sizeof(struct hammer2_allocref))
180 if (HAMMER2_VOLUME_BYTES != sizeof(struct hammer2_volume_data))
184 kprintf("HAMMER2 structure size mismatch; cannot continue.\n");
186 lockinit(&hammer2_mntlk, "mntlk", 0, 0);
187 TAILQ_INIT(&hammer2_mntlist);
193 * Mount or remount HAMMER2 fileystem from physical media
196 * mp mount point structure
202 * mp mount point structure
203 * path path to mount point
204 * data pointer to argument structure in user space
205 * volume volume path (device@LABEL form)
206 * hflags user mount flags
207 * cred user credentials
/*
 * Mount entry point. Copies in the userland mount arguments, resolves the
 * "device@LABEL" volume string, opens the block device (first mount only),
 * allocates the per-PFS (pmp) and per-device (hmp) structures, installs the
 * volume header, locates the super-root and the labeled PFS root inode, and
 * finally starts the cluster messaging reader/writer threads.
 * NOTE(review): many lines (error checks, braces, gotos) are missing from
 * this excerpt -- the embedded numbering has large gaps; code below is kept
 * byte-identical to what is visible.
 */
214 hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
217 struct hammer2_mount_info info;
218 hammer2_pfsmount_t *pmp;
219 hammer2_mount_t *hmp;
222 struct nlookupdata nd;
223 hammer2_chain_t *parent;
224 hammer2_chain_t *schain;
225 hammer2_chain_t *rchain;
226 char devstr[MNAMELEN];
241 kprintf("hammer2_mount\n");
247 bzero(&info, sizeof(info));
248 info.cluster_fd = -1;
252 * Non-root mount or updating a mount
254 error = copyin(data, &info, sizeof(info));
258 error = copyinstr(info.volume, devstr, MNAMELEN - 1, &done);
262 /* Extract device and label */
264 label = strchr(devstr, '@');
/* Reject a label that would run past the copied-in string. */
266 ((label + 1) - dev) > done) {
274 if (mp->mnt_flag & MNT_UPDATE) {
276 /* HAMMER2 implements NFS export via mountctl */
279 error = hammer2_remount(mp, path, devvp, cred);
287 * Lookup name and verify it refers to a block device.
289 error = nlookup_init(&nd, dev, UIO_SYSSPACE, NLC_FOLLOW);
291 error = nlookup(&nd);
293 error = cache_vref(&nd.nl_nch, nd.nl_cred, &devvp);
297 if (vn_isdisk(devvp, &error))
298 error = vfs_mountedon(devvp);
302 * Determine if the device has already been mounted. After this
303 * check hmp will be non-NULL if we are doing the second or more
304 * hammer2 mounts from the same device.
306 lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);
307 TAILQ_FOREACH(hmp, &hammer2_mntlist, mntentry) {
308 if (hmp->devvp == devvp)
313 * Open the device if this isn't a secondary mount
319 if (error == 0 && vcount(devvp) > 0)
323 * Now open the device
326 ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);
327 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
328 error = vinvalbuf(devvp, V_SAVE, 0, 0);
330 error = VOP_OPEN(devvp,
331 ronly ? FREAD : FREAD | FWRITE,
/* Failure path: release the device vnode and the mount-list lock. */
336 if (error && devvp) {
341 lockmgr(&hammer2_mntlk, LK_RELEASE);
347 * Block device opened successfully, finish initializing the
350 * From this point on we have to call hammer2_unmount() on failure.
352 pmp = kmalloc(sizeof(*pmp), M_HAMMER2, M_WAITOK | M_ZERO);
353 mp->mnt_data = (qaddr_t)pmp;
355 kmalloc_create(&pmp->mmsg, "HAMMER2-pfsmsg");
356 lockinit(&pmp->msglk, "h2msg", 0, 0);
357 TAILQ_INIT(&pmp->msgq);
358 RB_INIT(&pmp->staterd_tree);
359 RB_INIT(&pmp->statewr_tree);
/* First mount of this device: allocate the per-device hmp. */
362 hmp = kmalloc(sizeof(*hmp), M_HAMMER2, M_WAITOK | M_ZERO);
365 kmalloc_create(&hmp->minode, "HAMMER2-inodes");
366 kmalloc_create(&hmp->mchain, "HAMMER2-chains");
367 TAILQ_INSERT_TAIL(&hammer2_mntlist, hmp, mntentry);
369 ccms_domain_init(&pmp->ccms_dom);
372 lockmgr(&hammer2_mntlk, LK_RELEASE);
373 kprintf("hammer2_mount hmp=%p pmpcnt=%d\n", hmp, hmp->pmp_count);
375 mp->mnt_flag = MNT_LOCAL;
376 mp->mnt_kern_flag |= MNTK_ALL_MPSAFE; /* all entry pts are SMP */
380 * vchain setup. vchain.data is special cased to NULL.
381 * vchain.refs is initialized and will never drop to 0.
383 hmp->vchain.refs = 1;
384 hmp->vchain.data = (void *)&hmp->voldata;
385 hmp->vchain.bref.type = HAMMER2_BREF_TYPE_VOLUME;
386 hmp->vchain.bref.data_off = 0 | HAMMER2_PBUFRADIX;
387 hmp->vchain.bref_flush = hmp->vchain.bref;
388 ccms_cst_init(&hmp->vchain.cst, NULL);
389 /* hmp->vchain.u.xxx is left NULL */
390 lockinit(&hmp->alloclk, "h2alloc", 0, 0);
391 lockinit(&hmp->voldatalk, "voldata", 0, LK_CANRECURSE);
394 * Install the volume header
396 error = hammer2_install_volume_header(hmp);
398 hammer2_vfs_unmount(mp, MNT_FORCE);
404 * required mount structure initializations
406 mp->mnt_stat.f_iosize = HAMMER2_PBUFSIZE;
407 mp->mnt_stat.f_bsize = HAMMER2_PBUFSIZE;
409 mp->mnt_vstat.f_frsize = HAMMER2_PBUFSIZE;
410 mp->mnt_vstat.f_bsize = HAMMER2_PBUFSIZE;
415 mp->mnt_iosize_max = MAXPHYS;
418 * First locate the super-root inode, which is key 0 relative to the
419 * volume header's blockset.
421 * Then locate the root inode by scanning the directory keyspace
422 * represented by the label.
425 parent = &hmp->vchain;
426 hammer2_chain_lock(hmp, parent, HAMMER2_RESOLVE_ALWAYS);
427 schain = hammer2_chain_lookup(hmp, &parent,
428 HAMMER2_SROOT_KEY, HAMMER2_SROOT_KEY, 0);
429 hammer2_chain_unlock(hmp, parent);
430 if (schain == NULL) {
431 kprintf("hammer2_mount: invalid super-root\n");
432 hammer2_vfs_unmount(mp, MNT_FORCE);
435 hammer2_chain_ref(hmp, schain); /* for hmp->schain */
436 hmp->schain = schain; /* left locked */
/* Secondary mount: reuse the already-resolved super-root chain. */
438 schain = hmp->schain;
439 hammer2_chain_lock(hmp, schain, HAMMER2_RESOLVE_ALWAYS);
/* Scan the super-root directory keyspace for the requested PFS label. */
443 lhc = hammer2_dirhash(label, strlen(label));
444 rchain = hammer2_chain_lookup(hmp, &parent,
445 lhc, lhc + HAMMER2_DIRHASH_LOMASK,
448 if (rchain->bref.type == HAMMER2_BREF_TYPE_INODE &&
450 strcmp(label, rchain->data->ipdata.filename) == 0) {
453 rchain = hammer2_chain_next(hmp, &parent, rchain,
454 lhc, lhc + HAMMER2_DIRHASH_LOMASK,
457 hammer2_chain_unlock(hmp, parent);
458 if (rchain == NULL) {
459 kprintf("hammer2_mount: PFS label not found\n");
460 hammer2_vfs_unmount(mp, MNT_FORCE);
463 if (rchain->flags & HAMMER2_CHAIN_MOUNTED) {
464 hammer2_chain_unlock(hmp, rchain);
465 kprintf("hammer2_mount: PFS label already mounted!\n");
466 hammer2_vfs_unmount(mp, MNT_FORCE);
469 atomic_set_int(&rchain->flags, HAMMER2_CHAIN_MOUNTED);
471 hammer2_chain_ref(hmp, rchain); /* for pmp->rchain */
472 hammer2_chain_unlock(hmp, rchain);
473 pmp->rchain = rchain; /* left held & unlocked */
474 pmp->iroot = rchain->u.ip; /* implied hold from rchain */
475 pmp->iroot->pmp = pmp;
477 kprintf("iroot %p\n", pmp->iroot);
480 * Ref the cluster management messaging descriptor. The mount
481 * program deals with the other end of the communications pipe.
483 pmp->msg_fp = holdfp(curproc->p_fd, info.cluster_fd, -1);
484 if (pmp->msg_fp == NULL) {
485 kprintf("hammer2_mount: bad cluster_fd!\n");
486 hammer2_vfs_unmount(mp, MNT_FORCE);
/* Spin up the messaging reader/writer kernel threads for this PFS. */
489 lwkt_create(hammer2_cluster_thread_rd, pmp, &pmp->msgrd_td,
490 NULL, 0, -1, "hammer2-msgrd");
491 lwkt_create(hammer2_cluster_thread_wr, pmp, &pmp->msgwr_td,
492 NULL, 0, -1, "hammer2-msgwr");
/* Attach the vnode operations vectors for this mount. */
498 vfs_add_vnodeops(mp, &hammer2_vnode_vops, &mp->mnt_vn_norm_ops);
499 vfs_add_vnodeops(mp, &hammer2_spec_vops, &mp->mnt_vn_spec_ops);
500 vfs_add_vnodeops(mp, &hammer2_fifo_vops, &mp->mnt_vn_fifo_ops);
/* Record the from/on names in mnt_stat for userland (df, mount -v). */
502 copyinstr(info.volume, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, &size);
503 bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
504 bzero(mp->mnt_stat.f_mntonname, sizeof(mp->mnt_stat.f_mntonname));
505 copyinstr(path, mp->mnt_stat.f_mntonname,
506 sizeof(mp->mnt_stat.f_mntonname) - 1,
510 * Initial statfs to prime mnt_stat.
512 hammer2_vfs_statfs(mp, &mp->mnt_stat, cred);
/*
 * Handle an MNT_UPDATE remount of an existing HAMMER2 mount (called from
 * hammer2_vfs_mount above).
 * NOTE(review): only the first line of the signature is visible in this
 * excerpt; the remaining parameters and the body are on lines not shown.
 */
519 hammer2_remount(struct mount *mp, char *path, struct vnode *devvp,
/*
 * Unmount entry point. Flushes vnodes and any remaining modified chains,
 * drops the PFS root and (on last PFS) the super-root chain, kills the
 * cluster messaging threads, closes the device, and frees pmp/hmp.
 * NOTE(review): braces, some assignments (pmp/hmp/devvp extraction) and
 * the return are not visible in this excerpt; kept byte-identical.
 */
527 hammer2_vfs_unmount(struct mount *mp, int mntflags)
529 hammer2_pfsmount_t *pmp;
530 hammer2_mount_t *hmp;
533 int ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);
540 if (mntflags & MNT_FORCE)
543 hammer2_mount_exlock(hmp);
546 * If mount initialization proceeded far enough we must flush
550 error = vflush(mp, 0, flags);
555 lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);
557 kprintf("hammer2_unmount hmp=%p pmpcnt=%d\n", hmp, hmp->pmp_count);
560 * Flush any left over chains. The voldata lock is only used
561 * to synchronize against HAMMER2_CHAIN_MODIFIED_AUX.
563 hammer2_voldata_lock(hmp);
564 if (hmp->vchain.flags & (HAMMER2_CHAIN_MODIFIED |
565 HAMMER2_CHAIN_MODIFIED_AUX |
566 HAMMER2_CHAIN_SUBMODIFIED)) {
567 hammer2_voldata_unlock(hmp);
568 hammer2_vfs_sync(mp, MNT_WAIT);
570 hammer2_voldata_unlock(hmp);
/* If the sync above still left dirty chains, report it (debug aid). */
572 if (hmp->pmp_count == 0) {
573 if (hmp->vchain.flags & (HAMMER2_CHAIN_MODIFIED |
574 HAMMER2_CHAIN_MODIFIED_AUX |
575 HAMMER2_CHAIN_SUBMODIFIED)) {
576 kprintf("hammer2_unmount: chains left over after "
578 if (hammer2_debug & 0x0010)
579 Debugger("entered debugger");
584 * Cleanup the root and super-root chain elements (which should be
589 atomic_clear_int(&pmp->rchain->flags, HAMMER2_CHAIN_MOUNTED);
590 KKASSERT(pmp->rchain->refs == 1);
591 hammer2_chain_drop(hmp, pmp->rchain);
594 ccms_domain_uninit(&pmp->ccms_dom);
597 * Ask the cluster controller to go away
599 atomic_set_int(&pmp->msg_ctl, HAMMER2_CLUSTERCTL_KILL);
600 while (pmp->msgrd_td || pmp->msgwr_td) {
601 wakeup(&pmp->msg_ctl);
602 tsleep(pmp, 0, "clstrkl", hz);
606 * Drop communications descriptor
614 * If no PFS's left drop the master hammer2_mount for the device.
616 if (hmp->pmp_count == 0) {
618 KKASSERT(hmp->schain->refs == 1);
619 hammer2_chain_drop(hmp, hmp->schain);
624 * Finish up with the device vnode
626 if ((devvp = hmp->devvp) != NULL) {
627 vinvalbuf(devvp, (ronly ? 0 : V_SAVE), 0, 0);
629 VOP_CLOSE(devvp, (ronly ? FREAD : FREAD|FWRITE));
634 hammer2_mount_unlock(hmp);
640 kmalloc_destroy(&pmp->mmsg);
642 kfree(pmp, M_HAMMER2);
643 if (hmp->pmp_count == 0) {
644 TAILQ_REMOVE(&hammer2_mntlist, hmp, mntentry);
645 kmalloc_destroy(&hmp->minode);
646 kmalloc_destroy(&hmp->mchain);
647 kfree(hmp, M_HAMMER2);
649 lockmgr(&hammer2_mntlk, LK_RELEASE);
/*
 * VFS vget entry point (inode number -> vnode).
 * Currently only logs its invocation; the body visible here is a stub
 * (the return path is on lines not shown in this excerpt).
 */
655 hammer2_vfs_vget(struct mount *mp, struct vnode *dvp,
656 ino_t ino, struct vnode **vpp)
658 kprintf("hammer2_vget\n");
664 hammer2_vfs_root(struct mount *mp, struct vnode **vpp)
666 hammer2_pfsmount_t *pmp;
667 hammer2_mount_t *hmp;
673 hammer2_mount_exlock(hmp);
674 if (pmp->iroot == NULL) {
678 hammer2_chain_lock(hmp, &pmp->iroot->chain,
679 HAMMER2_RESOLVE_ALWAYS |
680 HAMMER2_RESOLVE_SHARED);
681 vp = hammer2_igetv(pmp->iroot, &error);
682 hammer2_chain_unlock(hmp, &pmp->iroot->chain);
685 kprintf("vnodefail\n");
687 hammer2_mount_unlock(hmp);
695 * XXX incorporate pmp->iroot->ip_data.inode_quota and data_quota
/*
 * statfs entry point: fill mnt_stat from the root inode's counts and the
 * volume header's allocator fields, all expressed in HAMMER2_PBUFSIZE
 * blocks. f_bfree is a rough estimate (allocator range), not exact usage.
 * NOTE(review): pmp/hmp extraction, the *sbp copy-out and return are on
 * lines not visible in this excerpt.
 */
699 hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
701 hammer2_pfsmount_t *pmp;
702 hammer2_mount_t *hmp;
707 mp->mnt_stat.f_files = pmp->iroot->ip_data.inode_count +
708 pmp->iroot->delta_icount;
709 mp->mnt_stat.f_ffree = 0;
710 mp->mnt_stat.f_blocks = hmp->voldata.allocator_size / HAMMER2_PBUFSIZE;
711 mp->mnt_stat.f_bfree = (hmp->voldata.allocator_size -
712 hmp->voldata.allocator_beg) / HAMMER2_PBUFSIZE;
713 mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree;
/*
 * statvfs entry point: mirror of hammer2_vfs_statfs above but filling
 * mnt_vstat, then copying it out to *sbp.
 * NOTE(review): pmp/hmp extraction and the return are on lines not visible
 * in this excerpt.
 */
721 hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
723 hammer2_pfsmount_t *pmp;
724 hammer2_mount_t *hmp;
729 mp->mnt_vstat.f_bsize = HAMMER2_PBUFSIZE;
730 mp->mnt_vstat.f_files = pmp->iroot->ip_data.inode_count +
731 pmp->iroot->delta_icount;
732 mp->mnt_vstat.f_ffree = 0;
733 mp->mnt_vstat.f_blocks = hmp->voldata.allocator_size / HAMMER2_PBUFSIZE;
734 mp->mnt_vstat.f_bfree = (hmp->voldata.allocator_size -
735 hmp->voldata.allocator_beg) / HAMMER2_PBUFSIZE;
736 mp->mnt_vstat.f_bavail = mp->mnt_vstat.f_bfree;
738 *sbp = mp->mnt_vstat;
743 * Sync the entire filesystem; this is called from the filesystem syncer
744 * process periodically and whenever a user calls sync(1) on the hammer
747 * Currently is actually called from the syncer! \o/
749 * This task will have to snapshot the state of the dirty inode chain.
750 * From that, it will have to make sure all of the inodes on the dirty
751 * chain have IO initiated. We make sure that io is initiated for the root
754 * If waitfor is set, we wait for media to acknowledge the new rootblock.
756 * THINKS: side A vs side B, to have sync not stall all I/O?
760 hammer2_vfs_sync(struct mount *mp, int waitfor)
762 struct hammer2_sync_info info;
763 hammer2_mount_t *hmp;
771 if (waitfor & MNT_LAZY)
772 flags |= VMSC_ONEPASS;
775 info.waitfor = MNT_NOWAIT;
776 vmntvnodescan(mp, flags | VMSC_NOWAIT,
778 hammer2_sync_scan2, &info);
779 if (info.error == 0 && (waitfor & MNT_WAIT)) {
780 info.waitfor = waitfor;
781 vmntvnodescan(mp, flags,
783 hammer2_sync_scan2, &info);
787 if (waitfor == MNT_WAIT) {
793 hammer2_chain_lock(hmp, &hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
794 if (hmp->vchain.flags & (HAMMER2_CHAIN_MODIFIED |
795 HAMMER2_CHAIN_MODIFIED_AUX |
796 HAMMER2_CHAIN_SUBMODIFIED)) {
797 hammer2_chain_flush(hmp, &hmp->vchain, 0);
802 hammer2_chain_unlock(hmp, &hmp->vchain);
806 if ((waitfor & MNT_LAZY) == 0) {
807 waitfor = MNT_NOWAIT;
808 vn_lock(hmp->devvp, LK_EXCLUSIVE | LK_RETRY);
809 error = VOP_FSYNC(hmp->devvp, waitfor, 0);
810 vn_unlock(hmp->devvp);
813 if (error == 0 && haswork) {
817 * Synchronize the disk before flushing the volume
821 bp->b_bio1.bio_offset = 0;
824 bp->b_cmd = BUF_CMD_FLUSH;
825 bp->b_bio1.bio_done = biodone_sync;
826 bp->b_bio1.bio_flags |= BIO_SYNC;
827 vn_strategy(hmp->devvp, &bp->b_bio1);
828 biowait(&bp->b_bio1, "h2vol");
832 * Then we can safely flush the volume header. Volume
833 * data is locked separately to prevent ioctl functions
834 * from deadlocking due to a configuration issue.
836 bp = getblk(hmp->devvp, 0, HAMMER2_PBUFSIZE, 0, 0);
837 hammer2_voldata_lock(hmp);
838 bcopy(&hmp->voldata, bp->b_data, HAMMER2_PBUFSIZE);
839 hammer2_voldata_unlock(hmp);
848 * NOTE: We don't test SUBMODIFIED or MOVED here because the fsync code
849 * won't flush on those flags. The syncer code above will do a
850 * general meta-data flush globally that will catch these flags.
/*
 * Fast-pass vnode scan filter: skip vnodes with no modified/dirty-embedded
 * chain flags and an empty dirty-buffer tree.
 * NOTE(review): the ip extraction, braces and return values are on lines
 * not visible in this excerpt.
 */
853 hammer2_sync_scan1(struct mount *mp, struct vnode *vp, void *data)
858 if (vp->v_type == VNON || ip == NULL ||
859 ((ip->chain.flags & (HAMMER2_CHAIN_MODIFIED |
860 HAMMER2_CHAIN_DIRTYEMBED)) == 0 &&
861 RB_EMPTY(&vp->v_rbdirty_tree))) {
/*
 * Slow-pass vnode scan: same dirtiness filter as scan1, then fsync the
 * vnode (MNT_NOWAIT) and record the error in the shared sync info.
 * NOTE(review): the ip extraction, braces, error accumulation into
 * info->error and the return are on lines not visible in this excerpt.
 */
868 hammer2_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
870 struct hammer2_sync_info *info = data;
875 if (vp->v_type == VNON || vp->v_type == VBAD ||
876 ((ip->chain.flags & (HAMMER2_CHAIN_MODIFIED |
877 HAMMER2_CHAIN_DIRTYEMBED)) == 0 &&
878 RB_EMPTY(&vp->v_rbdirty_tree))) {
881 error = VOP_FSYNC(vp, MNT_NOWAIT, 0);
/*
 * NFS export: vnode -> file handle. Body not visible in this excerpt
 * (presumably a stub returning an error; confirm against full source).
 */
889 hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp)
/*
 * NFS export: file handle -> vnode. Body not visible in this excerpt
 * (presumably a stub returning an error; confirm against full source).
 */
896 hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
897 struct fid *fhp, struct vnode **vpp)
/*
 * NFS export permission check for a client address. Body not visible in
 * this excerpt (presumably a stub returning an error; confirm).
 */
904 hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
905 int *exflagsp, struct ucred **credanonp)
911 * Support code for hammer2_mount(). Read, verify, and install the volume
912 * header into the HMP
914 * XXX read four volhdrs and use the one with the highest TID whos CRC
919 * XXX For filesystems w/ less than 4 volhdrs, make sure to not write to
920 * nonexistant locations.
922 * XXX Record selected volhdr and ring updates to each of 4 volhdrs
/*
 * Read the (up to HAMMER2_NUM_VOLHDRS) volume header copies off the device,
 * validate magic and the two section CRCs, and install the copy with the
 * highest mirror_tid into hmp->voldata. Returns an error if no valid
 * header is found.
 * NOTE(review): loop braces, the bcopy into hmp->voldata, brelse calls,
 * valid/error bookkeeping and the return are on lines not visible here.
 */
926 hammer2_install_volume_header(hammer2_mount_t *hmp)
928 hammer2_volume_data_t *vd;
930 hammer2_crc32_t crc0, crc, bcrc0, bcrc;
942 * There are up to 4 copies of the volume header (syncs iterate
943 * between them so there is no single master). We don't trust the
944 * volu_size field so we don't know precisely how large the filesystem
945 * is, so depend on the OS to return an error if we go beyond the
946 * block device's EOF.
948 for (i = 0; i < HAMMER2_NUM_VOLHDRS; i++) {
949 error = bread(hmp->devvp, i * HAMMER2_ZONE_BYTES64,
950 HAMMER2_VOLUME_BYTES, &bp);
957 vd = (struct hammer2_volume_data *) bp->b_data;
958 if ((vd->magic != HAMMER2_VOLUME_ID_HBO) &&
959 (vd->magic != HAMMER2_VOLUME_ID_ABO)) {
965 if (vd->magic == HAMMER2_VOLUME_ID_ABO) {
966 /* XXX: Reversed-endianness filesystem */
967 kprintf("hammer2: reverse-endian filesystem detected");
/* Verify both icrc-covered sections against the stored CRCs. */
973 crc = vd->icrc_sects[HAMMER2_VOL_ICRC_SECT0];
974 crc0 = hammer2_icrc32(bp->b_data + HAMMER2_VOLUME_ICRC0_OFF,
975 HAMMER2_VOLUME_ICRC0_SIZE);
976 bcrc = vd->icrc_sects[HAMMER2_VOL_ICRC_SECT1];
977 bcrc0 = hammer2_icrc32(bp->b_data + HAMMER2_VOLUME_ICRC1_OFF,
978 HAMMER2_VOLUME_ICRC1_SIZE);
979 if ((crc0 != crc) || (bcrc0 != bcrc)) {
980 kprintf("hammer2 volume header crc "
981 "mismatch copy #%d\t%08x %08x",
/* Keep the copy with the highest mirror_tid (most recent sync). */
988 if (valid == 0 || hmp->voldata.mirror_tid < vd->mirror_tid) {
998 kprintf("hammer2: a valid volume header was found\n");
1001 kprintf("hammer2: no valid volume headers found!\n");
1007 * Cluster controller thread. Perform messaging functions. We have one
1008 * thread for the reader and one for the writer. The writer handles
1009 * shutdown requests (which should break the reader thread).
/*
 * Cluster messaging reader thread. Loops until HAMMER2_CLUSTERCTL_KILL:
 * reads a fixed header from the messaging descriptor, validates magic and
 * sizes, reads the extended header and aux payload, then dispatches the
 * message through the RX state machine. On exit it frees pending state
 * and shuts down the descriptor.
 * NOTE(review): many error-branch lines (breaks, brelse-equivalents,
 * braces) are not visible in this excerpt; kept byte-identical.
 */
1013 hammer2_cluster_thread_rd(void *arg)
1015 hammer2_pfsmount_t *pmp = arg;
1016 hammer2_msg_hdr_t hdr;
1018 hammer2_state_t *state;
1022 while ((pmp->msg_ctl & HAMMER2_CLUSTERCTL_KILL) == 0) {
1024 * Retrieve the message from the pipe or socket.
1026 error = fp_read(pmp->msg_fp, &hdr, sizeof(hdr),
1027 NULL, 1, UIO_SYSSPACE);
1030 if (hdr.magic != HAMMER2_MSGHDR_MAGIC) {
1031 kprintf("hammer2: msgrd: bad magic: %04x\n",
/* Total header size is encoded in cmd in HAMMER2_MSG_ALIGN units. */
1036 hbytes = (hdr.cmd & HAMMER2_MSGF_SIZE) * HAMMER2_MSG_ALIGN;
1037 if (hbytes < sizeof(hdr) || hbytes > HAMMER2_MSGAUX_MAX) {
1038 kprintf("hammer2: msgrd: bad header size %zd\n",
1043 msg = kmalloc(offsetof(struct hammer2_msg, any) + hbytes,
1044 pmp->mmsg, M_WAITOK | M_ZERO);
1045 msg->any.head = hdr;
1046 msg->hdr_size = hbytes;
/* Read the remainder of an extended header, if any. */
1047 if (hbytes > sizeof(hdr)) {
1048 error = fp_read(pmp->msg_fp, &msg->any.head + 1,
1049 hbytes - sizeof(hdr),
1050 NULL, 1, UIO_SYSSPACE);
1052 kprintf("hammer2: short msg received\n");
1057 msg->aux_size = hdr.aux_bytes * HAMMER2_MSG_ALIGN;
1058 if (msg->aux_size > HAMMER2_MSGAUX_MAX) {
1059 kprintf("hammer2: illegal msg payload size %zd\n",
1064 if (msg->aux_size) {
1065 msg->aux_data = kmalloc(msg->aux_size, pmp->mmsg,
1067 error = fp_read(pmp->msg_fp, msg->aux_data,
1069 NULL, 1, UIO_SYSSPACE);
1071 kprintf("hammer2: short msg "
1072 "payload received\n");
1078 * State machine tracking, state assignment for msg,
1079 * returns error and discard status. Errors are fatal
1080 * to the connection except for EALREADY which forces
1081 * a discard without execution.
1083 error = hammer2_state_msgrx(pmp, msg);
1085 hammer2_msg_free(pmp, msg);
1086 if (error == EALREADY)
1088 } else if (msg->state) {
1089 error = msg->state->func(pmp, msg);
1090 hammer2_state_cleanuprx(pmp, msg);
1092 error = hammer2_msg_adhoc_input(pmp, msg);
1093 hammer2_state_cleanuprx(pmp, msg);
1099 kprintf("hammer2: msg read failed error %d\n", error);
/* Teardown: free any partially-received message and all RX states. */
1101 lockmgr(&pmp->msglk, LK_EXCLUSIVE);
1103 if (msg->state && msg->state->msg == msg)
1104 msg->state->msg = NULL;
1105 hammer2_msg_free(pmp, msg);
1108 if ((state = pmp->freerd_state) != NULL) {
1109 pmp->freerd_state = NULL;
1110 hammer2_state_free(state);
1113 while ((state = RB_ROOT(&pmp->staterd_tree)) != NULL) {
1114 RB_REMOVE(hammer2_state_tree, &pmp->staterd_tree, state);
1115 hammer2_state_free(state);
1117 lockmgr(&pmp->msglk, LK_RELEASE);
1119 fp_shutdown(pmp->msg_fp, SHUT_RDWR);
1120 pmp->msgrd_td = NULL;
1121 /* pmp can be ripped out from under us at this point */
/*
 * Cluster messaging writer thread. Opens a long-lived SPAN transaction to
 * register this PFS with the peer, then loops draining pmp->msgq: each
 * message goes through the TX state machine and is written (header then
 * aux payload) to the messaging descriptor. On error or KILL it frees all
 * queued messages and TX states and shuts down the descriptor.
 * NOTE(review): some braces, continue/break lines and the exit wakeup are
 * not visible in this excerpt; kept byte-identical.
 */
1128 hammer2_cluster_thread_wr(void *arg)
1130 hammer2_pfsmount_t *pmp = arg;
1131 hammer2_msg_t *msg = NULL;
1132 hammer2_state_t *state;
1137 * Initiate a SPAN transaction registering our PFS with the other
1138 * end using {source}=1. The transaction is left open.
1140 msg = hammer2_msg_alloc(pmp, 1, 0,
1141 HAMMER2_LNK_SPAN | HAMMER2_MSGF_CREATE);
1142 hammer2_msg_write(pmp, msg, hammer2_msg_span_reply);
/* Main transmit loop; msglk is held except across actual writes. */
1148 lockmgr(&pmp->msglk, LK_EXCLUSIVE);
1150 while ((pmp->msg_ctl & HAMMER2_CLUSTERCTL_KILL) == 0 && error == 0) {
1151 lksleep(&pmp->msg_ctl, &pmp->msglk, 0, "msgwr", hz);
1152 while ((msg = TAILQ_FIRST(&pmp->msgq)) != NULL) {
1154 * Remove msg from the transmit queue and do
1155 * persist and half-closed state handling.
1157 TAILQ_REMOVE(&pmp->msgq, msg, qentry);
1158 lockmgr(&pmp->msglk, LK_RELEASE);
1160 error = hammer2_state_msgtx(pmp, msg);
1161 if (error == EALREADY) {
1163 hammer2_msg_free(pmp, msg);
1164 lockmgr(&pmp->msglk, LK_EXCLUSIVE);
1171 * Dump the message to the pipe or socket.
1173 error = fp_write(pmp->msg_fp, &msg->any, msg->hdr_size,
1174 &res, UIO_SYSSPACE);
/* Short write or error aborts the connection. */
1175 if (error || res != msg->hdr_size) {
1178 lockmgr(&pmp->msglk, LK_EXCLUSIVE);
1181 if (msg->aux_size) {
1182 error = fp_write(pmp->msg_fp,
1183 msg->aux_data, msg->aux_size,
1184 &res, UIO_SYSSPACE);
1185 if (error || res != msg->aux_size) {
1188 lockmgr(&pmp->msglk, LK_EXCLUSIVE);
1192 hammer2_state_cleanuptx(pmp, msg);
1193 lockmgr(&pmp->msglk, LK_EXCLUSIVE);
1198 * Cleanup messages pending transmission and release msgq lock.
1201 kprintf("hammer2: msg write failed error %d\n", error);
1204 if (msg->state && msg->state->msg == msg)
1205 msg->state->msg = NULL;
1206 hammer2_msg_free(pmp, msg);
1209 while ((msg = TAILQ_FIRST(&pmp->msgq)) != NULL) {
1210 TAILQ_REMOVE(&pmp->msgq, msg, qentry);
1211 if (msg->state && msg->state->msg == msg)
1212 msg->state->msg = NULL;
1213 hammer2_msg_free(pmp, msg);
1216 if ((state = pmp->freewr_state) != NULL) {
1217 pmp->freewr_state = NULL;
1218 hammer2_state_free(state);
1221 while ((state = RB_ROOT(&pmp->statewr_tree)) != NULL) {
1222 RB_REMOVE(hammer2_state_tree, &pmp->statewr_tree, state);
1223 hammer2_state_free(state);
1225 lockmgr(&pmp->msglk, LK_RELEASE);
1228 * Cleanup descriptor, be sure the read size is shutdown so the
1229 * (probably blocked) read operations returns an error.
1231 * pmp can be ripped out from under us once msgwr_td is set to NULL.
1233 fp_shutdown(pmp->msg_fp, SHUT_RDWR);
1234 pmp->msgwr_td = NULL;
1240 hammer2_msg_span_reply(hammer2_pfsmount_t *pmp, hammer2_msg_t *msg)
1242 kprintf("SPAN REPLY\n");