/*
 * Copyright (c) 2011-2013 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/fcntl.h>
#include <sys/buf.h>
#include <sys/vfsops.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/objcache.h>

#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/mountctl.h>
#include <sys/dirent.h>

#include <sys/mutex.h>
#include <sys/mutex2.h>

#include "hammer2.h"
#include "hammer2_disk.h"
#include "hammer2_mount.h"

#include "hammer2_lz4.h"

#include "zlib/hammer2_zlib.h"
#define REPORT_REFS_ERRORS	1	/* XXX remove me */

MALLOC_DEFINE(M_OBJCACHE, "objcache", "Object Cache");

struct hammer2_sync_info {
	hammer2_trans_t trans;
	int error;
	int waitfor;
};
TAILQ_HEAD(hammer2_mntlist, hammer2_mount);
static struct hammer2_mntlist hammer2_mntlist;
static struct lock hammer2_mntlk;
int hammer2_debug;
int hammer2_cluster_enable = 1;
int hammer2_hardlink_enable = 1;
long hammer2_iod_file_read;
long hammer2_iod_meta_read;
long hammer2_iod_indr_read;
long hammer2_iod_fmap_read;
long hammer2_iod_volu_read;
long hammer2_iod_file_write;
long hammer2_iod_meta_write;
long hammer2_iod_indr_write;
long hammer2_iod_fmap_write;
long hammer2_iod_volu_write;
long hammer2_ioa_file_read;
long hammer2_ioa_meta_read;
long hammer2_ioa_indr_read;
long hammer2_ioa_fmap_read;
long hammer2_ioa_volu_read;
long hammer2_ioa_fmap_write;
long hammer2_ioa_file_write;
long hammer2_ioa_meta_write;
long hammer2_ioa_indr_write;
long hammer2_ioa_volu_write;
MALLOC_DECLARE(C_BUFFER);
MALLOC_DEFINE(C_BUFFER, "compbuffer", "Buffer used for compression.");

MALLOC_DECLARE(D_BUFFER);
MALLOC_DEFINE(D_BUFFER, "decompbuffer", "Buffer used for decompression.");

MALLOC_DECLARE(W_BIOQUEUE);
MALLOC_DEFINE(W_BIOQUEUE, "wbioqueue", "Writing bio queue.");

MALLOC_DECLARE(W_MTX);
MALLOC_DEFINE(W_MTX, "wmutex", "Mutex for write thread.");
SYSCTL_NODE(_vfs, OID_AUTO, hammer2, CTLFLAG_RW, 0, "HAMMER2 filesystem");

SYSCTL_INT(_vfs_hammer2, OID_AUTO, debug, CTLFLAG_RW,
	   &hammer2_debug, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_enable, CTLFLAG_RW,
	   &hammer2_cluster_enable, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, hardlink_enable, CTLFLAG_RW,
	   &hammer2_hardlink_enable, 0, "");

SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_read, CTLFLAG_RW,
	    &hammer2_iod_file_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_read, CTLFLAG_RW,
	    &hammer2_iod_meta_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_read, CTLFLAG_RW,
	    &hammer2_iod_indr_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_fmap_read, CTLFLAG_RW,
	    &hammer2_iod_fmap_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_read, CTLFLAG_RW,
	    &hammer2_iod_volu_read, 0, "");

SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_write, CTLFLAG_RW,
	    &hammer2_iod_file_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_write, CTLFLAG_RW,
	    &hammer2_iod_meta_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_write, CTLFLAG_RW,
	    &hammer2_iod_indr_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_fmap_write, CTLFLAG_RW,
	    &hammer2_iod_fmap_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_write, CTLFLAG_RW,
	    &hammer2_iod_volu_write, 0, "");

SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_file_read, CTLFLAG_RW,
	    &hammer2_ioa_file_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_meta_read, CTLFLAG_RW,
	    &hammer2_ioa_meta_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_indr_read, CTLFLAG_RW,
	    &hammer2_ioa_indr_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_fmap_read, CTLFLAG_RW,
	    &hammer2_ioa_fmap_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_volu_read, CTLFLAG_RW,
	    &hammer2_ioa_volu_read, 0, "");

SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_file_write, CTLFLAG_RW,
	    &hammer2_ioa_file_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_meta_write, CTLFLAG_RW,
	    &hammer2_ioa_meta_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_indr_write, CTLFLAG_RW,
	    &hammer2_ioa_indr_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_fmap_write, CTLFLAG_RW,
	    &hammer2_ioa_fmap_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_volu_write, CTLFLAG_RW,
	    &hammer2_ioa_volu_write, 0, "");
static int hammer2_vfs_init(struct vfsconf *conf);
static int hammer2_vfs_uninit(struct vfsconf *vfsp);
static int hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
				struct ucred *cred);
static int hammer2_remount(hammer2_mount_t *, char *, struct vnode *,
				struct ucred *);
static int hammer2_vfs_unmount(struct mount *mp, int mntflags);
static int hammer2_vfs_root(struct mount *mp, struct vnode **vpp);
static int hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp,
				struct ucred *cred);
static int hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
				struct ucred *cred);
static int hammer2_vfs_sync(struct mount *mp, int waitfor);
static int hammer2_vfs_vget(struct mount *mp, struct vnode *dvp,
				ino_t ino, struct vnode **vpp);
static int hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
				struct fid *fhp, struct vnode **vpp);
static int hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp);
static int hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
				int *exflagsp, struct ucred **credanonp);

static int hammer2_install_volume_header(hammer2_mount_t *hmp);
static int hammer2_sync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int hammer2_sync_scan2(struct mount *mp, struct vnode *vp, void *data);

static void hammer2_write_thread(void *arg);
/*
 * Functions for compression in threads,
 * from hammer2_vnops.c
 */
static void hammer2_write_file_core(struct buf *bp, hammer2_trans_t *trans,
				hammer2_inode_t *ip,
				hammer2_inode_data_t *ipdata,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase, int ioflag, int pblksize,
				int *errorp);
static void hammer2_compress_and_write(struct buf *bp, hammer2_trans_t *trans,
				hammer2_inode_t *ip,
				hammer2_inode_data_t *ipdata,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase, int ioflag,
				int pblksize, int *errorp, int comp_algo);
static void hammer2_zero_check_and_write(struct buf *bp,
				hammer2_trans_t *trans, hammer2_inode_t *ip,
				hammer2_inode_data_t *ipdata,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase,
				int ioflag, int pblksize, int *errorp);
static int test_block_zeros(const char *buf, size_t bytes);
static void zero_write(struct buf *bp, hammer2_trans_t *trans,
				hammer2_inode_t *ip,
				hammer2_inode_data_t *ipdata,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase,
				int *errorp);
static void hammer2_write_bp(hammer2_chain_t *chain, struct buf *bp,
				int ioflag, int pblksize, int *errorp);

static int hammer2_rcvdmsg(kdmsg_msg_t *msg);
static void hammer2_autodmsg(kdmsg_msg_t *msg);
/*
 * HAMMER2 vfs operations.
 */
static struct vfsops hammer2_vfsops = {
	.vfs_init	= hammer2_vfs_init,
	.vfs_uninit	= hammer2_vfs_uninit,
	.vfs_sync	= hammer2_vfs_sync,
	.vfs_mount	= hammer2_vfs_mount,
	.vfs_unmount	= hammer2_vfs_unmount,
	.vfs_root 	= hammer2_vfs_root,
	.vfs_statfs	= hammer2_vfs_statfs,
	.vfs_statvfs	= hammer2_vfs_statvfs,
	.vfs_vget	= hammer2_vfs_vget,
	.vfs_vptofh	= hammer2_vfs_vptofh,
	.vfs_fhtovp	= hammer2_vfs_fhtovp,
	.vfs_checkexp	= hammer2_vfs_checkexp
};

MALLOC_DEFINE(M_HAMMER2, "HAMMER2-mount", "");

VFS_SET(hammer2_vfsops, hammer2, 0);
MODULE_VERSION(hammer2, 1);
static
int
hammer2_vfs_init(struct vfsconf *conf)
{
	static struct objcache_malloc_args margs_read;
	static struct objcache_malloc_args margs_write;
	int error;

	error = 0;

	if (HAMMER2_BLOCKREF_BYTES != sizeof(struct hammer2_blockref))
		error = EINVAL;
	if (HAMMER2_INODE_BYTES != sizeof(struct hammer2_inode_data))
		error = EINVAL;
	if (HAMMER2_VOLUME_BYTES != sizeof(struct hammer2_volume_data))
		error = EINVAL;

	if (error)
		kprintf("HAMMER2 structure size mismatch; cannot continue.\n");

	margs_read.objsize = 65536;
	margs_read.mtype = D_BUFFER;

	margs_write.objsize = 32768;
	margs_write.mtype = C_BUFFER;
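
	/*
	 * NOTE: The read (decompression) cache must hold a fully expanded
	 *	 64KB logical block, while the write cache only ever holds
	 *	 compressed output, which hammer2_compress_and_write()
	 *	 caps at half a block (32KB).
	 */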
	cache_buffer_read = objcache_create(margs_read.mtype->ks_shortdesc,
				0, 1, NULL, NULL, NULL, objcache_malloc_alloc,
				objcache_malloc_free, &margs_read);
	cache_buffer_write = objcache_create(margs_write.mtype->ks_shortdesc,
				0, 1, NULL, NULL, NULL, objcache_malloc_alloc,
				objcache_malloc_free, &margs_write);

	lockinit(&hammer2_mntlk, "mntlk", 0, 0);
	TAILQ_INIT(&hammer2_mntlist);

	return (error);
}

static
int
hammer2_vfs_uninit(struct vfsconf *vfsp __unused)
{
	objcache_destroy(cache_buffer_read);
	objcache_destroy(cache_buffer_write);
	return 0;
}
/*
 * Mount or remount HAMMER2 filesystem from physical media
 *
 *	mountroot
 *		mp		mount point structure
 *		path		NULL
 *		data		<unused>
 *		cred		<unused>
 *
 *	mount
 *		mp		mount point structure
 *		path		path to mount point
 *		data		pointer to argument structure in user space
 *			volume	volume path (device@LABEL form)
 *			hflags	user mount flags
 *		cred		user credentials
 *
 * RETURNS:	0	Success
 *		!0	error number
 */
static
int
hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
		  struct ucred *cred)
{
	struct hammer2_mount_info info;
	hammer2_pfsmount_t *pmp;
	hammer2_mount_t *hmp;
	hammer2_key_t key_next;
	hammer2_key_t key_dummy;
	hammer2_key_t lhc;
	struct vnode *devvp;
	struct nlookupdata nd;
	hammer2_chain_t *parent;
	hammer2_chain_t *schain;
	hammer2_chain_t *rchain;
	struct file *fp;
	char devstr[MNAMELEN];
	size_t size;
	size_t done;
	char *dev;
	char *label;
	int ronly = 1;
	int error;
	int cache_index = -1;
	int i;

	hmp = NULL;
	pmp = NULL;
	dev = NULL;
	label = NULL;
	devvp = NULL;

	kprintf("hammer2_mount\n");

	if (path == NULL) {
		/*
		 * Root mount (not yet supported)
		 */
		bzero(&info, sizeof(info));
		info.cluster_fd = -1;
		return (EOPNOTSUPP);
	}

	/*
	 * Non-root mount or updating a mount
	 */
	error = copyin(data, &info, sizeof(info));
	if (error)
		return (error);
	error = copyinstr(info.volume, devstr, MNAMELEN - 1, &done);
	if (error)
		return (error);

	/* Extract device and label */
	dev = devstr;
	label = strchr(devstr, '@');
	if (label == NULL ||
	    ((label + 1) - dev) > done) {
		return (EINVAL);
	}
	*label++ = '\0';

	if (mp->mnt_flag & MNT_UPDATE) {
		/* HAMMER2 implements NFS export via mountctl */
		pmp = MPTOPMP(mp);
		for (i = 0; i < pmp->cluster.nchains; ++i) {
			hmp = pmp->cluster.chains[i]->hmp;
			devvp = hmp->devvp;
			error = hammer2_remount(hmp, path, devvp, cred);
			if (error)
				break;
		}
		return error;
	}
	/*
	 * Lookup name and verify it refers to a block device.
	 */
	error = nlookup_init(&nd, dev, UIO_SYSSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = cache_vref(&nd.nl_nch, nd.nl_cred, &devvp);
	nlookup_done(&nd);

	if (error == 0) {
		if (vn_isdisk(devvp, &error))
			error = vfs_mountedon(devvp);
	}

	/*
	 * Determine if the device has already been mounted.  After this
	 * check hmp will be non-NULL if this is the second or subsequent
	 * hammer2 mount from the same device.
	 */
	lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);
	TAILQ_FOREACH(hmp, &hammer2_mntlist, mntentry) {
		if (hmp->devvp == devvp)
			break;
	}

	/*
	 * Open the device if this isn't a secondary mount and construct
	 * the H2 device mount (hmp).
	 */
	if (hmp == NULL) {
		if (error == 0 && vcount(devvp) > 0)
			error = EBUSY;

		/*
		 * Now open the device
		 */
		if (error == 0) {
			ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);
			vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
			error = vinvalbuf(devvp, V_SAVE, 0, 0);
			if (error == 0) {
				error = VOP_OPEN(devvp,
						 ronly ? FREAD : FREAD | FWRITE,
						 FSCRED, NULL);
			}
			vn_unlock(devvp);
		}
		if (error && devvp) {
			vrele(devvp);
			devvp = NULL;
		}
		if (error) {
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			return error;
		}
		hmp = kmalloc(sizeof(*hmp), M_HAMMER2, M_WAITOK | M_ZERO);
		hmp->ronly = ronly;
		hmp->devvp = devvp;
		kmalloc_create(&hmp->mchain, "HAMMER2-chains");
		TAILQ_INSERT_TAIL(&hammer2_mntlist, hmp, mntentry);

		lockinit(&hmp->alloclk, "h2alloc", 0, 0);
		lockinit(&hmp->voldatalk, "voldata", 0, LK_CANRECURSE);
		TAILQ_INIT(&hmp->transq);

		/*
		 * vchain setup. vchain.data is embedded.
		 * vchain.refs is initialized and will never drop to 0.
		 */
		hmp->vchain.hmp = hmp;
		hmp->vchain.refs = 1;
		hmp->vchain.data = (void *)&hmp->voldata;
		hmp->vchain.bref.type = HAMMER2_BREF_TYPE_VOLUME;
		hmp->vchain.bref.data_off = 0 | HAMMER2_PBUFRADIX;
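		/*
		 * NOTE: The low bits of bref.data_off encode the radix of
		 *	 the referenced block (size = 1 << radix), which is
		 *	 why a zero offset is still OR'd with
		 *	 HAMMER2_PBUFRADIX here (see the
		 *	 ~HAMMER2_OFF_MASK_RADIX masking in
		 *	 hammer2_assign_physical()).
		 */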
		hmp->vchain.delete_tid = HAMMER2_MAX_TID;
		hammer2_chain_core_alloc(NULL, &hmp->vchain, NULL);
		/* hmp->vchain.u.xxx is left NULL */

		/*
		 * fchain setup.  fchain.data is embedded.
		 * fchain.refs is initialized and will never drop to 0.
		 *
		 * The data is not used but needs to be initialized to
		 * pass assertion muster.  We use this chain primarily
		 * as a placeholder for the freemap's top-level RBTREE
		 * so it does not interfere with the volume's topology
		 * RBTREE.
		 */
		hmp->fchain.hmp = hmp;
		hmp->fchain.refs = 1;
		hmp->fchain.data = (void *)&hmp->voldata.freemap_blockset;
		hmp->fchain.bref.type = HAMMER2_BREF_TYPE_FREEMAP;
		hmp->fchain.bref.data_off = 0 | HAMMER2_PBUFRADIX;
		hmp->fchain.bref.methods =
			HAMMER2_ENC_CHECK(HAMMER2_CHECK_FREEMAP) |
			HAMMER2_ENC_COMP(HAMMER2_COMP_NONE);
		hmp->fchain.delete_tid = HAMMER2_MAX_TID;

		hammer2_chain_core_alloc(NULL, &hmp->fchain, NULL);
		/* hmp->fchain.u.xxx is left NULL */
		/*
		 * Install the volume header
		 */
		error = hammer2_install_volume_header(hmp);
		if (error) {
			hammer2_vfs_unmount(mp, MNT_FORCE);
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			return error;
		}

		/*
		 * First locate the super-root inode, which is key 0
		 * relative to the volume header's blockset.
		 *
		 * Then locate the root inode by scanning the directory keyspace
		 * represented by the label.
		 */
		parent = hammer2_chain_lookup_init(&hmp->vchain, 0);
		schain = hammer2_chain_lookup(&parent, &key_dummy,
				      HAMMER2_SROOT_KEY, HAMMER2_SROOT_KEY,
				      &cache_index, 0);
		hammer2_chain_lookup_done(parent);
		if (schain == NULL) {
			kprintf("hammer2_mount: invalid super-root\n");
			hammer2_vfs_unmount(mp, MNT_FORCE);
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			return EINVAL;
		}

		/*
		 * NOTE: inode_get sucks up schain's lock.
		 */
		atomic_set_int(&schain->flags, HAMMER2_CHAIN_PFSROOT);
		hmp->sroot = hammer2_inode_get(NULL, NULL, schain);
		hammer2_inode_ref(hmp->sroot);
		hammer2_inode_unlock_ex(hmp->sroot, schain);
		schain = NULL;
		/* leave hmp->sroot with one ref */

		mtx_init(&hmp->wthread_mtx);
		bioq_init(&hmp->wthread_bioq);
		hmp->wthread_destroy = 0;
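
		/*
		 * NOTE: wthread_destroy is a tri-state handshake: 0 while
		 *	 the write thread runs, 1 when unmount requests
		 *	 termination, and -1 once the thread has exited
		 *	 (see hammer2_write_thread() and
		 *	 hammer2_vfs_unmount()).
		 */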
		lwkt_create(hammer2_write_thread, hmp,
			    NULL, NULL, 0, -1, "hammer2-write");
	}
	/*
	 * Block device opened successfully, finish initializing the
	 * mount structure.
	 *
	 * From this point on we have to call hammer2_unmount() on failure.
	 */
	pmp = kmalloc(sizeof(*pmp), M_HAMMER2, M_WAITOK | M_ZERO);

	kmalloc_create(&pmp->minode, "HAMMER2-inodes");
	kmalloc_create(&pmp->mmsg, "HAMMER2-pfsmsg");

	spin_init(&pmp->inum_spin);
	RB_INIT(&pmp->inum_tree);

	kdmsg_iocom_init(&pmp->iocom, pmp,
			 KDMSG_IOCOMF_AUTOCONN |
			 KDMSG_IOCOMF_AUTOSPAN |
			 KDMSG_IOCOMF_AUTOCIRC,
			 pmp->mmsg, hammer2_rcvdmsg);

	ccms_domain_init(&pmp->ccms_dom);
	++hmp->pmp_count;
	lockmgr(&hammer2_mntlk, LK_RELEASE);
	kprintf("hammer2_mount hmp=%p pmp=%p pmpcnt=%d\n",
		hmp, pmp, hmp->pmp_count);

	mp->mnt_flag = MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;	/* all entry pts are SMP */
	/*
	 * required mount structure initializations
	 */
	mp->mnt_stat.f_iosize = HAMMER2_PBUFSIZE;
	mp->mnt_stat.f_bsize = HAMMER2_PBUFSIZE;

	mp->mnt_vstat.f_frsize = HAMMER2_PBUFSIZE;
	mp->mnt_vstat.f_bsize = HAMMER2_PBUFSIZE;

	/*
	 * Optional fields
	 */
	mp->mnt_iosize_max = MAXPHYS;
	mp->mnt_data = (qaddr_t)pmp;
	pmp->mp = mp;
	/*
	 * Lookup mount point under the media-localized super-root.
	 */
	parent = hammer2_inode_lock_ex(hmp->sroot);
	lhc = hammer2_dirhash(label, strlen(label));
	rchain = hammer2_chain_lookup(&parent, &key_next,
				      lhc, lhc + HAMMER2_DIRHASH_LOMASK,
				      &cache_index, 0);
	while (rchain) {
		if (rchain->bref.type == HAMMER2_BREF_TYPE_INODE &&
		    strcmp(label, rchain->data->ipdata.filename) == 0) {
			break;
		}
		rchain = hammer2_chain_next(&parent, rchain, &key_next,
					    key_next,
					    lhc + HAMMER2_DIRHASH_LOMASK,
					    &cache_index, 0);
	}
	hammer2_inode_unlock_ex(hmp->sroot, parent);

	if (rchain == NULL) {
		kprintf("hammer2_mount: PFS label not found\n");
		--hmp->pmp_count;
		hammer2_vfs_unmount(mp, MNT_FORCE);
		return EINVAL;
	}
	if (rchain->flags & HAMMER2_CHAIN_MOUNTED) {
		hammer2_chain_unlock(rchain);
		kprintf("hammer2_mount: PFS label already mounted!\n");
		--hmp->pmp_count;
		hammer2_vfs_unmount(mp, MNT_FORCE);
		return EBUSY;
	}
	if (rchain->flags & HAMMER2_CHAIN_RECYCLE) {
		kprintf("hammer2_mount: PFS label currently recycling\n");
		hammer2_chain_unlock(rchain);
		hammer2_vfs_unmount(mp, MNT_FORCE);
		return EBUSY;
	}

	atomic_set_int(&rchain->flags, HAMMER2_CHAIN_MOUNTED);

	/*
	 * NOTE: *_get() integrates chain's lock into the inode lock.
	 */
	hammer2_chain_ref(rchain);		/* for pmp->rchain */
	pmp->cluster.nchains = 1;
	pmp->cluster.chains[0] = rchain;
	pmp->iroot = hammer2_inode_get(pmp, NULL, rchain);
	hammer2_inode_ref(pmp->iroot);		/* ref for pmp->iroot */

	KKASSERT(rchain->pmp == NULL);		/* tracking pmp for rchain */
	rchain->pmp = pmp;
	atomic_add_long(&pmp->inmem_chains, 1);

	hammer2_inode_unlock_ex(pmp->iroot, rchain);

	kprintf("iroot %p\n", pmp->iroot);
	/*
	 * Ref the cluster management messaging descriptor.  The mount
	 * program deals with the other end of the communications pipe.
	 */
	fp = holdfp(curproc->p_fd, info.cluster_fd, -1);
	if (fp == NULL) {
		kprintf("hammer2_mount: bad cluster_fd!\n");
		hammer2_vfs_unmount(mp, MNT_FORCE);
		return EBADF;
	}
	hammer2_cluster_reconnect(pmp, fp);

	/*
	 * Finish setup
	 */
	vfs_getnewfsid(mp);
	vfs_add_vnodeops(mp, &hammer2_vnode_vops, &mp->mnt_vn_norm_ops);
	vfs_add_vnodeops(mp, &hammer2_spec_vops, &mp->mnt_vn_spec_ops);
	vfs_add_vnodeops(mp, &hammer2_fifo_vops, &mp->mnt_vn_fifo_ops);

	copyinstr(info.volume, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, &size);
	bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	bzero(mp->mnt_stat.f_mntonname, sizeof(mp->mnt_stat.f_mntonname));
	copyinstr(path, mp->mnt_stat.f_mntonname,
		  sizeof(mp->mnt_stat.f_mntonname) - 1,
		  &size);

	/*
	 * Initial statfs to prime mnt_stat.
	 */
	hammer2_vfs_statfs(mp, &mp->mnt_stat, cred);

	return 0;
}
/*
 * Handle bioq for strategy write
 */
static
void
hammer2_write_thread(void *arg)
{
	hammer2_mount_t *hmp;
	struct bio *bio;
	struct buf *bp;
	hammer2_trans_t trans;
	struct vnode *vp;
	hammer2_inode_t *last_ip;
	hammer2_inode_t *ip;
	hammer2_chain_t *parent;
	hammer2_chain_t **parentp;
	hammer2_inode_data_t *ipdata;
	hammer2_key_t lbase;
	int lblksize;
	int pblksize;
	int error;

	hmp = arg;
	last_ip = NULL;
	mtx_lock(&hmp->wthread_mtx);
	while (hmp->wthread_destroy == 0) {
		if (bioq_first(&hmp->wthread_bioq) == NULL) {
			mtxsleep(&hmp->wthread_bioq, &hmp->wthread_mtx,
				 0, "h2bioqw", 0);
		}
		parent = NULL;
		parentp = &parent;

		while ((bio = bioq_takefirst(&hmp->wthread_bioq)) != NULL) {
			mtx_unlock(&hmp->wthread_mtx);

			error = 0;
			bp = bio->bio_buf;
			vp = bp->b_vp;
			ip = VTOI(vp);

			/*
			 * Cache transaction for multi-buffer flush efficiency.
			 * Lock the ip separately for each buffer to allow
			 * interleaving with frontend writes.
			 */
			if (last_ip != ip) {
				if (last_ip)
					hammer2_trans_done(&trans);
				hammer2_trans_init(&trans, ip->pmp,
						   HAMMER2_TRANS_BUFCACHE);
				last_ip = ip;
			}
			parent = hammer2_inode_lock_ex(ip);

			/*
			 * Inode is modified, flush size and mtime changes
			 * to ensure that the file size remains consistent
			 * with the buffers being flushed.
			 */
			if (ip->flags & (HAMMER2_INODE_RESIZED |
					 HAMMER2_INODE_MTIME)) {
				hammer2_inode_fsync(&trans, ip, parentp);
			}
			ipdata = hammer2_chain_modify_ip(&trans, ip,
							 parentp, 0);
			lblksize = hammer2_calc_logical(ip, bio->bio_offset,
							&lbase, NULL);
			pblksize = hammer2_calc_physical(ip, lbase);
			hammer2_write_file_core(bp, &trans, ip, ipdata,
						parentp, lbase,
						IO_ASYNC, pblksize,
						&error);
			hammer2_inode_unlock_ex(ip, parent);
			if (error) {
				kprintf("hammer2: error in buffer write\n");
				bp->b_flags |= B_ERROR;
				bp->b_error = EIO;
			}
			biodone(bio);
			mtx_lock(&hmp->wthread_mtx);
		}

		/*
		 * Clean out transaction cache
		 */
		if (last_ip) {
			hammer2_trans_done(&trans);
			last_ip = NULL;
		}
	}
	hmp->wthread_destroy = -1;
	wakeup(&hmp->wthread_destroy);

	mtx_unlock(&hmp->wthread_mtx);
}
/*
 * Return a chain suitable for I/O, creating the chain if necessary
 * and assigning its physical block.
 */
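/*
 * NOTE: The chain is returned locked; on error *errorp is set and the
 *	 chain may be NULL.  The callers below unlock it once the data
 *	 has been copied into the device buffer.
 */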
static
hammer2_chain_t *
hammer2_assign_physical(hammer2_trans_t *trans,
			hammer2_inode_t *ip, hammer2_chain_t **parentp,
			hammer2_key_t lbase, int pblksize, int *errorp)
{
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_off_t pbase;
	hammer2_key_t key_dummy;
	int pradix = hammer2_getradix(pblksize);
	int cache_index = -1;

	/*
	 * Locate the chain associated with lbase, return a locked chain.
	 * However, do not instantiate any data reference (which utilizes a
	 * device buffer) because we will be using direct IO via the
	 * logical buffer cache buffer.
	 */
	*errorp = 0;
	KKASSERT(pblksize >= HAMMER2_MIN_ALLOC);
	parent = *parentp;
	hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS); /* extra lock */
	chain = hammer2_chain_lookup(&parent, &key_dummy,
				     lbase, lbase,
				     &cache_index, HAMMER2_LOOKUP_NODATA);

	if (chain == NULL) {
		/*
		 * We found a hole, create a new chain entry.
		 *
		 * NOTE: DATA chains are created without device backing
		 *	 store (nor do we want any).
		 */
		*errorp = hammer2_chain_create(trans, &parent, &chain,
					       lbase, HAMMER2_PBUFRADIX,
					       HAMMER2_BREF_TYPE_DATA,
					       pblksize);
		if (*errorp) {
			hammer2_chain_lookup_done(parent);
			panic("hammer2_chain_create: par=%p error=%d\n",
			      parent, *errorp);
			/* NOT REACHED */
		}

		pbase = chain->bref.data_off & ~HAMMER2_OFF_MASK_RADIX;
		/*ip->delta_dcount += pblksize;*/
	} else {
		switch (chain->bref.type) {
		case HAMMER2_BREF_TYPE_INODE:
			/*
			 * The data is embedded in the inode.  The
			 * caller is responsible for marking the inode
			 * modified and copying the data to the embedded
			 * area.
			 */
			pbase = NOOFFSET;
			break;
		case HAMMER2_BREF_TYPE_DATA:
			if (chain->bytes != pblksize) {
				hammer2_chain_resize(trans, ip,
						     parent, &chain, pradix,
						     HAMMER2_MODIFY_OPTDATA);
			}
			hammer2_chain_modify(trans, &chain,
					     HAMMER2_MODIFY_OPTDATA);
			pbase = chain->bref.data_off & ~HAMMER2_OFF_MASK_RADIX;
			break;
		default:
			panic("hammer2_assign_physical: bad type");
			/* NOT REACHED */
			pbase = NOOFFSET;
			break;
		}
	}

	/*
	 * Cleanup.  If chain wound up being the inode (i.e. DIRECTDATA),
	 * we might have to replace *parentp.
	 */
	hammer2_chain_lookup_done(parent);
	if (chain) {
		if (*parentp != chain &&
		    (*parentp)->core == chain->core) {
			parent = *parentp;
			*parentp = chain;		/* eats lock */
			hammer2_chain_unlock(parent);
			hammer2_chain_lock(chain, 0);	/* need another */
		}
		/* else chain already locked for return */
	}
	return (chain);
}
/*
 * From hammer2_vnops.c.
 * The core write function which determines which path to take
 * depending on compression settings.
 */
static
void
hammer2_write_file_core(struct buf *bp, hammer2_trans_t *trans,
			hammer2_inode_t *ip, hammer2_inode_data_t *ipdata,
			hammer2_chain_t **parentp,
			hammer2_key_t lbase, int ioflag, int pblksize,
			int *errorp)
{
	hammer2_chain_t *chain;

	switch(HAMMER2_DEC_COMP(ipdata->comp_algo)) {
	case HAMMER2_COMP_NONE:
		/*
		 * We have to assign physical storage to the buffer
		 * we intend to dirty or write now to avoid deadlocks
		 * in the strategy code later.
		 *
		 * This can return NOOFFSET for inode-embedded data.
		 * The strategy code will take care of it in that case.
		 */
		chain = hammer2_assign_physical(trans, ip, parentp,
						lbase, pblksize,
						errorp);
		hammer2_write_bp(chain, bp, ioflag, pblksize, errorp);
		if (chain)
			hammer2_chain_unlock(chain);
		break;
	case HAMMER2_COMP_AUTOZERO:
		/*
		 * Check for zero-fill only
		 */
		hammer2_zero_check_and_write(bp, trans, ip,
					     ipdata, parentp, lbase,
					     ioflag, pblksize, errorp);
		break;
	case HAMMER2_COMP_LZ4:
	case HAMMER2_COMP_ZLIB:
	default:
		/*
		 * Check for zero-fill and attempt compression.
		 */
		hammer2_compress_and_write(bp, trans, ip,
					   ipdata, parentp,
					   lbase, ioflag,
					   pblksize, errorp,
					   ipdata->comp_algo);
		break;
	}
	ipdata = &ip->chain->data->ipdata;	/* reload */
}
/*
 * From hammer2_vnops.c
 * Generic function that performs the compression in the compression
 * write path.  The compression algorithm is determined by the settings
 * obtained from the inode.
 */
static
void
hammer2_compress_and_write(struct buf *bp, hammer2_trans_t *trans,
	hammer2_inode_t *ip, hammer2_inode_data_t *ipdata,
	hammer2_chain_t **parentp,
	hammer2_key_t lbase, int ioflag, int pblksize,
	int *errorp, int comp_algo)
{
	hammer2_chain_t *chain;
	int comp_size;
	int comp_block_size;
	char *comp_buffer;

	if (test_block_zeros(bp->b_data, pblksize)) {
		zero_write(bp, trans, ip, ipdata, parentp, lbase, errorp);
		return;
	}

	comp_size = 0;
	comp_buffer = NULL;

	KKASSERT(pblksize / 2 <= 32768);
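
	/*
	 * NOTE: A compressed result must fit in half the physical block
	 *	 or compression is abandoned, which also keeps the result
	 *	 within the 32KB compbuffer objcache sizing established
	 *	 in hammer2_vfs_init().
	 *
	 * NOTE: comp_heuristic throttles wasted effort on incompressible
	 *	 data: it is reset to 0 on success and incremented (capped
	 *	 at 128) on failure; once it reaches 8, only every 8th
	 *	 block is test-compressed again.
	 */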
	if (ip->comp_heuristic < 8 || (ip->comp_heuristic & 7) == 0) {
		z_stream strm_compress;
		int comp_level;
		int ret;

		switch(HAMMER2_DEC_COMP(comp_algo)) {
		case HAMMER2_COMP_LZ4:
			comp_buffer = objcache_get(cache_buffer_write,
						   M_INTWAIT);
			comp_size = LZ4_compress_limitedOutput(
					bp->b_data,
					&comp_buffer[sizeof(int)],
					pblksize,
					pblksize / 2 - sizeof(int));
			/*
			 * We need to prefix with the size, LZ4
			 * doesn't do it for us.  Add the related
			 * overhead.
			 */
			*(int *)comp_buffer = comp_size;
			if (comp_size)
				comp_size += sizeof(int);
			break;
		case HAMMER2_COMP_ZLIB:
			comp_level = HAMMER2_DEC_LEVEL(comp_algo);
			if (comp_level == 0)
				comp_level = 6;	/* default zlib compression */
			else if (comp_level < 6)
				comp_level = 6;
			else if (comp_level > 9)
				comp_level = 9;
			ret = deflateInit(&strm_compress, comp_level);
			if (ret != Z_OK) {
				kprintf("HAMMER2 ZLIB: fatal error "
					"on deflateInit.\n");
			}

			comp_buffer = objcache_get(cache_buffer_write,
						   M_INTWAIT);
			strm_compress.next_in = bp->b_data;
			strm_compress.avail_in = pblksize;
			strm_compress.next_out = comp_buffer;
			strm_compress.avail_out = pblksize / 2;
			ret = deflate(&strm_compress, Z_FINISH);
			if (ret == Z_STREAM_END) {
				comp_size = pblksize / 2 -
					    strm_compress.avail_out;
			} else {
				comp_size = 0;
			}
			ret = deflateEnd(&strm_compress);
			break;
		default:
			kprintf("Error: Unknown compression method.\n");
			kprintf("Comp_method = %d.\n", comp_algo);
			break;
		}
	}
	if (comp_size == 0) {
		/*
		 * compression failed or turned off
		 */
		comp_block_size = pblksize;	/* safety */
		if (++ip->comp_heuristic > 128)
			ip->comp_heuristic = 8;
	} else {
		/*
		 * compression succeeded
		 */
		ip->comp_heuristic = 0;
		if (comp_size <= 1024) {
			comp_block_size = 1024;
		} else if (comp_size <= 2048) {
			comp_block_size = 2048;
		} else if (comp_size <= 4096) {
			comp_block_size = 4096;
		} else if (comp_size <= 8192) {
			comp_block_size = 8192;
		} else if (comp_size <= 16384) {
			comp_block_size = 16384;
		} else if (comp_size <= 32768) {
			comp_block_size = 32768;
		} else {
			panic("hammer2: WRITE PATH: "
			      "Weird comp_size value.");
			/* NOT REACHED */
			comp_block_size = pblksize;
		}
	}
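
	/*
	 * The compressed result is rounded up to the next power of two
	 * (1KB minimum), presumably to match the allocator's power-of-2
	 * granularity; anything larger than 32KB falls back to the
	 * uncompressed physical block size.
	 */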
	chain = hammer2_assign_physical(trans, ip, parentp,
					lbase, comp_block_size,
					errorp);
	ipdata = &ip->chain->data->ipdata;	/* RELOAD */

	if (*errorp) {
		kprintf("WRITE PATH: An error occurred while "
			"assigning physical space.\n");
		KKASSERT(chain == NULL);
	} else {
		/* Get device offset */
		hammer2_off_t pbase;
		hammer2_off_t pmask;
		hammer2_off_t peof;
		size_t boff;
		size_t psize;
		struct buf *dbp;
		int temp_check;

		KKASSERT(chain->flags & HAMMER2_CHAIN_MODIFIED);

		switch(chain->bref.type) {
		case HAMMER2_BREF_TYPE_INODE:
			KKASSERT(chain->data->ipdata.op_flags &
				 HAMMER2_OPFLAG_DIRECTDATA);
			KKASSERT(bp->b_loffset == 0);
			bcopy(bp->b_data, chain->data->ipdata.u.data,
			      HAMMER2_EMBEDDED_BYTES);
			break;
		case HAMMER2_BREF_TYPE_DATA:
			psize = hammer2_devblksize(chain->bytes);
			pmask = (hammer2_off_t)psize - 1;
			pbase = chain->bref.data_off & ~pmask;
			boff = chain->bref.data_off &
			       (HAMMER2_OFF_MASK & pmask);
			peof = (pbase + HAMMER2_SEGMASK64) &
			       ~HAMMER2_SEGMASK64;
			temp_check = HAMMER2_DEC_CHECK(chain->bref.methods);

			/*
			 * Optimize out the read-before-write
			 * if possible.
			 */
			if (comp_block_size == psize) {
				dbp = getblk(chain->hmp->devvp, pbase,
					     psize, 0, 0);
			} else {
				*errorp = bread(chain->hmp->devvp,
						pbase, psize, &dbp);
				if (*errorp) {
					kprintf("hammer2: WRITE PATH: "
						"dbp bread error\n");
					break;
				}
			}

			/*
			 * When loading the block make sure we don't
			 * leave garbage after the compressed data.
			 */
			if (comp_size) {
				chain->bref.methods =
					HAMMER2_ENC_COMP(comp_algo) +
					HAMMER2_ENC_CHECK(temp_check);
				bcopy(comp_buffer, dbp->b_data + boff,
				      comp_size);
				if (comp_size != comp_block_size) {
					bzero(dbp->b_data + boff + comp_size,
					      comp_block_size - comp_size);
				}
			} else {
				chain->bref.methods =
					HAMMER2_ENC_COMP(
						HAMMER2_COMP_NONE) +
					HAMMER2_ENC_CHECK(temp_check);
				bcopy(bp->b_data, dbp->b_data + boff,
				      pblksize);
			}

			/*
			 * Device buffer is now valid, chain is no
			 * longer in the initial state.
			 */
			atomic_clear_int(&chain->flags,
					 HAMMER2_CHAIN_INITIAL);

			/* Now write the related bdp. */
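			/*
			 * The dirty device buffer is disposed of according
			 * to the caller's intent: synchronously (bwrite),
			 * asynchronously (bawrite), clustered
			 * (cluster_write), or delayed (bdwrite).
			 *
			 * XXX the IO_DIRECT test below references loff/n,
			 *     which look like leftovers from the
			 *     hammer2_vnops.c version of this ladder.
			 */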
			if (ioflag & IO_SYNC) {
				/*
				 * Synchronous I/O requested.
				 */
				bwrite(dbp);
			} else if ((ioflag & IO_DIRECT) &&
				   loff + n == pblksize) {
				bdwrite(dbp);
			} else if (ioflag & IO_ASYNC) {
				bawrite(dbp);
			} else if (hammer2_cluster_enable) {
				cluster_write(dbp, peof,
					      HAMMER2_PBUFSIZE,
					      4/*XXX*/);
			} else {
				bdwrite(dbp);
			}
			break;
		default:
			panic("hammer2_write_bp: bad chain type %d\n",
			      chain->bref.type);
			/* NOT REACHED */
			break;
		}

		hammer2_chain_unlock(chain);
	}
	if (comp_buffer)
		objcache_put(cache_buffer_write, comp_buffer);
}
/*
 * Function that performs zero-checking and writing without compression;
 * this is the default zero-checking path.
 */
static
void
hammer2_zero_check_and_write(struct buf *bp, hammer2_trans_t *trans,
	hammer2_inode_t *ip, hammer2_inode_data_t *ipdata,
	hammer2_chain_t **parentp,
	hammer2_key_t lbase, int ioflag, int pblksize, int *errorp)
{
	hammer2_chain_t *chain;

	if (test_block_zeros(bp->b_data, pblksize)) {
		zero_write(bp, trans, ip, ipdata, parentp, lbase, errorp);
	} else {
		chain = hammer2_assign_physical(trans, ip, parentp,
						lbase, pblksize, errorp);
		hammer2_write_bp(chain, bp, ioflag, pblksize, errorp);
		if (chain)
			hammer2_chain_unlock(chain);
	}
}
/*
 * A function to test whether a block of data contains only zeros,
 * returns TRUE (non-zero) if the block is all zeros.
 */
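/*
 * NOTE: The buffers handed in here are logical file blocks, which are
 *	 long-aligned and a multiple of sizeof(long) in size, so the
 *	 word-at-a-time scan below is assumed safe.
 */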
static
int
test_block_zeros(const char *buf, size_t bytes)
{
	size_t i;

	for (i = 0; i < bytes; i += sizeof(long)) {
		if (*(const long *)(buf + i) != 0)
			return (0);
	}
	return (1);
}
/*
 * Function to "write" a block that contains only zeros.
 */
static
void
zero_write(struct buf *bp, hammer2_trans_t *trans, hammer2_inode_t *ip,
	hammer2_inode_data_t *ipdata, hammer2_chain_t **parentp,
	hammer2_key_t lbase, int *errorp __unused)
{
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_key_t key_dummy;
	int cache_index = -1;

	parent = hammer2_chain_lookup_init(*parentp, 0);

	chain = hammer2_chain_lookup(&parent, &key_dummy, lbase, lbase,
				     &cache_index, HAMMER2_LOOKUP_NODATA);
	if (chain) {
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
			bzero(chain->data->ipdata.u.data,
			      HAMMER2_EMBEDDED_BYTES);
		} else {
			hammer2_chain_delete(trans, chain, 0);
		}
		hammer2_chain_unlock(chain);
	}
	hammer2_chain_lookup_done(parent);
}
/*
 * Function to write the data as it is, without performing any sort of
 * compression.  This function is used in the no-compression path and
 * in the default zero-checking path.
 */
static
void
hammer2_write_bp(hammer2_chain_t *chain, struct buf *bp, int ioflag,
				int pblksize, int *errorp)
{
	hammer2_off_t pbase;
	hammer2_off_t pmask;
	hammer2_off_t peof;
	size_t boff;
	size_t psize;
	struct buf *dbp;
	int error;
	int temp_check = HAMMER2_DEC_CHECK(chain->bref.methods);

	KKASSERT(chain->flags & HAMMER2_CHAIN_MODIFIED);

	switch(chain->bref.type) {
	case HAMMER2_BREF_TYPE_INODE:
		KKASSERT(chain->data->ipdata.op_flags &
			 HAMMER2_OPFLAG_DIRECTDATA);
		KKASSERT(bp->b_loffset == 0);
		bcopy(bp->b_data, chain->data->ipdata.u.data,
		      HAMMER2_EMBEDDED_BYTES);
		break;
	case HAMMER2_BREF_TYPE_DATA:
		psize = hammer2_devblksize(chain->bytes);
		pmask = (hammer2_off_t)psize - 1;
		pbase = chain->bref.data_off & ~pmask;
		boff = chain->bref.data_off & (HAMMER2_OFF_MASK & pmask);
		peof = (pbase + HAMMER2_SEGMASK64) & ~HAMMER2_SEGMASK64;

		if (psize == pblksize) {
			dbp = getblk(chain->hmp->devvp, pbase,
				     psize, 0, 0);
		} else {
			error = bread(chain->hmp->devvp, pbase, psize, &dbp);
			if (error) {
				*errorp = error;
				kprintf("hammer2: WRITE PATH: "
					"dbp bread error\n");
				break;
			}
		}

		chain->bref.methods = HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
				      HAMMER2_ENC_CHECK(temp_check);
		bcopy(bp->b_data, dbp->b_data + boff, chain->bytes);

		/*
		 * Device buffer is now valid, chain is no
		 * longer in the initial state.
		 */
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);

		if (ioflag & IO_SYNC) {
			/*
			 * Synchronous I/O requested.
			 */
			bwrite(dbp);
		} else if ((ioflag & IO_DIRECT) && loff + n == pblksize) {
			bdwrite(dbp);
		} else if (ioflag & IO_ASYNC) {
			bawrite(dbp);
		} else if (hammer2_cluster_enable) {
			cluster_write(dbp, peof, HAMMER2_PBUFSIZE, 4/*XXX*/);
		} else {
			bdwrite(dbp);
		}
		break;
	default:
		panic("hammer2_write_bp: bad chain type %d\n",
		      chain->bref.type);
		/* NOT REACHED */
		break;
	}
}
static
int
hammer2_remount(hammer2_mount_t *hmp, char *path, struct vnode *devvp,
		struct ucred *cred)
{
	return (0);
}
static
int
hammer2_vfs_unmount(struct mount *mp, int mntflags)
{
	hammer2_pfsmount_t *pmp;
	hammer2_mount_t *hmp;
	hammer2_chain_t *rchain;
	int flags;
	int error = 0;
	int ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);
	int dumpcnt;
	int i;
	struct vnode *devvp;

	pmp = MPTOPMP(mp);

	ccms_domain_uninit(&pmp->ccms_dom);
	kdmsg_iocom_uninit(&pmp->iocom);	/* XXX chain dependency */

	lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);

	for (i = 0; i < pmp->cluster.nchains; ++i) {
		hmp = pmp->cluster.chains[i]->hmp;

		flags = 0;
		if (mntflags & MNT_FORCE)
			flags |= FORCECLOSE;

		hammer2_mount_exlock(hmp);

		/*
		 * If mount initialization proceeded far enough we must flush
		 * its vnodes.
		 */
		if (pmp->iroot)
			error = vflush(mp, 0, flags);
		if (error) {
			hammer2_mount_unlock(hmp);
			goto failed;
		}

		--hmp->pmp_count;
		kprintf("hammer2_unmount hmp=%p pmpcnt=%d\n",
			hmp, hmp->pmp_count);

		/*
		 * Flush any left over chains.  The voldata lock is only used
		 * to synchronize against HAMMER2_CHAIN_MODIFIED_AUX.
		 */
		hammer2_voldata_lock(hmp);
		if ((hmp->vchain.flags | hmp->fchain.flags) &
		    (HAMMER2_CHAIN_MODIFIED | HAMMER2_CHAIN_SUBMODIFIED)) {
			hammer2_voldata_unlock(hmp, 0);
			hammer2_vfs_sync(mp, MNT_WAIT);
			hammer2_vfs_sync(mp, MNT_WAIT);
		} else {
			hammer2_voldata_unlock(hmp, 0);
		}
		if (hmp->pmp_count == 0) {
			if (hmp->vchain.flags & (HAMMER2_CHAIN_MODIFIED |
						 HAMMER2_CHAIN_SUBMODIFIED)) {
				kprintf("hammer2_unmount: chains left over "
					"after final sync\n");
				if (hammer2_debug & 0x0010)
					Debugger("entered debugger");
			}
		}

		/*
		 * Cleanup the root and super-root chain elements
		 * (which should be clean).
		 */
		if (pmp->iroot) {
#if REPORT_REFS_ERRORS
			if (pmp->iroot->refs != 1)
				kprintf("PMP->IROOT %p REFS WRONG %d\n",
					pmp->iroot, pmp->iroot->refs);
#else
			KKASSERT(pmp->iroot->refs == 1);
#endif
			/* ref for pmp->iroot */
			hammer2_inode_drop(pmp->iroot);
			pmp->iroot = NULL;
		}

		rchain = pmp->cluster.chains[i];
		if (rchain) {
			atomic_clear_int(&rchain->flags,
					 HAMMER2_CHAIN_MOUNTED);
#if REPORT_REFS_ERRORS
			if (rchain->refs != 1)
				kprintf("PMP->RCHAIN %p REFS WRONG %d\n",
					rchain, rchain->refs);
#else
			KKASSERT(rchain->refs == 1);
#endif
			hammer2_chain_drop(rchain);
			pmp->cluster.chains[i] = NULL;
		}

		/*
		 * If no PFS's left drop the master hammer2_mount for the
		 * device.
		 */
		if (hmp->pmp_count == 0) {
			if (hmp->sroot) {
				hammer2_inode_drop(hmp->sroot);
				hmp->sroot = NULL;
			}

			/*
			 * Finish up with the device vnode
			 */
			if ((devvp = hmp->devvp) != NULL) {
				vinvalbuf(devvp, (ronly ? 0 : V_SAVE), 0, 0);
				hmp->devvp = NULL;
				VOP_CLOSE(devvp,
					  (ronly ? FREAD : FREAD|FWRITE));
				vrele(devvp);
				devvp = NULL;
			}

			/*
			 * Final drop of embedded freemap root chain to
			 * clean up fchain.core (fchain structure is not
			 * flagged ALLOCATED so it is cleaned out and then
			 * left to rot).
			 */
			hammer2_chain_drop(&hmp->fchain);

			/*
			 * Final drop of embedded volume root chain to clean up
			 * vchain.core (vchain structure is not flagged ALLOCATED
			 * so it is cleaned out and then left to rot).
			 */
			dumpcnt = 50;
			hammer2_dump_chain(&hmp->vchain, 0, &dumpcnt);
			hammer2_mount_unlock(hmp);
			hammer2_chain_drop(&hmp->vchain);
		} else {
			hammer2_mount_unlock(hmp);
		}
		if (hmp->pmp_count == 0) {
			mtx_lock(&hmp->wthread_mtx);
			hmp->wthread_destroy = 1;
			wakeup(&hmp->wthread_bioq);
			while (hmp->wthread_destroy != -1) {
				mtxsleep(&hmp->wthread_destroy,
					 &hmp->wthread_mtx, 0,
					 "umount", 0);
			}
			mtx_unlock(&hmp->wthread_mtx);

			TAILQ_REMOVE(&hammer2_mntlist, hmp, mntentry);
			kmalloc_destroy(&hmp->mchain);
			kfree(hmp, M_HAMMER2);
		}
	}

	pmp->mp = NULL;
	mp->mnt_data = NULL;

	kmalloc_destroy(&pmp->mmsg);
	kmalloc_destroy(&pmp->minode);

	kfree(pmp, M_HAMMER2);
	error = 0;

failed:
	lockmgr(&hammer2_mntlk, LK_RELEASE);

	return (error);
}
static
int
hammer2_vfs_vget(struct mount *mp, struct vnode *dvp,
		 ino_t ino, struct vnode **vpp)
{
	kprintf("hammer2_vget\n");
	return (EOPNOTSUPP);
}
static
int
hammer2_vfs_root(struct mount *mp, struct vnode **vpp)
{
	hammer2_pfsmount_t *pmp;
	hammer2_chain_t *parent;
	int error;
	struct vnode *vp;

	pmp = MPTOPMP(mp);
	if (pmp->iroot == NULL) {
		*vpp = NULL;
		error = EINVAL;
	} else {
		parent = hammer2_inode_lock_sh(pmp->iroot);
		vp = hammer2_igetv(pmp->iroot, &error);
		hammer2_inode_unlock_sh(pmp->iroot, parent);
		*vpp = vp;
		if (vp == NULL)
			kprintf("vnodefail\n");
	}

	return (error);
}
/*
 * XXX incorporate ipdata->inode_quota and data_quota
 */
static
int
hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
{
	hammer2_pfsmount_t *pmp;
	hammer2_mount_t *hmp;

	pmp = MPTOPMP(mp);
	KKASSERT(pmp->cluster.nchains >= 1);
	hmp = pmp->cluster.chains[0]->hmp;	/* XXX */

	mp->mnt_stat.f_files = pmp->inode_count;
	mp->mnt_stat.f_ffree = 0;
	mp->mnt_stat.f_blocks = hmp->voldata.allocator_size / HAMMER2_PBUFSIZE;
	mp->mnt_stat.f_bfree = hmp->voldata.allocator_free / HAMMER2_PBUFSIZE;
	mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree;

	*sbp = mp->mnt_stat;
	return (0);
}
static
int
hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
{
	hammer2_pfsmount_t *pmp;
	hammer2_mount_t *hmp;

	pmp = MPTOPMP(mp);
	KKASSERT(pmp->cluster.nchains >= 1);
	hmp = pmp->cluster.chains[0]->hmp;	/* XXX */

	mp->mnt_vstat.f_bsize = HAMMER2_PBUFSIZE;
	mp->mnt_vstat.f_files = pmp->inode_count;
	mp->mnt_vstat.f_ffree = 0;
	mp->mnt_vstat.f_blocks = hmp->voldata.allocator_size / HAMMER2_PBUFSIZE;
	mp->mnt_vstat.f_bfree = hmp->voldata.allocator_free / HAMMER2_PBUFSIZE;
	mp->mnt_vstat.f_bavail = mp->mnt_vstat.f_bfree;

	*sbp = mp->mnt_vstat;
	return (0);
}
/*
 * Sync the entire filesystem; this is called from the filesystem syncer
 * process periodically and whenever a user calls sync(1) on the hammer
 * filesystem(s).
 *
 * Currently is actually called from the syncer! \o/
 *
 * This task will have to snapshot the state of the dirty inode chain.
 * From that, it will have to make sure all of the inodes on the dirty
 * chain have IO initiated.  We make sure that io is initiated for the root
 * block.
 *
 * If waitfor is set, we wait for media to acknowledge the new rootblock.
 *
 * THINKS: side A vs side B, to have sync not stall all I/O?
 */
static
int
hammer2_vfs_sync(struct mount *mp, int waitfor)
{
	struct hammer2_sync_info info;
	hammer2_pfsmount_t *pmp;
	hammer2_mount_t *hmp;
	int flags;
	int error;
	int total_error;
	int i;

	pmp = MPTOPMP(mp);

	/*
	 * We can't acquire locks on existing vnodes while in a transaction
	 * without risking a deadlock.  This assumes that vfsync() can be
	 * called without the vnode locked (which it can in DragonFly).
	 * Otherwise we'd have to implement a multi-pass or flag the lock
	 * failures and retry.
	 */
	/*flags = VMSC_GETVP;*/
	flags = 0;
	if (waitfor & MNT_LAZY)
		flags |= VMSC_ONEPASS;

	hammer2_trans_init(&info.trans, pmp, HAMMER2_TRANS_ISFLUSH);

	/*
	 * vfsync the vnodes. XXX
	 */
	info.error = 0;
	info.waitfor = MNT_NOWAIT;
	vmntvnodescan(mp, flags | VMSC_NOWAIT,
		      hammer2_sync_scan1,
		      hammer2_sync_scan2, &info);
	if (info.error == 0 && (waitfor & MNT_WAIT)) {
		info.waitfor = waitfor;
		vmntvnodescan(mp, flags,
			      hammer2_sync_scan1,
			      hammer2_sync_scan2, &info);
	}
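
	/*
	 * NOTE: The vnode scan runs twice: an opportunistic MNT_NOWAIT
	 *	 pass first, then a blocking pass if the caller asked for
	 *	 MNT_WAIT and the first pass succeeded.
	 */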
	if (waitfor == MNT_WAIT) {
		/* XXX */
	}

	total_error = 0;
	for (i = 0; i < pmp->cluster.nchains; ++i) {
		hmp = pmp->cluster.chains[i]->hmp;

		/*
		 * Media mounts have two 'roots', vchain for the topology
		 * and fchain for the free block table.  Flush both.
		 *
		 * Note that the topology and free block table are handled
		 * independently, so the free block table can wind up being
		 * ahead of the topology.  We depend on the bulk free scan
		 * code to deal with any loose ends.
		 */
		hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
		if (hmp->vchain.flags & (HAMMER2_CHAIN_MODIFIED |
					 HAMMER2_CHAIN_SUBMODIFIED)) {
			hammer2_chain_flush(&info.trans, &hmp->vchain);
		}
		hammer2_chain_unlock(&hmp->vchain);

		hammer2_chain_lock(&hmp->fchain, HAMMER2_RESOLVE_ALWAYS);
		if (hmp->fchain.flags & (HAMMER2_CHAIN_MODIFIED |
					 HAMMER2_CHAIN_SUBMODIFIED)) {
			/* this will also modify vchain as a side effect */
			hammer2_chain_flush(&info.trans, &hmp->fchain);
		}
		hammer2_chain_unlock(&hmp->fchain);

		/*
		 * We can't safely flush the volume header until we have
		 * flushed any device buffers which have built up.
		 *
		 * XXX this isn't being incremental
		 */
		vn_lock(hmp->devvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(hmp->devvp, MNT_WAIT, 0);
		vn_unlock(hmp->devvp);

		/*
		 * The flush code sets CHAIN_VOLUMESYNC to indicate that the
		 * volume header needs synchronization via hmp->volsync.
		 *
		 * XXX synchronize the flag & data with only this flush XXX
		 */
		if (error == 0 &&
		    (hmp->vchain.flags & HAMMER2_CHAIN_VOLUMESYNC)) {
			struct buf *bp;

			/*
			 * Synchronize the disk before flushing the volume
			 * header.
			 */
			bp = getpbuf(NULL);
			bp->b_bio1.bio_offset = 0;
			bp->b_bufsize = 0;
			bp->b_bcount = 0;
			bp->b_cmd = BUF_CMD_FLUSH;
			bp->b_bio1.bio_done = biodone_sync;
			bp->b_bio1.bio_flags |= BIO_SYNC;
			vn_strategy(hmp->devvp, &bp->b_bio1);
			biowait(&bp->b_bio1, "h2vol");
			relpbuf(bp, NULL);

			/*
			 * Then we can safely flush the version of the
			 * volume header synchronized by the flush code.
			 */
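			/*
			 * NOTE: Updates rotate through the four header
			 *	 copies (volhdrno + 1, with wrap), so a
			 *	 crash mid-write leaves older valid copies;
			 *	 hammer2_install_volume_header() later picks
			 *	 the highest-TID copy with a good CRC.
			 */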
			i = hmp->volhdrno + 1;
			if (i >= HAMMER2_NUM_VOLHDRS)
				i = 0;
			if (i * HAMMER2_ZONE_BYTES64 + HAMMER2_SEGSIZE >
			    hmp->volsync.volu_size) {
				i = 0;
			}
			kprintf("sync volhdr %d %jd\n",
				i, (intmax_t)hmp->volsync.volu_size);
			bp = getblk(hmp->devvp, i * HAMMER2_ZONE_BYTES64,
				    HAMMER2_PBUFSIZE, 0, 0);
			atomic_clear_int(&hmp->vchain.flags,
					 HAMMER2_CHAIN_VOLUMESYNC);
			bcopy(&hmp->volsync, bp->b_data, HAMMER2_PBUFSIZE);
			bawrite(bp);
			hmp->volhdrno = i;
		}
		if (error)
			total_error = error;
	}

	hammer2_trans_done(&info.trans);
	return (total_error);
}
/*
 * NOTE: We don't test SUBMODIFIED or MOVED here because the fsync code
 *	 won't flush on those flags.  The syncer code above will do a
 *	 general meta-data flush globally that will catch these flags.
 */
static int
hammer2_sync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
	hammer2_inode_t *ip;

	ip = VTOI(vp);
	if (vp->v_type == VNON || ip == NULL ||
	    ((ip->flags & HAMMER2_INODE_MODIFIED) == 0 &&
	     RB_EMPTY(&vp->v_rbdirty_tree))) {
		return(-1);
	}
	return(0);
}
static int
hammer2_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer2_sync_info *info = data;
	hammer2_inode_t *ip;
	hammer2_chain_t *parent;
	int error;

	ip = VTOI(vp);
	if (vp->v_type == VNON || vp->v_type == VBAD ||
	    ((ip->flags & HAMMER2_INODE_MODIFIED) == 0 &&
	     RB_EMPTY(&vp->v_rbdirty_tree))) {
		return(0);
	}

	/*
	 * VOP_FSYNC will start a new transaction so replicate some code
	 * here to do it inline (see hammer2_vop_fsync()).
	 *
	 * WARNING: The vfsync interacts with the buffer cache and might
	 *	    block, we can't hold the inode lock at that time.
	 */
	atomic_clear_int(&ip->flags, HAMMER2_INODE_MODIFIED);
	if (ip->vp)
		vfsync(ip->vp, MNT_NOWAIT, 1, NULL, NULL);
	parent = hammer2_inode_lock_ex(ip);
	hammer2_chain_flush(&info->trans, parent);
	hammer2_inode_unlock_ex(ip, parent);
	error = 0;
#if 0
	error = VOP_FSYNC(vp, MNT_NOWAIT, 0);
#endif
	if (error)
		info->error = error;
	return(0);
}
static int
hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp)
{
	return (EOPNOTSUPP);
}

static int
hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
		   struct fid *fhp, struct vnode **vpp)
{
	return (EOPNOTSUPP);
}

static int
hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
		     int *exflagsp, struct ucred **credanonp)
{
	return (EOPNOTSUPP);
}
/*
 * Support code for hammer2_mount().  Read, verify, and install the volume
 * header into the HMP.
 *
 * XXX read four volhdrs and use the one with the highest TID whose CRC
 *     matches.
 *
 * XXX For filesystems w/ less than 4 volhdrs, make sure to not write to
 *     nonexistent locations.
 *
 * XXX Record selected volhdr and ring updates to each of 4 volhdrs
 */
static
int
hammer2_install_volume_header(hammer2_mount_t *hmp)
{
	hammer2_volume_data_t *vd;
	struct buf *bp;
	hammer2_crc32_t crc0, crc, bcrc0, bcrc;
	int error_reported;
	int error;
	int valid;
	int i;

	error_reported = 0;
	error = 0;
	valid = 0;
	bp = NULL;

	/*
	 * There are up to 4 copies of the volume header (syncs iterate
	 * between them so there is no single master).  We don't trust the
	 * volu_size field so we don't know precisely how large the filesystem
	 * is, so depend on the OS to return an error if we go beyond the
	 * block device's EOF.
	 */
	for (i = 0; i < HAMMER2_NUM_VOLHDRS; i++) {
		error = bread(hmp->devvp, i * HAMMER2_ZONE_BYTES64,
			      HAMMER2_VOLUME_BYTES, &bp);
		if (error) {
			brelse(bp);
			bp = NULL;
			continue;
		}

		vd = (struct hammer2_volume_data *) bp->b_data;
		if ((vd->magic != HAMMER2_VOLUME_ID_HBO) &&
		    (vd->magic != HAMMER2_VOLUME_ID_ABO)) {
			brelse(bp);
			bp = NULL;
			continue;
		}

		if (vd->magic == HAMMER2_VOLUME_ID_ABO) {
			/* XXX: Reversed-endianness filesystem */
			kprintf("hammer2: reverse-endian filesystem detected");
			brelse(bp);
			bp = NULL;
			continue;
		}

		crc = vd->icrc_sects[HAMMER2_VOL_ICRC_SECT0];
		crc0 = hammer2_icrc32(bp->b_data + HAMMER2_VOLUME_ICRC0_OFF,
				      HAMMER2_VOLUME_ICRC0_SIZE);
		bcrc = vd->icrc_sects[HAMMER2_VOL_ICRC_SECT1];
		bcrc0 = hammer2_icrc32(bp->b_data + HAMMER2_VOLUME_ICRC1_OFF,
				      HAMMER2_VOLUME_ICRC1_SIZE);
		if ((crc0 != crc) || (bcrc0 != bcrc)) {
			kprintf("hammer2 volume header crc "
				"mismatch copy #%d %08x/%08x\n",
				i, crc0, crc);
			error_reported = 1;
			brelse(bp);
			bp = NULL;
			continue;
		}
		if (valid == 0 || hmp->voldata.mirror_tid < vd->mirror_tid) {
			valid = 1;
			hmp->voldata = *vd;
			hmp->volhdrno = i;
		}
		brelse(bp);
		bp = NULL;
	}
	if (valid) {
		hmp->volsync = hmp->voldata;
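		/*
		 * NOTE: Of the headers that pass their CRC checks, the one
		 *	 with the highest mirror_tid wins; volsync starts
		 *	 out as a copy of it so the first flush has a
		 *	 consistent base.
		 */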
		error = 0;
		if (error_reported || bootverbose || 1) { /* 1/DEBUG */
			kprintf("hammer2: using volume header #%d\n",
				hmp->volhdrno);
		}
	} else {
		error = EINVAL;
		kprintf("hammer2: no valid volume headers found!\n");
	}
	return (error);
}
/*
 * Reconnect using the passed file pointer.  The caller must ref the
 * fp for us.
 */
void
hammer2_cluster_reconnect(hammer2_pfsmount_t *pmp, struct file *fp)
{
	hammer2_inode_data_t *ipdata;
	hammer2_chain_t *parent;
	hammer2_mount_t *hmp;
	size_t name_len;

	hmp = pmp->cluster.chains[0]->hmp;	/* XXX */

	/*
	 * Closes old comm descriptor, kills threads, cleans up
	 * states, then installs the new descriptor and creates
	 * new threads.
	 */
	kdmsg_iocom_reconnect(&pmp->iocom, fp, "hammer2");

	/*
	 * Setup LNK_CONN fields for autoinitiated state machine
	 */
	parent = hammer2_inode_lock_ex(pmp->iroot);
	ipdata = &parent->data->ipdata;
	pmp->iocom.auto_lnk_conn.pfs_clid = ipdata->pfs_clid;
	pmp->iocom.auto_lnk_conn.pfs_fsid = ipdata->pfs_fsid;
	pmp->iocom.auto_lnk_conn.pfs_type = ipdata->pfs_type;
	pmp->iocom.auto_lnk_conn.proto_version = DMSG_SPAN_PROTO_1;
	pmp->iocom.auto_lnk_conn.peer_type = hmp->voldata.peer_type;

	/*
	 * Filter adjustment.  Clients do not need visibility into other
	 * clients (otherwise millions of clients would present a serious
	 * problem).  The fs_label also serves to restrict the namespace.
	 */
	pmp->iocom.auto_lnk_conn.peer_mask = 1LLU << HAMMER2_PEER_HAMMER2;
	pmp->iocom.auto_lnk_conn.pfs_mask = (uint64_t)-1;
	switch (ipdata->pfs_type) {
	case DMSG_PFSTYPE_CLIENT:
		pmp->iocom.auto_lnk_conn.peer_mask &=
				~(1LLU << DMSG_PFSTYPE_CLIENT);
		break;
	default:
		break;
	}

	name_len = ipdata->name_len;
	if (name_len >= sizeof(pmp->iocom.auto_lnk_conn.fs_label))
		name_len = sizeof(pmp->iocom.auto_lnk_conn.fs_label) - 1;
	bcopy(ipdata->filename,
	      pmp->iocom.auto_lnk_conn.fs_label,
	      name_len);
	pmp->iocom.auto_lnk_conn.fs_label[name_len] = 0;

	/*
	 * Setup LNK_SPAN fields for autoinitiated state machine
	 */
	pmp->iocom.auto_lnk_span.pfs_clid = ipdata->pfs_clid;
	pmp->iocom.auto_lnk_span.pfs_fsid = ipdata->pfs_fsid;
	pmp->iocom.auto_lnk_span.pfs_type = ipdata->pfs_type;
	pmp->iocom.auto_lnk_span.peer_type = hmp->voldata.peer_type;
	pmp->iocom.auto_lnk_span.proto_version = DMSG_SPAN_PROTO_1;
	name_len = ipdata->name_len;
	if (name_len >= sizeof(pmp->iocom.auto_lnk_span.fs_label))
		name_len = sizeof(pmp->iocom.auto_lnk_span.fs_label) - 1;
	bcopy(ipdata->filename,
	      pmp->iocom.auto_lnk_span.fs_label,
	      name_len);
	pmp->iocom.auto_lnk_span.fs_label[name_len] = 0;
	hammer2_inode_unlock_ex(pmp->iroot, parent);

	kdmsg_iocom_autoinitiate(&pmp->iocom, hammer2_autodmsg);
}
static int
hammer2_rcvdmsg(kdmsg_msg_t *msg)
{
	switch(msg->any.head.cmd & DMSGF_TRANSMASK) {
	case DMSG_DBG_SHELL:
		/*
		 * Execute shell command (not supported atm)
		 */
		kdmsg_msg_reply(msg, DMSG_ERR_NOSUPP);
		break;
	case DMSG_DBG_SHELL | DMSGF_REPLY:
		if (msg->aux_data) {
			msg->aux_data[msg->aux_size - 1] = 0;
			kprintf("HAMMER2 DBG: %s\n", msg->aux_data);
		}
		break;
	default:
		/*
		 * Unsupported message received.  We only need to
		 * reply if it's a transaction in order to close our end.
		 * Ignore any one-way messages or any further messages
		 * associated with the transaction.
		 *
		 * NOTE: This case also includes DMSG_LNK_ERROR messages
		 *	 which might be one-way, replying to those would
		 *	 cause an infinite ping-pong.
		 */
		if (msg->any.head.cmd & DMSGF_CREATE)
			kdmsg_msg_reply(msg, DMSG_ERR_NOSUPP);
		break;
	}
	return(0);
}
/*
 * This function is called after KDMSG has automatically handled processing
 * of a LNK layer message (typically CONN, SPAN, or CIRC).
 *
 * We tag off the LNK_CONN to trigger our LNK_VOLCONF messages which
 * advertise all available hammer2 super-root volumes.
 */
static void
hammer2_autodmsg(kdmsg_msg_t *msg)
{
	hammer2_pfsmount_t *pmp = msg->iocom->handle;
	hammer2_mount_t *hmp = pmp->cluster.chains[0]->hmp; /* XXX */
	int copyid;

	/*
	 * We only care about replies to our LNK_CONN auto-request.  kdmsg
	 * has already processed the reply, we use this callback as a shim
	 * to know when we can advertise available super-root volumes.
	 */
	if ((msg->any.head.cmd & DMSGF_TRANSMASK) !=
	    (DMSG_LNK_CONN | DMSGF_CREATE | DMSGF_REPLY) ||
	    msg->state == NULL) {
		return;
	}

	kprintf("LNK_CONN REPLY RECEIVED CMD %08x\n", msg->any.head.cmd);

	if (msg->any.head.cmd & DMSGF_CREATE) {
		kprintf("HAMMER2: VOLDATA DUMP\n");

		/*
		 * Dump the configuration stored in the volume header
		 */
		hammer2_voldata_lock(hmp);
		for (copyid = 0; copyid < HAMMER2_COPYID_COUNT; ++copyid) {
			if (hmp->voldata.copyinfo[copyid].copyid == 0)
				continue;
			hammer2_volconf_update(pmp, copyid);
		}
		hammer2_voldata_unlock(hmp, 0);
	}
	if ((msg->any.head.cmd & DMSGF_DELETE) &&
	    msg->state && (msg->state->txcmd & DMSGF_DELETE) == 0) {
		kprintf("HAMMER2: CONN WAS TERMINATED\n");
		kdmsg_msg_reply(msg, 0);
	}
}
/*
 * Volume configuration updates are passed onto the userland service
 * daemon via the open LNK_CONN transaction.
 */
void
hammer2_volconf_update(hammer2_pfsmount_t *pmp, int index)
{
	hammer2_mount_t *hmp = pmp->cluster.chains[0]->hmp;	/* XXX */
	kdmsg_msg_t *msg;

	/* XXX interlock against connection state termination */
	kprintf("volconf update %p\n", pmp->iocom.conn_state);
	if (pmp->iocom.conn_state) {
		kprintf("TRANSMIT VOLCONF VIA OPEN CONN TRANSACTION\n");
		msg = kdmsg_msg_alloc_state(pmp->iocom.conn_state,
					    DMSG_LNK_VOLCONF, NULL, NULL);
		msg->any.lnk_volconf.copy = hmp->voldata.copyinfo[index];
		msg->any.lnk_volconf.mediaid = hmp->voldata.fsid;
		msg->any.lnk_volconf.index = index;
		kdmsg_msg_write(msg);
	}
}
void
hammer2_dump_chain(hammer2_chain_t *chain, int tab, int *countp)
{
	hammer2_chain_layer_t *layer;
	hammer2_chain_t *scan;
	hammer2_chain_t *first_parent;

	--*countp;
	if (*countp == 0) {
		kprintf("%*.*s...\n", tab, tab, "");
		return;
	}
	if (*countp < 0)
		return;
	first_parent = chain->core ? TAILQ_FIRST(&chain->core->ownerq) : NULL;
	kprintf("%*.*schain %p.%d [%08x][core=%p fp=%p] (%s) np=%p dt=%s refs=%d",
		tab, tab, "",
		chain, chain->bref.type, chain->flags,
		chain->core,
		first_parent,
		((chain->bref.type == HAMMER2_BREF_TYPE_INODE &&
		 chain->data) ? (char *)chain->data->ipdata.filename : "?"),
		(first_parent ? TAILQ_NEXT(chain, core_entry) : NULL),
		(chain->delete_tid == HAMMER2_MAX_TID ? "max" : "fls"),
		chain->refs);
	if (first_parent) {
		kprintf(" [fpflags %08x fprefs %d\n",
			first_parent->flags,
			first_parent->refs);
	}
	if (chain->core == NULL || TAILQ_EMPTY(&chain->core->layerq))
		kprintf("\n");
	else
		kprintf(" {\n");
	TAILQ_FOREACH(layer, &chain->core->layerq, entry) {
		RB_FOREACH(scan, hammer2_chain_tree, &layer->rbtree) {
			hammer2_dump_chain(scan, tab + 4, countp);
		}
	}
	if (chain->core && !TAILQ_EMPTY(&chain->core->layerq)) {
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE && chain->data)
			kprintf("%*.*s}(%s)\n", tab, tab, "",
				chain->data->ipdata.filename);
		else
			kprintf("%*.*s}\n", tab, tab, "");
	}
}