/*
 * Copyright (c) 2011-2013 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/fcntl.h>
#include <sys/buf.h>
#include <sys/vfsops.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/objcache.h>

#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/mountctl.h>
#include <sys/dirent.h>

#include <sys/mutex.h>
#include <sys/mutex2.h>

#include "hammer2.h"
#include "hammer2_disk.h"
#include "hammer2_mount.h"

#include "hammer2_lz4.h"

#include "zlib/hammer2_zlib.h"
#define REPORT_REFS_ERRORS	1	/* XXX remove me */

MALLOC_DEFINE(M_OBJCACHE, "objcache", "Object Cache");
struct hammer2_sync_info {
	hammer2_trans_t trans;
	int error;
	int waitfor;
};
TAILQ_HEAD(hammer2_mntlist, hammer2_mount);
static struct hammer2_mntlist hammer2_mntlist;
static struct lock hammer2_mntlk;
int hammer2_debug;
int hammer2_cluster_enable = 1;
int hammer2_hardlink_enable = 1;
int hammer2_flush_pipe = 100;
long hammer2_limit_dirty_chains;
long hammer2_iod_file_read;
long hammer2_iod_meta_read;
long hammer2_iod_indr_read;
long hammer2_iod_fmap_read;
long hammer2_iod_volu_read;
long hammer2_iod_file_write;
long hammer2_iod_meta_write;
long hammer2_iod_indr_write;
long hammer2_iod_fmap_write;
long hammer2_iod_volu_write;
long hammer2_ioa_file_read;
long hammer2_ioa_meta_read;
long hammer2_ioa_indr_read;
long hammer2_ioa_fmap_read;
long hammer2_ioa_volu_read;
long hammer2_ioa_fmap_write;
long hammer2_ioa_file_write;
long hammer2_ioa_meta_write;
long hammer2_ioa_indr_write;
long hammer2_ioa_volu_write;
MALLOC_DECLARE(C_BUFFER);
MALLOC_DEFINE(C_BUFFER, "compbuffer", "Buffer used for compression.");

MALLOC_DECLARE(D_BUFFER);
MALLOC_DEFINE(D_BUFFER, "decompbuffer", "Buffer used for decompression.");

struct objcache *cache_buffer_read;
struct objcache *cache_buffer_write;
SYSCTL_NODE(_vfs, OID_AUTO, hammer2, CTLFLAG_RW, 0, "HAMMER2 filesystem");

SYSCTL_INT(_vfs_hammer2, OID_AUTO, debug, CTLFLAG_RW,
	   &hammer2_debug, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_enable, CTLFLAG_RW,
	   &hammer2_cluster_enable, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, hardlink_enable, CTLFLAG_RW,
	   &hammer2_hardlink_enable, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, flush_pipe, CTLFLAG_RW,
	   &hammer2_flush_pipe, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, limit_dirty_chains, CTLFLAG_RW,
	   &hammer2_limit_dirty_chains, 0, "");

SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_read, CTLFLAG_RW,
	   &hammer2_iod_file_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_read, CTLFLAG_RW,
	   &hammer2_iod_meta_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_read, CTLFLAG_RW,
	   &hammer2_iod_indr_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_fmap_read, CTLFLAG_RW,
	   &hammer2_iod_fmap_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_read, CTLFLAG_RW,
	   &hammer2_iod_volu_read, 0, "");

SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_write, CTLFLAG_RW,
	   &hammer2_iod_file_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_write, CTLFLAG_RW,
	   &hammer2_iod_meta_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_write, CTLFLAG_RW,
	   &hammer2_iod_indr_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_fmap_write, CTLFLAG_RW,
	   &hammer2_iod_fmap_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_write, CTLFLAG_RW,
	   &hammer2_iod_volu_write, 0, "");

SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_file_read, CTLFLAG_RW,
	   &hammer2_ioa_file_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_meta_read, CTLFLAG_RW,
	   &hammer2_ioa_meta_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_indr_read, CTLFLAG_RW,
	   &hammer2_ioa_indr_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_fmap_read, CTLFLAG_RW,
	   &hammer2_ioa_fmap_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_volu_read, CTLFLAG_RW,
	   &hammer2_ioa_volu_read, 0, "");

SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_file_write, CTLFLAG_RW,
	   &hammer2_ioa_file_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_meta_write, CTLFLAG_RW,
	   &hammer2_ioa_meta_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_indr_write, CTLFLAG_RW,
	   &hammer2_ioa_indr_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_fmap_write, CTLFLAG_RW,
	   &hammer2_ioa_fmap_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_volu_write, CTLFLAG_RW,
	   &hammer2_ioa_volu_write, 0, "");
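/*
 * Illustrative note (not from the original source): the knobs and
 * counters above surface under the vfs.hammer2 sysctl tree, so they can
 * be inspected or tuned from userland, e.g.:
 *
 *	sysctl vfs.hammer2.flush_pipe=200
 *	sysctl vfs.hammer2.iod_file_write
 */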
static int hammer2_vfs_init(struct vfsconf *conf);
static int hammer2_vfs_uninit(struct vfsconf *vfsp);
static int hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
				struct ucred *cred);
static int hammer2_remount(hammer2_mount_t *, struct mount *, char *,
				struct vnode *, struct ucred *);
static int hammer2_recovery(hammer2_mount_t *hmp);
static int hammer2_vfs_unmount(struct mount *mp, int mntflags);
static int hammer2_vfs_root(struct mount *mp, struct vnode **vpp);
static int hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp,
				struct ucred *cred);
static int hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
				struct ucred *cred);
static int hammer2_vfs_sync(struct mount *mp, int waitfor);
static int hammer2_vfs_vget(struct mount *mp, struct vnode *dvp,
				ino_t ino, struct vnode **vpp);
static int hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
				struct fid *fhp, struct vnode **vpp);
static int hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp);
static int hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
				int *exflagsp, struct ucred **credanonp);

static int hammer2_install_volume_header(hammer2_mount_t *hmp);
static int hammer2_sync_scan2(struct mount *mp, struct vnode *vp, void *data);

static void hammer2_write_thread(void *arg);

static void hammer2_vfs_unmount_hmp1(struct mount *mp, hammer2_mount_t *hmp);
static void hammer2_vfs_unmount_hmp2(struct mount *mp, hammer2_mount_t *hmp);
/*
 * Functions for compression in threads,
 * from hammer2_vnops.c
 */
static void hammer2_write_file_core(struct buf *bp, hammer2_trans_t *trans,
				hammer2_inode_t *ip,
				hammer2_inode_data_t *ipdata,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase, int ioflag, int pblksize,
				int *errorp);
static void hammer2_compress_and_write(struct buf *bp, hammer2_trans_t *trans,
				hammer2_inode_t *ip,
				hammer2_inode_data_t *ipdata,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase, int ioflag,
				int pblksize, int *errorp, int comp_algo);
static void hammer2_zero_check_and_write(struct buf *bp,
				hammer2_trans_t *trans, hammer2_inode_t *ip,
				hammer2_inode_data_t *ipdata,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase,
				int ioflag, int pblksize, int *errorp);
static int test_block_zeros(const char *buf, size_t bytes);
static void zero_write(struct buf *bp, hammer2_trans_t *trans,
				hammer2_inode_t *ip,
				hammer2_inode_data_t *ipdata,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase, int *errorp);
static void hammer2_write_bp(hammer2_chain_t *chain, struct buf *bp,
				int ioflag, int pblksize, int *errorp);

static int hammer2_rcvdmsg(kdmsg_msg_t *msg);
static void hammer2_autodmsg(kdmsg_msg_t *msg);
/*
 * HAMMER2 vfs operations.
 */
static struct vfsops hammer2_vfsops = {
	.vfs_init	= hammer2_vfs_init,
	.vfs_uninit	= hammer2_vfs_uninit,
	.vfs_sync	= hammer2_vfs_sync,
	.vfs_mount	= hammer2_vfs_mount,
	.vfs_unmount	= hammer2_vfs_unmount,
	.vfs_root 	= hammer2_vfs_root,
	.vfs_statfs	= hammer2_vfs_statfs,
	.vfs_statvfs	= hammer2_vfs_statvfs,
	.vfs_vget	= hammer2_vfs_vget,
	.vfs_vptofh	= hammer2_vfs_vptofh,
	.vfs_fhtovp	= hammer2_vfs_fhtovp,
	.vfs_checkexp	= hammer2_vfs_checkexp
};

MALLOC_DEFINE(M_HAMMER2, "HAMMER2-mount", "");

VFS_SET(hammer2_vfsops, hammer2, 0);
MODULE_VERSION(hammer2, 1);
static
int
hammer2_vfs_init(struct vfsconf *conf)
{
	static struct objcache_malloc_args margs_read;
	static struct objcache_malloc_args margs_write;
	int error;

	error = 0;

	if (HAMMER2_BLOCKREF_BYTES != sizeof(struct hammer2_blockref))
		error = EINVAL;
	if (HAMMER2_INODE_BYTES != sizeof(struct hammer2_inode_data))
		error = EINVAL;
	if (HAMMER2_VOLUME_BYTES != sizeof(struct hammer2_volume_data))
		error = EINVAL;

	if (error)
		kprintf("HAMMER2 structure size mismatch; cannot continue.\n");

	margs_read.objsize = 65536;
	margs_read.mtype = D_BUFFER;

	margs_write.objsize = 32768;
	margs_write.mtype = C_BUFFER;

	cache_buffer_read = objcache_create(margs_read.mtype->ks_shortdesc,
				0, 1, NULL, NULL, NULL, objcache_malloc_alloc,
				objcache_malloc_free, &margs_read);
	cache_buffer_write = objcache_create(margs_write.mtype->ks_shortdesc,
				0, 1, NULL, NULL, NULL, objcache_malloc_alloc,
				objcache_malloc_free, &margs_write);

	lockinit(&hammer2_mntlk, "mntlk", 0, 0);
	TAILQ_INIT(&hammer2_mntlist);

	hammer2_limit_dirty_chains = desiredvnodes / 10;

	return (error);
}
static
int
hammer2_vfs_uninit(struct vfsconf *vfsp __unused)
{
	objcache_destroy(cache_buffer_read);
	objcache_destroy(cache_buffer_write);
	return 0;
}
/*
 * Mount or remount HAMMER2 filesystem from physical media.
 *
 *	mountroot
 *		mp		mount point structure
 *		path		NULL
 *		data		<unused>
 *		cred		<unused>
 *
 *	mount
 *		mp		mount point structure
 *		path		path to mount point
 *		data		pointer to argument structure in user space
 *			volume	volume path (device@LABEL form)
 *			hflags	user mount flags
 *		cred		user credentials
 *
 * RETURNS:	0	Success
 *		!0	error number
 */
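/*
 * Illustrative sketch (not part of the original source): a userland
 * mount helper would fill the argument structure roughly as below.
 * The field names follow their usage later in this function
 * (info.volume, info.hflags, info.cluster_fd); the exact layout lives
 * in hammer2_mount.h and may differ.
 *
 *	struct hammer2_mount_info info;
 *
 *	bzero(&info, sizeof(info));
 *	info.volume = "/dev/da0s1a@ROOT";	// device@LABEL
 *	info.hflags = 0;
 *	info.cluster_fd = -1;			// no cluster connection
 *	mount("hammer2", "/mnt", 0, &info);
 */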
static
int
hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
		  struct ucred *cred)
{
	struct hammer2_mount_info info;
	hammer2_pfsmount_t *pmp;
	hammer2_mount_t *hmp;
	hammer2_key_t key_next;
	hammer2_key_t key_dummy;
	hammer2_key_t lhc;
	struct vnode *devvp;
	struct nlookupdata nd;
	hammer2_chain_t *parent;
	hammer2_chain_t *schain;
	hammer2_chain_t *rchain;
	struct file *fp;
	char devstr[MNAMELEN];
	size_t size;
	size_t done;
	char *dev;
	char *label;
	int ronly = 1;
	int error;
	int cache_index;
	int i;

	hmp = NULL;
	pmp = NULL;
	dev = NULL;
	label = NULL;
	devvp = NULL;
	cache_index = -1;

	kprintf("hammer2_mount\n");
	if (path == NULL) {
		/*
		 * Root mount (not supported)
		 */
		bzero(&info, sizeof(info));
		info.cluster_fd = -1;
		return (EOPNOTSUPP);
	}

	/*
	 * Non-root mount or updating a mount
	 */
	error = copyin(data, &info, sizeof(info));
	if (error)
		return (error);

	error = copyinstr(info.volume, devstr, MNAMELEN - 1, &done);
	if (error)
		return (error);

	/* Extract device and label */
	dev = devstr;
	label = strchr(devstr, '@');
	if (label == NULL ||
	    ((label + 1) - dev) > done) {
		return (EINVAL);
	}
	*label = '\0';
	label++;
	if (*label == '\0')
		return (EINVAL);
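	/*
	 * Illustrative example (not from the original source): for a
	 * volume string of "/dev/da0s1a@ROOT" the split above yields
	 * dev = "/dev/da0s1a" and label = "ROOT"; the '@' is replaced
	 * with a NUL so both halves can be used as C strings.
	 */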
	if (mp->mnt_flag & MNT_UPDATE) {
		/* HAMMER2 implements NFS export via mountctl */
		pmp = MPTOPMP(mp);
		for (i = 0; i < pmp->cluster.nchains; ++i) {
			hmp = pmp->cluster.chains[i]->hmp;
			devvp = hmp->devvp;
			error = hammer2_remount(hmp, mp, path,
						devvp, cred);
			if (error)
				break;
		}
		hammer2_inode_install_hidden(pmp);

		return error;
	}
	/*
	 * Lookup name and verify it refers to a block device.
	 */
	error = nlookup_init(&nd, dev, UIO_SYSSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = cache_vref(&nd.nl_nch, nd.nl_cred, &devvp);
	nlookup_done(&nd);

	if (error == 0) {
		if (vn_isdisk(devvp, &error))
			error = vfs_mountedon(devvp);
	}
	/*
	 * Determine if the device has already been mounted.  After this
	 * check hmp will be non-NULL if we are doing the second or more
	 * hammer2 mounts from the same device.
	 */
	lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);
	TAILQ_FOREACH(hmp, &hammer2_mntlist, mntentry) {
		if (hmp->devvp == devvp)
			break;
	}
	/*
	 * Open the device if this isn't a secondary mount and construct
	 * the H2 device mount (hmp).
	 */
	if (hmp == NULL) {
		if (error == 0 && vcount(devvp) > 0)
			error = EBUSY;

		/*
		 * Now open the device
		 */
		if (error == 0) {
			ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);
			vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
			error = vinvalbuf(devvp, V_SAVE, 0, 0);
			if (error == 0) {
				error = VOP_OPEN(devvp,
						 ronly ? FREAD : FREAD | FWRITE,
						 FSCRED, NULL);
			}
			vn_unlock(devvp);
		}
		if (error && devvp) {
			vrele(devvp);
			devvp = NULL;
		}
		if (error) {
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			return error;
		}

		hmp = kmalloc(sizeof(*hmp), M_HAMMER2, M_WAITOK | M_ZERO);
		hmp->ronly = ronly;
		hmp->devvp = devvp;
		kmalloc_create(&hmp->mchain, "HAMMER2-chains");
		TAILQ_INSERT_TAIL(&hammer2_mntlist, hmp, mntentry);
		RB_INIT(&hmp->iotree);

		lockinit(&hmp->alloclk, "h2alloc", 0, 0);
		lockinit(&hmp->voldatalk, "voldata", 0, LK_CANRECURSE);
		TAILQ_INIT(&hmp->transq);
		/*
		 * vchain setup.  vchain.data is embedded.
		 * vchain.refs is initialized and will never drop to 0.
		 *
		 * NOTE! voldata is not yet loaded.
		 */
		hmp->vchain.hmp = hmp;
		hmp->vchain.refs = 1;
		hmp->vchain.data = (void *)&hmp->voldata;
		hmp->vchain.bref.type = HAMMER2_BREF_TYPE_VOLUME;
		hmp->vchain.bref.data_off = 0 | HAMMER2_PBUFRADIX;
		hmp->vchain.delete_tid = HAMMER2_MAX_TID;

		hammer2_chain_core_alloc(NULL, &hmp->vchain, NULL);
		/* hmp->vchain.u.xxx is left NULL */
		/*
		 * fchain setup.  fchain.data is embedded.
		 * fchain.refs is initialized and will never drop to 0.
		 *
		 * The data is not used but needs to be initialized to
		 * pass assertion muster.  We use this chain primarily
		 * as a placeholder for the freemap's top-level RBTREE
		 * so it does not interfere with the volume's topology
		 * RBTREE.
		 */
		hmp->fchain.hmp = hmp;
		hmp->fchain.refs = 1;
		hmp->fchain.data = (void *)&hmp->voldata.freemap_blockset;
		hmp->fchain.bref.type = HAMMER2_BREF_TYPE_FREEMAP;
		hmp->fchain.bref.data_off = 0 | HAMMER2_PBUFRADIX;
		hmp->fchain.bref.methods =
			HAMMER2_ENC_CHECK(HAMMER2_CHECK_FREEMAP) |
			HAMMER2_ENC_COMP(HAMMER2_COMP_NONE);
		hmp->fchain.delete_tid = HAMMER2_MAX_TID;

		hammer2_chain_core_alloc(NULL, &hmp->fchain, NULL);
		/* hmp->fchain.u.xxx is left NULL */
		/*
		 * Install the volume header and initialize fields from
		 * voldata.
		 */
		error = hammer2_install_volume_header(hmp);
		if (error) {
			hammer2_vfs_unmount_hmp1(mp, hmp);
			hammer2_vfs_unmount_hmp2(mp, hmp);
			hammer2_vfs_unmount(mp, MNT_FORCE);
			return error;
		}

		hmp->vchain.bref.mirror_tid = hmp->voldata.mirror_tid;
		hmp->vchain.modify_tid = hmp->voldata.mirror_tid;
		hmp->fchain.bref.mirror_tid = hmp->voldata.freemap_tid;
		hmp->fchain.modify_tid = hmp->voldata.freemap_tid;
		/*
		 * First locate the super-root inode, which is key 0
		 * relative to the volume header's blockset.
		 *
		 * Then locate the root inode by scanning the directory
		 * keyspace represented by the label.
		 */
		parent = hammer2_chain_lookup_init(&hmp->vchain, 0);
		schain = hammer2_chain_lookup(&parent, &key_dummy,
				      HAMMER2_SROOT_KEY, HAMMER2_SROOT_KEY,
				      &cache_index, 0);
		hammer2_chain_lookup_done(parent);
		if (schain == NULL) {
			kprintf("hammer2_mount: invalid super-root\n");
			hammer2_vfs_unmount_hmp1(mp, hmp);
			hammer2_vfs_unmount_hmp2(mp, hmp);
			hammer2_vfs_unmount(mp, MNT_FORCE);
			return EINVAL;
		}
		/*
		 * NOTE: inode_get sucks up schain's lock.
		 */
		atomic_set_int(&schain->flags, HAMMER2_CHAIN_PFSROOT);
		hmp->sroot = hammer2_inode_get(NULL, NULL, schain);
		hammer2_inode_ref(hmp->sroot);
		hammer2_inode_unlock_ex(hmp->sroot, schain);
		schain = NULL;
		/* leave hmp->sroot with one ref */

		if ((mp->mnt_flag & MNT_RDONLY) == 0) {
			error = hammer2_recovery(hmp);
			/* XXX do something with error */
		}
	}
	/*
	 * Block device opened successfully, finish initializing the
	 * mount structure.
	 *
	 * From this point on we have to call hammer2_unmount() on failure.
	 */
	pmp = kmalloc(sizeof(*pmp), M_HAMMER2, M_WAITOK | M_ZERO);

	kmalloc_create(&pmp->minode, "HAMMER2-inodes");
	kmalloc_create(&pmp->mmsg, "HAMMER2-pfsmsg");
	lockinit(&pmp->lock, "pfslk", 0, 0);
	spin_init(&pmp->inum_spin);
	RB_INIT(&pmp->inum_tree);

	kdmsg_iocom_init(&pmp->iocom, pmp,
			 KDMSG_IOCOMF_AUTOCONN |
			 KDMSG_IOCOMF_AUTOSPAN |
			 KDMSG_IOCOMF_AUTOCIRC,
			 pmp->mmsg, hammer2_rcvdmsg);

	ccms_domain_init(&pmp->ccms_dom);
	++hmp->pmp_count;
	lockmgr(&hammer2_mntlk, LK_RELEASE);
	kprintf("hammer2_mount hmp=%p pmp=%p pmpcnt=%d\n",
		hmp, pmp, hmp->pmp_count);
	mp->mnt_flag = MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;	/* all entry pts are SMP */
	mp->mnt_kern_flag |= MNTK_THR_SYNC;	/* new vsyncscan semantics */

	/*
	 * Required mount structure initializations
	 */
	mp->mnt_stat.f_iosize = HAMMER2_PBUFSIZE;
	mp->mnt_stat.f_bsize = HAMMER2_PBUFSIZE;

	mp->mnt_vstat.f_frsize = HAMMER2_PBUFSIZE;
	mp->mnt_vstat.f_bsize = HAMMER2_PBUFSIZE;

	mp->mnt_iosize_max = MAXPHYS;
	mp->mnt_data = (qaddr_t)pmp;
	/*
	 * Lookup mount point under the media-localized super-root.
	 * The dirhash gives the base key; collisions within the
	 * [lhc, lhc + HAMMER2_DIRHASH_LOMASK] range are resolved by
	 * comparing the stored filename against the label.
	 */
	parent = hammer2_inode_lock_ex(hmp->sroot);
	lhc = hammer2_dirhash(label, strlen(label));
	rchain = hammer2_chain_lookup(&parent, &key_next,
				      lhc, lhc + HAMMER2_DIRHASH_LOMASK,
				      &cache_index, 0);
	while (rchain) {
		if (rchain->bref.type == HAMMER2_BREF_TYPE_INODE &&
		    strcmp(label, rchain->data->ipdata.filename) == 0) {
			break;
		}
		rchain = hammer2_chain_next(&parent, rchain, &key_next,
					    key_next,
					    lhc + HAMMER2_DIRHASH_LOMASK,
					    &cache_index, 0);
	}
	hammer2_inode_unlock_ex(hmp->sroot, parent);
	if (rchain == NULL) {
		kprintf("hammer2_mount: PFS label not found\n");
		hammer2_vfs_unmount_hmp1(mp, hmp);
		hammer2_vfs_unmount_hmp2(mp, hmp);
		hammer2_vfs_unmount(mp, MNT_FORCE);
		return ENOENT;
	}
	if (rchain->flags & HAMMER2_CHAIN_MOUNTED) {
		hammer2_chain_unlock(rchain);
		kprintf("hammer2_mount: PFS label already mounted!\n");
		hammer2_vfs_unmount_hmp1(mp, hmp);
		hammer2_vfs_unmount_hmp2(mp, hmp);
		hammer2_vfs_unmount(mp, MNT_FORCE);
		return EBUSY;
	}
	if (rchain->flags & HAMMER2_CHAIN_RECYCLE) {
		kprintf("hammer2_mount: PFS label currently recycling\n");
		hammer2_vfs_unmount_hmp1(mp, hmp);
		hammer2_vfs_unmount_hmp2(mp, hmp);
		hammer2_vfs_unmount(mp, MNT_FORCE);
		return EBUSY;
	}
	/*
	 * After this point hammer2_vfs_unmount() has visibility on hmp
	 * and manual hmp1/hmp2 calls are not needed on fatal errors.
	 */
	atomic_set_int(&rchain->flags, HAMMER2_CHAIN_MOUNTED);

	/*
	 * NOTE: *_get() integrates chain's lock into the inode lock.
	 */
	hammer2_chain_ref(rchain);		/* for pmp->rchain */
	pmp->cluster.nchains = 1;
	pmp->cluster.chains[0] = rchain;
	pmp->iroot = hammer2_inode_get(pmp, NULL, rchain);
	hammer2_inode_ref(pmp->iroot);		/* ref for pmp->iroot */

	KKASSERT(rchain->pmp == NULL);		/* tracking pmp for rchain */
	rchain->pmp = pmp;

	hammer2_inode_unlock_ex(pmp->iroot, rchain);

	kprintf("iroot %p\n", pmp->iroot);
	/*
	 * The logical file buffer bio write thread handles things
	 * like physical block assignment and compression.
	 */
	mtx_init(&pmp->wthread_mtx);
	bioq_init(&pmp->wthread_bioq);
	pmp->wthread_destroy = 0;
	lwkt_create(hammer2_write_thread, pmp,
		    &pmp->wthread_td, NULL, 0, -1, "hwrite-%s", label);
	/*
	 * Ref the cluster management messaging descriptor.  The mount
	 * program deals with the other end of the communications pipe.
	 */
	fp = holdfp(curproc->p_fd, info.cluster_fd, -1);
	if (fp == NULL) {
		kprintf("hammer2_mount: bad cluster_fd!\n");
		hammer2_vfs_unmount(mp, MNT_FORCE);
		return EBADF;
	}
	hammer2_cluster_reconnect(pmp, fp);
	/*
	 * With the cluster operational install ihidden.
	 */
	hammer2_inode_install_hidden(pmp);

	/*
	 * Finish setup
	 */
	vfs_add_vnodeops(mp, &hammer2_vnode_vops, &mp->mnt_vn_norm_ops);
	vfs_add_vnodeops(mp, &hammer2_spec_vops, &mp->mnt_vn_spec_ops);
	vfs_add_vnodeops(mp, &hammer2_fifo_vops, &mp->mnt_vn_fifo_ops);

	copyinstr(info.volume, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, &size);
	bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	bzero(mp->mnt_stat.f_mntonname, sizeof(mp->mnt_stat.f_mntonname));
	copyinstr(path, mp->mnt_stat.f_mntonname,
		  sizeof(mp->mnt_stat.f_mntonname) - 1,
		  &size);

	/*
	 * Initial statfs to prime mnt_stat.
	 */
	hammer2_vfs_statfs(mp, &mp->mnt_stat, cred);

	return 0;
}
/*
 * Handle bioq for strategy write
 */
static
void
hammer2_write_thread(void *arg)
{
	hammer2_pfsmount_t *pmp;
	struct bio *bio;
	struct buf *bp;
	hammer2_trans_t trans;
	hammer2_inode_t *ip;
	hammer2_chain_t *parent;
	hammer2_chain_t **parentp;
	hammer2_inode_data_t *ipdata;
	hammer2_key_t lbase;
	int lblksize;
	int pblksize;
	int error;

	pmp = arg;

	mtx_lock(&pmp->wthread_mtx);
	while (pmp->wthread_destroy == 0) {
		if (bioq_first(&pmp->wthread_bioq) == NULL) {
			mtxsleep(&pmp->wthread_bioq, &pmp->wthread_mtx,
				 0, "h2bioqw", 0);
		}
		parent = NULL;
		parentp = &parent;
		hammer2_trans_init(&trans, pmp, NULL, HAMMER2_TRANS_BUFCACHE);

		while ((bio = bioq_takefirst(&pmp->wthread_bioq)) != NULL) {
			/*
			 * dummy bio for synchronization.  The transaction
			 * must be reinitialized.
			 */
			if (bio->bio_buf == NULL) {
				bio->bio_flags |= BIO_DONE;
				wakeup(bio);
				hammer2_trans_done(&trans);
				hammer2_trans_init(&trans, pmp, NULL,
						   HAMMER2_TRANS_BUFCACHE);
				continue;
			}

			/*
			 * else normal bio processing
			 */
			mtx_unlock(&pmp->wthread_mtx);

			hammer2_lwinprog_drop(pmp);

			error = 0;
			bp = bio->bio_buf;
			ip = VTOI(bp->b_vp);
			/*
			 * Inode is modified, flush size and mtime changes
			 * to ensure that the file size remains consistent
			 * with the buffers being flushed.
			 */
			parent = hammer2_inode_lock_ex(ip);
			if (ip->flags & (HAMMER2_INODE_RESIZED |
					 HAMMER2_INODE_MTIME)) {
				hammer2_inode_fsync(&trans, ip, parentp);
			}
			ipdata = hammer2_chain_modify_ip(&trans, ip,
							 parentp, 0);
			lblksize = hammer2_calc_logical(ip, bio->bio_offset,
							&lbase, NULL);
			pblksize = hammer2_calc_physical(ip, lbase);
			hammer2_write_file_core(bp, &trans, ip, ipdata,
						parentp,
						lbase, IO_ASYNC,
						pblksize, &error);
			hammer2_inode_unlock_ex(ip, parent);
			if (error) {
				kprintf("hammer2: error in buffer write\n");
				bp->b_flags |= B_ERROR;
			}
			biodone(bio);
			mtx_lock(&pmp->wthread_mtx);
		}
		hammer2_trans_done(&trans);
	}
	pmp->wthread_destroy = -1;
	wakeup(&pmp->wthread_destroy);

	mtx_unlock(&pmp->wthread_mtx);
}
void
hammer2_bioq_sync(hammer2_pfsmount_t *pmp)
{
	struct bio sync_bio;

	bzero(&sync_bio, sizeof(sync_bio));	/* dummy with no bio_buf */
	mtx_lock(&pmp->wthread_mtx);
	if (pmp->wthread_destroy == 0) {
		if (TAILQ_EMPTY(&pmp->wthread_bioq.queue)) {
			bioq_insert_tail(&pmp->wthread_bioq, &sync_bio);
			wakeup(&pmp->wthread_bioq);
		} else {
			bioq_insert_tail(&pmp->wthread_bioq, &sync_bio);
		}
		while ((sync_bio.bio_flags & BIO_DONE) == 0)
			mtxsleep(&sync_bio, &pmp->wthread_mtx, 0, "h2bioq", 0);
	}
	mtx_unlock(&pmp->wthread_mtx);
}
/*
 * Return a chain suitable for I/O, creating the chain if necessary
 * and assigning its physical block.
 */
static
hammer2_chain_t *
hammer2_assign_physical(hammer2_trans_t *trans,
			hammer2_inode_t *ip, hammer2_chain_t **parentp,
			hammer2_key_t lbase, int pblksize, int *errorp)
{
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_off_t pbase;
	hammer2_key_t key_dummy;
	int pradix = hammer2_getradix(pblksize);
	int cache_index = -1;

	/*
	 * Locate the chain associated with lbase, return a locked chain.
	 * However, do not instantiate any data reference (which utilizes a
	 * device buffer) because we will be using direct IO via the
	 * logical buffer cache buffer.
	 */
	*errorp = 0;
	KKASSERT(pblksize >= HAMMER2_MIN_ALLOC);

	parent = *parentp;
	hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS); /* extra lock */
	chain = hammer2_chain_lookup(&parent, &key_dummy,
				     lbase, lbase,
				     &cache_index, HAMMER2_LOOKUP_NODATA);
	if (chain == NULL) {
		/*
		 * We found a hole, create a new chain entry.
		 *
		 * NOTE: DATA chains are created without device backing
		 *	 store (nor do we want any).
		 */
		*errorp = hammer2_chain_create(trans, &parent, &chain,
					       lbase, HAMMER2_PBUFRADIX,
					       HAMMER2_BREF_TYPE_DATA,
					       pblksize);
		if (chain == NULL) {
			hammer2_chain_lookup_done(parent);
			panic("hammer2_chain_create: par=%p error=%d\n",
			      parent, *errorp);
		}

		pbase = chain->bref.data_off & ~HAMMER2_OFF_MASK_RADIX;
		/*ip->delta_dcount += pblksize;*/
	} else {
		switch (chain->bref.type) {
		case HAMMER2_BREF_TYPE_INODE:
			/*
			 * The data is embedded in the inode.  The
			 * caller is responsible for marking the inode
			 * modified and copying the data to the embedded
			 * area.
			 */
			pbase = NOOFFSET;
			break;
		case HAMMER2_BREF_TYPE_DATA:
			if (chain->bytes != pblksize) {
				hammer2_chain_resize(trans, ip,
						     parent, &chain,
						     pradix,
						     HAMMER2_MODIFY_OPTDATA);
			}
			hammer2_chain_modify(trans, &chain,
					     HAMMER2_MODIFY_OPTDATA);
			pbase = chain->bref.data_off & ~HAMMER2_OFF_MASK_RADIX;
			break;
		default:
			panic("hammer2_assign_physical: bad type");
			/* NOT REACHED */
			pbase = NOOFFSET;
			break;
		}
	}
	/*
	 * Cleanup.  If chain wound up being the inode (i.e. DIRECTDATA),
	 * we might have to replace *parentp.
	 */
	hammer2_chain_lookup_done(parent);

	if (*parentp != chain &&
	    (*parentp)->core == chain->core) {
		parent = *parentp;
		*parentp = chain;		/* eats lock */
		hammer2_chain_unlock(parent);
		hammer2_chain_lock(chain, 0);	/* need another */
	}
	/* else chain already locked for return */

	return (chain);
}
/*
 * From hammer2_vnops.c.
 * The core write function which determines which path to take
 * depending on compression settings.
 */
static
void
hammer2_write_file_core(struct buf *bp, hammer2_trans_t *trans,
			hammer2_inode_t *ip, hammer2_inode_data_t *ipdata,
			hammer2_chain_t **parentp,
			hammer2_key_t lbase, int ioflag, int pblksize,
			int *errorp)
{
	hammer2_chain_t *chain;

	switch(HAMMER2_DEC_COMP(ipdata->comp_algo)) {
	case HAMMER2_COMP_NONE:
		/*
		 * We have to assign physical storage to the buffer
		 * we intend to dirty or write now to avoid deadlocks
		 * in the strategy code later.
		 *
		 * This can return NOOFFSET for inode-embedded data.
		 * The strategy code will take care of it in that case.
		 */
		chain = hammer2_assign_physical(trans, ip, parentp,
						lbase, pblksize, errorp);
		hammer2_write_bp(chain, bp, ioflag, pblksize, errorp);
		if (chain)
			hammer2_chain_unlock(chain);
		break;
	case HAMMER2_COMP_AUTOZERO:
		/*
		 * Check for zero-fill only
		 */
		hammer2_zero_check_and_write(bp, trans, ip,
					     ipdata, parentp, lbase,
					     ioflag, pblksize, errorp);
		break;
	case HAMMER2_COMP_LZ4:
	case HAMMER2_COMP_ZLIB:
	default:
		/*
		 * Check for zero-fill and attempt compression.
		 */
		hammer2_compress_and_write(bp, trans, ip,
					   ipdata, parentp,
					   lbase, ioflag,
					   pblksize, errorp,
					   ipdata->comp_algo);
		break;
	}
	ipdata = &ip->chain->data->ipdata;	/* reload */
}
/*
 * From hammer2_vnops.c
 * Generic function that performs the compression in the compressed
 * write path.  The compression algorithm is determined by the settings
 * obtained from the inode.
 */
static
void
hammer2_compress_and_write(struct buf *bp, hammer2_trans_t *trans,
	hammer2_inode_t *ip, hammer2_inode_data_t *ipdata,
	hammer2_chain_t **parentp,
	hammer2_key_t lbase, int ioflag, int pblksize,
	int *errorp, int comp_algo)
{
	hammer2_chain_t *chain;
	int comp_size;
	int comp_block_size;
	char *comp_buffer;

	if (test_block_zeros(bp->b_data, pblksize)) {
		zero_write(bp, trans, ip, ipdata, parentp, lbase, errorp);
		return;
	}

	comp_size = 0;
	comp_buffer = NULL;

	KKASSERT(pblksize / 2 <= 32768);
	if (ip->comp_heuristic < 8 || (ip->comp_heuristic & 7) == 0) {
		z_stream strm_compress;
		int comp_level;
		int ret;

		switch(HAMMER2_DEC_COMP(comp_algo)) {
		case HAMMER2_COMP_LZ4:
			comp_buffer = objcache_get(cache_buffer_write,
						   M_INTWAIT);
			comp_size = LZ4_compress_limitedOutput(
					bp->b_data,
					&comp_buffer[sizeof(int)],
					pblksize,
					pblksize / 2 - sizeof(int));
			/*
			 * We need to prefix with the size, LZ4
			 * doesn't do it for us.  Add the related
			 * overhead.
			 */
			*(int *)comp_buffer = comp_size;
			if (comp_size)
				comp_size += sizeof(int);
			break;
		case HAMMER2_COMP_ZLIB:
			comp_level = HAMMER2_DEC_LEVEL(comp_algo);
			if (comp_level == 0)
				comp_level = 6;	/* default zlib compression */
			else if (comp_level < 6)
				comp_level = 6;
			else if (comp_level > 9)
				comp_level = 9;
			ret = deflateInit(&strm_compress, comp_level);
			if (ret != Z_OK) {
				kprintf("HAMMER2 ZLIB: fatal error "
					"on deflateInit.\n");
			}

			comp_buffer = objcache_get(cache_buffer_write,
						   M_INTWAIT);
			strm_compress.next_in = bp->b_data;
			strm_compress.avail_in = pblksize;
			strm_compress.next_out = comp_buffer;
			strm_compress.avail_out = pblksize / 2;
			ret = deflate(&strm_compress, Z_FINISH);
			if (ret == Z_STREAM_END) {
				comp_size = pblksize / 2 -
					    strm_compress.avail_out;
			} else {
				comp_size = 0;
			}
			ret = deflateEnd(&strm_compress);
			break;
		default:
			kprintf("Error: Unknown compression method.\n");
			kprintf("Comp_method = %d.\n", comp_algo);
			break;
		}
	}
	if (comp_size == 0) {
		/*
		 * compression failed or turned off
		 */
		comp_block_size = pblksize;	/* safety */
		if (++ip->comp_heuristic > 128)
			ip->comp_heuristic = 8;
	} else {
		/*
		 * compression succeeded
		 */
		ip->comp_heuristic = 0;
		if (comp_size <= 1024) {
			comp_block_size = 1024;
		} else if (comp_size <= 2048) {
			comp_block_size = 2048;
		} else if (comp_size <= 4096) {
			comp_block_size = 4096;
		} else if (comp_size <= 8192) {
			comp_block_size = 8192;
		} else if (comp_size <= 16384) {
			comp_block_size = 16384;
		} else if (comp_size <= 32768) {
			comp_block_size = 32768;
		} else {
			panic("hammer2: WRITE PATH: "
			      "Weird comp_size value.");
			/* NOT REACHED */
			comp_block_size = pblksize;
		}
	}
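	/*
	 * Illustrative note (not from the original source): the ladder
	 * above simply rounds comp_size up to the next power-of-two
	 * physical block size in the range [1024, 32768].  An equivalent
	 * sketch:
	 *
	 *	comp_block_size = 1024;
	 *	while (comp_block_size < comp_size)
	 *		comp_block_size <<= 1;
	 *
	 * The panic covers the impossible case comp_size > 32768, which
	 * the KKASSERT(pblksize / 2 <= 32768) above already excludes
	 * since both compressors limit output to pblksize / 2.
	 */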
	chain = hammer2_assign_physical(trans, ip, parentp,
					lbase, comp_block_size,
					errorp);
	ipdata = &ip->chain->data->ipdata;	/* RELOAD */

	if (*errorp) {
		kprintf("WRITE PATH: An error occurred while "
			"assigning physical space.\n");
		KKASSERT(chain == NULL);
	} else {
		/* Get device offset */
		hammer2_io_t *dio;
		char *bdata;
		int temp_check;

		KKASSERT(chain->flags & HAMMER2_CHAIN_MODIFIED);

		switch(chain->bref.type) {
		case HAMMER2_BREF_TYPE_INODE:
			KKASSERT(chain->data->ipdata.op_flags &
				 HAMMER2_OPFLAG_DIRECTDATA);
			KKASSERT(bp->b_loffset == 0);
			bcopy(bp->b_data, chain->data->ipdata.u.data,
			      HAMMER2_EMBEDDED_BYTES);
			break;
		case HAMMER2_BREF_TYPE_DATA:
			temp_check = HAMMER2_DEC_CHECK(chain->bref.methods);

			/*
			 * Optimize out the read-before-write
			 * if possible.
			 */
			*errorp = hammer2_io_newnz(chain->hmp,
						   chain->bref.data_off,
						   chain->bytes,
						   &dio);
			if (*errorp) {
				hammer2_io_brelse(&dio);
				kprintf("hammer2: WRITE PATH: "
					"dbp bread error\n");
				break;
			}
			bdata = hammer2_io_data(dio, chain->bref.data_off);

			/*
			 * When loading the block make sure we don't
			 * leave garbage after the compressed data.
			 */
			if (comp_size) {
				chain->bref.methods =
					HAMMER2_ENC_COMP(comp_algo) +
					HAMMER2_ENC_CHECK(temp_check);
				bcopy(comp_buffer, bdata, comp_size);
				if (comp_size != comp_block_size) {
					bzero(bdata + comp_size,
					      comp_block_size - comp_size);
				}
			} else {
				chain->bref.methods =
					HAMMER2_ENC_COMP(
						HAMMER2_COMP_NONE) +
					HAMMER2_ENC_CHECK(temp_check);
				bcopy(bp->b_data, bdata, pblksize);
			}
			/*
			 * Device buffer is now valid, chain is no
			 * longer in the initial state.
			 */
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);

			/* Now write the related bdp. */
			if (ioflag & IO_SYNC) {
				/*
				 * Synchronous I/O requested.
				 */
				hammer2_io_bwrite(&dio);
			/*
			} else if ((ioflag & IO_DIRECT) &&
				   loff + n == pblksize) {
				hammer2_io_bdwrite(&dio);
			*/
			} else if (ioflag & IO_ASYNC) {
				hammer2_io_bawrite(&dio);
			} else {
				hammer2_io_bdwrite(&dio);
			}
			break;
		default:
			panic("hammer2_write_bp: bad chain type %d\n",
			      chain->bref.type);
			/* NOT REACHED */
			break;
		}
		hammer2_chain_unlock(chain);
	}
	if (comp_buffer)
		objcache_put(cache_buffer_write, comp_buffer);
}
/*
 * Function that performs zero-checking and writing without compression;
 * it corresponds to the default zero-checking path.
 */
static
void
hammer2_zero_check_and_write(struct buf *bp, hammer2_trans_t *trans,
	hammer2_inode_t *ip, hammer2_inode_data_t *ipdata,
	hammer2_chain_t **parentp,
	hammer2_key_t lbase, int ioflag, int pblksize, int *errorp)
{
	hammer2_chain_t *chain;

	if (test_block_zeros(bp->b_data, pblksize)) {
		zero_write(bp, trans, ip, ipdata, parentp, lbase, errorp);
	} else {
		chain = hammer2_assign_physical(trans, ip, parentp,
						lbase, pblksize, errorp);
		hammer2_write_bp(chain, bp, ioflag, pblksize, errorp);
		if (chain)
			hammer2_chain_unlock(chain);
	}
}
/*
 * A function to test whether a block of data contains only zeros;
 * returns TRUE (non-zero) if the block is all zeros.  The scan runs
 * in long-sized strides, which is safe because block sizes are always
 * multiples of sizeof(long).
 */
static
int
test_block_zeros(const char *buf, size_t bytes)
{
	size_t i;

	for (i = 0; i < bytes; i += sizeof(long)) {
		if (*(const long *)(buf + i) != 0)
			return (0);
	}
	return (1);
}
/*
 * Function to "write" a block that contains only zeros.
 */
static
void
zero_write(struct buf *bp, hammer2_trans_t *trans, hammer2_inode_t *ip,
	hammer2_inode_data_t *ipdata, hammer2_chain_t **parentp,
	hammer2_key_t lbase, int *errorp __unused)
{
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_key_t key_dummy;
	int cache_index = -1;

	parent = hammer2_chain_lookup_init(*parentp, 0);

	chain = hammer2_chain_lookup(&parent, &key_dummy, lbase, lbase,
				     &cache_index, HAMMER2_LOOKUP_NODATA);
	if (chain) {
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
			bzero(chain->data->ipdata.u.data,
			      HAMMER2_EMBEDDED_BYTES);
		} else {
			hammer2_chain_delete(trans, chain, 0);
		}
		hammer2_chain_unlock(chain);
	}
	hammer2_chain_lookup_done(parent);
}
/*
 * Function to write the data as it is, without performing any sort of
 * compression.  This function is used in the path without compression
 * and in the default zero-checking path.
 */
static
void
hammer2_write_bp(hammer2_chain_t *chain, struct buf *bp, int ioflag,
	int pblksize, int *errorp)
{
	hammer2_io_t *dio;
	char *bdata;
	int error;
	int temp_check = HAMMER2_DEC_CHECK(chain->bref.methods);

	KKASSERT(chain->flags & HAMMER2_CHAIN_MODIFIED);

	switch(chain->bref.type) {
	case HAMMER2_BREF_TYPE_INODE:
		KKASSERT(chain->data->ipdata.op_flags &
			 HAMMER2_OPFLAG_DIRECTDATA);
		KKASSERT(bp->b_loffset == 0);
		bcopy(bp->b_data, chain->data->ipdata.u.data,
		      HAMMER2_EMBEDDED_BYTES);
		error = 0;
		break;
	case HAMMER2_BREF_TYPE_DATA:
		error = hammer2_io_newnz(chain->hmp, chain->bref.data_off,
					 chain->bytes, &dio);
		if (error) {
			hammer2_io_bqrelse(&dio);
			kprintf("hammer2: WRITE PATH: dbp bread error\n");
			break;
		}
		bdata = hammer2_io_data(dio, chain->bref.data_off);

		chain->bref.methods = HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
				      HAMMER2_ENC_CHECK(temp_check);
		bcopy(bp->b_data, bdata, chain->bytes);

		/*
		 * Device buffer is now valid, chain is no
		 * longer in the initial state.
		 */
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);

		if (ioflag & IO_SYNC) {
			/*
			 * Synchronous I/O requested.
			 */
			hammer2_io_bwrite(&dio);
		/*
		} else if ((ioflag & IO_DIRECT) && loff + n == pblksize) {
			hammer2_io_bdwrite(&dio);
		*/
		} else if (ioflag & IO_ASYNC) {
			hammer2_io_bawrite(&dio);
		} else {
			hammer2_io_bdwrite(&dio);
		}
		break;
	default:
		panic("hammer2_write_bp: bad chain type %d\n",
		      chain->bref.type);
		/* NOT REACHED */
		break;
	}
	*errorp = error;
}
static
int
hammer2_remount(hammer2_mount_t *hmp, struct mount *mp, char *path,
		struct vnode *devvp, struct ucred *cred)
{
	int error;

	error = 0;
	if (hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
		error = hammer2_recovery(hmp);
	}
	return error;
}
static
int
hammer2_vfs_unmount(struct mount *mp, int mntflags)
{
	hammer2_pfsmount_t *pmp;
	hammer2_mount_t *hmp;
	hammer2_chain_t *rchain;
	int flags;
	int error = 0;
	int i;

	pmp = MPTOPMP(mp);
	if (pmp == NULL)
		return (0);

	ccms_domain_uninit(&pmp->ccms_dom);
	kdmsg_iocom_uninit(&pmp->iocom);	/* XXX chain dependency */

	lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);

	/*
	 * If mount initialization proceeded far enough we must flush
	 * all vnodes.
	 */
	if (mntflags & MNT_FORCE)
		flags = FORCECLOSE;
	else
		flags = 0;
	if (pmp->iroot) {
		error = vflush(mp, 0, flags);
		if (error)
			goto failed;
	}
	/*
	 * Shut down the logical-buffer write thread.
	 */
	if (pmp->wthread_td) {
		mtx_lock(&pmp->wthread_mtx);
		pmp->wthread_destroy = 1;
		wakeup(&pmp->wthread_bioq);
		while (pmp->wthread_destroy != -1) {
			mtxsleep(&pmp->wthread_destroy,
				 &pmp->wthread_mtx, 0,
				 "umount-sleep", 0);
		}
		mtx_unlock(&pmp->wthread_mtx);
		pmp->wthread_td = NULL;
	}

	/*
	 * Cleanup our reference on ihidden.
	 */
	if (pmp->ihidden) {
		hammer2_inode_drop(pmp->ihidden);
		pmp->ihidden = NULL;
	}
	/*
	 * Cleanup our reference on iroot.  iroot is (should) not be needed
	 * by the flush code.
	 */
	if (pmp->iroot) {
#if REPORT_REFS_ERRORS
		if (pmp->iroot->refs != 1)
			kprintf("PMP->IROOT %p REFS WRONG %d\n",
				pmp->iroot, pmp->iroot->refs);
#else
		KKASSERT(pmp->iroot->refs == 1);
#endif
		/* ref for pmp->iroot */
		hammer2_inode_drop(pmp->iroot);
		pmp->iroot = NULL;
	}
	for (i = 0; i < pmp->cluster.nchains; ++i) {
		hmp = pmp->cluster.chains[i]->hmp;

		hammer2_vfs_unmount_hmp1(mp, hmp);

		rchain = pmp->cluster.chains[i];
		if (rchain) {
			atomic_clear_int(&rchain->flags,
					 HAMMER2_CHAIN_MOUNTED);
#if REPORT_REFS_ERRORS
			if (rchain->refs != 1)
				kprintf("PMP->RCHAIN %p REFS WRONG %d\n",
					rchain, rchain->refs);
#else
			KKASSERT(rchain->refs == 1);
#endif
			hammer2_chain_drop(rchain);
			pmp->cluster.chains[i] = NULL;
		}

		hammer2_vfs_unmount_hmp2(mp, hmp);
	}

	mp->mnt_data = NULL;

	kmalloc_destroy(&pmp->mmsg);
	kmalloc_destroy(&pmp->minode);

	kfree(pmp, M_HAMMER2);

failed:
	lockmgr(&hammer2_mntlk, LK_RELEASE);

	return (error);
}
static
void
hammer2_vfs_unmount_hmp1(struct mount *mp, hammer2_mount_t *hmp)
{
	hammer2_mount_exlock(hmp);
	--hmp->pmp_count;

	kprintf("hammer2_unmount hmp=%p pmpcnt=%d\n", hmp, hmp->pmp_count);

	/*
	 * Flush any left over chains.  The voldata lock is only used
	 * to synchronize against HAMMER2_CHAIN_MODIFIED_AUX.
	 *
	 * Flush twice to ensure that the freemap is completely
	 * synchronized.  If we only do it once the next mount's
	 * recovery scan will have to do some fixups (which isn't
	 * bad, but we don't want it to have to do it except when
	 * recovering from a crash).
	 */
	hammer2_voldata_lock(hmp);
	if (((hmp->vchain.flags | hmp->fchain.flags) &
	     HAMMER2_CHAIN_MODIFIED) ||
	    hmp->vchain.core->update_hi > hmp->voldata.mirror_tid ||
	    hmp->fchain.core->update_hi > hmp->voldata.freemap_tid) {
		hammer2_voldata_unlock(hmp, 0);
		hammer2_vfs_sync(mp, MNT_WAIT);
		/*hammer2_vfs_sync(mp, MNT_WAIT);*/
	} else {
		hammer2_voldata_unlock(hmp, 0);
	}
	if (hmp->pmp_count == 0) {
		if (((hmp->vchain.flags | hmp->fchain.flags) &
		     HAMMER2_CHAIN_MODIFIED) ||
		    (hmp->vchain.core->update_hi >
		     hmp->voldata.mirror_tid) ||
		    (hmp->fchain.core->update_hi >
		     hmp->voldata.freemap_tid)) {
			kprintf("hammer2_unmount: chains left over "
				"after final sync\n");
			kprintf("    vchain %08x update_hi %jx/%jx\n",
				hmp->vchain.flags,
				hmp->voldata.mirror_tid,
				hmp->vchain.core->update_hi);
			kprintf("    fchain %08x update_hi %jx/%jx\n",
				hmp->fchain.flags,
				hmp->voldata.freemap_tid,
				hmp->fchain.core->update_hi);

			if (hammer2_debug & 0x0010)
				Debugger("entered debugger");
		}
	}
}
static
void
hammer2_vfs_unmount_hmp2(struct mount *mp, hammer2_mount_t *hmp)
{
	struct vnode *devvp;
	int dumpcnt;
	int ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);

	/*
	 * If no PFS's left drop the master hammer2_mount for the
	 * device.
	 */
	if (hmp->pmp_count == 0) {
		if (hmp->sroot) {
			hammer2_inode_drop(hmp->sroot);
			hmp->sroot = NULL;
		}

		/*
		 * Finish up with the device vnode
		 */
		if ((devvp = hmp->devvp) != NULL) {
			vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
			vinvalbuf(devvp, (ronly ? 0 : V_SAVE), 0, 0);
			hmp->devvp = NULL;
			VOP_CLOSE(devvp,
				  (ronly ? FREAD : FREAD|FWRITE));
			vn_unlock(devvp);
			vrele(devvp);
			devvp = NULL;
		}
		/*
		 * Final drop of embedded freemap root chain to
		 * clean up fchain.core (fchain structure is not
		 * flagged ALLOCATED so it is cleaned out and then
		 * left to rot).
		 */
		hammer2_chain_drop(&hmp->fchain);

		/*
		 * Final drop of embedded volume root chain to clean
		 * up vchain.core (vchain structure is not flagged
		 * ALLOCATED so it is cleaned out and then left to
		 * rot).
		 */
		dumpcnt = 50;
		hammer2_dump_chain(&hmp->vchain, 0, &dumpcnt);
		dumpcnt = 50;
		hammer2_dump_chain(&hmp->fchain, 0, &dumpcnt);
		hammer2_mount_unlock(hmp);
		hammer2_chain_drop(&hmp->vchain);

		hammer2_io_cleanup(hmp, &hmp->iotree);
		if (hmp->iofree_count) {
			kprintf("io_cleanup: %d I/O's left hanging\n",
				hmp->iofree_count);
		}

		TAILQ_REMOVE(&hammer2_mntlist, hmp, mntentry);
		kmalloc_destroy(&hmp->mchain);
		kfree(hmp, M_HAMMER2);
	} else {
		hammer2_mount_unlock(hmp);
	}
}
static
int
hammer2_vfs_vget(struct mount *mp, struct vnode *dvp,
		 ino_t ino, struct vnode **vpp)
{
	kprintf("hammer2_vget\n");
	return (EOPNOTSUPP);
}
static
int
hammer2_vfs_root(struct mount *mp, struct vnode **vpp)
{
	hammer2_pfsmount_t *pmp;
	hammer2_chain_t *parent;
	struct vnode *vp;
	int error;

	pmp = MPTOPMP(mp);
	if (pmp->iroot == NULL) {
		*vpp = NULL;
		error = EINVAL;
	} else {
		parent = hammer2_inode_lock_sh(pmp->iroot);
		vp = hammer2_igetv(pmp->iroot, &error);
		hammer2_inode_unlock_sh(pmp->iroot, parent);
		*vpp = vp;
		if (vp == NULL)
			kprintf("vnodefail\n");
	}

	return (error);
}
/*
 * XXX incorporate ipdata->inode_quota and data_quota
 */
static
int
hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
{
	hammer2_pfsmount_t *pmp;
	hammer2_mount_t *hmp;

	pmp = MPTOPMP(mp);
	KKASSERT(pmp->cluster.nchains >= 1);
	hmp = pmp->cluster.chains[0]->hmp;	/* XXX */

	mp->mnt_stat.f_files = pmp->inode_count;
	mp->mnt_stat.f_ffree = 0;
	mp->mnt_stat.f_blocks = hmp->voldata.allocator_size / HAMMER2_PBUFSIZE;
	mp->mnt_stat.f_bfree = hmp->voldata.allocator_free / HAMMER2_PBUFSIZE;
	mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree;

	*sbp = mp->mnt_stat;
	return (0);
}
static
int
hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
{
	hammer2_pfsmount_t *pmp;
	hammer2_mount_t *hmp;

	pmp = MPTOPMP(mp);
	KKASSERT(pmp->cluster.nchains >= 1);
	hmp = pmp->cluster.chains[0]->hmp;	/* XXX */

	mp->mnt_vstat.f_bsize = HAMMER2_PBUFSIZE;
	mp->mnt_vstat.f_files = pmp->inode_count;
	mp->mnt_vstat.f_ffree = 0;
	mp->mnt_vstat.f_blocks = hmp->voldata.allocator_size / HAMMER2_PBUFSIZE;
	mp->mnt_vstat.f_bfree = hmp->voldata.allocator_free / HAMMER2_PBUFSIZE;
	mp->mnt_vstat.f_bavail = mp->mnt_vstat.f_bfree;

	*sbp = mp->mnt_vstat;
	return (0);
}
/*
 * Mount-time recovery (RW mounts)
 *
 * Updates to the free block table are allowed to lag flushes by one
 * transaction.  In case of a crash, then on a fresh mount we must do an
 * incremental scan of transaction id voldata.mirror_tid and make sure the
 * related blocks have been marked allocated.
 */
struct hammer2_recovery_elm {
	TAILQ_ENTRY(hammer2_recovery_elm) entry;
	hammer2_chain_t *chain;
};

TAILQ_HEAD(hammer2_recovery_list, hammer2_recovery_elm);

static int hammer2_recovery_scan(hammer2_trans_t *trans, hammer2_mount_t *hmp,
			hammer2_chain_t *parent,
			struct hammer2_recovery_list *list, int depth);

#define HAMMER2_RECOVERY_MAXDEPTH	10
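/*
 * Illustrative note (not from the original source): the recovery scan
 * below is a depth-limited recursive walk.  Subtrees deeper than
 * HAMMER2_RECOVERY_MAXDEPTH are not descended immediately; instead the
 * chain is referenced and queued on a hammer2_recovery_list, and the
 * loop in hammer2_recovery() pops each deferred chain and restarts the
 * scan from depth 0.  This bounds kernel stack use while still covering
 * arbitrarily deep topologies.
 */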
static
int
hammer2_recovery(hammer2_mount_t *hmp)
{
	hammer2_trans_t trans;
	struct hammer2_recovery_list list;
	struct hammer2_recovery_elm *elm;
	hammer2_chain_t *parent;
	int error;
	int cumulative_error = 0;

	hammer2_trans_init(&trans, NULL, hmp, 0);

	TAILQ_INIT(&list);
	parent = hammer2_chain_lookup_init(&hmp->vchain, 0);
	cumulative_error = hammer2_recovery_scan(&trans, hmp, parent, &list, 0);
	hammer2_chain_lookup_done(parent);

	while ((elm = TAILQ_FIRST(&list)) != NULL) {
		TAILQ_REMOVE(&list, elm, entry);
		parent = elm->chain;
		kfree(elm, M_HAMMER2);

		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS |
					   HAMMER2_RESOLVE_NOREF);
		error = hammer2_recovery_scan(&trans, hmp, parent, &list, 0);
		hammer2_chain_unlock(parent);
		if (error)
			cumulative_error = error;
	}
	hammer2_trans_done(&trans);

	return cumulative_error;
}
static
int
hammer2_recovery_scan(hammer2_trans_t *trans, hammer2_mount_t *hmp,
		      hammer2_chain_t *parent,
		      struct hammer2_recovery_list *list, int depth)
{
	hammer2_chain_t *chain;
	int cache_index;
	int cumulative_error = 0;
	int error;

	/*
	 * Defer operation if depth limit reached.
	 */
	if (depth >= HAMMER2_RECOVERY_MAXDEPTH) {
		struct hammer2_recovery_elm *elm;

		elm = kmalloc(sizeof(*elm), M_HAMMER2, M_ZERO | M_WAITOK);
		elm->chain = parent;
		hammer2_chain_ref(parent);
		TAILQ_INSERT_TAIL(list, elm, entry);
		/* unlocked by caller */
		return (0);
	}

	/*
	 * Adjust freemap to ensure that the block(s) are marked allocated.
	 */
	if (parent->bref.type != HAMMER2_BREF_TYPE_VOLUME) {
		hammer2_freemap_adjust(trans, hmp, &parent->bref,
				       HAMMER2_FREEMAP_DORECOVER);
	}
	/*
	 * Check type for recursive scan
	 */
	switch(parent->bref.type) {
	case HAMMER2_BREF_TYPE_VOLUME:
		/* data already instantiated */
		break;
	case HAMMER2_BREF_TYPE_INODE:
		/*
		 * Must instantiate data for DIRECTDATA test and also
		 * for recursion.
		 */
		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
		hammer2_chain_unlock(parent);
		if (parent->data->ipdata.op_flags & HAMMER2_OPFLAG_DIRECTDATA) {
			/* not applicable to recovery scan */
			return (0);
		}
		break;
	case HAMMER2_BREF_TYPE_INDIRECT:
		/*
		 * Must instantiate data for recursion
		 */
		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
		hammer2_chain_unlock(parent);
		break;
	case HAMMER2_BREF_TYPE_DATA:
	case HAMMER2_BREF_TYPE_FREEMAP:
	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
	case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
		/* not applicable to recovery scan */
		return (0);
	default:
		return (EDOM);
	}
	/*
	 * Recursive scan of the last flushed transaction only.  We are
	 * doing this without pmp assignments so don't leave the chains
	 * hanging around after we are done with them.
	 */
	cache_index = 0;
	chain = hammer2_chain_scan(parent, NULL, &cache_index,
				   HAMMER2_LOOKUP_NODATA);
	while (chain) {
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_RELEASE);
		if (chain->bref.mirror_tid >= hmp->voldata.mirror_tid) {
			error = hammer2_recovery_scan(trans, hmp, chain,
						      list, depth + 1);
			if (error)
				cumulative_error = error;
		}
		chain = hammer2_chain_scan(parent, chain, &cache_index,
					   HAMMER2_LOOKUP_NODATA);
	}

	return cumulative_error;
}
/*
 * Sync the entire filesystem; this is called from the filesystem syncer
 * process periodically and whenever a user calls sync(1) on the hammer
 * filesystem.
 *
 * Currently is actually called from the syncer! \o/
 *
 * This task will have to snapshot the state of the dirty inode chain.
 * From that, it will have to make sure all of the inodes on the dirty
 * chain have IO initiated.  We make sure that io is initiated for the root
 * block.
 *
 * If waitfor is set, we wait for media to acknowledge the new rootblock.
 *
 * THINKS: side A vs side B, to have sync not stall all I/O?
 */
static
int
hammer2_vfs_sync(struct mount *mp, int waitfor)
{
	struct hammer2_sync_info info;
	hammer2_chain_t *chain;
	hammer2_pfsmount_t *pmp;
	hammer2_mount_t *hmp;
	int flags;
	int error;
	int total_error;
	int force_fchain;
	int i;

	pmp = MPTOPMP(mp);

	/*
	 * We can't acquire locks on existing vnodes while in a transaction
	 * without risking a deadlock.  This assumes that vfsync() can be
	 * called without the vnode locked (which it can in DragonFly).
	 * Otherwise we'd have to implement a multi-pass or flag the lock
	 * failures and retry.
	 *
	 * The reclamation code interlocks with the sync list's token
	 * (by removing the vnode from the scan list) before unlocking
	 * the inode, giving us time to ref the inode.
	 */
	/*flags = VMSC_GETVP;*/
	flags = VMSC_NOWAIT;
	if (waitfor & MNT_LAZY)
		flags |= VMSC_ONEPASS;
	/*
	 * Initialize a normal transaction and sync everything out, then
	 * wait for pending I/O to finish (so it gets a transaction id
	 * that the meta-data flush will catch).
	 */
	hammer2_trans_init(&info.trans, pmp, NULL, 0);
	info.error = 0;
	info.waitfor = MNT_NOWAIT;
	vsyncscan(mp, flags | VMSC_NOWAIT, hammer2_sync_scan2, &info);

	if (info.error == 0 && (waitfor & MNT_WAIT)) {
		info.waitfor = waitfor;
		vsyncscan(mp, flags, hammer2_sync_scan2, &info);
	}
	hammer2_trans_done(&info.trans);
	hammer2_bioq_sync(info.trans.pmp);
	/*
	 * Start the flush transaction and flush all meta-data.
	 */
	hammer2_trans_init(&info.trans, pmp, NULL, HAMMER2_TRANS_ISFLUSH);

	total_error = 0;
	for (i = 0; i < pmp->cluster.nchains; ++i) {
		hmp = pmp->cluster.chains[i]->hmp;

		/*
		 * Media mounts have two 'roots', vchain for the topology
		 * and fchain for the free block table.  Flush both.
		 *
		 * Note that the topology and free block table are handled
		 * independently, so the free block table can wind up being
		 * ahead of the topology.  We depend on the bulk free scan
		 * code to deal with any loose ends.
		 */
		hammer2_chain_lock(&hmp->fchain, HAMMER2_RESOLVE_ALWAYS);
		if ((hmp->fchain.flags & HAMMER2_CHAIN_MODIFIED) ||
		    hmp->fchain.core->update_hi > hmp->voldata.freemap_tid) {
			/* this will also modify vchain as a side effect */
			chain = &hmp->fchain;
			hammer2_chain_flush(&info.trans, &chain);
			KKASSERT(chain == &hmp->fchain);
		}
		hammer2_chain_unlock(&hmp->fchain);

		hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
		if ((hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED) ||
		    hmp->vchain.core->update_hi > hmp->voldata.mirror_tid) {
			chain = &hmp->vchain;
			hammer2_chain_flush(&info.trans, &chain);
			KKASSERT(chain == &hmp->vchain);
			force_fchain = 1;
		} else {
			force_fchain = 0;
		}
		hammer2_chain_unlock(&hmp->vchain);

		hammer2_chain_lock(&hmp->fchain, HAMMER2_RESOLVE_ALWAYS);
		if ((hmp->fchain.flags & HAMMER2_CHAIN_MODIFIED) ||
		    hmp->fchain.core->update_hi > hmp->voldata.freemap_tid ||
		    force_fchain) {
			/* this will also modify vchain as a side effect */
			chain = &hmp->fchain;
			hammer2_chain_flush(&info.trans, &chain);
			KKASSERT(chain == &hmp->fchain);
		}
		hammer2_chain_unlock(&hmp->fchain);
		/*
		 * We can't safely flush the volume header until we have
		 * flushed any device buffers which have built up.
		 *
		 * XXX this isn't being incremental
		 */
		vn_lock(hmp->devvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(hmp->devvp, MNT_WAIT, 0);
		vn_unlock(hmp->devvp);

		/*
		 * The flush code sets CHAIN_VOLUMESYNC to indicate that the
		 * volume header needs synchronization via hmp->volsync.
		 *
		 * XXX synchronize the flag & data with only this flush XXX
		 */
		if (error == 0 &&
		    (hmp->vchain.flags & HAMMER2_CHAIN_VOLUMESYNC)) {
			struct buf *bp;

			/*
			 * Synchronize the disk before flushing the volume
			 * header.
			 */
			bp = getpbuf(NULL);
			bp->b_bio1.bio_offset = 0;
			bp->b_bufsize = 0;
			bp->b_bcount = 0;
			bp->b_cmd = BUF_CMD_FLUSH;
			bp->b_bio1.bio_done = biodone_sync;
			bp->b_bio1.bio_flags |= BIO_SYNC;
			vn_strategy(hmp->devvp, &bp->b_bio1);
			biowait(&bp->b_bio1, "h2vol");
			relpbuf(bp, NULL);
			/*
			 * Then we can safely flush the version of the
			 * volume header synchronized by the flush code.
			 */
			i = hmp->volhdrno + 1;
			if (i >= HAMMER2_NUM_VOLHDRS)
				i = 0;
			if (i * HAMMER2_ZONE_BYTES64 + HAMMER2_SEGSIZE >
			    hmp->volsync.volu_size) {
				i = 0;
			}
			kprintf("sync volhdr %d %jd\n",
				i, (intmax_t)hmp->volsync.volu_size);
			bp = getblk(hmp->devvp, i * HAMMER2_ZONE_BYTES64,
				    HAMMER2_PBUFSIZE, 0, 0);
			atomic_clear_int(&hmp->vchain.flags,
					 HAMMER2_CHAIN_VOLUMESYNC);
			bcopy(&hmp->volsync, bp->b_data, HAMMER2_PBUFSIZE);
			bawrite(bp);
			hmp->volhdrno = i;
		}
		if (error)
			total_error = error;
	}

	hammer2_trans_done(&info.trans);

	return (total_error);
}
/*
 * Sync passes.
 *
 * NOTE: We don't test update_lo/update_hi or MOVED here because the fsync
 *	 code won't flush on those flags.  The syncer code above will do a
 *	 general meta-data flush globally that will catch these flags.
 */
static int
hammer2_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer2_sync_info *info = data;
	hammer2_inode_t *ip;
	hammer2_chain_t *parent;
	int error;

	ip = VTOI(vp);
	if (ip == NULL)
		return (0);
	if (vp->v_type == VNON || vp->v_type == VBAD) {
		return (0);
	}
	if ((ip->flags & HAMMER2_INODE_MODIFIED) == 0 &&
	    RB_EMPTY(&vp->v_rbdirty_tree)) {
		return (0);
	}
	/*
	 * VOP_FSYNC will start a new transaction so replicate some code
	 * here to do it inline (see hammer2_vop_fsync()).
	 *
	 * WARNING: The vfsync interacts with the buffer cache and might
	 *	    block, we can't hold the inode lock at that time.
	 *	    However, we MUST ref ip before blocking to ensure that
	 *	    it isn't ripped out from under us (since we do not
	 *	    hold a lock on the vnode).
	 */
	hammer2_inode_ref(ip);
	atomic_clear_int(&ip->flags, HAMMER2_INODE_MODIFIED);
	vfsync(vp, MNT_NOWAIT, 1, NULL, NULL);

	/*
	 * XXX this interferes with flush operations mainly because the
	 *     same transaction id is being used by asynchronous buffer
	 *     operations above and can be reordered after the flush
	 *     below.
	 */
	parent = hammer2_inode_lock_ex(ip);
	hammer2_chain_flush(&info->trans, &parent);
	hammer2_inode_unlock_ex(ip, parent);
	hammer2_inode_drop(ip);

	error = VOP_FSYNC(vp, MNT_NOWAIT, 0);
	if (error)
		info->error = error;
	return (0);
}
static
int
hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp)
{
	return (EOPNOTSUPP);
}

static
int
hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
		   struct fid *fhp, struct vnode **vpp)
{
	return (EOPNOTSUPP);
}

static
int
hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
		     int *exflagsp, struct ucred **credanonp)
{
	return (EOPNOTSUPP);
}
/*
 * Support code for hammer2_mount().  Read, verify, and install the volume
 * header into the HMP.
 *
 * XXX read four volhdrs and use the one with the highest TID whose CRC
 *     matches.
 *
 * XXX For filesystems w/ less than 4 volhdrs, make sure to not write to
 *     nonexistent locations.
 *
 * XXX Record selected volhdr and ring updates to each of 4 volhdrs.
 */
static
int
hammer2_install_volume_header(hammer2_mount_t *hmp)
{
	hammer2_volume_data_t *vd;
	struct buf *bp;
	hammer2_crc32_t crc0, crc, bcrc0, bcrc;
	int error_reported;
	int error;
	int valid;
	int i;

	error_reported = 0;
	error = 0;
	valid = 0;
	bp = NULL;

	/*
	 * There are up to 4 copies of the volume header (syncs iterate
	 * between them so there is no single master).  We don't trust the
	 * volu_size field so we don't know precisely how large the filesystem
	 * is, so depend on the OS to return an error if we go beyond the
	 * block device's EOF.
	 */
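	/*
	 * Illustrative note (not from the original source): copy #i is
	 * read from byte offset i * HAMMER2_ZONE_BYTES64, i.e. the volume
	 * headers sit at fixed zone boundaries defined in hammer2_disk.h,
	 * so a small filesystem may legitimately hold fewer than
	 * HAMMER2_NUM_VOLHDRS copies and the reads past its EOF simply
	 * fail and are skipped.
	 */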
	for (i = 0; i < HAMMER2_NUM_VOLHDRS; i++) {
		error = bread(hmp->devvp, i * HAMMER2_ZONE_BYTES64,
			      HAMMER2_VOLUME_BYTES, &bp);
		if (error) {
			brelse(bp);
			bp = NULL;
			continue;
		}

		vd = (struct hammer2_volume_data *) bp->b_data;
		if ((vd->magic != HAMMER2_VOLUME_ID_HBO) &&
		    (vd->magic != HAMMER2_VOLUME_ID_ABO)) {
			brelse(bp);
			bp = NULL;
			continue;
		}

		if (vd->magic == HAMMER2_VOLUME_ID_ABO) {
			/* XXX: Reversed-endianness filesystem */
			kprintf("hammer2: reverse-endian filesystem detected");
			brelse(bp);
			bp = NULL;
			continue;
		}
		crc = vd->icrc_sects[HAMMER2_VOL_ICRC_SECT0];
		crc0 = hammer2_icrc32(bp->b_data + HAMMER2_VOLUME_ICRC0_OFF,
				      HAMMER2_VOLUME_ICRC0_SIZE);
		bcrc = vd->icrc_sects[HAMMER2_VOL_ICRC_SECT1];
		bcrc0 = hammer2_icrc32(bp->b_data + HAMMER2_VOLUME_ICRC1_OFF,
				       HAMMER2_VOLUME_ICRC1_SIZE);
		if ((crc0 != crc) || (bcrc0 != bcrc)) {
			kprintf("hammer2 volume header crc "
				"mismatch copy #%d %08x/%08x\n",
				i, crc0, crc);
			error_reported = 1;
			brelse(bp);
			bp = NULL;
			continue;
		}
		if (valid == 0 || hmp->voldata.mirror_tid < vd->mirror_tid) {
			valid = 1;
			hmp->voldata = *vd;
			hmp->volhdrno = i;
		}
		brelse(bp);
		bp = NULL;
	}
	if (valid) {
		hmp->volsync = hmp->voldata;
		error = 0;
		if (error_reported || bootverbose || 1) { /* 1/DEBUG */
			kprintf("hammer2: using volume header #%d\n",
				hmp->volhdrno);
		}
	} else {
		error = EINVAL;
		kprintf("hammer2: no valid volume headers found!\n");
	}
	return (error);
}
/*
 * Reconnect using the passed file pointer.  The caller must ref the
 * fp for us.
 */
void
hammer2_cluster_reconnect(hammer2_pfsmount_t *pmp, struct file *fp)
{
	hammer2_inode_data_t *ipdata;
	hammer2_chain_t *parent;
	hammer2_mount_t *hmp;
	size_t name_len;

	hmp = pmp->cluster.chains[0]->hmp;	/* XXX */

	/*
	 * Closes old comm descriptor, kills threads, cleans up
	 * states, then installs the new descriptor and creates
	 * new threads.
	 */
	kdmsg_iocom_reconnect(&pmp->iocom, fp, "hammer2");
	/*
	 * Setup LNK_CONN fields for autoinitiated state machine
	 */
	parent = hammer2_inode_lock_ex(pmp->iroot);
	ipdata = &parent->data->ipdata;
	pmp->iocom.auto_lnk_conn.pfs_clid = ipdata->pfs_clid;
	pmp->iocom.auto_lnk_conn.pfs_fsid = ipdata->pfs_fsid;
	pmp->iocom.auto_lnk_conn.pfs_type = ipdata->pfs_type;
	pmp->iocom.auto_lnk_conn.proto_version = DMSG_SPAN_PROTO_1;
	pmp->iocom.auto_lnk_conn.peer_type = hmp->voldata.peer_type;

	/*
	 * Filter adjustment.  Clients do not need visibility into other
	 * clients (otherwise millions of clients would present a serious
	 * problem).  The fs_label also serves to restrict the namespace.
	 */
	pmp->iocom.auto_lnk_conn.peer_mask = 1LLU << HAMMER2_PEER_HAMMER2;
	pmp->iocom.auto_lnk_conn.pfs_mask = (uint64_t)-1;
	switch (ipdata->pfs_type) {
	case DMSG_PFSTYPE_CLIENT:
		pmp->iocom.auto_lnk_conn.peer_mask &=
				~(1LLU << DMSG_PFSTYPE_CLIENT);
		break;
	default:
		break;
	}

	name_len = ipdata->name_len;
	if (name_len >= sizeof(pmp->iocom.auto_lnk_conn.fs_label))
		name_len = sizeof(pmp->iocom.auto_lnk_conn.fs_label) - 1;
	bcopy(ipdata->filename,
	      pmp->iocom.auto_lnk_conn.fs_label,
	      name_len);
	pmp->iocom.auto_lnk_conn.fs_label[name_len] = 0;
	/*
	 * Setup LNK_SPAN fields for autoinitiated state machine
	 */
	pmp->iocom.auto_lnk_span.pfs_clid = ipdata->pfs_clid;
	pmp->iocom.auto_lnk_span.pfs_fsid = ipdata->pfs_fsid;
	pmp->iocom.auto_lnk_span.pfs_type = ipdata->pfs_type;
	pmp->iocom.auto_lnk_span.peer_type = hmp->voldata.peer_type;
	pmp->iocom.auto_lnk_span.proto_version = DMSG_SPAN_PROTO_1;
	name_len = ipdata->name_len;
	if (name_len >= sizeof(pmp->iocom.auto_lnk_span.fs_label))
		name_len = sizeof(pmp->iocom.auto_lnk_span.fs_label) - 1;
	bcopy(ipdata->filename,
	      pmp->iocom.auto_lnk_span.fs_label,
	      name_len);
	pmp->iocom.auto_lnk_span.fs_label[name_len] = 0;
	hammer2_inode_unlock_ex(pmp->iroot, parent);

	kdmsg_iocom_autoinitiate(&pmp->iocom, hammer2_autodmsg);
}
static int
hammer2_rcvdmsg(kdmsg_msg_t *msg)
{
	switch(msg->any.head.cmd & DMSGF_TRANSMASK) {
	case DMSG_DBG_SHELL:
		/*
		 * Execute shell command (not supported atm)
		 */
		kdmsg_msg_reply(msg, DMSG_ERR_NOSUPP);
		break;
	case DMSG_DBG_SHELL | DMSGF_REPLY:
		/*
		 * Receive one or more replies to a shell command
		 * that we sent.
		 */
		if (msg->aux_data) {
			msg->aux_data[msg->aux_size - 1] = 0;
			kprintf("HAMMER2 DBG: %s\n", msg->aux_data);
		}
		break;
	default:
		/*
		 * Unsupported message received.  We only need to
		 * reply if it's a transaction in order to close our end.
		 * Ignore any one-way messages or any further messages
		 * associated with the transaction.
		 *
		 * NOTE: This case also includes DMSG_LNK_ERROR messages
		 *	 which might be one-way, replying to those would
		 *	 cause an infinite ping-pong.
		 */
		if (msg->any.head.cmd & DMSGF_CREATE)
			kdmsg_msg_reply(msg, DMSG_ERR_NOSUPP);
		break;
	}
	return (0);
}
/*
 * This function is called after KDMSG has automatically handled processing
 * of a LNK layer message (typically CONN, SPAN, or CIRC).
 *
 * We tag off the LNK_CONN to trigger our LNK_VOLCONF messages which
 * advertise all available hammer2 super-root volumes.
 */
static void
hammer2_autodmsg(kdmsg_msg_t *msg)
{
	hammer2_pfsmount_t *pmp = msg->iocom->handle;
	hammer2_mount_t *hmp = pmp->cluster.chains[0]->hmp; /* XXX */
	int copyid;

	/*
	 * We only care about replies to our LNK_CONN auto-request.  kdmsg
	 * has already processed the reply, we use this callback as a shim
	 * to know when we can advertise available super-root volumes.
	 */
	if ((msg->any.head.cmd & DMSGF_TRANSMASK) !=
	    (DMSG_LNK_CONN | DMSGF_CREATE | DMSGF_REPLY) ||
	    msg->state == NULL) {
		return;
	}
	kprintf("LNK_CONN REPLY RECEIVED CMD %08x\n", msg->any.head.cmd);

	if (msg->any.head.cmd & DMSGF_CREATE) {
		kprintf("HAMMER2: VOLDATA DUMP\n");

		/*
		 * Dump the configuration stored in the volume header
		 */
		hammer2_voldata_lock(hmp);
		for (copyid = 0; copyid < HAMMER2_COPYID_COUNT; ++copyid) {
			if (hmp->voldata.copyinfo[copyid].copyid == 0)
				continue;
			hammer2_volconf_update(pmp, copyid);
		}
		hammer2_voldata_unlock(hmp, 0);
	}
	if ((msg->any.head.cmd & DMSGF_DELETE) &&
	    msg->state && (msg->state->txcmd & DMSGF_DELETE) == 0) {
		kprintf("HAMMER2: CONN WAS TERMINATED\n");
	}
}
/*
 * Volume configuration updates are passed onto the userland service
 * daemon via the open LNK_CONN transaction.
 */
void
hammer2_volconf_update(hammer2_pfsmount_t *pmp, int index)
{
	hammer2_mount_t *hmp = pmp->cluster.chains[0]->hmp;	/* XXX */
	kdmsg_msg_t *msg;

	/* XXX interlock against connection state termination */
	kprintf("volconf update %p\n", pmp->iocom.conn_state);
	if (pmp->iocom.conn_state) {
		kprintf("TRANSMIT VOLCONF VIA OPEN CONN TRANSACTION\n");
		msg = kdmsg_msg_alloc_state(pmp->iocom.conn_state,
					    DMSG_LNK_VOLCONF, NULL, NULL);
		msg->any.lnk_volconf.copy = hmp->voldata.copyinfo[index];
		msg->any.lnk_volconf.mediaid = hmp->voldata.fsid;
		msg->any.lnk_volconf.index = index;
		kdmsg_msg_write(msg);
	}
}
/*
 * This handles hysteresis on regular file flushes.  Because the BIOs are
 * routed to a thread it is possible for an excessive number to build up
 * and cause long front-end stalls long before the runningbuffspace limit
 * is hit, so we implement hammer2_flush_pipe to control the
 * hysteresis.
 *
 * This is a particular problem when compression is used.
 */
void
hammer2_lwinprog_ref(hammer2_pfsmount_t *pmp)
{
	atomic_add_int(&pmp->count_lwinprog, 1);
}
void
hammer2_lwinprog_drop(hammer2_pfsmount_t *pmp)
{
	int lwinprog;

	lwinprog = atomic_fetchadd_int(&pmp->count_lwinprog, -1);
	if ((lwinprog & HAMMER2_LWINPROG_WAITING) &&
	    (lwinprog & HAMMER2_LWINPROG_MASK) <= hammer2_flush_pipe * 2 / 3) {
		atomic_clear_int(&pmp->count_lwinprog,
				 HAMMER2_LWINPROG_WAITING);
		wakeup(&pmp->count_lwinprog);
	}
}
void
hammer2_lwinprog_wait(hammer2_pfsmount_t *pmp)
{
	int lwinprog;

	for (;;) {
		lwinprog = pmp->count_lwinprog;
		cpu_ccfence();
		if ((lwinprog & HAMMER2_LWINPROG_MASK) < hammer2_flush_pipe)
			break;
		tsleep_interlock(&pmp->count_lwinprog, 0);
		atomic_set_int(&pmp->count_lwinprog, HAMMER2_LWINPROG_WAITING);
		lwinprog = pmp->count_lwinprog;
		if ((lwinprog & HAMMER2_LWINPROG_MASK) < hammer2_flush_pipe)
			break;
		tsleep(&pmp->count_lwinprog, PINTERLOCKED, "h2wpipe", hz);
	}
}
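/*
 * Illustrative pairing (not from the original source): the strategy
 * write path in hammer2_vnops.c is expected to bracket each queued BIO
 * with these primitives, roughly:
 *
 *	hammer2_lwinprog_ref(pmp);		// account the BIO
 *	bioq_insert_tail(&pmp->wthread_bioq, bio);
 *	wakeup(&pmp->wthread_bioq);
 *	hammer2_lwinprog_wait(pmp);		// throttle the front-end
 *
 * while hammer2_write_thread() calls hammer2_lwinprog_drop() for each
 * BIO it dequeues (see above), waking waiters once the in-progress
 * count drains to two-thirds of hammer2_flush_pipe.
 */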
void
hammer2_dump_chain(hammer2_chain_t *chain, int tab, int *countp)
{
	hammer2_chain_layer_t *layer;
	hammer2_chain_t *scan;
	hammer2_chain_t *first_parent;

	--*countp;
	if (*countp == 0) {
		kprintf("%*.*s...\n", tab, tab, "");
		return;
	}
	if (*countp < 0)
		return;
	first_parent = chain->core ? TAILQ_FIRST(&chain->core->ownerq) : NULL;
	kprintf("%*.*schain %p.%d %016jx/%d mir=%016jx\n",
		tab, tab, "",
		chain, chain->bref.type,
		chain->bref.key, chain->bref.keybits,
		chain->bref.mirror_tid);

	kprintf("%*.*s      [%08x] (%s) dt=%016jx refs=%d\n",
		tab, tab, "",
		chain->flags,
		((chain->bref.type == HAMMER2_BREF_TYPE_INODE &&
		  chain->data) ? (char *)chain->data->ipdata.filename : "?"),
		chain->delete_tid,
		chain->refs);

	kprintf("%*.*s      core %p [%08x] lo=%08jx hi=%08jx fp=%p np=%p",
		tab, tab, "",
		chain->core, (chain->core ? chain->core->flags : 0),
		(chain->core ? chain->core->update_lo : -1),
		(chain->core ? chain->core->update_hi : -1),
		first_parent,
		(first_parent ? TAILQ_NEXT(chain, core_entry) : NULL));

	if (first_parent)
		kprintf(" [fpflags %08x fprefs %d]\n",
			first_parent->flags,
			first_parent->refs);
	if (chain->core == NULL || TAILQ_EMPTY(&chain->core->layerq))
		kprintf("\n");
	else
		kprintf(" {\n");
	if (chain->core) {
		TAILQ_FOREACH(layer, &chain->core->layerq, entry) {
			RB_FOREACH(scan, hammer2_chain_tree, &layer->rbtree) {
				hammer2_dump_chain(scan, tab + 4, countp);
			}
		}
	}
	if (chain->core && !TAILQ_EMPTY(&chain->core->layerq)) {
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE && chain->data)
			kprintf("%*.*s}(%s)\n", tab, tab, "",
				chain->data->ipdata.filename);
		else
			kprintf("%*.*s}\n", tab, tab, "");
	}
}