/* sys/vfs/hammer2/hammer2_vfsops.c */
47902fef 1/*-
0dea3156 2 * Copyright (c) 2011-2013 The DragonFly Project. All rights reserved.
703720e4
MD
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
355d67fc 6 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
703720e4
MD
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 *
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
17 * distribution.
18 * 3. Neither the name of The DragonFly Project nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific, prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 */
703720e4
MD
35#include <sys/param.h>
36#include <sys/systm.h>
37#include <sys/kernel.h>
38#include <sys/nlookup.h>
39#include <sys/vnode.h>
40#include <sys/mount.h>
41#include <sys/fcntl.h>
42#include <sys/buf.h>
43#include <sys/uuid.h>
a74bc66c 44#include <sys/vfsops.h>
37aa19df 45#include <sys/sysctl.h>
bfc3a7b1 46#include <sys/socket.h>
355d67fc
MD
47#include <sys/objcache.h>
48
49#include <sys/proc.h>
50#include <sys/namei.h>
51#include <sys/mountctl.h>
52#include <sys/dirent.h>
53#include <sys/uio.h>
54
55#include <sys/mutex.h>
56#include <sys/mutex2.h>
703720e4
MD
57
58#include "hammer2.h"
59#include "hammer2_disk.h"
60#include "hammer2_mount.h"
61
355d67fc
MD
62#include "hammer2.h"
63#include "hammer2_lz4.h"
64
65#include "zlib/hammer2_zlib.h"
66
#define REPORT_REFS_ERRORS 1	/* XXX remove me */

MALLOC_DEFINE(M_OBJCACHE, "objcache", "Object Cache");

/*
 * Per-mount context handed to the sync vnode scan callback
 * (hammer2_sync_scan2).
 */
struct hammer2_sync_info {
	hammer2_trans_t trans;		/* flush transaction */
	int error;			/* sticky first error */
	int waitfor;			/* caller-supplied wait disposition */
};

/*
 * Global list of all HAMMER2 device mounts, protected by hammer2_mntlk.
 */
TAILQ_HEAD(hammer2_mntlist, hammer2_mount);
static struct hammer2_mntlist hammer2_mntlist;
static struct lock hammer2_mntlk;

/*
 * Tunables and I/O statistics counters, exported below under the
 * vfs.hammer2 sysctl node.
 *
 * NOTE(review): "iod" appears to count direct device I/O and "ioa"
 * asynchronous I/O, inferred only from the sysctl names -- confirm
 * against the code that increments them.
 */
int hammer2_debug;
int hammer2_cluster_enable = 1;
int hammer2_hardlink_enable = 1;
long hammer2_iod_file_read;
long hammer2_iod_meta_read;
long hammer2_iod_indr_read;
long hammer2_iod_fmap_read;
long hammer2_iod_volu_read;
long hammer2_iod_file_write;
long hammer2_iod_meta_write;
long hammer2_iod_indr_write;
long hammer2_iod_fmap_write;
long hammer2_iod_volu_write;
long hammer2_ioa_file_read;
long hammer2_ioa_meta_read;
long hammer2_ioa_indr_read;
long hammer2_ioa_fmap_read;
long hammer2_ioa_volu_read;
long hammer2_ioa_fmap_write;
long hammer2_ioa_file_write;
long hammer2_ioa_meta_write;
long hammer2_ioa_indr_write;
long hammer2_ioa_volu_write;

/*
 * Malloc types backing the compression (write) and decompression (read)
 * object caches created in hammer2_vfs_init().
 */
MALLOC_DECLARE(C_BUFFER);
MALLOC_DEFINE(C_BUFFER, "compbuffer", "Buffer used for compression.");

MALLOC_DECLARE(D_BUFFER);
MALLOC_DEFINE(D_BUFFER, "decompbuffer", "Buffer used for decompression.");

SYSCTL_NODE(_vfs, OID_AUTO, hammer2, CTLFLAG_RW, 0, "HAMMER2 filesystem");

SYSCTL_INT(_vfs_hammer2, OID_AUTO, debug, CTLFLAG_RW,
	   &hammer2_debug, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_enable, CTLFLAG_RW,
	   &hammer2_cluster_enable, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, hardlink_enable, CTLFLAG_RW,
	   &hammer2_hardlink_enable, 0, "");

SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_read, CTLFLAG_RW,
	    &hammer2_iod_file_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_read, CTLFLAG_RW,
	    &hammer2_iod_meta_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_read, CTLFLAG_RW,
	    &hammer2_iod_indr_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_fmap_read, CTLFLAG_RW,
	    &hammer2_iod_fmap_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_read, CTLFLAG_RW,
	    &hammer2_iod_volu_read, 0, "");

SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_write, CTLFLAG_RW,
	    &hammer2_iod_file_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_write, CTLFLAG_RW,
	    &hammer2_iod_meta_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_write, CTLFLAG_RW,
	    &hammer2_iod_indr_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_fmap_write, CTLFLAG_RW,
	    &hammer2_iod_fmap_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_write, CTLFLAG_RW,
	    &hammer2_iod_volu_write, 0, "");

SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_file_read, CTLFLAG_RW,
	    &hammer2_ioa_file_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_meta_read, CTLFLAG_RW,
	    &hammer2_ioa_meta_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_indr_read, CTLFLAG_RW,
	    &hammer2_ioa_indr_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_fmap_read, CTLFLAG_RW,
	    &hammer2_ioa_fmap_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_volu_read, CTLFLAG_RW,
	    &hammer2_ioa_volu_read, 0, "");

SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_file_write, CTLFLAG_RW,
	    &hammer2_ioa_file_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_meta_write, CTLFLAG_RW,
	    &hammer2_ioa_meta_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_indr_write, CTLFLAG_RW,
	    &hammer2_ioa_indr_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_fmap_write, CTLFLAG_RW,
	    &hammer2_ioa_fmap_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_volu_write, CTLFLAG_RW,
	    &hammer2_ioa_volu_write, 0, "");
/*
 * Forward declarations: VFS entry points and local helpers.
 */
static int hammer2_vfs_init(struct vfsconf *conf);
static int hammer2_vfs_uninit(struct vfsconf *vfsp);
static int hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
				struct ucred *cred);
static int hammer2_remount(hammer2_mount_t *, char *, struct vnode *,
				struct ucred *);
static int hammer2_vfs_unmount(struct mount *mp, int mntflags);
static int hammer2_vfs_root(struct mount *mp, struct vnode **vpp);
static int hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp,
				struct ucred *cred);
static int hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
				struct ucred *cred);
static int hammer2_vfs_vget(struct mount *mp, struct vnode *dvp,
				ino_t ino, struct vnode **vpp);
static int hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
				struct fid *fhp, struct vnode **vpp);
static int hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp);
static int hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
				int *exflagsp, struct ucred **credanonp);

static int hammer2_install_volume_header(hammer2_mount_t *hmp);
static int hammer2_sync_scan2(struct mount *mp, struct vnode *vp, void *data);

/* dedicated strategy-write thread, one per PFS mount */
static void hammer2_write_thread(void *arg);

/*
 * Functions for compression in threads,
 * from hammer2_vnops.c
 */
static void hammer2_write_file_core(struct buf *bp, hammer2_trans_t *trans,
				hammer2_inode_t *ip,
				hammer2_inode_data_t *ipdata,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase, int ioflag, int pblksize,
				int *errorp);
static void hammer2_compress_and_write(struct buf *bp, hammer2_trans_t *trans,
				hammer2_inode_t *ip,
				hammer2_inode_data_t *ipdata,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase, int ioflag,
				int pblksize, int *errorp, int comp_algo);
static void hammer2_zero_check_and_write(struct buf *bp,
				hammer2_trans_t *trans, hammer2_inode_t *ip,
				hammer2_inode_data_t *ipdata,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase,
				int ioflag, int pblksize, int *errorp);
static int test_block_zeros(const char *buf, size_t bytes);
static void zero_write(struct buf *bp, hammer2_trans_t *trans,
				hammer2_inode_t *ip,
				hammer2_inode_data_t *ipdata,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase,
				int *errorp);
static void hammer2_write_bp(hammer2_chain_t *chain, struct buf *bp,
				int ioflag, int pblksize, int *errorp);

/* cluster-messaging (kdmsg) receive handlers */
static int hammer2_rcvdmsg(kdmsg_msg_t *msg);
static void hammer2_autodmsg(kdmsg_msg_t *msg);
/*
 * HAMMER2 vfs operations.
 *
 * NOTE(review): hammer2_vfs_sync is referenced here without a local
 * forward declaration; presumably it is a global declared in hammer2.h
 * -- confirm.
 */
static struct vfsops hammer2_vfsops = {
	.vfs_init	= hammer2_vfs_init,
	.vfs_uninit	= hammer2_vfs_uninit,
	.vfs_sync	= hammer2_vfs_sync,
	.vfs_mount	= hammer2_vfs_mount,
	.vfs_unmount	= hammer2_vfs_unmount,
	.vfs_root 	= hammer2_vfs_root,
	.vfs_statfs	= hammer2_vfs_statfs,
	.vfs_statvfs	= hammer2_vfs_statvfs,
	.vfs_vget	= hammer2_vfs_vget,
	.vfs_vptofh	= hammer2_vfs_vptofh,
	.vfs_fhtovp	= hammer2_vfs_fhtovp,
	.vfs_checkexp	= hammer2_vfs_checkexp
};

MALLOC_DEFINE(M_HAMMER2, "HAMMER2-mount", "");

VFS_SET(hammer2_vfsops, hammer2, 0);
MODULE_VERSION(hammer2, 1);
e118c14f
MD
248static
249int
066e00cc 250hammer2_vfs_init(struct vfsconf *conf)
703720e4 251{
355d67fc
MD
252 static struct objcache_malloc_args margs_read;
253 static struct objcache_malloc_args margs_write;
254
703720e4
MD
255 int error;
256
257 error = 0;
258
0e92b724 259 if (HAMMER2_BLOCKREF_BYTES != sizeof(struct hammer2_blockref))
703720e4 260 error = EINVAL;
0e92b724 261 if (HAMMER2_INODE_BYTES != sizeof(struct hammer2_inode_data))
703720e4 262 error = EINVAL;
0e92b724 263 if (HAMMER2_VOLUME_BYTES != sizeof(struct hammer2_volume_data))
703720e4
MD
264 error = EINVAL;
265
266 if (error)
267 kprintf("HAMMER2 structure size mismatch; cannot continue.\n");
355d67fc
MD
268
269 margs_read.objsize = 65536;
270 margs_read.mtype = D_BUFFER;
271
272 margs_write.objsize = 32768;
273 margs_write.mtype = C_BUFFER;
274
275 cache_buffer_read = objcache_create(margs_read.mtype->ks_shortdesc,
276 0, 1, NULL, NULL, NULL, objcache_malloc_alloc,
277 objcache_malloc_free, &margs_read);
278 cache_buffer_write = objcache_create(margs_write.mtype->ks_shortdesc,
279 0, 1, NULL, NULL, NULL, objcache_malloc_alloc,
280 objcache_malloc_free, &margs_write);
703720e4 281
e4e20f48
MD
282 lockinit(&hammer2_mntlk, "mntlk", 0, 0);
283 TAILQ_INIT(&hammer2_mntlist);
284
703720e4
MD
285 return (error);
286}
287
/*
 * Module teardown: destroy the object caches created by
 * hammer2_vfs_init().  Always succeeds.
 */
static
int
hammer2_vfs_uninit(struct vfsconf *vfsp __unused)
{
	objcache_destroy(cache_buffer_read);
	objcache_destroy(cache_buffer_write);
	return 0;
}
/*
 * Mount or remount HAMMER2 fileystem from physical media
 *
 *	mountroot
 *		mp		mount point structure
 *		path		NULL
 *		data		<unused>
 *		cred		<unused>
 *
 *	mount
 *		mp		mount point structure
 *		path		path to mount point
 *		data		pointer to argument structure in user space
 *			volume	volume path (device@LABEL form)
 *			hflags	user mount flags
 *		cred		user credentials
 *
 * RETURNS:	0	Success
 *		!0	error number
 */
static
int
hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
		  struct ucred *cred)
{
	struct hammer2_mount_info info;
	hammer2_pfsmount_t *pmp;
	hammer2_mount_t *hmp;
	hammer2_key_t key_next;
	hammer2_key_t key_dummy;
	hammer2_key_t lhc;
	struct vnode *devvp;
	struct nlookupdata nd;
	hammer2_chain_t *parent;
	hammer2_chain_t *schain;
	hammer2_chain_t *rchain;
	struct file *fp;
	char devstr[MNAMELEN];
	size_t size;
	size_t done;
	char *dev;
	char *label;
	int ronly = 1;
	int error;
	int cache_index;
	int i;

	hmp = NULL;
	pmp = NULL;
	dev = NULL;
	label = NULL;
	devvp = NULL;
	cache_index = -1;

	kprintf("hammer2_mount\n");

	if (path == NULL) {
		/*
		 * Root mount (not yet supported)
		 */
		bzero(&info, sizeof(info));
		info.cluster_fd = -1;
		return (EOPNOTSUPP);
	} else {
		/*
		 * Non-root mount or updating a mount
		 */
		error = copyin(data, &info, sizeof(info));
		if (error)
			return (error);

		error = copyinstr(info.volume, devstr, MNAMELEN - 1, &done);
		if (error)
			return (error);

		/* Extract device and label from the "device@LABEL" string */
		dev = devstr;
		label = strchr(devstr, '@');
		if (label == NULL ||
		    ((label + 1) - dev) > done) {
			return (EINVAL);
		}
		*label = '\0';
		label++;
		if (*label == '\0')
			return (EINVAL);

		if (mp->mnt_flag & MNT_UPDATE) {
			/* Update mount */
			/* HAMMER2 implements NFS export via mountctl */
			pmp = MPTOPMP(mp);
			for (i = 0; i < pmp->cluster.nchains; ++i) {
				hmp = pmp->cluster.chains[i]->hmp;
				devvp = hmp->devvp;
				error = hammer2_remount(hmp, path, devvp, cred);
				if (error)
					break;
			}
			return error;
		}
	}

	/*
	 * PFS mount
	 *
	 * Lookup name and verify it refers to a block device.
	 */
	error = nlookup_init(&nd, dev, UIO_SYSSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = cache_vref(&nd.nl_nch, nd.nl_cred, &devvp);
	nlookup_done(&nd);

	if (error == 0) {
		if (vn_isdisk(devvp, &error))
			error = vfs_mountedon(devvp);
	}

	/*
	 * Determine if the device has already been mounted.  After this
	 * check hmp will be non-NULL if we are doing the second or more
	 * hammer2 mounts from the same device.
	 */
	lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);
	TAILQ_FOREACH(hmp, &hammer2_mntlist, mntentry) {
		if (hmp->devvp == devvp)
			break;
	}

	/*
	 * Open the device if this isn't a secondary mount and construct
	 * the H2 device mount (hmp).
	 */
	if (hmp == NULL) {
		if (error == 0 && vcount(devvp) > 0)
			error = EBUSY;

		/*
		 * Now open the device
		 */
		if (error == 0) {
			ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);
			vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
			error = vinvalbuf(devvp, V_SAVE, 0, 0);
			if (error == 0) {
				error = VOP_OPEN(devvp,
					 ronly ? FREAD : FREAD | FWRITE,
					 FSCRED, NULL);
			}
			vn_unlock(devvp);
		}
		if (error && devvp) {
			vrele(devvp);
			devvp = NULL;
		}
		if (error) {
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			return error;
		}
		hmp = kmalloc(sizeof(*hmp), M_HAMMER2, M_WAITOK | M_ZERO);
		hmp->ronly = ronly;
		hmp->devvp = devvp;
		kmalloc_create(&hmp->mchain, "HAMMER2-chains");
		TAILQ_INSERT_TAIL(&hammer2_mntlist, hmp, mntentry);
		RB_INIT(&hmp->iotree);

		lockinit(&hmp->alloclk, "h2alloc", 0, 0);
		lockinit(&hmp->voldatalk, "voldata", 0, LK_CANRECURSE);
		TAILQ_INIT(&hmp->transq);

		/*
		 * vchain setup. vchain.data is embedded.
		 * vchain.refs is initialized and will never drop to 0.
		 */
		hmp->vchain.hmp = hmp;
		hmp->vchain.refs = 1;
		hmp->vchain.data = (void *)&hmp->voldata;
		hmp->vchain.bref.type = HAMMER2_BREF_TYPE_VOLUME;
		hmp->vchain.bref.data_off = 0 | HAMMER2_PBUFRADIX;
		hmp->vchain.delete_tid = HAMMER2_MAX_TID;
		hammer2_chain_core_alloc(NULL, &hmp->vchain, NULL);
		/* hmp->vchain.u.xxx is left NULL */

		/*
		 * fchain setup.  fchain.data is embedded.
		 * fchain.refs is initialized and will never drop to 0.
		 *
		 * The data is not used but needs to be initialized to
		 * pass assertion muster.  We use this chain primarily
		 * as a placeholder for the freemap's top-level RBTREE
		 * so it does not interfere with the volume's topology
		 * RBTREE.
		 */
		hmp->fchain.hmp = hmp;
		hmp->fchain.refs = 1;
		hmp->fchain.data = (void *)&hmp->voldata.freemap_blockset;
		hmp->fchain.bref.type = HAMMER2_BREF_TYPE_FREEMAP;
		hmp->fchain.bref.data_off = 0 | HAMMER2_PBUFRADIX;
		hmp->fchain.bref.methods =
			HAMMER2_ENC_CHECK(HAMMER2_CHECK_FREEMAP) |
			HAMMER2_ENC_COMP(HAMMER2_COMP_NONE);
		hmp->fchain.delete_tid = HAMMER2_MAX_TID;

		hammer2_chain_core_alloc(NULL, &hmp->fchain, NULL);
		/* hmp->fchain.u.xxx is left NULL */

		/*
		 * Install the volume header
		 *
		 * NOTE(review): this error path and the invalid-super-root
		 * path below return while hammer2_mntlk is still held
		 * exclusively, unless hammer2_vfs_unmount() releases it --
		 * confirm the lock handoff.
		 */
		error = hammer2_install_volume_header(hmp);
		if (error) {
			hammer2_vfs_unmount(mp, MNT_FORCE);
			return error;
		}

		hmp->vchain.bref.mirror_tid = hmp->voldata.mirror_tid;
		hmp->fchain.bref.mirror_tid = hmp->voldata.freemap_tid;

		/*
		 * First locate the super-root inode, which is key 0
		 * relative to the volume header's blockset.
		 *
		 * Then locate the root inode by scanning the directory keyspace
		 * represented by the label.
		 */
		parent = hammer2_chain_lookup_init(&hmp->vchain, 0);
		schain = hammer2_chain_lookup(&parent, &key_dummy,
				      HAMMER2_SROOT_KEY, HAMMER2_SROOT_KEY,
				      &cache_index, 0);
		hammer2_chain_lookup_done(parent);
		if (schain == NULL) {
			kprintf("hammer2_mount: invalid super-root\n");
			hammer2_vfs_unmount(mp, MNT_FORCE);
			return EINVAL;
		}

		/*
		 * NOTE: inode_get sucks up schain's lock.
		 */
		atomic_set_int(&schain->flags, HAMMER2_CHAIN_PFSROOT);
		hmp->sroot = hammer2_inode_get(NULL, NULL, schain);
		hammer2_inode_ref(hmp->sroot);
		hammer2_inode_unlock_ex(hmp->sroot, schain);
		schain = NULL;
		/* leave hmp->sroot with one ref */
	}

	/*
	 * Block device opened successfully, finish initializing the
	 * mount structure.
	 *
	 * From this point on we have to call hammer2_unmount() on failure.
	 */
	pmp = kmalloc(sizeof(*pmp), M_HAMMER2, M_WAITOK | M_ZERO);

	kmalloc_create(&pmp->minode, "HAMMER2-inodes");
	kmalloc_create(&pmp->mmsg, "HAMMER2-pfsmsg");

	spin_init(&pmp->inum_spin);
	RB_INIT(&pmp->inum_tree);

	kdmsg_iocom_init(&pmp->iocom, pmp,
			 KDMSG_IOCOMF_AUTOCONN |
			 KDMSG_IOCOMF_AUTOSPAN |
			 KDMSG_IOCOMF_AUTOCIRC,
			 pmp->mmsg, hammer2_rcvdmsg);

	ccms_domain_init(&pmp->ccms_dom);
	++hmp->pmp_count;
	lockmgr(&hammer2_mntlk, LK_RELEASE);
	kprintf("hammer2_mount hmp=%p pmp=%p pmpcnt=%d\n",
		hmp, pmp, hmp->pmp_count);

	mp->mnt_flag = MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;	/* all entry pts are SMP */
	mp->mnt_kern_flag |= MNTK_THR_SYNC;	/* new vsyncscan semantics */

	/*
	 * required mount structure initializations
	 */
	mp->mnt_stat.f_iosize = HAMMER2_PBUFSIZE;
	mp->mnt_stat.f_bsize = HAMMER2_PBUFSIZE;

	mp->mnt_vstat.f_frsize = HAMMER2_PBUFSIZE;
	mp->mnt_vstat.f_bsize = HAMMER2_PBUFSIZE;

	/*
	 * Optional fields
	 */
	mp->mnt_iosize_max = MAXPHYS;
	mp->mnt_data = (qaddr_t)pmp;
	pmp->mp = mp;

	/*
	 * Lookup mount point under the media-localized super-root.
	 */
	parent = hammer2_inode_lock_ex(hmp->sroot);
	lhc = hammer2_dirhash(label, strlen(label));
	rchain = hammer2_chain_lookup(&parent, &key_next,
				      lhc, lhc + HAMMER2_DIRHASH_LOMASK,
				      &cache_index, 0);
	while (rchain) {
		if (rchain->bref.type == HAMMER2_BREF_TYPE_INODE &&
		    strcmp(label, rchain->data->ipdata.filename) == 0) {
			break;
		}
		rchain = hammer2_chain_next(&parent, rchain, &key_next,
					    key_next,
					    lhc + HAMMER2_DIRHASH_LOMASK,
					    &cache_index, 0);
	}
	hammer2_inode_unlock_ex(hmp->sroot, parent);

	if (rchain == NULL) {
		kprintf("hammer2_mount: PFS label not found\n");
		--hmp->pmp_count;
		hammer2_vfs_unmount(mp, MNT_FORCE);
		return EINVAL;
	}
	if (rchain->flags & HAMMER2_CHAIN_MOUNTED) {
		hammer2_chain_unlock(rchain);
		kprintf("hammer2_mount: PFS label already mounted!\n");
		--hmp->pmp_count;
		hammer2_vfs_unmount(mp, MNT_FORCE);
		return EBUSY;
	}
#if 0
	if (rchain->flags & HAMMER2_CHAIN_RECYCLE) {
		kprintf("hammer2_mount: PFS label currently recycling\n");
		--hmp->pmp_count;
		hammer2_vfs_unmount(mp, MNT_FORCE);
		return EBUSY;
	}
#endif

	atomic_set_int(&rchain->flags, HAMMER2_CHAIN_MOUNTED);

	/*
	 * NOTE: *_get() integrates chain's lock into the inode lock.
	 */
	hammer2_chain_ref(rchain);		/* for pmp->rchain */
	pmp->cluster.nchains = 1;
	pmp->cluster.chains[0] = rchain;
	pmp->iroot = hammer2_inode_get(pmp, NULL, rchain);
	hammer2_inode_ref(pmp->iroot);		/* ref for pmp->iroot */

	KKASSERT(rchain->pmp == NULL);		/* tracking pmp for rchain */
	rchain->pmp = pmp;
	atomic_add_long(&pmp->inmem_chains, 1);

	hammer2_inode_unlock_ex(pmp->iroot, rchain);

	kprintf("iroot %p\n", pmp->iroot);

	/*
	 * The logical file buffer bio write thread handles things
	 * like physical block assignment and compression.
	 */
	mtx_init(&pmp->wthread_mtx);
	bioq_init(&pmp->wthread_bioq);
	pmp->wthread_destroy = 0;
	lwkt_create(hammer2_write_thread, pmp,
		    &pmp->wthread_td, NULL, 0, -1, "hwrite-%s", label);

	/*
	 * Ref the cluster management messaging descriptor.  The mount
	 * program deals with the other end of the communications pipe.
	 */
	fp = holdfp(curproc->p_fd, info.cluster_fd, -1);
	if (fp == NULL) {
		kprintf("hammer2_mount: bad cluster_fd!\n");
		hammer2_vfs_unmount(mp, MNT_FORCE);
		return EBADF;
	}
	hammer2_cluster_reconnect(pmp, fp);

	/*
	 * Finish setup
	 */
	vfs_getnewfsid(mp);
	vfs_add_vnodeops(mp, &hammer2_vnode_vops, &mp->mnt_vn_norm_ops);
	vfs_add_vnodeops(mp, &hammer2_spec_vops, &mp->mnt_vn_spec_ops);
	vfs_add_vnodeops(mp, &hammer2_fifo_vops, &mp->mnt_vn_fifo_ops);

	copyinstr(info.volume, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, &size);
	bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	bzero(mp->mnt_stat.f_mntonname, sizeof(mp->mnt_stat.f_mntonname));
	copyinstr(path, mp->mnt_stat.f_mntonname,
		  sizeof(mp->mnt_stat.f_mntonname) - 1,
		  &size);

	/*
	 * Initial statfs to prime mnt_stat.
	 */
	hammer2_vfs_statfs(mp, &mp->mnt_stat, cred);

	return 0;
}
/*
 * Handle bioq for strategy write
 *
 * Dedicated per-PFS thread: dequeues logical-buffer bios queued by the
 * strategy code, assigns physical storage (with optional compression
 * via hammer2_write_file_core()) and completes them.  A bio whose
 * bio_buf is NULL is a synchronization marker inserted by
 * hammer2_bioq_sync().
 *
 * Runs until pmp->wthread_destroy is set non-zero, then acknowledges
 * shutdown by setting it to -1 and waking the waiter.
 */
static
void
hammer2_write_thread(void *arg)
{
	hammer2_pfsmount_t *pmp;
	struct bio *bio;
	struct buf *bp;
	hammer2_trans_t trans;
	struct vnode *vp;
	hammer2_inode_t *ip;
	hammer2_chain_t *parent;
	hammer2_chain_t **parentp;
	hammer2_inode_data_t *ipdata;
	hammer2_key_t lbase;
	int lblksize;
	int pblksize;
	int error;

	pmp = arg;

	mtx_lock(&pmp->wthread_mtx);
	while (pmp->wthread_destroy == 0) {
		/* sleep until work is queued (or shutdown requested) */
		if (bioq_first(&pmp->wthread_bioq) == NULL) {
			mtxsleep(&pmp->wthread_bioq, &pmp->wthread_mtx,
				 0, "h2bioqw", 0);
		}
		parent = NULL;
		parentp = &parent;

		/* one buffer-cache transaction covers a whole batch */
		hammer2_trans_init(&trans, pmp, HAMMER2_TRANS_BUFCACHE);

		while ((bio = bioq_takefirst(&pmp->wthread_bioq)) != NULL) {
			/*
			 * dummy bio for synchronization.  The transaction
			 * must be reinitialized.
			 */
			if (bio->bio_buf == NULL) {
				bio->bio_flags |= BIO_DONE;
				wakeup(bio);
				hammer2_trans_done(&trans);
				hammer2_trans_init(&trans, pmp,
						   HAMMER2_TRANS_BUFCACHE);
				continue;
			}

			/*
			 * else normal bio processing.  The mutex is
			 * dropped while blocking chain/inode operations
			 * run and reacquired before the next dequeue.
			 */
			mtx_unlock(&pmp->wthread_mtx);

			error = 0;
			bp = bio->bio_buf;
			vp = bp->b_vp;
			ip = VTOI(vp);

			/*
			 * Inode is modified, flush size and mtime changes
			 * to ensure that the file size remains consistent
			 * with the buffers being flushed.
			 */
			parent = hammer2_inode_lock_ex(ip);
			if (ip->flags & (HAMMER2_INODE_RESIZED |
					 HAMMER2_INODE_MTIME)) {
				hammer2_inode_fsync(&trans, ip, parentp);
			}
			ipdata = hammer2_chain_modify_ip(&trans, ip,
							 parentp, 0);
			lblksize = hammer2_calc_logical(ip, bio->bio_offset,
							&lbase, NULL);
			pblksize = hammer2_calc_physical(ip, lbase);
			hammer2_write_file_core(bp, &trans, ip, ipdata,
						parentp,
						lbase, IO_ASYNC,
						pblksize, &error);
			hammer2_inode_unlock_ex(ip, parent);
			if (error) {
				kprintf("hammer2: error in buffer write\n");
				bp->b_flags |= B_ERROR;
				bp->b_error = EIO;
			}
			biodone(bio);
			mtx_lock(&pmp->wthread_mtx);
		}
		hammer2_trans_done(&trans);
	}
	/* acknowledge shutdown request */
	pmp->wthread_destroy = -1;
	wakeup(&pmp->wthread_destroy);

	mtx_unlock(&pmp->wthread_mtx);
}
790
a7720be7
MD
791void
792hammer2_bioq_sync(hammer2_pfsmount_t *pmp)
793{
794 struct bio sync_bio;
795
796 bzero(&sync_bio, sizeof(sync_bio)); /* dummy with no bio_buf */
797 mtx_lock(&pmp->wthread_mtx);
798 if (pmp->wthread_destroy == 0) {
799 if (TAILQ_EMPTY(&pmp->wthread_bioq.queue)) {
800 bioq_insert_tail(&pmp->wthread_bioq, &sync_bio);
801 wakeup(&pmp->wthread_bioq);
802 } else {
803 bioq_insert_tail(&pmp->wthread_bioq, &sync_bio);
804 }
805 while ((sync_bio.bio_flags & BIO_DONE) == 0)
806 mtxsleep(&sync_bio, &pmp->wthread_mtx, 0, "h2bioq", 0);
807 }
808 mtx_unlock(&pmp->wthread_mtx);
809}
/*
 * Return a chain suitable for I/O, creating the chain if necessary
 * and assigning its physical block.
 *
 * Returns a locked chain (or panics on unrecoverable failure); on
 * success *errorp is 0.  May replace *parentp when the looked-up chain
 * shares the inode's core (DIRECTDATA case).
 */
static
hammer2_chain_t *
hammer2_assign_physical(hammer2_trans_t *trans,
			hammer2_inode_t *ip, hammer2_chain_t **parentp,
			hammer2_key_t lbase, int pblksize, int *errorp)
{
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_off_t pbase;
	hammer2_key_t key_dummy;
	int pradix = hammer2_getradix(pblksize);
	int cache_index = -1;

	/*
	 * Locate the chain associated with lbase, return a locked chain.
	 * However, do not instantiate any data reference (which utilizes a
	 * device buffer) because we will be using direct IO via the
	 * logical buffer cache buffer.
	 */
	*errorp = 0;
	KKASSERT(pblksize >= HAMMER2_MIN_ALLOC);
retry:
	parent = *parentp;
	hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS); /* extra lock */
	chain = hammer2_chain_lookup(&parent, &key_dummy,
				     lbase, lbase,
				     &cache_index, HAMMER2_LOOKUP_NODATA);

	if (chain == NULL) {
		/*
		 * We found a hole, create a new chain entry.
		 *
		 * NOTE: DATA chains are created without device backing
		 *	 store (nor do we want any).
		 */
		*errorp = hammer2_chain_create(trans, &parent, &chain,
					       lbase, HAMMER2_PBUFRADIX,
					       HAMMER2_BREF_TYPE_DATA,
					       pblksize);
		if (chain == NULL) {
			hammer2_chain_lookup_done(parent);
			panic("hammer2_chain_create: par=%p error=%d\n",
			      parent, *errorp);
			/*
			 * NOTE(review): unreachable after panic();
			 * presumably a leftover from an earlier retrying
			 * error path -- confirm before removing.
			 */
			goto retry;
		}

		/*
		 * NOTE(review): pbase is computed here and below but never
		 * read afterwards in this function -- appears to be a
		 * dead store; confirm against callers/history.
		 */
		pbase = chain->bref.data_off & ~HAMMER2_OFF_MASK_RADIX;
		/*ip->delta_dcount += pblksize;*/
	} else {
		switch (chain->bref.type) {
		case HAMMER2_BREF_TYPE_INODE:
			/*
			 * The data is embedded in the inode.  The
			 * caller is responsible for marking the inode
			 * modified and copying the data to the embedded
			 * area.
			 */
			pbase = NOOFFSET;
			break;
		case HAMMER2_BREF_TYPE_DATA:
			/* resize the existing block if the size changed */
			if (chain->bytes != pblksize) {
				hammer2_chain_resize(trans, ip,
						     parent, &chain,
						     pradix,
						     HAMMER2_MODIFY_OPTDATA);
			}
			hammer2_chain_modify(trans, &chain,
					     HAMMER2_MODIFY_OPTDATA);
			pbase = chain->bref.data_off & ~HAMMER2_OFF_MASK_RADIX;
			break;
		default:
			panic("hammer2_assign_physical: bad type");
			/* NOT REACHED */
			pbase = NOOFFSET;
			break;
		}
	}

	/*
	 * Cleanup.  If chain wound up being the inode (i.e. DIRECTDATA),
	 * we might have to replace *parentp.
	 */
	hammer2_chain_lookup_done(parent);
	if (chain) {
		if (*parentp != chain &&
		    (*parentp)->core == chain->core) {
			parent = *parentp;
			*parentp = chain;		/* eats lock */
			hammer2_chain_unlock(parent);
			hammer2_chain_lock(chain, 0);	/* need another */
		}
		/* else chain already locked for return */
	}
	return (chain);
}
/*
 * From hammer2_vnops.c.
 * The core write function which determines which path to take
 * depending on compression settings.
 *
 * Dispatches on the inode's configured compression algorithm:
 * no compression, zero-detection only, or zero-detection plus
 * compression (LZ4/ZLIB and the default).
 */
static
void
hammer2_write_file_core(struct buf *bp, hammer2_trans_t *trans,
			hammer2_inode_t *ip, hammer2_inode_data_t *ipdata,
			hammer2_chain_t **parentp,
			hammer2_key_t lbase, int ioflag, int pblksize,
			int *errorp)
{
	hammer2_chain_t *chain;

	switch(HAMMER2_DEC_COMP(ipdata->comp_algo)) {
	case HAMMER2_COMP_NONE:
		/*
		 * We have to assign physical storage to the buffer
		 * we intend to dirty or write now to avoid deadlocks
		 * in the strategy code later.
		 *
		 * This can return NOOFFSET for inode-embedded data.
		 * The strategy code will take care of it in that case.
		 */
		chain = hammer2_assign_physical(trans, ip, parentp,
						lbase, pblksize,
						errorp);
		hammer2_write_bp(chain, bp, ioflag, pblksize, errorp);
		if (chain)
			hammer2_chain_unlock(chain);
		break;
	case HAMMER2_COMP_AUTOZERO:
		/*
		 * Check for zero-fill only
		 */
		hammer2_zero_check_and_write(bp, trans, ip,
					     ipdata, parentp, lbase,
					     ioflag, pblksize, errorp);
		break;
	case HAMMER2_COMP_LZ4:
	case HAMMER2_COMP_ZLIB:
	default:
		/*
		 * Check for zero-fill and attempt compression.
		 */
		hammer2_compress_and_write(bp, trans, ip,
					   ipdata, parentp,
					   lbase, ioflag,
					   pblksize, errorp,
					   ipdata->comp_algo);
		break;
	}
	/*
	 * NOTE(review): this assigns to the local parameter 'ipdata' at
	 * function end and has no effect for the caller -- presumably a
	 * remnant of inlined code where the reload mattered; confirm.
	 */
	ipdata = &ip->chain->data->ipdata;	/* reload */
}
966
967/*
968 * From hammer2_vnops.c
969 * Generic function that will perform the compression in compression
970 * write path. The compression algorithm is determined by the settings
971 * obtained from inode.
972 */
973static
974void
1e5c08ba 975hammer2_compress_and_write(struct buf *bp, hammer2_trans_t *trans,
355d67fc
MD
976 hammer2_inode_t *ip, hammer2_inode_data_t *ipdata,
977 hammer2_chain_t **parentp,
978 hammer2_key_t lbase, int ioflag, int pblksize,
f481450f 979 int *errorp, int comp_algo)
355d67fc
MD
980{
981 hammer2_chain_t *chain;
f481450f
MD
982 int comp_size;
983 int comp_block_size;
984 char *comp_buffer;
355d67fc 985
f481450f
MD
986 if (test_block_zeros(bp->b_data, pblksize)) {
987 zero_write(bp, trans, ip, ipdata, parentp, lbase, errorp);
988 return;
989 }
1e5c08ba 990
f481450f
MD
991 comp_size = 0;
992 comp_buffer = NULL;
355d67fc 993
f481450f 994 KKASSERT(pblksize / 2 <= 32768);
355d67fc 995
f481450f
MD
996 if (ip->comp_heuristic < 8 || (ip->comp_heuristic & 7) == 0) {
997 z_stream strm_compress;
998 int comp_level;
999 int ret;
1000
1001 switch(HAMMER2_DEC_COMP(comp_algo)) {
1002 case HAMMER2_COMP_LZ4:
1003 comp_buffer = objcache_get(cache_buffer_write,
1004 M_INTWAIT);
1005 comp_size = LZ4_compress_limitedOutput(
1006 bp->b_data,
1007 &comp_buffer[sizeof(int)],
1008 pblksize,
1009 pblksize / 2 - sizeof(int));
1010 /*
1011 * We need to prefix with the size, LZ4
1012 * doesn't do it for us. Add the related
1013 * overhead.
1014 */
1015 *(int *)comp_buffer = comp_size;
1016 if (comp_size)
1017 comp_size += sizeof(int);
1018 break;
1019 case HAMMER2_COMP_ZLIB:
1020 comp_level = HAMMER2_DEC_LEVEL(comp_algo);
1021 if (comp_level == 0)
1022 comp_level = 6; /* default zlib compression */
1023 else if (comp_level < 6)
1024 comp_level = 6;
1025 else if (comp_level > 9)
1026 comp_level = 9;
1027 ret = deflateInit(&strm_compress, comp_level);
1028 if (ret != Z_OK) {
1029 kprintf("HAMMER2 ZLIB: fatal error "
1030 "on deflateInit.\n");
1031 }
1032
1033 comp_buffer = objcache_get(cache_buffer_write,
1034 M_INTWAIT);
1035 strm_compress.next_in = bp->b_data;
1036 strm_compress.avail_in = pblksize;
1037 strm_compress.next_out = comp_buffer;
1038 strm_compress.avail_out = pblksize / 2;
1039 ret = deflate(&strm_compress, Z_FINISH);
1040 if (ret == Z_STREAM_END) {
1041 comp_size = pblksize / 2 -
1042 strm_compress.avail_out;
1e5c08ba 1043 } else {
f481450f 1044 comp_size = 0;
355d67fc 1045 }
f481450f
MD
1046 ret = deflateEnd(&strm_compress);
1047 break;
1048 default:
1049 kprintf("Error: Unknown compression method.\n");
1050 kprintf("Comp_method = %d.\n", comp_algo);
1051 break;
355d67fc 1052 }
f481450f 1053 }
1e5c08ba 1054
f481450f
MD
1055 if (comp_size == 0) {
1056 /*
1057 * compression failed or turned off
1058 */
1059 comp_block_size = pblksize; /* safety */
1060 if (++ip->comp_heuristic > 128)
1061 ip->comp_heuristic = 8;
1062 } else {
1063 /*
1064 * compression succeeded
1065 */
1066 ip->comp_heuristic = 0;
1067 if (comp_size <= 1024) {
1068 comp_block_size = 1024;
1069 } else if (comp_size <= 2048) {
1070 comp_block_size = 2048;
1071 } else if (comp_size <= 4096) {
1072 comp_block_size = 4096;
1073 } else if (comp_size <= 8192) {
1074 comp_block_size = 8192;
1075 } else if (comp_size <= 16384) {
1076 comp_block_size = 16384;
1077 } else if (comp_size <= 32768) {
1078 comp_block_size = 32768;
355d67fc 1079 } else {
f481450f
MD
1080 panic("hammer2: WRITE PATH: "
1081 "Weird comp_size value.");
1082 /* NOT REACHED */
1083 comp_block_size = pblksize;
1084 }
1085 }
1086
1087 chain = hammer2_assign_physical(trans, ip, parentp,
1088 lbase, comp_block_size,
1089 errorp);
1090 ipdata = &ip->chain->data->ipdata; /* RELOAD */
1091
1092 if (*errorp) {
1093 kprintf("WRITE PATH: An error occurred while "
1094 "assigning physical space.\n");
1095 KKASSERT(chain == NULL);
1096 } else {
1097 /* Get device offset */
fdf62707
MD
1098 hammer2_io_t *dio;
1099 char *bdata;
f481450f
MD
1100 int temp_check;
1101
1102 KKASSERT(chain->flags & HAMMER2_CHAIN_MODIFIED);
1103
1104 switch(chain->bref.type) {
1105 case HAMMER2_BREF_TYPE_INODE:
1106 KKASSERT(chain->data->ipdata.op_flags &
1107 HAMMER2_OPFLAG_DIRECTDATA);
1108 KKASSERT(bp->b_loffset == 0);
1109 bcopy(bp->b_data, chain->data->ipdata.u.data,
1110 HAMMER2_EMBEDDED_BYTES);
1111 break;
1112 case HAMMER2_BREF_TYPE_DATA:
f481450f
MD
1113 temp_check = HAMMER2_DEC_CHECK(chain->bref.methods);
1114
1e5c08ba 1115 /*
f481450f
MD
1116 * Optimize out the read-before-write
1117 * if possible.
1e5c08ba 1118 */
fdf62707
MD
1119 *errorp = hammer2_io_newnz(chain->hmp,
1120 chain->bref.data_off,
1121 chain->bytes,
1122 &dio);
1123 if (*errorp) {
1124 hammer2_io_brelse(&dio);
1125 kprintf("hammer2: WRITE PATH: "
1126 "dbp bread error\n");
1127 break;
355d67fc 1128 }
fdf62707 1129 bdata = hammer2_io_data(dio, chain->bref.data_off);
355d67fc 1130
f481450f
MD
1131 /*
1132 * When loading the block make sure we don't
1133 * leave garbage after the compressed data.
1134 */
1135 if (comp_size) {
1136 chain->bref.methods =
1137 HAMMER2_ENC_COMP(comp_algo) +
1138 HAMMER2_ENC_CHECK(temp_check);
fdf62707 1139 bcopy(comp_buffer, bdata, comp_size);
f481450f 1140 if (comp_size != comp_block_size) {
fdf62707
MD
1141 bzero(bdata + comp_size,
1142 comp_block_size - comp_size);
355d67fc 1143 }
f481450f
MD
1144 } else {
1145 chain->bref.methods =
1146 HAMMER2_ENC_COMP(
1147 HAMMER2_COMP_NONE) +
1148 HAMMER2_ENC_CHECK(temp_check);
fdf62707 1149 bcopy(bp->b_data, bdata, pblksize);
f481450f 1150 }
355d67fc 1151
f481450f
MD
1152 /*
1153 * Device buffer is now valid, chain is no
1154 * longer in the initial state.
1155 */
fdf62707 1156 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
355d67fc 1157
f481450f
MD
1158 /* Now write the related bdp. */
1159 if (ioflag & IO_SYNC) {
355d67fc 1160 /*
f481450f 1161 * Synchronous I/O requested.
355d67fc 1162 */
fdf62707 1163 hammer2_io_bwrite(&dio);
f481450f
MD
1164 /*
1165 } else if ((ioflag & IO_DIRECT) &&
1166 loff + n == pblksize) {
fdf62707 1167 hammer2_io_bdwrite(&dio);
f481450f
MD
1168 */
1169 } else if (ioflag & IO_ASYNC) {
fdf62707 1170 hammer2_io_bawrite(&dio);
f481450f 1171 } else {
fdf62707 1172 hammer2_io_bdwrite(&dio);
355d67fc 1173 }
f481450f
MD
1174 break;
1175 default:
1176 panic("hammer2_write_bp: bad chain type %d\n",
1177 chain->bref.type);
1178 /* NOT REACHED */
1179 break;
355d67fc 1180 }
f481450f
MD
1181
1182 hammer2_chain_unlock(chain);
355d67fc 1183 }
f481450f
MD
1184 if (comp_buffer)
1185 objcache_put(cache_buffer_write, comp_buffer);
355d67fc
MD
1186}
1187
1188/*
1189 * Function that performs zero-checking and writing without compression,
1190 * it corresponds to default zero-checking path.
1191 */
1192static
1193void
1e5c08ba 1194hammer2_zero_check_and_write(struct buf *bp, hammer2_trans_t *trans,
355d67fc
MD
1195 hammer2_inode_t *ip, hammer2_inode_data_t *ipdata,
1196 hammer2_chain_t **parentp,
1197 hammer2_key_t lbase, int ioflag, int pblksize, int *errorp)
1198{
1199 hammer2_chain_t *chain;
1200
f481450f
MD
1201 if (test_block_zeros(bp->b_data, pblksize)) {
1202 zero_write(bp, trans, ip, ipdata, parentp, lbase, errorp);
1203 } else {
355d67fc
MD
1204 chain = hammer2_assign_physical(trans, ip, parentp,
1205 lbase, pblksize, errorp);
1e5c08ba 1206 hammer2_write_bp(chain, bp, ioflag, pblksize, errorp);
355d67fc
MD
1207 if (chain)
1208 hammer2_chain_unlock(chain);
355d67fc
MD
1209 }
1210}
1211
/*
 * A function to test whether a block of data contains only zeros,
 * returns TRUE (non-zero) if the block is all zeros.
 *
 * Scans word-at-a-time over the prefix and byte-wise over any
 * remaining tail, so sizes that are not a multiple of sizeof(long)
 * no longer read past the end of the buffer (the old loop advanced
 * in sizeof(long) steps unconditionally).
 */
static
int
test_block_zeros(const char *buf, size_t bytes)
{
	size_t i = 0;

	/* fast path: compare a long's worth of bytes at a time */
	while (i + sizeof(long) <= bytes) {
		if (*(const long *)(buf + i) != 0)
			return (0);
		i += sizeof(long);
	}
	/* byte-wise tail for sizes not a multiple of sizeof(long) */
	while (i < bytes) {
		if (buf[i] != 0)
			return (0);
		++i;
	}
	return (1);
}
1228
1229/*
1230 * Function to "write" a block that contains only zeros.
1231 */
1232static
1233void
1e5c08ba 1234zero_write(struct buf *bp, hammer2_trans_t *trans, hammer2_inode_t *ip,
355d67fc 1235 hammer2_inode_data_t *ipdata, hammer2_chain_t **parentp,
1e5c08ba 1236 hammer2_key_t lbase, int *errorp __unused)
355d67fc
MD
1237{
1238 hammer2_chain_t *parent;
1239 hammer2_chain_t *chain;
1897c66e
MD
1240 hammer2_key_t key_dummy;
1241 int cache_index = -1;
355d67fc
MD
1242
1243 parent = hammer2_chain_lookup_init(*parentp, 0);
1244
1897c66e
MD
1245 chain = hammer2_chain_lookup(&parent, &key_dummy, lbase, lbase,
1246 &cache_index, HAMMER2_LOOKUP_NODATA);
355d67fc
MD
1247 if (chain) {
1248 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
1249 bzero(chain->data->ipdata.u.data,
1250 HAMMER2_EMBEDDED_BYTES);
1251 } else {
1252 hammer2_chain_delete(trans, chain, 0);
1253 }
1254 hammer2_chain_unlock(chain);
1255 }
1256 hammer2_chain_lookup_done(parent);
1257}
1258
1259/*
1260 * Function to write the data as it is, without performing any sort of
1261 * compression. This function is used in path without compression and
1262 * default zero-checking path.
1263 */
1264static
1265void
1e5c08ba
MD
1266hammer2_write_bp(hammer2_chain_t *chain, struct buf *bp, int ioflag,
1267 int pblksize, int *errorp)
355d67fc 1268{
fdf62707
MD
1269 hammer2_io_t *dio;
1270 char *bdata;
355d67fc
MD
1271 int error;
1272 int temp_check = HAMMER2_DEC_CHECK(chain->bref.methods);
1273
1274 KKASSERT(chain->flags & HAMMER2_CHAIN_MODIFIED);
1275
1276 switch(chain->bref.type) {
1277 case HAMMER2_BREF_TYPE_INODE:
1278 KKASSERT(chain->data->ipdata.op_flags &
1279 HAMMER2_OPFLAG_DIRECTDATA);
1280 KKASSERT(bp->b_loffset == 0);
1281 bcopy(bp->b_data, chain->data->ipdata.u.data,
1282 HAMMER2_EMBEDDED_BYTES);
1e5c08ba 1283 error = 0;
355d67fc
MD
1284 break;
1285 case HAMMER2_BREF_TYPE_DATA:
fdf62707
MD
1286 error = hammer2_io_newnz(chain->hmp, chain->bref.data_off,
1287 chain->bytes, &dio);
1288 if (error) {
1289 hammer2_io_bqrelse(&dio);
1290 kprintf("hammer2: WRITE PATH: dbp bread error\n");
1291 break;
355d67fc 1292 }
fdf62707 1293 bdata = hammer2_io_data(dio, chain->bref.data_off);
355d67fc
MD
1294
1295 chain->bref.methods = HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
1296 HAMMER2_ENC_CHECK(temp_check);
fdf62707 1297 bcopy(bp->b_data, bdata, chain->bytes);
355d67fc
MD
1298
1299 /*
1300 * Device buffer is now valid, chain is no
1301 * longer in the initial state.
1e5c08ba 1302 */
355d67fc
MD
1303 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
1304
1305 if (ioflag & IO_SYNC) {
1306 /*
1307 * Synchronous I/O requested.
1308 */
fdf62707 1309 hammer2_io_bwrite(&dio);
355d67fc
MD
1310 /*
1311 } else if ((ioflag & IO_DIRECT) && loff + n == pblksize) {
fdf62707 1312 hammer2_io_bdwrite(&dio);
355d67fc
MD
1313 */
1314 } else if (ioflag & IO_ASYNC) {
fdf62707 1315 hammer2_io_bawrite(&dio);
355d67fc 1316 } else {
fdf62707 1317 hammer2_io_bdwrite(&dio);
355d67fc
MD
1318 }
1319 break;
1320 default:
1e5c08ba 1321 panic("hammer2_write_bp: bad chain type %d\n",
355d67fc
MD
1322 chain->bref.type);
1323 /* NOT REACHED */
1e5c08ba 1324 error = 0;
355d67fc
MD
1325 break;
1326 }
1e5c08ba 1327 *errorp = error;
355d67fc
MD
1328}
1329
e118c14f
MD
1330static
1331int
7bed8d7e 1332hammer2_remount(hammer2_mount_t *hmp, char *path, struct vnode *devvp,
703720e4
MD
1333 struct ucred *cred)
1334{
1335 return (0);
1336}
1337
e118c14f
MD
1338static
1339int
066e00cc 1340hammer2_vfs_unmount(struct mount *mp, int mntflags)
703720e4 1341{
e4e20f48 1342 hammer2_pfsmount_t *pmp;
54eb943b 1343 hammer2_mount_t *hmp;
7bed8d7e 1344 hammer2_chain_t *rchain;
703720e4 1345 int flags;
50e4f8f4 1346 int error = 0;
54eb943b 1347 int ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);
a02dfba1 1348 int dumpcnt;
7bed8d7e 1349 int i;
54eb943b 1350 struct vnode *devvp;
703720e4 1351
e4e20f48 1352 pmp = MPTOPMP(mp);
703720e4 1353
7bed8d7e
MD
1354 ccms_domain_uninit(&pmp->ccms_dom);
1355 kdmsg_iocom_uninit(&pmp->iocom); /* XXX chain dependency */
703720e4 1356
9b6b3df4
MD
1357 lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);
1358
065f4046
MD
1359 /*
1360 * If mount initialization proceeded far enough we must flush
1361 * its vnodes.
1362 */
1363 if (mntflags & MNT_FORCE)
1364 flags = FORCECLOSE;
1365 else
7bed8d7e 1366 flags = 0;
065f4046
MD
1367 if (pmp->iroot) {
1368 error = vflush(mp, 0, flags);
1369 if (error)
1370 goto failed;
1371 }
50e4f8f4 1372
065f4046
MD
1373 if (pmp->wthread_td) {
1374 mtx_lock(&pmp->wthread_mtx);
1375 pmp->wthread_destroy = 1;
1376 wakeup(&pmp->wthread_bioq);
1377 while (pmp->wthread_destroy != -1) {
1378 mtxsleep(&pmp->wthread_destroy,
1379 &pmp->wthread_mtx, 0,
1380 "umount-sleep", 0);
1381 }
1382 mtx_unlock(&pmp->wthread_mtx);
1383 pmp->wthread_td = NULL;
1384 }
e4e20f48 1385
065f4046
MD
1386 for (i = 0; i < pmp->cluster.nchains; ++i) {
1387 hmp = pmp->cluster.chains[i]->hmp;
7bed8d7e 1388
065f4046 1389 hammer2_mount_exlock(hmp);
222d9e22 1390
7bed8d7e
MD
1391 --hmp->pmp_count;
1392 kprintf("hammer2_unmount hmp=%p pmpcnt=%d\n",
1393 hmp, hmp->pmp_count);
1394
1395 /*
1396 * Flush any left over chains. The voldata lock is only used
1397 * to synchronize against HAMMER2_CHAIN_MODIFIED_AUX.
1398 */
1399 hammer2_voldata_lock(hmp);
a7720be7
MD
1400 if (((hmp->vchain.flags | hmp->fchain.flags) &
1401 HAMMER2_CHAIN_MODIFIED) ||
925e4ad1
MD
1402 hmp->vchain.core->update_hi > hmp->voldata.mirror_tid ||
1403 hmp->fchain.core->update_hi > hmp->voldata.freemap_tid) {
7bed8d7e
MD
1404 hammer2_voldata_unlock(hmp, 0);
1405 hammer2_vfs_sync(mp, MNT_WAIT);
1406 hammer2_vfs_sync(mp, MNT_WAIT);
1407 } else {
1408 hammer2_voldata_unlock(hmp, 0);
1409 }
1410 if (hmp->pmp_count == 0) {
a4dc31e0
MD
1411 if (((hmp->vchain.flags | hmp->fchain.flags) &
1412 HAMMER2_CHAIN_MODIFIED) ||
925e4ad1 1413 (hmp->vchain.core->update_hi >
a4dc31e0 1414 hmp->voldata.mirror_tid) ||
925e4ad1 1415 (hmp->fchain.core->update_hi >
a4dc31e0 1416 hmp->voldata.freemap_tid)) {
7bed8d7e
MD
1417 kprintf("hammer2_unmount: chains left over "
1418 "after final sync\n");
1419 if (hammer2_debug & 0x0010)
1420 Debugger("entered debugger");
1421 }
1422 }
1423
1424 /*
1425 * Cleanup the root and super-root chain elements
1426 * (which should be clean).
1427 */
1428 if (pmp->iroot) {
0dea3156 1429#if REPORT_REFS_ERRORS
7bed8d7e
MD
1430 if (pmp->iroot->refs != 1)
1431 kprintf("PMP->IROOT %p REFS WRONG %d\n",
1432 pmp->iroot, pmp->iroot->refs);
0dea3156 1433#else
7bed8d7e 1434 KKASSERT(pmp->iroot->refs == 1);
0dea3156 1435#endif
7bed8d7e
MD
1436 /* ref for pmp->iroot */
1437 hammer2_inode_drop(pmp->iroot);
1438 pmp->iroot = NULL;
10252dc7 1439 }
7bed8d7e
MD
1440
1441 rchain = pmp->cluster.chains[i];
1442 if (rchain) {
1443 atomic_clear_int(&rchain->flags, HAMMER2_CHAIN_MOUNTED);
0dea3156 1444#if REPORT_REFS_ERRORS
7bed8d7e
MD
1445 if (rchain->refs != 1)
1446 kprintf("PMP->RCHAIN %p REFS WRONG %d\n",
1447 rchain, rchain->refs);
0dea3156 1448#else
7bed8d7e 1449 KKASSERT(rchain->refs == 1);
0dea3156 1450#endif
7bed8d7e
MD
1451 hammer2_chain_drop(rchain);
1452 pmp->cluster.chains[i] = NULL;
e4e20f48 1453 }
222d9e22 1454
e4e20f48 1455 /*
7bed8d7e
MD
1456 * If no PFS's left drop the master hammer2_mount for the
1457 * device.
e4e20f48 1458 */
7bed8d7e
MD
1459 if (hmp->pmp_count == 0) {
1460 if (hmp->sroot) {
1461 hammer2_inode_drop(hmp->sroot);
1462 hmp->sroot = NULL;
1463 }
a864c5d9 1464
7bed8d7e
MD
1465 /*
1466 * Finish up with the device vnode
1467 */
1468 if ((devvp = hmp->devvp) != NULL) {
1469 vinvalbuf(devvp, (ronly ? 0 : V_SAVE), 0, 0);
1470 hmp->devvp = NULL;
1471 VOP_CLOSE(devvp,
1472 (ronly ? FREAD : FREAD|FWRITE));
1473 vrele(devvp);
1474 devvp = NULL;
1475 }
1a7cfe5a 1476
7bed8d7e 1477 /*
d7bfb2cb
MD
1478 * Final drop of embedded freemap root chain to
1479 * clean up fchain.core (fchain structure is not
1480 * flagged ALLOCATED so it is cleaned out and then
1481 * left to rot).
7bed8d7e
MD
1482 */
1483 hammer2_chain_drop(&hmp->fchain);
1484
1485 /*
065f4046
MD
1486 * Final drop of embedded volume root chain to clean
1487 * up vchain.core (vchain structure is not flagged
1488 * ALLOCATED so it is cleaned out and then left to
1489 * rot).
7bed8d7e
MD
1490 */
1491 dumpcnt = 50;
1492 hammer2_dump_chain(&hmp->vchain, 0, &dumpcnt);
a7720be7
MD
1493 dumpcnt = 50;
1494 hammer2_dump_chain(&hmp->fchain, 0, &dumpcnt);
7bed8d7e
MD
1495 hammer2_mount_unlock(hmp);
1496 hammer2_chain_drop(&hmp->vchain);
7bed8d7e 1497
fdf62707
MD
1498 hammer2_io_cleanup(hmp, &hmp->iotree);
1499 if (hmp->iofree_count) {
1500 kprintf("io_cleanup: %d I/O's left hanging\n",
1501 hmp->iofree_count);
1502 }
1503
7bed8d7e
MD
1504 TAILQ_REMOVE(&hammer2_mntlist, hmp, mntentry);
1505 kmalloc_destroy(&hmp->mchain);
1506 kfree(hmp, M_HAMMER2);
065f4046
MD
1507 } else {
1508 hammer2_mount_unlock(hmp);
7bed8d7e 1509 }
54eb943b 1510 }
703720e4 1511
e4e20f48 1512 pmp->mp = NULL;
54eb943b 1513 mp->mnt_data = NULL;
703720e4 1514
26bf1a36 1515 kmalloc_destroy(&pmp->mmsg);
99da41ea 1516 kmalloc_destroy(&pmp->minode);
26bf1a36 1517
e4e20f48 1518 kfree(pmp, M_HAMMER2);
9b6b3df4
MD
1519 error = 0;
1520
1521failed:
e4e20f48 1522 lockmgr(&hammer2_mntlk, LK_RELEASE);
a5913bdf 1523
703720e4
MD
1524 return (error);
1525}
1526
e118c14f
MD
1527static
1528int
066e00cc 1529hammer2_vfs_vget(struct mount *mp, struct vnode *dvp,
703720e4
MD
1530 ino_t ino, struct vnode **vpp)
1531{
1532 kprintf("hammer2_vget\n");
1533 return (EOPNOTSUPP);
1534}
1535
e118c14f
MD
1536static
1537int
066e00cc 1538hammer2_vfs_root(struct mount *mp, struct vnode **vpp)
703720e4 1539{
e4e20f48 1540 hammer2_pfsmount_t *pmp;
9596b8c4 1541 hammer2_chain_t *parent;
703720e4
MD
1542 int error;
1543 struct vnode *vp;
1544
e4e20f48 1545 pmp = MPTOPMP(mp);
e4e20f48 1546 if (pmp->iroot == NULL) {
703720e4
MD
1547 *vpp = NULL;
1548 error = EINVAL;
1549 } else {
9596b8c4 1550 parent = hammer2_inode_lock_sh(pmp->iroot);
e4e20f48 1551 vp = hammer2_igetv(pmp->iroot, &error);
9596b8c4 1552 hammer2_inode_unlock_sh(pmp->iroot, parent);
703720e4
MD
1553 *vpp = vp;
1554 if (vp == NULL)
1555 kprintf("vnodefail\n");
1556 }
703720e4
MD
1557
1558 return (error);
1559}
1560
28ee5f14
MD
1561/*
1562 * Filesystem status
1563 *
476d2aad 1564 * XXX incorporate ipdata->inode_quota and data_quota
28ee5f14 1565 */
e118c14f
MD
1566static
1567int
066e00cc 1568hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
703720e4 1569{
28ee5f14 1570 hammer2_pfsmount_t *pmp;
54eb943b 1571 hammer2_mount_t *hmp;
703720e4 1572
28ee5f14 1573 pmp = MPTOPMP(mp);
7bed8d7e
MD
1574 KKASSERT(pmp->cluster.nchains >= 1);
1575 hmp = pmp->cluster.chains[0]->hmp; /* XXX */
703720e4 1576
476d2aad 1577 mp->mnt_stat.f_files = pmp->inode_count;
28ee5f14
MD
1578 mp->mnt_stat.f_ffree = 0;
1579 mp->mnt_stat.f_blocks = hmp->voldata.allocator_size / HAMMER2_PBUFSIZE;
9f604b01 1580 mp->mnt_stat.f_bfree = hmp->voldata.allocator_free / HAMMER2_PBUFSIZE;
50e4f8f4 1581 mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree;
703720e4 1582
50e4f8f4 1583 *sbp = mp->mnt_stat;
703720e4
MD
1584 return (0);
1585}
1586
e118c14f
MD
1587static
1588int
066e00cc 1589hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
703720e4 1590{
28ee5f14 1591 hammer2_pfsmount_t *pmp;
50e4f8f4
MD
1592 hammer2_mount_t *hmp;
1593
28ee5f14 1594 pmp = MPTOPMP(mp);
7bed8d7e
MD
1595 KKASSERT(pmp->cluster.nchains >= 1);
1596 hmp = pmp->cluster.chains[0]->hmp; /* XXX */
50e4f8f4 1597
88a032af 1598 mp->mnt_vstat.f_bsize = HAMMER2_PBUFSIZE;
476d2aad 1599 mp->mnt_vstat.f_files = pmp->inode_count;
28ee5f14
MD
1600 mp->mnt_vstat.f_ffree = 0;
1601 mp->mnt_vstat.f_blocks = hmp->voldata.allocator_size / HAMMER2_PBUFSIZE;
51f0b4b7 1602 mp->mnt_vstat.f_bfree = hmp->voldata.allocator_free / HAMMER2_PBUFSIZE;
28ee5f14 1603 mp->mnt_vstat.f_bavail = mp->mnt_vstat.f_bfree;
50e4f8f4
MD
1604
1605 *sbp = mp->mnt_vstat;
1606 return (0);
703720e4
MD
1607}
1608
1609/*
1610 * Sync the entire filesystem; this is called from the filesystem syncer
1611 * process periodically and whenever a user calls sync(1) on the hammer
1612 * mountpoint.
1613 *
1614 * Currently is actually called from the syncer! \o/
1615 *
1616 * This task will have to snapshot the state of the dirty inode chain.
1617 * From that, it will have to make sure all of the inodes on the dirty
1618 * chain have IO initiated. We make sure that io is initiated for the root
1619 * block.
1620 *
1621 * If waitfor is set, we wait for media to acknowledge the new rootblock.
1622 *
1623 * THINKS: side A vs side B, to have sync not stall all I/O?
1624 */
e118c14f 1625int
066e00cc 1626hammer2_vfs_sync(struct mount *mp, int waitfor)
703720e4 1627{
b7926f31 1628 struct hammer2_sync_info info;
a7720be7 1629 hammer2_chain_t *chain;
a5913bdf 1630 hammer2_pfsmount_t *pmp;
54eb943b 1631 hammer2_mount_t *hmp;
b7926f31
MD
1632 int flags;
1633 int error;
7bed8d7e 1634 int total_error;
a7720be7 1635 int force_fchain;
3fc4c63d 1636 int i;
703720e4 1637
a5913bdf 1638 pmp = MPTOPMP(mp);
703720e4 1639
355d67fc
MD
1640 /*
1641 * We can't acquire locks on existing vnodes while in a transaction
1642 * without risking a deadlock. This assumes that vfsync() can be
1643 * called without the vnode locked (which it can in DragonFly).
1644 * Otherwise we'd have to implement a multi-pass or flag the lock
1645 * failures and retry.
c057466c
MD
1646 *
1647 * The reclamation code interlocks with the sync list's token
1648 * (by removing the vnode from the scan list) before unlocking
1649 * the inode, giving us time to ref the inode.
355d67fc
MD
1650 */
1651 /*flags = VMSC_GETVP;*/
1652 flags = 0;
b7926f31
MD
1653 if (waitfor & MNT_LAZY)
1654 flags |= VMSC_ONEPASS;
1655
d7bfb2cb 1656 /*
a4dc31e0
MD
1657 * Initialize a normal transaction and sync everything out, then
1658 * wait for pending I/O to finish (so it gets a transaction id
1659 * that the meta-data flush will catch).
d7bfb2cb 1660 */
a4dc31e0 1661 hammer2_trans_init(&info.trans, pmp, 0);
b7926f31
MD
1662 info.error = 0;
1663 info.waitfor = MNT_NOWAIT;
eddc656a 1664 vsyncscan(mp, flags | VMSC_NOWAIT, hammer2_sync_scan2, &info);
a7720be7 1665
b7926f31
MD
1666 if (info.error == 0 && (waitfor & MNT_WAIT)) {
1667 info.waitfor = waitfor;
eddc656a 1668 vsyncscan(mp, flags, hammer2_sync_scan2, &info);
b7926f31
MD
1669
1670 }
a4dc31e0
MD
1671 hammer2_trans_done(&info.trans);
1672 hammer2_bioq_sync(info.trans.pmp);
a7720be7
MD
1673
1674 /*
a4dc31e0 1675 * Start the flush transaction and flush all meta-data.
a7720be7 1676 */
a4dc31e0 1677 hammer2_trans_init(&info.trans, pmp, HAMMER2_TRANS_ISFLUSH);
a5913bdf 1678
7bed8d7e
MD
1679 total_error = 0;
1680 for (i = 0; i < pmp->cluster.nchains; ++i) {
1681 hmp = pmp->cluster.chains[i]->hmp;
a5913bdf 1682
d7bfb2cb
MD
1683 /*
1684 * Media mounts have two 'roots', vchain for the topology
1685 * and fchain for the free block table. Flush both.
1686 *
1687 * Note that the topology and free block table are handled
1688 * independently, so the free block table can wind up being
1689 * ahead of the topology. We depend on the bulk free scan
1690 * code to deal with any loose ends.
1691 */
7bed8d7e 1692 hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
a7720be7 1693 if ((hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED) ||
925e4ad1 1694 hmp->vchain.core->update_hi > hmp->voldata.mirror_tid) {
a7720be7
MD
1695 chain = &hmp->vchain;
1696 hammer2_chain_flush(&info.trans, &chain);
1697 KKASSERT(chain == &hmp->vchain);
925e4ad1 1698 hmp->voldata.mirror_tid = chain->bref.mirror_tid;
a7720be7
MD
1699 force_fchain = 1;
1700 } else {
1701 force_fchain = 0;
7bed8d7e
MD
1702 }
1703 hammer2_chain_unlock(&hmp->vchain);
1a7cfe5a 1704
7bed8d7e 1705 hammer2_chain_lock(&hmp->fchain, HAMMER2_RESOLVE_ALWAYS);
a7720be7 1706 if ((hmp->fchain.flags & HAMMER2_CHAIN_MODIFIED) ||
925e4ad1 1707 hmp->fchain.core->update_hi > hmp->voldata.freemap_tid ||
a7720be7 1708 force_fchain) {
d7bfb2cb 1709 /* this will also modify vchain as a side effect */
a7720be7
MD
1710 chain = &hmp->fchain;
1711 hammer2_chain_flush(&info.trans, &chain);
1712 KKASSERT(chain == &hmp->fchain);
925e4ad1 1713 hmp->voldata.freemap_tid = chain->bref.mirror_tid;
7bed8d7e
MD
1714 }
1715 hammer2_chain_unlock(&hmp->fchain);
1a7cfe5a 1716
7bed8d7e 1717 error = 0;
b7926f31 1718
2910a90c 1719 /*
7bed8d7e
MD
1720 * We can't safely flush the volume header until we have
1721 * flushed any device buffers which have built up.
1722 *
1723 * XXX this isn't being incremental
2910a90c 1724 */
7bed8d7e
MD
1725 vn_lock(hmp->devvp, LK_EXCLUSIVE | LK_RETRY);
1726 error = VOP_FSYNC(hmp->devvp, MNT_WAIT, 0);
1727 vn_unlock(hmp->devvp);
b7926f31 1728
2910a90c 1729 /*
7bed8d7e
MD
1730 * The flush code sets CHAIN_VOLUMESYNC to indicate that the
1731 * volume header needs synchronization via hmp->volsync.
1732 *
1733 * XXX synchronize the flag & data with only this flush XXX
2910a90c 1734 */
7bed8d7e
MD
1735 if (error == 0 &&
1736 (hmp->vchain.flags & HAMMER2_CHAIN_VOLUMESYNC)) {
1737 struct buf *bp;
1738
1739 /*
1740 * Synchronize the disk before flushing the volume
1741 * header.
1742 */
1743 bp = getpbuf(NULL);
1744 bp->b_bio1.bio_offset = 0;
1745 bp->b_bufsize = 0;
1746 bp->b_bcount = 0;
1747 bp->b_cmd = BUF_CMD_FLUSH;
1748 bp->b_bio1.bio_done = biodone_sync;
1749 bp->b_bio1.bio_flags |= BIO_SYNC;
1750 vn_strategy(hmp->devvp, &bp->b_bio1);
1751 biowait(&bp->b_bio1, "h2vol");
1752 relpbuf(bp, NULL);
1753
1754 /*
1755 * Then we can safely flush the version of the
1756 * volume header synchronized by the flush code.
1757 */
1758 i = hmp->volhdrno + 1;
1759 if (i >= HAMMER2_NUM_VOLHDRS)
1760 i = 0;
1761 if (i * HAMMER2_ZONE_BYTES64 + HAMMER2_SEGSIZE >
1762 hmp->volsync.volu_size) {
1763 i = 0;
1764 }
1765 kprintf("sync volhdr %d %jd\n",
1766 i, (intmax_t)hmp->volsync.volu_size);
1767 bp = getblk(hmp->devvp, i * HAMMER2_ZONE_BYTES64,
1768 HAMMER2_PBUFSIZE, 0, 0);
1769 atomic_clear_int(&hmp->vchain.flags,
1770 HAMMER2_CHAIN_VOLUMESYNC);
1771 bcopy(&hmp->volsync, bp->b_data, HAMMER2_PBUFSIZE);
1772 bawrite(bp);
1773 hmp->volhdrno = i;
3fc4c63d 1774 }
7bed8d7e
MD
1775 if (error)
1776 total_error = error;
b7926f31 1777 }
d001f460 1778 hammer2_trans_done(&info.trans);
a4dc31e0 1779
7bed8d7e 1780 return (total_error);
b7926f31 1781}
703720e4 1782
214f4a77
MD
1783/*
1784 * Sync passes.
1785 *
925e4ad1
MD
1786 * NOTE: We don't test update_lo/update_hi or MOVED here because the fsync
1787 * code won't flush on those flags. The syncer code above will do a
214f4a77
MD
1788 * general meta-data flush globally that will catch these flags.
1789 */
b7926f31
MD
1790
1791static int
1792hammer2_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
1793{
1794 struct hammer2_sync_info *info = data;
1795 hammer2_inode_t *ip;
1796 int error;
1797
c057466c
MD
1798 /*
1799 *
1800 */
b7926f31 1801 ip = VTOI(vp);
eddc656a
MD
1802 if (ip == NULL)
1803 return(0);
1804 if (vp->v_type == VNON || vp->v_type == VBAD) {
1805 vclrisdirty(vp);
1806 return(0);
1807 }
1808 if ((ip->flags & HAMMER2_INODE_MODIFIED) == 0 &&
1809 RB_EMPTY(&vp->v_rbdirty_tree)) {
1810 vclrisdirty(vp);
b7926f31
MD
1811 return(0);
1812 }
d001f460
MD
1813
1814 /*
1815 * VOP_FSYNC will start a new transaction so replicate some code
1816 * here to do it inline (see hammer2_vop_fsync()).
355d67fc
MD
1817 *
1818 * WARNING: The vfsync interacts with the buffer cache and might
1819 * block, we can't hold the inode lock at that time.
c057466c
MD
1820 * However, we MUST ref ip before blocking to ensure that
1821 * it isn't ripped out from under us (since we do not
1822 * hold a lock on the vnode).
d001f460 1823 */
c057466c 1824 hammer2_inode_ref(ip);
9596b8c4 1825 atomic_clear_int(&ip->flags, HAMMER2_INODE_MODIFIED);
c057466c
MD
1826 if (vp)
1827 vfsync(vp, MNT_NOWAIT, 1, NULL, NULL);
a7720be7
MD
1828
1829#if 0
1830 /*
1831 * XXX this interferes with flush operations mainly because the
1832 * same transaction id is being used by asynchronous buffer
1833 * operations above and can be reordered after the flush
1834 * below.
1835 */
355d67fc 1836 parent = hammer2_inode_lock_ex(ip);
a7720be7 1837 hammer2_chain_flush(&info->trans, &parent);
9596b8c4 1838 hammer2_inode_unlock_ex(ip, parent);
a7720be7 1839#endif
c057466c 1840 hammer2_inode_drop(ip);
d001f460
MD
1841 error = 0;
1842#if 0
b7926f31 1843 error = VOP_FSYNC(vp, MNT_NOWAIT, 0);
d001f460 1844#endif
b7926f31
MD
1845 if (error)
1846 info->error = error;
1847 return(0);
703720e4
MD
1848}
1849
e118c14f
MD
/*
 * NFS export: vnode-to-filehandle — stubbed, always succeeds.
 */
static
int
hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp)
{
	return (0);
}
1856
e118c14f
MD
/*
 * NFS export: filehandle-to-vnode — stubbed, always succeeds.
 */
static
int
hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
		   struct fid *fhp, struct vnode **vpp)
{
	return (0);
}
1864
e118c14f
MD
/*
 * NFS export: check export rights — stubbed, always succeeds.
 */
static
int
hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
		     int *exflagsp, struct ucred **credanonp)
{
	return (0);
}
50e4f8f4
MD
1872
1873/*
1874 * Support code for hammer2_mount(). Read, verify, and install the volume
1875 * header into the HMP
1876 *
1877 * XXX read four volhdrs and use the one with the highest TID whos CRC
1878 * matches.
1879 *
1880 * XXX check iCRCs.
57381c9e
VS
1881 *
1882 * XXX For filesystems w/ less than 4 volhdrs, make sure to not write to
1883 * nonexistant locations.
1884 *
1885 * XXX Record selected volhdr and ring updates to each of 4 volhdrs
50e4f8f4
MD
1886 */
1887static
1888int
1889hammer2_install_volume_header(hammer2_mount_t *hmp)
1890{
1891 hammer2_volume_data_t *vd;
99924359 1892 struct buf *bp;
60fbd5f4 1893 hammer2_crc32_t crc0, crc, bcrc0, bcrc;
99924359
MD
1894 int error_reported;
1895 int error;
0b3147ba
VS
1896 int valid;
1897 int i;
50e4f8f4 1898
99924359 1899 error_reported = 0;
0b3147ba
VS
1900 error = 0;
1901 valid = 0;
99924359 1902 bp = NULL;
0b3147ba 1903
99924359
MD
1904 /*
1905 * There are up to 4 copies of the volume header (syncs iterate
1906 * between them so there is no single master). We don't trust the
1907 * volu_size field so we don't know precisely how large the filesystem
1908 * is, so depend on the OS to return an error if we go beyond the
1909 * block device's EOF.
1910 */
0b3147ba 1911 for (i = 0; i < HAMMER2_NUM_VOLHDRS; i++) {
62efe6ec 1912 error = bread(hmp->devvp, i * HAMMER2_ZONE_BYTES64,
99924359 1913 HAMMER2_VOLUME_BYTES, &bp);
0b3147ba 1914 if (error) {
99924359
MD
1915 brelse(bp);
1916 bp = NULL;
0b3147ba
VS
1917 continue;
1918 }
1919
88a032af
VS
1920 vd = (struct hammer2_volume_data *) bp->b_data;
1921 if ((vd->magic != HAMMER2_VOLUME_ID_HBO) &&
1922 (vd->magic != HAMMER2_VOLUME_ID_ABO)) {
1923 brelse(bp);
1924 bp = NULL;
0b3147ba 1925 continue;
88a032af
VS
1926 }
1927
1928 if (vd->magic == HAMMER2_VOLUME_ID_ABO) {
1929 /* XXX: Reversed-endianness filesystem */
1930 kprintf("hammer2: reverse-endian filesystem detected");
1931 brelse(bp);
1932 bp = NULL;
1933 continue;
1934 }
0b3147ba
VS
1935
1936 crc = vd->icrc_sects[HAMMER2_VOL_ICRC_SECT0];
60fbd5f4 1937 crc0 = hammer2_icrc32(bp->b_data + HAMMER2_VOLUME_ICRC0_OFF,
99924359 1938 HAMMER2_VOLUME_ICRC0_SIZE);
60fbd5f4
VS
1939 bcrc = vd->icrc_sects[HAMMER2_VOL_ICRC_SECT1];
1940 bcrc0 = hammer2_icrc32(bp->b_data + HAMMER2_VOLUME_ICRC1_OFF,
1941 HAMMER2_VOLUME_ICRC1_SIZE);
1942 if ((crc0 != crc) || (bcrc0 != bcrc)) {
99924359 1943 kprintf("hammer2 volume header crc "
3fc4c63d 1944 "mismatch copy #%d %08x/%08x\n",
60fbd5f4 1945 i, crc0, crc);
99924359
MD
1946 error_reported = 1;
1947 brelse(bp);
1948 bp = NULL;
0b3147ba
VS
1949 continue;
1950 }
4d5318eb 1951 if (valid == 0 || hmp->voldata.mirror_tid < vd->mirror_tid) {
99924359
MD
1952 valid = 1;
1953 hmp->voldata = *vd;
3fc4c63d 1954 hmp->volhdrno = i;
0b3147ba 1955 }
99924359
MD
1956 brelse(bp);
1957 bp = NULL;
50e4f8f4 1958 }
0b3147ba 1959 if (valid) {
3fc4c63d 1960 hmp->volsync = hmp->voldata;
57381c9e 1961 error = 0;
3fc4c63d
MD
1962 if (error_reported || bootverbose || 1) { /* 1/DEBUG */
1963 kprintf("hammer2: using volume header #%d\n",
1964 hmp->volhdrno);
1965 }
0b3147ba
VS
1966 } else {
1967 error = EINVAL;
99924359 1968 kprintf("hammer2: no valid volume headers found!\n");
0b3147ba 1969 }
0b3147ba 1970 return (error);
50e4f8f4 1971}
0b3147ba 1972
1a34728c
MD
1973/*
1974 * Reconnect using the passed file pointer. The caller must ref the
1975 * fp for us.
1976 */
1977void
1978hammer2_cluster_reconnect(hammer2_pfsmount_t *pmp, struct file *fp)
1979{
476d2aad 1980 hammer2_inode_data_t *ipdata;
9596b8c4 1981 hammer2_chain_t *parent;
a5913bdf 1982 hammer2_mount_t *hmp;
3a5aa68f 1983 size_t name_len;
70c3c3b7 1984
7bed8d7e 1985 hmp = pmp->cluster.chains[0]->hmp; /* XXX */
a5913bdf 1986
70c3c3b7 1987 /*
3a5aa68f
MD
1988 * Closes old comm descriptor, kills threads, cleans up
1989 * states, then installs the new descriptor and creates
1990 * new threads.
1a34728c 1991 */
3a5aa68f 1992 kdmsg_iocom_reconnect(&pmp->iocom, fp, "hammer2");
bfc3a7b1 1993
9b8b748f 1994 /*
537d97bc
MD
1995 * Setup LNK_CONN fields for autoinitiated state machine
1996 */
9596b8c4
MD
1997 parent = hammer2_inode_lock_ex(pmp->iroot);
1998 ipdata = &parent->data->ipdata;
476d2aad
MD
1999 pmp->iocom.auto_lnk_conn.pfs_clid = ipdata->pfs_clid;
2000 pmp->iocom.auto_lnk_conn.pfs_fsid = ipdata->pfs_fsid;
2001 pmp->iocom.auto_lnk_conn.pfs_type = ipdata->pfs_type;
537d97bc 2002 pmp->iocom.auto_lnk_conn.proto_version = DMSG_SPAN_PROTO_1;
a5913bdf 2003 pmp->iocom.auto_lnk_conn.peer_type = hmp->voldata.peer_type;
537d97bc
MD
2004
2005 /*
2006 * Filter adjustment. Clients do not need visibility into other
2007 * clients (otherwise millions of clients would present a serious
2008 * problem). The fs_label also serves to restrict the namespace.
9b8b748f 2009 */
537d97bc
MD
2010 pmp->iocom.auto_lnk_conn.peer_mask = 1LLU << HAMMER2_PEER_HAMMER2;
2011 pmp->iocom.auto_lnk_conn.pfs_mask = (uint64_t)-1;
476d2aad 2012 switch (ipdata->pfs_type) {
537d97bc
MD
2013 case DMSG_PFSTYPE_CLIENT:
2014 pmp->iocom.auto_lnk_conn.peer_mask &=
2015 ~(1LLU << DMSG_PFSTYPE_CLIENT);
2016 break;
2017 default:
2018 break;
2019 }
2020
476d2aad 2021 name_len = ipdata->name_len;
537d97bc
MD
2022 if (name_len >= sizeof(pmp->iocom.auto_lnk_conn.fs_label))
2023 name_len = sizeof(pmp->iocom.auto_lnk_conn.fs_label) - 1;
476d2aad 2024 bcopy(ipdata->filename,
537d97bc 2025 pmp->iocom.auto_lnk_conn.fs_label,
ddfbb283 2026 name_len);
537d97bc
MD
2027 pmp->iocom.auto_lnk_conn.fs_label[name_len] = 0;
2028
2029 /*
2030 * Setup LNK_SPAN fields for autoinitiated state machine
2031 */
476d2aad
MD
2032 pmp->iocom.auto_lnk_span.pfs_clid = ipdata->pfs_clid;
2033 pmp->iocom.auto_lnk_span.pfs_fsid = ipdata->pfs_fsid;
2034 pmp->iocom.auto_lnk_span.pfs_type = ipdata->pfs_type;
a5913bdf 2035 pmp->iocom.auto_lnk_span.peer_type = hmp->voldata.peer_type;
537d97bc 2036 pmp->iocom.auto_lnk_span.proto_version = DMSG_SPAN_PROTO_1;
476d2aad 2037 name_len = ipdata->name_len;
537d97bc
MD
2038 if (name_len >= sizeof(pmp->iocom.auto_lnk_span.fs_label))
2039 name_len = sizeof(pmp->iocom.auto_lnk_span.fs_label) - 1;
476d2aad 2040 bcopy(ipdata->filename,
537d97bc
MD
2041 pmp->iocom.auto_lnk_span.fs_label,
2042 name_len);
2043 pmp->iocom.auto_lnk_span.fs_label[name_len] = 0;
9596b8c4 2044 hammer2_inode_unlock_ex(pmp->iroot, parent);
537d97bc
MD
2045
2046 kdmsg_iocom_autoinitiate(&pmp->iocom, hammer2_autodmsg);
70c3c3b7
MD
2047}
2048
9b8b748f 2049static int
537d97bc 2050hammer2_rcvdmsg(kdmsg_msg_t *msg)
9b8b748f 2051{
5bc5bca2 2052 switch(msg->any.head.cmd & DMSGF_TRANSMASK) {
537d97bc 2053 case DMSG_DBG_SHELL:
10c86c4e 2054 /*
ea7c725f 2055 * (non-transaction)
537d97bc 2056 * Execute shell command (not supported atm)
10c86c4e 2057 */
537d97bc 2058 kdmsg_msg_reply(msg, DMSG_ERR_NOSUPP);
8c280d5d 2059 break;
537d97bc 2060 case DMSG_DBG_SHELL | DMSGF_REPLY:
ea7c725f
MD
2061 /*
2062 * (non-transaction)
2063 */
537d97bc
MD
2064 if (msg->aux_data) {
2065 msg->aux_data[msg->aux_size - 1] = 0;
2066 kprintf("HAMMER2 DBG: %s\n", msg->aux_data);
2067 }
8c280d5d
MD
2068 break;
2069 default:
3b76886b 2070 /*
ea7c725f 2071 * Unsupported message received. We only need to
3b76886b
MD
2072 * reply if it's a transaction in order to close our end.
2073 * Ignore any one-way messages are any further messages
2074 * associated with the transaction.
2075 *
2076 * NOTE: This case also includes DMSG_LNK_ERROR messages
2077 * which might be one-way, replying to those would
2078 * cause an infinite ping-pong.
2079 */
2080 if (msg->any.head.cmd & DMSGF_CREATE)
2081 kdmsg_msg_reply(msg, DMSG_ERR_NOSUPP);
8c280d5d
MD
2082 break;
2083 }
2084 return(0);
2085}
2086
2087/*
537d97bc
MD
2088 * This function is called after KDMSG has automatically handled processing
2089 * of a LNK layer message (typically CONN, SPAN, or CIRC).
8c280d5d 2090 *
537d97bc
MD
2091 * We tag off the LNK_CONN to trigger our LNK_VOLCONF messages which
2092 * advertises all available hammer2 super-root volumes.
8c280d5d 2093 */
537d97bc
MD
2094static void
2095hammer2_autodmsg(kdmsg_msg_t *msg)
8c280d5d 2096{
537d97bc 2097 hammer2_pfsmount_t *pmp = msg->iocom->handle;
7bed8d7e 2098 hammer2_mount_t *hmp = pmp->cluster.chains[0]->hmp; /* XXX */
1a34728c 2099 int copyid;
8c280d5d 2100
537d97bc
MD
2101 /*
2102 * We only care about replies to our LNK_CONN auto-request. kdmsg
2103 * has already processed the reply, we use this calback as a shim
2104 * to know when we can advertise available super-root volumes.
2105 */
2106 if ((msg->any.head.cmd & DMSGF_TRANSMASK) !=
2107 (DMSG_LNK_CONN | DMSGF_CREATE | DMSGF_REPLY) ||
2108 msg->state == NULL) {
2109 return;
2110 }
2111
70c3c3b7
MD
2112 kprintf("LNK_CONN REPLY RECEIVED CMD %08x\n", msg->any.head.cmd);
2113
5bc5bca2 2114 if (msg->any.head.cmd & DMSGF_CREATE) {
537d97bc 2115 kprintf("HAMMER2: VOLDATA DUMP\n");
1a34728c
MD
2116
2117 /*
2118 * Dump the configuration stored in the volume header
2119 */
2120 hammer2_voldata_lock(hmp);
2121 for (copyid = 0; copyid < HAMMER2_COPYID_COUNT; ++copyid) {
2122 if (hmp->voldata.copyinfo[copyid].copyid == 0)
2123 continue;
2124 hammer2_volconf_update(pmp, copyid);
2125 }
0dea3156 2126 hammer2_voldata_unlock(hmp, 0);
8c280d5d 2127 }
537d97bc
MD
2128 if ((msg->any.head.cmd & DMSGF_DELETE) &&
2129 msg->state && (msg->state->txcmd & DMSGF_DELETE) == 0) {
2130 kprintf("HAMMER2: CONN WAS TERMINATED\n");
8c280d5d 2131 }
9b8b748f 2132}
1a34728c
MD
2133
2134/*
2135 * Volume configuration updates are passed onto the userland service
2136 * daemon via the open LNK_CONN transaction.
2137 */
2138void
2139hammer2_volconf_update(hammer2_pfsmount_t *pmp, int index)
2140{
7bed8d7e 2141 hammer2_mount_t *hmp = pmp->cluster.chains[0]->hmp; /* XXX */
3a5aa68f 2142 kdmsg_msg_t *msg;
1a34728c
MD
2143
2144 /* XXX interlock against connection state termination */
3a5aa68f
MD
2145 kprintf("volconf update %p\n", pmp->iocom.conn_state);
2146 if (pmp->iocom.conn_state) {
1a34728c 2147 kprintf("TRANSMIT VOLCONF VIA OPEN CONN TRANSACTION\n");
ea7c725f
MD
2148 msg = kdmsg_msg_alloc_state(pmp->iocom.conn_state,
2149 DMSG_LNK_VOLCONF, NULL, NULL);
1a34728c
MD
2150 msg->any.lnk_volconf.copy = hmp->voldata.copyinfo[index];
2151 msg->any.lnk_volconf.mediaid = hmp->voldata.fsid;
2152 msg->any.lnk_volconf.index = index;
3a5aa68f 2153 kdmsg_msg_write(msg);
1a34728c
MD
2154 }
2155}
9797e933
MD
2156
2157void
a02dfba1 2158hammer2_dump_chain(hammer2_chain_t *chain, int tab, int *countp)
9797e933 2159{
1897c66e 2160 hammer2_chain_layer_t *layer;
9797e933 2161 hammer2_chain_t *scan;
1897c66e 2162 hammer2_chain_t *first_parent;
9797e933 2163
a02dfba1
MD
2164 --*countp;
2165 if (*countp == 0) {
2166 kprintf("%*.*s...\n", tab, tab, "");
2167 return;
2168 }
2169 if (*countp < 0)
2170 return;
1897c66e 2171 first_parent = chain->core ? TAILQ_FIRST(&chain->core->ownerq) : NULL;
a7720be7 2172 kprintf("%*.*schain %p.%d %016jx/%d mir=%016jx\n",
9797e933 2173 tab, tab, "",
a7720be7
MD
2174 chain, chain->bref.type,
2175 chain->bref.key, chain->bref.keybits,
2176 chain->bref.mirror_tid);
2177
2178 kprintf("%*.*s [%08x] (%s) dt=%016jx refs=%d\n",
2179 tab, tab, "",
2180 chain->flags,
9797e933
MD
2181 ((chain->bref.type == HAMMER2_BREF_TYPE_INODE &&
2182 chain->data) ? (char *)chain->data->ipdata.filename : "?"),
a7720be7 2183 chain->delete_tid,
8853dfb5 2184 chain->refs);
a7720be7 2185
925e4ad1 2186 kprintf("%*.*s core %p [%08x] lo=%08jx hi=%08jx fp=%p np=%p",
a7720be7
MD
2187 tab, tab, "",
2188 chain->core, (chain->core ? chain->core->flags : 0),
925e4ad1
MD
2189 (chain->core ? chain->core->update_lo : -1),
2190 (chain->core ? chain->core->update_hi : -1),
a7720be7
MD
2191 first_parent,
2192 (first_parent ? TAILQ_NEXT(chain, core_entry) : NULL));
2193
1897c66e
MD
2194 if (first_parent)
2195 kprintf(" [fpflags %08x fprefs %d\n",
2196 first_parent->flags,
2197 first_parent->refs);
2198 if (chain->core == NULL || TAILQ_EMPTY(&chain->core->layerq))
9797e933
MD
2199 kprintf("\n");
2200 else
2201 kprintf(" {\n");
a7720be7
MD
2202 if (chain->core) {
2203 TAILQ_FOREACH(layer, &chain->core->layerq, entry) {
2204 RB_FOREACH(scan, hammer2_chain_tree, &layer->rbtree) {
2205 hammer2_dump_chain(scan, tab + 4, countp);
2206 }
1897c66e 2207 }
9797e933 2208 }
1897c66e 2209 if (chain->core && !TAILQ_EMPTY(&chain->core->layerq)) {
9797e933
MD
2210 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE && chain->data)
2211 kprintf("%*.*s}(%s)\n", tab, tab, "",
2212 chain->data->ipdata.filename);
2213 else
2214 kprintf("%*.*s}\n", tab, tab, "");
2215 }
2216}