hammer2 - Add peer_type field to LNK_CONN and LNK_SPAN
[dragonfly.git] / sys / vfs / hammer2 / hammer2_vfsops.c
/*-
 * Copyright (c) 2011, 2012 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/fcntl.h>
#include <sys/buf.h>
#include <sys/uuid.h>
#include <sys/vfsops.h>
#include <sys/sysctl.h>
#include <sys/socket.h>

#include "hammer2.h"
#include "hammer2_disk.h"
#include "hammer2_mount.h"
#include "hammer2_network.h"

struct hammer2_sync_info {
	int error;
	int waitfor;
};

TAILQ_HEAD(hammer2_mntlist, hammer2_mount);
static struct hammer2_mntlist hammer2_mntlist;
static struct lock hammer2_mntlk;

int hammer2_debug;
int hammer2_cluster_enable = 1;
int hammer2_hardlink_enable = 1;
long hammer2_iod_file_read;
long hammer2_iod_meta_read;
long hammer2_iod_indr_read;
long hammer2_iod_file_write;
long hammer2_iod_meta_write;
long hammer2_iod_indr_write;
long hammer2_iod_volu_write;
long hammer2_ioa_file_read;
long hammer2_ioa_meta_read;
long hammer2_ioa_indr_read;
long hammer2_ioa_file_write;
long hammer2_ioa_meta_write;
long hammer2_ioa_indr_write;
long hammer2_ioa_volu_write;

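/*
 * Descriptive note (added): the debug/enable tunables and the I/O counters
 * declared above are exported below under the vfs.hammer2 sysctl tree.
 */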
SYSCTL_NODE(_vfs, OID_AUTO, hammer2, CTLFLAG_RW, 0, "HAMMER2 filesystem");

SYSCTL_INT(_vfs_hammer2, OID_AUTO, debug, CTLFLAG_RW,
	   &hammer2_debug, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_enable, CTLFLAG_RW,
	   &hammer2_cluster_enable, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, hardlink_enable, CTLFLAG_RW,
	   &hammer2_hardlink_enable, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_read, CTLFLAG_RW,
	    &hammer2_iod_file_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_read, CTLFLAG_RW,
	    &hammer2_iod_meta_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_read, CTLFLAG_RW,
	    &hammer2_iod_indr_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_write, CTLFLAG_RW,
	    &hammer2_iod_file_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_write, CTLFLAG_RW,
	    &hammer2_iod_meta_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_write, CTLFLAG_RW,
	    &hammer2_iod_indr_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_write, CTLFLAG_RW,
	    &hammer2_iod_volu_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_file_read, CTLFLAG_RW,
	    &hammer2_ioa_file_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_meta_read, CTLFLAG_RW,
	    &hammer2_ioa_meta_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_indr_read, CTLFLAG_RW,
	    &hammer2_ioa_indr_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_file_write, CTLFLAG_RW,
	    &hammer2_ioa_file_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_meta_write, CTLFLAG_RW,
	    &hammer2_ioa_meta_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_indr_write, CTLFLAG_RW,
	    &hammer2_ioa_indr_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_volu_write, CTLFLAG_RW,
	    &hammer2_ioa_volu_write, 0, "");

static int hammer2_vfs_init(struct vfsconf *conf);
static int hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
				struct ucred *cred);
static int hammer2_remount(struct mount *, char *, struct vnode *,
				struct ucred *);
static int hammer2_vfs_unmount(struct mount *mp, int mntflags);
static int hammer2_vfs_root(struct mount *mp, struct vnode **vpp);
static int hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp,
				struct ucred *cred);
static int hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
				struct ucred *cred);
static int hammer2_vfs_sync(struct mount *mp, int waitfor);
static int hammer2_vfs_vget(struct mount *mp, struct vnode *dvp,
				ino_t ino, struct vnode **vpp);
static int hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
				struct fid *fhp, struct vnode **vpp);
static int hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp);
static int hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
				int *exflagsp, struct ucred **credanonp);

static int hammer2_install_volume_header(hammer2_mount_t *hmp);
static int hammer2_sync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int hammer2_sync_scan2(struct mount *mp, struct vnode *vp, void *data);

static void hammer2_cluster_thread_rd(void *arg);
static void hammer2_cluster_thread_wr(void *arg);
static int hammer2_msg_conn_reply(hammer2_state_t *state, hammer2_msg_t *msg);
static int hammer2_msg_span_reply(hammer2_state_t *state, hammer2_msg_t *msg);
static int hammer2_msg_lnk_rcvmsg(hammer2_msg_t *msg);
static void hammer2_drain_msgq(hammer2_pfsmount_t *pmp);

/*
 * HAMMER2 vfs operations.
 */
static struct vfsops hammer2_vfsops = {
	.vfs_init	= hammer2_vfs_init,
	.vfs_sync	= hammer2_vfs_sync,
	.vfs_mount	= hammer2_vfs_mount,
	.vfs_unmount	= hammer2_vfs_unmount,
	.vfs_root	= hammer2_vfs_root,
	.vfs_statfs	= hammer2_vfs_statfs,
	.vfs_statvfs	= hammer2_vfs_statvfs,
	.vfs_vget	= hammer2_vfs_vget,
	.vfs_vptofh	= hammer2_vfs_vptofh,
	.vfs_fhtovp	= hammer2_vfs_fhtovp,
	.vfs_checkexp	= hammer2_vfs_checkexp
};

MALLOC_DEFINE(M_HAMMER2, "HAMMER2-mount", "");

VFS_SET(hammer2_vfsops, hammer2, 0);
MODULE_VERSION(hammer2, 1);

static
int
hammer2_vfs_init(struct vfsconf *conf)
{
	int error;

	error = 0;

	if (HAMMER2_BLOCKREF_BYTES != sizeof(struct hammer2_blockref))
		error = EINVAL;
	if (HAMMER2_INODE_BYTES != sizeof(struct hammer2_inode_data))
		error = EINVAL;
	if (HAMMER2_ALLOCREF_BYTES != sizeof(struct hammer2_allocref))
		error = EINVAL;
	if (HAMMER2_VOLUME_BYTES != sizeof(struct hammer2_volume_data))
		error = EINVAL;

	if (error)
		kprintf("HAMMER2 structure size mismatch; cannot continue.\n");

	lockinit(&hammer2_mntlk, "mntlk", 0, 0);
	TAILQ_INIT(&hammer2_mntlist);

	return (error);
}

/*
 * Mount or remount HAMMER2 filesystem from physical media
 *
 *	mountroot
 *		mp		mount point structure
 *		path		NULL
 *		data		<unused>
 *		cred		<unused>
 *
 *	mount
 *		mp		mount point structure
 *		path		path to mount point
 *		data		pointer to argument structure in user space
 *			volume	volume path (device@LABEL form)
 *			hflags	user mount flags
 *		cred		user credentials
 *
 * RETURNS:	0	Success
 *		!0	error number
 */
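/*
 * Illustrative note (added, not part of the original source): a typical
 * mount request would pass a volume string such as "/dev/ad0s1d@LABEL",
 * which the code below splits at '@' into the block device path and the
 * PFS label.
 */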
static
int
hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
		  struct ucred *cred)
{
	struct hammer2_mount_info info;
	hammer2_pfsmount_t *pmp;
	hammer2_mount_t *hmp;
	hammer2_key_t lhc;
	struct vnode *devvp;
	struct nlookupdata nd;
	hammer2_chain_t *parent;
	hammer2_chain_t *schain;
	hammer2_chain_t *rchain;
	char devstr[MNAMELEN];
	size_t size;
	size_t done;
	char *dev;
	char *label;
	int ronly = 1;
	int create_hmp;
	int error;

	hmp = NULL;
	pmp = NULL;
	dev = NULL;
	label = NULL;
	devvp = NULL;

	kprintf("hammer2_mount\n");

	if (path == NULL) {
		/*
		 * Root mount
		 */
		bzero(&info, sizeof(info));
		info.cluster_fd = -1;
		return (EOPNOTSUPP);
	} else {
		/*
		 * Non-root mount or updating a mount
		 */
		error = copyin(data, &info, sizeof(info));
		if (error)
			return (error);

		error = copyinstr(info.volume, devstr, MNAMELEN - 1, &done);
		if (error)
			return (error);

		/* Extract device and label */
		dev = devstr;
		label = strchr(devstr, '@');
		if (label == NULL ||
		    ((label + 1) - dev) > done) {
			return (EINVAL);
		}
		*label = '\0';
		label++;
		if (*label == '\0')
			return (EINVAL);

		if (mp->mnt_flag & MNT_UPDATE) {
			/* Update mount */
			/* HAMMER2 implements NFS export via mountctl */
			hmp = MPTOHMP(mp);
			devvp = hmp->devvp;
			error = hammer2_remount(mp, path, devvp, cred);
			return error;
		}
	}

	/*
	 * PFS mount
	 *
	 * Lookup name and verify it refers to a block device.
	 */
	error = nlookup_init(&nd, dev, UIO_SYSSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = cache_vref(&nd.nl_nch, nd.nl_cred, &devvp);
	nlookup_done(&nd);

	if (error == 0) {
		if (vn_isdisk(devvp, &error))
			error = vfs_mountedon(devvp);
	}

	/*
	 * Determine if the device has already been mounted.  After this
	 * check hmp will be non-NULL if we are doing the second or more
	 * hammer2 mounts from the same device.
	 */
	lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);
	TAILQ_FOREACH(hmp, &hammer2_mntlist, mntentry) {
		if (hmp->devvp == devvp)
			break;
	}

	/*
	 * Open the device if this isn't a secondary mount
	 */
	if (hmp) {
		create_hmp = 0;
	} else {
		create_hmp = 1;
		if (error == 0 && vcount(devvp) > 0)
			error = EBUSY;

		/*
		 * Now open the device
		 */
		if (error == 0) {
			ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);
			vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
			error = vinvalbuf(devvp, V_SAVE, 0, 0);
			if (error == 0) {
				error = VOP_OPEN(devvp,
					 ronly ? FREAD : FREAD | FWRITE,
					 FSCRED, NULL);
			}
			vn_unlock(devvp);
		}
		if (error && devvp) {
			vrele(devvp);
			devvp = NULL;
		}
		if (error) {
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			return error;
		}
	}

	/*
	 * Block device opened successfully, finish initializing the
	 * mount structure.
	 *
	 * From this point on we have to call hammer2_unmount() on failure.
	 */
	pmp = kmalloc(sizeof(*pmp), M_HAMMER2, M_WAITOK | M_ZERO);
	mp->mnt_data = (qaddr_t)pmp;
	pmp->mp = mp;
	kmalloc_create(&pmp->mmsg, "HAMMER2-pfsmsg");
	lockinit(&pmp->msglk, "h2msg", 0, 0);
	TAILQ_INIT(&pmp->msgq);
	RB_INIT(&pmp->staterd_tree);
	RB_INIT(&pmp->statewr_tree);

	if (create_hmp) {
		hmp = kmalloc(sizeof(*hmp), M_HAMMER2, M_WAITOK | M_ZERO);
		hmp->ronly = ronly;
		hmp->devvp = devvp;
		kmalloc_create(&hmp->minode, "HAMMER2-inodes");
		kmalloc_create(&hmp->mchain, "HAMMER2-chains");
		TAILQ_INSERT_TAIL(&hammer2_mntlist, hmp, mntentry);
	}
	ccms_domain_init(&pmp->ccms_dom);
	pmp->hmp = hmp;
	pmp->router.pmp = pmp;
	++hmp->pmp_count;
	lockmgr(&hammer2_mntlk, LK_RELEASE);
	kprintf("hammer2_mount hmp=%p pmpcnt=%d\n", hmp, hmp->pmp_count);

	mp->mnt_flag = MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;	/* all entry pts are SMP */

	if (create_hmp) {
		/*
		 * vchain setup. vchain.data is special cased to NULL.
		 * vchain.refs is initialized and will never drop to 0.
		 */
		hmp->vchain.refs = 1;
		hmp->vchain.data = (void *)&hmp->voldata;
		hmp->vchain.bref.type = HAMMER2_BREF_TYPE_VOLUME;
		hmp->vchain.bref.data_off = 0 | HAMMER2_PBUFRADIX;
		hmp->vchain.bref_flush = hmp->vchain.bref;
		ccms_cst_init(&hmp->vchain.cst, NULL);
		/* hmp->vchain.u.xxx is left NULL */
		lockinit(&hmp->alloclk, "h2alloc", 0, 0);
		lockinit(&hmp->voldatalk, "voldata", 0, LK_CANRECURSE);

		/*
		 * Install the volume header
		 */
		error = hammer2_install_volume_header(hmp);
		if (error) {
			hammer2_vfs_unmount(mp, MNT_FORCE);
			return error;
		}
	}

	/*
	 * required mount structure initializations
	 */
	mp->mnt_stat.f_iosize = HAMMER2_PBUFSIZE;
	mp->mnt_stat.f_bsize = HAMMER2_PBUFSIZE;

	mp->mnt_vstat.f_frsize = HAMMER2_PBUFSIZE;
	mp->mnt_vstat.f_bsize = HAMMER2_PBUFSIZE;

	/*
	 * Optional fields
	 */
	mp->mnt_iosize_max = MAXPHYS;

	/*
	 * First locate the super-root inode, which is key 0 relative to the
	 * volume header's blockset.
	 *
	 * Then locate the root inode by scanning the directory keyspace
	 * represented by the label.
	 */
	if (create_hmp) {
		parent = &hmp->vchain;
		hammer2_chain_lock(hmp, parent, HAMMER2_RESOLVE_ALWAYS);
		schain = hammer2_chain_lookup(hmp, &parent,
				      HAMMER2_SROOT_KEY, HAMMER2_SROOT_KEY, 0);
		hammer2_chain_unlock(hmp, parent);
		if (schain == NULL) {
			kprintf("hammer2_mount: invalid super-root\n");
			hammer2_vfs_unmount(mp, MNT_FORCE);
			return EINVAL;
		}
		hammer2_chain_ref(hmp, schain);	/* for hmp->schain */
		hmp->schain = schain;		/* left locked */
	} else {
		schain = hmp->schain;
		hammer2_chain_lock(hmp, schain, HAMMER2_RESOLVE_ALWAYS);
	}

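	/*
	 * Descriptive note (added): scan the directory keyspace covered by
	 * the label's hash.  Several names may fall into the same hash
	 * range, so the filename stored in each candidate inode is compared
	 * against the label to select the correct PFS root.
	 */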
	parent = schain;
	lhc = hammer2_dirhash(label, strlen(label));
	rchain = hammer2_chain_lookup(hmp, &parent,
				      lhc, lhc + HAMMER2_DIRHASH_LOMASK,
				      0);
	while (rchain) {
		if (rchain->bref.type == HAMMER2_BREF_TYPE_INODE &&
		    rchain->u.ip &&
		    strcmp(label, rchain->data->ipdata.filename) == 0) {
			break;
		}
		rchain = hammer2_chain_next(hmp, &parent, rchain,
					    lhc, lhc + HAMMER2_DIRHASH_LOMASK,
					    0);
	}
	hammer2_chain_unlock(hmp, parent);
	if (rchain == NULL) {
		kprintf("hammer2_mount: PFS label not found\n");
		hammer2_vfs_unmount(mp, MNT_FORCE);
		return EINVAL;
	}
	if (rchain->flags & HAMMER2_CHAIN_MOUNTED) {
		hammer2_chain_unlock(hmp, rchain);
		kprintf("hammer2_mount: PFS label already mounted!\n");
		hammer2_vfs_unmount(mp, MNT_FORCE);
		return EBUSY;
	}
	atomic_set_int(&rchain->flags, HAMMER2_CHAIN_MOUNTED);

	hammer2_chain_ref(hmp, rchain);		/* for pmp->rchain */
	hammer2_chain_unlock(hmp, rchain);
	pmp->rchain = rchain;			/* left held & unlocked */
	pmp->iroot = rchain->u.ip;		/* implied hold from rchain */
	pmp->iroot->pmp = pmp;

	kprintf("iroot %p\n", pmp->iroot);

	/*
	 * Ref the cluster management messaging descriptor.  The mount
	 * program deals with the other end of the communications pipe.
	 */
	pmp->msg_fp = holdfp(curproc->p_fd, info.cluster_fd, -1);
	if (pmp->msg_fp == NULL) {
		kprintf("hammer2_mount: bad cluster_fd!\n");
		hammer2_vfs_unmount(mp, MNT_FORCE);
		return EBADF;
	}
	lwkt_create(hammer2_cluster_thread_rd, pmp, &pmp->msgrd_td,
		    NULL, 0, -1, "hammer2-msgrd");
	lwkt_create(hammer2_cluster_thread_wr, pmp, &pmp->msgwr_td,
		    NULL, 0, -1, "hammer2-msgwr");

	/*
	 * Finish setup
	 */
	vfs_getnewfsid(mp);
	vfs_add_vnodeops(mp, &hammer2_vnode_vops, &mp->mnt_vn_norm_ops);
	vfs_add_vnodeops(mp, &hammer2_spec_vops, &mp->mnt_vn_spec_ops);
	vfs_add_vnodeops(mp, &hammer2_fifo_vops, &mp->mnt_vn_fifo_ops);

	copyinstr(info.volume, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, &size);
	bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	bzero(mp->mnt_stat.f_mntonname, sizeof(mp->mnt_stat.f_mntonname));
	copyinstr(path, mp->mnt_stat.f_mntonname,
		  sizeof(mp->mnt_stat.f_mntonname) - 1,
		  &size);

	/*
	 * Initial statfs to prime mnt_stat.
	 */
	hammer2_vfs_statfs(mp, &mp->mnt_stat, cred);

	return 0;
}

static
int
hammer2_remount(struct mount *mp, char *path, struct vnode *devvp,
		struct ucred *cred)
{
	return (0);
}

static
int
hammer2_vfs_unmount(struct mount *mp, int mntflags)
{
	hammer2_pfsmount_t *pmp;
	hammer2_mount_t *hmp;
	int flags;
	int error = 0;
	int ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);
	struct vnode *devvp;

	pmp = MPTOPMP(mp);
	hmp = pmp->hmp;
	flags = 0;

	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

	hammer2_mount_exlock(hmp);

	/*
	 * If mount initialization proceeded far enough we must flush
	 * its vnodes.
	 */
	if (pmp->iroot)
		error = vflush(mp, 0, flags);

	if (error)
		return error;

	lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);
	--hmp->pmp_count;
	kprintf("hammer2_unmount hmp=%p pmpcnt=%d\n", hmp, hmp->pmp_count);

	/*
	 * Flush any left over chains.  The voldata lock is only used
	 * to synchronize against HAMMER2_CHAIN_MODIFIED_AUX.
	 */
	hammer2_voldata_lock(hmp);
	if (hmp->vchain.flags & (HAMMER2_CHAIN_MODIFIED |
				 HAMMER2_CHAIN_MODIFIED_AUX |
				 HAMMER2_CHAIN_SUBMODIFIED)) {
		hammer2_voldata_unlock(hmp);
		hammer2_vfs_sync(mp, MNT_WAIT);
	} else {
		hammer2_voldata_unlock(hmp);
	}
	if (hmp->pmp_count == 0) {
		if (hmp->vchain.flags & (HAMMER2_CHAIN_MODIFIED |
					 HAMMER2_CHAIN_MODIFIED_AUX |
					 HAMMER2_CHAIN_SUBMODIFIED)) {
			kprintf("hammer2_unmount: chains left over after "
				"final sync\n");
			if (hammer2_debug & 0x0010)
				Debugger("entered debugger");
		}
	}

	/*
	 * Cleanup the root and super-root chain elements (which should be
	 * clean).
	 */
	pmp->iroot = NULL;
	if (pmp->rchain) {
		atomic_clear_int(&pmp->rchain->flags, HAMMER2_CHAIN_MOUNTED);
		KKASSERT(pmp->rchain->refs == 1);
		hammer2_chain_drop(hmp, pmp->rchain);
		pmp->rchain = NULL;
	}
	ccms_domain_uninit(&pmp->ccms_dom);

	/*
	 * Ask the cluster controller to go away
	 */
	atomic_set_int(&pmp->msg_ctl, HAMMER2_CLUSTERCTL_KILL);
	while (pmp->msgrd_td || pmp->msgwr_td) {
		wakeup(&pmp->msg_ctl);
		tsleep(pmp, 0, "clstrkl", hz);
	}

	/*
	 * Drop communications descriptor
	 */
	if (pmp->msg_fp) {
		fdrop(pmp->msg_fp);
		pmp->msg_fp = NULL;
	}

	/*
	 * If no PFS's left drop the master hammer2_mount for the device.
	 */
	if (hmp->pmp_count == 0) {
		if (hmp->schain) {
			KKASSERT(hmp->schain->refs == 1);
			hammer2_chain_drop(hmp, hmp->schain);
			hmp->schain = NULL;
		}

		/*
		 * Finish up with the device vnode
		 */
		if ((devvp = hmp->devvp) != NULL) {
			vinvalbuf(devvp, (ronly ? 0 : V_SAVE), 0, 0);
			hmp->devvp = NULL;
			VOP_CLOSE(devvp, (ronly ? FREAD : FREAD|FWRITE));
			vrele(devvp);
			devvp = NULL;
		}
	}
	hammer2_mount_unlock(hmp);

	pmp->mp = NULL;
	pmp->hmp = NULL;
	mp->mnt_data = NULL;

	kmalloc_destroy(&pmp->mmsg);

	kfree(pmp, M_HAMMER2);
	if (hmp->pmp_count == 0) {
		TAILQ_REMOVE(&hammer2_mntlist, hmp, mntentry);
		kmalloc_destroy(&hmp->minode);
		kmalloc_destroy(&hmp->mchain);
		kfree(hmp, M_HAMMER2);
	}
	lockmgr(&hammer2_mntlk, LK_RELEASE);
	return (error);
}

static
int
hammer2_vfs_vget(struct mount *mp, struct vnode *dvp,
		 ino_t ino, struct vnode **vpp)
{
	kprintf("hammer2_vget\n");
	return (EOPNOTSUPP);
}

static
int
hammer2_vfs_root(struct mount *mp, struct vnode **vpp)
{
	hammer2_pfsmount_t *pmp;
	hammer2_mount_t *hmp;
	int error;
	struct vnode *vp;

	pmp = MPTOPMP(mp);
	hmp = pmp->hmp;
	hammer2_mount_exlock(hmp);
	if (pmp->iroot == NULL) {
		*vpp = NULL;
		error = EINVAL;
	} else {
		hammer2_chain_lock(hmp, &pmp->iroot->chain,
				   HAMMER2_RESOLVE_ALWAYS |
				   HAMMER2_RESOLVE_SHARED);
		vp = hammer2_igetv(pmp->iroot, &error);
		hammer2_chain_unlock(hmp, &pmp->iroot->chain);
		*vpp = vp;
		if (vp == NULL)
			kprintf("vnodefail\n");
	}
	hammer2_mount_unlock(hmp);

	return (error);
}

/*
 * Filesystem status
 *
 * XXX incorporate pmp->iroot->ip_data.inode_quota and data_quota
 */
static
int
hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
{
	hammer2_pfsmount_t *pmp;
	hammer2_mount_t *hmp;

	pmp = MPTOPMP(mp);
	hmp = MPTOHMP(mp);

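	/*
	 * Descriptive note (added): the block counts below are derived from
	 * the volume header's allocator fields; free space is approximated
	 * as everything beyond allocator_beg rather than an exact count.
	 */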
	mp->mnt_stat.f_files = pmp->iroot->ip_data.inode_count +
			       pmp->iroot->delta_icount;
	mp->mnt_stat.f_ffree = 0;
	mp->mnt_stat.f_blocks = hmp->voldata.allocator_size / HAMMER2_PBUFSIZE;
	mp->mnt_stat.f_bfree = (hmp->voldata.allocator_size -
				hmp->voldata.allocator_beg) / HAMMER2_PBUFSIZE;
	mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree;

	*sbp = mp->mnt_stat;
	return (0);
}

static
int
hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
{
	hammer2_pfsmount_t *pmp;
	hammer2_mount_t *hmp;

	pmp = MPTOPMP(mp);
	hmp = MPTOHMP(mp);

	mp->mnt_vstat.f_bsize = HAMMER2_PBUFSIZE;
	mp->mnt_vstat.f_files = pmp->iroot->ip_data.inode_count +
				pmp->iroot->delta_icount;
	mp->mnt_vstat.f_ffree = 0;
	mp->mnt_vstat.f_blocks = hmp->voldata.allocator_size / HAMMER2_PBUFSIZE;
	mp->mnt_vstat.f_bfree = (hmp->voldata.allocator_size -
				 hmp->voldata.allocator_beg) / HAMMER2_PBUFSIZE;
	mp->mnt_vstat.f_bavail = mp->mnt_vstat.f_bfree;

	*sbp = mp->mnt_vstat;
	return (0);
}

/*
 * Sync the entire filesystem; this is called from the filesystem syncer
 * process periodically and whenever a user calls sync(1) on the hammer
 * mountpoint.
 *
 * Currently is actually called from the syncer! \o/
 *
 * This task will have to snapshot the state of the dirty inode chain.
 * From that, it will have to make sure all of the inodes on the dirty
 * chain have IO initiated.  We make sure that io is initiated for the root
 * block.
 *
 * If waitfor is set, we wait for media to acknowledge the new rootblock.
 *
 * THINKS: side A vs side B, to have sync not stall all I/O?
 */
static
int
hammer2_vfs_sync(struct mount *mp, int waitfor)
{
	struct hammer2_sync_info info;
	hammer2_mount_t *hmp;
	int flags;
	int error;
	int haswork;

	hmp = MPTOHMP(mp);

	flags = VMSC_GETVP;
	if (waitfor & MNT_LAZY)
		flags |= VMSC_ONEPASS;

	info.error = 0;
	info.waitfor = MNT_NOWAIT;
	vmntvnodescan(mp, flags | VMSC_NOWAIT,
		      hammer2_sync_scan1,
		      hammer2_sync_scan2, &info);
	if (info.error == 0 && (waitfor & MNT_WAIT)) {
		info.waitfor = waitfor;
		vmntvnodescan(mp, flags,
			      hammer2_sync_scan1,
			      hammer2_sync_scan2, &info);

	}
#if 0
	if (waitfor == MNT_WAIT) {
		/* XXX */
	} else {
		/* XXX */
	}
#endif
	hammer2_chain_lock(hmp, &hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
	if (hmp->vchain.flags & (HAMMER2_CHAIN_MODIFIED |
				 HAMMER2_CHAIN_MODIFIED_AUX |
				 HAMMER2_CHAIN_SUBMODIFIED)) {
		hammer2_chain_flush(hmp, &hmp->vchain, 0);
		haswork = 1;
	} else {
		haswork = 0;
	}
	hammer2_chain_unlock(hmp, &hmp->vchain);

	error = 0;

	if ((waitfor & MNT_LAZY) == 0) {
		waitfor = MNT_NOWAIT;
		vn_lock(hmp->devvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(hmp->devvp, waitfor, 0);
		vn_unlock(hmp->devvp);
	}

	if (error == 0 && haswork) {
		struct buf *bp;

		/*
		 * Synchronize the disk before flushing the volume
		 * header.
		 */
		bp = getpbuf(NULL);
		bp->b_bio1.bio_offset = 0;
		bp->b_bufsize = 0;
		bp->b_bcount = 0;
		bp->b_cmd = BUF_CMD_FLUSH;
		bp->b_bio1.bio_done = biodone_sync;
		bp->b_bio1.bio_flags |= BIO_SYNC;
		vn_strategy(hmp->devvp, &bp->b_bio1);
		biowait(&bp->b_bio1, "h2vol");
		relpbuf(bp, NULL);

		/*
		 * Then we can safely flush the volume header.  Volume
		 * data is locked separately to prevent ioctl functions
		 * from deadlocking due to a configuration issue.
		 */
		bp = getblk(hmp->devvp, 0, HAMMER2_PBUFSIZE, 0, 0);
		hammer2_voldata_lock(hmp);
		bcopy(&hmp->voldata, bp->b_data, HAMMER2_PBUFSIZE);
		hammer2_voldata_unlock(hmp);
		bawrite(bp);
	}
	return (error);
}

/*
 * Sync passes.
 *
 * NOTE: We don't test SUBMODIFIED or MOVED here because the fsync code
 *	 won't flush on those flags.  The syncer code above will do a
 *	 general meta-data flush globally that will catch these flags.
 */
static int
hammer2_sync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
	hammer2_inode_t *ip;

	ip = VTOI(vp);
	if (vp->v_type == VNON || ip == NULL ||
	    ((ip->chain.flags & (HAMMER2_CHAIN_MODIFIED |
				 HAMMER2_CHAIN_DIRTYEMBED)) == 0 &&
	     RB_EMPTY(&vp->v_rbdirty_tree))) {
		return(-1);
	}
	return(0);
}

static int
hammer2_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer2_sync_info *info = data;
	hammer2_inode_t *ip;
	int error;

	ip = VTOI(vp);
	if (vp->v_type == VNON || vp->v_type == VBAD ||
	    ((ip->chain.flags & (HAMMER2_CHAIN_MODIFIED |
				 HAMMER2_CHAIN_DIRTYEMBED)) == 0 &&
	     RB_EMPTY(&vp->v_rbdirty_tree))) {
		return(0);
	}
	error = VOP_FSYNC(vp, MNT_NOWAIT, 0);
	if (error)
		info->error = error;
	return(0);
}

static
int
hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp)
{
	return (0);
}

static
int
hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
		   struct fid *fhp, struct vnode **vpp)
{
	return (0);
}

static
int
hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
		     int *exflagsp, struct ucred **credanonp)
{
	return (0);
}

/*
 * Support code for hammer2_mount().  Read, verify, and install the volume
 * header into the HMP.
 *
 * XXX read four volhdrs and use the one with the highest TID whose CRC
 *     matches.
 *
 * XXX check iCRCs.
 *
 * XXX For filesystems w/ less than 4 volhdrs, make sure to not write to
 *     nonexistent locations.
 *
 * XXX Record selected volhdr and ring updates to each of 4 volhdrs
 */
static
int
hammer2_install_volume_header(hammer2_mount_t *hmp)
{
	hammer2_volume_data_t *vd;
	struct buf *bp;
	hammer2_crc32_t crc0, crc, bcrc0, bcrc;
	int error_reported;
	int error;
	int valid;
	int i;

	error_reported = 0;
	error = 0;
	valid = 0;
	bp = NULL;

	/*
	 * There are up to 4 copies of the volume header (syncs iterate
	 * between them so there is no single master).  We don't trust the
	 * volu_size field so we don't know precisely how large the
	 * filesystem is, so depend on the OS to return an error if we go
	 * beyond the block device's EOF.
	 */
	for (i = 0; i < HAMMER2_NUM_VOLHDRS; i++) {
		error = bread(hmp->devvp, i * HAMMER2_ZONE_BYTES64,
			      HAMMER2_VOLUME_BYTES, &bp);
		if (error) {
			brelse(bp);
			bp = NULL;
			continue;
		}

		vd = (struct hammer2_volume_data *) bp->b_data;
		if ((vd->magic != HAMMER2_VOLUME_ID_HBO) &&
		    (vd->magic != HAMMER2_VOLUME_ID_ABO)) {
			brelse(bp);
			bp = NULL;
			continue;
		}

		if (vd->magic == HAMMER2_VOLUME_ID_ABO) {
			/* XXX: Reversed-endianness filesystem */
			kprintf("hammer2: reverse-endian filesystem detected");
			brelse(bp);
			bp = NULL;
			continue;
		}

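		/*
		 * Descriptive note (added): both ICRC sections covering the
		 * volume header must validate before this copy is trusted.
		 */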
		crc = vd->icrc_sects[HAMMER2_VOL_ICRC_SECT0];
		crc0 = hammer2_icrc32(bp->b_data + HAMMER2_VOLUME_ICRC0_OFF,
				      HAMMER2_VOLUME_ICRC0_SIZE);
		bcrc = vd->icrc_sects[HAMMER2_VOL_ICRC_SECT1];
		bcrc0 = hammer2_icrc32(bp->b_data + HAMMER2_VOLUME_ICRC1_OFF,
				       HAMMER2_VOLUME_ICRC1_SIZE);
		if ((crc0 != crc) || (bcrc0 != bcrc)) {
			kprintf("hammer2 volume header crc "
				"mismatch copy #%d\t%08x %08x",
				i, crc0, crc);
			error_reported = 1;
			brelse(bp);
			bp = NULL;
			continue;
		}
		if (valid == 0 || hmp->voldata.mirror_tid < vd->mirror_tid) {
			valid = 1;
			hmp->voldata = *vd;
		}
		brelse(bp);
		bp = NULL;
	}
	if (valid) {
		error = 0;
		if (error_reported)
			kprintf("hammer2: a valid volume header was found\n");
	} else {
		error = EINVAL;
		kprintf("hammer2: no valid volume headers found!\n");
	}
	return (error);
}

/*
 * Reconnect using the passed file pointer.  The caller must ref the
 * fp for us.
 */
void
hammer2_cluster_reconnect(hammer2_pfsmount_t *pmp, struct file *fp)
{
	/*
	 * Destroy the current connection
	 */
	atomic_set_int(&pmp->msg_ctl, HAMMER2_CLUSTERCTL_KILL);
	while (pmp->msgrd_td || pmp->msgwr_td) {
		wakeup(&pmp->msg_ctl);
		tsleep(pmp, 0, "clstrkl", hz);
	}

	/*
	 * Drop communications descriptor
	 */
	if (pmp->msg_fp) {
		fdrop(pmp->msg_fp);
		pmp->msg_fp = NULL;
	}
	kprintf("RESTART CONNECTION\n");

	/*
	 * Setup new communications descriptor
	 */
	pmp->msg_ctl = 0;
	pmp->msg_fp = fp;
	pmp->msg_seq = 0;
	lwkt_create(hammer2_cluster_thread_rd, pmp, &pmp->msgrd_td,
		    NULL, 0, -1, "hammer2-msgrd");
	lwkt_create(hammer2_cluster_thread_wr, pmp, &pmp->msgwr_td,
		    NULL, 0, -1, "hammer2-msgwr");
}

/*
 * Cluster controller thread.  Perform messaging functions.  We have one
 * thread for the reader and one for the writer.  The writer handles
 * shutdown requests (which should break the reader thread).
 */
static
void
hammer2_cluster_thread_rd(void *arg)
{
	hammer2_pfsmount_t *pmp = arg;
	hammer2_msg_hdr_t hdr;
	hammer2_msg_t *msg;
	hammer2_state_t *state;
	size_t hbytes;
	int error = 0;

	while ((pmp->msg_ctl & HAMMER2_CLUSTERCTL_KILL) == 0) {
		/*
		 * Retrieve the message from the pipe or socket.
		 */
		error = fp_read(pmp->msg_fp, &hdr, sizeof(hdr),
				NULL, 1, UIO_SYSSPACE);
		if (error)
			break;
		if (hdr.magic != HAMMER2_MSGHDR_MAGIC) {
			kprintf("hammer2: msgrd: bad magic: %04x\n",
				hdr.magic);
			error = EINVAL;
			break;
		}
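		/*
		 * Descriptive note (added): the header length is encoded in
		 * the SIZE field of the command word, in HAMMER2_MSG_ALIGN
		 * units; validate it before reading any extended header or
		 * auxiliary payload.
		 */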
		hbytes = (hdr.cmd & HAMMER2_MSGF_SIZE) * HAMMER2_MSG_ALIGN;
		if (hbytes < sizeof(hdr) || hbytes > HAMMER2_MSGAUX_MAX) {
			kprintf("hammer2: msgrd: bad header size %zd\n",
				hbytes);
			error = EINVAL;
			break;
		}
		/* XXX messy: mask cmd to avoid allocating state */
		msg = hammer2_msg_alloc(&pmp->router,
					hdr.cmd & HAMMER2_MSGF_BASECMDMASK,
					NULL, NULL);
		msg->any.head = hdr;
		msg->hdr_size = hbytes;
		if (hbytes > sizeof(hdr)) {
			error = fp_read(pmp->msg_fp, &msg->any.head + 1,
					hbytes - sizeof(hdr),
					NULL, 1, UIO_SYSSPACE);
			if (error) {
				kprintf("hammer2: short msg received\n");
				error = EINVAL;
				break;
			}
		}
		msg->aux_size = hdr.aux_bytes * HAMMER2_MSG_ALIGN;
		if (msg->aux_size > HAMMER2_MSGAUX_MAX) {
			kprintf("hammer2: illegal msg payload size %zd\n",
				msg->aux_size);
			error = EINVAL;
			break;
		}
		if (msg->aux_size) {
			msg->aux_data = kmalloc(msg->aux_size, pmp->mmsg,
						M_WAITOK | M_ZERO);
			error = fp_read(pmp->msg_fp, msg->aux_data,
					msg->aux_size,
					NULL, 1, UIO_SYSSPACE);
			if (error) {
				kprintf("hammer2: short msg "
					"payload received\n");
				break;
			}
		}

		/*
		 * State machine tracking, state assignment for msg,
		 * returns error and discard status.  Errors are fatal
		 * to the connection except for EALREADY which forces
		 * a discard without execution.
		 */
		error = hammer2_state_msgrx(msg);
		if (error) {
			/*
			 * Raw protocol or connection error
			 */
			hammer2_msg_free(msg);
			if (error == EALREADY)
				error = 0;
		} else if (msg->state && msg->state->func) {
			/*
			 * Message related to state which already has a
			 * handling function installed for it.
			 */
			error = msg->state->func(msg->state, msg);
			hammer2_state_cleanuprx(msg);
		} else if ((msg->any.head.cmd & HAMMER2_MSGF_PROTOS) ==
			   HAMMER2_MSG_PROTO_LNK) {
			/*
			 * Message related to the LNK protocol set
			 */
			error = hammer2_msg_lnk_rcvmsg(msg);
			hammer2_state_cleanuprx(msg);
		} else if ((msg->any.head.cmd & HAMMER2_MSGF_PROTOS) ==
			   HAMMER2_MSG_PROTO_DBG) {
			/*
			 * Message related to the DBG protocol set
			 */
			error = hammer2_msg_dbg_rcvmsg(msg);
			hammer2_state_cleanuprx(msg);
		} else {
			/*
			 * Other higher-level messages (e.g. vnops)
			 */
			error = hammer2_msg_adhoc_input(msg);
			hammer2_state_cleanuprx(msg);
		}
		msg = NULL;
	}

	if (error)
		kprintf("hammer2: msg read failed error %d\n", error);

	lockmgr(&pmp->msglk, LK_EXCLUSIVE);
	if (msg) {
		if (msg->state && msg->state->msg == msg)
			msg->state->msg = NULL;
		hammer2_msg_free(msg);
	}

	if ((state = pmp->freerd_state) != NULL) {
		pmp->freerd_state = NULL;
		hammer2_state_free(state);
	}

	/*
	 * Shutdown the socket before waiting for the transmit side.
	 *
	 * If we are dying due to e.g. a socket disconnect versus being
	 * killed explicitly we have to set KILL in order to kick the tx
	 * side when it might not have any other work to do.  KILL might
	 * already be set if we are in an unmount or reconnect.
	 */
	fp_shutdown(pmp->msg_fp, SHUT_RDWR);

	atomic_set_int(&pmp->msg_ctl, HAMMER2_CLUSTERCTL_KILL);
	wakeup(&pmp->msg_ctl);

	/*
	 * Wait for the transmit side to drain remaining messages
	 * before cleaning up the rx state.  The transmit side will
	 * set KILLTX and wait for the rx side to completely finish
	 * (set msgrd_td to NULL) before cleaning up any remaining
	 * tx states.
	 */
	lockmgr(&pmp->msglk, LK_RELEASE);
	atomic_set_int(&pmp->msg_ctl, HAMMER2_CLUSTERCTL_KILLRX);
	wakeup(&pmp->msg_ctl);
	while ((pmp->msg_ctl & HAMMER2_CLUSTERCTL_KILLTX) == 0) {
		wakeup(&pmp->msg_ctl);
		tsleep(pmp, 0, "clstrkw", hz);
	}

	pmp->msgrd_td = NULL;
	/* pmp can be ripped out from under us at this point */
	wakeup(pmp);
	lwkt_exit();
}

static
void
hammer2_cluster_thread_wr(void *arg)
{
	hammer2_pfsmount_t *pmp = arg;
	hammer2_msg_t *msg = NULL;
	hammer2_state_t *state;
	ssize_t res;
	size_t name_len;
	int error = 0;
	int retries = 20;

	/*
	 * Open a LNK_CONN transaction indicating that we want to take part
	 * in the spanning tree algorithm.  Filter explicitly on the PFS
	 * info in the iroot.
	 *
	 * We do not transmit our (only) LNK_SPAN until the other end has
	 * acknowledged our link connection request.
	 *
	 * The transaction remains fully open for the duration of the
	 * connection.
	 */
	msg = hammer2_msg_alloc(&pmp->router, HAMMER2_LNK_CONN |
				HAMMER2_MSGF_CREATE,
				hammer2_msg_conn_reply, pmp);
	msg->any.lnk_conn.pfs_clid = pmp->iroot->ip_data.pfs_clid;
	msg->any.lnk_conn.pfs_fsid = pmp->iroot->ip_data.pfs_fsid;
	msg->any.lnk_conn.pfs_type = pmp->iroot->ip_data.pfs_type;
	msg->any.lnk_conn.proto_version = HAMMER2_SPAN_PROTO_1;
	msg->any.lnk_conn.peer_type = pmp->hmp->voldata.peer_type;
	msg->any.lnk_conn.peer_mask = 1LLU << HAMMER2_PEER_HAMMER2;
	name_len = pmp->iroot->ip_data.name_len;
	if (name_len >= sizeof(msg->any.lnk_conn.label))
		name_len = sizeof(msg->any.lnk_conn.label) - 1;
	bcopy(pmp->iroot->ip_data.filename, msg->any.lnk_conn.label, name_len);
	pmp->conn_state = msg->state;
	msg->any.lnk_conn.label[name_len] = 0;
	hammer2_msg_write(msg);

	/*
	 * Transmit loop
	 */
	msg = NULL;
	lockmgr(&pmp->msglk, LK_EXCLUSIVE);

	while ((pmp->msg_ctl & HAMMER2_CLUSTERCTL_KILL) == 0 && error == 0) {
		/*
		 * Sleep if no messages pending.  Interlock with flag while
		 * holding msglk.
		 */
		if (TAILQ_EMPTY(&pmp->msgq)) {
			atomic_set_int(&pmp->msg_ctl,
				       HAMMER2_CLUSTERCTL_SLEEPING);
			lksleep(&pmp->msg_ctl, &pmp->msglk, 0, "msgwr", hz);
			atomic_clear_int(&pmp->msg_ctl,
					 HAMMER2_CLUSTERCTL_SLEEPING);
		}

		while ((msg = TAILQ_FIRST(&pmp->msgq)) != NULL) {
			/*
			 * Remove msg from the transmit queue and do
			 * persist and half-closed state handling.
			 */
			TAILQ_REMOVE(&pmp->msgq, msg, qentry);
			lockmgr(&pmp->msglk, LK_RELEASE);

			error = hammer2_state_msgtx(msg);
			if (error == EALREADY) {
				error = 0;
				hammer2_msg_free(msg);
				lockmgr(&pmp->msglk, LK_EXCLUSIVE);
				continue;
			}
			if (error) {
				hammer2_msg_free(msg);
				lockmgr(&pmp->msglk, LK_EXCLUSIVE);
				break;
			}

			/*
			 * Dump the message to the pipe or socket.
			 */
			error = fp_write(pmp->msg_fp, &msg->any, msg->hdr_size,
					 &res, UIO_SYSSPACE);
			if (error || res != msg->hdr_size) {
				if (error == 0)
					error = EINVAL;
				lockmgr(&pmp->msglk, LK_EXCLUSIVE);
				break;
			}
			if (msg->aux_size) {
				error = fp_write(pmp->msg_fp,
						 msg->aux_data, msg->aux_size,
						 &res, UIO_SYSSPACE);
				if (error || res != msg->aux_size) {
					if (error == 0)
						error = EINVAL;
					lockmgr(&pmp->msglk, LK_EXCLUSIVE);
					break;
				}
			}
			hammer2_state_cleanuptx(msg);
			lockmgr(&pmp->msglk, LK_EXCLUSIVE);
		}
	}

	/*
	 * Cleanup messages pending transmission and release msgq lock.
	 */
	if (error)
		kprintf("hammer2: msg write failed error %d\n", error);

	if (msg) {
		if (msg->state && msg->state->msg == msg)
			msg->state->msg = NULL;
		hammer2_msg_free(msg);
	}

	/*
	 * Shutdown the socket.  This will cause the rx thread to get an
	 * EOF and ensure that both threads get to a termination state.
	 */
	fp_shutdown(pmp->msg_fp, SHUT_RDWR);

	/*
	 * Set KILLTX (which the rx side waits for), then wait for the RX
	 * side to completely finish before we clean out any remaining
	 * command states.
	 */
	lockmgr(&pmp->msglk, LK_RELEASE);
	atomic_set_int(&pmp->msg_ctl, HAMMER2_CLUSTERCTL_KILLTX);
	wakeup(&pmp->msg_ctl);
	while (pmp->msgrd_td) {
		wakeup(&pmp->msg_ctl);
		tsleep(pmp, 0, "clstrkw", hz);
	}
	lockmgr(&pmp->msglk, LK_EXCLUSIVE);

	/*
	 * Simulate received MSGF_DELETE's for any remaining states.
	 */
cleanuprd:
	RB_FOREACH(state, hammer2_state_tree, &pmp->staterd_tree) {
		if (state->func &&
		    (state->rxcmd & HAMMER2_MSGF_DELETE) == 0) {
			lockmgr(&pmp->msglk, LK_RELEASE);
			msg = hammer2_msg_alloc(&pmp->router,
						HAMMER2_LNK_ERROR,
						NULL, NULL);
			if ((state->rxcmd & HAMMER2_MSGF_CREATE) == 0)
				msg->any.head.cmd |= HAMMER2_MSGF_CREATE;
			msg->any.head.cmd |= HAMMER2_MSGF_DELETE;
			msg->state = state;
			state->rxcmd = msg->any.head.cmd &
				       ~HAMMER2_MSGF_DELETE;
			msg->state->func(state, msg);
			hammer2_state_cleanuprx(msg);
			lockmgr(&pmp->msglk, LK_EXCLUSIVE);
			goto cleanuprd;
		}
		if (state->func == NULL) {
			state->flags &= ~HAMMER2_STATE_INSERTED;
			RB_REMOVE(hammer2_state_tree,
				  &pmp->staterd_tree, state);
			hammer2_state_free(state);
			goto cleanuprd;
		}
	}

	/*
	 * NOTE: We have to drain the msgq to handle situations
	 *	 where received states have built up output
	 *	 messages, to avoid creating messages with
	 *	 duplicate CREATE/DELETE flags.
	 */
cleanupwr:
	hammer2_drain_msgq(pmp);
	RB_FOREACH(state, hammer2_state_tree, &pmp->statewr_tree) {
		if (state->func &&
		    (state->rxcmd & HAMMER2_MSGF_DELETE) == 0) {
			lockmgr(&pmp->msglk, LK_RELEASE);
			msg = hammer2_msg_alloc(&pmp->router,
						HAMMER2_LNK_ERROR,
						NULL, NULL);
			if ((state->rxcmd & HAMMER2_MSGF_CREATE) == 0)
				msg->any.head.cmd |= HAMMER2_MSGF_CREATE;
			msg->any.head.cmd |= HAMMER2_MSGF_DELETE |
					     HAMMER2_MSGF_REPLY;
			msg->state = state;
			state->rxcmd = msg->any.head.cmd &
				       ~HAMMER2_MSGF_DELETE;
			msg->state->func(state, msg);
			hammer2_state_cleanuprx(msg);
			lockmgr(&pmp->msglk, LK_EXCLUSIVE);
			goto cleanupwr;
		}
		if (state->func == NULL) {
			state->flags &= ~HAMMER2_STATE_INSERTED;
			RB_REMOVE(hammer2_state_tree,
				  &pmp->statewr_tree, state);
			hammer2_state_free(state);
			goto cleanupwr;
		}
	}

	hammer2_drain_msgq(pmp);
	if (--retries == 0)
		panic("hammer2: comm thread shutdown couldn't drain");
	if (RB_ROOT(&pmp->statewr_tree))
		goto cleanupwr;

	if ((state = pmp->freewr_state) != NULL) {
		pmp->freewr_state = NULL;
		hammer2_state_free(state);
	}

	lockmgr(&pmp->msglk, LK_RELEASE);

	/*
	 * The state trees had better be empty now
	 */
	KKASSERT(RB_EMPTY(&pmp->staterd_tree));
	KKASSERT(RB_EMPTY(&pmp->statewr_tree));
	KKASSERT(pmp->conn_state == NULL);

	/*
	 * pmp can be ripped out from under us once msgwr_td is set to NULL.
	 */
	pmp->msgwr_td = NULL;
	wakeup(pmp);
	lwkt_exit();
}

/*
 * This cleans out the pending transmit message queue, adjusting any
 * persistent states properly in the process.
 *
 * Caller must hold pmp->msglk
 */
static
void
hammer2_drain_msgq(hammer2_pfsmount_t *pmp)
{
	hammer2_msg_t *msg;

	/*
	 * Clean out our pending transmit queue, executing the
	 * appropriate state adjustments.  If this tries to open
	 * any new outgoing transactions we have to loop up and
	 * clean them out.
	 */
	while ((msg = TAILQ_FIRST(&pmp->msgq)) != NULL) {
		TAILQ_REMOVE(&pmp->msgq, msg, qentry);
		lockmgr(&pmp->msglk, LK_RELEASE);
		if (msg->state && msg->state->msg == msg)
			msg->state->msg = NULL;
		if (hammer2_state_msgtx(msg))
			hammer2_msg_free(msg);
		else
			hammer2_state_cleanuptx(msg);
		lockmgr(&pmp->msglk, LK_EXCLUSIVE);
	}
}

/*
 * Called with msglk held after queueing a new message, wakes up the
 * transmit thread.  We use an interlock to avoid unnecessary wakeups.
 */
void
hammer2_clusterctl_wakeup(hammer2_pfsmount_t *pmp)
{
	if (pmp->msg_ctl & HAMMER2_CLUSTERCTL_SLEEPING) {
		atomic_clear_int(&pmp->msg_ctl, HAMMER2_CLUSTERCTL_SLEEPING);
		wakeup(&pmp->msg_ctl);
	}
}

static int
hammer2_msg_lnk_rcvmsg(hammer2_msg_t *msg)
{
	switch(msg->any.head.cmd & HAMMER2_MSGF_TRANSMASK) {
	case HAMMER2_LNK_CONN | HAMMER2_MSGF_CREATE:
		/*
		 * reply & leave trans open
		 */
		kprintf("CONN RECEIVE - (just ignore it)\n");
		hammer2_msg_result(msg, 0);
		break;
	case HAMMER2_LNK_SPAN | HAMMER2_MSGF_CREATE:
		kprintf("SPAN RECEIVE - ADDED FROM CLUSTER\n");
		break;
	case HAMMER2_LNK_SPAN | HAMMER2_MSGF_DELETE:
		kprintf("SPAN RECEIVE - DELETED FROM CLUSTER\n");
		break;
	default:
		break;
	}
	return(0);
}

/*
 * This function is called when the other end replies to our LNK_CONN
 * request.
 *
 * We transmit our (single) SPAN on the initial reply, leaving that
 * transaction open too.
 */
static int
hammer2_msg_conn_reply(hammer2_state_t *state, hammer2_msg_t *msg)
{
	hammer2_pfsmount_t *pmp = state->any.pmp;
	hammer2_mount_t *hmp = pmp->hmp;
	hammer2_msg_t *rmsg;
	size_t name_len;
	int copyid;

	kprintf("LNK_CONN REPLY RECEIVED CMD %08x\n", msg->any.head.cmd);

	if (msg->any.head.cmd & HAMMER2_MSGF_CREATE) {
		kprintf("LNK_CONN transaction replied to, initiate SPAN\n");
		rmsg = hammer2_msg_alloc(&pmp->router, HAMMER2_LNK_SPAN |
					 HAMMER2_MSGF_CREATE,
					 hammer2_msg_span_reply, pmp);
		rmsg->any.lnk_span.pfs_clid = pmp->iroot->ip_data.pfs_clid;
		rmsg->any.lnk_span.pfs_fsid = pmp->iroot->ip_data.pfs_fsid;
		rmsg->any.lnk_span.pfs_type = pmp->iroot->ip_data.pfs_type;
		rmsg->any.lnk_span.peer_type = pmp->hmp->voldata.peer_type;
		rmsg->any.lnk_span.proto_version = HAMMER2_SPAN_PROTO_1;
		name_len = pmp->iroot->ip_data.name_len;
		if (name_len >= sizeof(rmsg->any.lnk_span.label))
			name_len = sizeof(rmsg->any.lnk_span.label) - 1;
		bcopy(pmp->iroot->ip_data.filename,
		      rmsg->any.lnk_span.label,
		      name_len);
		rmsg->any.lnk_span.label[name_len] = 0;
		hammer2_msg_write(rmsg);

		/*
		 * Dump the configuration stored in the volume header
		 */
		hammer2_voldata_lock(hmp);
		for (copyid = 0; copyid < HAMMER2_COPYID_COUNT; ++copyid) {
			if (hmp->voldata.copyinfo[copyid].copyid == 0)
				continue;
			hammer2_volconf_update(pmp, copyid);
		}
		hammer2_voldata_unlock(hmp);
	}
	if ((state->txcmd & HAMMER2_MSGF_DELETE) == 0 &&
	    (msg->any.head.cmd & HAMMER2_MSGF_DELETE)) {
		kprintf("LNK_CONN transaction terminated by remote\n");
		pmp->conn_state = NULL;
		hammer2_msg_reply(msg, 0);
	}
	return(0);
}

/*
 * Remote terminated our span transaction.  We have to terminate our side.
 */
static int
hammer2_msg_span_reply(hammer2_state_t *state, hammer2_msg_t *msg)
{
	hammer2_pfsmount_t *pmp = state->any.pmp;

	kprintf("SPAN REPLY - Our sent span was terminated by the "
		"remote %08x state %p\n", msg->any.head.cmd, state);
	if ((state->txcmd & HAMMER2_MSGF_DELETE) == 0 &&
	    (msg->any.head.cmd & HAMMER2_MSGF_DELETE)) {
		hammer2_msg_reply(msg, 0);
	}
	return(0);
}

/*
 * Volume configuration updates are passed onto the userland service
 * daemon via the open LNK_CONN transaction.
 */
void
hammer2_volconf_update(hammer2_pfsmount_t *pmp, int index)
{
	hammer2_mount_t *hmp = pmp->hmp;
	hammer2_msg_t *msg;

	/* XXX interlock against connection state termination */
	kprintf("volconf update %p\n", pmp->conn_state);
	if (pmp->conn_state) {
		kprintf("TRANSMIT VOLCONF VIA OPEN CONN TRANSACTION\n");
		msg = hammer2_msg_alloc(&pmp->router, HAMMER2_LNK_VOLCONF,
					NULL, NULL);
		msg->state = pmp->conn_state;
		msg->any.lnk_volconf.copy = hmp->voldata.copyinfo[index];
		msg->any.lnk_volconf.mediaid = hmp->voldata.fsid;
		msg->any.lnk_volconf.index = index;
		hammer2_msg_write(msg);
	}
}