1 /*-
2  * Copyright (c) 2011, 2012 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/nlookup.h>
38 #include <sys/vnode.h>
39 #include <sys/mount.h>
40 #include <sys/fcntl.h>
41 #include <sys/buf.h>
42 #include <sys/uuid.h>
43 #include <sys/vfsops.h>
44 #include <sys/sysctl.h>
45 #include <sys/socket.h>
46
47 #include "hammer2.h"
48 #include "hammer2_disk.h"
49 #include "hammer2_mount.h"
50 #include "hammer2_network.h"
51
52 struct hammer2_sync_info {
53         int error;
54         int waitfor;
55 };
56
57 TAILQ_HEAD(hammer2_mntlist, hammer2_mount);
58 static struct hammer2_mntlist hammer2_mntlist;
59 static struct lock hammer2_mntlk;
60
61 int hammer2_debug;
62 int hammer2_cluster_enable = 1;
63 int hammer2_hardlink_enable = 1;
64 long hammer2_iod_file_read;
65 long hammer2_iod_meta_read;
66 long hammer2_iod_indr_read;
67 long hammer2_iod_file_write;
68 long hammer2_iod_meta_write;
69 long hammer2_iod_indr_write;
70 long hammer2_iod_volu_write;
71 long hammer2_ioa_file_read;
72 long hammer2_ioa_meta_read;
73 long hammer2_ioa_indr_read;
74 long hammer2_ioa_file_write;
75 long hammer2_ioa_meta_write;
76 long hammer2_ioa_indr_write;
77 long hammer2_ioa_volu_write;
78
79 SYSCTL_NODE(_vfs, OID_AUTO, hammer2, CTLFLAG_RW, 0, "HAMMER2 filesystem");
80
81 SYSCTL_INT(_vfs_hammer2, OID_AUTO, debug, CTLFLAG_RW,
82            &hammer2_debug, 0, "");
83 SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_enable, CTLFLAG_RW,
84            &hammer2_cluster_enable, 0, "");
85 SYSCTL_INT(_vfs_hammer2, OID_AUTO, hardlink_enable, CTLFLAG_RW,
86            &hammer2_hardlink_enable, 0, "");
87 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_read, CTLFLAG_RW,
88            &hammer2_iod_file_read, 0, "");
89 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_read, CTLFLAG_RW,
90            &hammer2_iod_meta_read, 0, "");
91 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_read, CTLFLAG_RW,
92            &hammer2_iod_indr_read, 0, "");
93 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_write, CTLFLAG_RW,
94            &hammer2_iod_file_write, 0, "");
95 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_write, CTLFLAG_RW,
96            &hammer2_iod_meta_write, 0, "");
97 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_write, CTLFLAG_RW,
98            &hammer2_iod_indr_write, 0, "");
99 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_write, CTLFLAG_RW,
100            &hammer2_iod_volu_write, 0, "");
101 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_file_read, CTLFLAG_RW,
102            &hammer2_ioa_file_read, 0, "");
103 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_meta_read, CTLFLAG_RW,
104            &hammer2_ioa_meta_read, 0, "");
105 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_indr_read, CTLFLAG_RW,
106            &hammer2_ioa_indr_read, 0, "");
107 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_file_write, CTLFLAG_RW,
108            &hammer2_ioa_file_write, 0, "");
109 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_meta_write, CTLFLAG_RW,
110            &hammer2_ioa_meta_write, 0, "");
111 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_indr_write, CTLFLAG_RW,
112            &hammer2_ioa_indr_write, 0, "");
113 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_volu_write, CTLFLAG_RW,
114            &hammer2_ioa_volu_write, 0, "");
115
116 static int hammer2_vfs_init(struct vfsconf *conf);
117 static int hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
118                                 struct ucred *cred);
119 static int hammer2_remount(struct mount *, char *, struct vnode *,
120                                 struct ucred *);
121 static int hammer2_vfs_unmount(struct mount *mp, int mntflags);
122 static int hammer2_vfs_root(struct mount *mp, struct vnode **vpp);
123 static int hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp,
124                                 struct ucred *cred);
125 static int hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
126                                 struct ucred *cred);
127 static int hammer2_vfs_sync(struct mount *mp, int waitfor);
128 static int hammer2_vfs_vget(struct mount *mp, struct vnode *dvp,
129                                 ino_t ino, struct vnode **vpp);
130 static int hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
131                                 struct fid *fhp, struct vnode **vpp);
132 static int hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp);
133 static int hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
134                                 int *exflagsp, struct ucred **credanonp);
135
136 static int hammer2_install_volume_header(hammer2_mount_t *hmp);
137 static int hammer2_sync_scan1(struct mount *mp, struct vnode *vp, void *data);
138 static int hammer2_sync_scan2(struct mount *mp, struct vnode *vp, void *data);
139
140 static void hammer2_cluster_thread_rd(void *arg);
141 static void hammer2_cluster_thread_wr(void *arg);
142 static int hammer2_msg_conn_reply(hammer2_state_t *state, hammer2_msg_t *msg);
143 static int hammer2_msg_span_reply(hammer2_state_t *state, hammer2_msg_t *msg);
144 static int hammer2_msg_lnk_rcvmsg(hammer2_msg_t *msg);
145
146 /*
147  * HAMMER2 vfs operations.
148  */
149 static struct vfsops hammer2_vfsops = {
150         .vfs_init       = hammer2_vfs_init,
151         .vfs_sync       = hammer2_vfs_sync,
152         .vfs_mount      = hammer2_vfs_mount,
153         .vfs_unmount    = hammer2_vfs_unmount,
154         .vfs_root       = hammer2_vfs_root,
155         .vfs_statfs     = hammer2_vfs_statfs,
156         .vfs_statvfs    = hammer2_vfs_statvfs,
157         .vfs_vget       = hammer2_vfs_vget,
158         .vfs_vptofh     = hammer2_vfs_vptofh,
159         .vfs_fhtovp     = hammer2_vfs_fhtovp,
160         .vfs_checkexp   = hammer2_vfs_checkexp
161 };
162
163 MALLOC_DEFINE(M_HAMMER2, "HAMMER2-mount", "");
164
165 VFS_SET(hammer2_vfsops, hammer2, 0);
166 MODULE_VERSION(hammer2, 1);
167
168 static
169 int
170 hammer2_vfs_init(struct vfsconf *conf)
171 {
172         int error;
173
174         error = 0;
175
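        /*
         * Sanity check that the on-media structure sizes compiled into
         * this build match the sizes hardwired in the HAMMER2 headers.
         */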
176         if (HAMMER2_BLOCKREF_BYTES != sizeof(struct hammer2_blockref))
177                 error = EINVAL;
178         if (HAMMER2_INODE_BYTES != sizeof(struct hammer2_inode_data))
179                 error = EINVAL;
180         if (HAMMER2_ALLOCREF_BYTES != sizeof(struct hammer2_allocref))
181                 error = EINVAL;
182         if (HAMMER2_VOLUME_BYTES != sizeof(struct hammer2_volume_data))
183                 error = EINVAL;
184
185         if (error)
186                 kprintf("HAMMER2 structure size mismatch; cannot continue.\n");
187
188         lockinit(&hammer2_mntlk, "mntlk", 0, 0);
189         TAILQ_INIT(&hammer2_mntlist);
190
191         return (error);
192 }
193
194 /*
 195  * Mount or remount HAMMER2 filesystem from physical media
196  *
197  *      mountroot
198  *              mp              mount point structure
199  *              path            NULL
200  *              data            <unused>
201  *              cred            <unused>
202  *
203  *      mount
204  *              mp              mount point structure
205  *              path            path to mount point
206  *              data            pointer to argument structure in user space
207  *                      volume  volume path (device@LABEL form)
208  *                      hflags  user mount flags
209  *              cred            user credentials
210  *
211  * RETURNS:     0       Success
212  *              !0      error number
213  */
214 static
215 int
216 hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
217                   struct ucred *cred)
218 {
219         struct hammer2_mount_info info;
220         hammer2_pfsmount_t *pmp;
221         hammer2_mount_t *hmp;
222         hammer2_key_t lhc;
223         struct vnode *devvp;
224         struct nlookupdata nd;
225         hammer2_chain_t *parent;
226         hammer2_chain_t *schain;
227         hammer2_chain_t *rchain;
228         char devstr[MNAMELEN];
229         size_t size;
230         size_t done;
231         char *dev;
232         char *label;
233         int ronly = 1;
234         int create_hmp;
235         int error;
236
237         hmp = NULL;
238         pmp = NULL;
239         dev = NULL;
240         label = NULL;
241         devvp = NULL;
242
243         kprintf("hammer2_mount\n");
244
245         if (path == NULL) {
246                 /*
247                  * Root mount
248                  */
249                 bzero(&info, sizeof(info));
250                 info.cluster_fd = -1;
251                 return (EOPNOTSUPP);
252         } else {
253                 /*
254                  * Non-root mount or updating a mount
255                  */
256                 error = copyin(data, &info, sizeof(info));
257                 if (error)
258                         return (error);
259
260                 error = copyinstr(info.volume, devstr, MNAMELEN - 1, &done);
261                 if (error)
262                         return (error);
263
264                 /* Extract device and label */
265                 dev = devstr;
266                 label = strchr(devstr, '@');
267                 if (label == NULL ||
268                     ((label + 1) - dev) > done) {
269                         return (EINVAL);
270                 }
271                 *label = '\0';
272                 label++;
273                 if (*label == '\0')
274                         return (EINVAL);
275
276                 if (mp->mnt_flag & MNT_UPDATE) {
277                         /* Update mount */
278                         /* HAMMER2 implements NFS export via mountctl */
279                         hmp = MPTOHMP(mp);
280                         devvp = hmp->devvp;
281                         error = hammer2_remount(mp, path, devvp, cred);
282                         return error;
283                 }
284         }
285
286         /*
287          * PFS mount
288          *
289          * Lookup name and verify it refers to a block device.
290          */
291         error = nlookup_init(&nd, dev, UIO_SYSSPACE, NLC_FOLLOW);
292         if (error == 0)
293                 error = nlookup(&nd);
294         if (error == 0)
295                 error = cache_vref(&nd.nl_nch, nd.nl_cred, &devvp);
296         nlookup_done(&nd);
297
298         if (error == 0) {
299                 if (vn_isdisk(devvp, &error))
300                         error = vfs_mountedon(devvp);
301         }
302
303         /*
 304          * Determine if the device has already been mounted.  After this
 305          * check hmp will be non-NULL if this is the second or a later
 306          * hammer2 mount from the same device.
307          */
308         lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);
309         TAILQ_FOREACH(hmp, &hammer2_mntlist, mntentry) {
310                 if (hmp->devvp == devvp)
311                         break;
312         }
313
314         /*
315          * Open the device if this isn't a secondary mount
316          */
317         if (hmp) {
318                 create_hmp = 0;
319         } else {
320                 create_hmp = 1;
321                 if (error == 0 && vcount(devvp) > 0)
322                         error = EBUSY;
323
324                 /*
325                  * Now open the device
326                  */
327                 if (error == 0) {
328                         ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);
329                         vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
330                         error = vinvalbuf(devvp, V_SAVE, 0, 0);
331                         if (error == 0) {
332                                 error = VOP_OPEN(devvp,
333                                                  ronly ? FREAD : FREAD | FWRITE,
334                                                  FSCRED, NULL);
335                         }
336                         vn_unlock(devvp);
337                 }
338                 if (error && devvp) {
339                         vrele(devvp);
340                         devvp = NULL;
341                 }
342                 if (error) {
343                         lockmgr(&hammer2_mntlk, LK_RELEASE);
344                         return error;
345                 }
346         }
347
348         /*
349          * Block device opened successfully, finish initializing the
350          * mount structure.
351          *
352          * From this point on we have to call hammer2_unmount() on failure.
353          */
354         pmp = kmalloc(sizeof(*pmp), M_HAMMER2, M_WAITOK | M_ZERO);
355         mp->mnt_data = (qaddr_t)pmp;
356         pmp->mp = mp;
357         kmalloc_create(&pmp->mmsg, "HAMMER2-pfsmsg");
358         lockinit(&pmp->msglk, "h2msg", 0, 0);
359         TAILQ_INIT(&pmp->msgq);
360         RB_INIT(&pmp->staterd_tree);
361         RB_INIT(&pmp->statewr_tree);
362
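        /*
         * Allocate the per-device hammer2_mount only on the first mount
         * of this device; additional PFS mounts share the hmp found on
         * hammer2_mntlist above.
         */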
363         if (create_hmp) {
364                 hmp = kmalloc(sizeof(*hmp), M_HAMMER2, M_WAITOK | M_ZERO);
365                 hmp->ronly = ronly;
366                 hmp->devvp = devvp;
367                 kmalloc_create(&hmp->minode, "HAMMER2-inodes");
368                 kmalloc_create(&hmp->mchain, "HAMMER2-chains");
369                 TAILQ_INSERT_TAIL(&hammer2_mntlist, hmp, mntentry);
370         }
371         ccms_domain_init(&pmp->ccms_dom);
372         pmp->hmp = hmp;
373         pmp->router.pmp = pmp;
374         ++hmp->pmp_count;
375         lockmgr(&hammer2_mntlk, LK_RELEASE);
376         kprintf("hammer2_mount hmp=%p pmpcnt=%d\n", hmp, hmp->pmp_count);
377         
378         mp->mnt_flag = MNT_LOCAL;
379         mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;   /* all entry pts are SMP */
380
381         if (create_hmp) {
382                 /*
 383                  * vchain setup.  vchain.data points at the embedded volume
 384                  * data.  vchain.refs is initialized and will never drop to 0.
385                  */
386                 hmp->vchain.refs = 1;
387                 hmp->vchain.data = (void *)&hmp->voldata;
388                 hmp->vchain.bref.type = HAMMER2_BREF_TYPE_VOLUME;
389                 hmp->vchain.bref.data_off = 0 | HAMMER2_PBUFRADIX;
390                 hmp->vchain.bref_flush = hmp->vchain.bref;
391                 ccms_cst_init(&hmp->vchain.cst, NULL);
392                 /* hmp->vchain.u.xxx is left NULL */
393                 lockinit(&hmp->alloclk, "h2alloc", 0, 0);
394                 lockinit(&hmp->voldatalk, "voldata", 0, LK_CANRECURSE);
395
396                 /*
397                  * Install the volume header
398                  */
399                 error = hammer2_install_volume_header(hmp);
400                 if (error) {
401                         hammer2_vfs_unmount(mp, MNT_FORCE);
402                         return error;
403                 }
404         }
405
406         /*
407          * required mount structure initializations
408          */
409         mp->mnt_stat.f_iosize = HAMMER2_PBUFSIZE;
410         mp->mnt_stat.f_bsize = HAMMER2_PBUFSIZE;
411
412         mp->mnt_vstat.f_frsize = HAMMER2_PBUFSIZE;
413         mp->mnt_vstat.f_bsize = HAMMER2_PBUFSIZE;
414
415         /*
416          * Optional fields
417          */
418         mp->mnt_iosize_max = MAXPHYS;
419
420         /*
421          * First locate the super-root inode, which is key 0 relative to the
422          * volume header's blockset.
423          *
424          * Then locate the root inode by scanning the directory keyspace
425          * represented by the label.
426          */
427         if (create_hmp) {
428                 parent = &hmp->vchain;
429                 hammer2_chain_lock(hmp, parent, HAMMER2_RESOLVE_ALWAYS);
430                 schain = hammer2_chain_lookup(hmp, &parent,
431                                       HAMMER2_SROOT_KEY, HAMMER2_SROOT_KEY, 0);
432                 hammer2_chain_unlock(hmp, parent);
433                 if (schain == NULL) {
434                         kprintf("hammer2_mount: invalid super-root\n");
435                         hammer2_vfs_unmount(mp, MNT_FORCE);
436                         return EINVAL;
437                 }
438                 hammer2_chain_ref(hmp, schain); /* for hmp->schain */
439                 hmp->schain = schain;           /* left locked */
440         } else {
441                 schain = hmp->schain;
442                 hammer2_chain_lock(hmp, schain, HAMMER2_RESOLVE_ALWAYS);
443         }
444
445         parent = schain;
446         lhc = hammer2_dirhash(label, strlen(label));
447         rchain = hammer2_chain_lookup(hmp, &parent,
448                                       lhc, lhc + HAMMER2_DIRHASH_LOMASK,
449                                       0);
450         while (rchain) {
451                 if (rchain->bref.type == HAMMER2_BREF_TYPE_INODE &&
452                     rchain->u.ip &&
453                     strcmp(label, rchain->data->ipdata.filename) == 0) {
454                         break;
455                 }
456                 rchain = hammer2_chain_next(hmp, &parent, rchain,
457                                             lhc, lhc + HAMMER2_DIRHASH_LOMASK,
458                                             0);
459         }
460         hammer2_chain_unlock(hmp, parent);
461         if (rchain == NULL) {
462                 kprintf("hammer2_mount: PFS label not found\n");
463                 hammer2_vfs_unmount(mp, MNT_FORCE);
464                 return EINVAL;
465         }
466         if (rchain->flags & HAMMER2_CHAIN_MOUNTED) {
467                 hammer2_chain_unlock(hmp, rchain);
468                 kprintf("hammer2_mount: PFS label already mounted!\n");
469                 hammer2_vfs_unmount(mp, MNT_FORCE);
470                 return EBUSY;
471         }
472         atomic_set_int(&rchain->flags, HAMMER2_CHAIN_MOUNTED);
473
474         hammer2_chain_ref(hmp, rchain); /* for pmp->rchain */
475         hammer2_chain_unlock(hmp, rchain);
476         pmp->rchain = rchain;           /* left held & unlocked */
477         pmp->iroot = rchain->u.ip;      /* implied hold from rchain */
478         pmp->iroot->pmp = pmp;
479
480         kprintf("iroot %p\n", pmp->iroot);
481
482         /*
483          * Ref the cluster management messaging descriptor.  The mount
484          * program deals with the other end of the communications pipe.
485          */
486         pmp->msg_fp = holdfp(curproc->p_fd, info.cluster_fd, -1);
487         if (pmp->msg_fp == NULL) {
488                 kprintf("hammer2_mount: bad cluster_fd!\n");
489                 hammer2_vfs_unmount(mp, MNT_FORCE);
490                 return EBADF;
491         }
492         lwkt_create(hammer2_cluster_thread_rd, pmp, &pmp->msgrd_td,
493                     NULL, 0, -1, "hammer2-msgrd");
494         lwkt_create(hammer2_cluster_thread_wr, pmp, &pmp->msgwr_td,
495                     NULL, 0, -1, "hammer2-msgwr");
496
497         /*
498          * Finish setup
499          */
500         vfs_getnewfsid(mp);
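        /*
         * Register the HAMMER2 vnode operations vectors (normal, special,
         * and fifo) with the mount point.
         */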
501         vfs_add_vnodeops(mp, &hammer2_vnode_vops, &mp->mnt_vn_norm_ops);
502         vfs_add_vnodeops(mp, &hammer2_spec_vops, &mp->mnt_vn_spec_ops);
503         vfs_add_vnodeops(mp, &hammer2_fifo_vops, &mp->mnt_vn_fifo_ops);
504
505         copyinstr(info.volume, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, &size);
506         bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
507         bzero(mp->mnt_stat.f_mntonname, sizeof(mp->mnt_stat.f_mntonname));
508         copyinstr(path, mp->mnt_stat.f_mntonname,
509                   sizeof(mp->mnt_stat.f_mntonname) - 1,
510                   &size);
511
512         /*
513          * Initial statfs to prime mnt_stat.
514          */
515         hammer2_vfs_statfs(mp, &mp->mnt_stat, cred);
516
517         return 0;
518 }
519
520 static
521 int
522 hammer2_remount(struct mount *mp, char *path, struct vnode *devvp,
523                 struct ucred *cred)
524 {
525         return (0);
526 }
527
528 static
529 int
530 hammer2_vfs_unmount(struct mount *mp, int mntflags)
531 {
532         hammer2_pfsmount_t *pmp;
533         hammer2_mount_t *hmp;
534         int flags;
535         int error = 0;
536         int ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);
537         struct vnode *devvp;
538
539         pmp = MPTOPMP(mp);
540         hmp = pmp->hmp;
541         flags = 0;
542
543         if (mntflags & MNT_FORCE)
544                 flags |= FORCECLOSE;
545
546         hammer2_mount_exlock(hmp);
547
548         /*
549          * If mount initialization proceeded far enough we must flush
550          * its vnodes.
551          */
552         if (pmp->iroot)
553                 error = vflush(mp, 0, flags);
554
        if (error) {
                hammer2_mount_unlock(hmp);
                return error;
        }
557
558         lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);
559         --hmp->pmp_count;
560         kprintf("hammer2_unmount hmp=%p pmpcnt=%d\n", hmp, hmp->pmp_count);
561
562         /*
563          * Flush any left over chains.  The voldata lock is only used
564          * to synchronize against HAMMER2_CHAIN_MODIFIED_AUX.
565          */
566         hammer2_voldata_lock(hmp);
567         if (hmp->vchain.flags & (HAMMER2_CHAIN_MODIFIED |
568                                  HAMMER2_CHAIN_MODIFIED_AUX |
569                                  HAMMER2_CHAIN_SUBMODIFIED)) {
570                 hammer2_voldata_unlock(hmp);
571                 hammer2_vfs_sync(mp, MNT_WAIT);
572         } else {
573                 hammer2_voldata_unlock(hmp);
574         }
575         if (hmp->pmp_count == 0) {
576                 if (hmp->vchain.flags & (HAMMER2_CHAIN_MODIFIED |
577                                          HAMMER2_CHAIN_MODIFIED_AUX |
578                                          HAMMER2_CHAIN_SUBMODIFIED)) {
579                         kprintf("hammer2_unmount: chains left over after "
580                                 "final sync\n");
581                         if (hammer2_debug & 0x0010)
582                                 Debugger("entered debugger");
583                 }
584         }
585
586         /*
587          * Cleanup the root and super-root chain elements (which should be
588          * clean).
589          */
590         pmp->iroot = NULL;
591         if (pmp->rchain) {
592                 atomic_clear_int(&pmp->rchain->flags, HAMMER2_CHAIN_MOUNTED);
593                 KKASSERT(pmp->rchain->refs == 1);
594                 hammer2_chain_drop(hmp, pmp->rchain);
595                 pmp->rchain = NULL;
596         }
597         ccms_domain_uninit(&pmp->ccms_dom);
598
599         /*
600          * Ask the cluster controller to go away
601          */
602         atomic_set_int(&pmp->msg_ctl, HAMMER2_CLUSTERCTL_KILL);
603         while (pmp->msgrd_td || pmp->msgwr_td) {
604                 wakeup(&pmp->msg_ctl);
605                 tsleep(pmp, 0, "clstrkl", hz);
606         }
607
608         /*
609          * Drop communications descriptor
610          */
611         if (pmp->msg_fp) {
612                 fdrop(pmp->msg_fp);
613                 pmp->msg_fp = NULL;
614         }
615
616         /*
617          * If no PFS's left drop the master hammer2_mount for the device.
618          */
619         if (hmp->pmp_count == 0) {
620                 if (hmp->schain) {
621                         KKASSERT(hmp->schain->refs == 1);
622                         hammer2_chain_drop(hmp, hmp->schain);
623                         hmp->schain = NULL;
624                 }
625
626                 /*
627                  * Finish up with the device vnode
628                  */
629                 if ((devvp = hmp->devvp) != NULL) {
630                         vinvalbuf(devvp, (ronly ? 0 : V_SAVE), 0, 0);
631                         hmp->devvp = NULL;
632                         VOP_CLOSE(devvp, (ronly ? FREAD : FREAD|FWRITE));
633                         vrele(devvp);
634                         devvp = NULL;
635                 }
636         }
637         hammer2_mount_unlock(hmp);
638
639         pmp->mp = NULL;
640         pmp->hmp = NULL;
641         mp->mnt_data = NULL;
642
643         kmalloc_destroy(&pmp->mmsg);
644
645         kfree(pmp, M_HAMMER2);
646         if (hmp->pmp_count == 0) {
647                 TAILQ_REMOVE(&hammer2_mntlist, hmp, mntentry);
648                 kmalloc_destroy(&hmp->minode);
649                 kmalloc_destroy(&hmp->mchain);
650                 kfree(hmp, M_HAMMER2);
651         }
652         lockmgr(&hammer2_mntlk, LK_RELEASE);
653         return (error);
654 }
655
656 static
657 int
658 hammer2_vfs_vget(struct mount *mp, struct vnode *dvp,
659              ino_t ino, struct vnode **vpp)
660 {
661         kprintf("hammer2_vget\n");
662         return (EOPNOTSUPP);
663 }
664
665 static
666 int
667 hammer2_vfs_root(struct mount *mp, struct vnode **vpp)
668 {
669         hammer2_pfsmount_t *pmp;
670         hammer2_mount_t *hmp;
671         int error;
672         struct vnode *vp;
673
674         pmp = MPTOPMP(mp);
675         hmp = pmp->hmp;
676         hammer2_mount_exlock(hmp);
677         if (pmp->iroot == NULL) {
678                 *vpp = NULL;
679                 error = EINVAL;
680         } else {
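                /*
                 * Lock the root inode's chain shared and obtain its vnode
                 * for the caller.
                 */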
681                 hammer2_chain_lock(hmp, &pmp->iroot->chain,
682                                    HAMMER2_RESOLVE_ALWAYS |
683                                    HAMMER2_RESOLVE_SHARED);
684                 vp = hammer2_igetv(pmp->iroot, &error);
685                 hammer2_chain_unlock(hmp, &pmp->iroot->chain);
686                 *vpp = vp;
687                 if (vp == NULL)
688                         kprintf("vnodefail\n");
689         }
690         hammer2_mount_unlock(hmp);
691
692         return (error);
693 }
694
695 /*
696  * Filesystem status
697  *
698  * XXX incorporate pmp->iroot->ip_data.inode_quota and data_quota
699  */
700 static
701 int
702 hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
703 {
704         hammer2_pfsmount_t *pmp;
705         hammer2_mount_t *hmp;
706
707         pmp = MPTOPMP(mp);
708         hmp = MPTOHMP(mp);
709
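        /*
         * Inode counts come from the PFS root inode (plus its unflushed
         * delta); block counts are derived from the volume header's
         * allocator fields.
         */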
710         mp->mnt_stat.f_files = pmp->iroot->ip_data.inode_count +
711                                pmp->iroot->delta_icount;
712         mp->mnt_stat.f_ffree = 0;
713         mp->mnt_stat.f_blocks = hmp->voldata.allocator_size / HAMMER2_PBUFSIZE;
714         mp->mnt_stat.f_bfree = (hmp->voldata.allocator_size -
715                                 hmp->voldata.allocator_beg) / HAMMER2_PBUFSIZE;
716         mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree;
717
718         *sbp = mp->mnt_stat;
719         return (0);
720 }
721
722 static
723 int
724 hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
725 {
726         hammer2_pfsmount_t *pmp;
727         hammer2_mount_t *hmp;
728
729         pmp = MPTOPMP(mp);
730         hmp = MPTOHMP(mp);
731
732         mp->mnt_vstat.f_bsize = HAMMER2_PBUFSIZE;
733         mp->mnt_vstat.f_files = pmp->iroot->ip_data.inode_count +
734                                 pmp->iroot->delta_icount;
735         mp->mnt_vstat.f_ffree = 0;
736         mp->mnt_vstat.f_blocks = hmp->voldata.allocator_size / HAMMER2_PBUFSIZE;
737         mp->mnt_vstat.f_bfree = (hmp->voldata.allocator_size -
738                                  hmp->voldata.allocator_beg) / HAMMER2_PBUFSIZE;
739         mp->mnt_vstat.f_bavail = mp->mnt_vstat.f_bfree;
740
741         *sbp = mp->mnt_vstat;
742         return (0);
743 }
744
745 /*
746  * Sync the entire filesystem; this is called from the filesystem syncer
747  * process periodically and whenever a user calls sync(1) on the hammer
748  * mountpoint.
749  *
 750  * Currently it is actually called from the syncer! \o/
751  *
752  * This task will have to snapshot the state of the dirty inode chain.
753  * From that, it will have to make sure all of the inodes on the dirty
 754  * chain have I/O initiated. We make sure that I/O is initiated for the root
755  * block.
756  *
757  * If waitfor is set, we wait for media to acknowledge the new rootblock.
758  *
759  * THINKS: side A vs side B, to have sync not stall all I/O?
760  */
761 static
762 int
763 hammer2_vfs_sync(struct mount *mp, int waitfor)
764 {
765         struct hammer2_sync_info info;
766         hammer2_mount_t *hmp;
767         int flags;
768         int error;
769         int haswork;
770
771         hmp = MPTOHMP(mp);
772
773         flags = VMSC_GETVP;
774         if (waitfor & MNT_LAZY)
775                 flags |= VMSC_ONEPASS;
776
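        /*
         * The first vnode scan runs without waiting; if the caller
         * requested MNT_WAIT and no error occurred, rescan and wait for
         * the I/O to complete.
         */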
777         info.error = 0;
778         info.waitfor = MNT_NOWAIT;
779         vmntvnodescan(mp, flags | VMSC_NOWAIT,
780                       hammer2_sync_scan1,
781                       hammer2_sync_scan2, &info);
782         if (info.error == 0 && (waitfor & MNT_WAIT)) {
783                 info.waitfor = waitfor;
 784                 vmntvnodescan(mp, flags,
 785                               hammer2_sync_scan1,
 786                               hammer2_sync_scan2, &info);
787
788         }
789 #if 0
790         if (waitfor == MNT_WAIT) {
791                 /* XXX */
792         } else {
793                 /* XXX */
794         }
795 #endif
796         hammer2_chain_lock(hmp, &hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
797         if (hmp->vchain.flags & (HAMMER2_CHAIN_MODIFIED |
798                                  HAMMER2_CHAIN_MODIFIED_AUX |
799                                  HAMMER2_CHAIN_SUBMODIFIED)) {
800                 hammer2_chain_flush(hmp, &hmp->vchain, 0);
801                 haswork = 1;
802         } else {
803                 haswork = 0;
804         }
805         hammer2_chain_unlock(hmp, &hmp->vchain);
806
807         error = 0;
808
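        /*
         * Unless this is a lazy sync, fsync the underlying device vnode
         * to push out buffered device-level writes.
         */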
809         if ((waitfor & MNT_LAZY) == 0) {
810                 waitfor = MNT_NOWAIT;
811                 vn_lock(hmp->devvp, LK_EXCLUSIVE | LK_RETRY);
812                 error = VOP_FSYNC(hmp->devvp, waitfor, 0);
813                 vn_unlock(hmp->devvp);
814         }
815
816         if (error == 0 && haswork) {
817                 struct buf *bp;
818
819                 /*
820                  * Synchronize the disk before flushing the volume
821                  * header.
822                  */
823                 bp = getpbuf(NULL);
824                 bp->b_bio1.bio_offset = 0;
825                 bp->b_bufsize = 0;
826                 bp->b_bcount = 0;
827                 bp->b_cmd = BUF_CMD_FLUSH;
828                 bp->b_bio1.bio_done = biodone_sync;
829                 bp->b_bio1.bio_flags |= BIO_SYNC;
830                 vn_strategy(hmp->devvp, &bp->b_bio1);
831                 biowait(&bp->b_bio1, "h2vol");
832                 relpbuf(bp, NULL);
833
834                 /*
835                  * Then we can safely flush the volume header.  Volume
836                  * data is locked separately to prevent ioctl functions
837                  * from deadlocking due to a configuration issue.
838                  */
839                 bp = getblk(hmp->devvp, 0, HAMMER2_PBUFSIZE, 0, 0);
840                 hammer2_voldata_lock(hmp);
841                 bcopy(&hmp->voldata, bp->b_data, HAMMER2_PBUFSIZE);
842                 hammer2_voldata_unlock(hmp);
843                 bawrite(bp);
844         }
845         return (error);
846 }
847
848 /*
849  * Sync passes.
850  *
851  * NOTE: We don't test SUBMODIFIED or MOVED here because the fsync code
852  *       won't flush on those flags.  The syncer code above will do a
853  *       general meta-data flush globally that will catch these flags.
854  */
855 static int
856 hammer2_sync_scan1(struct mount *mp, struct vnode *vp, void *data)
857 {
858         hammer2_inode_t *ip;
859
860         ip = VTOI(vp);
861         if (vp->v_type == VNON || ip == NULL ||
862             ((ip->chain.flags & (HAMMER2_CHAIN_MODIFIED |
863                                  HAMMER2_CHAIN_DIRTYEMBED)) == 0 &&
864              RB_EMPTY(&vp->v_rbdirty_tree))) {
865                 return(-1);
866         }
867         return(0);
868 }
869
870 static int
871 hammer2_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
872 {
873         struct hammer2_sync_info *info = data;
874         hammer2_inode_t *ip;
875         int error;
876
877         ip = VTOI(vp);
878         if (vp->v_type == VNON || vp->v_type == VBAD ||
879             ((ip->chain.flags & (HAMMER2_CHAIN_MODIFIED |
880                                  HAMMER2_CHAIN_DIRTYEMBED)) == 0 &&
881             RB_EMPTY(&vp->v_rbdirty_tree))) {
882                 return(0);
883         }
884         error = VOP_FSYNC(vp, MNT_NOWAIT, 0);
885         if (error)
886                 info->error = error;
887         return(0);
888 }
889
890 static
891 int
892 hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp)
893 {
894         return (0);
895 }
896
897 static
898 int
899 hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
900                struct fid *fhp, struct vnode **vpp)
901 {
902         return (0);
903 }
904
905 static
906 int
907 hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
908                  int *exflagsp, struct ucred **credanonp)
909 {
910         return (0);
911 }
912
913 /*
914  * Support code for hammer2_mount().  Read, verify, and install the volume
915  * header into the HMP
916  *
 917  * XXX read four volhdrs and use the one with the highest TID whose CRC
918  *     matches.
919  *
920  * XXX check iCRCs.
921  *
 922  * XXX For filesystems w/ fewer than 4 volhdrs, make sure not to write to
 923  *     nonexistent locations.
924  *
925  * XXX Record selected volhdr and ring updates to each of 4 volhdrs
926  */
927 static
928 int
929 hammer2_install_volume_header(hammer2_mount_t *hmp)
930 {
931         hammer2_volume_data_t *vd;
932         struct buf *bp;
933         hammer2_crc32_t crc0, crc, bcrc0, bcrc;
934         int error_reported;
935         int error;
936         int valid;
937         int i;
938
939         error_reported = 0;
940         error = 0;
941         valid = 0;
942         bp = NULL;
943
944         /*
945          * There are up to 4 copies of the volume header (syncs iterate
946          * between them so there is no single master).  We don't trust the
947          * volu_size field so we don't know precisely how large the filesystem
948          * is, so depend on the OS to return an error if we go beyond the
949          * block device's EOF.
950          */
951         for (i = 0; i < HAMMER2_NUM_VOLHDRS; i++) {
952                 error = bread(hmp->devvp, i * HAMMER2_ZONE_BYTES64,
953                               HAMMER2_VOLUME_BYTES, &bp);
954                 if (error) {
955                         brelse(bp);
956                         bp = NULL;
957                         continue;
958                 }
959
960                 vd = (struct hammer2_volume_data *) bp->b_data;
961                 if ((vd->magic != HAMMER2_VOLUME_ID_HBO) &&
962                     (vd->magic != HAMMER2_VOLUME_ID_ABO)) {
963                         brelse(bp);
964                         bp = NULL;
965                         continue;
966                 }
967
968                 if (vd->magic == HAMMER2_VOLUME_ID_ABO) {
969                         /* XXX: Reversed-endianness filesystem */
 970                         kprintf("hammer2: reverse-endian filesystem detected\n");
971                         brelse(bp);
972                         bp = NULL;
973                         continue;
974                 }
975
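                /*
                 * Verify both section iCRCs before trusting this copy of
                 * the volume header.
                 */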
976                 crc = vd->icrc_sects[HAMMER2_VOL_ICRC_SECT0];
977                 crc0 = hammer2_icrc32(bp->b_data + HAMMER2_VOLUME_ICRC0_OFF,
978                                       HAMMER2_VOLUME_ICRC0_SIZE);
979                 bcrc = vd->icrc_sects[HAMMER2_VOL_ICRC_SECT1];
980                 bcrc0 = hammer2_icrc32(bp->b_data + HAMMER2_VOLUME_ICRC1_OFF,
981                                        HAMMER2_VOLUME_ICRC1_SIZE);
982                 if ((crc0 != crc) || (bcrc0 != bcrc)) {
983                         kprintf("hammer2 volume header crc "
 984                                 "mismatch copy #%d\t%08x %08x\n",
985                                 i, crc0, crc);
986                         error_reported = 1;
987                         brelse(bp);
988                         bp = NULL;
989                         continue;
990                 }
991                 if (valid == 0 || hmp->voldata.mirror_tid < vd->mirror_tid) {
992                         valid = 1;
993                         hmp->voldata = *vd;
994                 }
995                 brelse(bp);
996                 bp = NULL;
997         }
998         if (valid) {
999                 error = 0;
1000                 if (error_reported)
1001                         kprintf("hammer2: a valid volume header was found\n");
1002         } else {
1003                 error = EINVAL;
1004                 kprintf("hammer2: no valid volume headers found!\n");
1005         }
1006         return (error);
1007 }
1008
1009 /*
1010  * Reconnect using the passed file pointer.  The caller must ref the
1011  * fp for us.
1012  */
1013 void
1014 hammer2_cluster_reconnect(hammer2_pfsmount_t *pmp, struct file *fp)
1015 {
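        /*
         * Shut down any existing messaging threads, install the new
         * descriptor, and restart the reader and writer threads.
         */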
1016         atomic_set_int(&pmp->msg_ctl, HAMMER2_CLUSTERCTL_KILL);
1017         while (pmp->msgrd_td || pmp->msgwr_td) {
1018                wakeup(&pmp->msg_ctl);
1019                tsleep(pmp, 0, "clstrkl", hz);
1020         }
1021         atomic_clear_int(&pmp->msg_ctl, HAMMER2_CLUSTERCTL_KILL);
1022         pmp->msg_fp = fp;
1023         lwkt_create(hammer2_cluster_thread_rd, pmp, &pmp->msgrd_td,
1024                     NULL, 0, -1, "hammer2-msgrd");
1025         lwkt_create(hammer2_cluster_thread_wr, pmp, &pmp->msgwr_td,
1026                     NULL, 0, -1, "hammer2-msgwr");
1027 }
1028
1029 /*
1030  * Cluster controller thread.  Perform messaging functions.  We have one
1031  * thread for the reader and one for the writer.  The writer handles
1032  * shutdown requests (which should break the reader thread).
1033  */
1034 static
1035 void
1036 hammer2_cluster_thread_rd(void *arg)
1037 {
1038         hammer2_pfsmount_t *pmp = arg;
1039         hammer2_msg_hdr_t hdr;
1040         hammer2_msg_t *msg;
1041         hammer2_state_t *state;
1042         size_t hbytes;
1043         int error = 0;
1044
1045         while ((pmp->msg_ctl & HAMMER2_CLUSTERCTL_KILL) == 0) {
1046                 /*
1047                  * Retrieve the message from the pipe or socket.
1048                  */
1049                 error = fp_read(pmp->msg_fp, &hdr, sizeof(hdr),
1050                                 NULL, 1, UIO_SYSSPACE);
1051                 if (error)
1052                         break;
1053                 if (hdr.magic != HAMMER2_MSGHDR_MAGIC) {
1054                         kprintf("hammer2: msgrd: bad magic: %04x\n",
1055                                 hdr.magic);
1056                         error = EINVAL;
1057                         break;
1058                 }
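                /*
                 * The total header size is encoded in the command word
                 * in units of HAMMER2_MSG_ALIGN.
                 */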
1059                 hbytes = (hdr.cmd & HAMMER2_MSGF_SIZE) * HAMMER2_MSG_ALIGN;
1060                 if (hbytes < sizeof(hdr) || hbytes > HAMMER2_MSGAUX_MAX) {
1061                         kprintf("hammer2: msgrd: bad header size %zd\n",
1062                                 hbytes);
1063                         error = EINVAL;
1064                         break;
1065                 }
1066                 /* XXX messy: mask cmd to avoid allocating state */
1067                 msg = hammer2_msg_alloc(&pmp->router,
1068                                         hdr.cmd & HAMMER2_MSGF_BASECMDMASK,
1069                                         NULL, NULL);
1070                 msg->any.head = hdr;
1071                 msg->hdr_size = hbytes;
1072                 if (hbytes > sizeof(hdr)) {
1073                         error = fp_read(pmp->msg_fp, &msg->any.head + 1,
1074                                         hbytes - sizeof(hdr),
1075                                         NULL, 1, UIO_SYSSPACE);
1076                         if (error) {
1077                                 kprintf("hammer2: short msg received\n");
1078                                 error = EINVAL;
1079                                 break;
1080                         }
1081                 }
1082                 msg->aux_size = hdr.aux_bytes * HAMMER2_MSG_ALIGN;
1083                 if (msg->aux_size > HAMMER2_MSGAUX_MAX) {
1084                         kprintf("hammer2: illegal msg payload size %zd\n",
1085                                 msg->aux_size);
1086                         error = EINVAL;
1087                         break;
1088                 }
1089                 if (msg->aux_size) {
1090                         msg->aux_data = kmalloc(msg->aux_size, pmp->mmsg,
1091                                                 M_WAITOK | M_ZERO);
1092                         error = fp_read(pmp->msg_fp, msg->aux_data,
1093                                         msg->aux_size,
1094                                         NULL, 1, UIO_SYSSPACE);
1095                         if (error) {
1096                                 kprintf("hammer2: short msg "
1097                                         "payload received\n");
1098                                 break;
1099                         }
1100                 }
1101
1102                 /*
1103                  * State machine tracking, state assignment for msg,
1104                  * returns error and discard status.  Errors are fatal
1105                  * to the connection except for EALREADY which forces
1106                  * a discard without execution.
1107                  */
1108                 error = hammer2_state_msgrx(msg);
1109                 if (error) {
1110                         /*
1111                          * Raw protocol or connection error
1112                          */
1113                         hammer2_msg_free(msg);
1114                         if (error == EALREADY)
1115                                 error = 0;
1116                 } else if (msg->state && msg->state->func) {
1117                         /*
1118                          * Message related to state which already has a
1119                          * handling function installed for it.
1120                          */
1121                         error = msg->state->func(msg->state, msg);
1122                         hammer2_state_cleanuprx(msg);
1123                 } else if ((msg->any.head.cmd & HAMMER2_MSGF_PROTOS) ==
1124                            HAMMER2_MSG_PROTO_LNK) {
1125                         /*
1126                          * Message related to the LNK protocol set
1127                          */
1128                         error = hammer2_msg_lnk_rcvmsg(msg);
1129                         hammer2_state_cleanuprx(msg);
1130                 } else if ((msg->any.head.cmd & HAMMER2_MSGF_PROTOS) ==
1131                            HAMMER2_MSG_PROTO_DBG) {
1132                         /*
1133                          * Message related to the DBG protocol set
1134                          */
1135                         error = hammer2_msg_dbg_rcvmsg(msg);
1136                         hammer2_state_cleanuprx(msg);
1137                 } else {
1138                         /*
1139                          * Other higher-level messages (e.g. vnops)
1140                          */
1141                         error = hammer2_msg_adhoc_input(msg);
1142                         hammer2_state_cleanuprx(msg);
1143                 }
1144                 msg = NULL;
1145         }
1146
1147         if (error)
1148                 kprintf("hammer2: msg read failed error %d\n", error);
1149
1150         lockmgr(&pmp->msglk, LK_EXCLUSIVE);
1151         if (msg) {
1152                 if (msg->state && msg->state->msg == msg)
1153                         msg->state->msg = NULL;
1154                 hammer2_msg_free(msg);
1155         }
1156
1157         if ((state = pmp->freerd_state) != NULL) {
1158                 pmp->freerd_state = NULL;
1159                 hammer2_state_free(state);
1160         }
1161
1162         /*
1163          * XXX simulate MSGF_DELETEs
1164          */
1165         while ((state = RB_ROOT(&pmp->staterd_tree)) != NULL) {
1166                 kprintf("y");
1167                 if (state->func &&
1168                     (state->txcmd & HAMMER2_MSGF_DELETE) == 0 &&
1169                     (state->rxcmd & HAMMER2_MSGF_DELETE) == 0) {
1170                         lockmgr(&pmp->msglk, LK_RELEASE);
1171                         msg = hammer2_msg_alloc(&pmp->router,
1172                                                 HAMMER2_LNK_ERROR,
1173                                                 NULL, NULL);
1174                         if ((state->rxcmd & HAMMER2_MSGF_CREATE) == 0)
1175                                 msg->any.head.cmd |= HAMMER2_MSGF_CREATE;
1176                         msg->any.head.cmd |= HAMMER2_MSGF_DELETE;
1177                         msg->state = state;
1178                         msg->state->func(state, msg);
1179                         hammer2_state_cleanuprx(msg);
1180                         lockmgr(&pmp->msglk, LK_EXCLUSIVE);
1181                 } else {
1182                         RB_REMOVE(hammer2_state_tree,
1183                                   &pmp->staterd_tree, state);
1184                         hammer2_state_free(state);
1185                 }
1186         }
1187         lockmgr(&pmp->msglk, LK_RELEASE);
1188
1189         fp_shutdown(pmp->msg_fp, SHUT_RDWR);
1190         pmp->msgrd_td = NULL;
1191         /* pmp can be ripped out from under us at this point */
1192         wakeup(pmp);
1193         lwkt_exit();
1194 }
1195
1196 static
1197 void
1198 hammer2_cluster_thread_wr(void *arg)
1199 {
1200         hammer2_pfsmount_t *pmp = arg;
1201         hammer2_msg_t *msg = NULL;
1202         hammer2_state_t *state;
1203         ssize_t res;
1204         size_t name_len;
1205         int error = 0;
1206
1207         /*
1208          * Open a LNK_CONN transaction indicating that we want to take part
1209          * in the spanning tree algorithm.  Filter explicitly on the PFS
1210          * info in the iroot.
1211          *
1212          * We do not transmit our (only) LNK_SPAN until the other end has
1213          * acknowledged our link connection request.
1214          *
1215          * The transaction remains fully open for the duration of the
1216          * connection.
1217          */
1218         msg = hammer2_msg_alloc(&pmp->router, HAMMER2_LNK_CONN |
1219                                               HAMMER2_MSGF_CREATE,
1220                                 hammer2_msg_conn_reply, pmp);
1221         msg->any.lnk_conn.pfs_clid = pmp->iroot->ip_data.pfs_clid;
1222         msg->any.lnk_conn.pfs_fsid = pmp->iroot->ip_data.pfs_fsid;
1223         msg->any.lnk_conn.pfs_type = pmp->iroot->ip_data.pfs_type;
1224         msg->any.lnk_conn.proto_version = HAMMER2_SPAN_PROTO_1;
1225         name_len = pmp->iroot->ip_data.name_len;
1226         if (name_len >= sizeof(msg->any.lnk_conn.label))
1227                 name_len = sizeof(msg->any.lnk_conn.label) - 1;
1228         bcopy(pmp->iroot->ip_data.filename, msg->any.lnk_conn.label, name_len);
1229         pmp->conn_state = msg->state;
1230         msg->any.lnk_conn.label[name_len] = 0;
1231         hammer2_msg_write(msg);
1232
1233         /*
1234          * Transmit loop
1235          */
1236         msg = NULL;
1237         lockmgr(&pmp->msglk, LK_EXCLUSIVE);
1238
1239         while ((pmp->msg_ctl & HAMMER2_CLUSTERCTL_KILL) == 0 && error == 0) {
1240                 /*
1241                  * Sleep if no messages pending.  Interlock with flag while
1242                  * holding msglk.
1243                  */
1244                 if (TAILQ_EMPTY(&pmp->msgq)) {
1245                         atomic_set_int(&pmp->msg_ctl,
1246                                        HAMMER2_CLUSTERCTL_SLEEPING);
1247                         lksleep(&pmp->msg_ctl, &pmp->msglk, 0, "msgwr", hz);
1248                         atomic_clear_int(&pmp->msg_ctl,
1249                                          HAMMER2_CLUSTERCTL_SLEEPING);
1250                 }
1251
1252                 while ((msg = TAILQ_FIRST(&pmp->msgq)) != NULL) {
1253                         /*
1254                          * Remove msg from the transmit queue and do
1255                          * persist and half-closed state handling.
1256                          */
1257                         TAILQ_REMOVE(&pmp->msgq, msg, qentry);
1258                         lockmgr(&pmp->msglk, LK_RELEASE);
1259
1260                         error = hammer2_state_msgtx(msg);
1261                         if (error == EALREADY) {
1262                                 error = 0;
1263                                 hammer2_msg_free(msg);
1264                                 lockmgr(&pmp->msglk, LK_EXCLUSIVE);
1265                                 continue;
1266                         }
1267                         if (error) {
1268                                 lockmgr(&pmp->msglk, LK_EXCLUSIVE);
1269                                 break;
1270                         }
1271
1272                         /*
1273                          * Dump the message to the pipe or socket.
1274                          */
1275                         error = fp_write(pmp->msg_fp, &msg->any, msg->hdr_size,
1276                                          &res, UIO_SYSSPACE);
1277                         if (error || res != msg->hdr_size) {
1278                                 if (error == 0)
1279                                         error = EINVAL;
1280                                 lockmgr(&pmp->msglk, LK_EXCLUSIVE);
1281                                 break;
1282                         }
1283                         if (msg->aux_size) {
1284                                 error = fp_write(pmp->msg_fp,
1285                                                  msg->aux_data, msg->aux_size,
1286                                                  &res, UIO_SYSSPACE);
1287                                 if (error || res != msg->aux_size) {
1288                                         if (error == 0)
1289                                                 error = EINVAL;
1290                                         lockmgr(&pmp->msglk, LK_EXCLUSIVE);
1291                                         break;
1292                                 }
1293                         }
1294                         hammer2_state_cleanuptx(msg);
1295                         lockmgr(&pmp->msglk, LK_EXCLUSIVE);
1296                 }
1297         }
1298
1299         /*
1300          * Cleanup messages pending transmission and release msgq lock.
1301          */
1302         if (error)
1303                 kprintf("hammer2: msg write failed error %d\n", error);
1304
1305         if (msg) {
1306                 if (msg->state && msg->state->msg == msg)
1307                         msg->state->msg = NULL;
1308                 hammer2_msg_free(msg);
1309         }
1310
1311         while ((msg = TAILQ_FIRST(&pmp->msgq)) != NULL) {
1312                 TAILQ_REMOVE(&pmp->msgq, msg, qentry);
1313                 if (msg->state && msg->state->msg == msg)
1314                         msg->state->msg = NULL;
1315                 hammer2_msg_free(msg);
1316         }
1317
1318         if ((state = pmp->freewr_state) != NULL) {
1319                 pmp->freewr_state = NULL;
1320                 hammer2_state_free(state);
1321         }
1322
1323         /*
1324          * XXX simulate MSGF_DELETEs
1325          */
1326         while ((state = RB_ROOT(&pmp->statewr_tree)) != NULL) {
1327                 kprintf("x");
1328                 if (state->func &&
1329                     (state->txcmd & HAMMER2_MSGF_DELETE) == 0 &&
1330                     (state->rxcmd & HAMMER2_MSGF_DELETE) == 0) {
1331                         lockmgr(&pmp->msglk, LK_RELEASE);
1332                         msg = hammer2_msg_alloc(&pmp->router,
1333                                                 HAMMER2_LNK_ERROR,
1334                                                 NULL, NULL);
1335                         if ((state->rxcmd & HAMMER2_MSGF_CREATE) == 0)
1336                                 msg->any.head.cmd |= HAMMER2_MSGF_CREATE;
1337                         msg->any.head.cmd |= HAMMER2_MSGF_DELETE;
1338                         msg->state = state;
1339                         msg->state->func(state, msg);
1340                         hammer2_state_cleanuprx(msg);
1341                         lockmgr(&pmp->msglk, LK_EXCLUSIVE);
1342                 } else {
1343                         RB_REMOVE(hammer2_state_tree,
1344                                   &pmp->statewr_tree, state);
1345                         hammer2_state_free(state);
1346                 }
1347         }
1348         lockmgr(&pmp->msglk, LK_RELEASE);
1349
1350         /*
 1351  * Cleanup the descriptor; be sure the read side is shut down so the
 1352  * (probably blocked) read operation returns an error.
1353          *
1354          * pmp can be ripped out from under us once msgwr_td is set to NULL.
1355          */
1356         fp_shutdown(pmp->msg_fp, SHUT_RDWR);
1357         pmp->msgwr_td = NULL;
1358         wakeup(pmp);
1359         lwkt_exit();
1360 }
1361
1362 /*
1363  * Called with msglk held after queueing a new message, wakes up the
 1364  * transmit thread.  We use an interlock flag to avoid unnecessary
1365  * wakeups.
1366  */
1367 void
1368 hammer2_clusterctl_wakeup(hammer2_pfsmount_t *pmp)
1369 {
1370         if (pmp->msg_ctl & HAMMER2_CLUSTERCTL_SLEEPING) {
1371                 atomic_clear_int(&pmp->msg_ctl, HAMMER2_CLUSTERCTL_SLEEPING);
1372                 wakeup(&pmp->msg_ctl);
1373         }
1374 }
1375
1376 static int
1377 hammer2_msg_lnk_rcvmsg(hammer2_msg_t *msg)
1378 {
1379         switch(msg->any.head.cmd & HAMMER2_MSGF_TRANSMASK) {
1380         case HAMMER2_LNK_CONN | HAMMER2_MSGF_CREATE:
1381                 /*
1382                  * reply & leave trans open
1383                  */
1384                 kprintf("CONN RECEIVE - (just ignore it)\n");
1385                 hammer2_msg_result(msg, 0);
1386                 break;
1387         case HAMMER2_LNK_SPAN | HAMMER2_MSGF_CREATE:
1388                 kprintf("SPAN RECEIVE - ADDED FROM CLUSTER\n");
1389                 break;
1390         case HAMMER2_LNK_SPAN | HAMMER2_MSGF_DELETE:
1391                 kprintf("SPAN RECEIVE - DELETED FROM CLUSTER\n");
1392                 break;
1393         default:
1394                 break;
1395         }
1396         return(0);
1397 }
1398
1399 /*
1400  * This function is called when the other end replies to our LNK_CONN
1401  * request.
1402  *
1403  * We transmit our (single) SPAN on the initial reply, leaving that
1404  * transaction open too.
1405  */
1406 static int
1407 hammer2_msg_conn_reply(hammer2_state_t *state, hammer2_msg_t *msg)
1408 {
1409         hammer2_pfsmount_t *pmp = state->any.pmp;
1410         hammer2_mount_t *hmp = pmp->hmp;
1411         size_t name_len;
1412         int copyid;
1413
1414         if (msg->any.head.cmd & HAMMER2_MSGF_CREATE) {
1415                 kprintf("LNK_CONN transaction replied to, initiate SPAN\n");
1416                 msg = hammer2_msg_alloc(&pmp->router, HAMMER2_LNK_SPAN |
1417                                                       HAMMER2_MSGF_CREATE,
1418                                         hammer2_msg_span_reply, pmp);
1419                 msg->any.lnk_span.pfs_clid = pmp->iroot->ip_data.pfs_clid;
1420                 msg->any.lnk_span.pfs_fsid = pmp->iroot->ip_data.pfs_fsid;
1421                 msg->any.lnk_span.pfs_type = pmp->iroot->ip_data.pfs_type;
1422                 msg->any.lnk_span.proto_version = HAMMER2_SPAN_PROTO_1;
1423                 name_len = pmp->iroot->ip_data.name_len;
1424                 if (name_len >= sizeof(msg->any.lnk_span.label))
1425                         name_len = sizeof(msg->any.lnk_span.label) - 1;
1426                 bcopy(pmp->iroot->ip_data.filename,
1427                       msg->any.lnk_span.label,
1428                       name_len);
1429                 msg->any.lnk_span.label[name_len] = 0;
1430                 hammer2_msg_write(msg);
1431
1432                 /*
1433                  * Dump the configuration stored in the volume header
1434                  */
1435                 hammer2_voldata_lock(hmp);
1436                 for (copyid = 0; copyid < HAMMER2_COPYID_COUNT; ++copyid) {
1437                         if (hmp->voldata.copyinfo[copyid].copyid == 0)
1438                                 continue;
1439                         hammer2_volconf_update(pmp, copyid);
1440                 }
1441                 hammer2_voldata_unlock(hmp);
1442         }
1443         if (msg->any.head.cmd & HAMMER2_MSGF_DELETE) {
1444                 kprintf("LNK_CONN transaction terminated by remote\n");
1445                 pmp->conn_state = NULL;
1446                 hammer2_msg_reply(msg, 0);
1447         }
1448         return(0);
1449 }
1450
1451 static int
1452 hammer2_msg_span_reply(hammer2_state_t *state, hammer2_msg_t *msg)
1453 {
1454         hammer2_pfsmount_t *pmp = state->any.pmp;
1455
1456         kprintf("SPAN REPLY - Our span was terminated? %p\n", pmp);
1457         return(0);
1458 }
1459
1460 /*
1461  * Volume configuration updates are passed onto the userland service
1462  * daemon via the open LNK_CONN transaction.
1463  */
1464 void
1465 hammer2_volconf_update(hammer2_pfsmount_t *pmp, int index)
1466 {
1467         hammer2_mount_t *hmp = pmp->hmp;
1468         hammer2_msg_t *msg;
1469
1470         /* XXX interlock against connection state termination */
1471         kprintf("volconf update %p\n", pmp->conn_state);
1472         if (pmp->conn_state) {
1473                 kprintf("TRANSMIT VOLCONF VIA OPEN CONN TRANSACTION\n");
1474                 msg = hammer2_msg_alloc(&pmp->router, HAMMER2_LNK_VOLCONF,
1475                                         NULL, NULL);
1476                 msg->state = pmp->conn_state;
1477                 msg->any.lnk_volconf.copy = hmp->voldata.copyinfo[index];
1478                 msg->any.lnk_volconf.mediaid = hmp->voldata.fsid;
1479                 msg->any.lnk_volconf.index = index;
1480                 hammer2_msg_write(msg);
1481         }
1482 }