dragonfly.git: sys/vfs/hammer2/hammer2_vfsops.c (commit 7046890e68064b4806281dff1ea7fd13b808a473)
1 /*-
2  * Copyright (c) 2011, 2012 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/nlookup.h>
38 #include <sys/vnode.h>
39 #include <sys/mount.h>
40 #include <sys/fcntl.h>
41 #include <sys/buf.h>
42 #include <sys/uuid.h>
43 #include <sys/vfsops.h>
44 #include <sys/sysctl.h>
45 #include <sys/socket.h>
46
47 #include "hammer2.h"
48 #include "hammer2_disk.h"
49 #include "hammer2_mount.h"
50 #include "hammer2_network.h"
51
52 struct hammer2_sync_info {
53         int error;
54         int waitfor;
55 };
56
57 TAILQ_HEAD(hammer2_mntlist, hammer2_mount);
58 static struct hammer2_mntlist hammer2_mntlist;
59 static struct lock hammer2_mntlk;
60
61 int hammer2_debug;
62 int hammer2_cluster_enable = 1;
63 int hammer2_hardlink_enable = 1;
64 long hammer2_iod_file_read;
65 long hammer2_iod_meta_read;
66 long hammer2_iod_indr_read;
67 long hammer2_iod_file_write;
68 long hammer2_iod_meta_write;
69 long hammer2_iod_indr_write;
70 long hammer2_iod_volu_write;
71 long hammer2_ioa_file_read;
72 long hammer2_ioa_meta_read;
73 long hammer2_ioa_indr_read;
74 long hammer2_ioa_file_write;
75 long hammer2_ioa_meta_write;
76 long hammer2_ioa_indr_write;
77 long hammer2_ioa_volu_write;
78
79 SYSCTL_NODE(_vfs, OID_AUTO, hammer2, CTLFLAG_RW, 0, "HAMMER2 filesystem");
80
81 SYSCTL_INT(_vfs_hammer2, OID_AUTO, debug, CTLFLAG_RW,
82            &hammer2_debug, 0, "");
83 SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_enable, CTLFLAG_RW,
84            &hammer2_cluster_enable, 0, "");
85 SYSCTL_INT(_vfs_hammer2, OID_AUTO, hardlink_enable, CTLFLAG_RW,
86            &hammer2_hardlink_enable, 0, "");
87 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_read, CTLFLAG_RW,
88            &hammer2_iod_file_read, 0, "");
89 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_read, CTLFLAG_RW,
90            &hammer2_iod_meta_read, 0, "");
91 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_read, CTLFLAG_RW,
92            &hammer2_iod_indr_read, 0, "");
93 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_write, CTLFLAG_RW,
94            &hammer2_iod_file_write, 0, "");
95 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_write, CTLFLAG_RW,
96            &hammer2_iod_meta_write, 0, "");
97 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_write, CTLFLAG_RW,
98            &hammer2_iod_indr_write, 0, "");
99 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_write, CTLFLAG_RW,
100            &hammer2_iod_volu_write, 0, "");
101 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_file_read, CTLFLAG_RW,
102            &hammer2_ioa_file_read, 0, "");
103 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_meta_read, CTLFLAG_RW,
104            &hammer2_ioa_meta_read, 0, "");
105 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_indr_read, CTLFLAG_RW,
106            &hammer2_ioa_indr_read, 0, "");
107 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_file_write, CTLFLAG_RW,
108            &hammer2_ioa_file_write, 0, "");
109 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_meta_write, CTLFLAG_RW,
110            &hammer2_ioa_meta_write, 0, "");
111 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_indr_write, CTLFLAG_RW,
112            &hammer2_ioa_indr_write, 0, "");
113 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_volu_write, CTLFLAG_RW,
114            &hammer2_ioa_volu_write, 0, "");
115
116 static int hammer2_vfs_init(struct vfsconf *conf);
117 static int hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
118                                 struct ucred *cred);
119 static int hammer2_remount(struct mount *, char *, struct vnode *,
120                                 struct ucred *);
121 static int hammer2_vfs_unmount(struct mount *mp, int mntflags);
122 static int hammer2_vfs_root(struct mount *mp, struct vnode **vpp);
123 static int hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp,
124                                 struct ucred *cred);
125 static int hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
126                                 struct ucred *cred);
127 static int hammer2_vfs_sync(struct mount *mp, int waitfor);
128 static int hammer2_vfs_vget(struct mount *mp, struct vnode *dvp,
129                                 ino_t ino, struct vnode **vpp);
130 static int hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
131                                 struct fid *fhp, struct vnode **vpp);
132 static int hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp);
133 static int hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
134                                 int *exflagsp, struct ucred **credanonp);
135
136 static int hammer2_install_volume_header(hammer2_mount_t *hmp);
137 static int hammer2_sync_scan1(struct mount *mp, struct vnode *vp, void *data);
138 static int hammer2_sync_scan2(struct mount *mp, struct vnode *vp, void *data);
139
140 static void hammer2_cluster_thread_rd(void *arg);
141 static void hammer2_cluster_thread_wr(void *arg);
142 static int hammer2_msg_conn_reply(hammer2_state_t *state, hammer2_msg_t *msg);
143 static int hammer2_msg_span_reply(hammer2_state_t *state, hammer2_msg_t *msg);
144 static int hammer2_msg_lnk_rcvmsg(hammer2_msg_t *msg);
145 static void hammer2_drain_msgq(hammer2_pfsmount_t *pmp);
146
147 /*
148  * HAMMER2 vfs operations.
149  */
150 static struct vfsops hammer2_vfsops = {
151         .vfs_init       = hammer2_vfs_init,
152         .vfs_sync       = hammer2_vfs_sync,
153         .vfs_mount      = hammer2_vfs_mount,
154         .vfs_unmount    = hammer2_vfs_unmount,
155         .vfs_root       = hammer2_vfs_root,
156         .vfs_statfs     = hammer2_vfs_statfs,
157         .vfs_statvfs    = hammer2_vfs_statvfs,
158         .vfs_vget       = hammer2_vfs_vget,
159         .vfs_vptofh     = hammer2_vfs_vptofh,
160         .vfs_fhtovp     = hammer2_vfs_fhtovp,
161         .vfs_checkexp   = hammer2_vfs_checkexp
162 };
163
164 MALLOC_DEFINE(M_HAMMER2, "HAMMER2-mount", "");
165
166 VFS_SET(hammer2_vfsops, hammer2, 0);
167 MODULE_VERSION(hammer2, 1);
168
169 static
170 int
171 hammer2_vfs_init(struct vfsconf *conf)
172 {
173         int error;
174
175         error = 0;
176
177         if (HAMMER2_BLOCKREF_BYTES != sizeof(struct hammer2_blockref))
178                 error = EINVAL;
179         if (HAMMER2_INODE_BYTES != sizeof(struct hammer2_inode_data))
180                 error = EINVAL;
181         if (HAMMER2_ALLOCREF_BYTES != sizeof(struct hammer2_allocref))
182                 error = EINVAL;
183         if (HAMMER2_VOLUME_BYTES != sizeof(struct hammer2_volume_data))
184                 error = EINVAL;
185
186         if (error)
187                 kprintf("HAMMER2 structure size mismatch; cannot continue.\n");
188
189         lockinit(&hammer2_mntlk, "mntlk", 0, 0);
190         TAILQ_INIT(&hammer2_mntlist);
191
192         return (error);
193 }
194
195 /*
196  * Mount or remount HAMMER2 filesystem from physical media
197  *
198  *      mountroot
199  *              mp              mount point structure
200  *              path            NULL
201  *              data            <unused>
202  *              cred            <unused>
203  *
204  *      mount
205  *              mp              mount point structure
206  *              path            path to mount point
207  *              data            pointer to argument structure in user space
208  *                      volume  volume path (device@LABEL form)
209  *                      hflags  user mount flags
210  *              cred            user credentials
211  *
212  * RETURNS:     0       Success
213  *              !0      error number
214  */
215 static
216 int
217 hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
218                   struct ucred *cred)
219 {
220         struct hammer2_mount_info info;
221         hammer2_pfsmount_t *pmp;
222         hammer2_mount_t *hmp;
223         hammer2_key_t lhc;
224         struct vnode *devvp;
225         struct nlookupdata nd;
226         hammer2_chain_t *parent;
227         hammer2_chain_t *schain;
228         hammer2_chain_t *rchain;
229         char devstr[MNAMELEN];
230         size_t size;
231         size_t done;
232         char *dev;
233         char *label;
234         int ronly = 1;
235         int create_hmp;
236         int error;
237
238         hmp = NULL;
239         pmp = NULL;
240         dev = NULL;
241         label = NULL;
242         devvp = NULL;
243
244         kprintf("hammer2_mount\n");
245
246         if (path == NULL) {
247                 /*
248                  * Root mount
249                  */
250                 bzero(&info, sizeof(info));
251                 info.cluster_fd = -1;
252                 return (EOPNOTSUPP);
253         } else {
254                 /*
255                  * Non-root mount or updating a mount
256                  */
257                 error = copyin(data, &info, sizeof(info));
258                 if (error)
259                         return (error);
260
261                 error = copyinstr(info.volume, devstr, MNAMELEN - 1, &done);
262                 if (error)
263                         return (error);
264
265                 /* Extract device and label */
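                /*
                 * For illustration (device name is hypothetical): a volume
                 * string of "/dev/ad0s1d@LOCAL" splits into dev "/dev/ad0s1d"
                 * and label "LOCAL" once the '@' is overwritten with a NUL.
                 */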
266                 dev = devstr;
267                 label = strchr(devstr, '@');
268                 if (label == NULL ||
269                     ((label + 1) - dev) > done) {
270                         return (EINVAL);
271                 }
272                 *label = '\0';
273                 label++;
274                 if (*label == '\0')
275                         return (EINVAL);
276
277                 if (mp->mnt_flag & MNT_UPDATE) {
278                         /* Update mount */
279                         /* HAMMER2 implements NFS export via mountctl */
280                         hmp = MPTOHMP(mp);
281                         devvp = hmp->devvp;
282                         error = hammer2_remount(mp, path, devvp, cred);
283                         return error;
284                 }
285         }
286
287         /*
288          * PFS mount
289          *
290          * Lookup name and verify it refers to a block device.
291          */
292         error = nlookup_init(&nd, dev, UIO_SYSSPACE, NLC_FOLLOW);
293         if (error == 0)
294                 error = nlookup(&nd);
295         if (error == 0)
296                 error = cache_vref(&nd.nl_nch, nd.nl_cred, &devvp);
297         nlookup_done(&nd);
298
299         if (error == 0) {
300                 if (vn_isdisk(devvp, &error))
301                         error = vfs_mountedon(devvp);
302         }
303
304         /*
305          * Determine if the device has already been mounted.  After this
306          * check hmp will be non-NULL if we are doing the second or more
307          * hammer2 mounts from the same device.
308          */
309         lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);
310         TAILQ_FOREACH(hmp, &hammer2_mntlist, mntentry) {
311                 if (hmp->devvp == devvp)
312                         break;
313         }
314
315         /*
316          * Open the device if this isn't a secondary mount
317          */
318         if (hmp) {
319                 create_hmp = 0;
320         } else {
321                 create_hmp = 1;
322                 if (error == 0 && vcount(devvp) > 0)
323                         error = EBUSY;
324
325                 /*
326                  * Now open the device
327                  */
328                 if (error == 0) {
329                         ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);
330                         vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
331                         error = vinvalbuf(devvp, V_SAVE, 0, 0);
332                         if (error == 0) {
333                                 error = VOP_OPEN(devvp,
334                                                  ronly ? FREAD : FREAD | FWRITE,
335                                                  FSCRED, NULL);
336                         }
337                         vn_unlock(devvp);
338                 }
339                 if (error && devvp) {
340                         vrele(devvp);
341                         devvp = NULL;
342                 }
343                 if (error) {
344                         lockmgr(&hammer2_mntlk, LK_RELEASE);
345                         return error;
346                 }
347         }
348
349         /*
350          * Block device opened successfully, finish initializing the
351          * mount structure.
352          *
353          * From this point on we have to call hammer2_unmount() on failure.
354          */
355         pmp = kmalloc(sizeof(*pmp), M_HAMMER2, M_WAITOK | M_ZERO);
356         mp->mnt_data = (qaddr_t)pmp;
357         pmp->mp = mp;
358         kmalloc_create(&pmp->mmsg, "HAMMER2-pfsmsg");
359         lockinit(&pmp->msglk, "h2msg", 0, 0);
360         TAILQ_INIT(&pmp->msgq);
361         RB_INIT(&pmp->staterd_tree);
362         RB_INIT(&pmp->statewr_tree);
363
364         if (create_hmp) {
365                 hmp = kmalloc(sizeof(*hmp), M_HAMMER2, M_WAITOK | M_ZERO);
366                 hmp->ronly = ronly;
367                 hmp->devvp = devvp;
368                 kmalloc_create(&hmp->minode, "HAMMER2-inodes");
369                 kmalloc_create(&hmp->mchain, "HAMMER2-chains");
370                 TAILQ_INSERT_TAIL(&hammer2_mntlist, hmp, mntentry);
371         }
372         ccms_domain_init(&pmp->ccms_dom);
373         pmp->hmp = hmp;
374         pmp->router.pmp = pmp;
375         ++hmp->pmp_count;
376         lockmgr(&hammer2_mntlk, LK_RELEASE);
377         kprintf("hammer2_mount hmp=%p pmpcnt=%d\n", hmp, hmp->pmp_count);
378         
379         mp->mnt_flag = MNT_LOCAL;
380         mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;   /* all entry pts are SMP */
381
382         if (create_hmp) {
383                 /*
384                  * vchain setup. vchain.data points at the embedded volume data.
385                  * vchain.refs is initialized and will never drop to 0.
386                  */
387                 hmp->vchain.refs = 1;
388                 hmp->vchain.data = (void *)&hmp->voldata;
389                 hmp->vchain.bref.type = HAMMER2_BREF_TYPE_VOLUME;
390                 hmp->vchain.bref.data_off = 0 | HAMMER2_PBUFRADIX;
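                /*
                 * (The low bits of bref.data_off appear to encode the block
                 * radix, so "0 | HAMMER2_PBUFRADIX" describes a
                 * 2^HAMMER2_PBUFRADIX byte block at device offset 0,
                 * i.e. the volume header.)
                 */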
391                 hmp->vchain.bref_flush = hmp->vchain.bref;
392                 ccms_cst_init(&hmp->vchain.cst, NULL);
393                 /* hmp->vchain.u.xxx is left NULL */
394                 lockinit(&hmp->alloclk, "h2alloc", 0, 0);
395                 lockinit(&hmp->voldatalk, "voldata", 0, LK_CANRECURSE);
396
397                 /*
398                  * Install the volume header
399                  */
400                 error = hammer2_install_volume_header(hmp);
401                 if (error) {
402                         hammer2_vfs_unmount(mp, MNT_FORCE);
403                         return error;
404                 }
405         }
406
407         /*
408          * required mount structure initializations
409          */
410         mp->mnt_stat.f_iosize = HAMMER2_PBUFSIZE;
411         mp->mnt_stat.f_bsize = HAMMER2_PBUFSIZE;
412
413         mp->mnt_vstat.f_frsize = HAMMER2_PBUFSIZE;
414         mp->mnt_vstat.f_bsize = HAMMER2_PBUFSIZE;
415
416         /*
417          * Optional fields
418          */
419         mp->mnt_iosize_max = MAXPHYS;
420
421         /*
422          * First locate the super-root inode, which is key 0 relative to the
423          * volume header's blockset.
424          *
425          * Then locate the root inode by scanning the directory keyspace
426          * represented by the label.
427          */
428         if (create_hmp) {
429                 parent = &hmp->vchain;
430                 hammer2_chain_lock(hmp, parent, HAMMER2_RESOLVE_ALWAYS);
431                 schain = hammer2_chain_lookup(hmp, &parent,
432                                       HAMMER2_SROOT_KEY, HAMMER2_SROOT_KEY, 0);
433                 hammer2_chain_unlock(hmp, parent);
434                 if (schain == NULL) {
435                         kprintf("hammer2_mount: invalid super-root\n");
436                         hammer2_vfs_unmount(mp, MNT_FORCE);
437                         return EINVAL;
438                 }
439                 hammer2_chain_ref(hmp, schain); /* for hmp->schain */
440                 hmp->schain = schain;           /* left locked */
441         } else {
442                 schain = hmp->schain;
443                 hammer2_chain_lock(hmp, schain, HAMMER2_RESOLVE_ALWAYS);
444         }
445
446         parent = schain;
447         lhc = hammer2_dirhash(label, strlen(label));
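        /*
         * The directory hash constrains only the upper bits of the key,
         * so scan the whole [lhc, lhc + HAMMER2_DIRHASH_LOMASK] range and
         * let the strcmp() below resolve any hash collisions.
         */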
448         rchain = hammer2_chain_lookup(hmp, &parent,
449                                       lhc, lhc + HAMMER2_DIRHASH_LOMASK,
450                                       0);
451         while (rchain) {
452                 if (rchain->bref.type == HAMMER2_BREF_TYPE_INODE &&
453                     rchain->u.ip &&
454                     strcmp(label, rchain->data->ipdata.filename) == 0) {
455                         break;
456                 }
457                 rchain = hammer2_chain_next(hmp, &parent, rchain,
458                                             lhc, lhc + HAMMER2_DIRHASH_LOMASK,
459                                             0);
460         }
461         hammer2_chain_unlock(hmp, parent);
462         if (rchain == NULL) {
463                 kprintf("hammer2_mount: PFS label not found\n");
464                 hammer2_vfs_unmount(mp, MNT_FORCE);
465                 return EINVAL;
466         }
467         if (rchain->flags & HAMMER2_CHAIN_MOUNTED) {
468                 hammer2_chain_unlock(hmp, rchain);
469                 kprintf("hammer2_mount: PFS label already mounted!\n");
470                 hammer2_vfs_unmount(mp, MNT_FORCE);
471                 return EBUSY;
472         }
473         atomic_set_int(&rchain->flags, HAMMER2_CHAIN_MOUNTED);
474
475         hammer2_chain_ref(hmp, rchain); /* for pmp->rchain */
476         hammer2_chain_unlock(hmp, rchain);
477         pmp->rchain = rchain;           /* left held & unlocked */
478         pmp->iroot = rchain->u.ip;      /* implied hold from rchain */
479         pmp->iroot->pmp = pmp;
480
481         kprintf("iroot %p\n", pmp->iroot);
482
483         /*
484          * Ref the cluster management messaging descriptor.  The mount
485          * program deals with the other end of the communications pipe.
486          */
487         pmp->msg_fp = holdfp(curproc->p_fd, info.cluster_fd, -1);
488         if (pmp->msg_fp == NULL) {
489                 kprintf("hammer2_mount: bad cluster_fd!\n");
490                 hammer2_vfs_unmount(mp, MNT_FORCE);
491                 return EBADF;
492         }
493         lwkt_create(hammer2_cluster_thread_rd, pmp, &pmp->msgrd_td,
494                     NULL, 0, -1, "hammer2-msgrd");
495         lwkt_create(hammer2_cluster_thread_wr, pmp, &pmp->msgwr_td,
496                     NULL, 0, -1, "hammer2-msgwr");
497
498         /*
499          * Finish setup
500          */
501         vfs_getnewfsid(mp);
502         vfs_add_vnodeops(mp, &hammer2_vnode_vops, &mp->mnt_vn_norm_ops);
503         vfs_add_vnodeops(mp, &hammer2_spec_vops, &mp->mnt_vn_spec_ops);
504         vfs_add_vnodeops(mp, &hammer2_fifo_vops, &mp->mnt_vn_fifo_ops);
505
506         copyinstr(info.volume, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, &size);
507         bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
508         bzero(mp->mnt_stat.f_mntonname, sizeof(mp->mnt_stat.f_mntonname));
509         copyinstr(path, mp->mnt_stat.f_mntonname,
510                   sizeof(mp->mnt_stat.f_mntonname) - 1,
511                   &size);
512
513         /*
514          * Initial statfs to prime mnt_stat.
515          */
516         hammer2_vfs_statfs(mp, &mp->mnt_stat, cred);
517
518         return 0;
519 }
520
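/*
 * Handle MNT_UPDATE (remount).  Currently a no-op stub; the update path
 * in hammer2_vfs_mount() simply succeeds.
 */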
521 static
522 int
523 hammer2_remount(struct mount *mp, char *path, struct vnode *devvp,
524                 struct ucred *cred)
525 {
526         return (0);
527 }
528
529 static
530 int
531 hammer2_vfs_unmount(struct mount *mp, int mntflags)
532 {
533         hammer2_pfsmount_t *pmp;
534         hammer2_mount_t *hmp;
535         int flags;
536         int error = 0;
537         int ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);
538         struct vnode *devvp;
539
540         pmp = MPTOPMP(mp);
541         hmp = pmp->hmp;
542         flags = 0;
543
544         if (mntflags & MNT_FORCE)
545                 flags |= FORCECLOSE;
546
547         hammer2_mount_exlock(hmp);
548
549         /*
550          * If mount initialization proceeded far enough we must flush
551          * its vnodes.
552          */
553         if (pmp->iroot)
554                 error = vflush(mp, 0, flags);
555
556         if (error)
557                 return error;
558
559         lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);
560         --hmp->pmp_count;
561         kprintf("hammer2_unmount hmp=%p pmpcnt=%d\n", hmp, hmp->pmp_count);
562
563         /*
564          * Flush any left over chains.  The voldata lock is only used
565          * to synchronize against HAMMER2_CHAIN_MODIFIED_AUX.
566          */
567         hammer2_voldata_lock(hmp);
568         if (hmp->vchain.flags & (HAMMER2_CHAIN_MODIFIED |
569                                  HAMMER2_CHAIN_MODIFIED_AUX |
570                                  HAMMER2_CHAIN_SUBMODIFIED)) {
571                 hammer2_voldata_unlock(hmp);
572                 hammer2_vfs_sync(mp, MNT_WAIT);
573         } else {
574                 hammer2_voldata_unlock(hmp);
575         }
576         if (hmp->pmp_count == 0) {
577                 if (hmp->vchain.flags & (HAMMER2_CHAIN_MODIFIED |
578                                          HAMMER2_CHAIN_MODIFIED_AUX |
579                                          HAMMER2_CHAIN_SUBMODIFIED)) {
580                         kprintf("hammer2_unmount: chains left over after "
581                                 "final sync\n");
582                         if (hammer2_debug & 0x0010)
583                                 Debugger("entered debugger");
584                 }
585         }
586
587         /*
588          * Cleanup the root and super-root chain elements (which should be
589          * clean).
590          */
591         pmp->iroot = NULL;
592         if (pmp->rchain) {
593                 atomic_clear_int(&pmp->rchain->flags, HAMMER2_CHAIN_MOUNTED);
594                 KKASSERT(pmp->rchain->refs == 1);
595                 hammer2_chain_drop(hmp, pmp->rchain);
596                 pmp->rchain = NULL;
597         }
598         ccms_domain_uninit(&pmp->ccms_dom);
599
600         /*
601          * Ask the cluster controller to go away
602          */
603         atomic_set_int(&pmp->msg_ctl, HAMMER2_CLUSTERCTL_KILL);
604         while (pmp->msgrd_td || pmp->msgwr_td) {
605                 wakeup(&pmp->msg_ctl);
606                 tsleep(pmp, 0, "clstrkl", hz);
607         }
608
609         /*
610          * Drop communications descriptor
611          */
612         if (pmp->msg_fp) {
613                 fdrop(pmp->msg_fp);
614                 pmp->msg_fp = NULL;
615         }
616
617         /*
618          * If no PFSs are left, drop the master hammer2_mount for the device.
619          */
620         if (hmp->pmp_count == 0) {
621                 if (hmp->schain) {
622                         KKASSERT(hmp->schain->refs == 1);
623                         hammer2_chain_drop(hmp, hmp->schain);
624                         hmp->schain = NULL;
625                 }
626
627                 /*
628                  * Finish up with the device vnode
629                  */
630                 if ((devvp = hmp->devvp) != NULL) {
631                         vinvalbuf(devvp, (ronly ? 0 : V_SAVE), 0, 0);
632                         hmp->devvp = NULL;
633                         VOP_CLOSE(devvp, (ronly ? FREAD : FREAD|FWRITE));
634                         vrele(devvp);
635                         devvp = NULL;
636                 }
637         }
638         hammer2_mount_unlock(hmp);
639
640         pmp->mp = NULL;
641         pmp->hmp = NULL;
642         mp->mnt_data = NULL;
643
644         kmalloc_destroy(&pmp->mmsg);
645
646         kfree(pmp, M_HAMMER2);
647         if (hmp->pmp_count == 0) {
648                 TAILQ_REMOVE(&hammer2_mntlist, hmp, mntentry);
649                 kmalloc_destroy(&hmp->minode);
650                 kmalloc_destroy(&hmp->mchain);
651                 kfree(hmp, M_HAMMER2);
652         }
653         lockmgr(&hammer2_mntlk, LK_RELEASE);
654         return (error);
655 }
656
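/*
 * Inode-number based lookup (NFS-style vget) is not implemented yet.
 */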
657 static
658 int
659 hammer2_vfs_vget(struct mount *mp, struct vnode *dvp,
660              ino_t ino, struct vnode **vpp)
661 {
662         kprintf("hammer2_vget\n");
663         return (EOPNOTSUPP);
664 }
665
666 static
667 int
668 hammer2_vfs_root(struct mount *mp, struct vnode **vpp)
669 {
670         hammer2_pfsmount_t *pmp;
671         hammer2_mount_t *hmp;
672         int error;
673         struct vnode *vp;
674
675         pmp = MPTOPMP(mp);
676         hmp = pmp->hmp;
677         hammer2_mount_exlock(hmp);
678         if (pmp->iroot == NULL) {
679                 *vpp = NULL;
680                 error = EINVAL;
681         } else {
682                 hammer2_chain_lock(hmp, &pmp->iroot->chain,
683                                    HAMMER2_RESOLVE_ALWAYS |
684                                    HAMMER2_RESOLVE_SHARED);
685                 vp = hammer2_igetv(pmp->iroot, &error);
686                 hammer2_chain_unlock(hmp, &pmp->iroot->chain);
687                 *vpp = vp;
688                 if (vp == NULL)
689                         kprintf("vnodefail\n");
690         }
691         hammer2_mount_unlock(hmp);
692
693         return (error);
694 }
695
696 /*
697  * Filesystem status
698  *
699  * XXX incorporate pmp->iroot->ip_data.inode_quota and data_quota
700  */
701 static
702 int
703 hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
704 {
705         hammer2_pfsmount_t *pmp;
706         hammer2_mount_t *hmp;
707
708         pmp = MPTOPMP(mp);
709         hmp = MPTOHMP(mp);
710
711         mp->mnt_stat.f_files = pmp->iroot->ip_data.inode_count +
712                                pmp->iroot->delta_icount;
713         mp->mnt_stat.f_ffree = 0;
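        /*
         * Free space below is a rough estimate: allocator_beg appears to
         * be treated as a linear append offset, so everything between it
         * and allocator_size is counted as free.
         */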
714         mp->mnt_stat.f_blocks = hmp->voldata.allocator_size / HAMMER2_PBUFSIZE;
715         mp->mnt_stat.f_bfree = (hmp->voldata.allocator_size -
716                                 hmp->voldata.allocator_beg) / HAMMER2_PBUFSIZE;
717         mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree;
718
719         *sbp = mp->mnt_stat;
720         return (0);
721 }
722
723 static
724 int
725 hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
726 {
727         hammer2_pfsmount_t *pmp;
728         hammer2_mount_t *hmp;
729
730         pmp = MPTOPMP(mp);
731         hmp = MPTOHMP(mp);
732
733         mp->mnt_vstat.f_bsize = HAMMER2_PBUFSIZE;
734         mp->mnt_vstat.f_files = pmp->iroot->ip_data.inode_count +
735                                 pmp->iroot->delta_icount;
736         mp->mnt_vstat.f_ffree = 0;
737         mp->mnt_vstat.f_blocks = hmp->voldata.allocator_size / HAMMER2_PBUFSIZE;
738         mp->mnt_vstat.f_bfree = (hmp->voldata.allocator_size -
739                                  hmp->voldata.allocator_beg) / HAMMER2_PBUFSIZE;
740         mp->mnt_vstat.f_bavail = mp->mnt_vstat.f_bfree;
741
742         *sbp = mp->mnt_vstat;
743         return (0);
744 }
745
746 /*
747  * Sync the entire filesystem; this is called from the filesystem syncer
748  * process periodically and whenever a user calls sync(1) on the hammer
749  * process periodically and whenever a user calls sync(1) on the hammer2
750  * mountpoint.
751  *
752  * Currently this is actually called from the syncer! \o/
753  *
754  * This task will have to snapshot the state of the dirty inode chain.
755  * From that, it will have to make sure all of the inodes on the dirty
756  * chain have I/O initiated. We make sure that I/O is initiated for the root
757  *
758  * If waitfor is set, we wait for media to acknowledge the new rootblock.
759  *
760  * THINKS: side A vs side B, to have sync not stall all I/O?
761  */
762 static
763 int
764 hammer2_vfs_sync(struct mount *mp, int waitfor)
765 {
766         struct hammer2_sync_info info;
767         hammer2_mount_t *hmp;
768         int flags;
769         int error;
770         int haswork;
771
772         hmp = MPTOHMP(mp);
773
774         flags = VMSC_GETVP;
775         if (waitfor & MNT_LAZY)
776                 flags |= VMSC_ONEPASS;
777
778         info.error = 0;
779         info.waitfor = MNT_NOWAIT;
780         vmntvnodescan(mp, flags | VMSC_NOWAIT,
781                       hammer2_sync_scan1,
782                       hammer2_sync_scan2, &info);
783         if (info.error == 0 && (waitfor & MNT_WAIT)) {
784                 info.waitfor = waitfor;
785                     vmntvnodescan(mp, flags,
786                                   hammer2_sync_scan1,
787                                   hammer2_sync_scan2, &info);
788
789         }
790 #if 0
791         if (waitfor == MNT_WAIT) {
792                 /* XXX */
793         } else {
794                 /* XXX */
795         }
796 #endif
797         hammer2_chain_lock(hmp, &hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
798         if (hmp->vchain.flags & (HAMMER2_CHAIN_MODIFIED |
799                                  HAMMER2_CHAIN_MODIFIED_AUX |
800                                  HAMMER2_CHAIN_SUBMODIFIED)) {
801                 hammer2_chain_flush(hmp, &hmp->vchain, 0);
802                 haswork = 1;
803         } else {
804                 haswork = 0;
805         }
806         hammer2_chain_unlock(hmp, &hmp->vchain);
807
808         error = 0;
809
810         if ((waitfor & MNT_LAZY) == 0) {
811                 waitfor = MNT_NOWAIT;
812                 vn_lock(hmp->devvp, LK_EXCLUSIVE | LK_RETRY);
813                 error = VOP_FSYNC(hmp->devvp, waitfor, 0);
814                 vn_unlock(hmp->devvp);
815         }
816
817         if (error == 0 && haswork) {
818                 struct buf *bp;
819
820                 /*
821                  * Synchronize the disk before flushing the volume
822                  * header.
823                  */
824                 bp = getpbuf(NULL);
825                 bp->b_bio1.bio_offset = 0;
826                 bp->b_bufsize = 0;
827                 bp->b_bcount = 0;
828                 bp->b_cmd = BUF_CMD_FLUSH;
829                 bp->b_bio1.bio_done = biodone_sync;
830                 bp->b_bio1.bio_flags |= BIO_SYNC;
831                 vn_strategy(hmp->devvp, &bp->b_bio1);
832                 biowait(&bp->b_bio1, "h2vol");
833                 relpbuf(bp, NULL);
834
835                 /*
836                  * Then we can safely flush the volume header.  Volume
837                  * data is locked separately to prevent ioctl functions
838                  * from deadlocking due to a configuration issue.
839                  */
840                 bp = getblk(hmp->devvp, 0, HAMMER2_PBUFSIZE, 0, 0);
841                 hammer2_voldata_lock(hmp);
842                 bcopy(&hmp->voldata, bp->b_data, HAMMER2_PBUFSIZE);
843                 hammer2_voldata_unlock(hmp);
844                 bawrite(bp);
845         }
846         return (error);
847 }
848
849 /*
850  * Sync passes.
851  *
852  * NOTE: We don't test SUBMODIFIED or MOVED here because the fsync code
853  *       won't flush on those flags.  The syncer code above will do a
854  *       general meta-data flush globally that will catch these flags.
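 *
 * NOTE: scan1 is the fast pre-filter pass; returning -1 from it is
 *       assumed to make vmntvnodescan() skip the vnode without calling
 *       scan2, which does the actual VOP_FSYNC() work.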
855  */
856 static int
857 hammer2_sync_scan1(struct mount *mp, struct vnode *vp, void *data)
858 {
859         hammer2_inode_t *ip;
860
861         ip = VTOI(vp);
862         if (vp->v_type == VNON || ip == NULL ||
863             ((ip->chain.flags & (HAMMER2_CHAIN_MODIFIED |
864                                  HAMMER2_CHAIN_DIRTYEMBED)) == 0 &&
865              RB_EMPTY(&vp->v_rbdirty_tree))) {
866                 return(-1);
867         }
868         return(0);
869 }
870
871 static int
872 hammer2_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
873 {
874         struct hammer2_sync_info *info = data;
875         hammer2_inode_t *ip;
876         int error;
877
878         ip = VTOI(vp);
879         if (vp->v_type == VNON || vp->v_type == VBAD ||
880             ((ip->chain.flags & (HAMMER2_CHAIN_MODIFIED |
881                                  HAMMER2_CHAIN_DIRTYEMBED)) == 0 &&
882             RB_EMPTY(&vp->v_rbdirty_tree))) {
883                 return(0);
884         }
885         error = VOP_FSYNC(vp, MNT_NOWAIT, 0);
886         if (error)
887                 info->error = error;
888         return(0);
889 }
890
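/*
 * NFS export hooks (vptofh/fhtovp/checkexp) are placeholder stubs for now.
 */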
891 static
892 int
893 hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp)
894 {
895         return (0);
896 }
897
898 static
899 int
900 hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
901                struct fid *fhp, struct vnode **vpp)
902 {
903         return (0);
904 }
905
906 static
907 int
908 hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
909                  int *exflagsp, struct ucred **credanonp)
910 {
911         return (0);
912 }
913
914 /*
915  * Support code for hammer2_mount().  Read, verify, and install the volume
916  * header into the HMP
917  *
918  * XXX read four volhdrs and use the one with the highest TID whose CRC
919  *     matches.
920  *
921  * XXX check iCRCs.
922  *
923  * XXX For filesystems w/ fewer than 4 volhdrs, make sure not to write to
924  *     nonexistent locations.
925  *
926  * XXX Record selected volhdr and ring updates to each of 4 volhdrs
927  */
928 static
929 int
930 hammer2_install_volume_header(hammer2_mount_t *hmp)
931 {
932         hammer2_volume_data_t *vd;
933         struct buf *bp;
934         hammer2_crc32_t crc0, crc, bcrc0, bcrc;
935         int error_reported;
936         int error;
937         int valid;
938         int i;
939
940         error_reported = 0;
941         error = 0;
942         valid = 0;
943         bp = NULL;
944
945         /*
946          * There are up to 4 copies of the volume header (syncs iterate
947          * between them so there is no single master).  We don't trust the
948          * volu_size field so we don't know precisely how large the filesystem
949          * is, so depend on the OS to return an error if we go beyond the
950          * block device's EOF.
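         *
         * (Each copy is probed at i * HAMMER2_ZONE_BYTES64 below, one per
         * reserved zone, which is why a small device may hold fewer than
         * HAMMER2_NUM_VOLHDRS copies.)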
951          */
952         for (i = 0; i < HAMMER2_NUM_VOLHDRS; i++) {
953                 error = bread(hmp->devvp, i * HAMMER2_ZONE_BYTES64,
954                               HAMMER2_VOLUME_BYTES, &bp);
955                 if (error) {
956                         brelse(bp);
957                         bp = NULL;
958                         continue;
959                 }
960
961                 vd = (struct hammer2_volume_data *) bp->b_data;
962                 if ((vd->magic != HAMMER2_VOLUME_ID_HBO) &&
963                     (vd->magic != HAMMER2_VOLUME_ID_ABO)) {
964                         brelse(bp);
965                         bp = NULL;
966                         continue;
967                 }
968
969                 if (vd->magic == HAMMER2_VOLUME_ID_ABO) {
970                         /* XXX: Reversed-endianness filesystem */
971                         kprintf("hammer2: reverse-endian filesystem detected\n");
972                         brelse(bp);
973                         bp = NULL;
974                         continue;
975                 }
976
977                 crc = vd->icrc_sects[HAMMER2_VOL_ICRC_SECT0];
978                 crc0 = hammer2_icrc32(bp->b_data + HAMMER2_VOLUME_ICRC0_OFF,
979                                       HAMMER2_VOLUME_ICRC0_SIZE);
980                 bcrc = vd->icrc_sects[HAMMER2_VOL_ICRC_SECT1];
981                 bcrc0 = hammer2_icrc32(bp->b_data + HAMMER2_VOLUME_ICRC1_OFF,
982                                        HAMMER2_VOLUME_ICRC1_SIZE);
983                 if ((crc0 != crc) || (bcrc0 != bcrc)) {
984                         kprintf("hammer2 volume header crc "
985                                 "mismatch copy #%d\t%08x %08x\n",
986                                 i, crc0, crc);
987                         error_reported = 1;
988                         brelse(bp);
989                         bp = NULL;
990                         continue;
991                 }
992                 if (valid == 0 || hmp->voldata.mirror_tid < vd->mirror_tid) {
993                         valid = 1;
994                         hmp->voldata = *vd;
995                 }
996                 brelse(bp);
997                 bp = NULL;
998         }
999         if (valid) {
1000                 error = 0;
1001                 if (error_reported)
1002                         kprintf("hammer2: a valid volume header was found\n");
1003         } else {
1004                 error = EINVAL;
1005                 kprintf("hammer2: no valid volume headers found!\n");
1006         }
1007         return (error);
1008 }
1009
1010 /*
1011  * Reconnect using the passed file pointer.  The caller must ref the
1012  * fp for us.
1013  */
1014 void
1015 hammer2_cluster_reconnect(hammer2_pfsmount_t *pmp, struct file *fp)
1016 {
1017         /*
1018          * Destroy the current connection
1019          */
1020         atomic_set_int(&pmp->msg_ctl, HAMMER2_CLUSTERCTL_KILL);
1021         while (pmp->msgrd_td || pmp->msgwr_td) {
1022                wakeup(&pmp->msg_ctl);
1023                tsleep(pmp, 0, "clstrkl", hz);
1024         }
1025
1026         /*
1027          * Drop communications descriptor
1028          */
1029         if (pmp->msg_fp) {
1030                 fdrop(pmp->msg_fp);
1031                 pmp->msg_fp = NULL;
1032         }
1033         kprintf("RESTART CONNECTION\n");
1034
1035         /*
1036          * Setup new communications descriptor
1037          */
1038         pmp->msg_ctl = 0;
1039         pmp->msg_fp = fp;
1040         pmp->msg_seq = 0;
1041         lwkt_create(hammer2_cluster_thread_rd, pmp, &pmp->msgrd_td,
1042                     NULL, 0, -1, "hammer2-msgrd");
1043         lwkt_create(hammer2_cluster_thread_wr, pmp, &pmp->msgwr_td,
1044                     NULL, 0, -1, "hammer2-msgwr");
1045 }
1046
1047 /*
1048  * Cluster controller thread.  Perform messaging functions.  We have one
1049  * thread for the reader and one for the writer.  The writer handles
1050  * shutdown requests (which should break the reader thread).
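 *
 * On-wire framing as decoded by the reader: a fixed hammer2_msg_hdr_t
 * whose size field ((cmd & HAMMER2_MSGF_SIZE) * HAMMER2_MSG_ALIGN) gives
 * the total header length including any extended header data, followed
 * by aux_bytes * HAMMER2_MSG_ALIGN bytes of auxiliary payload.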
1051  */
1052 static
1053 void
1054 hammer2_cluster_thread_rd(void *arg)
1055 {
1056         hammer2_pfsmount_t *pmp = arg;
1057         hammer2_msg_hdr_t hdr;
1058         hammer2_msg_t *msg;
1059         hammer2_state_t *state;
1060         size_t hbytes;
1061         int error = 0;
1062
1063         while ((pmp->msg_ctl & HAMMER2_CLUSTERCTL_KILL) == 0) {
1064                 /*
1065                  * Retrieve the message from the pipe or socket.
1066                  */
1067                 error = fp_read(pmp->msg_fp, &hdr, sizeof(hdr),
1068                                 NULL, 1, UIO_SYSSPACE);
1069                 if (error)
1070                         break;
1071                 if (hdr.magic != HAMMER2_MSGHDR_MAGIC) {
1072                         kprintf("hammer2: msgrd: bad magic: %04x\n",
1073                                 hdr.magic);
1074                         error = EINVAL;
1075                         break;
1076                 }
1077                 hbytes = (hdr.cmd & HAMMER2_MSGF_SIZE) * HAMMER2_MSG_ALIGN;
1078                 if (hbytes < sizeof(hdr) || hbytes > HAMMER2_MSGAUX_MAX) {
1079                         kprintf("hammer2: msgrd: bad header size %zd\n",
1080                                 hbytes);
1081                         error = EINVAL;
1082                         break;
1083                 }
1084                 /* XXX messy: mask cmd to avoid allocating state */
1085                 msg = hammer2_msg_alloc(&pmp->router,
1086                                         hdr.cmd & HAMMER2_MSGF_BASECMDMASK,
1087                                         NULL, NULL);
1088                 msg->any.head = hdr;
1089                 msg->hdr_size = hbytes;
1090                 if (hbytes > sizeof(hdr)) {
1091                         error = fp_read(pmp->msg_fp, &msg->any.head + 1,
1092                                         hbytes - sizeof(hdr),
1093                                         NULL, 1, UIO_SYSSPACE);
1094                         if (error) {
1095                                 kprintf("hammer2: short msg received\n");
1096                                 error = EINVAL;
1097                                 break;
1098                         }
1099                 }
1100                 msg->aux_size = hdr.aux_bytes * HAMMER2_MSG_ALIGN;
1101                 if (msg->aux_size > HAMMER2_MSGAUX_MAX) {
1102                         kprintf("hammer2: illegal msg payload size %zd\n",
1103                                 msg->aux_size);
1104                         error = EINVAL;
1105                         break;
1106                 }
1107                 if (msg->aux_size) {
1108                         msg->aux_data = kmalloc(msg->aux_size, pmp->mmsg,
1109                                                 M_WAITOK | M_ZERO);
1110                         error = fp_read(pmp->msg_fp, msg->aux_data,
1111                                         msg->aux_size,
1112                                         NULL, 1, UIO_SYSSPACE);
1113                         if (error) {
1114                                 kprintf("hammer2: short msg "
1115                                         "payload received\n");
1116                                 break;
1117                         }
1118                 }
1119
1120                 /*
1121                  * State machine tracking, state assignment for msg,
1122                  * returns error and discard status.  Errors are fatal
1123                  * to the connection except for EALREADY which forces
1124                  * a discard without execution.
1125                  */
1126                 error = hammer2_state_msgrx(msg);
1127                 if (error) {
1128                         /*
1129                          * Raw protocol or connection error
1130                          */
1131                         hammer2_msg_free(msg);
1132                         if (error == EALREADY)
1133                                 error = 0;
1134                 } else if (msg->state && msg->state->func) {
1135                         /*
1136                          * Message related to state which already has a
1137                          * handling function installed for it.
1138                          */
1139                         error = msg->state->func(msg->state, msg);
1140                         hammer2_state_cleanuprx(msg);
1141                 } else if ((msg->any.head.cmd & HAMMER2_MSGF_PROTOS) ==
1142                            HAMMER2_MSG_PROTO_LNK) {
1143                         /*
1144                          * Message related to the LNK protocol set
1145                          */
1146                         error = hammer2_msg_lnk_rcvmsg(msg);
1147                         hammer2_state_cleanuprx(msg);
1148                 } else if ((msg->any.head.cmd & HAMMER2_MSGF_PROTOS) ==
1149                            HAMMER2_MSG_PROTO_DBG) {
1150                         /*
1151                          * Message related to the DBG protocol set
1152                          */
1153                         error = hammer2_msg_dbg_rcvmsg(msg);
1154                         hammer2_state_cleanuprx(msg);
1155                 } else {
1156                         /*
1157                          * Other higher-level messages (e.g. vnops)
1158                          */
1159                         error = hammer2_msg_adhoc_input(msg);
1160                         hammer2_state_cleanuprx(msg);
1161                 }
1162                 msg = NULL;
1163         }
1164
1165         if (error)
1166                 kprintf("hammer2: msg read failed error %d\n", error);
1167
1168         lockmgr(&pmp->msglk, LK_EXCLUSIVE);
1169         if (msg) {
1170                 if (msg->state && msg->state->msg == msg)
1171                         msg->state->msg = NULL;
1172                 hammer2_msg_free(msg);
1173         }
1174
1175         if ((state = pmp->freerd_state) != NULL) {
1176                 pmp->freerd_state = NULL;
1177                 hammer2_state_free(state);
1178         }
1179
1180         /*
1181          * Shutdown the socket before waiting for the transmit side.
1182          *
1183  * If we are dying due to e.g. a socket disconnect versus being
1184  * killed explicitly we have to set KILL in order to kick the tx
1185          * side when it might not have any other work to do.  KILL might
1186          * already be set if we are in an unmount or reconnect.
1187          */
1188         fp_shutdown(pmp->msg_fp, SHUT_RDWR);
1189
1190         atomic_set_int(&pmp->msg_ctl, HAMMER2_CLUSTERCTL_KILL);
1191         wakeup(&pmp->msg_ctl);
1192
1193         /*
1194          * Wait for the transmit side to drain remaining messages
1195          * before cleaning up the rx state.  The transmit side will
1196          * set KILLTX and wait for the rx side to completely finish
1197          * (set msgrd_td to NULL) before cleaning up any remaining
1198          * tx states.
1199          */
1200         lockmgr(&pmp->msglk, LK_RELEASE);
1201         atomic_set_int(&pmp->msg_ctl, HAMMER2_CLUSTERCTL_KILLRX);
1202         wakeup(&pmp->msg_ctl);
1203         while ((pmp->msg_ctl & HAMMER2_CLUSTERCTL_KILLTX) == 0) {
1204                 wakeup(&pmp->msg_ctl);
1205                 tsleep(pmp, 0, "clstrkw", hz);
1206         }
1207
1208         pmp->msgrd_td = NULL;
1209         /* pmp can be ripped out from under us at this point */
1210         wakeup(pmp);
1211         lwkt_exit();
1212 }
1213
1214 static
1215 void
1216 hammer2_cluster_thread_wr(void *arg)
1217 {
1218         hammer2_pfsmount_t *pmp = arg;
1219         hammer2_msg_t *msg = NULL;
1220         hammer2_state_t *state;
1221         ssize_t res;
1222         size_t name_len;
1223         int error = 0;
1224         int retries = 20;
1225
1226         /*
1227          * Open a LNK_CONN transaction indicating that we want to take part
1228          * in the spanning tree algorithm.  Filter explicitly on the PFS
1229          * info in the iroot.
1230          *
1231          * We do not transmit our (only) LNK_SPAN until the other end has
1232          * acknowledged our link connection request.
1233          *
1234          * The transaction remains fully open for the duration of the
1235          * connection.
1236          */
1237         msg = hammer2_msg_alloc(&pmp->router, HAMMER2_LNK_CONN |
1238                                               HAMMER2_MSGF_CREATE,
1239                                 hammer2_msg_conn_reply, pmp);
1240         msg->any.lnk_conn.pfs_clid = pmp->iroot->ip_data.pfs_clid;
1241         msg->any.lnk_conn.pfs_fsid = pmp->iroot->ip_data.pfs_fsid;
1242         msg->any.lnk_conn.pfs_type = pmp->iroot->ip_data.pfs_type;
1243         msg->any.lnk_conn.proto_version = HAMMER2_SPAN_PROTO_1;
1244         msg->any.lnk_conn.peer_type = pmp->hmp->voldata.peer_type;
1245         msg->any.lnk_conn.peer_mask = 1LLU << HAMMER2_PEER_HAMMER2;
1246         name_len = pmp->iroot->ip_data.name_len;
1247         if (name_len >= sizeof(msg->any.lnk_conn.label))
1248                 name_len = sizeof(msg->any.lnk_conn.label) - 1;
1249         bcopy(pmp->iroot->ip_data.filename, msg->any.lnk_conn.label, name_len);
1250         pmp->conn_state = msg->state;
1251         msg->any.lnk_conn.label[name_len] = 0;
1252         hammer2_msg_write(msg);
1253
1254         /*
1255          * Transmit loop
1256          */
1257         msg = NULL;
1258         lockmgr(&pmp->msglk, LK_EXCLUSIVE);
1259
1260         while ((pmp->msg_ctl & HAMMER2_CLUSTERCTL_KILL) == 0 && error == 0) {
1261                 /*
1262                  * Sleep if no messages pending.  Interlock with flag while
1263                  * holding msglk.
1264                  */
1265                 if (TAILQ_EMPTY(&pmp->msgq)) {
1266                         atomic_set_int(&pmp->msg_ctl,
1267                                        HAMMER2_CLUSTERCTL_SLEEPING);
1268                         lksleep(&pmp->msg_ctl, &pmp->msglk, 0, "msgwr", hz);
1269                         atomic_clear_int(&pmp->msg_ctl,
1270                                          HAMMER2_CLUSTERCTL_SLEEPING);
1271                 }
1272
1273                 while ((msg = TAILQ_FIRST(&pmp->msgq)) != NULL) {
1274                         /*
1275                          * Remove msg from the transmit queue and do
1276                          * persist and half-closed state handling.
1277                          */
1278                         TAILQ_REMOVE(&pmp->msgq, msg, qentry);
1279                         lockmgr(&pmp->msglk, LK_RELEASE);
1280
1281                         error = hammer2_state_msgtx(msg);
1282                         if (error == EALREADY) {
1283                                 error = 0;
1284                                 hammer2_msg_free(msg);
1285                                 lockmgr(&pmp->msglk, LK_EXCLUSIVE);
1286                                 continue;
1287                         }
1288                         if (error) {
1289                                 hammer2_msg_free(msg);
1290                                 lockmgr(&pmp->msglk, LK_EXCLUSIVE);
1291                                 break;
1292                         }
1293
1294                         /*
1295                          * Dump the message to the pipe or socket.
1296                          */
1297                         error = fp_write(pmp->msg_fp, &msg->any, msg->hdr_size,
1298                                          &res, UIO_SYSSPACE);
1299                         if (error || res != msg->hdr_size) {
1300                                 if (error == 0)
1301                                         error = EINVAL;
1302                                 lockmgr(&pmp->msglk, LK_EXCLUSIVE);
1303                                 break;
1304                         }
1305                         if (msg->aux_size) {
1306                                 error = fp_write(pmp->msg_fp,
1307                                                  msg->aux_data, msg->aux_size,
1308                                                  &res, UIO_SYSSPACE);
1309                                 if (error || res != msg->aux_size) {
1310                                         if (error == 0)
1311                                                 error = EINVAL;
1312                                         lockmgr(&pmp->msglk, LK_EXCLUSIVE);
1313                                         break;
1314                                 }
1315                         }
1316                         hammer2_state_cleanuptx(msg);
1317                         lockmgr(&pmp->msglk, LK_EXCLUSIVE);
1318                 }
1319         }
1320
1321         /*
1322          * Cleanup messages pending transmission and release msgq lock.
1323          */
1324         if (error)
1325                 kprintf("hammer2: msg write failed error %d\n", error);
1326
1327         if (msg) {
1328                 if (msg->state && msg->state->msg == msg)
1329                         msg->state->msg = NULL;
1330                 hammer2_msg_free(msg);
1331         }
1332
1333         /*
1334          * Shutdown the socket.  This will cause the rx thread to get an
1335          * EOF and ensure that both threads get to a termination state.
1336          */
1337         fp_shutdown(pmp->msg_fp, SHUT_RDWR);
1338
1339         /*
1340          * Set KILLTX (which the rx side waits for), then wait for the RX
1341          * side to completely finish before we clean out any remaining
1342          * command states.
1343          */
1344         lockmgr(&pmp->msglk, LK_RELEASE);
1345         atomic_set_int(&pmp->msg_ctl, HAMMER2_CLUSTERCTL_KILLTX);
1346         wakeup(&pmp->msg_ctl);
1347         while (pmp->msgrd_td) {
1348                 wakeup(&pmp->msg_ctl);
1349                 tsleep(pmp, 0, "clstrkw", hz);
1350         }
1351         lockmgr(&pmp->msglk, LK_EXCLUSIVE);
1352
1353         /*
1354          * Simulate received MSGF_DELETE's for any remaining states.
1355          */
1356 cleanuprd:
1357         RB_FOREACH(state, hammer2_state_tree, &pmp->staterd_tree) {
1358                 if (state->func &&
1359                     (state->rxcmd & HAMMER2_MSGF_DELETE) == 0) {
1360                         lockmgr(&pmp->msglk, LK_RELEASE);
1361                         msg = hammer2_msg_alloc(&pmp->router,
1362                                                 HAMMER2_LNK_ERROR,
1363                                                 NULL, NULL);
1364                         if ((state->rxcmd & HAMMER2_MSGF_CREATE) == 0)
1365                                 msg->any.head.cmd |= HAMMER2_MSGF_CREATE;
1366                         msg->any.head.cmd |= HAMMER2_MSGF_DELETE;
1367                         msg->state = state;
1368                         state->rxcmd = msg->any.head.cmd &
1369                                        ~HAMMER2_MSGF_DELETE;
1370                         msg->state->func(state, msg);
1371                         hammer2_state_cleanuprx(msg);
1372                         lockmgr(&pmp->msglk, LK_EXCLUSIVE);
1373                         goto cleanuprd;
1374                 }
1375                 if (state->func == NULL) {
1376                         state->flags &= ~HAMMER2_STATE_INSERTED;
1377                         RB_REMOVE(hammer2_state_tree,
1378                                   &pmp->staterd_tree, state);
1379                         hammer2_state_free(state);
1380                         goto cleanuprd;
1381                 }
1382         }
1383
1384         /*
1385          * NOTE: We have to drain the msgq to handle situations
1386          *       where received states have built up output
1387          *       messages, to avoid creating messages with
1388          *       duplicate CREATE/DELETE flags.
1389          */
1390 cleanupwr:
1391         hammer2_drain_msgq(pmp);
1392         RB_FOREACH(state, hammer2_state_tree, &pmp->statewr_tree) {
1393                 if (state->func &&
1394                     (state->rxcmd & HAMMER2_MSGF_DELETE) == 0) {
1395                         lockmgr(&pmp->msglk, LK_RELEASE);
1396                         msg = hammer2_msg_alloc(&pmp->router,
1397                                                 HAMMER2_LNK_ERROR,
1398                                                 NULL, NULL);
1399                         if ((state->rxcmd & HAMMER2_MSGF_CREATE) == 0)
1400                                 msg->any.head.cmd |= HAMMER2_MSGF_CREATE;
1401                         msg->any.head.cmd |= HAMMER2_MSGF_DELETE |
1402                                              HAMMER2_MSGF_REPLY;
1403                         msg->state = state;
1404                         state->rxcmd = msg->any.head.cmd &
1405                                        ~HAMMER2_MSGF_DELETE;
1406                         msg->state->func(state, msg);
1407                         hammer2_state_cleanuprx(msg);
1408                         lockmgr(&pmp->msglk, LK_EXCLUSIVE);
1409                         goto cleanupwr;
1410                 }
1411                 if (state->func == NULL) {
1412                         state->flags &= ~HAMMER2_STATE_INSERTED;
1413                         RB_REMOVE(hammer2_state_tree,
1414                                   &pmp->statewr_tree, state);
1415                         hammer2_state_free(state);
1416                         goto cleanupwr;
1417                 }
1418         }
1419
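        /*
         * Final drain.  The simulated replies above can queue additional
         * transmit-side work, so loop back while the write-side tree is
         * still populated; the bounded retry count turns a shutdown that
         * cannot drain into a panic rather than a silent hang.
         */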
1420         hammer2_drain_msgq(pmp);
1421         if (--retries == 0)
1422                 panic("hammer2: comm thread shutdown couldn't drain");
1423         if (RB_ROOT(&pmp->statewr_tree))
1424                 goto cleanupwr;
1425
1426         if ((state = pmp->freewr_state) != NULL) {
1427                 pmp->freewr_state = NULL;
1428                 hammer2_state_free(state);
1429         }
1430
1431         lockmgr(&pmp->msglk, LK_RELEASE);
1432
1433         /*
1434          * The state trees had better be empty now
1435          */
1436         KKASSERT(RB_EMPTY(&pmp->staterd_tree));
1437         KKASSERT(RB_EMPTY(&pmp->statewr_tree));
1438         KKASSERT(pmp->conn_state == NULL);
1439
1440         /*
1441          * pmp can be ripped out from under us once msgwr_td is set to NULL.
1442          */
1443         pmp->msgwr_td = NULL;
1444         wakeup(pmp);
1445         lwkt_exit();
1446 }
1447
1448 /*
1449  * This cleans out the pending transmit message queue, adjusting any
1450  * persistent states properly in the process.
1451  *
1452  * Caller must hold pmp->msglk; it is dropped and reacquired per message.
1453  */
1454 static
1455 void
1456 hammer2_drain_msgq(hammer2_pfsmount_t *pmp)
1457 {
1458         hammer2_msg_t *msg;
1459
1460         /*
1461          * Clean out our pending transmit queue, executing the
1462          * appropriate state adjustments.  If this tries to open
1463          * any new outgoing transactions we have to loop up and
1464          * clean them out.
1465          */
1466         while ((msg = TAILQ_FIRST(&pmp->msgq)) != NULL) {
1467                 TAILQ_REMOVE(&pmp->msgq, msg, qentry);
1468                 lockmgr(&pmp->msglk, LK_RELEASE);
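                /*
                 * Detach the message from its state (if it is still the
                 * state's pending message) before running the normal
                 * transmit-side state adjustments and freeing it.
                 */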
1469                 if (msg->state && msg->state->msg == msg)
1470                         msg->state->msg = NULL;
1471                 if (hammer2_state_msgtx(msg))
1472                         hammer2_msg_free(msg);
1473                 else
1474                         hammer2_state_cleanuptx(msg);
1475                 lockmgr(&pmp->msglk, LK_EXCLUSIVE);
1476         }
1477 }
1478
1479 /*
1480  * Called with msglk held after queueing a new message, wakes up the
1481  * transmit thread.  We use an interlock (the CLUSTERCTL_SLEEPING bit
1482  * in msg_ctl) to avoid unnecessary wakeups.
1483  */
1484 void
1485 hammer2_clusterctl_wakeup(hammer2_pfsmount_t *pmp)
1486 {
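        /*
         * NOTE: The transmit loop is assumed to set CLUSTERCTL_SLEEPING
         *       under msglk before sleeping on msg_ctl, so with msglk held
         *       by our caller a wakeup cannot be lost here.
         */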
1487         if (pmp->msg_ctl & HAMMER2_CLUSTERCTL_SLEEPING) {
1488                 atomic_clear_int(&pmp->msg_ctl, HAMMER2_CLUSTERCTL_SLEEPING);
1489                 wakeup(&pmp->msg_ctl);
1490         }
1491 }
1492
1493 static int
1494 hammer2_msg_lnk_rcvmsg(hammer2_msg_t *msg)
1495 {
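        /*
         * Dispatch on the command plus its transaction flags.  Only the
         * LNK_CONN open and LNK_SPAN open/close cases are of interest;
         * everything else is ignored.
         */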
1496         switch(msg->any.head.cmd & HAMMER2_MSGF_TRANSMASK) {
1497         case HAMMER2_LNK_CONN | HAMMER2_MSGF_CREATE:
1498                 /*
1499                  * reply & leave trans open
1500                  * Reply and leave the transaction open.
1501                 kprintf("CONN RECEIVE - (just ignore it)\n");
1502                 hammer2_msg_result(msg, 0);
1503                 break;
1504         case HAMMER2_LNK_SPAN | HAMMER2_MSGF_CREATE:
1505                 kprintf("SPAN RECEIVE - ADDED FROM CLUSTER\n");
1506                 break;
1507         case HAMMER2_LNK_SPAN | HAMMER2_MSGF_DELETE:
1508                 kprintf("SPAN RECEIVE - DELETED FROM CLUSTER\n");
1509                 break;
1510         default:
1511                 break;
1512         }
1513         return(0);
1514 }
1515
1516 /*
1517  * This function is called when the other end replies to our LNK_CONN
1518  * request.
1519  *
1520  * We transmit our (single) SPAN on the initial reply, leaving that
1521  * transaction open too.
1522  */
1523 static int
1524 hammer2_msg_conn_reply(hammer2_state_t *state, hammer2_msg_t *msg)
1525 {
1526         hammer2_pfsmount_t *pmp = state->any.pmp;
1527         hammer2_mount_t *hmp = pmp->hmp;
1528         hammer2_msg_t *rmsg;
1529         size_t name_len;
1530         int copyid;
1531
1532         kprintf("LNK_CONN REPLY RECEIVED CMD %08x\n", msg->any.head.cmd);
1533
1534         if (msg->any.head.cmd & HAMMER2_MSGF_CREATE) {
1535                 kprintf("LNK_CONN transaction replied to, initiate SPAN\n");
1536                 rmsg = hammer2_msg_alloc(&pmp->router, HAMMER2_LNK_SPAN |
1537                                                        HAMMER2_MSGF_CREATE,
1538                                         hammer2_msg_span_reply, pmp);
1539                 rmsg->any.lnk_span.pfs_clid = pmp->iroot->ip_data.pfs_clid;
1540                 rmsg->any.lnk_span.pfs_fsid = pmp->iroot->ip_data.pfs_fsid;
1541                 rmsg->any.lnk_span.pfs_type = pmp->iroot->ip_data.pfs_type;
1542                 rmsg->any.lnk_span.peer_type = pmp->hmp->voldata.peer_type;
1543                 rmsg->any.lnk_span.proto_version = HAMMER2_SPAN_PROTO_1;
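                /*
                 * Copy the PFS label, truncating it if necessary so it
                 * always fits and remains NUL-terminated.
                 */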
1544                 name_len = pmp->iroot->ip_data.name_len;
1545                 if (name_len >= sizeof(rmsg->any.lnk_span.label))
1546                         name_len = sizeof(rmsg->any.lnk_span.label) - 1;
1547                 bcopy(pmp->iroot->ip_data.filename,
1548                       rmsg->any.lnk_span.label,
1549                       name_len);
1550                 rmsg->any.lnk_span.label[name_len] = 0;
1551                 hammer2_msg_write(rmsg);
1552
1553                 /*
1554                  * Dump the configuration stored in the volume header
1555                  */
1556                 hammer2_voldata_lock(hmp);
1557                 for (copyid = 0; copyid < HAMMER2_COPYID_COUNT; ++copyid) {
1558                         if (hmp->voldata.copyinfo[copyid].copyid == 0)
1559                                 continue;
1560                         hammer2_volconf_update(pmp, copyid);
1561                 }
1562                 hammer2_voldata_unlock(hmp);
1563         }
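        /*
         * If the remote closed its side of the LNK_CONN transaction and we
         * have not yet sent our own DELETE, reply now to complete the
         * two-way termination and forget the connection state.
         */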
1564         if ((state->txcmd & HAMMER2_MSGF_DELETE) == 0 &&
1565             (msg->any.head.cmd & HAMMER2_MSGF_DELETE)) {
1566                 kprintf("LNK_CONN transaction terminated by remote\n");
1567                 pmp->conn_state = NULL;
1568                 hammer2_msg_reply(msg, 0);
1569         }
1570         return(0);
1571 }
1572
1573 /*
1574  * Remote terminated our span transaction.  We have to terminate our side.
1575  */
1576 static int
1577 hammer2_msg_span_reply(hammer2_state_t *state, hammer2_msg_t *msg)
1578 {
1579         hammer2_pfsmount_t *pmp = state->any.pmp;
1580
1581         kprintf("SPAN REPLY - Our sent span was terminated by the remote %08x state %p\n", msg->any.head.cmd, state);
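        /*
         * Send our half of the DELETE if we have not already done so,
         * completing the two-way termination of the SPAN transaction.
         */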
1582         if ((state->txcmd & HAMMER2_MSGF_DELETE) == 0 &&
1583             (msg->any.head.cmd & HAMMER2_MSGF_DELETE)) {
1584                 hammer2_msg_reply(msg, 0);
1585         }
1586         return(0);
1587 }
1588
1589 /*
1590  * Volume configuration updates are passed on to the userland service
1591  * daemon via the open LNK_CONN transaction.
1592  */
1593 void
1594 hammer2_volconf_update(hammer2_pfsmount_t *pmp, int index)
1595 {
1596         hammer2_mount_t *hmp = pmp->hmp;
1597         hammer2_msg_t *msg;
1598
1599         /* XXX interlock against connection state termination */
1600         kprintf("volconf update %p\n", pmp->conn_state);
1601         if (pmp->conn_state) {
1602                 kprintf("TRANSMIT VOLCONF VIA OPEN CONN TRANSACTION\n");
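                /*
                 * The update rides on the open LNK_CONN transaction as a
                 * one-way message (no reply callback), carrying the
                 * affected copyinfo[] slot, the media fsid and the index.
                 */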
1603                 msg = hammer2_msg_alloc(&pmp->router, HAMMER2_LNK_VOLCONF,
1604                                         NULL, NULL);
1605                 msg->state = pmp->conn_state;
1606                 msg->any.lnk_volconf.copy = hmp->voldata.copyinfo[index];
1607                 msg->any.lnk_volconf.mediaid = hmp->voldata.fsid;
1608                 msg->any.lnk_volconf.index = index;
1609                 hammer2_msg_write(msg);
1610         }
1611 }