/*
 * hammer2 - Messaging layer separation work part 1
 * sys/vfs/hammer2/hammer2_vfsops.c
 */
1 /*-
2  * Copyright (c) 2011, 2012 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/nlookup.h>
38 #include <sys/vnode.h>
39 #include <sys/mount.h>
40 #include <sys/fcntl.h>
41 #include <sys/buf.h>
42 #include <sys/uuid.h>
43 #include <sys/vfsops.h>
44 #include <sys/sysctl.h>
45 #include <sys/socket.h>
46
47 #include "hammer2.h"
48 #include "hammer2_disk.h"
49 #include "hammer2_mount.h"
50
/*
 * Scan state shared by the hammer2_sync_scan1/scan2 callbacks during
 * the vnode scans performed by hammer2_vfs_sync().
 */
struct hammer2_sync_info {
	int error;		/* sticky: first error seen during the scan */
	int waitfor;		/* MNT_WAIT/MNT_NOWAIT mode for this pass */
};
55
/*
 * Global list of per-device hammer2_mount structures.  Multiple PFS
 * mounts may share one hammer2_mount; the list and the pmp_count
 * manipulation in mount/unmount are serialized by hammer2_mntlk.
 */
TAILQ_HEAD(hammer2_mntlist, hammer2_mount);
static struct hammer2_mntlist hammer2_mntlist;
static struct lock hammer2_mntlk;

/*
 * Tunables and I/O statistics, exported under vfs.hammer2.* by the
 * SYSCTL declarations below.  iod_* count device-level I/O, ioa_*
 * count logical (above-device) I/O.
 */
int hammer2_debug;
int hammer2_cluster_enable = 1;
int hammer2_hardlink_enable = 1;
long hammer2_iod_file_read;
long hammer2_iod_meta_read;
long hammer2_iod_indr_read;
long hammer2_iod_file_write;
long hammer2_iod_meta_write;
long hammer2_iod_indr_write;
long hammer2_iod_volu_write;
long hammer2_ioa_file_read;
long hammer2_ioa_meta_read;
long hammer2_ioa_indr_read;
long hammer2_ioa_file_write;
long hammer2_ioa_meta_write;
long hammer2_ioa_indr_write;
long hammer2_ioa_volu_write;

SYSCTL_NODE(_vfs, OID_AUTO, hammer2, CTLFLAG_RW, 0, "HAMMER2 filesystem");

SYSCTL_INT(_vfs_hammer2, OID_AUTO, debug, CTLFLAG_RW,
	   &hammer2_debug, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_enable, CTLFLAG_RW,
	   &hammer2_cluster_enable, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, hardlink_enable, CTLFLAG_RW,
	   &hammer2_hardlink_enable, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_read, CTLFLAG_RW,
	   &hammer2_iod_file_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_read, CTLFLAG_RW,
	   &hammer2_iod_meta_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_read, CTLFLAG_RW,
	   &hammer2_iod_indr_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_write, CTLFLAG_RW,
	   &hammer2_iod_file_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_write, CTLFLAG_RW,
	   &hammer2_iod_meta_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_write, CTLFLAG_RW,
	   &hammer2_iod_indr_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_write, CTLFLAG_RW,
	   &hammer2_iod_volu_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_file_read, CTLFLAG_RW,
	   &hammer2_ioa_file_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_meta_read, CTLFLAG_RW,
	   &hammer2_ioa_meta_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_indr_read, CTLFLAG_RW,
	   &hammer2_ioa_indr_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_file_write, CTLFLAG_RW,
	   &hammer2_ioa_file_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_meta_write, CTLFLAG_RW,
	   &hammer2_ioa_meta_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_indr_write, CTLFLAG_RW,
	   &hammer2_ioa_indr_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_volu_write, CTLFLAG_RW,
	   &hammer2_ioa_volu_write, 0, "");
114
/* VFS entry points (wired into hammer2_vfsops below) */
static int hammer2_vfs_init(struct vfsconf *conf);
static int hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
				struct ucred *cred);
static int hammer2_remount(struct mount *, char *, struct vnode *,
				struct ucred *);
static int hammer2_vfs_unmount(struct mount *mp, int mntflags);
static int hammer2_vfs_root(struct mount *mp, struct vnode **vpp);
static int hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp,
				struct ucred *cred);
static int hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
				struct ucred *cred);
static int hammer2_vfs_sync(struct mount *mp, int waitfor);
static int hammer2_vfs_vget(struct mount *mp, struct vnode *dvp,
				ino_t ino, struct vnode **vpp);
static int hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
				struct fid *fhp, struct vnode **vpp);
static int hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp);
static int hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
				int *exflagsp, struct ucred **credanonp);

/* Internal mount/sync helpers */
static int hammer2_install_volume_header(hammer2_mount_t *hmp);
static int hammer2_sync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int hammer2_sync_scan2(struct mount *mp, struct vnode *vp, void *data);

/* Cluster-messaging threads and message handlers */
static void hammer2_cluster_thread_rd(void *arg);
static void hammer2_cluster_thread_wr(void *arg);
static int hammer2_msg_conn_reply(hammer2_state_t *state, hammer2_msg_t *msg);
static int hammer2_msg_span_reply(hammer2_state_t *state, hammer2_msg_t *msg);
static int hammer2_msg_lnk_rcvmsg(hammer2_msg_t *msg);
static void hammer2_drain_msgq(hammer2_pfsmount_t *pmp);
145
/*
 * HAMMER2 vfs operations vector, registered with the kernel via
 * VFS_SET() below.
 */
static struct vfsops hammer2_vfsops = {
	.vfs_init	= hammer2_vfs_init,
	.vfs_sync	= hammer2_vfs_sync,
	.vfs_mount	= hammer2_vfs_mount,
	.vfs_unmount	= hammer2_vfs_unmount,
	.vfs_root	= hammer2_vfs_root,
	.vfs_statfs	= hammer2_vfs_statfs,
	.vfs_statvfs	= hammer2_vfs_statvfs,
	.vfs_vget	= hammer2_vfs_vget,
	.vfs_vptofh	= hammer2_vfs_vptofh,
	.vfs_fhtovp	= hammer2_vfs_fhtovp,
	.vfs_checkexp	= hammer2_vfs_checkexp
};

/* Malloc zone for mount-related allocations (pmp/hmp structures) */
MALLOC_DEFINE(M_HAMMER2, "HAMMER2-mount", "");

/* Register the filesystem and its module version */
VFS_SET(hammer2_vfsops, hammer2, 0);
MODULE_VERSION(hammer2, 1);
167
168 static
169 int
170 hammer2_vfs_init(struct vfsconf *conf)
171 {
172         int error;
173
174         error = 0;
175
176         if (HAMMER2_BLOCKREF_BYTES != sizeof(struct hammer2_blockref))
177                 error = EINVAL;
178         if (HAMMER2_INODE_BYTES != sizeof(struct hammer2_inode_data))
179                 error = EINVAL;
180         if (HAMMER2_ALLOCREF_BYTES != sizeof(struct hammer2_allocref))
181                 error = EINVAL;
182         if (HAMMER2_VOLUME_BYTES != sizeof(struct hammer2_volume_data))
183                 error = EINVAL;
184
185         if (error)
186                 kprintf("HAMMER2 structure size mismatch; cannot continue.\n");
187
188         lockinit(&hammer2_mntlk, "mntlk", 0, 0);
189         TAILQ_INIT(&hammer2_mntlist);
190
191         return (error);
192 }
193
/*
 * Mount or remount HAMMER2 fileystem from physical media
 *
 *	mountroot
 *		mp		mount point structure
 *		path		NULL
 *		data		<unused>
 *		cred		<unused>
 *
 *	mount
 *		mp		mount point structure
 *		path		path to mount point
 *		data		pointer to argument structure in user space
 *			volume	volume path (device@LABEL form)
 *			hflags	user mount flags
 *		cred		user credentials
 *
 * RETURNS:	0	Success
 *		!0	error number
 */
static
int
hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
		  struct ucred *cred)
{
	struct hammer2_mount_info info;
	hammer2_pfsmount_t *pmp;
	hammer2_mount_t *hmp;
	hammer2_key_t lhc;
	struct vnode *devvp;
	struct nlookupdata nd;
	hammer2_chain_t *parent;
	hammer2_chain_t *schain;
	hammer2_chain_t *rchain;
	char devstr[MNAMELEN];
	size_t size;
	size_t done;
	char *dev;
	char *label;
	int ronly = 1;
	int create_hmp;
	int error;

	hmp = NULL;
	pmp = NULL;
	dev = NULL;
	label = NULL;
	devvp = NULL;

	kprintf("hammer2_mount\n");

	if (path == NULL) {
		/*
		 * Root mount (not yet supported)
		 */
		bzero(&info, sizeof(info));
		info.cluster_fd = -1;
		return (EOPNOTSUPP);
	} else {
		/*
		 * Non-root mount or updating a mount.  Copy the user's
		 * argument structure and the "device@LABEL" volume string
		 * in from userspace.
		 */
		error = copyin(data, &info, sizeof(info));
		if (error)
			return (error);

		error = copyinstr(info.volume, devstr, MNAMELEN - 1, &done);
		if (error)
			return (error);

		/*
		 * Extract device and label.  The '@' separator must be
		 * present and the label non-empty.
		 *
		 * NOTE(review): (label + 1) - dev is a ptrdiff_t compared
		 * against the size_t 'done'; presumably always positive
		 * here since label >= dev -- confirm.
		 */
		dev = devstr;
		label = strchr(devstr, '@');
		if (label == NULL ||
		    ((label + 1) - dev) > done) {
			return (EINVAL);
		}
		*label = '\0';
		label++;
		if (*label == '\0')
			return (EINVAL);

		if (mp->mnt_flag & MNT_UPDATE) {
			/* Update mount */
			/* HAMMER2 implements NFS export via mountctl */
			hmp = MPTOHMP(mp);
			devvp = hmp->devvp;
			error = hammer2_remount(mp, path, devvp, cred);
			return error;
		}
	}

	/*
	 * PFS mount
	 *
	 * Lookup name and verify it refers to a block device.
	 */
	error = nlookup_init(&nd, dev, UIO_SYSSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = cache_vref(&nd.nl_nch, nd.nl_cred, &devvp);
	nlookup_done(&nd);

	if (error == 0) {
		if (vn_isdisk(devvp, &error))
			error = vfs_mountedon(devvp);
	}

	/*
	 * Determine if the device has already been mounted.  After this
	 * check hmp will be non-NULL if we are doing the second or more
	 * hammer2 mounts from the same device.
	 */
	lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);
	TAILQ_FOREACH(hmp, &hammer2_mntlist, mntentry) {
		if (hmp->devvp == devvp)
			break;
	}

	/*
	 * Open the device if this isn't a secondary mount
	 */
	if (hmp) {
		create_hmp = 0;
	} else {
		create_hmp = 1;
		if (error == 0 && vcount(devvp) > 0)
			error = EBUSY;

		/*
		 * Now open the device
		 */
		if (error == 0) {
			ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);
			vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
			error = vinvalbuf(devvp, V_SAVE, 0, 0);
			if (error == 0) {
				error = VOP_OPEN(devvp,
						 ronly ? FREAD : FREAD | FWRITE,
						 FSCRED, NULL);
			}
			vn_unlock(devvp);
		}
		if (error && devvp) {
			vrele(devvp);
			devvp = NULL;
		}
		if (error) {
			lockmgr(&hammer2_mntlk, LK_RELEASE);
			return error;
		}
	}

	/*
	 * Block device opened successfully, finish initializing the
	 * mount structure.
	 *
	 * From this point on we have to call hammer2_unmount() on failure.
	 */
	pmp = kmalloc(sizeof(*pmp), M_HAMMER2, M_WAITOK | M_ZERO);
	mp->mnt_data = (qaddr_t)pmp;
	pmp->mp = mp;
	kmalloc_create(&pmp->mmsg, "HAMMER2-pfsmsg");
	lockinit(&pmp->msglk, "h2msg", 0, 0);
	TAILQ_INIT(&pmp->msgq);
	RB_INIT(&pmp->staterd_tree);
	RB_INIT(&pmp->statewr_tree);

	if (create_hmp) {
		/* First mount from this device: create the master hmp */
		hmp = kmalloc(sizeof(*hmp), M_HAMMER2, M_WAITOK | M_ZERO);
		hmp->ronly = ronly;
		hmp->devvp = devvp;
		kmalloc_create(&hmp->minode, "HAMMER2-inodes");
		kmalloc_create(&hmp->mchain, "HAMMER2-chains");
		TAILQ_INSERT_TAIL(&hammer2_mntlist, hmp, mntentry);
	}
	ccms_domain_init(&pmp->ccms_dom);
	pmp->hmp = hmp;
	pmp->router.pmp = pmp;
	++hmp->pmp_count;
	lockmgr(&hammer2_mntlk, LK_RELEASE);
	kprintf("hammer2_mount hmp=%p pmpcnt=%d\n", hmp, hmp->pmp_count);
	
	mp->mnt_flag = MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;	/* all entry pts are SMP */

	if (create_hmp) {
		/*
		 * vchain setup. vchain.data is special cased to NULL.
		 * vchain.refs is initialized and will never drop to 0.
		 */
		hmp->vchain.refs = 1;
		hmp->vchain.data = (void *)&hmp->voldata;
		hmp->vchain.bref.type = HAMMER2_BREF_TYPE_VOLUME;
		hmp->vchain.bref.data_off = 0 | HAMMER2_PBUFRADIX;
		hmp->vchain.bref_flush = hmp->vchain.bref;
		ccms_cst_init(&hmp->vchain.cst, NULL);
		/* hmp->vchain.u.xxx is left NULL */
		lockinit(&hmp->alloclk, "h2alloc", 0, 0);
		lockinit(&hmp->voldatalk, "voldata", 0, LK_CANRECURSE);

		/*
		 * Install the volume header
		 */
		error = hammer2_install_volume_header(hmp);
		if (error) {
			hammer2_vfs_unmount(mp, MNT_FORCE);
			return error;
		}
	}

	/*
	 * required mount structure initializations
	 */
	mp->mnt_stat.f_iosize = HAMMER2_PBUFSIZE;
	mp->mnt_stat.f_bsize = HAMMER2_PBUFSIZE;

	mp->mnt_vstat.f_frsize = HAMMER2_PBUFSIZE;
	mp->mnt_vstat.f_bsize = HAMMER2_PBUFSIZE;

	/*
	 * Optional fields
	 */
	mp->mnt_iosize_max = MAXPHYS;

	/*
	 * First locate the super-root inode, which is key 0 relative to the
	 * volume header's blockset.
	 *
	 * Then locate the root inode by scanning the directory keyspace
	 * represented by the label.
	 */
	if (create_hmp) {
		parent = &hmp->vchain;
		hammer2_chain_lock(hmp, parent, HAMMER2_RESOLVE_ALWAYS);
		schain = hammer2_chain_lookup(hmp, &parent,
				      HAMMER2_SROOT_KEY, HAMMER2_SROOT_KEY, 0);
		hammer2_chain_unlock(hmp, parent);
		if (schain == NULL) {
			kprintf("hammer2_mount: invalid super-root\n");
			hammer2_vfs_unmount(mp, MNT_FORCE);
			return EINVAL;
		}
		hammer2_chain_ref(hmp, schain);	/* for hmp->schain */
		hmp->schain = schain;		/* left locked */
	} else {
		schain = hmp->schain;
		hammer2_chain_lock(hmp, schain, HAMMER2_RESOLVE_ALWAYS);
	}

	/*
	 * Scan the label's dirhash keyspace under the super-root for an
	 * inode whose filename matches the label exactly.
	 */
	parent = schain;
	lhc = hammer2_dirhash(label, strlen(label));
	rchain = hammer2_chain_lookup(hmp, &parent,
				      lhc, lhc + HAMMER2_DIRHASH_LOMASK,
				      0);
	while (rchain) {
		if (rchain->bref.type == HAMMER2_BREF_TYPE_INODE &&
		    rchain->u.ip &&
		    strcmp(label, rchain->data->ipdata.filename) == 0) {
			break;
		}
		rchain = hammer2_chain_next(hmp, &parent, rchain,
					    lhc, lhc + HAMMER2_DIRHASH_LOMASK,
					    0);
	}
	hammer2_chain_unlock(hmp, parent);
	if (rchain == NULL) {
		kprintf("hammer2_mount: PFS label not found\n");
		hammer2_vfs_unmount(mp, MNT_FORCE);
		return EINVAL;
	}
	if (rchain->flags & HAMMER2_CHAIN_MOUNTED) {
		hammer2_chain_unlock(hmp, rchain);
		kprintf("hammer2_mount: PFS label already mounted!\n");
		hammer2_vfs_unmount(mp, MNT_FORCE);
		return EBUSY;
	}
	atomic_set_int(&rchain->flags, HAMMER2_CHAIN_MOUNTED);

	hammer2_chain_ref(hmp, rchain);	/* for pmp->rchain */
	hammer2_chain_unlock(hmp, rchain);
	pmp->rchain = rchain;		/* left held & unlocked */
	pmp->iroot = rchain->u.ip;	/* implied hold from rchain */
	pmp->iroot->pmp = pmp;

	kprintf("iroot %p\n", pmp->iroot);

	/*
	 * Ref the cluster management messaging descriptor.  The mount
	 * program deals with the other end of the communications pipe.
	 */
	pmp->msg_fp = holdfp(curproc->p_fd, info.cluster_fd, -1);
	if (pmp->msg_fp == NULL) {
		kprintf("hammer2_mount: bad cluster_fd!\n");
		hammer2_vfs_unmount(mp, MNT_FORCE);
		return EBADF;
	}
	lwkt_create(hammer2_cluster_thread_rd, pmp, &pmp->msgrd_td,
		    NULL, 0, -1, "hammer2-msgrd");
	lwkt_create(hammer2_cluster_thread_wr, pmp, &pmp->msgwr_td,
		    NULL, 0, -1, "hammer2-msgwr");

	/*
	 * Finish setup
	 */
	vfs_getnewfsid(mp);
	vfs_add_vnodeops(mp, &hammer2_vnode_vops, &mp->mnt_vn_norm_ops);
	vfs_add_vnodeops(mp, &hammer2_spec_vops, &mp->mnt_vn_spec_ops);
	vfs_add_vnodeops(mp, &hammer2_fifo_vops, &mp->mnt_vn_fifo_ops);

	/*
	 * Fill in the from/on names for statfs.
	 *
	 * NOTE(review): this second copyinstr of info.volume ignores its
	 * return value; the string was already validated above so failure
	 * seems unlikely, but confirm this is intentional.
	 */
	copyinstr(info.volume, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, &size);
	bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	bzero(mp->mnt_stat.f_mntonname, sizeof(mp->mnt_stat.f_mntonname));
	copyinstr(path, mp->mnt_stat.f_mntonname,
		  sizeof(mp->mnt_stat.f_mntonname) - 1,
		  &size);

	/*
	 * Initial statfs to prime mnt_stat.
	 */
	hammer2_vfs_statfs(mp, &mp->mnt_stat, cred);

	return 0;
}
519
/*
 * Handle an MNT_UPDATE remount request.
 *
 * Currently a no-op placeholder: the request is accepted without
 * changing any mount state.
 */
static
int
hammer2_remount(struct mount *mp, char *path, struct vnode *devvp,
		struct ucred *cred)
{
	return (0);
}
527
/*
 * Unmount a PFS.  Flushes dirty vnodes and chains, tears down the
 * cluster messaging threads, and drops the per-device hammer2_mount
 * when the last PFS referencing it goes away.
 */
static
int
hammer2_vfs_unmount(struct mount *mp, int mntflags)
{
	hammer2_pfsmount_t *pmp;
	hammer2_mount_t *hmp;
	int flags;
	int error = 0;
	int ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);
	struct vnode *devvp;

	pmp = MPTOPMP(mp);
	hmp = pmp->hmp;
	flags = 0;

	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

	hammer2_mount_exlock(hmp);

	/*
	 * If mount initialization proceeded far enough we must flush
	 * its vnodes.
	 *
	 * NOTE(review): on vflush failure this returns with the hmp
	 * exclusive lock still held (no hammer2_mount_unlock on this
	 * path) -- looks like a lock leak; confirm.
	 */
	if (pmp->iroot)
		error = vflush(mp, 0, flags);

	if (error)
		return error;

	lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);
	--hmp->pmp_count;
	kprintf("hammer2_unmount hmp=%p pmpcnt=%d\n", hmp, hmp->pmp_count);

	/*
	 * Flush any left over chains.  The voldata lock is only used
	 * to synchronize against HAMMER2_CHAIN_MODIFIED_AUX.
	 */
	hammer2_voldata_lock(hmp);
	if (hmp->vchain.flags & (HAMMER2_CHAIN_MODIFIED |
				 HAMMER2_CHAIN_MODIFIED_AUX |
				 HAMMER2_CHAIN_SUBMODIFIED)) {
		hammer2_voldata_unlock(hmp);
		hammer2_vfs_sync(mp, MNT_WAIT);
	} else {
		hammer2_voldata_unlock(hmp);
	}
	if (hmp->pmp_count == 0) {
		/* Last PFS: the final sync above should have left nothing */
		if (hmp->vchain.flags & (HAMMER2_CHAIN_MODIFIED |
					 HAMMER2_CHAIN_MODIFIED_AUX |
					 HAMMER2_CHAIN_SUBMODIFIED)) {
			kprintf("hammer2_unmount: chains left over after "
				"final sync\n");
			if (hammer2_debug & 0x0010)
				Debugger("entered debugger");
		}
	}

	/*
	 * Cleanup the root and super-root chain elements (which should be
	 * clean).
	 */
	pmp->iroot = NULL;
	if (pmp->rchain) {
		atomic_clear_int(&pmp->rchain->flags, HAMMER2_CHAIN_MOUNTED);
		KKASSERT(pmp->rchain->refs == 1);
		hammer2_chain_drop(hmp, pmp->rchain);
		pmp->rchain = NULL;
	}
	ccms_domain_uninit(&pmp->ccms_dom);

	/*
	 * Ask the cluster controller to go away; poll until both the
	 * reader and writer threads have exited.
	 */
	atomic_set_int(&pmp->msg_ctl, HAMMER2_CLUSTERCTL_KILL);
	while (pmp->msgrd_td || pmp->msgwr_td) {
		wakeup(&pmp->msg_ctl);
		tsleep(pmp, 0, "clstrkl", hz);
	}

	/*
	 * Drop communications descriptor
	 */
	if (pmp->msg_fp) {
		fdrop(pmp->msg_fp);
		pmp->msg_fp = NULL;
	}

	/*
	 * If no PFS's left drop the master hammer2_mount for the device.
	 */
	if (hmp->pmp_count == 0) {
		if (hmp->schain) {
			KKASSERT(hmp->schain->refs == 1);
			hammer2_chain_drop(hmp, hmp->schain);
			hmp->schain = NULL;
		}

		/*
		 * Finish up with the device vnode
		 */
		if ((devvp = hmp->devvp) != NULL) {
			vinvalbuf(devvp, (ronly ? 0 : V_SAVE), 0, 0);
			hmp->devvp = NULL;
			VOP_CLOSE(devvp, (ronly ? FREAD : FREAD|FWRITE));
			vrele(devvp);
			devvp = NULL;
		}
	}
	hammer2_mount_unlock(hmp);

	pmp->mp = NULL;
	pmp->hmp = NULL;
	mp->mnt_data = NULL;

	kmalloc_destroy(&pmp->mmsg);

	kfree(pmp, M_HAMMER2);
	if (hmp->pmp_count == 0) {
		TAILQ_REMOVE(&hammer2_mntlist, hmp, mntentry);
		kmalloc_destroy(&hmp->minode);
		kmalloc_destroy(&hmp->mchain);
		kfree(hmp, M_HAMMER2);
	}
	lockmgr(&hammer2_mntlk, LK_RELEASE);
	return (error);
}
655
/*
 * Get a vnode by inode number.  Not yet implemented; always returns
 * EOPNOTSUPP (the kprintf flags any unexpected caller).
 */
static
int
hammer2_vfs_vget(struct mount *mp, struct vnode *dvp,
	     ino_t ino, struct vnode **vpp)
{
	kprintf("hammer2_vget\n");
	return (EOPNOTSUPP);
}
664
665 static
666 int
667 hammer2_vfs_root(struct mount *mp, struct vnode **vpp)
668 {
669         hammer2_pfsmount_t *pmp;
670         hammer2_mount_t *hmp;
671         int error;
672         struct vnode *vp;
673
674         pmp = MPTOPMP(mp);
675         hmp = pmp->hmp;
676         hammer2_mount_exlock(hmp);
677         if (pmp->iroot == NULL) {
678                 *vpp = NULL;
679                 error = EINVAL;
680         } else {
681                 hammer2_chain_lock(hmp, &pmp->iroot->chain,
682                                    HAMMER2_RESOLVE_ALWAYS |
683                                    HAMMER2_RESOLVE_SHARED);
684                 vp = hammer2_igetv(pmp->iroot, &error);
685                 hammer2_chain_unlock(hmp, &pmp->iroot->chain);
686                 *vpp = vp;
687                 if (vp == NULL)
688                         kprintf("vnodefail\n");
689         }
690         hammer2_mount_unlock(hmp);
691
692         return (error);
693 }
694
695 /*
696  * Filesystem status
697  *
698  * XXX incorporate pmp->iroot->ip_data.inode_quota and data_quota
699  */
700 static
701 int
702 hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
703 {
704         hammer2_pfsmount_t *pmp;
705         hammer2_mount_t *hmp;
706
707         pmp = MPTOPMP(mp);
708         hmp = MPTOHMP(mp);
709
710         mp->mnt_stat.f_files = pmp->iroot->ip_data.inode_count +
711                                pmp->iroot->delta_icount;
712         mp->mnt_stat.f_ffree = 0;
713         mp->mnt_stat.f_blocks = hmp->voldata.allocator_size / HAMMER2_PBUFSIZE;
714         mp->mnt_stat.f_bfree = (hmp->voldata.allocator_size -
715                                 hmp->voldata.allocator_beg) / HAMMER2_PBUFSIZE;
716         mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree;
717
718         *sbp = mp->mnt_stat;
719         return (0);
720 }
721
722 static
723 int
724 hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
725 {
726         hammer2_pfsmount_t *pmp;
727         hammer2_mount_t *hmp;
728
729         pmp = MPTOPMP(mp);
730         hmp = MPTOHMP(mp);
731
732         mp->mnt_vstat.f_bsize = HAMMER2_PBUFSIZE;
733         mp->mnt_vstat.f_files = pmp->iroot->ip_data.inode_count +
734                                 pmp->iroot->delta_icount;
735         mp->mnt_vstat.f_ffree = 0;
736         mp->mnt_vstat.f_blocks = hmp->voldata.allocator_size / HAMMER2_PBUFSIZE;
737         mp->mnt_vstat.f_bfree = (hmp->voldata.allocator_size -
738                                  hmp->voldata.allocator_beg) / HAMMER2_PBUFSIZE;
739         mp->mnt_vstat.f_bavail = mp->mnt_vstat.f_bfree;
740
741         *sbp = mp->mnt_vstat;
742         return (0);
743 }
744
/*
 * Sync the entire filesystem; this is called from the filesystem syncer
 * process periodically and whenever a user calls sync(1) on the hammer
 * mountpoint.
 *
 * Currently is actually called from the syncer! \o/
 *
 * This task will have to snapshot the state of the dirty inode chain.
 * From that, it will have to make sure all of the inodes on the dirty
 * chain have IO initiated. We make sure that io is initiated for the root
 * block.
 *
 * If waitfor is set, we wait for media to acknowledge the new rootblock.
 *
 * THINKS: side A vs side B, to have sync not stall all I/O?
 */
static
int
hammer2_vfs_sync(struct mount *mp, int waitfor)
{
	struct hammer2_sync_info info;
	hammer2_mount_t *hmp;
	int flags;
	int error;
	int haswork;

	hmp = MPTOHMP(mp);

	flags = VMSC_GETVP;
	if (waitfor & MNT_LAZY)
		flags |= VMSC_ONEPASS;

	/*
	 * First pass: non-blocking scan of all vnodes.  If that produced
	 * no error and the caller wants MNT_WAIT semantics, do a second,
	 * blocking pass.
	 */
	info.error = 0;
	info.waitfor = MNT_NOWAIT;
	vmntvnodescan(mp, flags | VMSC_NOWAIT,
		      hammer2_sync_scan1,
		      hammer2_sync_scan2, &info);
	if (info.error == 0 && (waitfor & MNT_WAIT)) {
		info.waitfor = waitfor;
		vmntvnodescan(mp, flags,
			      hammer2_sync_scan1,
			      hammer2_sync_scan2, &info);

	}
#if 0
	if (waitfor == MNT_WAIT) {
		/* XXX */
	} else {
		/* XXX */
	}
#endif
	/*
	 * Flush the topology's volume chain if anything is dirty.
	 * haswork records whether a volume-header update is needed below.
	 */
	hammer2_chain_lock(hmp, &hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
	if (hmp->vchain.flags & (HAMMER2_CHAIN_MODIFIED |
				 HAMMER2_CHAIN_MODIFIED_AUX |
				 HAMMER2_CHAIN_SUBMODIFIED)) {
		hammer2_chain_flush(hmp, &hmp->vchain, 0);
		haswork = 1;
	} else {
		haswork = 0;
	}
	hammer2_chain_unlock(hmp, &hmp->vchain);

	error = 0;

	/*
	 * Fsync the underlying device vnode unless this is a lazy sync.
	 */
	if ((waitfor & MNT_LAZY) == 0) {
		waitfor = MNT_NOWAIT;
		vn_lock(hmp->devvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(hmp->devvp, waitfor, 0);
		vn_unlock(hmp->devvp);
	}

	if (error == 0 && haswork) {
		struct buf *bp;

		/*
		 * Synchronize the disk before flushing the volume
		 * header.  (BUF_CMD_FLUSH issues a device-level cache
		 * flush and biowait blocks until it completes.)
		 */
		bp = getpbuf(NULL);
		bp->b_bio1.bio_offset = 0;
		bp->b_bufsize = 0;
		bp->b_bcount = 0;
		bp->b_cmd = BUF_CMD_FLUSH;
		bp->b_bio1.bio_done = biodone_sync;
		bp->b_bio1.bio_flags |= BIO_SYNC;
		vn_strategy(hmp->devvp, &bp->b_bio1);
		biowait(&bp->b_bio1, "h2vol");
		relpbuf(bp, NULL);

		/*
		 * Then we can safely flush the volume header.  Volume
		 * data is locked separately to prevent ioctl functions
		 * from deadlocking due to a configuration issue.
		 */
		bp = getblk(hmp->devvp, 0, HAMMER2_PBUFSIZE, 0, 0);
		hammer2_voldata_lock(hmp);
		bcopy(&hmp->voldata, bp->b_data, HAMMER2_PBUFSIZE);
		hammer2_voldata_unlock(hmp);
		bawrite(bp);
	}
	return (error);
}
847
848 /*
849  * Sync passes.
850  *
851  * NOTE: We don't test SUBMODIFIED or MOVED here because the fsync code
852  *       won't flush on those flags.  The syncer code above will do a
853  *       general meta-data flush globally that will catch these flags.
854  */
855 static int
856 hammer2_sync_scan1(struct mount *mp, struct vnode *vp, void *data)
857 {
858         hammer2_inode_t *ip;
859
860         ip = VTOI(vp);
861         if (vp->v_type == VNON || ip == NULL ||
862             ((ip->chain.flags & (HAMMER2_CHAIN_MODIFIED |
863                                  HAMMER2_CHAIN_DIRTYEMBED)) == 0 &&
864              RB_EMPTY(&vp->v_rbdirty_tree))) {
865                 return(-1);
866         }
867         return(0);
868 }
869
870 static int
871 hammer2_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
872 {
873         struct hammer2_sync_info *info = data;
874         hammer2_inode_t *ip;
875         int error;
876
877         ip = VTOI(vp);
878         if (vp->v_type == VNON || vp->v_type == VBAD ||
879             ((ip->chain.flags & (HAMMER2_CHAIN_MODIFIED |
880                                  HAMMER2_CHAIN_DIRTYEMBED)) == 0 &&
881             RB_EMPTY(&vp->v_rbdirty_tree))) {
882                 return(0);
883         }
884         error = VOP_FSYNC(vp, MNT_NOWAIT, 0);
885         if (error)
886                 info->error = error;
887         return(0);
888 }
889
/*
 * Convert a vnode to an NFS file handle (VFS_VPTOFH).
 *
 * NOTE(review): stub — returns success without filling in *fhp, so the
 * caller receives an uninitialized file handle.  Presumably export
 * support is unimplemented at this stage; confirm whether an error
 * return (e.g. EOPNOTSUPP) is intended until this is filled in.
 */
static
int
hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp)
{
        return (0);
}
896
/*
 * Convert an NFS file handle back to a vnode (VFS_FHTOVP).
 *
 * NOTE(review): stub — returns success without setting *vpp, leaving
 * the output vnode pointer uninitialized for the caller.  Should
 * likely return an error until implemented; confirm intent.
 */
static
int
hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
               struct fid *fhp, struct vnode **vpp)
{
        return (0);
}
904
/*
 * Check whether an NFS client address is permitted to mount this
 * filesystem (VFS_CHECKEXP).
 *
 * NOTE(review): stub — returns success without setting *exflagsp or
 * *credanonp, so callers see uninitialized export flags/credentials.
 * Confirm whether denying (returning an error) is the intended
 * behavior until export support is implemented.
 */
static
int
hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
                 int *exflagsp, struct ucred **credanonp)
{
        return (0);
}
912
913 /*
914  * Support code for hammer2_mount().  Read, verify, and install the volume
915  * header into the HMP
916  *
 * XXX read four volhdrs and use the one with the highest TID whose CRC
918  *     matches.
919  *
920  * XXX check iCRCs.
921  *
 * XXX For filesystems w/ less than 4 volhdrs, make sure to not write to
 *     nonexistent locations.
924  *
925  * XXX Record selected volhdr and ring updates to each of 4 volhdrs
926  */
927 static
928 int
929 hammer2_install_volume_header(hammer2_mount_t *hmp)
930 {
931         hammer2_volume_data_t *vd;
932         struct buf *bp;
933         hammer2_crc32_t crc0, crc, bcrc0, bcrc;
934         int error_reported;
935         int error;
936         int valid;
937         int i;
938
939         error_reported = 0;
940         error = 0;
941         valid = 0;
942         bp = NULL;
943
944         /*
945          * There are up to 4 copies of the volume header (syncs iterate
946          * between them so there is no single master).  We don't trust the
947          * volu_size field so we don't know precisely how large the filesystem
948          * is, so depend on the OS to return an error if we go beyond the
949          * block device's EOF.
950          */
951         for (i = 0; i < HAMMER2_NUM_VOLHDRS; i++) {
952                 error = bread(hmp->devvp, i * HAMMER2_ZONE_BYTES64,
953                               HAMMER2_VOLUME_BYTES, &bp);
954                 if (error) {
955                         brelse(bp);
956                         bp = NULL;
957                         continue;
958                 }
959
960                 vd = (struct hammer2_volume_data *) bp->b_data;
961                 if ((vd->magic != HAMMER2_VOLUME_ID_HBO) &&
962                     (vd->magic != HAMMER2_VOLUME_ID_ABO)) {
963                         brelse(bp);
964                         bp = NULL;
965                         continue;
966                 }
967
968                 if (vd->magic == HAMMER2_VOLUME_ID_ABO) {
969                         /* XXX: Reversed-endianness filesystem */
970                         kprintf("hammer2: reverse-endian filesystem detected");
971                         brelse(bp);
972                         bp = NULL;
973                         continue;
974                 }
975
976                 crc = vd->icrc_sects[HAMMER2_VOL_ICRC_SECT0];
977                 crc0 = hammer2_icrc32(bp->b_data + HAMMER2_VOLUME_ICRC0_OFF,
978                                       HAMMER2_VOLUME_ICRC0_SIZE);
979                 bcrc = vd->icrc_sects[HAMMER2_VOL_ICRC_SECT1];
980                 bcrc0 = hammer2_icrc32(bp->b_data + HAMMER2_VOLUME_ICRC1_OFF,
981                                        HAMMER2_VOLUME_ICRC1_SIZE);
982                 if ((crc0 != crc) || (bcrc0 != bcrc)) {
983                         kprintf("hammer2 volume header crc "
984                                 "mismatch copy #%d\t%08x %08x",
985                                 i, crc0, crc);
986                         error_reported = 1;
987                         brelse(bp);
988                         bp = NULL;
989                         continue;
990                 }
991                 if (valid == 0 || hmp->voldata.mirror_tid < vd->mirror_tid) {
992                         valid = 1;
993                         hmp->voldata = *vd;
994                 }
995                 brelse(bp);
996                 bp = NULL;
997         }
998         if (valid) {
999                 error = 0;
1000                 if (error_reported)
1001                         kprintf("hammer2: a valid volume header was found\n");
1002         } else {
1003                 error = EINVAL;
1004                 kprintf("hammer2: no valid volume headers found!\n");
1005         }
1006         return (error);
1007 }
1008
/*
 * Reconnect using the passed file pointer.  The caller must ref the
 * fp for us.
 *
 * Ownership of fp transfers to pmp (stored in pmp->msg_fp); the old
 * descriptor, if any, is dropped here.
 */
void
hammer2_cluster_reconnect(hammer2_pfsmount_t *pmp, struct file *fp)
{
        /*
         * Destroy the current connection: set KILL, then wake and wait
         * for both the reader and writer threads to exit (they clear
         * their td pointers as their final act before lwkt_exit()).
         */
        atomic_set_int(&pmp->msg_ctl, HAMMER2_CLUSTERCTL_KILL);
        while (pmp->msgrd_td || pmp->msgwr_td) {
               wakeup(&pmp->msg_ctl);
               tsleep(pmp, 0, "clstrkl", hz);
        }

        /*
         * Drop communications descriptor
         */
        if (pmp->msg_fp) {
                fdrop(pmp->msg_fp);
                pmp->msg_fp = NULL;
        }
        kprintf("RESTART CONNECTION\n");

        /*
         * Setup new communications descriptor and restart the reader
         * and writer threads.  msg_ctl must be cleared before the new
         * threads start or they would see the stale KILL bit.
         */
        pmp->msg_ctl = 0;
        pmp->msg_fp = fp;
        pmp->msg_seq = 0;
        lwkt_create(hammer2_cluster_thread_rd, pmp, &pmp->msgrd_td,
                    NULL, 0, -1, "hammer2-msgrd");
        lwkt_create(hammer2_cluster_thread_wr, pmp, &pmp->msgwr_td,
                    NULL, 0, -1, "hammer2-msgwr");
}
1045
1046 /*
1047  * Cluster controller thread.  Perform messaging functions.  We have one
1048  * thread for the reader and one for the writer.  The writer handles
1049  * shutdown requests (which should break the reader thread).
1050  */
1051 static
1052 void
1053 hammer2_cluster_thread_rd(void *arg)
1054 {
1055         hammer2_pfsmount_t *pmp = arg;
1056         dmsg_hdr_t hdr;
1057         hammer2_msg_t *msg;
1058         hammer2_state_t *state;
1059         size_t hbytes;
1060         int error = 0;
1061
1062         while ((pmp->msg_ctl & HAMMER2_CLUSTERCTL_KILL) == 0) {
1063                 /*
1064                  * Retrieve the message from the pipe or socket.
1065                  */
1066                 error = fp_read(pmp->msg_fp, &hdr, sizeof(hdr),
1067                                 NULL, 1, UIO_SYSSPACE);
1068                 if (error)
1069                         break;
1070                 if (hdr.magic != DMSG_HDR_MAGIC) {
1071                         kprintf("hammer2: msgrd: bad magic: %04x\n",
1072                                 hdr.magic);
1073                         error = EINVAL;
1074                         break;
1075                 }
1076                 hbytes = (hdr.cmd & DMSGF_SIZE) * DMSG_ALIGN;
1077                 if (hbytes < sizeof(hdr) || hbytes > DMSG_AUX_MAX) {
1078                         kprintf("hammer2: msgrd: bad header size %zd\n",
1079                                 hbytes);
1080                         error = EINVAL;
1081                         break;
1082                 }
1083                 /* XXX messy: mask cmd to avoid allocating state */
1084                 msg = hammer2_msg_alloc(&pmp->router,
1085                                         hdr.cmd & DMSGF_BASECMDMASK,
1086                                         NULL, NULL);
1087                 msg->any.head = hdr;
1088                 msg->hdr_size = hbytes;
1089                 if (hbytes > sizeof(hdr)) {
1090                         error = fp_read(pmp->msg_fp, &msg->any.head + 1,
1091                                         hbytes - sizeof(hdr),
1092                                         NULL, 1, UIO_SYSSPACE);
1093                         if (error) {
1094                                 kprintf("hammer2: short msg received\n");
1095                                 error = EINVAL;
1096                                 break;
1097                         }
1098                 }
1099                 msg->aux_size = hdr.aux_bytes * DMSG_ALIGN;
1100                 if (msg->aux_size > DMSG_AUX_MAX) {
1101                         kprintf("hammer2: illegal msg payload size %zd\n",
1102                                 msg->aux_size);
1103                         error = EINVAL;
1104                         break;
1105                 }
1106                 if (msg->aux_size) {
1107                         msg->aux_data = kmalloc(msg->aux_size, pmp->mmsg,
1108                                                 M_WAITOK | M_ZERO);
1109                         error = fp_read(pmp->msg_fp, msg->aux_data,
1110                                         msg->aux_size,
1111                                         NULL, 1, UIO_SYSSPACE);
1112                         if (error) {
1113                                 kprintf("hammer2: short msg "
1114                                         "payload received\n");
1115                                 break;
1116                         }
1117                 }
1118
1119                 /*
1120                  * State machine tracking, state assignment for msg,
1121                  * returns error and discard status.  Errors are fatal
1122                  * to the connection except for EALREADY which forces
1123                  * a discard without execution.
1124                  */
1125                 error = hammer2_state_msgrx(msg);
1126                 if (error) {
1127                         /*
1128                          * Raw protocol or connection error
1129                          */
1130                         hammer2_msg_free(msg);
1131                         if (error == EALREADY)
1132                                 error = 0;
1133                 } else if (msg->state && msg->state->func) {
1134                         /*
1135                          * Message related to state which already has a
1136                          * handling function installed for it.
1137                          */
1138                         error = msg->state->func(msg->state, msg);
1139                         hammer2_state_cleanuprx(msg);
1140                 } else if ((msg->any.head.cmd & DMSGF_PROTOS) ==
1141                            DMSG_PROTO_LNK) {
1142                         /*
1143                          * Message related to the LNK protocol set
1144                          */
1145                         error = hammer2_msg_lnk_rcvmsg(msg);
1146                         hammer2_state_cleanuprx(msg);
1147                 } else if ((msg->any.head.cmd & DMSGF_PROTOS) ==
1148                            DMSG_PROTO_DBG) {
1149                         /*
1150                          * Message related to the DBG protocol set
1151                          */
1152                         error = hammer2_msg_dbg_rcvmsg(msg);
1153                         hammer2_state_cleanuprx(msg);
1154                 } else {
1155                         /*
1156                          * Other higher-level messages (e.g. vnops)
1157                          */
1158                         error = hammer2_msg_adhoc_input(msg);
1159                         hammer2_state_cleanuprx(msg);
1160                 }
1161                 msg = NULL;
1162         }
1163
1164         if (error)
1165                 kprintf("hammer2: msg read failed error %d\n", error);
1166
1167         lockmgr(&pmp->msglk, LK_EXCLUSIVE);
1168         if (msg) {
1169                 if (msg->state && msg->state->msg == msg)
1170                         msg->state->msg = NULL;
1171                 hammer2_msg_free(msg);
1172         }
1173
1174         if ((state = pmp->freerd_state) != NULL) {
1175                 pmp->freerd_state = NULL;
1176                 hammer2_state_free(state);
1177         }
1178
1179         /*
1180          * Shutdown the socket before waiting for the transmit side.
1181          *
1182          * If we are dying due to e.g. a socket disconnect verses being
1183          * killed explicity we have to set KILL in order to kick the tx
1184          * side when it might not have any other work to do.  KILL might
1185          * already be set if we are in an unmount or reconnect.
1186          */
1187         fp_shutdown(pmp->msg_fp, SHUT_RDWR);
1188
1189         atomic_set_int(&pmp->msg_ctl, HAMMER2_CLUSTERCTL_KILL);
1190         wakeup(&pmp->msg_ctl);
1191
1192         /*
1193          * Wait for the transmit side to drain remaining messages
1194          * before cleaning up the rx state.  The transmit side will
1195          * set KILLTX and wait for the rx side to completely finish
1196          * (set msgrd_td to NULL) before cleaning up any remaining
1197          * tx states.
1198          */
1199         lockmgr(&pmp->msglk, LK_RELEASE);
1200         atomic_set_int(&pmp->msg_ctl, HAMMER2_CLUSTERCTL_KILLRX);
1201         wakeup(&pmp->msg_ctl);
1202         while ((pmp->msg_ctl & HAMMER2_CLUSTERCTL_KILLTX) == 0) {
1203                 wakeup(&pmp->msg_ctl);
1204                 tsleep(pmp, 0, "clstrkw", hz);
1205         }
1206
1207         pmp->msgrd_td = NULL;
1208         /* pmp can be ripped out from under us at this point */
1209         wakeup(pmp);
1210         lwkt_exit();
1211 }
1212
/*
 * Cluster transmit thread.  Opens the LNK_CONN transaction, then loops
 * draining pmp->msgq to the pipe/socket until killed or an error
 * occurs, and finally tears down all remaining transaction state in
 * careful coordination with the reader thread.
 */
static
void
hammer2_cluster_thread_wr(void *arg)
{
        hammer2_pfsmount_t *pmp = arg;
        hammer2_msg_t *msg = NULL;
        hammer2_state_t *state;
        ssize_t res;
        size_t name_len;
        int error = 0;
        int retries = 20;       /* bound on the shutdown drain loop below */

        /*
         * Open a LNK_CONN transaction indicating that we want to take part
         * in the spanning tree algorithm.  Filter explicitly on the PFS
         * info in the iroot.
         *
         * We do not transmit our (only) LNK_SPAN until the other end has
         * acknowledged our link connection request.
         *
         * The transaction remains fully open for the duration of the
         * connection.
         */
        msg = hammer2_msg_alloc(&pmp->router, DMSG_LNK_CONN | DMSGF_CREATE,
                                hammer2_msg_conn_reply, pmp);
        msg->any.lnk_conn.pfs_clid = pmp->iroot->ip_data.pfs_clid;
        msg->any.lnk_conn.pfs_fsid = pmp->iroot->ip_data.pfs_fsid;
        msg->any.lnk_conn.pfs_type = pmp->iroot->ip_data.pfs_type;
        msg->any.lnk_conn.proto_version = DMSG_SPAN_PROTO_1;
        msg->any.lnk_conn.peer_type = pmp->hmp->voldata.peer_type;
        msg->any.lnk_conn.peer_mask = 1LLU << HAMMER2_PEER_HAMMER2;
        /* Copy the PFS label, truncated to fit and NUL-terminated */
        name_len = pmp->iroot->ip_data.name_len;
        if (name_len >= sizeof(msg->any.lnk_conn.label))
                name_len = sizeof(msg->any.lnk_conn.label) - 1;
        bcopy(pmp->iroot->ip_data.filename, msg->any.lnk_conn.label, name_len);
        pmp->conn_state = msg->state;
        msg->any.lnk_conn.label[name_len] = 0;
        hammer2_msg_write(msg);

        /*
         * Transmit loop
         */
        msg = NULL;
        lockmgr(&pmp->msglk, LK_EXCLUSIVE);

        while ((pmp->msg_ctl & HAMMER2_CLUSTERCTL_KILL) == 0 && error == 0) {
                /*
                 * Sleep if no messages pending.  Interlock with flag while
                 * holding msglk.
                 */
                if (TAILQ_EMPTY(&pmp->msgq)) {
                        atomic_set_int(&pmp->msg_ctl,
                                       HAMMER2_CLUSTERCTL_SLEEPING);
                        lksleep(&pmp->msg_ctl, &pmp->msglk, 0, "msgwr", hz);
                        atomic_clear_int(&pmp->msg_ctl,
                                         HAMMER2_CLUSTERCTL_SLEEPING);
                }

                while ((msg = TAILQ_FIRST(&pmp->msgq)) != NULL) {
                        /*
                         * Remove msg from the transmit queue and do
                         * persist and half-closed state handling.
                         * msglk is dropped while running state code
                         * and doing I/O, and reacquired afterwards.
                         */
                        TAILQ_REMOVE(&pmp->msgq, msg, qentry);
                        lockmgr(&pmp->msglk, LK_RELEASE);

                        error = hammer2_state_msgtx(msg);
                        if (error == EALREADY) {
                                /* discard without transmitting */
                                error = 0;
                                hammer2_msg_free(msg);
                                lockmgr(&pmp->msglk, LK_EXCLUSIVE);
                                continue;
                        }
                        if (error) {
                                hammer2_msg_free(msg);
                                lockmgr(&pmp->msglk, LK_EXCLUSIVE);
                                break;
                        }

                        /*
                         * Dump the message to the pipe or socket.
                         * A short write is treated as a fatal error.
                         */
                        error = fp_write(pmp->msg_fp, &msg->any, msg->hdr_size,
                                         &res, UIO_SYSSPACE);
                        if (error || res != msg->hdr_size) {
                                if (error == 0)
                                        error = EINVAL;
                                lockmgr(&pmp->msglk, LK_EXCLUSIVE);
                                break;
                        }
                        if (msg->aux_size) {
                                error = fp_write(pmp->msg_fp,
                                                 msg->aux_data, msg->aux_size,
                                                 &res, UIO_SYSSPACE);
                                if (error || res != msg->aux_size) {
                                        if (error == 0)
                                                error = EINVAL;
                                        lockmgr(&pmp->msglk, LK_EXCLUSIVE);
                                        break;
                                }
                        }
                        hammer2_state_cleanuptx(msg);
                        lockmgr(&pmp->msglk, LK_EXCLUSIVE);
                }
        }

        /*
         * Cleanup messages pending transmission and release msgq lock.
         */
        if (error)
                kprintf("hammer2: msg write failed error %d\n", error);

        /* Free a message left in-hand by an error break above */
        if (msg) {
                if (msg->state && msg->state->msg == msg)
                        msg->state->msg = NULL;
                hammer2_msg_free(msg);
        }

        /*
         * Shutdown the socket.  This will cause the rx thread to get an
         * EOF and ensure that both threads get to a termination state.
         */
        fp_shutdown(pmp->msg_fp, SHUT_RDWR);

        /*
         * Set KILLTX (which the rx side waits for), then wait for the RX
         * side to completely finish before we clean out any remaining
         * command states.
         */
        lockmgr(&pmp->msglk, LK_RELEASE);
        atomic_set_int(&pmp->msg_ctl, HAMMER2_CLUSTERCTL_KILLTX);
        wakeup(&pmp->msg_ctl);
        while (pmp->msgrd_td) {
                wakeup(&pmp->msg_ctl);
                tsleep(pmp, 0, "clstrkw", hz);
        }
        lockmgr(&pmp->msglk, LK_EXCLUSIVE);

        /*
         * Simulate received MSGF_DELETE's for any remaining states.
         * Restart the scan after each callback since the state callback
         * may modify the tree (msglk is dropped around the callback).
         */
cleanuprd:
        RB_FOREACH(state, hammer2_state_tree, &pmp->staterd_tree) {
                if (state->func &&
                    (state->rxcmd & DMSGF_DELETE) == 0) {
                        lockmgr(&pmp->msglk, LK_RELEASE);
                        msg = hammer2_msg_alloc(&pmp->router, DMSG_LNK_ERROR,
                                                NULL, NULL);
                        if ((state->rxcmd & DMSGF_CREATE) == 0)
                                msg->any.head.cmd |= DMSGF_CREATE;
                        msg->any.head.cmd |= DMSGF_DELETE;
                        msg->state = state;
                        state->rxcmd = msg->any.head.cmd &
                                       ~DMSGF_DELETE;
                        msg->state->func(state, msg);
                        hammer2_state_cleanuprx(msg);
                        lockmgr(&pmp->msglk, LK_EXCLUSIVE);
                        goto cleanuprd;
                }
                if (state->func == NULL) {
                        state->flags &= ~HAMMER2_STATE_INSERTED;
                        RB_REMOVE(hammer2_state_tree,
                                  &pmp->staterd_tree, state);
                        hammer2_state_free(state);
                        goto cleanuprd;
                }
        }

        /*
         * NOTE: We have to drain the msgq to handle situations
         *       where received states have built up output
         *       messages, to avoid creating messages with
         *       duplicate CREATE/DELETE flags.
         */
cleanupwr:
        hammer2_drain_msgq(pmp);
        RB_FOREACH(state, hammer2_state_tree, &pmp->statewr_tree) {
                if (state->func &&
                    (state->rxcmd & DMSGF_DELETE) == 0) {
                        lockmgr(&pmp->msglk, LK_RELEASE);
                        msg = hammer2_msg_alloc(&pmp->router,
                                                DMSG_LNK_ERROR,
                                                NULL, NULL);
                        if ((state->rxcmd & DMSGF_CREATE) == 0)
                                msg->any.head.cmd |= DMSGF_CREATE;
                        msg->any.head.cmd |= DMSGF_DELETE |
                                             DMSGF_REPLY;
                        msg->state = state;
                        state->rxcmd = msg->any.head.cmd &
                                       ~DMSGF_DELETE;
                        msg->state->func(state, msg);
                        hammer2_state_cleanuprx(msg);
                        lockmgr(&pmp->msglk, LK_EXCLUSIVE);
                        goto cleanupwr;
                }
                if (state->func == NULL) {
                        state->flags &= ~HAMMER2_STATE_INSERTED;
                        RB_REMOVE(hammer2_state_tree,
                                  &pmp->statewr_tree, state);
                        hammer2_state_free(state);
                        goto cleanupwr;
                }
        }

        /* Bounded retry: the callbacks above may queue new output */
        hammer2_drain_msgq(pmp);
        if (--retries == 0)
                panic("hammer2: comm thread shutdown couldn't drain");
        if (RB_ROOT(&pmp->statewr_tree))
                goto cleanupwr;

        if ((state = pmp->freewr_state) != NULL) {
                pmp->freewr_state = NULL;
                hammer2_state_free(state);
        }

        lockmgr(&pmp->msglk, LK_RELEASE);

        /*
         * The state trees had better be empty now
         */
        KKASSERT(RB_EMPTY(&pmp->staterd_tree));
        KKASSERT(RB_EMPTY(&pmp->statewr_tree));
        KKASSERT(pmp->conn_state == NULL);

        /*
         * pmp can be ripped out from under us once msgwr_td is set to NULL.
         */
        pmp->msgwr_td = NULL;
        wakeup(pmp);
        lwkt_exit();
}
1444
1445 /*
1446  * This cleans out the pending transmit message queue, adjusting any
1447  * persistent states properly in the process.
1448  *
1449  * Caller must hold pmp->msglk
1450  */
1451 static
1452 void
1453 hammer2_drain_msgq(hammer2_pfsmount_t *pmp)
1454 {
1455         hammer2_msg_t *msg;
1456
1457         /*
1458          * Clean out our pending transmit queue, executing the
1459          * appropriate state adjustments.  If this tries to open
1460          * any new outgoing transactions we have to loop up and
1461          * clean them out.
1462          */
1463         while ((msg = TAILQ_FIRST(&pmp->msgq)) != NULL) {
1464                 TAILQ_REMOVE(&pmp->msgq, msg, qentry);
1465                 lockmgr(&pmp->msglk, LK_RELEASE);
1466                 if (msg->state && msg->state->msg == msg)
1467                         msg->state->msg = NULL;
1468                 if (hammer2_state_msgtx(msg))
1469                         hammer2_msg_free(msg);
1470                 else
1471                         hammer2_state_cleanuptx(msg);
1472                 lockmgr(&pmp->msglk, LK_EXCLUSIVE);
1473         }
1474 }
1475
1476 /*
1477  * Called with msglk held after queueing a new message, wakes up the
1478  * transmit thread.  We use an interlock thread to avoid unnecessary
1479  * wakeups.
1480  */
1481 void
1482 hammer2_clusterctl_wakeup(hammer2_pfsmount_t *pmp)
1483 {
1484         if (pmp->msg_ctl & HAMMER2_CLUSTERCTL_SLEEPING) {
1485                 atomic_clear_int(&pmp->msg_ctl, HAMMER2_CLUSTERCTL_SLEEPING);
1486                 wakeup(&pmp->msg_ctl);
1487         }
1488 }
1489
1490 static int
1491 hammer2_msg_lnk_rcvmsg(hammer2_msg_t *msg)
1492 {
1493         switch(msg->any.head.cmd & DMSGF_TRANSMASK) {
1494         case DMSG_LNK_CONN | DMSGF_CREATE:
1495                 /*
1496                  * reply & leave trans open
1497                  */
1498                 kprintf("CONN RECEIVE - (just ignore it)\n");
1499                 hammer2_msg_result(msg, 0);
1500                 break;
1501         case DMSG_LNK_SPAN | DMSGF_CREATE:
1502                 kprintf("SPAN RECEIVE - ADDED FROM CLUSTER\n");
1503                 break;
1504         case DMSG_LNK_SPAN | DMSGF_DELETE:
1505                 kprintf("SPAN RECEIVE - DELETED FROM CLUSTER\n");
1506                 break;
1507         default:
1508                 break;
1509         }
1510         return(0);
1511 }
1512
/*
 * This function is called when the other end replies to our LNK_CONN
 * request.
 *
 * We transmit our (single) SPAN on the initial reply, leaving that
 * transaction open too.
 */
static int
hammer2_msg_conn_reply(hammer2_state_t *state, hammer2_msg_t *msg)
{
        hammer2_pfsmount_t *pmp = state->any.pmp;
        hammer2_mount_t *hmp = pmp->hmp;
        hammer2_msg_t *rmsg;
        size_t name_len;
        int copyid;

        kprintf("LNK_CONN REPLY RECEIVED CMD %08x\n", msg->any.head.cmd);

        if (msg->any.head.cmd & DMSGF_CREATE) {
                /*
                 * Initial acknowledgement of our LNK_CONN: open our
                 * (single) LNK_SPAN transaction advertising this PFS.
                 */
                kprintf("LNK_CONN transaction replied to, initiate SPAN\n");
                rmsg = hammer2_msg_alloc(&pmp->router, DMSG_LNK_SPAN |
                                                       DMSGF_CREATE,
                                        hammer2_msg_span_reply, pmp);
                rmsg->any.lnk_span.pfs_clid = pmp->iroot->ip_data.pfs_clid;
                rmsg->any.lnk_span.pfs_fsid = pmp->iroot->ip_data.pfs_fsid;
                rmsg->any.lnk_span.pfs_type = pmp->iroot->ip_data.pfs_type;
                rmsg->any.lnk_span.peer_type = pmp->hmp->voldata.peer_type;
                rmsg->any.lnk_span.proto_version = DMSG_SPAN_PROTO_1;
                /* Copy the PFS label, truncated to fit, NUL-terminated */
                name_len = pmp->iroot->ip_data.name_len;
                if (name_len >= sizeof(rmsg->any.lnk_span.label))
                        name_len = sizeof(rmsg->any.lnk_span.label) - 1;
                bcopy(pmp->iroot->ip_data.filename,
                      rmsg->any.lnk_span.label,
                      name_len);
                rmsg->any.lnk_span.label[name_len] = 0;
                hammer2_msg_write(rmsg);

                /*
                 * Dump the configuration stored in the volume header
                 * (skipping unused copyinfo slots, which have copyid 0).
                 */
                hammer2_voldata_lock(hmp);
                for (copyid = 0; copyid < HAMMER2_COPYID_COUNT; ++copyid) {
                        if (hmp->voldata.copyinfo[copyid].copyid == 0)
                                continue;
                        hammer2_volconf_update(pmp, copyid);
                }
                hammer2_voldata_unlock(hmp);
        }
        /*
         * Remote closed its half of the LNK_CONN transaction and we
         * have not yet closed ours: reply to terminate our half.
         */
        if ((state->txcmd & DMSGF_DELETE) == 0 &&
            (msg->any.head.cmd & DMSGF_DELETE)) {
                kprintf("LNK_CONN transaction terminated by remote\n");
                pmp->conn_state = NULL;
                hammer2_msg_reply(msg, 0);
        }
        return(0);
}
1569
1570 /*
1571  * Remote terminated our span transaction.  We have to terminate our side.
1572  */
1573 static int
1574 hammer2_msg_span_reply(hammer2_state_t *state, hammer2_msg_t *msg)
1575 {
1576         /*hammer2_pfsmount_t *pmp = state->any.pmp;*/
1577
1578         kprintf("SPAN REPLY - Our sent span was terminated by the "
1579                 "remote %08x state %p\n", msg->any.head.cmd, state);
1580         if ((state->txcmd & DMSGF_DELETE) == 0 &&
1581             (msg->any.head.cmd & DMSGF_DELETE)) {
1582                 hammer2_msg_reply(msg, 0);
1583         }
1584         return(0);
1585 }
1586
1587 /*
1588  * Volume configuration updates are passed onto the userland service
1589  * daemon via the open LNK_CONN transaction.
1590  */
1591 void
1592 hammer2_volconf_update(hammer2_pfsmount_t *pmp, int index)
1593 {
1594         hammer2_mount_t *hmp = pmp->hmp;
1595         hammer2_msg_t *msg;
1596
1597         /* XXX interlock against connection state termination */
1598         kprintf("volconf update %p\n", pmp->conn_state);
1599         if (pmp->conn_state) {
1600                 kprintf("TRANSMIT VOLCONF VIA OPEN CONN TRANSACTION\n");
1601                 msg = hammer2_msg_alloc(&pmp->router, DMSG_LNK_VOLCONF,
1602                                         NULL, NULL);
1603                 msg->state = pmp->conn_state;
1604                 msg->any.lnk_volconf.copy = hmp->voldata.copyinfo[index];
1605                 msg->any.lnk_volconf.mediaid = hmp->voldata.fsid;
1606                 msg->any.lnk_volconf.index = index;
1607                 hammer2_msg_write(msg);
1608         }
1609 }