sys/vfs/hammer2/hammer2_vfsops.c
1 /*-
2  * Copyright (c) 2011, 2012 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/nlookup.h>
38 #include <sys/vnode.h>
39 #include <sys/mount.h>
40 #include <sys/fcntl.h>
41 #include <sys/buf.h>
42 #include <sys/uuid.h>
43 #include <sys/vfsops.h>
44 #include <sys/sysctl.h>
45 #include <sys/socket.h>
46
47 #include "hammer2.h"
48 #include "hammer2_disk.h"
49 #include "hammer2_mount.h"
50 #include "hammer2_network.h"
51
52 struct hammer2_sync_info {
53         int error;
54         int waitfor;
55 };
56
57 TAILQ_HEAD(hammer2_mntlist, hammer2_mount);
58 static struct hammer2_mntlist hammer2_mntlist;
59 static struct lock hammer2_mntlk;
60
61 int hammer2_debug;
62 int hammer2_cluster_enable = 1;
63 int hammer2_hardlink_enable = 1;
64 long hammer2_iod_file_read;
65 long hammer2_iod_meta_read;
66 long hammer2_iod_indr_read;
67 long hammer2_iod_file_write;
68 long hammer2_iod_meta_write;
69 long hammer2_iod_indr_write;
70 long hammer2_iod_volu_write;
71 long hammer2_ioa_file_read;
72 long hammer2_ioa_meta_read;
73 long hammer2_ioa_indr_read;
74 long hammer2_ioa_file_write;
75 long hammer2_ioa_meta_write;
76 long hammer2_ioa_indr_write;
77 long hammer2_ioa_volu_write;
78
79 SYSCTL_NODE(_vfs, OID_AUTO, hammer2, CTLFLAG_RW, 0, "HAMMER2 filesystem");
80
81 SYSCTL_INT(_vfs_hammer2, OID_AUTO, debug, CTLFLAG_RW,
82            &hammer2_debug, 0, "");
83 SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_enable, CTLFLAG_RW,
84            &hammer2_cluster_enable, 0, "");
85 SYSCTL_INT(_vfs_hammer2, OID_AUTO, hardlink_enable, CTLFLAG_RW,
86            &hammer2_hardlink_enable, 0, "");
87 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_read, CTLFLAG_RW,
88            &hammer2_iod_file_read, 0, "");
89 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_read, CTLFLAG_RW,
90            &hammer2_iod_meta_read, 0, "");
91 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_read, CTLFLAG_RW,
92            &hammer2_iod_indr_read, 0, "");
93 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_write, CTLFLAG_RW,
94            &hammer2_iod_file_write, 0, "");
95 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_write, CTLFLAG_RW,
96            &hammer2_iod_meta_write, 0, "");
97 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_write, CTLFLAG_RW,
98            &hammer2_iod_indr_write, 0, "");
99 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_write, CTLFLAG_RW,
100            &hammer2_iod_volu_write, 0, "");
101 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_file_read, CTLFLAG_RW,
102            &hammer2_ioa_file_read, 0, "");
103 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_meta_read, CTLFLAG_RW,
104            &hammer2_ioa_meta_read, 0, "");
105 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_indr_read, CTLFLAG_RW,
106            &hammer2_ioa_indr_read, 0, "");
107 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_file_write, CTLFLAG_RW,
108            &hammer2_ioa_file_write, 0, "");
109 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_meta_write, CTLFLAG_RW,
110            &hammer2_ioa_meta_write, 0, "");
111 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_indr_write, CTLFLAG_RW,
112            &hammer2_ioa_indr_write, 0, "");
113 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_volu_write, CTLFLAG_RW,
114            &hammer2_ioa_volu_write, 0, "");
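
/*
 * These tunables and counters are exported under vfs.hammer2.*.  A hedged
 * userland usage sketch (the MIB names follow directly from the
 * declarations above):
 *
 *	sysctl vfs.hammer2.debug=0x10		set the debug bitmask
 *	sysctl vfs.hammer2.iod_file_read	inspect an I/O counter
 */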
115
116 static int hammer2_vfs_init(struct vfsconf *conf);
117 static int hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
118                                 struct ucred *cred);
119 static int hammer2_remount(struct mount *, char *, struct vnode *,
120                                 struct ucred *);
121 static int hammer2_vfs_unmount(struct mount *mp, int mntflags);
122 static int hammer2_vfs_root(struct mount *mp, struct vnode **vpp);
123 static int hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp,
124                                 struct ucred *cred);
125 static int hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
126                                 struct ucred *cred);
127 static int hammer2_vfs_sync(struct mount *mp, int waitfor);
128 static int hammer2_vfs_vget(struct mount *mp, struct vnode *dvp,
129                                 ino_t ino, struct vnode **vpp);
130 static int hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
131                                 struct fid *fhp, struct vnode **vpp);
132 static int hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp);
133 static int hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
134                                 int *exflagsp, struct ucred **credanonp);
135
136 static int hammer2_install_volume_header(hammer2_mount_t *hmp);
137 static int hammer2_sync_scan1(struct mount *mp, struct vnode *vp, void *data);
138 static int hammer2_sync_scan2(struct mount *mp, struct vnode *vp, void *data);
139
140 static void hammer2_cluster_thread_rd(void *arg);
141 static void hammer2_cluster_thread_wr(void *arg);
142 static int hammer2_msg_span_reply(hammer2_pfsmount_t *pmp, hammer2_msg_t *msg);
143
144 /*
145  * HAMMER2 vfs operations.
146  */
147 static struct vfsops hammer2_vfsops = {
148         .vfs_init       = hammer2_vfs_init,
149         .vfs_sync       = hammer2_vfs_sync,
150         .vfs_mount      = hammer2_vfs_mount,
151         .vfs_unmount    = hammer2_vfs_unmount,
152         .vfs_root       = hammer2_vfs_root,
153         .vfs_statfs     = hammer2_vfs_statfs,
154         .vfs_statvfs    = hammer2_vfs_statvfs,
155         .vfs_vget       = hammer2_vfs_vget,
156         .vfs_vptofh     = hammer2_vfs_vptofh,
157         .vfs_fhtovp     = hammer2_vfs_fhtovp,
158         .vfs_checkexp   = hammer2_vfs_checkexp
159 };
160
161 MALLOC_DEFINE(M_HAMMER2, "HAMMER2-mount", "");
162
163 VFS_SET(hammer2_vfsops, hammer2, 0);
164 MODULE_VERSION(hammer2, 1);
165
166 static
167 int
168 hammer2_vfs_init(struct vfsconf *conf)
169 {
170         int error;
171
172         error = 0;
173
174         if (HAMMER2_BLOCKREF_BYTES != sizeof(struct hammer2_blockref))
175                 error = EINVAL;
176         if (HAMMER2_INODE_BYTES != sizeof(struct hammer2_inode_data))
177                 error = EINVAL;
178         if (HAMMER2_ALLOCREF_BYTES != sizeof(struct hammer2_allocref))
179                 error = EINVAL;
180         if (HAMMER2_VOLUME_BYTES != sizeof(struct hammer2_volume_data))
181                 error = EINVAL;
182
183         if (error)
184                 kprintf("HAMMER2 structure size mismatch; cannot continue.\n");
185
186         lockinit(&hammer2_mntlk, "mntlk", 0, 0);
187         TAILQ_INIT(&hammer2_mntlist);
188
189         return (error);
190 }
191
192 /*
193  * Mount or remount HAMMER2 filesystem from physical media
194  *
195  *      mountroot
196  *              mp              mount point structure
197  *              path            NULL
198  *              data            <unused>
199  *              cred            <unused>
200  *
201  *      mount
202  *              mp              mount point structure
203  *              path            path to mount point
204  *              data            pointer to argument structure in user space
205  *                      volume  volume path (device@LABEL form)
206  *                      hflags  user mount flags
207  *              cred            user credentials
208  *
209  * RETURNS:     0       Success
210  *              !0      error number
211  */
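
/*
 * Illustrative userland sketch of a PFS mount.  This is only a rough
 * guide, not part of this file: the authoritative hammer2_mount_info
 * layout lives in hammer2_mount.h, and the device path, mountpoint and
 * variable names here are made up.  Only volume, hflags and cluster_fd
 * are actually consumed by the code below.
 *
 *	struct hammer2_mount_info info;
 *
 *	bzero(&info, sizeof(info));
 *	info.volume = "/dev/ad0s1d@LABEL";	(device@LABEL form)
 *	info.hflags = 0;			(user mount flags)
 *	info.cluster_fd = fd;			(pipe to cluster controller)
 *	mount("hammer2", "/mnt/pfs", 0, &info);
 */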
212 static
213 int
214 hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
215                   struct ucred *cred)
216 {
217         struct hammer2_mount_info info;
218         hammer2_pfsmount_t *pmp;
219         hammer2_mount_t *hmp;
220         hammer2_key_t lhc;
221         struct vnode *devvp;
222         struct nlookupdata nd;
223         hammer2_chain_t *parent;
224         hammer2_chain_t *schain;
225         hammer2_chain_t *rchain;
226         char devstr[MNAMELEN];
227         size_t size;
228         size_t done;
229         char *dev;
230         char *label;
231         int ronly = 1;
232         int create_hmp;
233         int error;
234
235         hmp = NULL;
236         pmp = NULL;
237         dev = NULL;
238         label = NULL;
239         devvp = NULL;
240
241         kprintf("hammer2_mount\n");
242
243         if (path == NULL) {
244                 /*
245                  * Root mount
246                  */
247                 bzero(&info, sizeof(info));
248                 info.cluster_fd = -1;
249                 return (EOPNOTSUPP);
250         } else {
251                 /*
252                  * Non-root mount or updating a mount
253                  */
254                 error = copyin(data, &info, sizeof(info));
255                 if (error)
256                         return (error);
257
258                 error = copyinstr(info.volume, devstr, MNAMELEN - 1, &done);
259                 if (error)
260                         return (error);
261
262                 /* Extract device and label */
263                 dev = devstr;
264                 label = strchr(devstr, '@');
265                 if (label == NULL ||
266                     ((label + 1) - dev) > done) {
267                         return (EINVAL);
268                 }
269                 *label = '\0';
270                 label++;
271                 if (*label == '\0')
272                         return (EINVAL);
273
274                 if (mp->mnt_flag & MNT_UPDATE) {
275                         /* Update mount */
276                         /* HAMMER2 implements NFS export via mountctl */
277                         hmp = MPTOHMP(mp);
278                         devvp = hmp->devvp;
279                         error = hammer2_remount(mp, path, devvp, cred);
280                         return error;
281                 }
282         }
283
284         /*
285          * PFS mount
286          *
287          * Lookup name and verify it refers to a block device.
288          */
289         error = nlookup_init(&nd, dev, UIO_SYSSPACE, NLC_FOLLOW);
290         if (error == 0)
291                 error = nlookup(&nd);
292         if (error == 0)
293                 error = cache_vref(&nd.nl_nch, nd.nl_cred, &devvp);
294         nlookup_done(&nd);
295
296         if (error == 0) {
297                 if (vn_isdisk(devvp, &error))
298                         error = vfs_mountedon(devvp);
299         }
300
301         /*
302          * Determine if the device has already been mounted.  After this
303          * check hmp will be non-NULL if this is the second or a later
304          * hammer2 mount of the same device.
305          */
306         lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);
307         TAILQ_FOREACH(hmp, &hammer2_mntlist, mntentry) {
308                 if (hmp->devvp == devvp)
309                         break;
310         }
311
312         /*
313          * Open the device if this isn't a secondary mount
314          */
315         if (hmp) {
316                 create_hmp = 0;
317         } else {
318                 create_hmp = 1;
319                 if (error == 0 && vcount(devvp) > 0)
320                         error = EBUSY;
321
322                 /*
323                  * Now open the device
324                  */
325                 if (error == 0) {
326                         ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);
327                         vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
328                         error = vinvalbuf(devvp, V_SAVE, 0, 0);
329                         if (error == 0) {
330                                 error = VOP_OPEN(devvp,
331                                                  ronly ? FREAD : FREAD | FWRITE,
332                                                  FSCRED, NULL);
333                         }
334                         vn_unlock(devvp);
335                 }
336                 if (error && devvp) {
337                         vrele(devvp);
338                         devvp = NULL;
339                 }
340                 if (error) {
341                         lockmgr(&hammer2_mntlk, LK_RELEASE);
342                         return error;
343                 }
344         }
345
346         /*
347          * Block device opened successfully, finish initializing the
348          * mount structure.
349          *
350          * From this point on we have to call hammer2_vfs_unmount() on failure.
351          */
352         pmp = kmalloc(sizeof(*pmp), M_HAMMER2, M_WAITOK | M_ZERO);
353         mp->mnt_data = (qaddr_t)pmp;
354         pmp->mp = mp;
355         kmalloc_create(&pmp->mmsg, "HAMMER2-pfsmsg");
356         lockinit(&pmp->msglk, "h2msg", 0, 0);
357         TAILQ_INIT(&pmp->msgq);
358         RB_INIT(&pmp->staterd_tree);
359         RB_INIT(&pmp->statewr_tree);
360
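	/*
	 * Note: pmp carries per-PFS state (messaging queue, state trees,
	 *	 ccms domain, iroot/rchain) while hmp carries per-device
	 *	 state (volume header, vchain, super-root).  Several pmp's
	 *	 can share one hmp; hmp->pmp_count tracks how many.
	 */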
361         if (create_hmp) {
362                 hmp = kmalloc(sizeof(*hmp), M_HAMMER2, M_WAITOK | M_ZERO);
363                 hmp->ronly = ronly;
364                 hmp->devvp = devvp;
365                 kmalloc_create(&hmp->minode, "HAMMER2-inodes");
366                 kmalloc_create(&hmp->mchain, "HAMMER2-chains");
367                 TAILQ_INSERT_TAIL(&hammer2_mntlist, hmp, mntentry);
368         }
369         ccms_domain_init(&pmp->ccms_dom);
370         pmp->hmp = hmp;
371         ++hmp->pmp_count;
372         lockmgr(&hammer2_mntlk, LK_RELEASE);
373         kprintf("hammer2_mount hmp=%p pmpcnt=%d\n", hmp, hmp->pmp_count);
374         
375         mp->mnt_flag = MNT_LOCAL;
376         mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;   /* all entry pts are SMP */
377
378         if (create_hmp) {
379                 /*
380                  * vchain setup.  vchain.data points at hmp->voldata.
381                  * vchain.refs is initialized and will never drop to 0.
382                  */
383                 hmp->vchain.refs = 1;
384                 hmp->vchain.data = (void *)&hmp->voldata;
385                 hmp->vchain.bref.type = HAMMER2_BREF_TYPE_VOLUME;
386                 hmp->vchain.bref.data_off = 0 | HAMMER2_PBUFRADIX;
387                 hmp->vchain.bref_flush = hmp->vchain.bref;
388                 ccms_cst_init(&hmp->vchain.cst, NULL);
389                 /* hmp->vchain.u.xxx is left NULL */
390                 lockinit(&hmp->alloclk, "h2alloc", 0, 0);
391                 lockinit(&hmp->voldatalk, "voldata", 0, LK_CANRECURSE);
392
393                 /*
394                  * Install the volume header
395                  */
396                 error = hammer2_install_volume_header(hmp);
397                 if (error) {
398                         hammer2_vfs_unmount(mp, MNT_FORCE);
399                         return error;
400                 }
401         }
402
403         /*
404          * required mount structure initializations
405          */
406         mp->mnt_stat.f_iosize = HAMMER2_PBUFSIZE;
407         mp->mnt_stat.f_bsize = HAMMER2_PBUFSIZE;
408
409         mp->mnt_vstat.f_frsize = HAMMER2_PBUFSIZE;
410         mp->mnt_vstat.f_bsize = HAMMER2_PBUFSIZE;
411
412         /*
413          * Optional fields
414          */
415         mp->mnt_iosize_max = MAXPHYS;
416
417         /*
418          * First locate the super-root inode, which is key 0 relative to the
419          * volume header's blockset.
420          *
421          * Then locate the root inode by scanning the directory keyspace
422          * represented by the label.
423          */
424         if (create_hmp) {
425                 parent = &hmp->vchain;
426                 hammer2_chain_lock(hmp, parent, HAMMER2_RESOLVE_ALWAYS);
427                 schain = hammer2_chain_lookup(hmp, &parent,
428                                       HAMMER2_SROOT_KEY, HAMMER2_SROOT_KEY, 0);
429                 hammer2_chain_unlock(hmp, parent);
430                 if (schain == NULL) {
431                         kprintf("hammer2_mount: invalid super-root\n");
432                         hammer2_vfs_unmount(mp, MNT_FORCE);
433                         return EINVAL;
434                 }
435                 hammer2_chain_ref(hmp, schain); /* for hmp->schain */
436                 hmp->schain = schain;           /* left locked */
437         } else {
438                 schain = hmp->schain;
439                 hammer2_chain_lock(hmp, schain, HAMMER2_RESOLVE_ALWAYS);
440         }
441
442         parent = schain;
443         lhc = hammer2_dirhash(label, strlen(label));
444         rchain = hammer2_chain_lookup(hmp, &parent,
445                                       lhc, lhc + HAMMER2_DIRHASH_LOMASK,
446                                       0);
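	/*
	 * Several directory entries can land in the same DIRHASH range,
	 * so iterate the candidates and match the stored filename against
	 * the requested label.
	 */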
447         while (rchain) {
448                 if (rchain->bref.type == HAMMER2_BREF_TYPE_INODE &&
449                     rchain->u.ip &&
450                     strcmp(label, rchain->data->ipdata.filename) == 0) {
451                         break;
452                 }
453                 rchain = hammer2_chain_next(hmp, &parent, rchain,
454                                             lhc, lhc + HAMMER2_DIRHASH_LOMASK,
455                                             0);
456         }
457         hammer2_chain_unlock(hmp, parent);
458         if (rchain == NULL) {
459                 kprintf("hammer2_mount: PFS label not found\n");
460                 hammer2_vfs_unmount(mp, MNT_FORCE);
461                 return EINVAL;
462         }
463         if (rchain->flags & HAMMER2_CHAIN_MOUNTED) {
464                 hammer2_chain_unlock(hmp, rchain);
465                 kprintf("hammer2_mount: PFS label already mounted!\n");
466                 hammer2_vfs_unmount(mp, MNT_FORCE);
467                 return EBUSY;
468         }
469         atomic_set_int(&rchain->flags, HAMMER2_CHAIN_MOUNTED);
470
471         hammer2_chain_ref(hmp, rchain); /* for pmp->rchain */
472         hammer2_chain_unlock(hmp, rchain);
473         pmp->rchain = rchain;           /* left held & unlocked */
474         pmp->iroot = rchain->u.ip;      /* implied hold from rchain */
475         pmp->iroot->pmp = pmp;
476
477         kprintf("iroot %p\n", pmp->iroot);
478
479         /*
480          * Ref the cluster management messaging descriptor.  The mount
481          * program deals with the other end of the communications pipe.
482          */
483         pmp->msg_fp = holdfp(curproc->p_fd, info.cluster_fd, -1);
484         if (pmp->msg_fp == NULL) {
485                 kprintf("hammer2_mount: bad cluster_fd!\n");
486                 hammer2_vfs_unmount(mp, MNT_FORCE);
487                 return EBADF;
488         }
489         lwkt_create(hammer2_cluster_thread_rd, pmp, &pmp->msgrd_td,
490                     NULL, 0, -1, "hammer2-msgrd");
491         lwkt_create(hammer2_cluster_thread_wr, pmp, &pmp->msgwr_td,
492                     NULL, 0, -1, "hammer2-msgwr");
493
494         /*
495          * Finish setup
496          */
497         vfs_getnewfsid(mp);
498         vfs_add_vnodeops(mp, &hammer2_vnode_vops, &mp->mnt_vn_norm_ops);
499         vfs_add_vnodeops(mp, &hammer2_spec_vops, &mp->mnt_vn_spec_ops);
500         vfs_add_vnodeops(mp, &hammer2_fifo_vops, &mp->mnt_vn_fifo_ops);
501
502         copyinstr(info.volume, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, &size);
503         bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
504         bzero(mp->mnt_stat.f_mntonname, sizeof(mp->mnt_stat.f_mntonname));
505         copyinstr(path, mp->mnt_stat.f_mntonname,
506                   sizeof(mp->mnt_stat.f_mntonname) - 1,
507                   &size);
508
509         /*
510          * Initial statfs to prime mnt_stat.
511          */
512         hammer2_vfs_statfs(mp, &mp->mnt_stat, cred);
513
514         return 0;
515 }
516
517 static
518 int
519 hammer2_remount(struct mount *mp, char *path, struct vnode *devvp,
520                 struct ucred *cred)
521 {
522         return (0);
523 }
524
525 static
526 int
527 hammer2_vfs_unmount(struct mount *mp, int mntflags)
528 {
529         hammer2_pfsmount_t *pmp;
530         hammer2_mount_t *hmp;
531         int flags;
532         int error = 0;
533         int ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);
534         struct vnode *devvp;
535
536         pmp = MPTOPMP(mp);
537         hmp = pmp->hmp;
538         flags = 0;
539
540         if (mntflags & MNT_FORCE)
541                 flags |= FORCECLOSE;
542
543         hammer2_mount_exlock(hmp);
544
545         /*
546          * If mount initialization proceeded far enough we must flush
547          * its vnodes.
548          */
549         if (pmp->iroot)
550                 error = vflush(mp, 0, flags);
551
552         if (error) {
                    hammer2_mount_unlock(hmp);
                    return error;
            }
554
555         lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);
556         --hmp->pmp_count;
557         kprintf("hammer2_unmount hmp=%p pmpcnt=%d\n", hmp, hmp->pmp_count);
558
559         /*
560          * Flush any left over chains.  The voldata lock is only used
561          * to synchronize against HAMMER2_CHAIN_MODIFIED_AUX.
562          */
563         hammer2_voldata_lock(hmp);
564         if (hmp->vchain.flags & (HAMMER2_CHAIN_MODIFIED |
565                                  HAMMER2_CHAIN_MODIFIED_AUX |
566                                  HAMMER2_CHAIN_SUBMODIFIED)) {
567                 hammer2_voldata_unlock(hmp);
568                 hammer2_vfs_sync(mp, MNT_WAIT);
569         } else {
570                 hammer2_voldata_unlock(hmp);
571         }
572         if (hmp->pmp_count == 0) {
573                 if (hmp->vchain.flags & (HAMMER2_CHAIN_MODIFIED |
574                                          HAMMER2_CHAIN_MODIFIED_AUX |
575                                          HAMMER2_CHAIN_SUBMODIFIED)) {
576                         kprintf("hammer2_unmount: chains left over after "
577                                 "final sync\n");
578                         if (hammer2_debug & 0x0010)
579                                 Debugger("entered debugger");
580                 }
581         }
582
583         /*
584          * Cleanup the root and super-root chain elements (which should be
585          * clean).
586          */
587         pmp->iroot = NULL;
588         if (pmp->rchain) {
589                 atomic_clear_int(&pmp->rchain->flags, HAMMER2_CHAIN_MOUNTED);
590                 KKASSERT(pmp->rchain->refs == 1);
591                 hammer2_chain_drop(hmp, pmp->rchain);
592                 pmp->rchain = NULL;
593         }
594         ccms_domain_uninit(&pmp->ccms_dom);
595
596         /*
597          * Ask the cluster controller to go away
598          */
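	/*
	 * The writer thread notices HAMMER2_CLUSTERCTL_KILL, shuts down
	 * the descriptor (which unblocks the reader), and each thread
	 * NULLs out its td pointer and wakes us via wakeup(pmp) on exit.
	 */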
599         atomic_set_int(&pmp->msg_ctl, HAMMER2_CLUSTERCTL_KILL);
600         while (pmp->msgrd_td || pmp->msgwr_td) {
601                 wakeup(&pmp->msg_ctl);
602                 tsleep(pmp, 0, "clstrkl", hz);
603         }
604
605         /*
606          * Drop communications descriptor
607          */
608         if (pmp->msg_fp) {
609                 fdrop(pmp->msg_fp);
610                 pmp->msg_fp = NULL;
611         }
612
613         /*
614          * If no PFSs are left, drop the master hammer2_mount for the device.
615          */
616         if (hmp->pmp_count == 0) {
617                 if (hmp->schain) {
618                         KKASSERT(hmp->schain->refs == 1);
619                         hammer2_chain_drop(hmp, hmp->schain);
620                         hmp->schain = NULL;
621                 }
622
623                 /*
624                  * Finish up with the device vnode
625                  */
626                 if ((devvp = hmp->devvp) != NULL) {
627                         vinvalbuf(devvp, (ronly ? 0 : V_SAVE), 0, 0);
628                         hmp->devvp = NULL;
629                         VOP_CLOSE(devvp, (ronly ? FREAD : FREAD|FWRITE));
630                         vrele(devvp);
631                         devvp = NULL;
632                 }
633         }
634         hammer2_mount_unlock(hmp);
635
636         pmp->mp = NULL;
637         pmp->hmp = NULL;
638         mp->mnt_data = NULL;
639
640         kmalloc_destroy(&pmp->mmsg);
641
642         kfree(pmp, M_HAMMER2);
643         if (hmp->pmp_count == 0) {
644                 TAILQ_REMOVE(&hammer2_mntlist, hmp, mntentry);
645                 kmalloc_destroy(&hmp->minode);
646                 kmalloc_destroy(&hmp->mchain);
647                 kfree(hmp, M_HAMMER2);
648         }
649         lockmgr(&hammer2_mntlk, LK_RELEASE);
650         return (error);
651 }
652
653 static
654 int
655 hammer2_vfs_vget(struct mount *mp, struct vnode *dvp,
656              ino_t ino, struct vnode **vpp)
657 {
658         kprintf("hammer2_vget\n");
659         return (EOPNOTSUPP);
660 }
661
662 static
663 int
664 hammer2_vfs_root(struct mount *mp, struct vnode **vpp)
665 {
666         hammer2_pfsmount_t *pmp;
667         hammer2_mount_t *hmp;
668         int error;
669         struct vnode *vp;
670
671         pmp = MPTOPMP(mp);
672         hmp = pmp->hmp;
673         hammer2_mount_exlock(hmp);
674         if (pmp->iroot == NULL) {
675                 *vpp = NULL;
676                 error = EINVAL;
677         } else {
678                 hammer2_chain_lock(hmp, &pmp->iroot->chain,
679                                    HAMMER2_RESOLVE_ALWAYS |
680                                    HAMMER2_RESOLVE_SHARED);
681                 vp = hammer2_igetv(pmp->iroot, &error);
682                 hammer2_chain_unlock(hmp, &pmp->iroot->chain);
683                 *vpp = vp;
684                 if (vp == NULL)
685                         kprintf("vnodefail\n");
686         }
687         hammer2_mount_unlock(hmp);
688
689         return (error);
690 }
691
692 /*
693  * Filesystem status
694  *
695  * XXX incorporate pmp->iroot->ip_data.inode_quota and data_quota
696  */
697 static
698 int
699 hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
700 {
701         hammer2_pfsmount_t *pmp;
702         hammer2_mount_t *hmp;
703
704         pmp = MPTOPMP(mp);
705         hmp = MPTOHMP(mp);
706
707         mp->mnt_stat.f_files = pmp->iroot->ip_data.inode_count +
708                                pmp->iroot->delta_icount;
709         mp->mnt_stat.f_ffree = 0;
710         mp->mnt_stat.f_blocks = hmp->voldata.allocator_size / HAMMER2_PBUFSIZE;
711         mp->mnt_stat.f_bfree = (hmp->voldata.allocator_size -
712                                 hmp->voldata.allocator_beg) / HAMMER2_PBUFSIZE;
713         mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree;
714
715         *sbp = mp->mnt_stat;
716         return (0);
717 }
718
719 static
720 int
721 hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
722 {
723         hammer2_pfsmount_t *pmp;
724         hammer2_mount_t *hmp;
725
726         pmp = MPTOPMP(mp);
727         hmp = MPTOHMP(mp);
728
729         mp->mnt_vstat.f_bsize = HAMMER2_PBUFSIZE;
730         mp->mnt_vstat.f_files = pmp->iroot->ip_data.inode_count +
731                                 pmp->iroot->delta_icount;
732         mp->mnt_vstat.f_ffree = 0;
733         mp->mnt_vstat.f_blocks = hmp->voldata.allocator_size / HAMMER2_PBUFSIZE;
734         mp->mnt_vstat.f_bfree = (hmp->voldata.allocator_size -
735                                  hmp->voldata.allocator_beg) / HAMMER2_PBUFSIZE;
736         mp->mnt_vstat.f_bavail = mp->mnt_vstat.f_bfree;
737
738         *sbp = mp->mnt_vstat;
739         return (0);
740 }
741
742 /*
743  * Sync the entire filesystem; this is called from the filesystem syncer
744  * process periodically and whenever a user calls sync(1) on the hammer
745  * process periodically and whenever a user calls sync(1) on the hammer2
746  *
747  * Currently it is actually called from the syncer! \o/
748  *
749  * This task will have to snapshot the state of the dirty inode chain.
750  * From that, it will have to make sure all of the inodes on the dirty
751  * chain have I/O initiated.  We make sure that I/O is initiated for the
752  * root block.
753  *
754  * If waitfor is set, we wait for media to acknowledge the new rootblock.
755  *
756  * THINKS: side A vs side B, to have sync not stall all I/O?
757  */
758 static
759 int
760 hammer2_vfs_sync(struct mount *mp, int waitfor)
761 {
762         struct hammer2_sync_info info;
763         hammer2_mount_t *hmp;
764         int flags;
765         int error;
766         int haswork;
767
768         hmp = MPTOHMP(mp);
769
770         flags = VMSC_GETVP;
771         if (waitfor & MNT_LAZY)
772                 flags |= VMSC_ONEPASS;
773
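	/*
	 * Pass 1: a non-blocking scan to push out as much dirty vnode
	 *	   data as possible without stalling.
	 * Pass 2: only if the caller requested MNT_WAIT and pass 1 saw
	 *	   no error, rescan and wait for the remainder.
	 */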
774         info.error = 0;
775         info.waitfor = MNT_NOWAIT;
776         vmntvnodescan(mp, flags | VMSC_NOWAIT,
777                       hammer2_sync_scan1,
778                       hammer2_sync_scan2, &info);
779         if (info.error == 0 && (waitfor & MNT_WAIT)) {
780                 info.waitfor = waitfor;
781                     vmntvnodescan(mp, flags,
782                                   hammer2_sync_scan1,
783                                   hammer2_sync_scan2, &info);
784
785         }
786 #if 0
787         if (waitfor == MNT_WAIT) {
788                 /* XXX */
789         } else {
790                 /* XXX */
791         }
792 #endif
793         hammer2_chain_lock(hmp, &hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
794         if (hmp->vchain.flags & (HAMMER2_CHAIN_MODIFIED |
795                                  HAMMER2_CHAIN_MODIFIED_AUX |
796                                  HAMMER2_CHAIN_SUBMODIFIED)) {
797                 hammer2_chain_flush(hmp, &hmp->vchain, 0);
798                 haswork = 1;
799         } else {
800                 haswork = 0;
801         }
802         hammer2_chain_unlock(hmp, &hmp->vchain);
803
804         error = 0;
805
806         if ((waitfor & MNT_LAZY) == 0) {
807                 waitfor = MNT_NOWAIT;
808                 vn_lock(hmp->devvp, LK_EXCLUSIVE | LK_RETRY);
809                 error = VOP_FSYNC(hmp->devvp, waitfor, 0);
810                 vn_unlock(hmp->devvp);
811         }
812
813         if (error == 0 && haswork) {
814                 struct buf *bp;
815
816                 /*
817                  * Synchronize the disk before flushing the volume
818                  * header.
819                  */
820                 bp = getpbuf(NULL);
821                 bp->b_bio1.bio_offset = 0;
822                 bp->b_bufsize = 0;
823                 bp->b_bcount = 0;
824                 bp->b_cmd = BUF_CMD_FLUSH;
825                 bp->b_bio1.bio_done = biodone_sync;
826                 bp->b_bio1.bio_flags |= BIO_SYNC;
827                 vn_strategy(hmp->devvp, &bp->b_bio1);
828                 biowait(&bp->b_bio1, "h2vol");
829                 relpbuf(bp, NULL);
830
831                 /*
832                  * Then we can safely flush the volume header.  Volume
833                  * data is locked separately to prevent ioctl functions
834                  * from deadlocking due to a configuration issue.
835                  */
836                 bp = getblk(hmp->devvp, 0, HAMMER2_PBUFSIZE, 0, 0);
837                 hammer2_voldata_lock(hmp);
838                 bcopy(&hmp->voldata, bp->b_data, HAMMER2_PBUFSIZE);
839                 hammer2_voldata_unlock(hmp);
840                 bawrite(bp);
841         }
842         return (error);
843 }
844
845 /*
846  * Sync passes.
847  *
848  * NOTE: We don't test SUBMODIFIED or MOVED here because the fsync code
849  *       won't flush on those flags.  The syncer code above will do a
850  *       general meta-data flush globally that will catch these flags.
851  */
852 static int
853 hammer2_sync_scan1(struct mount *mp, struct vnode *vp, void *data)
854 {
855         hammer2_inode_t *ip;
856
857         ip = VTOI(vp);
858         if (vp->v_type == VNON || ip == NULL ||
859             ((ip->chain.flags & (HAMMER2_CHAIN_MODIFIED |
860                                  HAMMER2_CHAIN_DIRTYEMBED)) == 0 &&
861              RB_EMPTY(&vp->v_rbdirty_tree))) {
862                 return(-1);
863         }
864         return(0);
865 }
866
867 static int
868 hammer2_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
869 {
870         struct hammer2_sync_info *info = data;
871         hammer2_inode_t *ip;
872         int error;
873
874         ip = VTOI(vp);
875         if (vp->v_type == VNON || vp->v_type == VBAD ||
876             ((ip->chain.flags & (HAMMER2_CHAIN_MODIFIED |
877                                  HAMMER2_CHAIN_DIRTYEMBED)) == 0 &&
878             RB_EMPTY(&vp->v_rbdirty_tree))) {
879                 return(0);
880         }
881         error = VOP_FSYNC(vp, MNT_NOWAIT, 0);
882         if (error)
883                 info->error = error;
884         return(0);
885 }
886
887 static
888 int
889 hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp)
890 {
891         return (0);
892 }
893
894 static
895 int
896 hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
897                struct fid *fhp, struct vnode **vpp)
898 {
899         return (0);
900 }
901
902 static
903 int
904 hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
905                  int *exflagsp, struct ucred **credanonp)
906 {
907         return (0);
908 }
909
910 /*
911  * Support code for hammer2_mount().  Read, verify, and install the volume
912  * header into the HMP
913  *
914  * XXX read four volhdrs and use the one with the highest TID whose CRC
915  *     matches.
916  *
917  * XXX check iCRCs.
918  *
919  * XXX For filesystems with fewer than 4 volhdrs, make sure not to write
920  *     to nonexistent locations.
921  *
922  * XXX Record selected volhdr and ring updates to each of 4 volhdrs
923  */
924 static
925 int
926 hammer2_install_volume_header(hammer2_mount_t *hmp)
927 {
928         hammer2_volume_data_t *vd;
929         struct buf *bp;
930         hammer2_crc32_t crc0, crc, bcrc0, bcrc;
931         int error_reported;
932         int error;
933         int valid;
934         int i;
935
936         error_reported = 0;
937         error = 0;
938         valid = 0;
939         bp = NULL;
940
941         /*
942          * There are up to 4 copies of the volume header (syncs iterate
943          * between them so there is no single master).  We don't trust the
944          * volu_size field, so we don't know precisely how large the filesystem
945          * is; depend on the OS to return an error if we go beyond the
946          * block device's EOF.
947          */
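	/*
	 * In other words (summarizing the loop below): copy #i lives at
	 * byte offset i * HAMMER2_ZONE_BYTES64, a copy is accepted only
	 * if its magic and both section iCRCs check out, and among the
	 * accepted copies the one with the highest mirror_tid wins.
	 */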
948         for (i = 0; i < HAMMER2_NUM_VOLHDRS; i++) {
949                 error = bread(hmp->devvp, i * HAMMER2_ZONE_BYTES64,
950                               HAMMER2_VOLUME_BYTES, &bp);
951                 if (error) {
952                         brelse(bp);
953                         bp = NULL;
954                         continue;
955                 }
956
957                 vd = (struct hammer2_volume_data *) bp->b_data;
958                 if ((vd->magic != HAMMER2_VOLUME_ID_HBO) &&
959                     (vd->magic != HAMMER2_VOLUME_ID_ABO)) {
960                         brelse(bp);
961                         bp = NULL;
962                         continue;
963                 }
964
965                 if (vd->magic == HAMMER2_VOLUME_ID_ABO) {
966                         /* XXX: Reversed-endianness filesystem */
967                         kprintf("hammer2: reverse-endian filesystem detected\n");
968                         brelse(bp);
969                         bp = NULL;
970                         continue;
971                 }
972
973                 crc = vd->icrc_sects[HAMMER2_VOL_ICRC_SECT0];
974                 crc0 = hammer2_icrc32(bp->b_data + HAMMER2_VOLUME_ICRC0_OFF,
975                                       HAMMER2_VOLUME_ICRC0_SIZE);
976                 bcrc = vd->icrc_sects[HAMMER2_VOL_ICRC_SECT1];
977                 bcrc0 = hammer2_icrc32(bp->b_data + HAMMER2_VOLUME_ICRC1_OFF,
978                                        HAMMER2_VOLUME_ICRC1_SIZE);
979                 if ((crc0 != crc) || (bcrc0 != bcrc)) {
980                         kprintf("hammer2 volume header crc "
981                                 "mismatch copy #%d\t%08x %08x\n",
982                                 i, crc0, crc);
983                         error_reported = 1;
984                         brelse(bp);
985                         bp = NULL;
986                         continue;
987                 }
988                 if (valid == 0 || hmp->voldata.mirror_tid < vd->mirror_tid) {
989                         valid = 1;
990                         hmp->voldata = *vd;
991                 }
992                 brelse(bp);
993                 bp = NULL;
994         }
995         if (valid) {
996                 error = 0;
997                 if (error_reported)
998                         kprintf("hammer2: a valid volume header was found\n");
999         } else {
1000                 error = EINVAL;
1001                 kprintf("hammer2: no valid volume headers found!\n");
1002         }
1003         return (error);
1004 }
1005
1006 /*
1007  * Cluster controller thread.  Perform messaging functions.  We have one
1008  * thread for the reader and one for the writer.  The writer handles
1009  * shutdown requests (which should break the reader thread).
1010  */
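
/*
 * On-wire message layout as handled by the reader loop below (a sketch
 * derived from the code, not a normative description of the protocol):
 *
 *	hammer2_msg_hdr_t hdr;	   fixed leading header, read first
 *	...extended header...	   total header size is
 *				   (hdr.cmd & HAMMER2_MSGF_SIZE) *
 *				   HAMMER2_MSG_ALIGN bytes, hdr included
 *	...aux payload...	   hdr.aux_bytes * HAMMER2_MSG_ALIGN bytes,
 *				   read into a separate aux_data buffer
 */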
1011 static
1012 void
1013 hammer2_cluster_thread_rd(void *arg)
1014 {
1015         hammer2_pfsmount_t *pmp = arg;
1016         hammer2_msg_hdr_t hdr;
1017         hammer2_msg_t *msg;
1018         hammer2_state_t *state;
1019         size_t hbytes;
1020         int error = 0;
1021
1022         while ((pmp->msg_ctl & HAMMER2_CLUSTERCTL_KILL) == 0) {
1023                 /*
1024                  * Retrieve the message from the pipe or socket.
1025                  */
1026                 error = fp_read(pmp->msg_fp, &hdr, sizeof(hdr),
1027                                 NULL, 1, UIO_SYSSPACE);
1028                 if (error)
1029                         break;
1030                 if (hdr.magic != HAMMER2_MSGHDR_MAGIC) {
1031                         kprintf("hammer2: msgrd: bad magic: %04x\n",
1032                                 hdr.magic);
1033                         error = EINVAL;
1034                         break;
1035                 }
1036                 hbytes = (hdr.cmd & HAMMER2_MSGF_SIZE) * HAMMER2_MSG_ALIGN;
1037                 if (hbytes < sizeof(hdr) || hbytes > HAMMER2_MSGAUX_MAX) {
1038                         kprintf("hammer2: msgrd: bad header size %zd\n",
1039                                 hbytes);
1040                         error = EINVAL;
1041                         break;
1042                 }
1043                 msg = kmalloc(offsetof(struct hammer2_msg, any) + hbytes,
1044                               pmp->mmsg, M_WAITOK | M_ZERO);
1045                 msg->any.head = hdr;
1046                 msg->hdr_size = hbytes;
1047                 if (hbytes > sizeof(hdr)) {
1048                         error = fp_read(pmp->msg_fp, &msg->any.head + 1,
1049                                         hbytes - sizeof(hdr),
1050                                         NULL, 1, UIO_SYSSPACE);
1051                         if (error) {
1052                                 kprintf("hammer2: short msg received\n");
1053                                 error = EINVAL;
1054                                 break;
1055                         }
1056                 }
1057                 msg->aux_size = hdr.aux_bytes * HAMMER2_MSG_ALIGN;
1058                 if (msg->aux_size > HAMMER2_MSGAUX_MAX) {
1059                         kprintf("hammer2: illegal msg payload size %zd\n",
1060                                 msg->aux_size);
1061                         error = EINVAL;
1062                         break;
1063                 }
1064                 if (msg->aux_size) {
1065                         msg->aux_data = kmalloc(msg->aux_size, pmp->mmsg,
1066                                                 M_WAITOK | M_ZERO);
1067                         error = fp_read(pmp->msg_fp, msg->aux_data,
1068                                         msg->aux_size,
1069                                         NULL, 1, UIO_SYSSPACE);
1070                         if (error) {
1071                                 kprintf("hammer2: short msg "
1072                                         "payload received\n");
1073                                 break;
1074                         }
1075                 }
1076
1077                 /*
1078                  * State machine tracking, state assignment for msg,
1079                  * returns error and discard status.  Errors are fatal
1080                  * to the connection except for EALREADY which forces
1081                  * a discard without execution.
1082                  */
1083                 error = hammer2_state_msgrx(pmp, msg);
1084                 if (error) {
1085                         hammer2_msg_free(pmp, msg);
1086                         if (error == EALREADY)
1087                                 error = 0;
1088                 } else if (msg->state) {
1089                         error = msg->state->func(pmp, msg);
1090                         hammer2_state_cleanuprx(pmp, msg);
1091                 } else {
1092                         error = hammer2_msg_adhoc_input(pmp, msg);
1093                         hammer2_state_cleanuprx(pmp, msg);
1094                 }
1095                 msg = NULL;
1096         }
1097
1098         if (error)
1099                 kprintf("hammer2: msg read failed error %d\n", error);
1100
1101         lockmgr(&pmp->msglk, LK_EXCLUSIVE);
1102         if (msg) {
1103                 if (msg->state && msg->state->msg == msg)
1104                         msg->state->msg = NULL;
1105                 hammer2_msg_free(pmp, msg);
1106         }
1107
1108         if ((state = pmp->freerd_state) != NULL) {
1109                 pmp->freerd_state = NULL;
1110                 hammer2_state_free(state);
1111         }
1112
1113         while ((state = RB_ROOT(&pmp->staterd_tree)) != NULL) {
1114                 RB_REMOVE(hammer2_state_tree, &pmp->staterd_tree, state);
1115                 hammer2_state_free(state);
1116         }
1117         lockmgr(&pmp->msglk, LK_RELEASE);
1118
1119         fp_shutdown(pmp->msg_fp, SHUT_RDWR);
1120         pmp->msgrd_td = NULL;
1121         /* pmp can be ripped out from under us at this point */
1122         wakeup(pmp);
1123         lwkt_exit();
1124 }
1125
1126 static
1127 void
1128 hammer2_cluster_thread_wr(void *arg)
1129 {
1130         hammer2_pfsmount_t *pmp = arg;
1131         hammer2_msg_t *msg = NULL;
1132         hammer2_state_t *state;
1133         ssize_t res;
1134         int error = 0;
1135
1136         /*
1137          * Initiate a SPAN transaction registering our PFS with the other
1138          * end using {source}=1.  The transaction is left open.
1139          */
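	/*
	 * (The reply callback, hammer2_msg_span_reply(), currently just
	 * logs the reply; the open transaction keeps our PFS registered
	 * with the other end until the mount is torn down.)
	 */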
1140         msg = hammer2_msg_alloc(pmp, 1, 0,
1141                                 HAMMER2_LNK_SPAN | HAMMER2_MSGF_CREATE);
1142         hammer2_msg_write(pmp, msg, hammer2_msg_span_reply);
1143
1144         /*
1145          * Transmit loop
1146          */
1147         msg = NULL;
1148         lockmgr(&pmp->msglk, LK_EXCLUSIVE);
1149
1150         while ((pmp->msg_ctl & HAMMER2_CLUSTERCTL_KILL) == 0 && error == 0) {
1151                 lksleep(&pmp->msg_ctl, &pmp->msglk, 0, "msgwr", hz);
1152                 while ((msg = TAILQ_FIRST(&pmp->msgq)) != NULL) {
1153                         /*
1154                          * Remove msg from the transmit queue and do
1155                          * persist and half-closed state handling.
1156                          */
1157                         TAILQ_REMOVE(&pmp->msgq, msg, qentry);
1158                         lockmgr(&pmp->msglk, LK_RELEASE);
1159
1160                         error = hammer2_state_msgtx(pmp, msg);
1161                         if (error == EALREADY) {
1162                                 error = 0;
1163                                 hammer2_msg_free(pmp, msg);
1164                                 lockmgr(&pmp->msglk, LK_EXCLUSIVE);
1165                                 continue;
1166                         }
1167                         if (error)
1168                                 break;
1169
1170                         /*
1171                          * Dump the message to the pipe or socket.
1172                          */
1173                         error = fp_write(pmp->msg_fp, &msg->any, msg->hdr_size,
1174                                          &res, UIO_SYSSPACE);
1175                         if (error || res != msg->hdr_size) {
1176                                 if (error == 0)
1177                                         error = EINVAL;
1178                                 lockmgr(&pmp->msglk, LK_EXCLUSIVE);
1179                                 break;
1180                         }
1181                         if (msg->aux_size) {
1182                                 error = fp_write(pmp->msg_fp,
1183                                                  msg->aux_data, msg->aux_size,
1184                                                  &res, UIO_SYSSPACE);
1185                                 if (error || res != msg->aux_size) {
1186                                         if (error == 0)
1187                                                 error = EINVAL;
1188                                         lockmgr(&pmp->msglk, LK_EXCLUSIVE);
1189                                         break;
1190                                 }
1191                         }
1192                         hammer2_state_cleanuptx(pmp, msg);
1193                         lockmgr(&pmp->msglk, LK_EXCLUSIVE);
1194                 }
1195         }
1196
1197         /*
1198          * Cleanup messages pending transmission and release msgq lock.
1199          */
1200         if (error)
1201                 kprintf("hammer2: msg write failed error %d\n", error);
1202
1203         if (msg) {
1204                 if (msg->state && msg->state->msg == msg)
1205                         msg->state->msg = NULL;
1206                 hammer2_msg_free(pmp, msg);
1207         }
1208
1209         while ((msg = TAILQ_FIRST(&pmp->msgq)) != NULL) {
1210                 TAILQ_REMOVE(&pmp->msgq, msg, qentry);
1211                 if (msg->state && msg->state->msg == msg)
1212                         msg->state->msg = NULL;
1213                 hammer2_msg_free(pmp, msg);
1214         }
1215
1216         if ((state = pmp->freewr_state) != NULL) {
1217                 pmp->freewr_state = NULL;
1218                 hammer2_state_free(state);
1219         }
1220
1221         while ((state = RB_ROOT(&pmp->statewr_tree)) != NULL) {
1222                 RB_REMOVE(hammer2_state_tree, &pmp->statewr_tree, state);
1223                 hammer2_state_free(state);
1224         }
1225         lockmgr(&pmp->msglk, LK_RELEASE);
1226
1227         /*
1228          * Cleanup descriptor, be sure the read side is shut down so the
1229          * (probably blocked) read operation returns an error.
1230          *
1231          * pmp can be ripped out from under us once msgwr_td is set to NULL.
1232          */
1233         fp_shutdown(pmp->msg_fp, SHUT_RDWR);
1234         pmp->msgwr_td = NULL;
1235         wakeup(pmp);
1236         lwkt_exit();
1237 }
1238
1239 static int
1240 hammer2_msg_span_reply(hammer2_pfsmount_t *pmp, hammer2_msg_t *msg)
1241 {
1242         kprintf("SPAN REPLY\n");
1243         return(0);
1244 }