dmsg, hammer2 - refactor remove hammer2-specific code
[dragonfly.git] / sys / dev / disk / xdisk / xdisk.c
1 /*
2  * Copyright (c) 2012 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@dragonflybsd.org>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 /*
35  * This module allows disk devices to be created and associated with a
36  * communications pipe or socket.  You open the device and issue an
37  * ioctl() to install a new disk along with its communications descriptor.
38  *
39  * All further communication occurs via the descriptor using the DMSG
40  * LNK_CONN, LNK_SPAN, and BLOCK protocols.  The descriptor can be a
41  * direct connection to a remote machine's disk (in-kernel), to a remote
42  * cluster controller, to the local cluster controller, etc.
43  *
44  * /dev/xdisk is the control device; issue ioctl()s to create the /dev/xa%d
45  * devices.  These devices look like raw disks to the system.
46  *
47  * TODO:
48  *      Handle circuit disconnects, leave bio's pending
49  *      Restart bio's on circuit reconnect.
50  */
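/*
 * Illustrative sketch (assumption, not part of this driver): from userland
 * a new xa%d device would be created by opening the control device and
 * issuing XDISKIOCATTACH with a descriptor that is already connected to a
 * DMSG peer.  Error handling is omitted and the label and size values
 * below are examples only.
 *
 *      struct xdisk_attach_ioctl xaioc;
 *      int ctlfd;
 *
 *      bzero(&xaioc, sizeof(xaioc));
 *      xaioc.fd = dmsg_fd;                     (connected socket or pipe)
 *      xaioc.blksize = 4096;                   (DEV_BSIZE..MAXBSIZE)
 *      xaioc.bytes = media_bytes;              (total media size in bytes)
 *      snprintf(xaioc.cl_label, sizeof(xaioc.cl_label), "cluster0");
 *      snprintf(xaioc.fs_label, sizeof(xaioc.fs_label), "serial0001");
 *
 *      ctlfd = open("/dev/xdisk", O_RDWR);
 *      ioctl(ctlfd, XDISKIOCATTACH, &xaioc);
 *      close(ctlfd);
 *
 * A structure with a matching fs_label is later passed to XDISKIOCDETACH
 * to detach the device again.
 */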
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/buf.h>
54 #include <sys/conf.h>
55 #include <sys/device.h>
56 #include <sys/devicestat.h>
57 #include <sys/disk.h>
58 #include <sys/kernel.h>
59 #include <sys/malloc.h>
60 #include <sys/sysctl.h>
61 #include <sys/proc.h>
62 #include <sys/queue.h>
63 #include <sys/udev.h>
64 #include <sys/uuid.h>
65 #include <sys/kern_syscall.h>
66
67 #include <sys/dmsg.h>
68 #include <sys/xdiskioctl.h>
69
70 #include <sys/buf2.h>
71 #include <sys/thread2.h>
72
73 struct xa_softc;
74
75 struct xa_tag {
76         TAILQ_ENTRY(xa_tag) entry;
77         struct xa_softc *xa;
78         dmsg_blk_error_t status;
79         kdmsg_state_t   *state;
80         kdmsg_circuit_t *circ;
81         struct bio      *bio;
82         int             running;        /* transaction running */
83         int             waitseq;        /* streaming reply */
84         int             done;           /* final (transaction closed) */
85 };
86
87 typedef struct xa_tag   xa_tag_t;
88
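/*
 * Per-attachment softc, one for each remote disk (xa%d).  xa_tags cycle
 * between tag_freeq and tag_pendq as block transactions are issued and
 * complete; bios that cannot be dispatched immediately are deferred on
 * bioq.  The serializing flag single-threads attach/detach/open against
 * kdmsg iocom setup and teardown, and attached/opencnt drive the final
 * teardown decision in xa_terminate_check().
 */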
89 struct xa_softc {
90         TAILQ_ENTRY(xa_softc) entry;
91         cdev_t          dev;
92         kdmsg_iocom_t   iocom;
93         struct xdisk_attach_ioctl xaioc;
94         struct disk_info info;
95         struct disk     disk;
96         uuid_t          pfs_fsid;
97         int             unit;
98         int             serializing;
99         int             attached;
100         int             opencnt;
101         uint64_t        keyid;
102         xa_tag_t        *opentag;
103         TAILQ_HEAD(, bio) bioq;
104         TAILQ_HEAD(, xa_tag) tag_freeq;
105         TAILQ_HEAD(, xa_tag) tag_pendq;
106         TAILQ_HEAD(, kdmsg_circuit) circq;
107         struct lwkt_token tok;
108 };
109
110 typedef struct xa_softc xa_softc_t;
111
112 #define MAXTAGS         64      /* no real limit */
113
114 static int xdisk_attach(struct xdisk_attach_ioctl *xaioc);
115 static int xdisk_detach(struct xdisk_attach_ioctl *xaioc);
116 static void xa_exit(kdmsg_iocom_t *iocom);
117 static void xa_terminate_check(struct xa_softc *xa);
118 static int xa_rcvdmsg(kdmsg_msg_t *msg);
119 static void xa_autodmsg(kdmsg_msg_t *msg);
120
121 static xa_tag_t *xa_setup_cmd(xa_softc_t *xa, struct bio *bio);
122 static void xa_start(xa_tag_t *tag, kdmsg_msg_t *msg);
123 static uint32_t xa_wait(xa_tag_t *tag, int seq);
124 static void xa_done(xa_tag_t *tag, int wasbio);
125 static int xa_sync_completion(kdmsg_state_t *state, kdmsg_msg_t *msg);
126 static int xa_bio_completion(kdmsg_state_t *state, kdmsg_msg_t *msg);
127 static void xa_restart_deferred(xa_softc_t *xa);
128
129 MALLOC_DEFINE(M_XDISK, "Networked disk client", "Network Disks");
130
131 /*
132  * Control device; issue ioctls to create xa devices.
133  */
134 static d_open_t xdisk_open;
135 static d_close_t xdisk_close;
136 static d_ioctl_t xdisk_ioctl;
137
138 static struct dev_ops xdisk_ops = {
139         { "xdisk", 0, D_MPSAFE | D_TRACKCLOSE },
140         .d_open =       xdisk_open,
141         .d_close =      xdisk_close,
142         .d_ioctl =      xdisk_ioctl
143 };
144
145 /*
146  * XA disk devices
147  */
148 static d_open_t xa_open;
149 static d_close_t xa_close;
150 static d_ioctl_t xa_ioctl;
151 static d_strategy_t xa_strategy;
152 static d_psize_t xa_size;
153
154 static struct dev_ops xa_ops = {
155         { "xa", 0, D_DISK | D_CANFREE | D_MPSAFE | D_TRACKCLOSE },
156         .d_open =       xa_open,
157         .d_close =      xa_close,
158         .d_ioctl =      xa_ioctl,
159         .d_read =       physread,
160         .d_write =      physwrite,
161         .d_strategy =   xa_strategy,
162         .d_psize =      xa_size
163 };
164
165 static struct lwkt_token xdisk_token = LWKT_TOKEN_INITIALIZER(xdisk_token);
166 static int xdisk_opencount;
167 static cdev_t xdisk_dev;
168 static TAILQ_HEAD(, xa_softc) xa_queue;
169
170 /*
171  * Module initialization
172  */
173 static int
174 xdisk_modevent(module_t mod, int type, void *data)
175 {
176         switch (type) {
177         case MOD_LOAD:
178                 TAILQ_INIT(&xa_queue);
179                 xdisk_dev = make_dev(&xdisk_ops, 0,
180                                      UID_ROOT, GID_WHEEL, 0600, "xdisk");
181                 break;
182         case MOD_UNLOAD:
183         case MOD_SHUTDOWN:
184                 if (xdisk_opencount || TAILQ_FIRST(&xa_queue))
185                         return (EBUSY);
186                 if (xdisk_dev) {
187                         destroy_dev(xdisk_dev);
188                         xdisk_dev = NULL;
189                 }
190                 dev_ops_remove_all(&xdisk_ops);
191                 dev_ops_remove_all(&xa_ops);
192                 break;
193         default:
194                 break;
195         }
196         return 0;
197 }
198
199 DEV_MODULE(xdisk, xdisk_modevent, 0);
200
201 /*
202  * Control device
203  */
204 static int
205 xdisk_open(struct dev_open_args *ap)
206 {
207         lwkt_gettoken(&xdisk_token);
208         ++xdisk_opencount;
209         lwkt_reltoken(&xdisk_token);
210         return(0);
211 }
212
213 static int
214 xdisk_close(struct dev_close_args *ap)
215 {
216         lwkt_gettoken(&xdisk_token);
217         --xdisk_opencount;
218         lwkt_reltoken(&xdisk_token);
219         return(0);
220 }
221
222 static int
223 xdisk_ioctl(struct dev_ioctl_args *ap)
224 {
225         int error;
226
227         switch(ap->a_cmd) {
228         case XDISKIOCATTACH:
229                 error = xdisk_attach((void *)ap->a_data);
230                 break;
231         case XDISKIOCDETACH:
232                 error = xdisk_detach((void *)ap->a_data);
233                 break;
234         default:
235                 error = ENOTTY;
236                 break;
237         }
238         return error;
239 }
240
241 /************************************************************************
242  *                              DMSG INTERFACE                          *
243  ************************************************************************/
244
245 static int
246 xdisk_attach(struct xdisk_attach_ioctl *xaioc)
247 {
248         xa_softc_t *xa;
249         xa_tag_t *tag;
250         struct file *fp;
251         int unit;
252         int n;
253         char devname[64];
254         cdev_t dev;
255
256         /*
257          * Normalize ioctl params
258          */
259         fp = holdfp(curproc->p_fd, xaioc->fd, -1);
260         if (fp == NULL)
261                 return EINVAL;
262         if (xaioc->cl_label[sizeof(xaioc->cl_label) - 1] != 0 ||
263             xaioc->fs_label[sizeof(xaioc->fs_label) - 1] != 0 ||
264             xaioc->blksize < DEV_BSIZE || xaioc->blksize > MAXBSIZE) {
265                 fdrop(fp);      /* release the hold from holdfp() above */
266                 return EINVAL;
267         }
268
269         /*
270          * See if the serial number is already present.  If we are
271          * racing a termination the disk subsystem may still have
272          * duplicate entries not yet removed so we wait a bit and
273          * retry.
274          */
275         lwkt_gettoken(&xdisk_token);
276 again:
277         TAILQ_FOREACH(xa, &xa_queue, entry) {
278                 if (strcmp(xa->iocom.auto_lnk_conn.fs_label,
279                            xaioc->fs_label) == 0) {
280                         if (xa->serializing) {
281                                 tsleep(xa, 0, "xadelay", hz / 10);
282                                 goto again;
283                         }
284                         xa->serializing = 1;
285                         kdmsg_iocom_uninit(&xa->iocom);
286                         break;
287                 }
288         }
289
290         /*
291          * Create a new xa if not already present
292          */
293         if (xa == NULL) {
294                 unit = 0;
295                 for (;;) {
296                         TAILQ_FOREACH(xa, &xa_queue, entry) {
297                                 if (xa->unit == unit)
298                                         break;
299                         }
300                         if (xa == NULL)
301                                 break;
302                         ++unit;
303                 }
304                 xa = kmalloc(sizeof(*xa), M_XDISK, M_WAITOK|M_ZERO);
305                 xa->unit = unit;
306                 xa->serializing = 1;
307                 lwkt_token_init(&xa->tok, "xa");
308                 TAILQ_INIT(&xa->circq);
309                 TAILQ_INIT(&xa->bioq);
310                 TAILQ_INIT(&xa->tag_freeq);
311                 TAILQ_INIT(&xa->tag_pendq);
312                 for (n = 0; n < MAXTAGS; ++n) {
313                         tag = kmalloc(sizeof(*tag), M_XDISK, M_WAITOK|M_ZERO);
314                         tag->xa = xa;
315                         TAILQ_INSERT_TAIL(&xa->tag_freeq, tag, entry);
316                 }
317                 TAILQ_INSERT_TAIL(&xa_queue, xa, entry);
318         } else {
319                 unit = xa->unit;
320         }
321
322         /*
323          * (xa) is now serializing.
324          */
325         xa->xaioc = *xaioc;
326         xa->attached = 1;
327         lwkt_reltoken(&xdisk_token);
328
329         /*
330          * Create device
331          */
332         if (xa->dev == NULL) {
333                 dev = disk_create(unit, &xa->disk, &xa_ops);
334                 dev->si_drv1 = xa;
335                 xa->dev = dev;
336         }
337
338         xa->info.d_media_blksize = xaioc->blksize;
339         xa->info.d_media_blocks = xaioc->bytes / xaioc->blksize;
340         xa->info.d_dsflags = DSO_MBRQUIET | DSO_RAWPSIZE;
341         xa->info.d_secpertrack = 32;
342         xa->info.d_nheads = 64;
343         xa->info.d_secpercyl = xa->info.d_secpertrack * xa->info.d_nheads;
344         xa->info.d_ncylinders = 0;
345         if (xa->xaioc.fs_label[0])
346                 xa->info.d_serialno = xa->xaioc.fs_label;
347
348         /*
349          * Set up messaging connection
350          */
351         ksnprintf(devname, sizeof(devname), "xa%d", unit);
352         kdmsg_iocom_init(&xa->iocom, xa,
353                          KDMSG_IOCOMF_AUTOCONN |
354                          KDMSG_IOCOMF_AUTOSPAN |
355                          KDMSG_IOCOMF_AUTOCIRC |
356                          KDMSG_IOCOMF_AUTOFORGE,
357                          M_XDISK, xa_rcvdmsg);
358         xa->iocom.exit_func = xa_exit;
359
360         kdmsg_iocom_reconnect(&xa->iocom, fp, devname);
361
362         /*
363          * Setup our LNK_CONN advertisement for autoinitiate.
364          *
365          * Our filter is setup to only accept PEER_BLOCK/SERVER
366          * advertisements.
367          */
368         xa->iocom.auto_lnk_conn.pfs_type = DMSG_PFSTYPE_CLIENT;
369         xa->iocom.auto_lnk_conn.proto_version = DMSG_SPAN_PROTO_1;
370         xa->iocom.auto_lnk_conn.peer_type = DMSG_PEER_BLOCK;
371         xa->iocom.auto_lnk_conn.peer_mask = 1LLU << DMSG_PEER_BLOCK;
372         xa->iocom.auto_lnk_conn.pfs_mask = 1LLU << DMSG_PFSTYPE_SERVER;
373         ksnprintf(xa->iocom.auto_lnk_conn.cl_label,
374                   sizeof(xa->iocom.auto_lnk_conn.cl_label),
375                   "%s", xaioc->cl_label);
376
377         /*
378          * We need a unique pfs_fsid to avoid confusion.
379          * We supply a rendezvous fs_label using the serial number.
380          */
381         kern_uuidgen(&xa->pfs_fsid, 1);
382         xa->iocom.auto_lnk_conn.pfs_fsid = xa->pfs_fsid;
383         ksnprintf(xa->iocom.auto_lnk_conn.fs_label,
384                   sizeof(xa->iocom.auto_lnk_conn.fs_label),
385                   "%s", xaioc->fs_label);
386
387         /*
388          * Setup our LNK_SPAN advertisement for autoinitiate
389          */
390         xa->iocom.auto_lnk_span.pfs_type = DMSG_PFSTYPE_CLIENT;
391         xa->iocom.auto_lnk_span.proto_version = DMSG_SPAN_PROTO_1;
392         xa->iocom.auto_lnk_span.peer_type = DMSG_PEER_BLOCK;
393         ksnprintf(xa->iocom.auto_lnk_span.cl_label,
394                   sizeof(xa->iocom.auto_lnk_span.cl_label),
395                   "%s", xa->xaioc.cl_label);
396
397         kdmsg_iocom_autoinitiate(&xa->iocom, xa_autodmsg);
398         disk_setdiskinfo_sync(&xa->disk, &xa->info);
399
400         lwkt_gettoken(&xdisk_token);
401         xa->serializing = 0;
402         xa_terminate_check(xa);
403         lwkt_reltoken(&xdisk_token);
404
405         return(0);
406 }
407
408 static int
409 xdisk_detach(struct xdisk_attach_ioctl *xaioc)
410 {
411         struct xa_softc *xa;
412
413         lwkt_gettoken(&xdisk_token);
414         for (;;) {
415                 TAILQ_FOREACH(xa, &xa_queue, entry) {
416                         if (strcmp(xa->iocom.auto_lnk_conn.fs_label,
417                                    xaioc->fs_label) == 0) {
418                                 break;
419                         }
420                 }
421                 if (xa == NULL || xa->serializing == 0) {
422                         if (xa) xa->serializing = 1;    /* NULL if no match found */
423                         break;
424                 }
425                 tsleep(xa, 0, "xadet", hz / 10);
426         }
427         if (xa) {
428                 kdmsg_iocom_uninit(&xa->iocom);
429                 xa->serializing = 0;
430         }
431         lwkt_reltoken(&xdisk_token);
432         return(0);
433 }
434
435 /*
436  * Called from iocom core transmit thread upon disconnect.
437  */
438 static
439 void
440 xa_exit(kdmsg_iocom_t *iocom)
441 {
442         struct xa_softc *xa = iocom->handle;
443
444         lwkt_gettoken(&xa->tok);
445         lwkt_gettoken(&xdisk_token);
446
447         /*
448          * We must wait for any I/O's to complete to ensure that all
449          * state structure references are cleaned up before returning.
450          */
451         xa->attached = -1;      /* force deferral or failure */
452         while (TAILQ_FIRST(&xa->tag_pendq)) {
453                 tsleep(xa, 0, "xabiow", hz / 10);
454         }
455
456         /*
457          * All serializing code checks for de-initialization so only
458          * do it if we aren't already serializing.
459          */
460         if (xa->serializing == 0) {
461                 xa->serializing = 1;
462                 kdmsg_iocom_uninit(iocom);
463                 xa->serializing = 0;
464         }
465
466         /*
467  * If the drive is not in use and no longer attached it can be
468          * destroyed.
469          */
470         xa->attached = 0;
471         xa_terminate_check(xa);
472         lwkt_reltoken(&xdisk_token);
473         lwkt_reltoken(&xa->tok);
474 }
475
476 /*
477  * Determine if we can destroy the xa_softc.
478  *
479  * Called with xdisk_token held.
480  */
481 static
482 void
483 xa_terminate_check(struct xa_softc *xa)
484 {
485         xa_tag_t *tag;
486         struct bio *bio;
487
488         if (xa->opencnt || xa->attached || xa->serializing)
489                 return;
490         xa->serializing = 1;
491         kdmsg_iocom_uninit(&xa->iocom);
492
493         /*
494          * When destroying an xa make sure all pending I/O (typically
495          * from the disk probe) is done.
496          *
497          * XXX what about new I/O initiated prior to disk_destroy().
498          */
499         while ((tag = TAILQ_FIRST(&xa->tag_pendq)) != NULL) {
500                 TAILQ_REMOVE(&xa->tag_pendq, tag, entry);
501                 if ((bio = tag->bio) != NULL) {
502                         tag->bio = NULL;
503                         bio->bio_buf->b_error = ENXIO;
504                         bio->bio_buf->b_flags |= B_ERROR;
505                         biodone(bio);
506                 }
507                 TAILQ_INSERT_TAIL(&xa->tag_freeq, tag, entry);
508         }
509         if (xa->dev) {
510                 disk_destroy(&xa->disk);
511                 xa->dev->si_drv1 = NULL;
512                 xa->dev = NULL;
513         }
514         KKASSERT(xa->opencnt == 0 && xa->attached == 0);
515         while ((tag = TAILQ_FIRST(&xa->tag_freeq)) != NULL) {
516                 TAILQ_REMOVE(&xa->tag_freeq, tag, entry);
517                 tag->xa = NULL;
518                 kfree(tag, M_XDISK);
519         }
520         KKASSERT(TAILQ_EMPTY(&xa->tag_pendq));
521         TAILQ_REMOVE(&xa_queue, xa, entry); /* XXX */
522         kfree(xa, M_XDISK);
523 }
524
525 /*
526  * Shim to catch and record virtual circuit events.
527  */
528 static void
529 xa_autodmsg(kdmsg_msg_t *msg)
530 {
531         xa_softc_t *xa = msg->iocom->handle;
532
533         kdmsg_circuit_t *circ;
534         kdmsg_circuit_t *cscan;
535         uint32_t xcmd;
536
537         /*
538          * Because this is just a shim we don't have a state callback for
539          * the transactions we are sniffing, so make things easier by
540          * combining the original command (from the sniffed state) with the
541          * current message's CREATE/DELETE/REPLY flags, since only the first
542          * message of a transaction carries the actual command.
543          */
544         if (msg->state) {
545                 xcmd = msg->state->icmd |
546                        (msg->any.head.cmd & (DMSGF_CREATE |
547                                              DMSGF_DELETE |
548                                              DMSGF_REPLY));
549         } else {
550                 xcmd = msg->any.head.cmd;
551         }
552
553         /*
554          * Add or remove a circuit, sorted by weight (lower numbers are
555          * better).
556          */
557         switch(xcmd) {
558         case DMSG_LNK_CIRC | DMSGF_CREATE | DMSGF_REPLY:
559                 /*
560                  * Track established circuits
561                  */
562                 circ = msg->state->any.circ;
563                 lwkt_gettoken(&xa->tok);
564                 if (circ->recorded == 0) {
565                         TAILQ_FOREACH(cscan, &xa->circq, entry) {
566                                 if (circ->weight < cscan->weight)
567                                         break;
568                         }
569                         if (cscan)
570                                 TAILQ_INSERT_BEFORE(cscan, circ, entry);
571                         else
572                                 TAILQ_INSERT_TAIL(&xa->circq, circ, entry);
573                         circ->recorded = 1;
574                 }
575
576                 /*
577                  * Restart any deferred I/O.
578                  */
579                 xa_restart_deferred(xa);
580                 lwkt_reltoken(&xa->tok);
581                 break;
582         case DMSG_LNK_CIRC | DMSGF_DELETE | DMSGF_REPLY:
583                 /*
584                  * Losing virtual circuit.  Remove the circ from contention.
585                  */
586                 circ = msg->state->any.circ;
587                 lwkt_gettoken(&xa->tok);
588                 if (circ->recorded) {
589                         TAILQ_REMOVE(&xa->circq, circ, entry);
590                         circ->recorded = 0;
591                 }
592                 xa_restart_deferred(xa);
593                 lwkt_reltoken(&xa->tok);
594                 break;
595         default:
596                 break;
597         }
598 }
599
600 static int
601 xa_rcvdmsg(kdmsg_msg_t *msg)
602 {
603         switch(msg->any.head.cmd & DMSGF_TRANSMASK) {
604         case DMSG_DBG_SHELL:
605                 /*
606                  * Execute shell command (not supported atm).
607                  *
608                  * This is a one-way packet but if not (e.g. if part of
609                  * a streaming transaction), we will have already closed
610                  * our end.
611                  */
612                 kdmsg_msg_reply(msg, DMSG_ERR_NOSUPP);
613                 break;
614         case DMSG_DBG_SHELL | DMSGF_REPLY:
615                 /*
616                  * Receive one or more replies to a shell command that we
617                  * sent.
618                  *
619                  * This is a one-way packet but if not (e.g. if part of
620                  * a streaming transaction), we will have already closed
621                  * our end.
622                  */
623                 if (msg->aux_data) {
624                         msg->aux_data[msg->aux_size - 1] = 0;
625                         kprintf("xdisk: DEBUGMSG: %s\n", msg->aux_data);
626                 }
627                 break;
628         default:
629                 /*
630                  * Unsupported LNK message received.  We only need to
631                  * reply if it's a transaction in order to close our end.
632          * Ignore any one-way messages or any further messages
633                  * associated with the transaction.
634                  *
635                  * NOTE: This case also includes DMSG_LNK_ERROR messages
636                  *       which might be one-way, replying to those would
637                  *       cause an infinite ping-pong.
638                  */
639                 if (msg->any.head.cmd & DMSGF_CREATE)
640                         kdmsg_msg_reply(msg, DMSG_ERR_NOSUPP);
641                 break;
642         }
643         return(0);
644 }
645
646
647 /************************************************************************
648  *                         XA DEVICE INTERFACE                          *
649  ************************************************************************/
650
651 static int
652 xa_open(struct dev_open_args *ap)
653 {
654         cdev_t dev = ap->a_head.a_dev;
655         xa_softc_t *xa;
656         xa_tag_t *tag;
657         kdmsg_msg_t *msg;
658         int error;
659
660         dev->si_bsize_phys = 512;
661         dev->si_bsize_best = 32768;
662
663         /*
664          * Interlock open with opencnt, wait for attachment operations
665          * to finish.
666          */
667         lwkt_gettoken(&xdisk_token);
668 again:
669         xa = dev->si_drv1;
670         if (xa == NULL) {
671                 lwkt_reltoken(&xdisk_token);
672                 return ENXIO;   /* raced destruction */
673         }
674         if (xa->serializing) {
675                 tsleep(xa, 0, "xarace", hz / 10);
676                 goto again;
677         }
678         if (xa->attached == 0) {
679                 lwkt_reltoken(&xdisk_token);
680                 return ENXIO;   /* raced destruction */
681         }
682
683         /*
684          * Serialize initial open
685          */
686         if (xa->opencnt++ > 0) {
687                 lwkt_reltoken(&xdisk_token);
688                 return(0);
689         }
690         xa->serializing = 1;
691         lwkt_reltoken(&xdisk_token);
692
693         tag = xa_setup_cmd(xa, NULL);
694         if (tag == NULL) {
695                 lwkt_gettoken(&xdisk_token);
696                 KKASSERT(xa->opencnt > 0);
697                 --xa->opencnt;
698                 xa->serializing = 0;
699                 xa_terminate_check(xa);
700                 lwkt_reltoken(&xdisk_token);
701                 return(ENXIO);
702         }
703         msg = kdmsg_msg_alloc(&xa->iocom, tag->circ,
704                               DMSG_BLK_OPEN | DMSGF_CREATE,
705                               xa_sync_completion, tag);
706         msg->any.blk_open.modes = DMSG_BLKOPEN_RD | DMSG_BLKOPEN_WR;
707         xa_start(tag, msg);
708         if (xa_wait(tag, 0) == 0) {
709                 xa->keyid = tag->status.keyid;
710                 xa->opentag = tag;      /* leave tag open */
711                 xa->serializing = 0;
712                 error = 0;
713         } else {
714                 xa_done(tag, 0);
715                 lwkt_gettoken(&xdisk_token);
716                 KKASSERT(xa->opencnt > 0);
717                 --xa->opencnt;
718                 xa->serializing = 0;
719                 xa_terminate_check(xa);
720                 lwkt_reltoken(&xdisk_token);
721                 error = ENXIO;
722         }
723         return (error);
724 }
725
726 static int
727 xa_close(struct dev_close_args *ap)
728 {
729         cdev_t dev = ap->a_head.a_dev;
730         xa_softc_t *xa;
731         xa_tag_t *tag;
732
733         xa = dev->si_drv1;
734         if (xa == NULL)
735                 return ENXIO;   /* raced destruction */
736
737         lwkt_gettoken(&xa->tok);
738         if ((tag = xa->opentag) != NULL) {
739                 xa->opentag = NULL;
740                 kdmsg_state_reply(tag->state, 0);
741                 while (tag->done == 0)
742                         xa_wait(tag, tag->waitseq);
743                 xa_done(tag, 0);
744         }
745         lwkt_reltoken(&xa->tok);
746
747         lwkt_gettoken(&xdisk_token);
748         KKASSERT(xa->opencnt > 0);
749         --xa->opencnt;
750         xa_terminate_check(xa);
751         lwkt_reltoken(&xdisk_token);
752
753         return(0);
754 }
755
756 static int
757 xa_strategy(struct dev_strategy_args *ap)
758 {
759         xa_softc_t *xa = ap->a_head.a_dev->si_drv1;
760         xa_tag_t *tag;
761         struct bio *bio = ap->a_bio;
762
763         /*
764          * Allow potentially temporary link failures to fail the I/Os
765          * only if the device is not open.  That is, we allow the disk
766          * probe code prior to mount to fail.
767          */
768         if (xa->attached == 0 && xa->opencnt == 0) {
769                 bio->bio_buf->b_error = ENXIO;
770                 bio->bio_buf->b_flags |= B_ERROR;
771                 biodone(bio);
772                 return(0);
773         }
774
775         tag = xa_setup_cmd(xa, bio);
776         if (tag)
777                 xa_start(tag, NULL);
778         return(0);
779 }
780
781 static int
782 xa_ioctl(struct dev_ioctl_args *ap)
783 {
784         return(ENOTTY);
785 }
786
787 static int
788 xa_size(struct dev_psize_args *ap)
789 {
790         struct xa_softc *xa;
791
792         if ((xa = ap->a_head.a_dev->si_drv1) == NULL)
793                 return (ENXIO);
794         ap->a_result = xa->info.d_media_blocks;
795         return (0);
796 }
797
798 /************************************************************************
799  *                  XA BLOCK PROTOCOL STATE MACHINE                     *
800  ************************************************************************
801  *
802  * Implement tag/msg setup and related functions.
803  */
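/*
 * Dispatch overview: xa_setup_cmd() picks the first circuit on circq that
 * has not been marked lost (xa_autodmsg() keeps the list sorted by weight),
 * pulls a tag from tag_freeq, holds the circuit, and moves the tag to
 * tag_pendq.  If no tag or live circuit is available a bio is deferred on
 * bioq instead.  xa_done() either chains the tag directly to the next
 * deferred bio or drops the circuit and returns the tag to tag_freeq.
 */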
804 static xa_tag_t *
805 xa_setup_cmd(xa_softc_t *xa, struct bio *bio)
806 {
807         kdmsg_circuit_t *circ;
808         xa_tag_t *tag;
809
810         /*
811          * Only get a tag if we have a valid virtual circuit to the server.
812          */
813         lwkt_gettoken(&xa->tok);
814         TAILQ_FOREACH(circ, &xa->circq, entry) {
815                 if (circ->lost == 0)
816                         break;
817         }
818         if (circ == NULL || xa->attached <= 0) {
819                 tag = NULL;
820         } else if ((tag = TAILQ_FIRST(&xa->tag_freeq)) != NULL) {
821                 TAILQ_REMOVE(&xa->tag_freeq, tag, entry);
822                 tag->bio = bio;
823                 tag->circ = circ;
824                 kdmsg_circ_hold(circ);
825                 TAILQ_INSERT_TAIL(&xa->tag_pendq, tag, entry);
826         }
827
828         /*
829          * If we can't dispatch now and this is a bio, queue it for later.
830          */
831         if (tag == NULL && bio) {
832                 TAILQ_INSERT_TAIL(&xa->bioq, bio, bio_act);
833         }
834         lwkt_reltoken(&xa->tok);
835
836         return (tag);
837 }
838
839 static void
840 xa_start(xa_tag_t *tag, kdmsg_msg_t *msg)
841 {
842         xa_softc_t *xa = tag->xa;
843
844         if (msg == NULL) {
845                 struct bio *bio;
846                 struct buf *bp;
847
848                 KKASSERT(tag->bio);
849                 bio = tag->bio;
850                 bp = bio->bio_buf;
851
852                 switch(bp->b_cmd) {
853                 case BUF_CMD_READ:
854                         msg = kdmsg_msg_alloc(&xa->iocom, tag->circ,
855                                               DMSG_BLK_READ |
856                                               DMSGF_CREATE | DMSGF_DELETE,
857                                               xa_bio_completion, tag);
858                         msg->any.blk_read.keyid = xa->keyid;
859                         msg->any.blk_read.offset = bio->bio_offset;
860                         msg->any.blk_read.bytes = bp->b_bcount;
861                         break;
862                 case BUF_CMD_WRITE:
863                         msg = kdmsg_msg_alloc(&xa->iocom, tag->circ,
864                                               DMSG_BLK_WRITE |
865                                               DMSGF_CREATE | DMSGF_DELETE,
866                                               xa_bio_completion, tag);
867                         msg->any.blk_write.keyid = xa->keyid;
868                         msg->any.blk_write.offset = bio->bio_offset;
869                         msg->any.blk_write.bytes = bp->b_bcount;
870                         msg->aux_data = bp->b_data;
871                         msg->aux_size = bp->b_bcount;
872                         break;
873                 case BUF_CMD_FLUSH:
874                         msg = kdmsg_msg_alloc(&xa->iocom, tag->circ,
875                                               DMSG_BLK_FLUSH |
876                                               DMSGF_CREATE | DMSGF_DELETE,
877                                               xa_bio_completion, tag);
878                         msg->any.blk_flush.keyid = xa->keyid;
879                         msg->any.blk_flush.offset = bio->bio_offset;
880                         msg->any.blk_flush.bytes = bp->b_bcount;
881                         break;
882                 case BUF_CMD_FREEBLKS:
883                         msg = kdmsg_msg_alloc(&xa->iocom, tag->circ,
884                                               DMSG_BLK_FREEBLKS |
885                                               DMSGF_CREATE | DMSGF_DELETE,
886                                               xa_bio_completion, tag);
887                         msg->any.blk_freeblks.keyid = xa->keyid;
888                         msg->any.blk_freeblks.offset = bio->bio_offset;
889                         msg->any.blk_freeblks.bytes = bp->b_bcount;
890                         break;
891                 default:
892                         bp->b_flags |= B_ERROR;
893                         bp->b_error = EIO;
894                         biodone(bio);
895                         tag->bio = NULL;
896                         break;
897                 }
898         }
899
900         tag->done = 0;
901         tag->waitseq = 0;
902         if (msg) {
903                 tag->state = msg->state;
904                 kdmsg_msg_write(msg);
905         } else {
906                 xa_done(tag, 1);
907         }
908 }
909
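/*
 * Wait for the next streaming reply on (tag).  The caller passes the
 * waitseq value it last observed; xa_sync_completion() bumps tag->waitseq
 * and wakes the tag whenever a reply or transaction termination is
 * received.  Returns the error code from the last recorded status.
 */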
910 static uint32_t
911 xa_wait(xa_tag_t *tag, int seq)
912 {
913         xa_softc_t *xa = tag->xa;
914
915         lwkt_gettoken(&xa->tok);
916         while (tag->waitseq == seq)
917                 tsleep(tag, 0, "xawait", 0);
918         lwkt_reltoken(&xa->tok);
919         return (tag->status.head.error);
920 }
921
922 static void
923 xa_done(xa_tag_t *tag, int wasbio)
924 {
925         xa_softc_t *xa = tag->xa;
926         struct bio *bio;
927
928         KKASSERT(tag->bio == NULL);
929         tag->done = 1;
930         tag->state = NULL;
931
932         lwkt_gettoken(&xa->tok);
933         if (wasbio && (bio = TAILQ_FIRST(&xa->bioq)) != NULL) {
934                 TAILQ_REMOVE(&xa->bioq, bio, bio_act);
935                 tag->bio = bio;
936                 lwkt_reltoken(&xa->tok);
937                 xa_start(tag, NULL);
938         } else {
939                 if (tag->circ) {
940                         kdmsg_circ_drop(tag->circ);
941                         tag->circ = NULL;
942                 }
943                 TAILQ_REMOVE(&xa->tag_pendq, tag, entry);
944                 TAILQ_INSERT_TAIL(&xa->tag_freeq, tag, entry);
945                 lwkt_reltoken(&xa->tok);
946         }
947 }
948
949 static int
950 xa_sync_completion(kdmsg_state_t *state, kdmsg_msg_t *msg)
951 {
952         xa_tag_t *tag = state->any.any;
953         xa_softc_t *xa = tag->xa;
954
955         switch(msg->any.head.cmd & DMSGF_CMDSWMASK) {
956         case DMSG_LNK_ERROR | DMSGF_REPLY:
957                 bzero(&tag->status, sizeof(tag->status));
958                 tag->status.head = msg->any.head;
959                 break;
960         case DMSG_BLK_ERROR | DMSGF_REPLY:
961                 tag->status = msg->any.blk_error;
962                 break;
963         }
964         lwkt_gettoken(&xa->tok);
965         if (msg->any.head.cmd & DMSGF_DELETE) { /* receive termination */
966                 if (xa->opentag == tag) {
967                         xa->opentag = NULL;     /* XXX */
968                         kdmsg_state_reply(tag->state, 0);
969                         xa_done(tag, 0);
970                         lwkt_reltoken(&xa->tok);
971                         return(0);
972                 } else {
973                         tag->done = 1;
974                 }
975         }
976         ++tag->waitseq;
977         lwkt_reltoken(&xa->tok);
978
979         wakeup(tag);
980
981         return (0);
982 }
983
984 static int
985 xa_bio_completion(kdmsg_state_t *state, kdmsg_msg_t *msg)
986 {
987         xa_tag_t *tag = state->any.any;
988         xa_softc_t *xa = tag->xa;
989         struct bio *bio;
990         struct buf *bp;
991
992         /*
993          * Get the bio from the tag.  If no bio is present we just do
994          * 'done' handling.
995          */
996         if ((bio = tag->bio) == NULL)
997                 goto handle_done;
998         bp = bio->bio_buf;
999
1000         /*
1001          * Process return status
1002          */
1003         switch(msg->any.head.cmd & DMSGF_CMDSWMASK) {
1004         case DMSG_LNK_ERROR | DMSGF_REPLY:
1005                 bzero(&tag->status, sizeof(tag->status));
1006                 tag->status.head = msg->any.head;
1007                 if (tag->status.head.error)
1008                         tag->status.resid = bp->b_bcount;
1009                 else
1010                         tag->status.resid = 0;
1011                 break;
1012         case DMSG_BLK_ERROR | DMSGF_REPLY:
1013                 tag->status = msg->any.blk_error;
1014                 break;
1015         }
1016
1017         /*
1018          * Potentially move the bio back onto the pending queue if the
1019          * device is open and the error is related to losing the virtual
1020          * circuit.
1021          */
1022         if (tag->status.head.error &&
1023             (msg->any.head.cmd & DMSGF_DELETE) && xa->opencnt) {
1024                 if (tag->status.head.error == DMSG_ERR_LOSTLINK ||
1025                     tag->status.head.error == DMSG_ERR_CANTCIRC) {
1026                         goto handle_repend;
1027                 }
1028         }
1029
1030         /*
1031          * Process bio completion
1032          *
1033          * For reads any returned data is zero-extended if necessary, so
1034          * the server can short-cut any all-zeros reads if it desires.
1035          */
1036         switch(bp->b_cmd) {
1037         case BUF_CMD_READ:
1038                 if (msg->aux_data && msg->aux_size) {
1039                         if (msg->aux_size < bp->b_bcount) {
1040                                 bcopy(msg->aux_data, bp->b_data, msg->aux_size);
1041                                 bzero(bp->b_data + msg->aux_size,
1042                                       bp->b_bcount - msg->aux_size);
1043                         } else {
1044                                 bcopy(msg->aux_data, bp->b_data, bp->b_bcount);
1045                         }
1046                 } else {
1047                         bzero(bp->b_data, bp->b_bcount);
1048                 }
1049                 /* fall through */
1050         case BUF_CMD_WRITE:
1051         case BUF_CMD_FLUSH:
1052         case BUF_CMD_FREEBLKS:
1053         default:
1054                 if (tag->status.resid > bp->b_bcount)
1055                         tag->status.resid = bp->b_bcount;
1056                 bp->b_resid = tag->status.resid;
1057                 if ((bp->b_error = tag->status.head.error) != 0) {
1058                         bp->b_flags |= B_ERROR;
1059                 } else {
1060                         bp->b_resid = 0;
1061                 }
1062                 biodone(bio);
1063                 tag->bio = NULL;
1064                 break;
1065         }
1066
1067         /*
1068          * Handle completion of the transaction.  If the bioq is not empty
1069          * we can initiate another bio on the same tag.
1070          *
1071          * NOTE: Most of our transactions will be single-message
1072          *       CREATE+DELETEs, so we won't have to terminate the
1073  *       transaction separately here.  But just in case they
1074  *       aren't, be sure to terminate the transaction.
1075          */
1076 handle_done:
1077         if (msg->any.head.cmd & DMSGF_DELETE) {
1078                 xa_done(tag, 1);
1079                 if ((state->txcmd & DMSGF_DELETE) == 0)
1080                         kdmsg_msg_reply(msg, 0);
1081         }
1082         return (0);
1083
1084         /*
1085          * Handle the case where the transaction failed due to a
1086          * connectivity issue.  The tag is put away with wasbio=0
1087          * and we restart the bio.
1088          *
1089          * Setting circ->lost causes xa_setup_cmd() to skip the circuit.
1090          * Other circuits might still be live.  Once a circuit gets messed
1091          * up it will (eventually) be deleted so we can simply leave (lost)
1092          * set forever after.
1093          */
1094 handle_repend:
1095         lwkt_gettoken(&xa->tok);
1096         kprintf("BIO CIRC FAILURE, REPEND BIO %p\n", bio);
1097         tag->circ->lost = 1;
1098         tag->bio = NULL;
1099         xa_done(tag, 0);
1100         if ((state->txcmd & DMSGF_DELETE) == 0)
1101                 kdmsg_msg_reply(msg, 0);
1102
1103         /*
1104          * Restart or requeue the bio
1105          */
1106         tag = xa_setup_cmd(xa, bio);
1107         if (tag)
1108                 xa_start(tag, NULL);
1109         lwkt_reltoken(&xa->tok);
1110         return (0);
1111 }
1112
1113 /*
1114  * Restart as much deferred I/O as we can.
1115  *
1116  * Called with xa->tok held
1117  */
1118 static
1119 void
1120 xa_restart_deferred(xa_softc_t *xa)
1121 {
1122         struct bio *bio;
1123         xa_tag_t *tag;
1124
1125         while ((bio = TAILQ_FIRST(&xa->bioq)) != NULL) {
1126                 tag = xa_setup_cmd(xa, NULL);
1127                 if (tag == NULL)
1128                         break;
1129                 TAILQ_REMOVE(&xa->bioq, bio, bio_act);
1130                 tag->bio = bio;
1131                 xa_start(tag, NULL);
1132         }
1133 }