2 * Copyright (c) 2012-2014 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@dragonflybsd.org>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * This module allows disk devices to be created and associated with a
36 * communications pipe or socket. You open the device and issue an
37 * ioctl() to install a new disk along with its communications descriptor.
39 * All further communication occurs via the descriptor using the DMSG
40 * LNK_CONN, LNK_SPAN, and BLOCK protocols. The descriptor can be a
41 * direct connection to a remote machine's disk (in-kernel), to a remote
42 * cluster controller, to the local cluster controller, etc.
44 * /dev/xdisk is the control device; issue ioctl()s to create the /dev/xa%d
45 * devices. These devices look like raw disks to the system.
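 *
 * Illustrative userland sketch (an assumption, not part of this driver):
 * attach a DMSG descriptor via the /dev/xdisk control device.  The ioctl
 * command names XDISKIOCATTACH/XDISKIOCDETACH and everything in struct
 * xdisk_attach_ioctl other than the 'fd' field used below are assumptions;
 * see <sys/xdiskioctl.h> for the authoritative definitions.
 */
#if 0	/* userland example, never compiled here */
#include <sys/ioctl.h>
#include <sys/xdiskioctl.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int
xdisk_attach_fd(int dmsg_fd)
{
	struct xdisk_attach_ioctl xaioc;
	int ctlfd;
	int rc;

	memset(&xaioc, 0, sizeof(xaioc));
	xaioc.fd = dmsg_fd;	/* descriptor speaking the DMSG protocols */

	ctlfd = open("/dev/xdisk", O_RDWR);	/* control device */
	if (ctlfd < 0)
		return (-1);
	rc = ioctl(ctlfd, XDISKIOCATTACH, &xaioc);	/* assumed ioctl name */
	close(ctlfd);
	return (rc);	/* disks appear as /dev/xa%d once LNK_SPANs arrive */
}
#endif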
47 #include <sys/param.h>
48 #include <sys/systm.h>
51 #include <sys/device.h>
52 #include <sys/devicestat.h>
54 #include <sys/kernel.h>
55 #include <sys/malloc.h>
56 #include <sys/sysctl.h>
58 #include <sys/queue.h>
62 #include <sys/kern_syscall.h>
65 #include <sys/xdiskioctl.h>
68 #include <sys/thread2.h>
72 RB_HEAD(xa_softc_tree, xa_softc);
73 RB_PROTOTYPE(xa_softc_tree, xa_softc, rbnode, xa_softc_cmp);
76 SYSCTL_INT(_debug, OID_AUTO, xa_active, CTLFLAG_RW, &xa_active, 0,
77 "Number of active xdisk IOs");
78 static uint64_t xa_last;
79 SYSCTL_ULONG(_debug, OID_AUTO, xa_last, CTLFLAG_RW, &xa_last, 0,
80 "Offset of last xdisk IO");
81 static int xa_debug = 1;
82 SYSCTL_INT(_debug, OID_AUTO, xa_debug, CTLFLAG_RW, &xa_debug, 0,
    "Enable debugging");
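/*
 * Illustrative userland sketch (an assumption, not part of this driver):
 * the sysctls declared above live under the debug tree and can be read
 * with sysctlbyname(3), e.g. debug.xa_active and debug.xa_last.
 */
#if 0	/* userland example, never compiled here */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	int active;
	uint64_t last;
	size_t len;

	len = sizeof(active);
	if (sysctlbyname("debug.xa_active", &active, &len, NULL, 0) == 0)
		printf("active xdisk IOs: %d\n", active);
	len = sizeof(last);
	if (sysctlbyname("debug.xa_last", &last, &len, NULL, 0) == 0)
		printf("last xdisk IO offset: %ju\n", (uintmax_t)last);
	return (0);
}
#endif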
89 TAILQ_ENTRY(xa_tag) entry;
91 dmsg_blk_error_t status;
99 typedef struct xa_tag xa_tag_t;
105 struct kdmsg_state_list spanq;
106 RB_ENTRY(xa_softc) rbnode;
108 struct devstat stats;
109 struct disk_info info;
119 char cl_label[64]; /* from LNK_SPAN cl_label (host/dev) */
120 char fs_label[64]; /* from LNK_SPAN fs_label (serno str) */
122 TAILQ_HEAD(, bio) bioq; /* pending BIOs */
123 TAILQ_HEAD(, xa_tag) tag_freeq; /* available I/O tags */
124 TAILQ_HEAD(, xa_tag) tag_pendq; /* running I/O tags */
128 typedef struct xa_softc xa_softc_t;
131 TAILQ_ENTRY(xa_iocom) entry;
136 typedef struct xa_iocom xa_iocom_t;
138 static int xa_softc_cmp(xa_softc_t *sc1, xa_softc_t *sc2);
139 RB_GENERATE(xa_softc_tree, xa_softc, rbnode, xa_softc_cmp);
140 static struct xa_softc_tree xa_device_tree;
142 #define MAXTAGS 64 /* no real limit */
144 static int xdisk_attach(struct xdisk_attach_ioctl *xaioc);
145 static int xdisk_detach(struct xdisk_attach_ioctl *xaioc);
146 static void xaio_exit(kdmsg_iocom_t *iocom);
147 static int xaio_rcvdmsg(kdmsg_msg_t *msg);
149 static void xa_terminate_check(struct xa_softc *sc);
151 static xa_tag_t *xa_setup_cmd(xa_softc_t *sc, struct bio *bio);
152 static void xa_start(xa_tag_t *tag, kdmsg_msg_t *msg, int async);
153 static void xa_done(xa_tag_t *tag, int wasbio);
154 static void xa_release(xa_tag_t *tag, int wasbio);
155 static uint32_t xa_wait(xa_tag_t *tag);
156 static int xa_sync_completion(kdmsg_state_t *state, kdmsg_msg_t *msg);
157 static int xa_bio_completion(kdmsg_state_t *state, kdmsg_msg_t *msg);
158 static void xa_restart_deferred(xa_softc_t *sc);
160 #define xa_printf(level, ctl, ...)	\
161 do { if (xa_debug >= (level)) kprintf("xdisk: " ctl, __VA_ARGS__); } while (0)
163 MALLOC_DEFINE(M_XDISK, "Networked disk client", "Network Disks");
166 * Control device; issue ioctls to create xa devices.
168 static d_open_t xdisk_open;
169 static d_close_t xdisk_close;
170 static d_ioctl_t xdisk_ioctl;
172 static struct dev_ops xdisk_ops = {
173 { "xdisk", 0, D_MPSAFE | D_TRACKCLOSE },
174 .d_open = xdisk_open,
175 .d_close = xdisk_close,
176 .d_ioctl = xdisk_ioctl
182 static d_open_t xa_open;
183 static d_close_t xa_close;
184 static d_ioctl_t xa_ioctl;
185 static d_strategy_t xa_strategy;
186 static d_psize_t xa_size;
188 static struct dev_ops xa_ops = {
189 { "xa", 0, D_DISK | D_CANFREE | D_MPSAFE | D_TRACKCLOSE },
194 .d_write = physwrite,
195 .d_strategy = xa_strategy,
199 static int xdisk_opencount;
200 static cdev_t xdisk_dev;
201 struct lock xdisk_lk;
202 static TAILQ_HEAD(, xa_iocom) xaiocomq;
205 * Module initialization
208 xdisk_modevent(module_t mod, int type, void *data)
212 TAILQ_INIT(&xaiocomq);
213 RB_INIT(&xa_device_tree);
214 lockinit(&xdisk_lk, "xdisk", 0, 0);
215 xdisk_dev = make_dev(&xdisk_ops, 0,
216 UID_ROOT, GID_WHEEL, 0600, "xdisk");
220 if (!RB_EMPTY(&xa_device_tree))
222 if (xdisk_opencount || TAILQ_FIRST(&xaiocomq))
225 destroy_dev(xdisk_dev);
228 dev_ops_remove_all(&xdisk_ops);
229 dev_ops_remove_all(&xa_ops);
237 DEV_MODULE(xdisk, xdisk_modevent, 0);
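/*
 * RB-tree comparator: xa_softc nodes are keyed by fs_label, i.e. the
 * serial number string advertised in the LNK_SPAN.
 */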
240 xa_softc_cmp(xa_softc_t *sc1, xa_softc_t *sc2)
242 return(strcmp(sc1->fs_label, sc2->fs_label));
249 xdisk_open(struct dev_open_args *ap)
251 lockmgr(&xdisk_lk, LK_EXCLUSIVE);
253 lockmgr(&xdisk_lk, LK_RELEASE);
258 xdisk_close(struct dev_close_args *ap)
260 lockmgr(&xdisk_lk, LK_EXCLUSIVE);
262 lockmgr(&xdisk_lk, LK_RELEASE);
267 xdisk_ioctl(struct dev_ioctl_args *ap)
273 error = xdisk_attach((void *)ap->a_data);
276 error = xdisk_detach((void *)ap->a_data);
285 /************************************************************************
287 ************************************************************************/
290 xdisk_attach(struct xdisk_attach_ioctl *xaioc)
296 * Normalize ioctl params
298 fp = holdfp(curproc->p_fd, xaioc->fd, -1);
301 xa_printf(1, "xdisk_attach fp=%p\n", fp);
304 * See if the serial number is already present. If we are
305 * racing a termination, the disk subsystem may still have
306 * duplicate entries not yet removed, so we wait a bit and
309 lockmgr(&xdisk_lk, LK_EXCLUSIVE);
311 xaio = kmalloc(sizeof(*xaio), M_XDISK, M_WAITOK | M_ZERO);
312 kdmsg_iocom_init(&xaio->iocom, xaio,
313 KDMSG_IOCOMF_AUTOCONN,
314 M_XDISK, xaio_rcvdmsg);
315 xaio->iocom.exit_func = xaio_exit;
317 kdmsg_iocom_reconnect(&xaio->iocom, fp, "xdisk");
320 * Set up our LNK_CONN advertisement for autoinitiate.
322 * Our filter is set up to only accept PEER_BLOCK/SERVER
325 * We need a unique pfs_fsid to avoid confusion.
327 xaio->iocom.auto_lnk_conn.pfs_type = DMSG_PFSTYPE_CLIENT;
328 xaio->iocom.auto_lnk_conn.proto_version = DMSG_SPAN_PROTO_1;
329 xaio->iocom.auto_lnk_conn.peer_type = DMSG_PEER_BLOCK;
330 xaio->iocom.auto_lnk_conn.peer_mask = 1LLU << DMSG_PEER_BLOCK;
331 xaio->iocom.auto_lnk_conn.pfs_mask = 1LLU << DMSG_PFSTYPE_SERVER;
332 ksnprintf(xaio->iocom.auto_lnk_conn.fs_label,
333 sizeof(xaio->iocom.auto_lnk_conn.fs_label),
335 kern_uuidgen(&xaio->iocom.auto_lnk_conn.pfs_fsid, 1);
338 * Set up our LNK_SPAN advertisement for autoinitiate
340 TAILQ_INSERT_TAIL(&xaiocomq, xaio, entry);
341 kdmsg_iocom_autoinitiate(&xaio->iocom, NULL);
343 lockmgr(&xdisk_lk, LK_RELEASE);
349 xdisk_detach(struct xdisk_attach_ioctl *xaioc)
355 * Called from iocom core transmit thread upon disconnect.
359 xaio_exit(kdmsg_iocom_t *iocom)
361 xa_iocom_t *xaio = iocom->handle;
363 lockmgr(&xdisk_lk, LK_EXCLUSIVE);
364 xa_printf(1, "%s", "xdisk_detach [xaio_exit()]\n");
365 TAILQ_REMOVE(&xaiocomq, xaio, entry);
366 lockmgr(&xdisk_lk, LK_RELEASE);
368 kdmsg_iocom_uninit(&xaio->iocom);
370 kfree(xaio, M_XDISK);
374 * Called from iocom core to handle messages that the iocom core does not
375 * handle itself and for which a state function callback has not yet been
378 * We primarily care about LNK_SPAN transactions here.
381 xaio_rcvdmsg(kdmsg_msg_t *msg)
383 kdmsg_state_t *state = msg->state;
384 xa_iocom_t *xaio = state->iocom->handle;
389 "xdisk - rcvmsg state=%p rx=%08x tx=%08x msgcmd=%08x\n",
390 state, state->rxcmd, state->txcmd,
393 lockmgr(&xdisk_lk, LK_EXCLUSIVE);
396 case DMSG_LNK_SPAN | DMSGF_CREATE | DMSGF_DELETE:
398 * A LNK_SPAN transaction which is opened and closed
399 * degenerately is not useful to us; just ignore it.
401 kdmsg_msg_reply(msg, 0);
403 case DMSG_LNK_SPAN | DMSGF_CREATE:
405 * Manage the tracking node for the remote LNK_SPAN.
407 * Return a streaming result, leaving the transaction open
408 * in both directions to allow sub-transactions.
410 bcopy(msg->any.lnk_span.cl_label, xaio->dummysc.cl_label,
411 sizeof(xaio->dummysc.cl_label));
412 xaio->dummysc.cl_label[sizeof(xaio->dummysc.cl_label) - 1] = 0;
414 bcopy(msg->any.lnk_span.fs_label, xaio->dummysc.fs_label,
415 sizeof(xaio->dummysc.fs_label));
416 xaio->dummysc.fs_label[sizeof(xaio->dummysc.fs_label) - 1] = 0;
418 xa_printf(3, "LINK_SPAN state %p create for %s\n",
419 msg->state, msg->any.lnk_span.fs_label);
421 sc = RB_FIND(xa_softc_tree, &xa_device_tree, &xaio->dummysc);
429 sc = kmalloc(sizeof(*sc), M_XDISK, M_WAITOK | M_ZERO);
430 bcopy(msg->any.lnk_span.cl_label, sc->cl_label,
431 sizeof(sc->cl_label));
432 sc->cl_label[sizeof(sc->cl_label) - 1] = 0;
433 bcopy(msg->any.lnk_span.fs_label, sc->fs_label,
434 sizeof(sc->fs_label));
435 sc->fs_label[sizeof(sc->fs_label) - 1] = 0;
437 /* XXX FIXME O(N^2) */
441 RB_FOREACH(sctmp, xa_softc_tree,
443 if (sctmp->unit == unit)
451 lockinit(&sc->lk, "xalk", 0, 0);
452 TAILQ_INIT(&sc->spanq);
453 TAILQ_INIT(&sc->bioq);
454 TAILQ_INIT(&sc->tag_freeq);
455 TAILQ_INIT(&sc->tag_pendq);
457 lockmgr(&sc->lk, LK_EXCLUSIVE);
458 RB_INSERT(xa_softc_tree, &xa_device_tree, sc);
459 TAILQ_INSERT_TAIL(&sc->spanq, msg->state, user_entry);
460 msg->state->any.xa_sc = sc;
465 for (n = 0; n < MAXTAGS; ++n) {
466 tag = kmalloc(sizeof(*tag),
467 M_XDISK, M_WAITOK|M_ZERO);
469 TAILQ_INSERT_TAIL(&sc->tag_freeq, tag, entry);
472 if (sc->dev == NULL) {
473 dev = disk_create(unit, &sc->disk, &xa_ops);
476 devstat_add_entry(&sc->stats, "xa", unit,
478 DEVSTAT_NO_ORDERED_TAGS,
479 DEVSTAT_TYPE_DIRECT |
480 DEVSTAT_TYPE_IF_OTHER,
481 DEVSTAT_PRIORITY_OTHER);
484 sc->info.d_media_blksize =
485 msg->any.lnk_span.media.block.blksize;
486 if (sc->info.d_media_blksize <= 0)
487 sc->info.d_media_blksize = 1;
488 sc->info.d_media_blocks =
489 msg->any.lnk_span.media.block.bytes /
490 sc->info.d_media_blksize;
491 sc->info.d_dsflags = DSO_MBRQUIET | DSO_RAWPSIZE;
492 sc->info.d_secpertrack = 32;
493 sc->info.d_nheads = 64;
494 sc->info.d_secpercyl = sc->info.d_secpertrack *
496 sc->info.d_ncylinders = 0;
498 sc->info.d_serialno = sc->fs_label;
500 * WARNING! disk_setdiskinfo() must be asynchronous
501 * because we are in the rxmsg thread. If
502 * it is synchronous and issues more disk
503 * I/Os, we will deadlock.
505 disk_setdiskinfo(&sc->disk, &sc->info);
506 xa_restart_deferred(sc); /* eats serializing */
507 lockmgr(&sc->lk, LK_RELEASE);
509 lockmgr(&sc->lk, LK_EXCLUSIVE);
511 TAILQ_INSERT_TAIL(&sc->spanq, msg->state, user_entry);
512 msg->state->any.xa_sc = sc;
513 if (sc->serializing == 0 && sc->open_tag == NULL) {
515 xa_restart_deferred(sc); /* eats serializing */
517 lockmgr(&sc->lk, LK_RELEASE);
518 if (sc->dev && sc->dev->si_disk) {
519 xa_printf(1, "reprobe disk: %s\n",
521 disk_msg_send(DISK_DISK_REPROBE,
526 xa_printf(2, "sc %p spancnt %d\n", sc, sc->spancnt);
527 kdmsg_msg_result(msg, 0);
529 case DMSG_LNK_SPAN | DMSGF_DELETE:
531 * Manage the tracking node for the remote LNK_SPAN.
533 * Return a final result, closing our end of the transaction.
535 sc = msg->state->any.xa_sc;
536 xa_printf(3, "LINK_SPAN state %p delete for %s (sc=%p)\n",
537 msg->state, (sc ? sc->fs_label : "(null)"), sc);
538 lockmgr(&sc->lk, LK_EXCLUSIVE);
539 msg->state->any.xa_sc = NULL;
540 TAILQ_REMOVE(&sc->spanq, msg->state, user_entry);
543 xa_printf(2, "sc %p spancnt %d\n", sc, sc->spancnt);
546 * Spans can come and go as the graph stabilizes, so if
547 * we lose a span along with sc->open_tag we may be able
548 * to restart the I/Os on a different span.
551 sc->serializing == 0 && sc->open_tag == NULL) {
553 xa_restart_deferred(sc);
555 lockmgr(&sc->lk, LK_RELEASE);
556 kdmsg_msg_reply(msg, 0);
562 if (sc->spancnt == 0)
563 xa_terminate_check(sc);
566 case DMSG_LNK_SPAN | DMSGF_DELETE | DMSGF_REPLY:
568 * Ignore unimplemented streaming replies on our LNK_SPAN
571 xa_printf(3, "LINK_SPAN state %p delete+reply\n",
574 case DMSG_LNK_SPAN | DMSGF_REPLY:
576 * Ignore unimplemented streaming replies on our LNK_SPAN
579 xa_printf(3, "LINK_SPAN state %p reply\n",
584 * Execute shell command (not supported atm).
586 * This is a one-way packet but if not (e.g. if part of
587 * a streaming transaction), we will have already closed
590 kdmsg_msg_reply(msg, DMSG_ERR_NOSUPP);
592 case DMSG_DBG_SHELL | DMSGF_REPLY:
594 * Receive one or more replies to a shell command
595 * that we sent. Just dump it to the console.
597 * This is a one-way packet but if not (e.g. if
598 * part of a streaming transaction), we will have
599 * already closed our end.
602 msg->aux_data[msg->aux_size - 1] = 0;
603 xa_printf(0, "DEBUGMSG: %s\n", msg->aux_data);
608 * Unsupported one-way message, streaming message, or
611 * Terminate any unsupported transactions with an error
612 * and ignore any unsupported streaming messages.
614 * NOTE: This case also includes DMSG_LNK_ERROR messages
615 * which might be one-way; replying to those would
616 * cause an infinite ping-pong.
618 if (msg->any.head.cmd & DMSGF_CREATE)
619 kdmsg_msg_reply(msg, DMSG_ERR_NOSUPP);
622 lockmgr(&xdisk_lk, LK_RELEASE);
628 * Determine if we can destroy the xa_softc.
630 * Called with xdisk_lk held.
634 xa_terminate_check(struct xa_softc *sc)
639 * Determine if we can destroy the softc.
641 xa_printf(1, "Terminate check xa%d (%d,%d,%d) sc=%p ",
643 sc->opencnt, sc->serializing, sc->spancnt,
646 if (sc->opencnt || sc->serializing || sc->spancnt ||
647 TAILQ_FIRST(&sc->bioq) || TAILQ_FIRST(&sc->tag_pendq)) {
648 xa_printf(1, "%s", "(leave intact)\n");
653 * Remove from the device tree; a race with a new incoming span
654 * will create a new softc and disk.
656 RB_REMOVE(xa_softc_tree, &xa_device_tree, sc);
660 * Device has to go first to prevent device ops races.
663 disk_destroy(&sc->disk);
664 devstat_remove_entry(&sc->stats);
665 sc->dev->si_drv1 = NULL;
669 xa_printf(1, "%s", "(remove from tree)\n");
671 KKASSERT(sc->opencnt == 0);
672 KKASSERT(TAILQ_EMPTY(&sc->tag_pendq));
674 while ((tag = TAILQ_FIRST(&sc->tag_freeq)) != NULL) {
675 TAILQ_REMOVE(&sc->tag_freeq, tag, entry);
683 /************************************************************************
684 * XA DEVICE INTERFACE *
685 ************************************************************************/
688 xa_open(struct dev_open_args *ap)
690 cdev_t dev = ap->a_head.a_dev;
694 dev->si_bsize_phys = 512;
695 dev->si_bsize_best = 32768;
698 * Interlock open with opencnt, wait for attachment operations
701 lockmgr(&xdisk_lk, LK_EXCLUSIVE);
705 lockmgr(&xdisk_lk, LK_RELEASE);
706 return ENXIO; /* raced destruction */
708 if (sc->serializing) {
709 tsleep(sc, 0, "xarace", hz / 10);
712 if (sc->terminating) {
713 lockmgr(&xdisk_lk, LK_RELEASE);
714 return ENXIO; /* raced destruction */
719 * Serialize initial open
721 if (sc->opencnt++ > 0) {
724 lockmgr(&xdisk_lk, LK_RELEASE);
729 * Issue BLK_OPEN if necessary. ENXIO is returned if we have trouble.
731 if (sc->open_tag == NULL) {
732 lockmgr(&sc->lk, LK_EXCLUSIVE);
733 xa_restart_deferred(sc); /* eats serializing */
734 lockmgr(&sc->lk, LK_RELEASE);
739 lockmgr(&xdisk_lk, LK_RELEASE);
742 * Wait for completion of the BLK_OPEN
744 lockmgr(&xdisk_lk, LK_EXCLUSIVE);
745 while (sc->serializing)
746 lksleep(sc, &xdisk_lk, 0, "xaopen", hz);
748 error = sc->last_error;
750 KKASSERT(sc->opencnt > 0);
752 xa_terminate_check(sc);
753 sc = NULL; /* sc may be invalid now */
755 lockmgr(&xdisk_lk, LK_RELEASE);
761 xa_close(struct dev_close_args *ap)
763 cdev_t dev = ap->a_head.a_dev;
767 lockmgr(&xdisk_lk, LK_EXCLUSIVE);
770 lockmgr(&sc->lk, LK_RELEASE);
771 return ENXIO; /* raced destruction */
773 if (sc->terminating) {
774 lockmgr(&sc->lk, LK_RELEASE);
775 return ENXIO; /* raced destruction */
777 lockmgr(&sc->lk, LK_EXCLUSIVE);
780 * NOTE: Clearing open_tag allows a concurrent open to re-open
781 * the device and prevents autonomous completion of the tag.
783 if (sc->opencnt == 1 && sc->open_tag) {
786 lockmgr(&sc->lk, LK_RELEASE);
787 kdmsg_state_reply(tag->state, 0); /* close our side */
788 xa_wait(tag); /* wait on remote */
790 lockmgr(&sc->lk, LK_RELEASE);
792 KKASSERT(sc->opencnt > 0);
794 xa_terminate_check(sc);
795 lockmgr(&xdisk_lk, LK_RELEASE);
801 xa_strategy(struct dev_strategy_args *ap)
803 xa_softc_t *sc = ap->a_head.a_dev->si_drv1;
805 struct bio *bio = ap->a_bio;
807 devstat_start_transaction(&sc->stats);
808 atomic_add_int(&xa_active, 1);
809 xa_last = bio->bio_offset;
812 * If no tags are available NULL is returned and the bio is
813 * placed on sc->bioq.
815 lockmgr(&sc->lk, LK_EXCLUSIVE);
816 tag = xa_setup_cmd(sc, bio);
818 xa_start(tag, NULL, 1);
819 lockmgr(&sc->lk, LK_RELEASE);
825 xa_ioctl(struct dev_ioctl_args *ap)
831 xa_size(struct dev_psize_args *ap)
835 if ((sc = ap->a_head.a_dev->si_drv1) == NULL)
837 ap->a_result = sc->info.d_media_blocks;
841 /************************************************************************
842 * XA BLOCK PROTOCOL STATE MACHINE *
843 ************************************************************************
845 * Implement tag/msg setup and related functions.
846 * Called with sc->lk held.
849 xa_setup_cmd(xa_softc_t *sc, struct bio *bio)
854 * Only get a tag if we have a valid virtual circuit to the server.
856 if ((tag = TAILQ_FIRST(&sc->tag_freeq)) != NULL) {
857 TAILQ_REMOVE(&sc->tag_freeq, tag, entry);
859 TAILQ_INSERT_TAIL(&sc->tag_pendq, tag, entry);
863 * If we can't dispatch now and this is a bio, queue it for later.
865 if (tag == NULL && bio) {
866 TAILQ_INSERT_TAIL(&sc->bioq, bio, bio_act);
873 * Called with sc->lk held
876 xa_start(xa_tag_t *tag, kdmsg_msg_t *msg, int async)
878 xa_softc_t *sc = tag->sc;
882 tag->status.head.error = DMSG_ERR_IO; /* fallback error */
887 kdmsg_state_t *trans;
889 if (sc->opencnt == 0 || sc->open_tag == NULL) {
890 TAILQ_FOREACH(trans, &sc->spanq, user_entry) {
891 if ((trans->rxcmd & DMSGF_DELETE) == 0)
895 trans = sc->open_tag->state;
906 msg = kdmsg_msg_alloc(trans,
910 xa_bio_completion, tag);
911 msg->any.blk_read.keyid = sc->keyid;
912 msg->any.blk_read.offset = bio->bio_offset;
913 msg->any.blk_read.bytes = bp->b_bcount;
916 msg = kdmsg_msg_alloc(trans,
918 DMSGF_CREATE | DMSGF_DELETE,
919 xa_bio_completion, tag);
920 msg->any.blk_write.keyid = sc->keyid;
921 msg->any.blk_write.offset = bio->bio_offset;
922 msg->any.blk_write.bytes = bp->b_bcount;
923 msg->aux_data = bp->b_data;
924 msg->aux_size = bp->b_bcount;
927 msg = kdmsg_msg_alloc(trans,
929 DMSGF_CREATE | DMSGF_DELETE,
930 xa_bio_completion, tag);
931 msg->any.blk_flush.keyid = sc->keyid;
932 msg->any.blk_flush.offset = bio->bio_offset;
933 msg->any.blk_flush.bytes = bp->b_bcount;
935 case BUF_CMD_FREEBLKS:
936 msg = kdmsg_msg_alloc(trans,
938 DMSGF_CREATE | DMSGF_DELETE,
939 xa_bio_completion, tag);
940 msg->any.blk_freeblks.keyid = sc->keyid;
941 msg->any.blk_freeblks.offset = bio->bio_offset;
942 msg->any.blk_freeblks.bytes = bp->b_bcount;
945 bp->b_flags |= B_ERROR;
947 devstat_end_transaction_buf(&sc->stats, bp);
948 atomic_add_int(&xa_active, -1);
956 * If no msg was allocated we likely could not find a good span.
961 * Message was passed in or constructed.
963 tag->state = msg->state;
964 lockmgr(&sc->lk, LK_RELEASE);
965 kdmsg_msg_write(msg);
966 lockmgr(&sc->lk, LK_EXCLUSIVE);
967 } else if (tag->bio &&
968 (tag->bio->bio_buf->b_flags & B_FAILONDIS) == 0) {
970 * No spans available but BIO is not allowed to fail
971 * on connectivity problems. Requeue the BIO.
973 TAILQ_INSERT_TAIL(&sc->bioq, tag->bio, bio_act);
975 lockmgr(&sc->lk, LK_RELEASE);
977 lockmgr(&sc->lk, LK_EXCLUSIVE);
980 * No spans available, bio is allowed to fail.
982 lockmgr(&sc->lk, LK_RELEASE);
983 tag->status.head.error = DMSG_ERR_IO;
985 lockmgr(&sc->lk, LK_EXCLUSIVE);
990 xa_wait(xa_tag_t *tag)
992 xa_softc_t *sc = tag->sc;
995 lockmgr(&sc->lk, LK_EXCLUSIVE);
997 while (tag->done == 0)
998 lksleep(tag, &sc->lk, 0, "xawait", 0);
999 lockmgr(&sc->lk, LK_RELEASE);
1001 error = tag->status.head.error;
1009 xa_done(xa_tag_t *tag, int wasbio)
1011 KKASSERT(tag->bio == NULL);
1018 xa_release(tag, wasbio);
1022 * Release a tag. If everything looks ok and there are pending BIOs
1023 * (due to all tags in-use), we can use the tag to start the next BIO.
1024 * Do not try to restart if the connection is currently failed.
1028 xa_release(xa_tag_t *tag, int wasbio)
1030 xa_softc_t *sc = tag->sc;
1033 if ((bio = tag->bio) != NULL) {
1034 struct buf *bp = bio->bio_buf;
1037 bp->b_flags |= B_ERROR;
1038 devstat_end_transaction_buf(&sc->stats, bp);
1039 atomic_add_int(&xa_active, -1);
1044 lockmgr(&sc->lk, LK_EXCLUSIVE);
1046 if (wasbio && sc->open_tag &&
1047 (bio = TAILQ_FIRST(&sc->bioq)) != NULL) {
1048 TAILQ_REMOVE(&sc->bioq, bio, bio_act);
1050 xa_start(tag, NULL, 1);
1052 TAILQ_REMOVE(&sc->tag_pendq, tag, entry);
1053 TAILQ_INSERT_TAIL(&sc->tag_freeq, tag, entry);
1055 lockmgr(&sc->lk, LK_RELEASE);
1059 * Handle messages under the BLKOPEN transaction.
1062 xa_sync_completion(kdmsg_state_t *state, kdmsg_msg_t *msg)
1064 xa_tag_t *tag = state->any.any;
1069 * If the tag has been cleaned out we already closed our side
1070 * of the transaction and we are waiting for the other side to
1073 xa_printf(1, "xa_sync_completion: tag %p msg %08x state %p\n",
1074 tag, msg->any.head.cmd, msg->state);
1077 if (msg->any.head.cmd & DMSGF_CREATE)
1078 kdmsg_state_reply(state, DMSG_ERR_LOSTLINK);
1086 lockmgr(&sc->lk, LK_EXCLUSIVE);
1089 * Handle initial response to our open and restart any deferred
1092 * NOTE: DELETE may also be set.
1094 if (msg->any.head.cmd & DMSGF_CREATE) {
1095 switch(msg->any.head.cmd & DMSGF_CMDSWMASK) {
1096 case DMSG_LNK_ERROR | DMSGF_REPLY:
1097 bzero(&tag->status, sizeof(tag->status));
1098 tag->status.head = msg->any.head;
1100 case DMSG_BLK_ERROR | DMSGF_REPLY:
1101 tag->status = msg->any.blk_error;
1104 sc->last_error = tag->status.head.error;
1105 xa_printf(1, "blk_open completion status %d\n",
1107 if (sc->last_error == 0) {
1108 while ((bio = TAILQ_FIRST(&sc->bioq)) != NULL) {
1109 tag = xa_setup_cmd(sc, NULL);
1112 TAILQ_REMOVE(&sc->bioq, bio, bio_act);
1114 xa_start(tag, NULL, 1);
1117 sc->serializing = 0;
1122 * Handle unexpected termination (or a lost comm channel) from the other
1123 * side. Autonomous completion occurs only if open_tag matches;
1124 * otherwise another thread is probably waiting on the tag.
1126 * (see xa_close() for other interactions)
1128 if (msg->any.head.cmd & DMSGF_DELETE) {
1129 kdmsg_state_reply(tag->state, 0);
1130 if (sc->open_tag == tag) {
1131 sc->open_tag = NULL;
1138 lockmgr(&sc->lk, LK_RELEASE);
1144 xa_bio_completion(kdmsg_state_t *state, kdmsg_msg_t *msg)
1146 xa_tag_t *tag = state->any.any;
1147 xa_softc_t *sc = tag->sc;
1152 * Get the bio from the tag. If no bio is present we just do
1155 if ((bio = tag->bio) == NULL)
1160 * Process return status
1162 switch(msg->any.head.cmd & DMSGF_CMDSWMASK) {
1163 case DMSG_LNK_ERROR | DMSGF_REPLY:
1164 bzero(&tag->status, sizeof(tag->status));
1165 tag->status.head = msg->any.head;
1166 if (tag->status.head.error)
1167 tag->status.resid = bp->b_bcount;
1169 tag->status.resid = 0;
1171 case DMSG_BLK_ERROR | DMSGF_REPLY:
1172 tag->status = msg->any.blk_error;
1177 * If the device is open, stall the bio on DMSG errors. If an
1178 * actual I/O error occurred on the remote device, DMSG_ERR_IO
1181 if (tag->status.head.error &&
1182 (msg->any.head.cmd & DMSGF_DELETE) && sc->opencnt) {
1183 if (tag->status.head.error != DMSG_ERR_IO)
1188 * Process bio completion
1190 * For reads any returned data is zero-extended if necessary, so
1191 * the server can short-cut any all-zeros reads if it desires.
1195 if (msg->aux_data && msg->aux_size) {
1196 if (msg->aux_size < bp->b_bcount) {
1197 bcopy(msg->aux_data, bp->b_data, msg->aux_size);
1198 bzero(bp->b_data + msg->aux_size,
1199 bp->b_bcount - msg->aux_size);
1201 bcopy(msg->aux_data, bp->b_data, bp->b_bcount);
1204 bzero(bp->b_data, bp->b_bcount);
1209 case BUF_CMD_FREEBLKS:
1211 if (tag->status.resid > bp->b_bcount)
1212 tag->status.resid = bp->b_bcount;
1213 bp->b_resid = tag->status.resid;
1214 if (tag->status.head.error != 0) {
1216 bp->b_flags |= B_ERROR;
1220 devstat_end_transaction_buf(&sc->stats, bp);
1221 atomic_add_int(&xa_active, -1);
1228 * Handle completion of the transaction. If the bioq is not empty
1229 * we can initiate another bio on the same tag.
1231 * NOTE: Most of our transactions will be single-message
1232 * CREATE+DELETEs, so we won't have to terminate the
1233 * transaction separately here. But just in case they
1234 * aren't, be sure to terminate the transaction.
1237 if (msg->any.head.cmd & DMSGF_DELETE) {
1239 if ((state->txcmd & DMSGF_DELETE) == 0)
1240 kdmsg_msg_reply(msg, 0);
1245 * Handle the case where the transaction failed due to a
1246 * connectivity issue. The tag is put away with wasbio=0
1247 * and we put the BIO back onto the bioq for a later restart.
1249 * probe I/Os (where the device is not open) will be failed
1250 * instead of requeued.
1254 if (bio->bio_buf->b_flags & B_FAILONDIS) {
1255 xa_printf(1, "xa_strategy: lost link, fail probe bp %p\n",
1257 bio->bio_buf->b_error = ENXIO;
1258 bio->bio_buf->b_flags |= B_ERROR;
1262 xa_printf(1, "xa_strategy: lost link, requeue bp %p\n",
1266 if ((state->txcmd & DMSGF_DELETE) == 0)
1267 kdmsg_msg_reply(msg, 0);
1273 lockmgr(&sc->lk, LK_EXCLUSIVE);
1274 TAILQ_INSERT_TAIL(&sc->bioq, bio, bio_act);
1275 lockmgr(&sc->lk, LK_RELEASE);
1281 * Restart as much deferred I/O as we can. The serializer is set and we
1282 * eat it (clear it) when done.
1284 * Called with sc->lk held
1288 xa_restart_deferred(xa_softc_t *sc)
1290 kdmsg_state_t *span;
1295 KKASSERT(sc->serializing);
1298 * Determine if a restart is needed.
1300 if (sc->opencnt == 0) {
1302 * Device is not open, nothing to do, eat serializing.
1304 sc->serializing = 0;
1306 } else if (sc->open_tag == NULL) {
1308 * BLK_OPEN required before we can restart any BIOs.
1309 * Select the best LNK_SPAN to issue the BLK_OPEN under.
1311 * serializing interlocks waiting open()s.
1314 TAILQ_FOREACH(span, &sc->spanq, user_entry) {
1315 if ((span->rxcmd & DMSGF_DELETE) == 0)
1322 tag = xa_setup_cmd(sc, NULL);
1328 msg = kdmsg_msg_alloc(span,
1331 xa_sync_completion, tag);
1332 msg->any.blk_open.modes = DMSG_BLKOPEN_RD;
1334 "BLK_OPEN tag %p state %p "
1336 tag, msg->state, span);
1337 xa_start(tag, msg, 0);
1340 sc->serializing = 0;
1343 /* else leave serializing set until BLK_OPEN response */
1346 sc->serializing = 0;