/*-
 * Copyright (c) 2012, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/dev/virtio/scsi/virtio_scsi.c 311305 2017-01-04 20:26:42Z asomers $
 */
29 /* Driver for VirtIO SCSI devices. */
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/kernel.h>
34 #include <sys/kthread.h>
35 #include <sys/malloc.h>
36 #include <sys/module.h>
37 #include <sys/sglist.h>
38 #include <sys/sysctl.h>
40 #include <sys/callout.h>
41 #include <sys/queue.h>
44 #include <machine/stdarg.h>
49 #include <bus/cam/cam.h>
50 #include <bus/cam/cam_ccb.h>
51 #include <bus/cam/cam_sim.h>
52 #include <bus/cam/cam_periph.h>
53 #include <bus/cam/cam_xpt_periph.h>
54 #include <bus/cam/cam_xpt_sim.h>
55 #include <bus/cam/cam_debug.h>
56 #include <bus/cam/scsi/scsi_all.h>
57 #include <bus/cam/scsi/scsi_message.h>
59 #include <dev/virtual/virtio/virtio/virtio.h>
60 #include <dev/virtual/virtio/virtio/virtqueue.h>
61 #include <dev/virtual/virtio/scsi/virtio_scsi.h>
62 #include <dev/virtual/virtio/scsi/virtio_scsivar.h>
64 static int vtscsi_modevent(module_t, int, void *);
66 static int vtscsi_probe(device_t);
67 static int vtscsi_attach(device_t);
68 static int vtscsi_detach(device_t);
69 static int vtscsi_suspend(device_t);
70 static int vtscsi_resume(device_t);
72 static void vtscsi_negotiate_features(struct vtscsi_softc *);
73 static void vtscsi_read_config(struct vtscsi_softc *,
74 struct virtio_scsi_config *);
75 static int vtscsi_maximum_segments(struct vtscsi_softc *, int);
76 static int vtscsi_alloc_intrs(struct vtscsi_softc *);
77 static int vtscsi_alloc_virtqueues(struct vtscsi_softc *);
78 static void vtscsi_write_device_config(struct vtscsi_softc *);
79 static int vtscsi_reinit(struct vtscsi_softc *);
81 static int vtscsi_alloc_cam(struct vtscsi_softc *);
82 static int vtscsi_register_cam(struct vtscsi_softc *);
83 static void vtscsi_free_cam(struct vtscsi_softc *);
84 static void vtscsi_cam_async(void *, uint32_t, struct cam_path *, void *);
85 static int vtscsi_register_async(struct vtscsi_softc *);
86 static void vtscsi_deregister_async(struct vtscsi_softc *);
87 static void vtscsi_cam_action(struct cam_sim *, union ccb *);
88 static void vtscsi_cam_poll(struct cam_sim *);
90 static void vtscsi_cam_scsi_io(struct vtscsi_softc *, struct cam_sim *,
92 static void vtscsi_cam_get_tran_settings(struct vtscsi_softc *,
94 static void vtscsi_cam_reset_bus(struct vtscsi_softc *, union ccb *);
95 static void vtscsi_cam_reset_dev(struct vtscsi_softc *, union ccb *);
96 static void vtscsi_cam_abort(struct vtscsi_softc *, union ccb *);
97 static void vtscsi_cam_path_inquiry(struct vtscsi_softc *,
98 struct cam_sim *, union ccb *);
100 static int vtscsi_sg_append_scsi_buf(struct vtscsi_softc *,
101 struct sglist *, struct ccb_scsiio *);
102 static int vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *,
103 struct vtscsi_request *, int *, int *);
104 static int vtscsi_execute_scsi_cmd(struct vtscsi_softc *,
105 struct vtscsi_request *);
106 static int vtscsi_start_scsi_cmd(struct vtscsi_softc *, union ccb *);
107 static void vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *,
108 struct vtscsi_request *);
109 static int vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *,
110 struct vtscsi_request *);
111 static void vtscsi_timedout_scsi_cmd(void *);
112 static cam_status vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *);
113 static cam_status vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *,
114 struct ccb_scsiio *, struct virtio_scsi_cmd_resp *);
115 static void vtscsi_complete_scsi_cmd(struct vtscsi_softc *,
116 struct vtscsi_request *);
118 static void vtscsi_poll_ctrl_req(struct vtscsi_softc *,
119 struct vtscsi_request *);
120 static int vtscsi_execute_ctrl_req(struct vtscsi_softc *,
121 struct vtscsi_request *, struct sglist *, int, int, int);
122 static void vtscsi_complete_abort_task_cmd(struct vtscsi_softc *c,
123 struct vtscsi_request *);
124 static int vtscsi_execute_abort_task_cmd(struct vtscsi_softc *,
125 struct vtscsi_request *);
126 static int vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *,
127 struct vtscsi_request *);
129 static void vtscsi_get_request_lun(uint8_t [], target_id_t *, lun_id_t *);
130 static void vtscsi_set_request_lun(struct ccb_hdr *, uint8_t []);
131 static void vtscsi_init_scsi_cmd_req(struct ccb_scsiio *,
132 struct virtio_scsi_cmd_req *);
133 static void vtscsi_init_ctrl_tmf_req(struct ccb_hdr *, uint32_t,
134 uintptr_t, struct virtio_scsi_ctrl_tmf_req *);
136 static void vtscsi_freeze_simq(struct vtscsi_softc *, int);
137 static int vtscsi_thaw_simq(struct vtscsi_softc *, int);
139 static void vtscsi_announce(struct vtscsi_softc *, uint32_t, target_id_t,
141 static void vtscsi_cam_rescan_callback(struct cam_periph *periph,
143 static void vtscsi_execute_rescan(struct vtscsi_softc *, target_id_t,
145 static void vtscsi_execute_rescan_bus(struct vtscsi_softc *);
147 static void vtscsi_handle_event(struct vtscsi_softc *,
148 struct virtio_scsi_event *);
149 static int vtscsi_enqueue_event_buf(struct vtscsi_softc *,
150 struct virtio_scsi_event *);
151 static int vtscsi_init_event_vq(struct vtscsi_softc *);
152 static void vtscsi_reinit_event_vq(struct vtscsi_softc *);
153 static void vtscsi_drain_event_vq(struct vtscsi_softc *);
155 static void vtscsi_complete_vqs_locked(struct vtscsi_softc *);
156 static void vtscsi_complete_vqs(struct vtscsi_softc *);
157 static void vtscsi_drain_vqs(struct vtscsi_softc *);
158 static void vtscsi_cancel_request(struct vtscsi_softc *,
159 struct vtscsi_request *);
160 static void vtscsi_drain_vq(struct vtscsi_softc *, struct virtqueue *);
161 static void vtscsi_stop(struct vtscsi_softc *);
162 static int vtscsi_reset_bus(struct vtscsi_softc *);
164 static void vtscsi_init_request(struct vtscsi_softc *,
165 struct vtscsi_request *);
166 static int vtscsi_alloc_requests(struct vtscsi_softc *);
167 static void vtscsi_free_requests(struct vtscsi_softc *);
168 static void vtscsi_enqueue_request(struct vtscsi_softc *,
169 struct vtscsi_request *);
170 static struct vtscsi_request * vtscsi_dequeue_request(struct vtscsi_softc *);
172 static void vtscsi_complete_request(struct vtscsi_request *);
173 static void vtscsi_complete_vq(struct vtscsi_softc *, struct virtqueue *);
175 static void vtscsi_control_vq_intr(void *);
176 static void vtscsi_event_vq_intr(void *);
177 static void vtscsi_request_vq_intr(void *);
178 static void vtscsi_disable_vqs_intr(struct vtscsi_softc *);
179 static void vtscsi_enable_vqs_intr(struct vtscsi_softc *);
181 static void vtscsi_get_tunables(struct vtscsi_softc *);
182 static void vtscsi_add_sysctl(struct vtscsi_softc *);
184 static void vtscsi_printf_req(struct vtscsi_request *, const char *,
185 const char *, ...) __printflike(3, 4);
187 /* Global tunables. */
189 * The current QEMU VirtIO SCSI implementation does not cancel in-flight
190 * IO during virtio_stop(). So in-flight requests still complete after the
191 * device reset. We would have to wait for all the in-flight IO to complete,
192 * which defeats the typical purpose of a bus reset. We could simulate the
193 * bus reset with either I_T_NEXUS_RESET of all the targets, or with
194 * LOGICAL_UNIT_RESET of all the LUNs (assuming there is space in the
195 * control virtqueue). But this isn't very useful if things really go off
196 * the rails, so default to disabled for now.
198 static int vtscsi_bus_reset_disable = 1;
199 TUNABLE_INT("hw.vtscsi.bus_reset_disable", &vtscsi_bus_reset_disable);
201 static struct virtio_feature_desc vtscsi_feature_desc[] = {
202 { VIRTIO_SCSI_F_INOUT, "InOut" },
203 { VIRTIO_SCSI_F_HOTPLUG, "Hotplug" },
208 static device_method_t vtscsi_methods[] = {
209 /* Device methods. */
210 DEVMETHOD(device_probe, vtscsi_probe),
211 DEVMETHOD(device_attach, vtscsi_attach),
212 DEVMETHOD(device_detach, vtscsi_detach),
213 DEVMETHOD(device_suspend, vtscsi_suspend),
214 DEVMETHOD(device_resume, vtscsi_resume),
219 static driver_t vtscsi_driver = {
222 sizeof(struct vtscsi_softc)
224 static devclass_t vtscsi_devclass;
226 DRIVER_MODULE(virtio_scsi, virtio_pci, vtscsi_driver, vtscsi_devclass,
227 vtscsi_modevent, NULL);
228 MODULE_VERSION(virtio_scsi, 1);
229 MODULE_DEPEND(virtio_scsi, virtio, 1, 1, 1);
230 MODULE_DEPEND(virtio_scsi, cam, 1, 1, 1);
233 vtscsi_modevent(module_t mod, int type, void *unused)
252 vtscsi_probe(device_t dev)
255 if (virtio_get_device_type(dev) != VIRTIO_ID_SCSI)
258 device_set_desc(dev, "VirtIO SCSI Adapter");
260 return (BUS_PROBE_DEFAULT);
265 driver_intr_t *handler;
269 vtscsi_attach(device_t dev)
271 struct vtscsi_softc *sc;
272 struct virtio_scsi_config scsicfg;
275 sc = device_get_softc(dev);
276 sc->vtscsi_dev = dev;
278 VTSCSI_LOCK_INIT(sc, device_get_nameunit(dev));
279 TAILQ_INIT(&sc->vtscsi_req_free);
281 vtscsi_get_tunables(sc);
282 vtscsi_add_sysctl(sc);
284 virtio_set_feature_desc(dev, vtscsi_feature_desc);
285 vtscsi_negotiate_features(sc);
287 if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
288 sc->vtscsi_flags |= VTSCSI_FLAG_INDIRECT;
289 #ifndef __DragonFly__ /* XXX swildner */
290 if (virtio_with_feature(dev, VIRTIO_SCSI_F_INOUT))
291 sc->vtscsi_flags |= VTSCSI_FLAG_BIDIRECTIONAL;
293 if (virtio_with_feature(dev, VIRTIO_SCSI_F_HOTPLUG))
294 sc->vtscsi_flags |= VTSCSI_FLAG_HOTPLUG;
296 vtscsi_read_config(sc, &scsicfg);
298 sc->vtscsi_max_channel = scsicfg.max_channel;
299 sc->vtscsi_max_target = scsicfg.max_target;
300 sc->vtscsi_max_lun = scsicfg.max_lun;
301 sc->vtscsi_event_buf_size = scsicfg.event_info_size;
303 vtscsi_write_device_config(sc);
305 sc->vtscsi_max_nsegs = vtscsi_maximum_segments(sc, scsicfg.seg_max);
306 sc->vtscsi_sglist = sglist_alloc(sc->vtscsi_max_nsegs, M_NOWAIT);
307 if (sc->vtscsi_sglist == NULL) {
309 device_printf(dev, "cannot allocate sglist\n");
313 error = vtscsi_alloc_intrs(sc);
315 device_printf(dev, "cannot allocate interrupts\n");
319 error = vtscsi_alloc_virtqueues(sc);
321 device_printf(dev, "cannot allocate virtqueues\n");
325 /* XXX Separate function */
326 struct irqmap info[3];
328 /* Possible "Virtqueue <-> IRQ" configurations */
329 switch (sc->vtscsi_nintr) {
331 info[0] = (struct irqmap){0, vtscsi_control_vq_intr};
332 info[1] = (struct irqmap){0, vtscsi_event_vq_intr};
333 info[2] = (struct irqmap){0, vtscsi_request_vq_intr};
336 info[0] = (struct irqmap){0, vtscsi_control_vq_intr};
337 info[1] = (struct irqmap){0, vtscsi_event_vq_intr};
338 info[2] = (struct irqmap){1, vtscsi_request_vq_intr};
341 info[0] = (struct irqmap){0, vtscsi_control_vq_intr};
342 info[1] = (struct irqmap){1, vtscsi_event_vq_intr};
343 info[2] = (struct irqmap){2, vtscsi_request_vq_intr};
346 device_printf(dev, "Invalid interrupt vector count: %d\n",
350 for (i = 0; i < 3; i++) {
351 error = virtio_bind_intr(sc->vtscsi_dev, info[i].irq, i,
352 info[i].handler, sc);
355 "cannot bind virtqueue IRQs\n");
360 error = vtscsi_init_event_vq(sc);
362 device_printf(dev, "cannot populate the eventvq\n");
366 error = vtscsi_alloc_requests(sc);
368 device_printf(dev, "cannot allocate requests\n");
372 error = vtscsi_alloc_cam(sc);
374 device_printf(dev, "cannot allocate CAM structures\n");
378 for (i = 0; i < sc->vtscsi_nintr; i++) {
379 error = virtio_setup_intr(dev, i, NULL);
381 device_printf(dev, "cannot setup virtqueue "
387 vtscsi_enable_vqs_intr(sc);
390 * Register with CAM after interrupts are enabled so we will get
391 * notified of the probe responses.
393 error = vtscsi_register_cam(sc);
395 device_printf(dev, "cannot register with CAM\n");
407 vtscsi_detach(device_t dev)
409 struct vtscsi_softc *sc;
412 sc = device_get_softc(dev);
414 for (i = 0; i < sc->vtscsi_nintr; i++)
415 virtio_teardown_intr(dev, i);
418 sc->vtscsi_flags |= VTSCSI_FLAG_DETACH;
419 if (device_is_attached(dev))
423 vtscsi_complete_vqs(sc);
424 vtscsi_drain_vqs(sc);
427 vtscsi_free_requests(sc);
429 if (sc->vtscsi_sglist != NULL) {
430 sglist_free(sc->vtscsi_sglist);
431 sc->vtscsi_sglist = NULL;
434 VTSCSI_LOCK_DESTROY(sc);
440 vtscsi_suspend(device_t dev)
447 vtscsi_resume(device_t dev)
454 vtscsi_negotiate_features(struct vtscsi_softc *sc)
459 dev = sc->vtscsi_dev;
460 features = virtio_negotiate_features(dev, VTSCSI_FEATURES);
461 sc->vtscsi_features = features;
464 #define VTSCSI_GET_CONFIG(_dev, _field, _cfg) \
465 virtio_read_device_config(_dev, \
466 offsetof(struct virtio_scsi_config, _field), \
467 &(_cfg)->_field, sizeof((_cfg)->_field)) \
470 vtscsi_read_config(struct vtscsi_softc *sc,
471 struct virtio_scsi_config *scsicfg)
475 dev = sc->vtscsi_dev;
477 bzero(scsicfg, sizeof(struct virtio_scsi_config));
479 VTSCSI_GET_CONFIG(dev, num_queues, scsicfg);
480 VTSCSI_GET_CONFIG(dev, seg_max, scsicfg);
481 VTSCSI_GET_CONFIG(dev, max_sectors, scsicfg);
482 VTSCSI_GET_CONFIG(dev, cmd_per_lun, scsicfg);
483 VTSCSI_GET_CONFIG(dev, event_info_size, scsicfg);
484 VTSCSI_GET_CONFIG(dev, sense_size, scsicfg);
485 VTSCSI_GET_CONFIG(dev, cdb_size, scsicfg);
486 VTSCSI_GET_CONFIG(dev, max_channel, scsicfg);
487 VTSCSI_GET_CONFIG(dev, max_target, scsicfg);
488 VTSCSI_GET_CONFIG(dev, max_lun, scsicfg);
491 #undef VTSCSI_GET_CONFIG
494 vtscsi_maximum_segments(struct vtscsi_softc *sc, int seg_max)
498 nsegs = VTSCSI_MIN_SEGMENTS;
501 nsegs += MIN(seg_max, MAXPHYS / PAGE_SIZE + 1);
502 if (sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT)
503 nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
512 vtscsi_alloc_intrs(struct vtscsi_softc *sc)
514 int intrcount = virtio_intr_count(sc->vtscsi_dev);
517 for (i = 0; i < NELEM(sc->vtscsi_cpus); i++)
518 sc->vtscsi_cpus[i] = -1;
520 intrcount = imin(intrcount, 3);
525 error = virtio_intr_alloc(sc->vtscsi_dev, &cnt, 0, sc->vtscsi_cpus);
527 virtio_intr_release(sc->vtscsi_dev);
530 sc->vtscsi_nintr = cnt;
535 vtscsi_alloc_virtqueues(struct vtscsi_softc *sc)
537 device_t dev = sc->vtscsi_dev;
538 struct vq_alloc_info vq_info[3];
541 VQ_ALLOC_INFO_INIT(&vq_info[0], 0, &sc->vtscsi_control_vq,
542 "%s control", device_get_nameunit(dev));
544 VQ_ALLOC_INFO_INIT(&vq_info[1], 0, &sc->vtscsi_event_vq,
545 "%s event", device_get_nameunit(dev));
547 VQ_ALLOC_INFO_INIT(&vq_info[2], sc->vtscsi_max_nsegs,
548 &sc->vtscsi_request_vq, "%s request", device_get_nameunit(dev));
550 return (virtio_alloc_virtqueues(dev, nvqs, vq_info));
554 vtscsi_write_device_config(struct vtscsi_softc *sc)
557 virtio_write_dev_config_4(sc->vtscsi_dev,
558 offsetof(struct virtio_scsi_config, sense_size),
559 VIRTIO_SCSI_SENSE_SIZE);
562 * This is the size in the virtio_scsi_cmd_req structure. Note
563 * this value (32) is larger than the maximum CAM CDB size (16).
565 virtio_write_dev_config_4(sc->vtscsi_dev,
566 offsetof(struct virtio_scsi_config, cdb_size),
567 VIRTIO_SCSI_CDB_SIZE);
571 vtscsi_reinit(struct vtscsi_softc *sc)
576 dev = sc->vtscsi_dev;
578 error = virtio_reinit(dev, sc->vtscsi_features);
580 vtscsi_write_device_config(sc);
581 vtscsi_reinit_event_vq(sc);
582 virtio_reinit_complete(dev);
584 vtscsi_enable_vqs_intr(sc);
587 vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d\n", error);
593 vtscsi_alloc_cam(struct vtscsi_softc *sc)
596 struct cam_devq *devq;
599 dev = sc->vtscsi_dev;
600 openings = sc->vtscsi_nrequests - VTSCSI_RESERVED_REQUESTS;
602 devq = cam_simq_alloc(openings);
604 device_printf(dev, "cannot allocate SIM queue\n");
608 sc->vtscsi_sim = cam_sim_alloc(vtscsi_cam_action, vtscsi_cam_poll,
609 "vtscsi", sc, device_get_unit(dev), VTSCSI_MTX(sc), 1,
611 cam_simq_release(devq);
612 if (sc->vtscsi_sim == NULL) {
613 device_printf(dev, "cannot allocate SIM\n");
621 vtscsi_register_cam(struct vtscsi_softc *sc)
624 int registered, error;
626 dev = sc->vtscsi_dev;
631 if (xpt_bus_register(sc->vtscsi_sim, 0) != CAM_SUCCESS) {
633 device_printf(dev, "cannot register XPT bus\n");
639 if (xpt_create_path(&sc->vtscsi_path, NULL,
640 cam_sim_path(sc->vtscsi_sim), CAM_TARGET_WILDCARD,
641 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
643 device_printf(dev, "cannot create bus path\n");
647 if (vtscsi_register_async(sc) != CAM_REQ_CMP) {
649 device_printf(dev, "cannot register async callback\n");
658 if (sc->vtscsi_path != NULL) {
659 xpt_free_path(sc->vtscsi_path);
660 sc->vtscsi_path = NULL;
664 xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim));
672 vtscsi_free_cam(struct vtscsi_softc *sc)
677 if (sc->vtscsi_path != NULL) {
678 vtscsi_deregister_async(sc);
680 xpt_free_path(sc->vtscsi_path);
681 sc->vtscsi_path = NULL;
683 xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim));
686 if (sc->vtscsi_sim != NULL) {
687 cam_sim_free(sc->vtscsi_sim);
688 sc->vtscsi_sim = NULL;
695 vtscsi_cam_async(void *cb_arg, uint32_t code, struct cam_path *path, void *arg)
698 struct vtscsi_softc *sc;
701 sc = cam_sim_softc(sim);
703 vtscsi_dprintf(sc, VTSCSI_TRACE, "code=%u\n", code);
706 * TODO Once QEMU supports event reporting, we should
707 * (un)subscribe to events here.
710 case AC_FOUND_DEVICE:
718 vtscsi_register_async(struct vtscsi_softc *sc)
720 struct ccb_setasync *csa;
723 csa = &xpt_alloc_ccb()->csa;
725 xpt_setup_ccb(&csa->ccb_h, sc->vtscsi_path, 5);
726 csa->ccb_h.func_code = XPT_SASYNC_CB;
727 csa->event_enable = AC_LOST_DEVICE | AC_FOUND_DEVICE;
728 csa->callback = vtscsi_cam_async;
729 csa->callback_arg = sc->vtscsi_sim;
731 xpt_action((union ccb *)csa);
732 status = csa->ccb_h.status;
733 xpt_free_ccb(&csa->ccb_h);
739 vtscsi_deregister_async(struct vtscsi_softc *sc)
741 struct ccb_setasync *csa;
743 csa = &xpt_alloc_ccb()->csa;
744 xpt_setup_ccb(&csa->ccb_h, sc->vtscsi_path, 5);
745 csa->ccb_h.func_code = XPT_SASYNC_CB;
746 csa->event_enable = 0;
747 csa->callback = vtscsi_cam_async;
748 csa->callback_arg = sc->vtscsi_sim;
749 xpt_action((union ccb *)csa);
750 xpt_free_ccb(&csa->ccb_h);
754 vtscsi_cam_action(struct cam_sim *sim, union ccb *ccb)
756 struct vtscsi_softc *sc;
757 struct ccb_hdr *ccbh;
759 sc = cam_sim_softc(sim);
762 VTSCSI_LOCK_OWNED(sc);
764 if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) {
766 * The VTSCSI_MTX is briefly dropped between setting
767 * VTSCSI_FLAG_DETACH and deregistering with CAM, so
768 * drop any CCBs that come in during that window.
770 ccbh->status = CAM_NO_HBA;
775 switch (ccbh->func_code) {
777 vtscsi_cam_scsi_io(sc, sim, ccb);
780 case XPT_SET_TRAN_SETTINGS:
781 ccbh->status = CAM_FUNC_NOTAVAIL;
785 case XPT_GET_TRAN_SETTINGS:
786 vtscsi_cam_get_tran_settings(sc, ccb);
790 vtscsi_cam_reset_bus(sc, ccb);
794 vtscsi_cam_reset_dev(sc, ccb);
798 vtscsi_cam_abort(sc, ccb);
801 case XPT_CALC_GEOMETRY:
802 cam_calc_geometry(&ccb->ccg, 1);
807 vtscsi_cam_path_inquiry(sc, sim, ccb);
811 vtscsi_dprintf(sc, VTSCSI_ERROR,
812 "invalid ccb=%p func=%#x\n", ccb, ccbh->func_code);
814 ccbh->status = CAM_REQ_INVALID;
/*
 * CAM poll method: drain all virtqueues without relying on interrupts
 * (used e.g. during kernel dumps).
 */
static void
vtscsi_cam_poll(struct cam_sim *sim)
{
	struct vtscsi_softc *sc;

	sc = cam_sim_softc(sim);

	vtscsi_complete_vqs_locked(sc);
}
831 vtscsi_cam_scsi_io(struct vtscsi_softc *sc, struct cam_sim *sim,
834 struct ccb_hdr *ccbh;
835 struct ccb_scsiio *csio;
841 if (csio->cdb_len > VIRTIO_SCSI_CDB_SIZE) {
843 ccbh->status = CAM_REQ_INVALID;
847 #ifndef __DragonFly__ /* XXX swildner */
848 if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_BOTH &&
849 (sc->vtscsi_flags & VTSCSI_FLAG_BIDIRECTIONAL) == 0) {
851 ccbh->status = CAM_REQ_INVALID;
856 error = vtscsi_start_scsi_cmd(sc, ccb);
860 vtscsi_dprintf(sc, VTSCSI_ERROR,
861 "error=%d ccb=%p status=%#x\n", error, ccb, ccbh->status);
867 vtscsi_cam_get_tran_settings(struct vtscsi_softc *sc, union ccb *ccb)
869 struct ccb_trans_settings *cts;
870 struct ccb_trans_settings_scsi *scsi;
873 scsi = &cts->proto_specific.scsi;
875 cts->protocol = PROTO_SCSI;
876 cts->protocol_version = SCSI_REV_SPC3;
877 cts->transport = XPORT_SAS;
878 cts->transport_version = 0;
880 scsi->valid = CTS_SCSI_VALID_TQ;
881 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
883 ccb->ccb_h.status = CAM_REQ_CMP;
888 vtscsi_cam_reset_bus(struct vtscsi_softc *sc, union ccb *ccb)
892 error = vtscsi_reset_bus(sc);
894 ccb->ccb_h.status = CAM_REQ_CMP;
896 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
898 vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d ccb=%p status=%#x\n",
899 error, ccb, ccb->ccb_h.status);
905 vtscsi_cam_reset_dev(struct vtscsi_softc *sc, union ccb *ccb)
907 struct ccb_hdr *ccbh;
908 struct vtscsi_request *req;
913 req = vtscsi_dequeue_request(sc);
916 vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
922 error = vtscsi_execute_reset_dev_cmd(sc, req);
926 vtscsi_enqueue_request(sc, req);
929 vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n",
933 ccbh->status = CAM_RESRC_UNAVAIL;
935 ccbh->status = CAM_REQ_CMP_ERR;
941 vtscsi_cam_abort(struct vtscsi_softc *sc, union ccb *ccb)
943 struct vtscsi_request *req;
944 struct ccb_hdr *ccbh;
949 req = vtscsi_dequeue_request(sc);
952 vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
958 error = vtscsi_execute_abort_task_cmd(sc, req);
962 vtscsi_enqueue_request(sc, req);
965 vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n",
969 ccbh->status = CAM_RESRC_UNAVAIL;
971 ccbh->status = CAM_REQ_CMP_ERR;
977 vtscsi_cam_path_inquiry(struct vtscsi_softc *sc, struct cam_sim *sim,
981 struct ccb_pathinq *cpi;
983 dev = sc->vtscsi_dev;
986 vtscsi_dprintf(sc, VTSCSI_TRACE, "sim=%p ccb=%p\n", sim, ccb);
988 cpi->version_num = 1;
989 cpi->hba_inquiry = PI_TAG_ABLE;
990 cpi->target_sprt = 0;
991 cpi->hba_misc = PIM_SEQSCAN;
992 if (vtscsi_bus_reset_disable != 0)
993 cpi->hba_misc |= PIM_NOBUSRESET;
994 cpi->hba_eng_cnt = 0;
996 cpi->max_target = sc->vtscsi_max_target;
997 cpi->max_lun = sc->vtscsi_max_lun;
998 cpi->initiator_id = VTSCSI_INITIATOR_ID;
1000 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1001 strlcpy(cpi->hba_vid, "VirtIO", HBA_IDLEN);
1002 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1004 cpi->unit_number = cam_sim_unit(sim);
1005 cpi->bus_id = cam_sim_bus(sim);
1007 cpi->base_transfer_speed = 300000;
1009 cpi->protocol = PROTO_SCSI;
1010 cpi->protocol_version = SCSI_REV_SPC3;
1011 cpi->transport = XPORT_SAS;
1012 cpi->transport_version = 0;
1014 cpi->maxio = (sc->vtscsi_max_nsegs - VTSCSI_MIN_SEGMENTS - 1) *
1018 cpi->hba_vendor = virtio_get_vendor(dev);
1019 cpi->hba_device = virtio_get_device(dev);
1020 cpi->hba_subvendor = virtio_get_subvendor(dev);
1021 cpi->hba_subdevice = virtio_get_subdevice(dev);
1024 ccb->ccb_h.status = CAM_REQ_CMP;
1029 vtscsi_sg_append_scsi_buf(struct vtscsi_softc *sc, struct sglist *sg,
1030 struct ccb_scsiio *csio)
1032 struct ccb_hdr *ccbh;
1033 struct bus_dma_segment *dseg;
1036 ccbh = &csio->ccb_h;
1039 if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
1041 if ((ccbh->flags & CAM_DATA_PHYS) == 0)
1042 error = sglist_append(sg,
1043 csio->data_ptr, csio->dxfer_len);
1045 error = sglist_append_phys(sg,
1046 (vm_paddr_t)(vm_offset_t) csio->data_ptr,
1050 for (i = 0; i < csio->sglist_cnt && error == 0; i++) {
1051 dseg = &((struct bus_dma_segment *)csio->data_ptr)[i];
1053 if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0)
1054 error = sglist_append(sg,
1055 (void *)(vm_offset_t) dseg->ds_addr,
1058 error = sglist_append_phys(sg,
1059 (vm_paddr_t) dseg->ds_addr, dseg->ds_len);
1067 vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *sc, struct vtscsi_request *req,
1068 int *readable, int *writable)
1071 struct ccb_hdr *ccbh;
1072 struct ccb_scsiio *csio;
1073 struct virtio_scsi_cmd_req *cmd_req;
1074 struct virtio_scsi_cmd_resp *cmd_resp;
1077 sg = sc->vtscsi_sglist;
1078 csio = &req->vsr_ccb->csio;
1079 ccbh = &csio->ccb_h;
1080 cmd_req = &req->vsr_cmd_req;
1081 cmd_resp = &req->vsr_cmd_resp;
1085 sglist_append(sg, cmd_req, sizeof(struct virtio_scsi_cmd_req));
1086 if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1087 error = vtscsi_sg_append_scsi_buf(sc, sg, csio);
1088 /* At least one segment must be left for the response. */
1089 if (error || sg->sg_nseg == sg->sg_maxseg)
1093 *readable = sg->sg_nseg;
1095 sglist_append(sg, cmd_resp, sizeof(struct virtio_scsi_cmd_resp));
1096 if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1097 error = vtscsi_sg_append_scsi_buf(sc, sg, csio);
1102 *writable = sg->sg_nseg - *readable;
1104 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p readable=%d "
1105 "writable=%d\n", req, ccbh, *readable, *writable);
1111 * This should never happen unless maxio was incorrectly set.
1113 vtscsi_set_ccb_status(ccbh, CAM_REQ_TOO_BIG, 0);
1115 vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p "
1116 "nseg=%d maxseg=%d\n",
1117 error, req, ccbh, sg->sg_nseg, sg->sg_maxseg);
1123 vtscsi_execute_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req)
1126 struct virtqueue *vq;
1127 struct ccb_scsiio *csio;
1128 struct ccb_hdr *ccbh;
1129 struct virtio_scsi_cmd_req *cmd_req;
1130 struct virtio_scsi_cmd_resp *cmd_resp;
1131 int readable, writable, error;
1133 sg = sc->vtscsi_sglist;
1134 vq = sc->vtscsi_request_vq;
1135 csio = &req->vsr_ccb->csio;
1136 ccbh = &csio->ccb_h;
1137 cmd_req = &req->vsr_cmd_req;
1138 cmd_resp = &req->vsr_cmd_resp;
1140 vtscsi_init_scsi_cmd_req(csio, cmd_req);
1142 error = vtscsi_fill_scsi_cmd_sglist(sc, req, &readable, &writable);
1146 req->vsr_complete = vtscsi_complete_scsi_cmd;
1147 cmd_resp->response = -1;
1149 error = virtqueue_enqueue(vq, req, sg, readable, writable);
1151 vtscsi_dprintf(sc, VTSCSI_ERROR,
1152 "enqueue error=%d req=%p ccb=%p\n", error, req, ccbh);
1154 ccbh->status = CAM_REQUEUE_REQ;
1155 vtscsi_freeze_simq(sc, VTSCSI_REQUEST_VQ);
1159 ccbh->status |= CAM_SIM_QUEUED;
1160 ccbh->ccbh_vtscsi_req = req;
1162 virtqueue_notify(vq, NULL);
1164 if (ccbh->timeout != CAM_TIME_INFINITY) {
1165 req->vsr_flags |= VTSCSI_REQ_FLAG_TIMEOUT_SET;
1166 callout_reset(&req->vsr_callout, ccbh->timeout * hz / 1000,
1167 vtscsi_timedout_scsi_cmd, req);
1170 vtscsi_dprintf_req(req, VTSCSI_TRACE, "enqueued req=%p ccb=%p\n",
1177 vtscsi_start_scsi_cmd(struct vtscsi_softc *sc, union ccb *ccb)
1179 struct vtscsi_request *req;
1182 req = vtscsi_dequeue_request(sc);
1184 ccb->ccb_h.status = CAM_REQUEUE_REQ;
1185 vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
1191 error = vtscsi_execute_scsi_cmd(sc, req);
1193 vtscsi_enqueue_request(sc, req);
1199 vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *sc,
1200 struct vtscsi_request *req)
1202 struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1203 struct vtscsi_request *to_req;
1206 tmf_resp = &req->vsr_tmf_resp;
1207 response = tmf_resp->response;
1208 to_req = req->vsr_timedout_req;
1210 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p to_req=%p response=%d\n",
1211 req, to_req, response);
1213 vtscsi_enqueue_request(sc, req);
1216 * The timedout request could have completed between when the
1217 * abort task was sent and when the host processed it.
1219 if (to_req->vsr_state != VTSCSI_REQ_STATE_TIMEDOUT)
1222 /* The timedout request was successfully aborted. */
1223 if (response == VIRTIO_SCSI_S_FUNCTION_COMPLETE)
1226 /* Don't bother if the device is going away. */
1227 if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH)
1230 /* The timedout request will be aborted by the reset. */
1231 if (sc->vtscsi_flags & VTSCSI_FLAG_RESET)
1234 vtscsi_reset_bus(sc);
1238 vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *sc,
1239 struct vtscsi_request *to_req)
1242 struct ccb_hdr *to_ccbh;
1243 struct vtscsi_request *req;
1244 struct virtio_scsi_ctrl_tmf_req *tmf_req;
1245 struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1248 sg = sc->vtscsi_sglist;
1249 to_ccbh = &to_req->vsr_ccb->ccb_h;
1251 req = vtscsi_dequeue_request(sc);
1257 tmf_req = &req->vsr_tmf_req;
1258 tmf_resp = &req->vsr_tmf_resp;
1260 vtscsi_init_ctrl_tmf_req(to_ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
1261 (uintptr_t) to_ccbh, tmf_req);
1264 sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
1265 sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));
1267 req->vsr_timedout_req = to_req;
1268 req->vsr_complete = vtscsi_complete_abort_timedout_scsi_cmd;
1269 tmf_resp->response = -1;
1271 error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
1272 VTSCSI_EXECUTE_ASYNC);
1276 vtscsi_enqueue_request(sc, req);
1279 vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p "
1280 "timedout req=%p ccb=%p\n", error, req, to_req, to_ccbh);
1286 vtscsi_timedout_scsi_cmd(void *xreq)
1288 struct vtscsi_softc *sc;
1289 struct vtscsi_request *to_req;
1292 sc = to_req->vsr_softc;
1294 vtscsi_dprintf(sc, VTSCSI_INFO, "timedout req=%p ccb=%p state=%#x\n",
1295 to_req, to_req->vsr_ccb, to_req->vsr_state);
1297 /* Don't bother if the device is going away. */
1298 if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH)
1302 * Bail if the request is not in use. We likely raced when
1303 * stopping the callout handler or it has already been aborted.
1305 if (to_req->vsr_state != VTSCSI_REQ_STATE_INUSE ||
1306 (to_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) == 0)
1310 * Complete the request queue in case the timedout request is
1311 * actually just pending.
1313 vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
1314 if (to_req->vsr_state == VTSCSI_REQ_STATE_FREE)
1317 sc->vtscsi_stats.scsi_cmd_timeouts++;
1318 to_req->vsr_state = VTSCSI_REQ_STATE_TIMEDOUT;
1320 if (vtscsi_abort_timedout_scsi_cmd(sc, to_req) == 0)
1323 vtscsi_dprintf(sc, VTSCSI_ERROR, "resetting bus\n");
1324 vtscsi_reset_bus(sc);
1328 vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *cmd_resp)
1332 switch (cmd_resp->response) {
1333 case VIRTIO_SCSI_S_OK:
1334 status = CAM_REQ_CMP;
1336 case VIRTIO_SCSI_S_OVERRUN:
1337 status = CAM_DATA_RUN_ERR;
1339 case VIRTIO_SCSI_S_ABORTED:
1340 status = CAM_REQ_ABORTED;
1342 case VIRTIO_SCSI_S_BAD_TARGET:
1344 * A CAM_SEL_TIMEOUT here will cause the entire device to
1345 * be lost, which is not desirable when scanning LUNs.
1346 * Use CAM_DEV_NOT_THERE instead.
1348 status = CAM_DEV_NOT_THERE;
1350 case VIRTIO_SCSI_S_RESET:
1351 status = CAM_SCSI_BUS_RESET;
1353 case VIRTIO_SCSI_S_BUSY:
1354 status = CAM_SCSI_BUSY;
1356 case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
1357 case VIRTIO_SCSI_S_TARGET_FAILURE:
1358 case VIRTIO_SCSI_S_NEXUS_FAILURE:
1359 status = CAM_SCSI_IT_NEXUS_LOST;
1361 default: /* VIRTIO_SCSI_S_FAILURE */
1362 status = CAM_REQ_CMP_ERR;
1370 vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *sc,
1371 struct ccb_scsiio *csio, struct virtio_scsi_cmd_resp *cmd_resp)
1375 csio->scsi_status = cmd_resp->status;
1376 csio->resid = cmd_resp->resid;
1378 if (csio->scsi_status == SCSI_STATUS_OK)
1379 status = CAM_REQ_CMP;
1381 status = CAM_SCSI_STATUS_ERROR;
1383 if (cmd_resp->sense_len > 0) {
1384 status |= CAM_AUTOSNS_VALID;
1386 if (cmd_resp->sense_len < csio->sense_len)
1387 csio->sense_resid = csio->sense_len -
1388 cmd_resp->sense_len;
1390 csio->sense_resid = 0;
1392 bzero(&csio->sense_data, sizeof(csio->sense_data));
1393 memcpy(cmd_resp->sense, &csio->sense_data,
1394 csio->sense_len - csio->sense_resid);
1397 vtscsi_dprintf(sc, status == CAM_REQ_CMP ? VTSCSI_TRACE : VTSCSI_ERROR,
1398 "ccb=%p scsi_status=%#x resid=%u sense_resid=%u\n",
1399 csio, csio->scsi_status, csio->resid, csio->sense_resid);
/*
 * Completion handler for a SCSI command request: stop its timeout
 * callout (if armed), translate the virtio response into a CAM status,
 * freeze the device queue on error, complete the CCB back to CAM, and
 * return the request to the free list.
 */
1405 vtscsi_complete_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req)
1407 struct ccb_hdr *ccbh;
1408 struct ccb_scsiio *csio;
1409 struct virtio_scsi_cmd_resp *cmd_resp;
1412 csio = &req->vsr_ccb->csio;
1413 ccbh = &csio->ccb_h;
1414 cmd_resp = &req->vsr_cmd_resp;
1416 KASSERT(ccbh->ccbh_vtscsi_req == req,
1417 ("ccb %p req mismatch %p/%p", ccbh, ccbh->ccbh_vtscsi_req, req));
1419 if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
1420 callout_stop(&req->vsr_callout);
1422 status = vtscsi_scsi_cmd_cam_status(cmd_resp);
/* An abort completion on a request we timed out is reported as a timeout. */
1423 if (status == CAM_REQ_ABORTED) {
1424 if (req->vsr_state == VTSCSI_REQ_STATE_TIMEDOUT)
1425 status = CAM_CMD_TIMEOUT;
1426 } else if (status == CAM_REQ_CMP)
1427 status = vtscsi_complete_scsi_cmd_response(sc, csio, cmd_resp);
/* Errors freeze the device queue; CAM will thaw after recovery. */
1429 if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1430 status |= CAM_DEV_QFRZN;
1431 xpt_freeze_devq(ccbh->path, 1);
/* A request and virtqueue slot just freed up, so the SIMQ may thaw. */
1434 if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0)
1435 status |= CAM_RELEASE_SIMQ;
1437 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p status=%#x\n",
1440 ccbh->status = status;
1441 xpt_done(req->vsr_ccb);
1442 vtscsi_enqueue_request(sc, req);
/*
 * Busy-wait for a polled control request: repeatedly service the
 * control virtqueue until the request's completion handler sets
 * VTSCSI_REQ_FLAG_COMPLETE (see vtscsi_complete_request()).
 */
1446 vtscsi_poll_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req)
1449 /* XXX We probably shouldn't poll forever. */
1450 req->vsr_flags |= VTSCSI_REQ_FLAG_POLLED;
1452 vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
1453 while ((req->vsr_flags & VTSCSI_REQ_FLAG_COMPLETE) == 0);
1455 req->vsr_flags &= ~VTSCSI_REQ_FLAG_POLLED;
/*
 * Enqueue a control (TMF) request on the control virtqueue and notify
 * the host.  Returns EAGAIN when the virtqueue is out of descriptors;
 * for VTSCSI_EXECUTE_POLL callers, spins until the request completes.
 */
1459 vtscsi_execute_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req,
1460 struct sglist *sg, int readable, int writable, int flag)
1462 struct virtqueue *vq;
1465 vq = sc->vtscsi_control_vq;
/* Asynchronous requests must supply a completion callback. */
1467 KKASSERT(flag == VTSCSI_EXECUTE_POLL || req->vsr_complete != NULL);
1469 error = virtqueue_enqueue(vq, req, sg, readable, writable);
1472 * Return EAGAIN when the virtqueue does not have enough
1473 * descriptors available.
1475 if (error == ENOSPC || error == EMSGSIZE)
1481 virtqueue_notify(vq, NULL);
1482 if (flag == VTSCSI_EXECUTE_POLL)
1483 vtscsi_poll_ctrl_req(sc, req);
/*
 * Completion handler for an ABORT_TASK TMF: translate the TMF response
 * into the aborting CCB's CAM status and recycle the request.
 */
1489 vtscsi_complete_abort_task_cmd(struct vtscsi_softc *sc,
1490 struct vtscsi_request *req)
1493 struct ccb_hdr *ccbh;
1494 struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1498 tmf_resp = &req->vsr_tmf_resp;
1500 switch (tmf_resp->response) {
1501 case VIRTIO_SCSI_S_FUNCTION_COMPLETE:
1502 ccbh->status = CAM_REQ_CMP;
1504 case VIRTIO_SCSI_S_FUNCTION_REJECTED:
1505 ccbh->status = CAM_UA_ABORT;
1508 ccbh->status = CAM_REQ_CMP_ERR;
1513 vtscsi_enqueue_request(sc, req);
/*
 * Build and submit an ABORT_TASK TMF for the request referenced by an
 * XPT_ABORT CCB.  Only requests still in the INUSE state are eligible;
 * the target request is marked ABORTED and its timeout callout stopped
 * before the TMF is queued asynchronously on the control virtqueue.
 */
1517 vtscsi_execute_abort_task_cmd(struct vtscsi_softc *sc,
1518 struct vtscsi_request *req)
1521 struct ccb_abort *cab;
1522 struct ccb_hdr *ccbh;
1523 struct ccb_hdr *abort_ccbh;
1524 struct vtscsi_request *abort_req;
1525 struct virtio_scsi_ctrl_tmf_req *tmf_req;
1526 struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1529 sg = sc->vtscsi_sglist;
1530 cab = &req->vsr_ccb->cab;
1532 tmf_req = &req->vsr_tmf_req;
1533 tmf_resp = &req->vsr_tmf_resp;
1535 /* CCB header and request that's to be aborted. */
1536 abort_ccbh = &cab->abort_ccb->ccb_h;
1537 abort_req = abort_ccbh->ccbh_vtscsi_req;
/* Only SCSI I/O CCBs that still have a driver request can be aborted. */
1539 if (abort_ccbh->func_code != XPT_SCSI_IO || abort_req == NULL) {
1544 /* Only attempt to abort requests that could be in-flight. */
1545 if (abort_req->vsr_state != VTSCSI_REQ_STATE_INUSE) {
1550 abort_req->vsr_state = VTSCSI_REQ_STATE_ABORTED;
1551 if (abort_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
1552 callout_stop(&abort_req->vsr_callout);
/* The TMF tag identifies the command to abort by its CCB header pointer. */
1554 vtscsi_init_ctrl_tmf_req(ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
1555 (uintptr_t) abort_ccbh, tmf_req);
1558 sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
1559 sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));
1561 req->vsr_complete = vtscsi_complete_abort_task_cmd;
/* Preset to an invalid value before the response is written back. */
1562 tmf_resp->response = -1;
1564 error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
1565 VTSCSI_EXECUTE_ASYNC);
1568 vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p abort_ccb=%p "
1569 "abort_req=%p\n", error, req, abort_ccbh, abort_req);
/*
 * Completion handler for a device-reset TMF: report success/failure in
 * the CCB and, on success, announce a bus device reset (AC_SENT_BDR)
 * for the target so CAM peripherals can react.
 */
1575 vtscsi_complete_reset_dev_cmd(struct vtscsi_softc *sc,
1576 struct vtscsi_request *req)
1579 struct ccb_hdr *ccbh;
1580 struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1584 tmf_resp = &req->vsr_tmf_resp;
1586 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p response=%d\n",
1587 req, ccb, tmf_resp->response);
1589 if (tmf_resp->response == VIRTIO_SCSI_S_FUNCTION_COMPLETE) {
1590 ccbh->status = CAM_REQ_CMP;
1591 vtscsi_announce(sc, AC_SENT_BDR, ccbh->target_id,
1594 ccbh->status = CAM_REQ_CMP_ERR;
1597 vtscsi_enqueue_request(sc, req);
/*
 * Build and submit a reset TMF for an XPT_RESET_DEV CCB: an I_T nexus
 * reset when the LUN is the wildcard, otherwise a logical unit reset.
 * Queued asynchronously on the control virtqueue.
 */
1601 vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *sc,
1602 struct vtscsi_request *req)
1605 struct ccb_resetdev *crd;
1606 struct ccb_hdr *ccbh;
1607 struct virtio_scsi_ctrl_tmf_req *tmf_req;
1608 struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1612 sg = sc->vtscsi_sglist;
1613 crd = &req->vsr_ccb->crd;
1615 tmf_req = &req->vsr_tmf_req;
1616 tmf_resp = &req->vsr_tmf_resp;
1618 if (ccbh->target_lun == CAM_LUN_WILDCARD)
1619 subtype = VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET;
1621 subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET;
1623 vtscsi_init_ctrl_tmf_req(ccbh, subtype, 0, tmf_req);
1626 sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
1627 sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));
1629 req->vsr_complete = vtscsi_complete_reset_dev_cmd;
/* Preset to an invalid value before the response is written back. */
1630 tmf_resp->response = -1;
1632 error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
1633 VTSCSI_EXECUTE_ASYNC);
1635 vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p ccb=%p\n",
/*
 * Decode a virtio LUN field: byte 1 is the target id, bytes 2-3 the
 * big-endian logical unit number.
 * NOTE(review): the 0x40 address-method bit that vtscsi_set_request_lun()
 * sets in lun[2] is not masked off here — confirm event LUN format.
 */
1642 vtscsi_get_request_lun(uint8_t lun[], target_id_t *target_id, lun_id_t *lun_id)
1645 *target_id = lun[1];
1646 *lun_id = (lun[2] << 8) | lun[3];
/*
 * Encode the CCB's target and LUN into the virtio LUN field: byte 1 is
 * the target id; bytes 2-3 carry the LUN with the 0x40 flat-addressing
 * bit set in the high byte.
 */
1650 vtscsi_set_request_lun(struct ccb_hdr *ccbh, uint8_t lun[])
1654 lun[1] = ccbh->target_id;
1655 lun[2] = 0x40 | ((ccbh->target_lun >> 8) & 0x3F);
1656 lun[3] = ccbh->target_lun & 0xFF;
/*
 * Fill a virtio SCSI command request from a CAM SCSI I/O CCB: LUN
 * field, tag (the CCB pointer itself), task attribute derived from the
 * CAM tag action, and the CDB (inline or via pointer, per
 * CAM_CDB_POINTER).
 */
1660 vtscsi_init_scsi_cmd_req(struct ccb_scsiio *csio,
1661 struct virtio_scsi_cmd_req *cmd_req)
1665 switch (csio->tag_action) {
1666 case MSG_HEAD_OF_Q_TAG:
1667 attr = VIRTIO_SCSI_S_HEAD;
1669 case MSG_ORDERED_Q_TAG:
1670 attr = VIRTIO_SCSI_S_ORDERED;
1673 attr = VIRTIO_SCSI_S_ACA;
1675 default: /* MSG_SIMPLE_Q_TAG */
1676 attr = VIRTIO_SCSI_S_SIMPLE;
1680 vtscsi_set_request_lun(&csio->ccb_h, cmd_req->lun);
/* The CCB pointer doubles as the command tag for later lookup. */
1681 cmd_req->tag = (uintptr_t) csio;
1682 cmd_req->task_attr = attr;
1684 memcpy(cmd_req->cdb,
1685 csio->ccb_h.flags & CAM_CDB_POINTER ?
1686 csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes,
/*
 * Fill a virtio task-management request: LUN from the CCB header plus
 * the TMF type, subtype, and caller-supplied tag.
 */
1691 vtscsi_init_ctrl_tmf_req(struct ccb_hdr *ccbh, uint32_t subtype,
1692 uintptr_t tag, struct virtio_scsi_ctrl_tmf_req *tmf_req)
1695 vtscsi_set_request_lun(ccbh, tmf_req->lun);
1697 tmf_req->type = VIRTIO_SCSI_T_TMF;
1698 tmf_req->subtype = subtype;
/*
 * Record why the driver cannot accept new work (no free requests
 * and/or request virtqueue full) and freeze the SIMQ on the first
 * transition from unfrozen to frozen.
 */
1703 vtscsi_freeze_simq(struct vtscsi_softc *sc, int reason)
1707 frozen = sc->vtscsi_frozen;
1709 if (reason & VTSCSI_REQUEST &&
1710 (sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS) == 0)
1711 sc->vtscsi_frozen |= VTSCSI_FROZEN_NO_REQUESTS;
1713 if (reason & VTSCSI_REQUEST_VQ &&
1714 (sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL) == 0)
1715 sc->vtscsi_frozen |= VTSCSI_FROZEN_REQUEST_VQ_FULL;
1717 /* Freeze the SIMQ if transitioned to frozen. */
1718 if (frozen == 0 && sc->vtscsi_frozen != 0) {
1719 vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ frozen\n");
1720 xpt_freeze_simq(sc->vtscsi_sim, 1);
/*
 * Clear the given freeze reasons; reports (via 'thawed') whether all
 * reasons are now gone so the caller can release the SIMQ.
 */
1725 vtscsi_thaw_simq(struct vtscsi_softc *sc, int reason)
1729 if (sc->vtscsi_frozen == 0 || reason == 0)
1732 if (reason & VTSCSI_REQUEST &&
1733 sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS)
1734 sc->vtscsi_frozen &= ~VTSCSI_FROZEN_NO_REQUESTS;
1736 if (reason & VTSCSI_REQUEST_VQ &&
1737 sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL)
1738 sc->vtscsi_frozen &= ~VTSCSI_FROZEN_REQUEST_VQ_FULL;
/* Fully thawed only when no freeze reasons remain. */
1740 thawed = sc->vtscsi_frozen == 0;
1742 vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ thawed\n");
/*
 * Deliver a CAM async event (e.g. AC_SENT_BDR, AC_BUS_RESET) for the
 * given target/LUN, creating a temporary path unless the wildcard
 * softc path can be used.
 */
1748 vtscsi_announce(struct vtscsi_softc *sc, uint32_t ac_code,
1749 target_id_t target_id, lun_id_t lun_id)
1751 struct cam_path *path;
1753 /* Use the wildcard path from our softc for bus announcements. */
1754 if (target_id == CAM_TARGET_WILDCARD && lun_id == CAM_LUN_WILDCARD) {
1755 xpt_async(ac_code, sc->vtscsi_path, NULL);
1759 if (xpt_create_path(&path, NULL, cam_sim_path(sc->vtscsi_sim),
1760 target_id, lun_id) != CAM_REQ_CMP) {
1761 vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot create path\n");
1765 xpt_async(ac_code, path, NULL);
1766 xpt_free_path(path);
/*
 * Completion callback for rescan CCBs issued by the functions below:
 * release the path and CCB allocated for the scan.
 */
1770 vtscsi_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb)
1772 xpt_free_path(ccb->ccb_h.path);
1773 xpt_free_ccb(&ccb->ccb_h);
/*
 * Kick off an asynchronous XPT_SCAN_LUN of the given target/LUN; the
 * CCB and path are freed in vtscsi_cam_rescan_callback().
 */
1777 vtscsi_execute_rescan(struct vtscsi_softc *sc, target_id_t target_id,
1783 ccb = xpt_alloc_ccb();
1785 vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot allocate CCB\n");
1789 status = xpt_create_path(&ccb->ccb_h.path, NULL,
1790 cam_sim_path(sc->vtscsi_sim), target_id, lun_id);
1791 if (status != CAM_REQ_CMP) {
1792 xpt_free_ccb(&ccb->ccb_h);
1796 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 5/*priority (low)*/);
1797 ccb->ccb_h.func_code = XPT_SCAN_LUN;
1798 ccb->ccb_h.cbfcnp = vtscsi_cam_rescan_callback;
1799 ccb->crcn.flags = CAM_FLAG_NONE;
/*
 * Kick off an asynchronous XPT_SCAN_BUS over the wildcard target/LUN
 * (full bus rescan); cleanup happens in vtscsi_cam_rescan_callback().
 */
1804 vtscsi_execute_rescan_bus(struct vtscsi_softc *sc)
1809 ccb = xpt_alloc_ccb();
1811 vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot allocate CCB\n");
1815 status = xpt_create_path(&ccb->ccb_h.path, NULL,
1816 cam_sim_path(sc->vtscsi_sim),
1817 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
1818 if (status != CAM_REQ_CMP) {
1819 xpt_free_ccb(&ccb->ccb_h);
1823 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 5/*priority (low)*/);
1824 ccb->ccb_h.func_code = XPT_SCAN_BUS;
1825 ccb->ccb_h.cbfcnp = vtscsi_cam_rescan_callback;
1826 ccb->crcn.flags = CAM_FLAG_NONE;
/*
 * Handle a VIRTIO_SCSI_T_TRANSPORT_RESET event: decode the affected
 * target/LUN from the event and rescan it for device arrival/removal.
 */
1831 vtscsi_transport_reset_event(struct vtscsi_softc *sc,
1832 struct virtio_scsi_event *event)
1834 target_id_t target_id;
1837 vtscsi_get_request_lun(event->lun, &target_id, &lun_id);
1839 switch (event->reason) {
1840 case VIRTIO_SCSI_EVT_RESET_RESCAN:
1841 case VIRTIO_SCSI_EVT_RESET_REMOVED:
1842 vtscsi_execute_rescan(sc, target_id, lun_id);
1845 device_printf(sc->vtscsi_dev,
1846 "unhandled transport event reason: %d\n", event->reason);
/*
 * Dispatch a dequeued event buffer: handle known event types, fall
 * back to a full bus rescan when events were missed, then requeue the
 * buffer on the event virtqueue.
 */
1852 vtscsi_handle_event(struct vtscsi_softc *sc, struct virtio_scsi_event *event)
1856 if ((event->event & VIRTIO_SCSI_T_EVENTS_MISSED) == 0) {
1857 switch (event->event) {
1858 case VIRTIO_SCSI_T_TRANSPORT_RESET:
1859 vtscsi_transport_reset_event(sc, event);
1862 device_printf(sc->vtscsi_dev,
1863 "unhandled event: %d\n", event->event);
/* Events were lost: rescan the whole bus to resynchronize. */
1867 vtscsi_execute_rescan_bus(sc);
1870 * This should always be successful since the buffer
1871 * was just dequeued.
1873 error = vtscsi_enqueue_event_buf(sc, event);
1875 ("cannot requeue event buffer: %d", error));
/*
 * Post one event buffer (device-writable only) on the event virtqueue
 * and notify the host.
 */
1879 vtscsi_enqueue_event_buf(struct vtscsi_softc *sc,
1880 struct virtio_scsi_event *event)
1883 struct virtqueue *vq;
1886 sg = sc->vtscsi_sglist;
1887 vq = sc->vtscsi_event_vq;
1888 size = sc->vtscsi_event_buf_size;
1893 error = sglist_append(sg, event, size);
/* Zero readable segments: the buffer is written only by the device. */
1897 error = virtqueue_enqueue(vq, event, sg, 0, sg->sg_nseg);
1901 virtqueue_notify(vq, NULL);
/*
 * Populate the event virtqueue with VTSCSI_NUM_EVENT_BUFS buffers.
 * Skipped (size check fails) unless the device negotiated hotplug, to
 * avoid a QEMU crash in its first VirtIO SCSI release.
 */
1907 vtscsi_init_event_vq(struct vtscsi_softc *sc)
1909 struct virtio_scsi_event *event;
1913 * The first release of QEMU with VirtIO SCSI support would crash
1914 * when attempting to notify the event virtqueue. This was fixed
1915 * when hotplug support was added.
1917 if (sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG)
1918 size = sc->vtscsi_event_buf_size;
1922 if (size < sizeof(struct virtio_scsi_event))
1925 for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) {
1926 event = &sc->vtscsi_event_bufs[i];
1928 error = vtscsi_enqueue_event_buf(sc, event);
1934 * Even just one buffer is enough. Missed events are
1935 * denoted with the VIRTIO_SCSI_T_EVENTS_MISSED flag.
/*
 * Repost the static event buffers after a device reinit (bus reset);
 * no-op when hotplug/event support is not active.
 */
1944 vtscsi_reinit_event_vq(struct vtscsi_softc *sc)
1946 struct virtio_scsi_event *event;
1949 if ((sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG) == 0 ||
1950 sc->vtscsi_event_buf_size < sizeof(struct virtio_scsi_event))
1953 for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) {
1954 event = &sc->vtscsi_event_bufs[i];
1956 error = vtscsi_enqueue_event_buf(sc, event);
/* At least one buffer must have been reposted. */
1961 KASSERT(i > 0, ("cannot reinit event vq: %d", error));
/*
 * Pull all outstanding buffers off the event virtqueue; the buffers
 * themselves are statically allocated so nothing is freed here.
 */
1965 vtscsi_drain_event_vq(struct vtscsi_softc *sc)
1967 struct virtqueue *vq;
1970 vq = sc->vtscsi_event_vq;
1973 while (virtqueue_drain(vq, &last) != NULL)
1976 KASSERT(virtqueue_empty(vq), ("eventvq not empty"));
/*
 * Service any completed descriptors on the request and control
 * virtqueues.  Caller must hold the softc lock.
 */
1980 vtscsi_complete_vqs_locked(struct vtscsi_softc *sc)
1983 VTSCSI_LOCK_OWNED(sc);
1985 if (sc->vtscsi_request_vq != NULL)
1986 vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
1987 if (sc->vtscsi_control_vq != NULL)
1988 vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
/*
 * Wrapper around vtscsi_complete_vqs_locked().
 * NOTE(review): lock acquisition/release around the call is not
 * visible in this listing — confirm it takes VTSCSI_MTX.
 */
1992 vtscsi_complete_vqs(struct vtscsi_softc *sc)
1996 vtscsi_complete_vqs_locked(sc);
/*
 * Cancel an in-flight request pulled off a drained virtqueue: stop (or,
 * on detach, fully drain) its timeout callout, complete its CCB with
 * CAM_NO_HBA (detach) or CAM_REQUEUE_REQ (bus reset), and return the
 * request to the free list.
 */
2001 vtscsi_cancel_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
2008 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p\n", req, ccb);
2011 * The callout must be drained when detaching since the request is
2012 * about to be freed. The VTSCSI_MTX must not be held for this in
2013 * case the callout is pending because there is a deadlock potential.
2014 * Otherwise, the virtqueue is being drained because of a bus reset
2015 * so we only need to attempt to stop the callouts.
2017 detach = (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) != 0;
2019 VTSCSI_LOCK_NOTOWNED(sc);
2021 VTSCSI_LOCK_OWNED(sc);
2023 if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) {
2025 callout_drain(&req->vsr_callout);
2027 callout_stop(&req->vsr_callout);
2033 ccb->ccb_h.status = CAM_NO_HBA;
2035 ccb->ccb_h.status = CAM_REQUEUE_REQ;
2041 vtscsi_enqueue_request(sc, req);
/*
 * Drain a virtqueue, canceling every request still on it; asserts the
 * queue is empty afterwards.
 */
2045 vtscsi_drain_vq(struct vtscsi_softc *sc, struct virtqueue *vq)
2047 struct vtscsi_request *req;
2052 vtscsi_dprintf(sc, VTSCSI_TRACE, "vq=%p\n", vq);
2054 while ((req = virtqueue_drain(vq, &last)) != NULL)
2055 vtscsi_cancel_request(sc, req);
2057 KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
/*
 * Drain all three virtqueues (control, request, event) of any
 * outstanding buffers.
 */
2061 vtscsi_drain_vqs(struct vtscsi_softc *sc)
2064 if (sc->vtscsi_control_vq != NULL)
2065 vtscsi_drain_vq(sc, sc->vtscsi_control_vq);
2066 if (sc->vtscsi_request_vq != NULL)
2067 vtscsi_drain_vq(sc, sc->vtscsi_request_vq);
2068 if (sc->vtscsi_event_vq != NULL)
2069 vtscsi_drain_event_vq(sc);
/*
 * Quiesce the device: mask virtqueue interrupts, then stop the
 * underlying virtio device.
 */
2073 vtscsi_stop(struct vtscsi_softc *sc)
2076 vtscsi_disable_vqs_intr(sc);
2077 virtio_stop(sc->vtscsi_dev);
/*
 * Perform a full bus reset: stop the device, complete/cancel all
 * in-flight requests, drain the virtqueues, thaw the SIMQ, reinit the
 * device, and announce AC_BUS_RESET to CAM.  Softc lock must be held;
 * can be disabled via the vtscsi_bus_reset_disable tunable.
 */
2081 vtscsi_reset_bus(struct vtscsi_softc *sc)
2085 VTSCSI_LOCK_OWNED(sc);
2087 if (vtscsi_bus_reset_disable != 0) {
2088 device_printf(sc->vtscsi_dev, "bus reset disabled\n");
2092 sc->vtscsi_flags |= VTSCSI_FLAG_RESET;
2095 * vtscsi_stop() will cause the in-flight requests to be canceled.
2096 * Those requests are then completed here so CAM will retry them
2097 * after the reset is complete.
2100 vtscsi_complete_vqs_locked(sc);
2102 /* Rid the virtqueues of any remaining requests. */
2103 vtscsi_drain_vqs(sc);
2106 * Any resource shortage that froze the SIMQ cannot persist across
2107 * a bus reset so ensure it gets thawed here.
2109 if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0)
2110 xpt_release_simq(sc->vtscsi_sim, 0);
2112 error = vtscsi_reinit(sc);
2114 device_printf(sc->vtscsi_dev,
2115 "reinitialization failed, stopping device...\n");
2118 vtscsi_announce(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
2121 sc->vtscsi_flags &= ~VTSCSI_FLAG_RESET;
/*
 * One-time initialization of a request: verify its request and
 * response areas each fit in a single sglist segment (no page
 * crossing), link it to the softc, and set up its timeout callout
 * protected by the softc lock.
 */
2127 vtscsi_init_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
2131 int req_nsegs, resp_nsegs;
2133 req_nsegs = sglist_count(&req->vsr_ureq, sizeof(req->vsr_ureq));
2134 resp_nsegs = sglist_count(&req->vsr_uresp, sizeof(req->vsr_uresp));
2136 KASSERT(req_nsegs == 1, ("request crossed page boundary"));
2137 KASSERT(resp_nsegs == 1, ("response crossed page boundary"));
2140 req->vsr_softc = sc;
2141 callout_init_lk(&req->vsr_callout, VTSCSI_MTX(sc));
/*
 * Preallocate the request pool, sized from the request virtqueue
 * (divided by VTSCSI_MIN_SEGMENTS without indirect descriptors) plus
 * reserved requests for internal TMF use.  Each request is allocated
 * 16-byte aligned from contiguous memory and placed on the free list.
 */
2145 vtscsi_alloc_requests(struct vtscsi_softc *sc)
2147 struct vtscsi_request *req;
2151 * Commands destined for either the request or control queues come
2152 * from the same SIM queue. Use the size of the request virtqueue
2153 * as it (should) be much more frequently used. Some additional
2154 * requests are allocated for internal (TMF) use.
2156 nreqs = virtqueue_size(sc->vtscsi_request_vq);
2157 if ((sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) == 0)
2158 nreqs /= VTSCSI_MIN_SEGMENTS;
2159 nreqs += VTSCSI_RESERVED_REQUESTS;
2161 for (i = 0; i < nreqs; i++) {
2162 req = contigmalloc(sizeof(struct vtscsi_request), M_DEVBUF,
2163 M_WAITOK, 0, BUS_SPACE_MAXADDR, 16, 0);
2167 vtscsi_init_request(sc, req);
2169 sc->vtscsi_nrequests++;
2170 vtscsi_enqueue_request(sc, req);
/*
 * Tear down the request pool: dequeue every free request (asserting
 * its callout is no longer active), free it, and verify none leaked.
 */
2177 vtscsi_free_requests(struct vtscsi_softc *sc)
2179 struct vtscsi_request *req;
2181 while ((req = vtscsi_dequeue_request(sc)) != NULL) {
2182 KASSERT(callout_active(&req->vsr_callout) == 0,
2183 ("request callout still active"));
2185 sc->vtscsi_nrequests--;
2186 contigfree(req, sizeof(struct vtscsi_request), M_DEVBUF);
2189 KASSERT(sc->vtscsi_nrequests == 0, ("leaked requests: %d",
2190 sc->vtscsi_nrequests));
/*
 * Return a request to the free list: possibly thaw/release the SIMQ
 * now that a request is available, scrub the request's per-command
 * state, and append it at the tail of the free list.
 */
2194 vtscsi_enqueue_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
2197 KASSERT(req->vsr_softc == sc,
2198 ("non-matching request vsr_softc %p/%p", req->vsr_softc, sc));
2200 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req);
2202 /* A request is available so the SIMQ could be released. */
2203 if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST) != 0)
2204 xpt_release_simq(sc->vtscsi_sim, 1);
2206 req->vsr_ccb = NULL;
2207 req->vsr_complete = NULL;
2208 req->vsr_ptr0 = NULL;
2209 req->vsr_state = VTSCSI_REQ_STATE_FREE;
2212 bzero(&req->vsr_ureq, sizeof(req->vsr_ureq));
2213 bzero(&req->vsr_uresp, sizeof(req->vsr_uresp));
2216 * We insert at the tail of the queue in order to make it
2217 * very unlikely a request will be reused if we race with
2218 * stopping its callout handler.
2220 TAILQ_INSERT_TAIL(&sc->vtscsi_req_free, req, vsr_link);
/*
 * Take a request off the free list, marking it INUSE.  When the list
 * is empty, only the dequeue_no_requests statistic is bumped (the
 * NULL return is handled by the caller).
 */
2223 static struct vtscsi_request *
2224 vtscsi_dequeue_request(struct vtscsi_softc *sc)
2226 struct vtscsi_request *req;
2228 req = TAILQ_FIRST(&sc->vtscsi_req_free);
2230 req->vsr_state = VTSCSI_REQ_STATE_INUSE;
2231 TAILQ_REMOVE(&sc->vtscsi_req_free, req, vsr_link);
2233 sc->vtscsi_stats.dequeue_no_requests++;
2235 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req);
/*
 * Mark a dequeued request complete (so pollers in
 * vtscsi_poll_ctrl_req() can observe it) and invoke its completion
 * callback, if any.
 */
2241 vtscsi_complete_request(struct vtscsi_request *req)
2244 if (req->vsr_flags & VTSCSI_REQ_FLAG_POLLED)
2245 req->vsr_flags |= VTSCSI_REQ_FLAG_COMPLETE;
2247 if (req->vsr_complete != NULL)
2248 req->vsr_complete(req->vsr_softc, req);
/*
 * Complete every finished request on a virtqueue.  Caller must hold
 * the softc lock.
 */
2252 vtscsi_complete_vq(struct vtscsi_softc *sc, struct virtqueue *vq)
2254 struct vtscsi_request *req;
2256 VTSCSI_LOCK_OWNED(sc);
2258 while ((req = virtqueue_dequeue(vq, NULL)) != NULL)
2259 vtscsi_complete_request(req);
/*
 * Control virtqueue interrupt handler: service completions, then
 * re-enable the interrupt; if more work arrived in between, disable
 * it again and loop (retry path elided in this listing).
 */
2263 vtscsi_control_vq_intr(void *xsc)
2265 struct vtscsi_softc *sc;
2266 struct virtqueue *vq;
2269 vq = sc->vtscsi_control_vq;
/* Spurious interrupt: nothing pending on the queue. */
2273 if (!virtqueue_pending(vq))
2276 vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
2278 if (virtqueue_enable_intr(vq) != 0) {
2279 virtqueue_disable_intr(vq);
/*
 * Event virtqueue interrupt handler: dequeue and handle each posted
 * event buffer, then re-enable the interrupt; if more events arrived
 * meanwhile, disable it again and loop (retry path elided here).
 */
2289 vtscsi_event_vq_intr(void *xsc)
2291 struct vtscsi_softc *sc;
2292 struct virtqueue *vq;
2293 struct virtio_scsi_event *event;
2296 vq = sc->vtscsi_event_vq;
/* Spurious interrupt: nothing pending on the queue. */
2300 if (!virtqueue_pending(vq))
2303 while ((event = virtqueue_dequeue(vq, NULL)) != NULL)
2304 vtscsi_handle_event(sc, event);
2306 if (virtqueue_enable_intr(vq) != 0) {
2307 virtqueue_disable_intr(vq);
/*
 * Request virtqueue interrupt handler: service command completions,
 * then re-enable the interrupt; if more completed in between, disable
 * it again and loop (retry path elided in this listing).
 */
2317 vtscsi_request_vq_intr(void *xsc)
2319 struct vtscsi_softc *sc;
2320 struct virtqueue *vq;
2323 vq = sc->vtscsi_request_vq;
/* Spurious interrupt: nothing pending on the queue. */
2327 if (!virtqueue_pending(vq))
2330 vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
2332 if (virtqueue_enable_intr(vq) != 0) {
2333 virtqueue_disable_intr(vq);
/* Mask interrupts on all three virtqueues. */
2343 vtscsi_disable_vqs_intr(struct vtscsi_softc *sc)
2346 virtqueue_disable_intr(sc->vtscsi_control_vq);
2347 virtqueue_disable_intr(sc->vtscsi_event_vq);
2348 virtqueue_disable_intr(sc->vtscsi_request_vq);
/* Unmask interrupts on all three virtqueues. */
2352 vtscsi_enable_vqs_intr(struct vtscsi_softc *sc)
2355 virtqueue_enable_intr(sc->vtscsi_control_vq);
2356 virtqueue_enable_intr(sc->vtscsi_event_vq);
2357 virtqueue_enable_intr(sc->vtscsi_request_vq);
/*
 * Fetch the debug level from the global hw.vtscsi.debug_level tunable,
 * then let the per-device dev.vtscsi.<unit>.debug_level tunable
 * override it.
 */
2361 vtscsi_get_tunables(struct vtscsi_softc *sc)
2365 TUNABLE_INT_FETCH("hw.vtscsi.debug_level", &sc->vtscsi_debug);
2367 ksnprintf(tmpstr, sizeof(tmpstr), "dev.vtscsi.%d.debug_level",
2368 device_get_unit(sc->vtscsi_dev));
2369 TUNABLE_INT_FETCH(tmpstr, &sc->vtscsi_debug);
/*
 * Attach the driver's sysctl nodes under the device's tree: a
 * read-write debug_level knob and read-only statistics counters.
 */
2373 vtscsi_add_sysctl(struct vtscsi_softc *sc)
2376 struct vtscsi_statistics *stats;
2377 struct sysctl_ctx_list *ctx;
2378 struct sysctl_oid *tree;
2379 struct sysctl_oid_list *child;
2381 dev = sc->vtscsi_dev;
2382 stats = &sc->vtscsi_stats;
2383 ctx = device_get_sysctl_ctx(dev);
2384 tree = device_get_sysctl_tree(dev);
2385 child = SYSCTL_CHILDREN(tree);
2387 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "debug_level",
2388 CTLFLAG_RW, &sc->vtscsi_debug, 0,
2391 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "scsi_cmd_timeouts",
2392 CTLFLAG_RD, &stats->scsi_cmd_timeouts,
2393 "SCSI command timeouts");
2394 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dequeue_no_requests",
2395 CTLFLAG_RD, &stats->dequeue_no_requests,
2396 "No available requests to dequeue");
2400 vtscsi_printf_req(struct vtscsi_request *req, const char *func,
2401 const char *fmt, ...)
2403 struct vtscsi_softc *sc;
2413 sc = req->vsr_softc;
2416 __va_start(ap, fmt);
2417 sbuf_new(&sb, str, sizeof(str), 0);
2420 sbuf_printf(&sb, "(noperiph:%s%d:%u): ",
2421 cam_sim_name(sc->vtscsi_sim), cam_sim_unit(sc->vtscsi_sim),
2422 cam_sim_bus(sc->vtscsi_sim));
2424 xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str));
2425 sbuf_cat(&sb, path_str);
2426 if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
2427 scsi_command_string(&ccb->csio, &sb);
2428 sbuf_printf(&sb, "length %d ", ccb->csio.dxfer_len);
2432 sbuf_vprintf(&sb, fmt, ap);
2436 kprintf("%s: %s: %s", device_get_nameunit(sc->vtscsi_dev), func,