 * Copyright (c) 2002 Nate Lawson.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions, and the following disclaimer,
 * without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 * derived from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * $FreeBSD: src/share/examples/scsi_target/scsi_target.c,v 1.5.2.5 2003/02/18 22:07:10 njl Exp $
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/param.h>
#include <bus/cam/cam_queue.h>
#include <bus/cam/scsi/scsi_all.h>
#include <bus/cam/scsi/scsi_targetio.h>
#include <bus/cam/scsi/scsi_message.h>
#include "scsi_target.h"
/* Maximum amount to transfer per CTIO */
#define MAX_XFER MAXPHYS
/* Maximum number of allocated CTIOs */
/* Maximum sector size for emulated volume */
#define MAX_SECTOR 32768
/* Global variables */
u_int32_t volume_size;
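/*
 * pending_queue tracks CCBs that have been handed to the targ driver and are
 * awaiting completion; work_queue holds CCBs the kernel has returned that
 * still need servicing in userland (see send_ccb() and handle_read()).
 */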
static struct ccb_queue pending_queue;
static struct ccb_queue work_queue;
static struct ioc_enable_lun ioc_enlun = {
static void cleanup(void);
static int init_ccbs(void);
static void request_loop(void);
static void handle_read(void);
static int work_atio(struct ccb_accept_tio *);
static void queue_io(struct ccb_scsiio *);
static void run_queue(struct ccb_accept_tio *);
static int work_inot(struct ccb_immed_notify *);
static struct ccb_scsiio *
static void free_ccb(union ccb *);
static cam_status get_sim_flags(u_int16_t *);
static void rel_simq(void);
static void abort_all_pending(void);
static void usage(void);
main(int argc, char *argv[])
char *file_name, targname[16];
u_int16_t req_flags, sim_flags;
req_flags = sim_flags = 0;
targ_fd = file_fd = kq_fd = -1;
sector_size = SECTOR_SIZE;
/* Prepare resource pools */
TAILQ_INIT(&pending_queue);
TAILQ_INIT(&work_queue);
while ((ch = getopt(argc, argv, "AdSTb:c:s:W:")) != -1) {
req_flags |= SID_Addr16;
req_flags |= SID_Sync;
req_flags |= SID_CmdQue;
buf_size = atoi(optarg);
if (buf_size < 256 || buf_size > MAX_XFER)
errx(1, "Unreasonable buf size: %s", optarg);
sector_size = atoi(optarg);
if (sector_size < 512 || sector_size > MAX_SECTOR)
errx(1, "Unreasonable sector size: %s", optarg);
user_size = strtoll(optarg, NULL, /*base*/10);
errx(1, "Unreasonable volume size: %s", optarg);
req_flags &= ~(SID_WBus16 | SID_WBus32);
switch (atoi(optarg)) {
/* Leave req_flags zeroed */
req_flags |= SID_WBus16;
req_flags |= SID_WBus32;
warnx("Width %s not supported", optarg);
sscanf(argv[0], "%u:%u:%u", &ioc_enlun.path_id, &ioc_enlun.target_id,
if (ioc_enlun.path_id == CAM_BUS_WILDCARD ||
ioc_enlun.target_id == CAM_TARGET_WILDCARD ||
ioc_enlun.lun_id == CAM_LUN_WILDCARD) {
warnx("Incomplete target path specified");
/* We don't support any vendor-specific commands */
ioc_enlun.grp6_len = 0;
ioc_enlun.grp7_len = 0;
/* Open backing store for IO */
file_fd = open(file_name, O_RDWR);
err(1, "open backing store file");
/* Check backing store size or use the size user gave us */
if (user_size == 0) {
if (fstat(file_fd, &st) < 0)
err(1, "fstat file");
volume_size = st.st_size / sector_size;
volume_size = user_size / sector_size;
if (volume_size <= 0)
errx(1, "volume must be larger than %d", sector_size);
struct aiocb aio, *aiop;
/* Make sure we have working AIO support */
memset(&aio, 0, sizeof(aio));
aio.aio_buf = malloc(sector_size);
if (aio.aio_buf == NULL)
aio.aio_fildes = file_fd;
aio.aio_nbytes = sector_size;
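/*
 * If the kernel lacks AIO support, the aio_read() below is expected to be
 * rejected with SIGSYS; temporarily ignoring that signal lets the call fail
 * with an error return so we can print a hint instead of being killed.
 */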
signal(SIGSYS, SIG_IGN);
if (aio_read(&aio) != 0) {
printf("You must enable VFS_AIO in your kernel "
"or load the aio(4) module.\n");
if (aio_waitcomplete(&aiop, NULL) != sector_size)
err(1, "aio_waitcomplete");
assert(aiop == &aio);
signal(SIGSYS, SIG_DFL);
free((void *)aio.aio_buf);
warnx("aio support tested ok");
/* Go through all the control devices and find one that isn't busy. */
snprintf(targname, sizeof(targname), "/dev/targ%d", unit++);
targ_fd = open(targname, O_RDWR);
} while (targ_fd < 0 && errno == EBUSY);
err(1, "Tried to open %d devices, none available", unit);
/* The first three are handled by kevent() later */
signal(SIGHUP, SIG_IGN);
signal(SIGINT, SIG_IGN);
signal(SIGTERM, SIG_IGN);
signal(SIGPROF, SIG_IGN);
signal(SIGALRM, SIG_IGN);
signal(SIGSTOP, SIG_IGN);
signal(SIGTSTP, SIG_IGN);
/* Register a cleanup handler to run when exiting */
/* Enable listening on the specified LUN */
if (ioctl(targ_fd, TARGIOCENABLE, &ioc_enlun) != 0)
err(1, "TARGIOCENABLE");
/* Enable debugging if requested */
if (ioctl(targ_fd, TARGIOCDEBUG, &debug) != 0)
err(1, "TARGIOCDEBUG");
/* Set up inquiry data according to what SIM supports */
if (get_sim_flags(&sim_flags) != CAM_REQ_CMP)
errx(1, "get_sim_flags");
if (tcmd_init(req_flags, sim_flags) != 0)
errx(1, "Initializing tcmd subsystem failed");
/* Queue ATIOs and INOTs on descriptor */
if (init_ccbs() != 0)
errx(1, "init_ccbs failed");
warnx("main loop beginning");
struct ccb_hdr *ccb_h;
warnx("cleanup called");
ioctl(targ_fd, TARGIOCDEBUG, &debug);
ioctl(targ_fd, TARGIOCDISABLE, NULL);
while ((ccb_h = TAILQ_FIRST(&pending_queue)) != NULL) {
TAILQ_REMOVE(&pending_queue, ccb_h, periph_links.tqe);
free_ccb((union ccb *)ccb_h);
while ((ccb_h = TAILQ_FIRST(&work_queue)) != NULL) {
TAILQ_REMOVE(&work_queue, ccb_h, periph_links.tqe);
free_ccb((union ccb *)ccb_h);
/* Allocate ATIOs/INOTs and queue on HBA */
for (i = 0; i < MAX_INITIATORS; i++) {
struct ccb_accept_tio *atio;
struct atio_descr *a_descr;
struct ccb_immed_notify *inot;
atio = (struct ccb_accept_tio *)malloc(sizeof(*atio));
a_descr = (struct atio_descr *)malloc(sizeof(*a_descr));
if (a_descr == NULL) {
warn("malloc atio_descr");
atio->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
atio->ccb_h.targ_descr = a_descr;
send_ccb((union ccb *)atio, /*priority*/1);
inot = (struct ccb_immed_notify *)malloc(sizeof(*inot));
inot->ccb_h.func_code = XPT_IMMED_NOTIFY;
send_ccb((union ccb *)inot, /*priority*/1);
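/*
 * These pre-queued ATIOs and INOTs stay with the SIM until an initiator
 * sends a command or an event occurs; the kernel then completes them and
 * they come back to us through read(2) in handle_read().
 */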
struct kevent events[MAX_EVENTS];
struct timespec ts, *tptr;
/* Register kqueue for event notification */
if ((kq_fd = kqueue()) < 0)
err(1, "init kqueue");
/* Set up some default events */
EV_SET(&events[0], SIGHUP, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0, 0, 0);
EV_SET(&events[1], SIGINT, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0, 0, 0);
EV_SET(&events[2], SIGTERM, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0, 0, 0);
EV_SET(&events[3], targ_fd, EVFILT_READ, EV_ADD|EV_ENABLE, 0, 0, 0);
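/*
 * AIO completions arrive on this same kqueue as EVFILT_AIO events: each
 * CTIO's aiocb is set up with SIGEV_KEVENT aimed at kq_fd (see the CTIO
 * allocator below), with the CTIO itself passed back in the event's udata.
 */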
if (kevent(kq_fd, events, 4, NULL, 0, NULL) < 0)
err(1, "kevent signal registration");
/* Loop until user signal */
struct ccb_hdr *ccb_h;
/* Check for the next signal, read ready, or AIO completion */
retval = kevent(kq_fd, NULL, 0, events, MAX_EVENTS, tptr);
if (errno == EINTR) {
warnx("EINTR, looping");
err(1, "kevent failed");
} else if (retval > MAX_EVENTS) {
errx(1, "kevent returned more events than allocated?");
/* Process all received events. */
for (i = 0; i < retval; i++) {
if ((events[i].flags & EV_ERROR) != 0)
errx(1, "kevent registration failed");
switch (events[i].filter) {
struct ccb_scsiio *ctio;
struct ctio_descr *c_descr;
ctio = (struct ccb_scsiio *)events[i].udata;
c_descr = (struct ctio_descr *)
ctio->ccb_h.targ_descr;
c_descr->event = AIO_DONE;
/* Queue on the appropriate ATIO */
/* Process any queued completions. */
run_queue(c_descr->atio);
warnx("signal ready, setting quit");
warnx("unknown event %#x", events[i].filter);
/* Grab the first CCB and perform one work unit. */
if ((ccb_h = TAILQ_FIRST(&work_queue)) != NULL) {
ccb = (union ccb *)ccb_h;
switch (ccb_h->func_code) {
case XPT_ACCEPT_TARGET_IO:
/* Start one more transfer. */
retval = work_atio(&ccb->atio);
case XPT_IMMED_NOTIFY:
retval = work_inot(&ccb->cin);
warnx("Unhandled ccb type %#x on workq",
/* Assume work function handled the exception */
if ((ccb_h->status & CAM_DEV_QFRZN) != 0) {
warnx("Queue frozen receiving CCB, "
/* No more work needed for this command. */
TAILQ_REMOVE(&work_queue, ccb_h,
* Poll for new events (i.e. completions) while we
* are processing CCBs on the work_queue. Once it's
* empty, use an infinite wait.
if (!TAILQ_EMPTY(&work_queue))
/* CCBs are ready from the kernel */
union ccb *ccb_array[MAX_INITIATORS], *ccb;
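/*
 * A read(2) on the targ control device returns an array of pointers to the
 * user-space CCBs that have completed, not the CCB contents themselves.
 */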
ccb_count = read(targ_fd, ccb_array, sizeof(ccb_array));
if (ccb_count <= 0) {
warn("read ccb ptrs");
ccb_count /= sizeof(union ccb *);
warnx("truncated read ccb ptr?");
for (i = 0; i < ccb_count; i++) {
TAILQ_REMOVE(&pending_queue, &ccb->ccb_h, periph_links.tqe);
switch (ccb->ccb_h.func_code) {
case XPT_ACCEPT_TARGET_IO:
struct ccb_accept_tio *atio;
struct atio_descr *a_descr;
/* Initialize ATIO descr for this transaction */
a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;
bzero(a_descr, sizeof(*a_descr));
TAILQ_INIT(&a_descr->cmplt_io);
a_descr->flags = atio->ccb_h.flags &
(CAM_DIS_DISCONNECT | CAM_TAG_ACTION_VALID);
/* XXX add a_descr->priority */
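/* The CDB is inline in the ATIO unless CAM_CDB_POINTER says it is passed by reference */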
if ((atio->ccb_h.flags & CAM_CDB_POINTER) == 0)
a_descr->cdb = atio->cdb_io.cdb_bytes;
a_descr->cdb = atio->cdb_io.cdb_ptr;
/* ATIOs are processed in FIFO order */
TAILQ_INSERT_TAIL(&work_queue, &ccb->ccb_h,
case XPT_CONT_TARGET_IO:
struct ccb_scsiio *ctio;
struct ctio_descr *c_descr;
c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;
c_descr->event = CTIO_DONE;
/* Queue on the appropriate ATIO */
/* Process any queued completions. */
run_queue(c_descr->atio);
case XPT_IMMED_NOTIFY:
/* INOTs are handled with priority */
TAILQ_INSERT_HEAD(&work_queue, &ccb->ccb_h,
warnx("Unhandled ccb type %#x in handle_read",
ccb->ccb_h.func_code);
/* Process an ATIO CCB from the kernel */
work_atio(struct ccb_accept_tio *atio)
struct ccb_scsiio *ctio;
struct atio_descr *a_descr;
struct ctio_descr *c_descr;
warnx("Working on ATIO %p", atio);
a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;
/* Get a CTIO and initialize it according to our known parameters */
ctio->ccb_h.flags = a_descr->flags;
ctio->tag_id = atio->tag_id;
ctio->init_id = atio->init_id;
/* XXX priority needs to be added to a_descr */
c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;
c_descr->atio = atio;
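/*
 * The starting file offset for this CTIO depends on the transfer direction;
 * targ_req and init_req appear to track how much data has already been
 * requested toward the initiator and from it, respectively.
 */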
if ((a_descr->flags & CAM_DIR_IN) != 0)
c_descr->offset = a_descr->base_off + a_descr->targ_req;
else if ((a_descr->flags & CAM_DIR_MASK) == CAM_DIR_OUT)
c_descr->offset = a_descr->base_off + a_descr->init_req;
* Return a check condition if there was an error while
* receiving this ATIO.
if (atio->sense_len != 0) {
struct scsi_sense_data *sense;
warnx("ATIO with %u bytes sense received",
sense = &atio->sense_data;
tcmd_sense(ctio->init_id, ctio, sense->flags,
sense->add_sense_code, sense->add_sense_code_qual);
send_ccb((union ccb *)ctio, /*priority*/1);
status = atio->ccb_h.status & CAM_STATUS_MASK;
ret = tcmd_handle(atio, ctio, ATIO_WORK);
case CAM_REQ_ABORTED:
TAILQ_REMOVE(&work_queue, &atio->ccb_h, periph_links.tqe);
send_ccb((union ccb *)atio, /*priority*/1);
warnx("ATIO completed with unhandled status %#x", status);
queue_io(struct ccb_scsiio *ctio)
struct ccb_hdr *ccb_h;
struct io_queue *ioq;
struct ctio_descr *c_descr, *curr_descr;
c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;
/* If the completion is for a specific ATIO, queue in order */
if (c_descr->atio != NULL) {
struct atio_descr *a_descr;
a_descr = (struct atio_descr *)c_descr->atio->ccb_h.targ_descr;
ioq = &a_descr->cmplt_io;
errx(1, "CTIO %p has NULL ATIO", ctio);
/* Insert in order, sorted by offset */
if (!TAILQ_EMPTY(ioq)) {
TAILQ_FOREACH_REVERSE(ccb_h, ioq, io_queue, periph_links.tqe) {
curr_descr = (struct ctio_descr *)ccb_h->targ_descr;
if (curr_descr->offset <= c_descr->offset) {
TAILQ_INSERT_AFTER(ioq, ccb_h, &ctio->ccb_h,
if (TAILQ_PREV(ccb_h, io_queue, periph_links.tqe)
TAILQ_INSERT_BEFORE(ccb_h, &ctio->ccb_h,
TAILQ_INSERT_HEAD(ioq, &ctio->ccb_h, periph_links.tqe);
* Go through all completed AIO/CTIOs for a given ATIO and advance data
* counts, start continuation IO, etc.
run_queue(struct ccb_accept_tio *atio)
struct atio_descr *a_descr;
struct ccb_hdr *ccb_h;
int sent_status, event;
a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;
while ((ccb_h = TAILQ_FIRST(&a_descr->cmplt_io)) != NULL) {
struct ccb_scsiio *ctio;
struct ctio_descr *c_descr;
ctio = (struct ccb_scsiio *)ccb_h;
c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;
/* If completed item is in range, call handler */
if ((c_descr->event == AIO_DONE &&
c_descr->offset == a_descr->base_off + a_descr->targ_ack)
|| (c_descr->event == CTIO_DONE &&
c_descr->offset == a_descr->base_off + a_descr->init_ack)) {
sent_status = (ccb_h->flags & CAM_SEND_STATUS) != 0;
event = c_descr->event;
TAILQ_REMOVE(&a_descr->cmplt_io, ccb_h,
tcmd_handle(atio, ctio, c_descr->event);
/* If entire transfer complete, send back ATIO */
if (sent_status != 0 && event == CTIO_DONE)
send_ccb((union ccb *)atio, /*priority*/1);
/* Gap in offsets so wait until later callback */
warnx("IO %p out of order", ccb_h);
work_inot(struct ccb_immed_notify *inot)
warnx("Working on INOT %p", inot);
status = inot->ccb_h.status;
sense = (status & CAM_AUTOSNS_VALID) != 0;
status &= CAM_STATUS_MASK;
case CAM_SCSI_BUS_RESET:
tcmd_ua(CAM_TARGET_WILDCARD, UA_BUS_RESET);
tcmd_ua(CAM_TARGET_WILDCARD, UA_BDR);
case CAM_MESSAGE_RECV:
switch (inot->message_args[0]) {
case MSG_TASK_COMPLETE:
case MSG_INITIATOR_DET_ERR:
case MSG_ABORT_TASK_SET:
case MSG_MESSAGE_REJECT:
case MSG_PARITY_ERROR:
case MSG_TARGET_RESET:
case MSG_CLEAR_TASK_SET:
warnx("INOT message %#x", inot->message_args[0]);
case CAM_REQ_ABORTED:
warnx("INOT %p aborted", inot);
warnx("Unhandled INOT status %#x", status);
/* If there is sense data, use it */
struct scsi_sense_data *sense;
sense = &inot->sense_data;
tcmd_sense(inot->initiator_id, NULL, sense->flags,
sense->add_sense_code, sense->add_sense_code_qual);
warnx("INOT has sense: %#x", sense->flags);
TAILQ_REMOVE(&work_queue, &inot->ccb_h, periph_links.tqe);
send_ccb((union ccb *)inot, /*priority*/1);
send_ccb(union ccb *ccb, int priority)
warnx("sending ccb (%#x)", ccb->ccb_h.func_code);
ccb->ccb_h.pinfo.priority = priority;
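/*
 * Queued function codes are parked on pending_queue until the kernel hands
 * them back; submission itself is a write(2) of the CCB's user-space
 * pointer to the targ control device.
 */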
if (XPT_FC_IS_QUEUED(ccb)) {
TAILQ_INSERT_TAIL(&pending_queue, &ccb->ccb_h,
if (write(targ_fd, &ccb, sizeof(ccb)) != sizeof(ccb)) {
ccb->ccb_h.status = CAM_PROVIDE_FAIL;
/* Return a CTIO/descr/buf combo from the freelist or malloc one */
static struct ccb_scsiio *
struct ccb_scsiio *ctio;
struct ctio_descr *c_descr;
if (num_ctios == MAX_CTIOS)
ctio = (struct ccb_scsiio *)malloc(sizeof(*ctio));
c_descr = (struct ctio_descr *)malloc(sizeof(*c_descr));
if (c_descr == NULL) {
warn("malloc ctio_descr");
c_descr->buf = malloc(buf_size);
if (c_descr->buf == NULL) {
warn("malloc backing store");
/* Initialize CTIO, CTIO descr, and AIO */
ctio->ccb_h.func_code = XPT_CONT_TARGET_IO;
ctio->ccb_h.retry_count = 2;
ctio->ccb_h.timeout = CAM_TIME_INFINITY;
ctio->data_ptr = c_descr->buf;
ctio->ccb_h.targ_descr = c_descr;
c_descr->aiocb.aio_buf = c_descr->buf;
c_descr->aiocb.aio_fildes = file_fd;
se = &c_descr->aiocb.aio_sigevent;
se->sigev_notify = SIGEV_KEVENT;
se->sigev_notify_kqueue = kq_fd;
se->sigev_value.sival_ptr = ctio;
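/*
 * With SIGEV_KEVENT, AIO completion for this buffer posts an EVFILT_AIO
 * event to kq_fd carrying this CTIO in its udata, which request_loop()
 * then processes.
 */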
free_ccb(union ccb *ccb)
switch (ccb->ccb_h.func_code) {
case XPT_CONT_TARGET_IO:
struct ctio_descr *c_descr;
c_descr = (struct ctio_descr *)ccb->ccb_h.targ_descr;
case XPT_ACCEPT_TARGET_IO:
free(ccb->ccb_h.targ_descr);
case XPT_IMMED_NOTIFY:
get_sim_flags(u_int16_t *flags)
struct ccb_pathinq cpi;
/* Find SIM capabilities */
bzero(&cpi, sizeof(cpi));
cpi.ccb_h.func_code = XPT_PATH_INQ;
send_ccb((union ccb *)&cpi, /*priority*/1);
status = cpi.ccb_h.status & CAM_STATUS_MASK;
if (status != CAM_REQ_CMP) {
fprintf(stderr, "CPI failed, status %#x\n", status);
/* Can only enable on controllers that support target mode */
if ((cpi.target_sprt & PIT_PROCESSOR) == 0) {
fprintf(stderr, "HBA does not support target mode\n");
status = CAM_PATH_INVALID;
*flags = cpi.hba_inquiry;
struct ccb_relsim crs;
bzero(&crs, sizeof(crs));
crs.ccb_h.func_code = XPT_REL_SIMQ;
crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
crs.release_timeout = 0;
send_ccb((union ccb *)&crs, /*priority*/0);
/* Cancel all pending CCBs. */
struct ccb_abort cab;
struct ccb_hdr *ccb_h;
warnx("abort_all_pending");
bzero(&cab, sizeof(cab));
cab.ccb_h.func_code = XPT_ABORT;
TAILQ_FOREACH(ccb_h, &pending_queue, periph_links.tqe) {
916 warnx("Aborting pending CCB %p\n", ccb_h);
cab.abort_ccb = (union ccb *)ccb_h;
send_ccb((union ccb *)&cab, /*priority*/1);
if (cab.ccb_h.status != CAM_REQ_CMP) {
920 warnx("Unable to abort CCB, status %#x\n",
930 "Usage: scsi_target [-AdST] [-b bufsize] [-c sectorsize]\n"
931 "\t\t[-r numbufs] [-s volsize] [-W 8,16,32]\n"
932 "\t\tbus:target:lun filename\n");