2 * Generic SCSI Target Kernel Mode Driver
4 * Copyright (c) 2002 Nate Lawson.
5 * Copyright (c) 1998, 1999, 2001, 2002 Justin T. Gibbs.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions, and the following disclaimer,
13 * without modification, immediately at the beginning of the file.
14 * 2. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * $FreeBSD: src/sys/cam/scsi/scsi_target.c,v 1.22.2.7 2003/02/18 22:07:10 njl Exp $
30 * $DragonFly: src/sys/bus/cam/scsi/scsi_target.c,v 1.20 2008/07/18 00:07:23 dillon Exp $
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/kernel.h>
37 #include <sys/device.h>
38 #include <sys/malloc.h>
39 #include <sys/vnode.h>
40 #include <sys/devicestat.h>
41 #include <sys/thread2.h>
42 #include <sys/devfs.h>
45 #include "../cam_ccb.h"
46 #include "../cam_periph.h"
47 #include "../cam_xpt_periph.h"
48 #include "../cam_sim.h"
49 #include "scsi_targetio.h"
/* NOTE(review): this listing has the original file's line numbers fused into
 * each line and is missing interleaved lines (the tail of this struct, the
 * enum header, etc.) -- verify against pristine scsi_target.c before use. */
51 /* Transaction information attached to each CCB sent by the user */
52 struct targ_cmd_descr {
/* mapinfo records user buffers wired by cam_periph_mapmem() so
 * targreturnccb() can unmap them on completion. */
53 struct cam_periph_map_info mapinfo;
/* Linkage for work_queue / abort_queue membership. */
54 TAILQ_ENTRY(targ_cmd_descr) tqe;
60 /* Offset into the private CCB area for storing our descriptor */
61 #define targ_descr periph_priv.entries[1].ptr
/* Queue head type for lists of descriptors (work/abort queues). */
63 TAILQ_HEAD(descr_queue, targ_cmd_descr);
/* Softc state flags (enum header line missing from this listing). */
66 TARG_STATE_RESV = 0x00, /* Invalid state */
67 TARG_STATE_OPENED = 0x01, /* Device opened, softc initialized */
68 TARG_STATE_LUN_ENABLED = 0x02 /* Device enabled for a path */
71 /* Per-instance device software context */
/* NOTE(review): the "struct targ_softc {" opening line and the state/closing
 * members are missing from this extraction; fields below are as captured. */
73 /* CCBs (CTIOs, ATIOs, INOTs) pending on the controller */
74 struct ccb_queue pending_ccb_queue;
76 /* Command descriptors awaiting CTIO resources from the XPT */
77 struct descr_queue work_queue;
79 /* Command descriptors that have been aborted back to the user. */
80 struct descr_queue abort_queue;
83 * Queue of CCBs that have been copied out to userland, but our
84 * userland daemon has not yet seen.
86 struct ccb_queue user_ccb_queue;
/* Back-pointers to the CAM periph and path created by targenable(). */
88 struct cam_periph *periph;
89 struct cam_path *path;
/* kqueue notification point used by targread/targwrite filters. */
91 struct kqinfo read_kq;
92 struct devstat device_stats;
/* Forward declarations for the character-device entry points. */
95 static d_open_t targopen;
96 static d_close_t targclose;
97 static d_read_t targread;
98 static d_write_t targwrite;
99 static d_ioctl_t targioctl;
100 static d_kqfilter_t targkqfilter;
101 static d_clone_t targclone;
102 DEVFS_DECLARE_CLONE_BITMAP(targ);
/* kqueue filter operations: read fires when user/abort queues are
 * non-empty; write always fires (see targwritefilt). */
104 static void targfiltdetach(struct knote *kn);
105 static int targreadfilt(struct knote *kn, long hint);
106 static int targwritefilt(struct knote *kn, long hint);
107 static struct filterops targread_filtops =
108 { 1, NULL, targfiltdetach, targreadfilt };
109 static struct filterops targwrite_filtops =
110 { 1, NULL, targfiltdetach, targwritefilt };
112 #define TARG_CDEV_MAJOR 65
113 static struct dev_ops targ_ops = {
114 { "targ", TARG_CDEV_MAJOR, D_KQFILTER },
/* NOTE(review): the .d_open and .d_read initializers appear to be missing
 * from this extraction (targopen/targread are declared above). */
116 .d_close = targclose,
118 .d_write = targwrite,
119 .d_ioctl = targioctl,
120 .d_kqfilter = targkqfilter
/* Internal helpers: LUN enable/disable, periph lifecycle, CCB plumbing. */
123 static cam_status targendislun(struct cam_path *path, int enable,
124 int grp6_len, int grp7_len);
125 static cam_status targenable(struct targ_softc *softc,
126 struct cam_path *path,
127 int grp6_len, int grp7_len);
128 static cam_status targdisable(struct targ_softc *softc);
129 static periph_ctor_t targctor;
130 static periph_dtor_t targdtor;
131 static periph_start_t targstart;
132 static int targusermerge(struct targ_softc *softc,
133 struct targ_cmd_descr *descr,
135 static int targsendccb(struct targ_softc *softc, union ccb *ccb,
136 struct targ_cmd_descr *descr);
137 static void targdone(struct cam_periph *periph,
138 union ccb *done_ccb);
139 static int targreturnccb(struct targ_softc *softc,
141 static union ccb * targgetccb(struct targ_softc *softc, xpt_opcode type,
143 static void targfreeccb(struct targ_softc *softc, union ccb *ccb);
144 static struct targ_cmd_descr *
145 targgetdescr(struct targ_softc *softc);
146 static periph_init_t targinit;
147 static void targasync(void *callback_arg, u_int32_t code,
148 struct cam_path *path, void *arg);
149 static void abort_all_pending(struct targ_softc *softc);
150 static void notify_user(struct targ_softc *softc);
151 static int targcamstatus(cam_status status);
152 static size_t targccblen(xpt_opcode func_code);
/* Registration with the CAM periph framework. */
154 static struct periph_driver targdriver =
157 TAILQ_HEAD_INITIALIZER(targdriver.units), /* generation */ 0
159 PERIPHDRIVER_DECLARE(targ, targdriver);
161 static MALLOC_DEFINE(M_TARG, "TARG", "TARG data");
164 * Create softc and initialize it. Only one proc can open each targ device.
165 * There is no locking here because a periph doesn't get created until an
166 * ioctl is issued to do so, and that can't happen until this method returns.
169 targopen(struct dev_open_args *ap)
171 cdev_t dev = ap->a_head.a_dev;
172 struct targ_softc *softc;
/* Refuse a second open: si_drv1 doubles as a "busy" marker. */
174 if (dev->si_drv1 != 0) {
/* NOTE(review): the EBUSY return and closing brace are missing from
 * this extraction. */
178 /* Mark device busy before any potentially blocking operations */
179 dev->si_drv1 = (void *)~0;
180 reference_dev(dev); /* save ref for later destroy_dev() */
182 /* Create the targ device, allocate its softc, initialize it */
184 make_dev(&targ_ops, minor(dev), UID_ROOT, GID_WHEEL, 0600,
185 "targ%d", lminor(dev));
187 MALLOC(softc, struct targ_softc *, sizeof(*softc), M_TARG,
189 dev->si_drv1 = softc;
190 softc->state = TARG_STATE_OPENED;
191 softc->periph = NULL;
/* Empty all four CCB/descriptor queues before any I/O can arrive. */
194 TAILQ_INIT(&softc->pending_ccb_queue);
195 TAILQ_INIT(&softc->work_queue);
196 TAILQ_INIT(&softc->abort_queue);
197 TAILQ_INIT(&softc->user_ccb_queue);
202 /* Disable LUN if enabled and teardown softc */
204 targclose(struct dev_close_args *ap)
206 cdev_t dev = ap->a_head.a_dev;
207 struct targ_softc *softc;
208 struct cam_periph *periph;
211 softc = (struct targ_softc *)dev->si_drv1;
/* Fast path: no periph or LUN never enabled -- just release the
 * clone-bitmap unit (and, presumably, free the softc on a missing line). */
212 if ((softc->periph == NULL) ||
213 (softc->state & TARG_STATE_LUN_ENABLED) == 0) {
214 devfs_clone_bitmap_put(&DEVFS_CLONE_BITMAP(targ), dev->si_uminor);
221 * Acquire a hold on the periph so that it doesn't go away before
222 * we are ready at the end of the function.
224 periph = softc->periph;
225 cam_periph_acquire(periph);
226 cam_periph_lock(periph);
227 error = targdisable(softc);
228 if (error == CAM_REQ_CMP) {
/* Disable succeeded: invalidate the periph and tear down the device
 * node; destroy_dev() consumes the ref taken in targopen(). */
230 if (softc->periph != NULL) {
231 cam_periph_invalidate(softc->periph);
232 softc->periph = NULL;
234 destroy_dev(dev); /* eats the open ref */
235 devfs_clone_bitmap_put(&DEVFS_CLONE_BITMAP(targ), dev->si_uminor);
240 cam_periph_unlock(periph);
241 cam_periph_release(periph);
246 /* Enable/disable LUNs, set debugging level */
248 targioctl(struct dev_ioctl_args *ap)
250 struct targ_softc *softc;
253 softc = (struct targ_softc *)ap->a_head.a_dev->si_drv1;
/* NOTE(review): the switch(ap->a_cmd) statement and its case labels
 * (TARGIOCENABLE / TARGIOCDISABLE / TARGIOCDEBUG, presumably) are missing
 * from this extraction; the three branches below are their bodies. */
258 struct ioc_enable_lun *new_lun;
259 struct cam_path *path;
/* Enable: build a path from the user-supplied bus/target/lun and
 * hand it to targenable(). */
262 new_lun = (struct ioc_enable_lun *)ap->a_data;
263 status = xpt_create_path_unlocked(&path, /*periph*/NULL,
267 if (status != CAM_REQ_CMP) {
268 kprintf("Couldn't create path, status %#x\n", status);
271 sim = xpt_path_sim(path);
273 status = targenable(softc, path, new_lun->grp6_len,
/* Disable: requires an existing periph to lock against. */
280 if (softc->periph == NULL) {
281 status = CAM_DEV_NOT_THERE;
284 cam_periph_lock(softc->periph);
285 status = targdisable(softc);
286 cam_periph_unlock(softc->periph);
291 struct ccb_debug cdbg;
293 /* If no periph available, disallow debugging changes */
294 if ((softc->state & TARG_STATE_LUN_ENABLED) == 0) {
295 status = CAM_DEV_NOT_THERE;
/* Debug: issue an XPT_DEBUG CCB toggling CAM_DEBUG_PERIPH based on
 * the int passed in a_data. */
298 bzero(&cdbg, sizeof cdbg);
299 if (*((int *)ap->a_data) != 0)
300 cdbg.flags = CAM_DEBUG_PERIPH;
302 cdbg.flags = CAM_DEBUG_NONE;
303 cam_periph_lock(softc->periph);
304 xpt_setup_ccb(&cdbg.ccb_h, softc->path, /*priority*/0);
305 cdbg.ccb_h.func_code = XPT_DEBUG;
306 cdbg.ccb_h.cbfcnp = targdone;
308 xpt_action((union ccb *)&cdbg);
309 cam_periph_unlock(softc->periph);
310 status = cdbg.ccb_h.status & CAM_STATUS_MASK;
312 status = CAM_FUNC_NOTAVAIL;
317 status = CAM_PROVIDE_FAIL;
/* Map the CAM status to an errno for the caller. */
321 return (targcamstatus(status));
/* Attach a knote to this device; read filters trigger when CCBs or aborted
 * descriptors are ready for userland, write filters are always ready. */
325 targkqfilter(struct dev_kqfilter_args *ap)
327 struct knote *kn = ap->a_kn;
328 struct targ_softc *softc;
330 softc = (struct targ_softc *)ap->a_head.a_dev->si_drv1;
334 switch (kn->kn_filter) {
/* NOTE(review): the EVFILT_READ / EVFILT_WRITE case labels are missing
 * from this extraction. */
336 kn->kn_hook = (caddr_t)softc;
337 kn->kn_fop = &targread_filtops;
340 kn->kn_hook = (caddr_t)softc;
341 kn->kn_fop = &targwrite_filtops;
343 ap->a_result = EOPNOTSUPP;
/* Both filter types hang off the single read_kq note list. */
347 knote_insert(&softc->read_kq.ki_note, kn);
/* Detach callback shared by both filters. */
352 targfiltdetach(struct knote *kn)
354 struct targ_softc *softc;
356 softc = (struct targ_softc *)kn->kn_hook;
357 knote_remove(&softc->read_kq.ki_note, kn);
360 /* Notify the user's kqueue when the user queue or abort queue gets a CCB */
362 targreadfilt(struct knote *kn, long hint)
364 struct targ_softc *softc;
367 softc = (struct targ_softc *)kn->kn_hook;
/* Readable iff completed CCBs or aborted descriptors are queued. */
368 cam_periph_lock(softc->periph);
369 retval = !TAILQ_EMPTY(&softc->user_ccb_queue) ||
370 !TAILQ_EMPTY(&softc->abort_queue);
371 cam_periph_unlock(softc->periph);
375 /* write() is always ok */
377 targwritefilt(struct knote *kn, long hint)
382 /* Send the HBA the enable/disable message */
384 targendislun(struct cam_path *path, int enable, int grp6_len, int grp7_len)
386 struct ccb_en_lun en_ccb;
389 /* Tell the lun to begin answering selects */
390 xpt_setup_ccb(&en_ccb.ccb_h, path, /*priority*/1);
391 en_ccb.ccb_h.func_code = XPT_EN_LUN;
392 /* Don't need support for any vendor specific commands */
393 en_ccb.grp6_len = grp6_len;
394 en_ccb.grp7_len = grp7_len;
395 en_ccb.enable = enable ? 1 : 0;
/* XPT_EN_LUN executes synchronously; check the masked completion status. */
396 xpt_action((union ccb *)&en_ccb);
397 status = en_ccb.ccb_h.status & CAM_STATUS_MASK;
398 if (status != CAM_REQ_CMP) {
399 xpt_print(path, "%sable lun CCB rejected, status %#x\n",
400 enable ? "en" : "dis", status);
405 /* Enable target mode on a LUN, given its path */
407 targenable(struct targ_softc *softc, struct cam_path *path, int grp6_len,
410 struct cam_periph *periph;
411 struct ccb_pathinq cpi;
/* Already enabled on some path: caller must disable first. */
414 if ((softc->state & TARG_STATE_LUN_ENABLED) != 0)
415 return (CAM_LUN_ALRDY_ENA);
417 /* Make sure SIM supports target mode */
418 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
419 cpi.ccb_h.func_code = XPT_PATH_INQ;
420 xpt_action((union ccb *)&cpi);
421 status = cpi.ccb_h.status & CAM_STATUS_MASK;
422 if (status != CAM_REQ_CMP) {
423 kprintf("pathinq failed, status %#x\n", status);
426 if ((cpi.target_sprt & PIT_PROCESSOR) == 0) {
427 kprintf("controller does not support target mode\n");
428 status = CAM_FUNC_NOTAVAIL;
432 /* Destroy any periph on our path if it is disabled */
433 periph = cam_periph_find(path, "targ");
434 if (periph != NULL) {
435 struct targ_softc *del_softc;
437 del_softc = (struct targ_softc *)periph->softc;
438 if ((del_softc->state & TARG_STATE_LUN_ENABLED) == 0) {
439 cam_periph_invalidate(del_softc->periph);
440 del_softc->periph = NULL;
/* Path owned by a still-enabled instance: refuse. */
442 kprintf("Requested path still in use by targ%d\n",
443 periph->unit_number);
444 status = CAM_LUN_ALRDY_ENA;
449 /* Create a periph instance attached to this path */
450 status = cam_periph_alloc(targctor, NULL, targdtor, targstart,
451 "targ", CAM_PERIPH_BIO, path, targasync, 0, softc);
452 if (status != CAM_REQ_CMP) {
453 kprintf("cam_periph_alloc failed, status %#x\n", status);
457 /* Ensure that the periph now exists. */
458 if (cam_periph_find(path, "targ") == NULL) {
459 panic("targenable: succeeded but no periph?");
463 /* Send the enable lun message */
464 status = targendislun(path, /*enable*/1, grp6_len, grp7_len);
465 if (status != CAM_REQ_CMP) {
466 kprintf("enable lun failed, status %#x\n", status);
/* Success: record that this softc now owns an enabled LUN. */
469 softc->state |= TARG_STATE_LUN_ENABLED;
475 /* Disable this softc's target instance if enabled */
477 targdisable(struct targ_softc *softc)
/* Idempotent: disabling an already-disabled instance succeeds. */
481 if ((softc->state & TARG_STATE_LUN_ENABLED) == 0)
482 return (CAM_REQ_CMP);
484 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("targdisable\n"));
486 /* Abort any ccbs pending on the controller */
488 abort_all_pending(softc);
491 /* Disable this lun */
492 status = targendislun(softc->path, /*enable*/0,
493 /*grp6_len*/0, /*grp7_len*/0);
/* Only clear the enabled flag when the HBA accepted the disable. */
494 if (status == CAM_REQ_CMP)
495 softc->state &= ~TARG_STATE_LUN_ENABLED;
497 kprintf("Disable lun failed, status %#x\n", status);
502 /* Initialize a periph (called from cam_periph_alloc) */
504 targctor(struct cam_periph *periph, void *arg)
506 struct targ_softc *softc;
508 /* Store pointer to softc for periph-driven routines */
509 softc = (struct targ_softc *)arg;
510 periph->softc = softc;
511 softc->periph = periph;
512 softc->path = periph->path;
513 return (CAM_REQ_CMP);
/* Periph destructor: drain leftover queues and unlink from the softc. */
517 targdtor(struct cam_periph *periph)
519 struct targ_softc *softc;
520 struct ccb_hdr *ccb_h;
521 struct targ_cmd_descr *descr;
523 softc = (struct targ_softc *)periph->softc;
526 * targdisable() aborts CCBs back to the user and leaves them
527 * on user_ccb_queue and abort_queue in case the user is still
528 * interested in them. We free them now.
530 while ((ccb_h = TAILQ_FIRST(&softc->user_ccb_queue)) != NULL) {
531 TAILQ_REMOVE(&softc->user_ccb_queue, ccb_h, periph_links.tqe);
532 targfreeccb(softc, (union ccb *)ccb_h);
534 while ((descr = TAILQ_FIRST(&softc->abort_queue)) != NULL) {
535 TAILQ_REMOVE(&softc->abort_queue, descr, tqe);
/* NOTE(review): the FREE(descr, M_TARG) call presumably on the missing
 * line 536 is absent from this extraction. */
539 softc->periph = NULL;
541 periph->softc = NULL;
544 /* Receive CCBs from user mode proc and send them to the HBA */
546 targwrite(struct dev_write_args *ap)
548 struct uio *uio = ap->a_uio;
550 struct targ_softc *softc;
551 struct targ_cmd_descr *descr;
552 int write_len, error;
553 int func_code, priority;
555 softc = (struct targ_softc *)ap->a_head.a_dev->si_drv1;
556 write_len = error = 0;
557 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH,
558 ("write - uio_resid %zu\n", uio->uio_resid));
/* Consume one userland CCB pointer per iteration from the write buffer. */
559 while (uio->uio_resid >= sizeof(user_ccb) && error == 0) {
562 error = uiomove((caddr_t)&user_ccb, sizeof(user_ccb), uio);
564 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH,
565 ("write - uiomove failed (%d)\n", error));
/* fuword() reads the priority/func_code directly out of the user's
 * CCB; -1 indicates the fetch faulted. */
568 priority = fuword(&user_ccb->ccb_h.pinfo.priority);
569 if (priority == -1) {
573 func_code = fuword(&user_ccb->ccb_h.func_code);
575 case XPT_ACCEPT_TARGET_IO:
576 case XPT_IMMED_NOTIFY:
/* ATIO/INOT: allocate a kernel CCB + descriptor and park it on the
 * pending queue; the HBA completes it via targdone(). */
577 cam_periph_lock(softc->periph);
578 ccb = targgetccb(softc, func_code, priority);
579 descr = (struct targ_cmd_descr *)ccb->ccb_h.targ_descr;
580 descr->user_ccb = user_ccb;
581 descr->func_code = func_code;
582 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH,
583 ("Sent ATIO/INOT (%p)\n", user_ccb));
585 TAILQ_INSERT_TAIL(&softc->pending_ccb_queue,
588 cam_periph_unlock(softc->periph);
591 cam_periph_lock(softc->periph);
/* Queued CCBs (e.g. CTIOs) go on the work queue and wait for a
 * periph-supplied CCB via targstart()/xpt_schedule(). */
592 if ((func_code & XPT_FC_QUEUED) != 0) {
593 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH,
594 ("Sending queued ccb %#x (%p)\n",
595 func_code, user_ccb));
596 descr = targgetdescr(softc);
597 descr->user_ccb = user_ccb;
598 descr->priority = priority;
599 descr->func_code = func_code;
601 TAILQ_INSERT_TAIL(&softc->work_queue,
604 xpt_schedule(softc->periph, priority);
/* Non-queued CCBs execute inline and are returned immediately. */
606 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH,
607 ("Sending inline ccb %#x (%p)\n",
608 func_code, user_ccb));
609 ccb = targgetccb(softc, func_code, priority);
610 descr = (struct targ_cmd_descr *)
611 ccb->ccb_h.targ_descr;
612 descr->user_ccb = user_ccb;
613 descr->priority = priority;
614 descr->func_code = func_code;
615 if (targusermerge(softc, descr, ccb) != EFAULT)
616 targsendccb(softc, ccb, descr);
617 targreturnccb(softc, ccb);
619 cam_periph_unlock(softc->periph);
622 write_len += sizeof(user_ccb);
626 * If we've successfully taken in some amount of
627 * data, return success for that data first. If
628 * an error is persistent, it will be reported
631 if (error != 0 && write_len == 0)
633 if (write_len == 0 && uio->uio_resid != 0)
638 /* Process requests (descrs) via the periph-supplied CCBs */
640 targstart(struct cam_periph *periph, union ccb *start_ccb)
642 struct targ_softc *softc;
643 struct targ_cmd_descr *descr, *next_descr;
646 softc = (struct targ_softc *)periph->softc;
647 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("targstart %p\n", start_ccb));
650 descr = TAILQ_FIRST(&softc->work_queue);
/* Nothing queued: give the CCB back to the XPT. */
653 xpt_release_ccb(start_ccb);
655 TAILQ_REMOVE(&softc->work_queue, descr, tqe);
656 next_descr = TAILQ_FIRST(&softc->work_queue);
659 /* Initiate a transaction using the descr and supplied CCB */
660 error = targusermerge(softc, descr, start_ccb);
662 error = targsendccb(softc, start_ccb, descr);
/* Send failed: reflect an aborted status back to the user's CCB
 * and park the descriptor on the abort queue for targread(). */
664 xpt_print(periph->path,
665 "targsendccb failed, err %d\n", error);
666 xpt_release_ccb(start_ccb);
667 suword(&descr->user_ccb->ccb_h.status,
670 TAILQ_INSERT_TAIL(&softc->abort_queue, descr, tqe);
675 /* If we have more work to do, stay scheduled */
676 if (next_descr != NULL)
677 xpt_schedule(periph, next_descr->priority);
/* Merge the user's CCB contents into a kernel CCB, preserving the kernel-
 * private header fields; returns 0 or EFAULT-style copyin failure. */
682 targusermerge(struct targ_softc *softc, struct targ_cmd_descr *descr,
685 struct ccb_hdr *u_ccbh, *k_ccbh;
689 u_ccbh = &descr->user_ccb->ccb_h;
690 k_ccbh = &ccb->ccb_h;
693 * There are some fields in the CCB header that need to be
694 * preserved, the rest we get from the user ccb. (See xpt_merge_ccb)
696 xpt_setup_ccb(k_ccbh, softc->path, descr->priority);
697 k_ccbh->retry_count = fuword(&u_ccbh->retry_count);
698 k_ccbh->func_code = descr->func_code;
699 k_ccbh->flags = fuword(&u_ccbh->flags);
700 k_ccbh->timeout = fuword(&u_ccbh->timeout);
/* Copy the body (everything after the header) sized by func_code. */
701 ccb_len = targccblen(k_ccbh->func_code) - sizeof(struct ccb_hdr);
702 error = copyin(u_ccbh + 1, k_ccbh + 1, ccb_len);
704 k_ccbh->status = CAM_REQ_CMP_ERR;
708 /* Translate usermode abort_ccb pointer to its kernel counterpart */
709 if (k_ccbh->func_code == XPT_ABORT) {
710 struct ccb_abort *cab;
711 struct ccb_hdr *ccb_h;
713 cab = (struct ccb_abort *)ccb;
/* Search pending CCBs for the one whose descriptor references the
 * user CCB being aborted. */
715 TAILQ_FOREACH(ccb_h, &softc->pending_ccb_queue,
717 struct targ_cmd_descr *ab_descr;
719 ab_descr = (struct targ_cmd_descr *)ccb_h->targ_descr;
720 if (ab_descr->user_ccb == cab->abort_ccb) {
721 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH,
722 ("Changing abort for %p to %p\n",
723 cab->abort_ccb, ccb_h));
724 cab->abort_ccb = (union ccb *)ccb_h;
729 /* CCB not found, set appropriate status */
731 k_ccbh->status = CAM_PATH_INVALID;
739 /* Build and send a kernel CCB formed from descr->user_ccb */
741 targsendccb(struct targ_softc *softc, union ccb *ccb,
742 struct targ_cmd_descr *descr)
744 struct cam_periph_map_info *mapinfo;
745 struct ccb_hdr *ccb_h;
749 mapinfo = &descr->mapinfo;
750 mapinfo->num_bufs_used = 0;
753 * There's no way for the user to have a completion
754 * function, so we put our own completion function in here.
755 * We also stash in a reference to our descriptor so targreturnccb()
756 * can find our mapping info.
758 ccb_h->cbfcnp = targdone;
759 ccb_h->targ_descr = descr;
762 * We only attempt to map the user memory into kernel space
763 * if they haven't passed in a physical memory pointer,
764 * and if there is actually an I/O operation to perform.
765 * Right now cam_periph_mapmem() only supports SCSI and device
766 * match CCBs. For the SCSI CCBs, we only pass the CCB in if
767 * there's actually data to map. cam_periph_mapmem() will do the
768 * right thing, even if there isn't data to map, but since CCBs
769 * without data are a reasonably common occurance (e.g. test unit
770 * ready), it will save a few cycles if we check for it here.
772 if (((ccb_h->flags & CAM_DATA_PHYS) == 0)
773 && (((ccb_h->func_code == XPT_CONT_TARGET_IO)
774 && ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE))
775 || (ccb_h->func_code == XPT_DEV_MATCH))) {
777 error = cam_periph_mapmem(ccb, mapinfo);
780 * cam_periph_mapmem returned an error, we can't continue.
781 * Return the error to the user.
784 ccb_h->status = CAM_REQ_CMP_ERR;
785 mapinfo->num_bufs_used = 0;
791 * Once queued on the pending CCB list, this CCB will be protected
792 * by our error recovery handler.
794 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("sendccb %p\n", ccb));
/* Track queued-class CCBs on the pending queue; abort_all_pending()
 * and targdone() both depend on this membership. */
795 if (XPT_FC_IS_QUEUED(ccb)) {
797 TAILQ_INSERT_TAIL(&softc->pending_ccb_queue, ccb_h,
/* NOTE(review): the xpt_action() dispatch and return presumably on the
 * missing lines 799-803 are absent from this extraction. */
806 /* Completion routine for CCBs (called in a critical section) */
808 targdone(struct cam_periph *periph, union ccb *done_ccb)
810 struct targ_softc *softc;
813 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, ("targdone %p\n", done_ccb));
814 softc = (struct targ_softc *)periph->softc;
/* Completed: remove from the pending queue it was placed on by
 * targwrite()/targsendccb(). */
815 TAILQ_REMOVE(&softc->pending_ccb_queue, &done_ccb->ccb_h,
817 status = done_ccb->ccb_h.status & CAM_STATUS_MASK;
819 /* If we're no longer enabled, throw away CCB */
820 if ((softc->state & TARG_STATE_LUN_ENABLED) == 0) {
821 targfreeccb(softc, done_ccb);
824 /* abort_all_pending() waits for pending queue to be empty */
825 if (TAILQ_EMPTY(&softc->pending_ccb_queue))
826 wakeup(&softc->pending_ccb_queue);
828 switch (done_ccb->ccb_h.func_code) {
829 /* All FC_*_QUEUED CCBs go back to userland */
830 case XPT_IMMED_NOTIFY:
831 case XPT_ACCEPT_TARGET_IO:
832 case XPT_CONT_TARGET_IO:
/* Hand the completed CCB to userland via targread(); a notify_user()
 * call presumably follows on a missing line. */
833 TAILQ_INSERT_TAIL(&softc->user_ccb_queue, &done_ccb->ccb_h,
838 panic("targdone: impossible xpt opcode %#x",
839 done_ccb->ccb_h.func_code);
844 /* Return CCBs to the user from the user queue and abort queue */
846 targread(struct dev_read_args *ap)
848 struct uio *uio = ap->a_uio;
849 struct descr_queue *abort_queue;
850 struct targ_cmd_descr *user_descr;
851 struct targ_softc *softc;
852 struct ccb_queue *user_queue;
853 struct ccb_hdr *ccb_h;
859 softc = (struct targ_softc *)ap->a_head.a_dev->si_drv1;
860 user_queue = &softc->user_ccb_queue;
861 abort_queue = &softc->abort_queue;
862 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("targread\n"));
864 /* If no data is available, wait or return immediately */
865 cam_periph_lock(softc->periph);
866 ccb_h = TAILQ_FIRST(user_queue);
867 user_descr = TAILQ_FIRST(abort_queue);
868 while (ccb_h == NULL && user_descr == NULL) {
869 if ((ap->a_ioflag & IO_NDELAY) == 0) {
/* Blocking read: sleep on the user queue under the SIM lock until
 * targdone()/abort_all_pending() wakes us. */
870 error = sim_lock_sleep(user_queue, PCATCH, "targrd", 0,
871 softc->periph->sim->lock);
872 ccb_h = TAILQ_FIRST(user_queue);
873 user_descr = TAILQ_FIRST(abort_queue);
875 if (error == ERESTART) {
882 cam_periph_unlock(softc->periph);
887 /* Data is available so fill the user's buffer */
888 while (ccb_h != NULL) {
889 struct targ_cmd_descr *descr;
/* Stop once the remaining user buffer can't hold another pointer. */
891 if (uio->uio_resid < sizeof(user_ccb))
893 TAILQ_REMOVE(user_queue, ccb_h, periph_links.tqe);
894 descr = (struct targ_cmd_descr *)ccb_h->targ_descr;
895 user_ccb = descr->user_ccb;
896 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH,
897 ("targread ccb %p (%p)\n", ccb_h, user_ccb));
/* Copy the completed CCB contents back, then hand the user the
 * pointer identifying which of their CCBs finished. */
898 error = targreturnccb(softc, (union ccb *)ccb_h);
901 cam_periph_unlock(softc->periph);
902 error = uiomove((caddr_t)&user_ccb, sizeof(user_ccb), uio);
903 cam_periph_lock(softc->periph);
906 read_len += sizeof(user_ccb);
908 ccb_h = TAILQ_FIRST(user_queue);
911 /* Flush out any aborted descriptors */
912 while (user_descr != NULL) {
913 if (uio->uio_resid < sizeof(user_ccb))
915 TAILQ_REMOVE(abort_queue, user_descr, tqe);
916 user_ccb = user_descr->user_ccb;
917 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH,
918 ("targread aborted descr %p (%p)\n",
919 user_descr, user_ccb));
/* Mark the user's CCB aborted in place before returning its pointer. */
920 suword(&user_ccb->ccb_h.status, CAM_REQ_ABORTED);
921 cam_periph_unlock(softc->periph);
922 error = uiomove((caddr_t)&user_ccb, sizeof(user_ccb), uio);
923 cam_periph_lock(softc->periph);
926 read_len += sizeof(user_ccb);
928 user_descr = TAILQ_FIRST(abort_queue);
932 * If we've successfully read some amount of data, don't report an
933 * error. If the error is persistent, it will be reported on the
936 if (read_len == 0 && uio->uio_resid != 0)
940 cam_periph_unlock(softc->periph);
944 /* Copy completed ccb back to the user */
946 targreturnccb(struct targ_softc *softc, union ccb *ccb)
948 struct targ_cmd_descr *descr;
949 struct ccb_hdr *u_ccbh;
953 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("targreturnccb %p\n", ccb));
954 descr = (struct targ_cmd_descr *)ccb->ccb_h.targ_descr;
955 u_ccbh = &descr->user_ccb->ccb_h;
957 /* Copy out the central portion of the ccb_hdr */
/* Only retry_count..periph_priv is copied: pointers and kernel-private
 * header fields on either side must not leak to userland. */
958 copyout(&ccb->ccb_h.retry_count, &u_ccbh->retry_count,
959 offsetof(struct ccb_hdr, periph_priv) -
960 offsetof(struct ccb_hdr, retry_count));
962 /* Copy out the rest of the ccb (after the ccb_hdr) */
963 ccb_len = targccblen(ccb->ccb_h.func_code) - sizeof(struct ccb_hdr);
/* Unmap user buffers (wired in targsendccb) before the final copyout. */
964 if (descr->mapinfo.num_bufs_used != 0)
965 cam_periph_unmapmem(ccb, &descr->mapinfo);
966 error = copyout(&ccb->ccb_h + 1, u_ccbh + 1, ccb_len);
968 xpt_print(softc->path,
969 "targreturnccb - CCB copyout failed (%d)\n", error);
971 /* Free CCB or send back to devq. */
972 targfreeccb(softc, ccb);
/* Allocate a kernel CCB of the right size for 'type', with an attached
 * descriptor, initialized for this softc's path. */
978 targgetccb(struct targ_softc *softc, xpt_opcode type, int priority)
983 ccb_len = targccblen(type);
984 MALLOC(ccb, union ccb *, ccb_len, M_TARG, M_INTWAIT);
985 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("getccb %p\n", ccb));
987 xpt_setup_ccb(&ccb->ccb_h, softc->path, priority);
988 ccb->ccb_h.func_code = type;
989 ccb->ccb_h.cbfcnp = targdone;
990 ccb->ccb_h.targ_descr = targgetdescr(softc);
/* Release a CCB and its descriptor; ATIO/INOT CCBs were MALLOCed by
 * targgetccb(), queued CCBs came from the XPT and go back via
 * xpt_release_ccb(). */
995 targfreeccb(struct targ_softc *softc, union ccb *ccb)
997 CAM_DEBUG_PRINT(CAM_DEBUG_PERIPH, ("targfreeccb descr %p and\n",
998 ccb->ccb_h.targ_descr));
999 FREE(ccb->ccb_h.targ_descr, M_TARG);
1001 switch (ccb->ccb_h.func_code) {
1002 case XPT_ACCEPT_TARGET_IO:
1003 case XPT_IMMED_NOTIFY:
1004 CAM_DEBUG_PRINT(CAM_DEBUG_PERIPH, ("freeing ccb %p\n", ccb));
1008 /* Send back CCB if we got it from the periph */
1009 if (XPT_FC_IS_QUEUED(ccb)) {
1010 CAM_DEBUG_PRINT(CAM_DEBUG_PERIPH,
1011 ("returning queued ccb %p\n", ccb));
1012 xpt_release_ccb(ccb);
1014 CAM_DEBUG_PRINT(CAM_DEBUG_PERIPH,
1015 ("freeing ccb %p\n", ccb));
/* Allocate a fresh command descriptor with no mapped buffers. */
1022 static struct targ_cmd_descr *
1023 targgetdescr(struct targ_softc *softc)
1025 struct targ_cmd_descr *descr;
1027 MALLOC(descr, struct targ_cmd_descr *, sizeof(*descr),
1029 descr->mapinfo.num_bufs_used = 0;
/* devfs clone handler: hand out the next free targN unit. */
1034 targclone(struct dev_clone_args *ap)
1038 unit = devfs_clone_bitmap_get(&DEVFS_CLONE_BITMAP(targ), 0);
1039 ap->a_dev = make_only_dev(&targ_ops, unit, UID_ROOT, GID_WHEEL,
1040 0600, "targ%d", unit);
/* NOTE(review): the targinit() header lines are missing from this
 * extraction; the call below is its body. */
1047 make_autoclone_dev(&targ_ops, &DEVFS_CLONE_BITMAP(targ),
1048 targclone, UID_ROOT, GID_WHEEL, 0600, "targ");
1049 /* XXX: need uninit or so? */
1053 targasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
1055 /* All events are handled in usermode by INOTs */
1056 panic("targasync() called, should be an INOT instead");
1059 /* Cancel all pending requests and CCBs awaiting work. */
1061 abort_all_pending(struct targ_softc *softc)
1063 struct targ_cmd_descr *descr;
1064 struct ccb_abort cab;
1065 struct ccb_hdr *ccb_h;
1066 struct cam_sim *sim;
1068 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("abort_all_pending\n"));
1070 /* First abort the descriptors awaiting resources */
1071 while ((descr = TAILQ_FIRST(&softc->work_queue)) != NULL) {
1072 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH,
1073 ("Aborting descr from workq %p\n", descr));
1074 TAILQ_REMOVE(&softc->work_queue, descr, tqe);
1075 TAILQ_INSERT_TAIL(&softc->abort_queue, descr, tqe);
1079 * Then abort all pending CCBs.
1080 * targdone() will return the aborted CCB via user_ccb_queue
1082 xpt_setup_ccb(&cab.ccb_h, softc->path, /*priority*/0);
1083 cab.ccb_h.func_code = XPT_ABORT;
/* Seed status so the "did any abort succeed" check below is meaningful
 * even when the pending queue is empty. */
1084 cab.ccb_h.status = CAM_REQ_CMP_ERR;
1085 TAILQ_FOREACH(ccb_h, &softc->pending_ccb_queue, periph_links.tqe) {
1086 CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH,
1087 ("Aborting pending CCB %p\n", ccb_h));
1088 cab.abort_ccb = (union ccb *)ccb_h;
1089 xpt_action((union ccb *)&cab);
1090 if (cab.ccb_h.status != CAM_REQ_CMP) {
1091 xpt_print(cab.ccb_h.path,
1092 "Unable to abort CCB, status %#x\n",
1097 /* If we aborted at least one pending CCB ok, wait for it. */
/* targdone() wakes pending_ccb_queue when it drains to empty. */
1098 if (cab.ccb_h.status == CAM_REQ_CMP) {
1099 sim = xpt_path_sim(softc->path);
1100 sim_lock_sleep(&softc->pending_ccb_queue, PCATCH, "tgabrt", 0,
1104 /* If we aborted anything from the work queue, wakeup user. */
1105 if (!TAILQ_EMPTY(&softc->user_ccb_queue)
1106 || !TAILQ_EMPTY(&softc->abort_queue))
1110 /* Notify the user that data is ready */
1112 notify_user(struct targ_softc *softc)
1115 * Notify users sleeping via poll(), kqueue(), and
1118 KNOTE(&softc->read_kq.ki_note, 0);
1119 wakeup(&softc->user_ccb_queue);
1122 /* Convert CAM status to errno values */
/* NOTE(review): several "return (...)" lines inside this switch are
 * missing from this extraction (e.g. after CAM_REQ_CMP). */
1124 targcamstatus(cam_status status)
1126 switch (status & CAM_STATUS_MASK) {
1127 case CAM_REQ_CMP: /* CCB request completed without error */
1129 case CAM_REQ_INPROG: /* CCB request is in progress */
1130 return (EINPROGRESS);
1131 case CAM_REQ_CMP_ERR: /* CCB request completed with an error */
1133 case CAM_PROVIDE_FAIL: /* Unable to provide requested capability */
1135 case CAM_FUNC_NOTAVAIL: /* The requested function is not available */
1137 case CAM_LUN_ALRDY_ENA: /* LUN is already enabled for target mode */
1138 return (EADDRINUSE);
1139 case CAM_PATH_INVALID: /* Supplied Path ID is invalid */
1140 case CAM_DEV_NOT_THERE: /* SCSI Device Not Installed/there */
1142 case CAM_REQ_ABORTED: /* CCB request aborted by the host */
1144 case CAM_CMD_TIMEOUT: /* Command timeout */
1146 case CAM_REQUEUE_REQ: /* Requeue to preserve transaction ordering */
1148 case CAM_REQ_INVALID: /* CCB request was invalid */
1150 case CAM_RESRC_UNAVAIL: /* Resource Unavailable */
1152 case CAM_BUSY: /* CAM subsytem is busy */
1153 case CAM_UA_ABORT: /* Unable to abort CCB request */
/* Size of the kernel CCB needed for each opcode this driver handles;
 * unknown opcodes fall through to sizeof(union ccb). */
1161 targccblen(xpt_opcode func_code)
1165 /* Codes we expect to see as a target */
1166 switch (func_code) {
1167 case XPT_CONT_TARGET_IO:
1169 len = sizeof(struct ccb_scsiio);
1171 case XPT_ACCEPT_TARGET_IO:
1172 len = sizeof(struct ccb_accept_tio);
1174 case XPT_IMMED_NOTIFY:
1175 len = sizeof(struct ccb_immed_notify);
1178 len = sizeof(struct ccb_relsim);
1181 len = sizeof(struct ccb_pathinq);
1184 len = sizeof(struct ccb_debug);
1187 len = sizeof(struct ccb_abort);
1190 len = sizeof(struct ccb_en_lun);
1193 len = sizeof(union ccb);