1 /* $FreeBSD: src/sys/dev/isp/isp_freebsd.c,v 1.32.2.20 2002/10/11 18:49:25 mjacob Exp $ */
2 /* $DragonFly: src/sys/dev/disk/isp/isp_freebsd.c,v 1.2 2003/06/17 04:28:27 dillon Exp $ */
4 * Platform (FreeBSD) dependent common attachment code for Qlogic adapters.
6 * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice immediately at the beginning of the file, without modification,
13 * this list of conditions, and the following disclaimer.
14 * 2. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <dev/isp/isp_freebsd.h>
30 #include <sys/unistd.h>
31 #include <sys/kthread.h>
32 #include <machine/stdarg.h> /* for use by isp_prt below */
34 #include <sys/ioccom.h>
35 #include <dev/isp/isp_ioctl.h>
38 static d_ioctl_t ispioctl;
39 static void isp_intr_enable(void *);
40 static void isp_cam_async(void *, u_int32_t, struct cam_path *, void *);
41 static void isp_poll(struct cam_sim *);
42 static timeout_t isp_watchdog;
43 static void isp_kthread(void *);
44 static void isp_action(struct cam_sim *, union ccb *);
/*
 * Character-device switch for the isp control device (used by ispioctl).
 * NOTE(review): this file is a sparse extraction -- several cdevsw entries
 * (open/read/write/ioctl and the closing brace) are missing from this view.
 */
47 #define ISP_CDEV_MAJOR 248
48 static struct cdevsw isp_cdevsw = {
50 /* close */ nullclose,
56 /* strategy */ nostrategy,
58 /* maj */ ISP_CDEV_MAJOR,
/* Head of the singly-linked list of all attached isp instances. */
64 static struct ispsoftc *isplist = NULL;
/*
 * isp_attach: platform (CAM) attachment for one Qlogic ISP instance.
 * Registers one SIM per channel, hooks async events, creates the FC
 * kernel thread and the control device node, then links the softc
 * onto the global isplist.
 * NOTE(review): extraction gaps -- braces, returns and some error-path
 * lines are missing throughout this view; do not edit code here without
 * the complete file.
 */
67 isp_attach(struct ispsoftc *isp)
69 int primary, secondary;
70 struct ccb_setasync csa;
71 struct cam_devq *devq;
73 struct cam_path *path;
76 * Establish (in case of 12X0) which bus is the primary.
/* One shared device queue sized to the adapter's command limit. */
83 * Create the device queue for our SIM(s).
85 devq = cam_simq_alloc(isp->isp_maxcmds);
91 * Construct our SIM entry.
93 ISPLOCK_2_CAMLOCK(isp);
94 sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
95 device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
98 CAMLOCK_2_ISPLOCK(isp);
101 CAMLOCK_2_ISPLOCK(isp);
/*
 * Defer interrupt enabling until boot-time configuration completes;
 * isp_intr_enable runs from this config intrhook.
 */
103 isp->isp_osinfo.ehook.ich_func = isp_intr_enable;
104 isp->isp_osinfo.ehook.ich_arg = isp;
105 ISPLOCK_2_CAMLOCK(isp);
106 if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) {
107 cam_sim_free(sim, TRUE);
108 CAMLOCK_2_ISPLOCK(isp);
109 isp_prt(isp, ISP_LOGERR,
110 "could not establish interrupt enable hook");
114 if (xpt_bus_register(sim, primary) != CAM_SUCCESS) {
115 cam_sim_free(sim, TRUE);
116 CAMLOCK_2_ISPLOCK(isp);
120 if (xpt_create_path(&path, NULL, cam_sim_path(sim),
121 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
122 xpt_bus_deregister(cam_sim_path(sim));
123 cam_sim_free(sim, TRUE);
124 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
125 CAMLOCK_2_ISPLOCK(isp);
/* Register for AC_LOST_DEVICE async notifications on this bus. */
129 xpt_setup_ccb(&csa.ccb_h, path, 5);
130 csa.ccb_h.func_code = XPT_SASYNC_CB;
131 csa.event_enable = AC_LOST_DEVICE;
132 csa.callback = isp_cam_async;
133 csa.callback_arg = sim;
134 xpt_action((union ccb *)&csa);
135 CAMLOCK_2_ISPLOCK(isp);
137 isp->isp_path = path;
139 * Create a kernel thread for fibre channel instances. We
140 * don't have dual channel FC cards.
143 ISPLOCK_2_CAMLOCK(isp);
144 if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc,
145 "%s: fc_thrd", device_get_nameunit(isp->isp_dev))) {
/* Thread creation failed: tear down everything set up above. */
146 xpt_bus_deregister(cam_sim_path(sim));
147 cam_sim_free(sim, TRUE);
148 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
149 CAMLOCK_2_ISPLOCK(isp);
150 isp_prt(isp, ISP_LOGERR, "could not create kthread");
153 CAMLOCK_2_ISPLOCK(isp);
158 * If we have a second channel, construct SIM entry for that.
160 if (IS_DUALBUS(isp)) {
161 ISPLOCK_2_CAMLOCK(isp);
162 sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
163 device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
/* Second-SIM failure paths also unwind the first bus/path. */
165 xpt_bus_deregister(cam_sim_path(isp->isp_sim));
166 xpt_free_path(isp->isp_path);
168 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
171 if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) {
172 xpt_bus_deregister(cam_sim_path(isp->isp_sim));
173 xpt_free_path(isp->isp_path);
174 cam_sim_free(sim, TRUE);
175 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
176 CAMLOCK_2_ISPLOCK(isp);
180 if (xpt_create_path(&path, NULL, cam_sim_path(sim),
181 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
182 xpt_bus_deregister(cam_sim_path(isp->isp_sim));
183 xpt_free_path(isp->isp_path);
184 xpt_bus_deregister(cam_sim_path(sim));
185 cam_sim_free(sim, TRUE);
186 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
187 CAMLOCK_2_ISPLOCK(isp);
/* Same AC_LOST_DEVICE registration for the secondary bus. */
191 xpt_setup_ccb(&csa.ccb_h, path, 5);
192 csa.ccb_h.func_code = XPT_SASYNC_CB;
193 csa.event_enable = AC_LOST_DEVICE;
194 csa.callback = isp_cam_async;
195 csa.callback_arg = sim;
196 xpt_action((union ccb *)&csa);
197 CAMLOCK_2_ISPLOCK(isp);
199 isp->isp_path2 = path;
202 * Create device nodes
204 (void) make_dev(&isp_cdevsw, device_get_unit(isp->isp_dev), UID_ROOT,
205 GID_OPERATOR, 0600, "%s", device_get_nameunit(isp->isp_dev));
207 if (isp->isp_role != ISP_ROLE_NONE) {
208 isp->isp_state = ISP_RUNSTATE;
/* Append this instance to the tail of the global isplist. */
210 if (isplist == NULL) {
213 struct ispsoftc *tmp = isplist;
214 while (tmp->isp_osinfo.next) {
215 tmp = tmp->isp_osinfo.next;
217 tmp->isp_osinfo.next = isp;
/*
 * isp_freeze_loopdown: freeze the SIM queue because the FC loop is down.
 * Only the first caller actually freezes the queue; later callers just
 * (re)mark the SIMQFRZ_LOOPDOWN reason bit.  'msg' tags the log output.
 * NOTE(review): the else-branch brace lines are missing from this view.
 */
223 isp_freeze_loopdown(struct ispsoftc *isp, char *msg)
225 if (isp->isp_osinfo.simqfrozen == 0) {
226 isp_prt(isp, ISP_LOGDEBUG0, "%s: freeze simq (loopdown)", msg);
227 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
228 ISPLOCK_2_CAMLOCK(isp);
229 xpt_freeze_simq(isp->isp_sim, 1);
230 CAMLOCK_2_ISPLOCK(isp);
232 isp_prt(isp, ISP_LOGDEBUG0, "%s: mark frozen (loopdown)", msg);
233 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
/*
 * ispioctl: control-device ioctl handler.  Locates the softc whose unit
 * matches the device minor, then dispatches on 'cmd' (crash dump
 * retrieval, debug level, FC info/stats, FC parameter get/set, ...).
 * NOTE(review): many case labels, returns and braces are missing from
 * this extraction; the switch structure is only partially visible.
 */
238 ispioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
240 struct ispsoftc *isp;
/* Walk the global isplist to find the instance for this minor. */
245 if (minor(dev) == device_get_unit(isp->isp_dev)) {
248 isp = isp->isp_osinfo.next;
254 #ifdef ISP_FW_CRASH_DUMP
255 case ISP_GET_FW_CRASH_DUMP:
257 u_int16_t *ptr = FCPARAM(isp)->isp_dump_data;
262 sz = QLA2200_RISC_IMAGE_DUMP_SIZE;
264 sz = QLA2300_RISC_IMAGE_DUMP_SIZE;
267 void *uaddr = *((void **) addr);
268 if (copyout(ptr, uaddr, sz)) {
280 case ISP_FORCE_CRASH_DUMP:
282 isp_freeze_loopdown(isp, "ispioctl(ISP_FORCE_CRASH_DUMP)");
/* Swap debug level: write new value, return the old one to the caller. */
291 int olddblev = isp->isp_dblev;
292 isp->isp_dblev = *(int *)addr;
293 *(int *)addr = olddblev;
/* 5-second (microsecond units) wait for FC loop to reach run state. */
306 if (isp_fc_runstate(isp, 5 * 1000000)) {
317 if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
325 case ISP_FC_GETDINFO:
327 struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
330 if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) {
335 lp = &FCPARAM(isp)->portdb[ifc->loopid];
337 ifc->loopid = lp->loopid;
338 ifc->portid = lp->portid;
339 ifc->node_wwn = lp->node_wwn;
340 ifc->port_wwn = lp->port_wwn;
/* ISP_STATS: snapshot driver/interrupt counters into caller buffer. */
350 isp_stats_t *sp = (isp_stats_t *) addr;
352 MEMZERO(sp, sizeof (*sp));
353 sp->isp_stat_version = ISP_STATS_VERSION;
354 sp->isp_type = isp->isp_type;
355 sp->isp_revision = isp->isp_revision;
357 sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt;
358 sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus;
359 sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc;
/* NOTE(review): 'ISP_INGOASYNC' spelling comes from isp_ioctl.h -- verify. */
360 sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync;
361 sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt;
362 sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt;
363 sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater;
364 sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater;
/* ISP_CLR_STATS: zero all counters. */
372 isp->isp_intbogus = 0;
373 isp->isp_intmboxc = 0;
374 isp->isp_intoasync = 0;
375 isp->isp_rsltccmplt = 0;
376 isp->isp_fphccmplt = 0;
377 isp->isp_rscchiwater = 0;
378 isp->isp_fpcchiwater = 0;
382 case ISP_FC_GETHINFO:
384 struct isp_hba_device *hba = (struct isp_hba_device *) addr;
385 MEMZERO(hba, sizeof (*hba));
387 hba->fc_speed = FCPARAM(isp)->isp_gbspeed;
388 hba->fc_scsi_supported = 1;
389 hba->fc_topology = FCPARAM(isp)->isp_topo + 1;
390 hba->fc_loopid = FCPARAM(isp)->isp_loopid;
391 hba->active_node_wwn = FCPARAM(isp)->isp_nodewwn;
392 hba->active_port_wwn = FCPARAM(isp)->isp_portwwn;
397 case ISP_GET_FC_PARAM:
399 struct isp_fc_param *f = (struct isp_fc_param *) addr;
406 if (strcmp(f->param_name, "framelength") == 0) {
407 f->parameter = FCPARAM(isp)->isp_maxfrmlen;
411 if (strcmp(f->param_name, "exec_throttle") == 0) {
412 f->parameter = FCPARAM(isp)->isp_execthrottle;
416 if (strcmp(f->param_name, "fullduplex") == 0) {
417 if (FCPARAM(isp)->isp_fwoptions & ICBOPT_FULL_DUPLEX)
422 if (strcmp(f->param_name, "loopid") == 0) {
423 f->parameter = FCPARAM(isp)->isp_loopid;
430 case ISP_SET_FC_PARAM:
432 struct isp_fc_param *f = (struct isp_fc_param *) addr;
433 u_int32_t param = f->parameter;
440 if (strcmp(f->param_name, "framelength") == 0) {
/*
 * NOTE(review): 'param != 1024' is tested twice; upstream FreeBSD
 * accepts 512, 1024 and 2048 here, so the second test should almost
 * certainly be 'param != 2048'.  As written, 2048 is rejected.
 */
441 if (param != 512 && param != 1024 && param != 1024) {
445 FCPARAM(isp)->isp_maxfrmlen = param;
449 if (strcmp(f->param_name, "exec_throttle") == 0) {
450 if (param < 16 || param > 255) {
454 FCPARAM(isp)->isp_execthrottle = param;
458 if (strcmp(f->param_name, "fullduplex") == 0) {
459 if (param != 0 && param != 1) {
464 FCPARAM(isp)->isp_fwoptions |=
467 FCPARAM(isp)->isp_fwoptions &=
473 if (strcmp(f->param_name, "loopid") == 0) {
/* Valid FC-AL loop ids are 0..125. */
474 if (param < 0 || param > 125) {
478 FCPARAM(isp)->isp_loopid = param;
/*
 * isp_intr_enable: config-intrhook callback established in isp_attach.
 * Enables interrupts (when the adapter has an active role) and then
 * removes the hook so autoconfiguration can proceed.
 */
492 isp_intr_enable(void *arg)
494 struct ispsoftc *isp = arg;
495 if (isp->isp_role != ISP_ROLE_NONE) {
498 /* Release our hook so that the boot can continue. */
499 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
503 * Put the target mode functions here, because some are inlines
506 #ifdef ISP_TARGET_MODE
508 static INLINE int is_lun_enabled(struct ispsoftc *, int, lun_id_t);
509 static INLINE int are_any_luns_enabled(struct ispsoftc *, int);
510 static INLINE tstate_t *get_lun_statep(struct ispsoftc *, int, lun_id_t);
511 static INLINE void rls_lun_statep(struct ispsoftc *, tstate_t *);
512 static INLINE int isp_psema_sig_rqe(struct ispsoftc *, int);
513 static INLINE int isp_cv_wait_timed_rqe(struct ispsoftc *, int, int);
514 static INLINE void isp_cv_signal_rqe(struct ispsoftc *, int, int);
515 static INLINE void isp_vsema_rqe(struct ispsoftc *, int);
516 static INLINE atio_private_data_t *isp_get_atpd(struct ispsoftc *, int);
518 create_lun_state(struct ispsoftc *, int, struct cam_path *, tstate_t **);
519 static void destroy_lun_state(struct ispsoftc *, tstate_t *);
520 static void isp_en_lun(struct ispsoftc *, union ccb *);
521 static cam_status isp_abort_tgt_ccb(struct ispsoftc *, union ccb *);
522 static timeout_t isp_refire_putback_atio;
523 static void isp_complete_ctio(union ccb *);
524 static void isp_target_putback_atio(union ccb *);
525 static cam_status isp_target_start_ctio(struct ispsoftc *, union ccb *);
526 static int isp_handle_platform_atio(struct ispsoftc *, at_entry_t *);
527 static int isp_handle_platform_atio2(struct ispsoftc *, at2_entry_t *);
528 static int isp_handle_platform_ctio(struct ispsoftc *, void *);
529 static int isp_handle_platform_notify_scsi(struct ispsoftc *, in_entry_t *);
530 static int isp_handle_platform_notify_fc(struct ispsoftc *, in_fcentry_t *);
/*
 * is_lun_enabled: return whether a (bus, lun) pair already has a tstate
 * entry in the lun hash chain for its hash bucket.
 */
533 is_lun_enabled(struct ispsoftc *isp, int bus, lun_id_t lun)
536 tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
541 if (tptr->lun == (lun_id_t) lun && tptr->bus == bus) {
544 } while ((tptr = tptr->next) != NULL);
/*
 * are_any_luns_enabled: scan the lun hash for any enabled lun on the
 * given port.  On dual-bus adapters each bus owns half the hash slots.
 */
549 are_any_luns_enabled(struct ispsoftc *isp, int port)
552 if (IS_DUALBUS(isp)) {
553 lo = (port * (LUN_HASH_SIZE >> 1));
554 hi = lo + (LUN_HASH_SIZE >> 1);
559 for (lo = 0; lo < hi; lo++) {
560 if (isp->isp_osinfo.lun_hash[lo]) {
/*
 * get_lun_statep: look up the tstate for (bus, lun).  The wildcard lun
 * maps to the per-bus default tstate when wildcard mode is enabled;
 * otherwise the lun hash chain is searched.
 */
567 static INLINE tstate_t *
568 get_lun_statep(struct ispsoftc *isp, int bus, lun_id_t lun)
570 tstate_t *tptr = NULL;
572 if (lun == CAM_LUN_WILDCARD) {
573 if (isp->isp_osinfo.tmflags[bus] & TM_WILDCARD_ENABLED) {
574 tptr = &isp->isp_osinfo.tsdflt[bus];
579 tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
586 if (tptr->lun == lun && tptr->bus == bus) {
590 } while ((tptr = tptr->next) != NULL);
/* rls_lun_statep: drop a tstate reference (body missing from this view). */
595 rls_lun_statep(struct ispsoftc *isp, tstate_t *tptr)
/*
 * isp_psema_sig_rqe: P-operation on the per-bus target-mode "busy"
 * semaphore.  Sleeps (interruptibly) until TM_BUSY clears, then claims
 * it.  A non-zero tsleep return (signal) aborts the wait.
 */
602 isp_psema_sig_rqe(struct ispsoftc *isp, int bus)
604 while (isp->isp_osinfo.tmflags[bus] & TM_BUSY) {
605 isp->isp_osinfo.tmflags[bus] |= TM_WANTED;
606 if (tsleep(&isp->isp_osinfo.tmflags[bus],
607 PRIBIO|PCATCH, "i0", 0)) {
610 isp->isp_osinfo.tmflags[bus] |= TM_BUSY;
/*
 * isp_cv_wait_timed_rqe: wait (up to 'timo' ticks) for the firmware's
 * enable/modify-lun completion to post a status via isp_cv_signal_rqe.
 */
616 isp_cv_wait_timed_rqe(struct ispsoftc *isp, int bus, int timo)
618 if (tsleep(&isp->isp_osinfo.rstatus[bus], PRIBIO, "qt1", timo)) {
/*
 * isp_cv_signal_rqe: record the lun-command completion status for the
 * bus and wake any waiter in isp_cv_wait_timed_rqe.
 */
625 isp_cv_signal_rqe(struct ispsoftc *isp, int bus, int status)
627 isp->isp_osinfo.rstatus[bus] = status;
628 wakeup(&isp->isp_osinfo.rstatus[bus]);
/*
 * isp_vsema_rqe: V-operation paired with isp_psema_sig_rqe -- wake any
 * TM_WANTED waiter, then release the TM_BUSY bit.
 */
632 isp_vsema_rqe(struct ispsoftc *isp, int bus)
634 if (isp->isp_osinfo.tmflags[bus] & TM_WANTED) {
635 isp->isp_osinfo.tmflags[bus] &= ~TM_WANTED;
636 wakeup(&isp->isp_osinfo.tmflags[bus]);
638 isp->isp_osinfo.tmflags[bus] &= ~TM_BUSY;
/*
 * isp_get_atpd: linear search of the ATIO private-data array for the
 * entry matching 'tag' (lookup condition line missing from this view).
 */
641 static __inline atio_private_data_t *
642 isp_get_atpd(struct ispsoftc *isp, int tag)
644 atio_private_data_t *atp;
645 for (atp = isp->isp_osinfo.atpdp;
646 atp < &isp->isp_osinfo.atpdp[ATPDPSIZE]; atp++) {
/*
 * create_lun_state: allocate and initialize a tstate for (bus, lun),
 * clone the caller's CAM path for it, and insert it at the head of the
 * appropriate lun hash chain.  Returns a CAM status code.
 */
654 create_lun_state(struct ispsoftc *isp, int bus,
655 struct cam_path *path, tstate_t **rslt)
660 tstate_t *tptr, *new;
662 lun = xpt_path_lun_id(path);
664 return (CAM_LUN_INVALID);
666 if (is_lun_enabled(isp, bus, lun)) {
667 return (CAM_LUN_ALRDY_ENA);
/* M_NOWAIT: may be called from a context that cannot sleep. */
669 new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO);
671 return (CAM_RESRC_UNAVAIL);
674 status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path),
675 xpt_path_target_id(path), xpt_path_lun_id(path));
676 if (status != CAM_REQ_CMP) {
682 SLIST_INIT(&new->atios);
683 SLIST_INIT(&new->inots);
/* Head-insert into the hash bucket for this (bus, lun). */
686 hfx = LUN_HASH_FUNC(isp, new->bus, new->lun);
687 tptr = isp->isp_osinfo.lun_hash[hfx];
689 isp->isp_osinfo.lun_hash[hfx] = new;
696 return (CAM_REQ_CMP);
/*
 * destroy_lun_state: unlink a tstate from its lun hash chain (handling
 * both head and interior positions) and free it.
 */
700 destroy_lun_state(struct ispsoftc *isp, tstate_t *tptr)
705 hfx = LUN_HASH_FUNC(isp, tptr->bus, tptr->lun);
709 pw = isp->isp_osinfo.lun_hash[hfx];
712 } else if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
713 isp->isp_osinfo.lun_hash[hfx] = pw->next;
718 if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
729 free(tptr, M_DEVBUF);
/*
 * isp_en_lun: XPT_EN_LUN handler -- enable or disable a lun (or the
 * target/lun wildcard) for target mode.  Validates target/lun, checks
 * firmware capabilities on FC, toggles bus-level target mode as needed,
 * then issues ENABLE/MODIFY/DISABLE LUN firmware commands and waits for
 * their completions under the per-bus request semaphore.
 * NOTE(review): many braces, returns and 'break's are missing from this
 * extraction; control flow shown here is incomplete.
 */
733 * we enter with our locks held.
736 isp_en_lun(struct ispsoftc *isp, union ccb *ccb)
738 const char lfmt[] = "Lun now %sabled for target mode on channel %d";
739 struct ccb_en_lun *cel = &ccb->cel;
742 int bus, cmd, av, wildcard;
747 bus = XS_CHANNEL(ccb) & 0x1;
748 tgt = ccb->ccb_h.target_id;
749 lun = ccb->ccb_h.target_lun;
752 * Do some sanity checking first.
755 if ((lun != CAM_LUN_WILDCARD) &&
756 (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns)) {
757 ccb->ccb_h.status = CAM_LUN_INVALID;
/* On SCSI the only valid non-wildcard target is our own initiator id. */
762 sdparam *sdp = isp->isp_param;
764 if (tgt != CAM_TARGET_WILDCARD &&
765 tgt != sdp->isp_initiator_id) {
766 ccb->ccb_h.status = CAM_TID_INVALID;
770 if (tgt != CAM_TARGET_WILDCARD &&
771 tgt != FCPARAM(isp)->isp_iid) {
772 ccb->ccb_h.status = CAM_TID_INVALID;
776 * This is as a good a place as any to check f/w capabilities.
778 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_TMODE) == 0) {
779 isp_prt(isp, ISP_LOGERR,
780 "firmware does not support target mode");
781 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
785 * XXX: We *could* handle non-SCCLUN f/w, but we'd have to
786 * XXX: dorks with our already fragile enable/disable code.
788 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) {
789 isp_prt(isp, ISP_LOGERR,
790 "firmware not SCCLUN capable");
794 if (tgt == CAM_TARGET_WILDCARD) {
795 if (lun == CAM_LUN_WILDCARD) {
798 ccb->ccb_h.status = CAM_LUN_INVALID;
806 * Next check to see whether this is a target/lun wildcard action.
808 * If so, we know that we can accept commands for luns that haven't
809 * been enabled yet and send them upstream. Otherwise, we have to
810 * handle them locally (if we see them at all).
814 tptr = &isp->isp_osinfo.tsdflt[bus];
816 if (isp->isp_osinfo.tmflags[bus] &
817 TM_WILDCARD_ENABLED) {
818 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
822 xpt_create_path(&tptr->owner, NULL,
823 xpt_path_path_id(ccb->ccb_h.path),
824 xpt_path_target_id(ccb->ccb_h.path),
825 xpt_path_lun_id(ccb->ccb_h.path));
826 if (ccb->ccb_h.status != CAM_REQ_CMP) {
829 SLIST_INIT(&tptr->atios);
830 SLIST_INIT(&tptr->inots);
831 isp->isp_osinfo.tmflags[bus] |= TM_WILDCARD_ENABLED;
833 if ((isp->isp_osinfo.tmflags[bus] &
834 TM_WILDCARD_ENABLED) == 0) {
835 ccb->ccb_h.status = CAM_REQ_CMP;
839 ccb->ccb_h.status = CAM_SCSI_BUSY;
842 xpt_free_path(tptr->owner);
843 isp->isp_osinfo.tmflags[bus] &= ~TM_WILDCARD_ENABLED;
848 * Now check to see whether this bus needs to be
849 * enabled/disabled with respect to target mode.
852 if (cel->enable && !(isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED)) {
853 av |= ENABLE_TARGET_FLAG;
854 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
856 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
858 isp->isp_osinfo.tmflags[bus] &=
859 ~TM_WILDCARD_ENABLED;
860 xpt_free_path(tptr->owner);
864 isp->isp_osinfo.tmflags[bus] |= TM_TMODE_ENABLED;
865 isp_prt(isp, ISP_LOGINFO,
866 "Target Mode enabled on channel %d", bus);
867 } else if (cel->enable == 0 &&
868 (isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED) && wildcard) {
869 if (are_any_luns_enabled(isp, bus)) {
870 ccb->ccb_h.status = CAM_SCSI_BUSY;
873 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
875 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
878 isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
879 isp_prt(isp, ISP_LOGINFO,
880 "Target Mode disabled on channel %d", bus);
884 ccb->ccb_h.status = CAM_REQ_CMP;
/* Non-wildcard: create (enable) or look up (disable) the lun state. */
890 create_lun_state(isp, bus, ccb->ccb_h.path, &tptr);
891 if (ccb->ccb_h.status != CAM_REQ_CMP) {
895 tptr = get_lun_statep(isp, bus, lun);
897 ccb->ccb_h.status = CAM_LUN_INVALID;
/* Serialize firmware lun commands behind the per-bus semaphore. */
902 if (isp_psema_sig_rqe(isp, bus)) {
903 rls_lun_statep(isp, tptr);
905 destroy_lun_state(isp, tptr);
906 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
/* Enable path: ENABLE_LUN (or MODIFY_LUN on FC for lun != 0). */
911 u_int32_t seq = isp->isp_osinfo.rollinfo++;
912 int c, n, ulun = lun;
914 cmd = RQSTYPE_ENABLE_LUN;
917 if (IS_FC(isp) && lun != 0) {
918 cmd = RQSTYPE_MODIFY_LUN;
921 * For SCC firmware, we only deal with setting
922 * (enabling or modifying) lun 0.
927 if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) {
928 xpt_print_path(ccb->ccb_h.path);
929 isp_prt(isp, ISP_LOGWARN, "isp_lun_cmd failed");
932 if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
933 xpt_print_path(ccb->ccb_h.path);
934 isp_prt(isp, ISP_LOGERR,
935 "wait for ENABLE/MODIFY LUN timed out");
938 rstat = isp->isp_osinfo.rstatus[bus];
939 if (rstat != LUN_OK) {
940 xpt_print_path(ccb->ccb_h.path);
941 isp_prt(isp, ISP_LOGERR,
942 "ENABLE/MODIFY LUN returned 0x%x", rstat);
/* Disable path: negative cmd values request removal. */
946 int c, n, ulun = lun;
950 seq = isp->isp_osinfo.rollinfo++;
951 cmd = -RQSTYPE_MODIFY_LUN;
955 if (IS_FC(isp) && lun != 0) {
958 * For SCC firmware, we only deal with setting
959 * (enabling or modifying) lun 0.
963 if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) {
964 xpt_print_path(ccb->ccb_h.path);
965 isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed");
968 if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
969 xpt_print_path(ccb->ccb_h.path);
970 isp_prt(isp, ISP_LOGERR,
971 "wait for MODIFY LUN timed out");
974 rstat = isp->isp_osinfo.rstatus[bus];
975 if (rstat != LUN_OK) {
976 xpt_print_path(ccb->ccb_h.path);
977 isp_prt(isp, ISP_LOGERR,
978 "MODIFY LUN returned 0x%x", rstat);
981 if (IS_FC(isp) && lun) {
985 seq = isp->isp_osinfo.rollinfo++;
988 cmd = -RQSTYPE_ENABLE_LUN;
989 if (isp_lun_cmd(isp, cmd, bus, tgt, lun, 0, 0, seq)) {
990 xpt_print_path(ccb->ccb_h.path);
991 isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed");
994 if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
995 xpt_print_path(ccb->ccb_h.path);
996 isp_prt(isp, ISP_LOGERR,
997 "wait for DISABLE LUN timed out");
1000 rstat = isp->isp_osinfo.rstatus[bus];
1001 if (rstat != LUN_OK) {
1002 xpt_print_path(ccb->ccb_h.path);
1003 isp_prt(isp, ISP_LOGWARN,
1004 "DISABLE LUN returned 0x%x", rstat);
/* Last lun disabled: turn bus target mode back off. */
1007 if (are_any_luns_enabled(isp, bus) == 0) {
1008 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
1010 isp_prt(isp, ISP_LOGWARN,
1011 "disable target mode on channel %d failed",
1015 isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
1016 xpt_print_path(ccb->ccb_h.path);
1017 isp_prt(isp, ISP_LOGINFO,
1018 "Target Mode disabled on channel %d", bus);
/* Release the semaphore and report the final outcome. */
1023 isp_vsema_rqe(isp, bus);
1025 if (rstat != LUN_OK) {
1026 xpt_print_path(ccb->ccb_h.path);
1027 isp_prt(isp, ISP_LOGWARN,
1028 "lun %sable failed", (cel->enable) ? "en" : "dis");
1029 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1030 rls_lun_statep(isp, tptr);
1032 destroy_lun_state(isp, tptr);
1034 xpt_print_path(ccb->ccb_h.path);
1035 isp_prt(isp, ISP_LOGINFO, lfmt,
1036 (cel->enable) ? "en" : "dis", bus);
1037 rls_lun_statep(isp, tptr);
1038 if (cel->enable == 0) {
1039 destroy_lun_state(isp, tptr);
1041 ccb->ccb_h.status = CAM_REQ_CMP;
/*
 * isp_abort_tgt_ccb: abort a queued target-mode CCB (ATIO or INOT) by
 * unlinking it from the tstate's pending SLIST.  Returns a CAM status;
 * only CCBs still sitting on our lists can be aborted here.
 */
1046 isp_abort_tgt_ccb(struct ispsoftc *isp, union ccb *ccb)
1049 struct ccb_hdr_slist *lp;
1050 struct ccb_hdr *curelm;
1052 union ccb *accb = ccb->cab.abort_ccb;
/* Non-wildcard target must match our own id for this adapter type. */
1054 if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
1055 if (IS_FC(isp) && (accb->ccb_h.target_id !=
1056 ((fcparam *) isp->isp_param)->isp_loopid)) {
1057 return (CAM_PATH_INVALID);
1058 } else if (IS_SCSI(isp) && (accb->ccb_h.target_id !=
1059 ((sdparam *) isp->isp_param)->isp_initiator_id)) {
1060 return (CAM_PATH_INVALID);
1063 tptr = get_lun_statep(isp, XS_CHANNEL(ccb), accb->ccb_h.target_lun);
1065 return (CAM_PATH_INVALID);
/* Pick the list that can hold this kind of CCB. */
1067 if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
1069 } else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
1072 rls_lun_statep(isp, tptr);
1073 return (CAM_UA_ABORT);
/* Classic singly-linked-list removal: head case, then interior scan. */
1075 curelm = SLIST_FIRST(lp);
1077 if (curelm == &accb->ccb_h) {
1079 SLIST_REMOVE_HEAD(lp, sim_links.sle);
1081 while(curelm != NULL) {
1082 struct ccb_hdr *nextelm;
1084 nextelm = SLIST_NEXT(curelm, sim_links.sle);
1085 if (nextelm == &accb->ccb_h) {
1087 SLIST_NEXT(curelm, sim_links.sle) =
1088 SLIST_NEXT(nextelm, sim_links.sle);
1094 rls_lun_statep(isp, tptr);
1096 accb->ccb_h.status = CAM_REQ_ABORTED;
1097 return (CAM_REQ_CMP);
1099 return(CAM_PATH_INVALID);
/*
 * isp_target_start_ctio: build and queue a CTIO (SCSI) or CTIO2 (FC)
 * request entry for a target-mode data/status phase.  Constructs the
 * entry in a local buffer, registers a command handle, then hands the
 * entry to the DMA-setup layer which also swizzles and queues it.
 * Returns a CAM status (CAM_REQ_INPROG on success).
 */
1103 isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb)
1106 struct ccb_scsiio *cso = &ccb->csio;
1107 u_int16_t *hp, save_handle;
1108 u_int16_t nxti, optr;
1109 u_int8_t local[QENTRY_LEN];
1112 if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
1113 xpt_print_path(ccb->ccb_h.path);
1114 printf("Request Queue Overflow in isp_target_start_ctio\n");
1115 return (CAM_RESRC_UNAVAIL);
1117 bzero(local, QENTRY_LEN);
1120 * We're either moving data or completing a command here.
/* FC path: CTIO2 entry. */
1124 atio_private_data_t *atp;
1125 ct2_entry_t *cto = (ct2_entry_t *) local;
1127 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
1128 cto->ct_header.rqs_entry_count = 1;
1129 cto->ct_iid = cso->init_id;
1130 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) {
1131 cto->ct_lun = ccb->ccb_h.target_lun;
1134 atp = isp_get_atpd(isp, cso->tag_id);
1136 isp_prt(isp, ISP_LOGERR,
1137 "cannot find private data adjunct for tag %x",
1142 cto->ct_rxid = cso->tag_id;
/* Mode 1 (status, no data) when there is nothing left to move. */
1143 if (cso->dxfer_len == 0) {
1144 cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA;
1145 if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1146 cto->ct_flags |= CT2_SENDSTATUS;
1147 cto->rsp.m1.ct_scsi_status = cso->scsi_status;
1149 atp->orig_datalen - atp->bytes_xfered;
1150 if (cto->ct_resid < 0) {
1151 cto->rsp.m1.ct_scsi_status |=
1153 } else if (cto->ct_resid > 0) {
1154 cto->rsp.m1.ct_scsi_status |=
1158 if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) {
1159 int m = min(cso->sense_len, MAXRESPLEN);
1160 bcopy(&cso->sense_data, cto->rsp.m1.ct_resp, m);
1161 cto->rsp.m1.ct_senselen = m;
1162 cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID;
/* Mode 0: data movement, optionally with piggybacked status. */
1165 cto->ct_flags |= CT2_FLAG_MODE0;
1166 if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1167 cto->ct_flags |= CT2_DATA_IN;
1169 cto->ct_flags |= CT2_DATA_OUT;
1171 cto->ct_reloff = atp->bytes_xfered;
1172 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
1173 cto->ct_flags |= CT2_SENDSTATUS;
1174 cto->rsp.m0.ct_scsi_status = cso->scsi_status;
1177 (atp->bytes_xfered + cso->dxfer_len);
1178 if (cto->ct_resid < 0) {
1179 cto->rsp.m0.ct_scsi_status |=
1181 } else if (cto->ct_resid > 0) {
1182 cto->rsp.m0.ct_scsi_status |=
1186 atp->last_xframt = cso->dxfer_len;
1189 * If we're sending data and status back together,
1190 * we can't also send back sense data as well.
1192 ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1195 if (cto->ct_flags & CT2_SENDSTATUS) {
1196 isp_prt(isp, ISP_LOGTDEBUG0,
1197 "CTIO2[%x] STATUS %x origd %u curd %u resid %u",
1198 cto->ct_rxid, cso->scsi_status, atp->orig_datalen,
1199 cso->dxfer_len, cto->ct_resid);
1200 cto->ct_flags |= CT2_CCINCR;
1201 atp->state = ATPD_STATE_LAST_CTIO;
1203 atp->state = ATPD_STATE_CTIO;
1204 cto->ct_timeout = 10;
1205 hp = &cto->ct_syshandle;
/* Parallel SCSI path: classic CTIO entry. */
1207 ct_entry_t *cto = (ct_entry_t *) local;
1209 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
1210 cto->ct_header.rqs_entry_count = 1;
1211 cto->ct_iid = cso->init_id;
1212 cto->ct_iid |= XS_CHANNEL(ccb) << 7;
1213 cto->ct_tgt = ccb->ccb_h.target_id;
1214 cto->ct_lun = ccb->ccb_h.target_lun;
1215 cto->ct_fwhandle = AT_GET_HANDLE(cso->tag_id);
1216 if (AT_HAS_TAG(cso->tag_id)) {
1217 cto->ct_tag_val = (u_int8_t) AT_GET_TAG(cso->tag_id);
1218 cto->ct_flags |= CT_TQAE;
1220 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
1221 cto->ct_flags |= CT_NODISC;
1223 if (cso->dxfer_len == 0) {
1224 cto->ct_flags |= CT_NO_DATA;
1225 } else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1226 cto->ct_flags |= CT_DATA_IN;
1228 cto->ct_flags |= CT_DATA_OUT;
1230 if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1231 cto->ct_flags |= CT_SENDSTATUS|CT_CCINCR;
1232 cto->ct_scsi_status = cso->scsi_status;
1233 cto->ct_resid = cso->resid;
1234 isp_prt(isp, ISP_LOGTDEBUG0,
1235 "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x",
1236 cto->ct_fwhandle, cso->scsi_status, cso->resid,
1239 ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1240 cto->ct_timeout = 10;
1241 hp = &cto->ct_syshandle;
/* Register the CCB under a firmware-visible handle. */
1244 if (isp_save_xs(isp, (XS_T *)ccb, hp)) {
1245 xpt_print_path(ccb->ccb_h.path);
1246 printf("No XFLIST pointers for isp_target_start_ctio\n");
1247 return (CAM_RESRC_UNAVAIL);
1252 * Call the dma setup routines for this entry (and any subsequent
1253 * CTIOs) if there's data to move, and then tell the f/w it's got
1254 * new things to play with. As with isp_start's usage of DMA setup,
1255 * any swizzling is done in the machine dependent layer. Because
1256 * of this, we put the request onto the queue area first in native
1262 switch (ISP_DMASETUP(isp, cso, (ispreq_t *) local, &nxti, optr)) {
1264 ISP_ADD_REQUEST(isp, nxti);
1265 return (CAM_REQ_INPROG);
/* DMA setup failed: release the handle we registered above. */
1268 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
1269 isp_destroy_handle(isp, save_handle);
1270 return (CAM_RESRC_UNAVAIL);
1273 isp_destroy_handle(isp, save_handle);
1274 return (XS_ERR(ccb));
/*
 * isp_refire_putback_atio: timeout(9) callback that retries putting an
 * ATIO back to the firmware after a request-queue-full condition.
 */
1279 isp_refire_putback_atio(void *arg)
1282 isp_target_putback_atio(arg);
/*
 * isp_target_putback_atio: return a consumed ATIO resource to the
 * firmware by queueing an ATIO/ATIO2 entry with CT_OK status, then
 * complete the originating CTIO CCB.  On request-queue overflow the
 * operation is retried via a 10-tick timeout.
 */
1287 isp_target_putback_atio(union ccb *ccb)
1289 struct ispsoftc *isp;
1290 struct ccb_scsiio *cso;
1291 u_int16_t nxti, optr;
1296 if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
1297 (void) timeout(isp_refire_putback_atio, ccb, 10);
1298 isp_prt(isp, ISP_LOGWARN,
1299 "isp_target_putback_atio: Request Queue Overflow");
1302 bzero(qe, QENTRY_LEN);
/* FC: rebuild an ATIO2 entry; lun field depends on SCCLUN firmware. */
1305 at2_entry_t local, *at = &local;
1306 MEMZERO(at, sizeof (at2_entry_t));
1307 at->at_header.rqs_entry_type = RQSTYPE_ATIO2;
1308 at->at_header.rqs_entry_count = 1;
1309 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
1310 at->at_scclun = (uint16_t) ccb->ccb_h.target_lun;
1312 at->at_lun = (uint8_t) ccb->ccb_h.target_lun;
1314 at->at_status = CT_OK;
1315 at->at_rxid = cso->tag_id;
1316 at->at_iid = cso->ccb_h.target_id;
1317 isp_put_atio2(isp, at, qe);
/* Parallel SCSI: rebuild a classic ATIO entry. */
1319 at_entry_t local, *at = &local;
1320 MEMZERO(at, sizeof (at_entry_t));
1321 at->at_header.rqs_entry_type = RQSTYPE_ATIO;
1322 at->at_header.rqs_entry_count = 1;
1323 at->at_iid = cso->init_id;
1324 at->at_iid |= XS_CHANNEL(ccb) << 7;
1325 at->at_tgt = cso->ccb_h.target_id;
1326 at->at_lun = cso->ccb_h.target_lun;
1327 at->at_status = CT_OK;
1328 at->at_tag_val = AT_GET_TAG(cso->tag_id);
1329 at->at_handle = AT_GET_HANDLE(cso->tag_id);
1330 isp_put_atio(isp, at, qe);
1332 ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe);
1333 ISP_ADD_REQUEST(isp, nxti);
1334 isp_complete_ctio(ccb);
/*
 * isp_complete_ctio: finalize a CTIO CCB -- promote in-progress status
 * to completed and clear the SIM-queued flag before handing it back.
 */
1338 isp_complete_ctio(union ccb *ccb)
1340 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1341 ccb->ccb_h.status |= CAM_REQ_CMP;
1343 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
/*
 * isp_handle_platform_atio: process an inbound (parallel SCSI) ATIO from
 * the firmware -- validate its status, find the lun state (falling back
 * to the wildcard lun), dequeue a waiting accept-TIO CCB, fill it with
 * the CDB/sense/tag information and complete it up to CAM.  When no
 * resources exist the command is bounced with BUSY or QUEUE FULL.
 */
1348 * Handle ATIO stuff that the generic code can't.
1349 * This means handling CDBs.
1353 isp_handle_platform_atio(struct ispsoftc *isp, at_entry_t *aep)
1356 int status, bus, iswildcard;
1357 struct ccb_accept_tio *atiop;
1360 * The firmware status (except for the QLTM_SVALID bit)
1361 * indicates why this ATIO was sent to us.
1363 * If QLTM_SVALID is set, the firware has recommended Sense Data.
1365 * If the DISCONNECTS DISABLED bit is set in the flags field,
1366 * we're still connected on the SCSI bus.
1368 status = aep->at_status;
1369 if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) {
1371 * Bus Phase Sequence error. We should have sense data
1372 * suggested by the f/w. I'm not sure quite yet what
1373 * to do about this for CAM.
1375 isp_prt(isp, ISP_LOGWARN, "PHASE ERROR");
1376 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1379 if ((status & ~QLTM_SVALID) != AT_CDB) {
1380 isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform",
1382 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
/* Look up lun state; fall back to the wildcard lun if enabled. */
1386 bus = GET_BUS_VAL(aep->at_iid);
1387 tptr = get_lun_statep(isp, bus, aep->at_lun);
1389 tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD);
1397 * Because we can't autofeed sense data back with
1398 * a command for parallel SCSI, we can't give back
1399 * a CHECK CONDITION. We'll give back a BUSY status
1400 * instead. This works out okay because the only
1401 * time we should, in fact, get this, is in the
1402 * case that somebody configured us without the
1403 * blackhole driver, so they get what they deserve.
1405 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1409 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1410 if (atiop == NULL) {
1412 * Because we can't autofeed sense data back with
1413 * a command for parallel SCSI, we can't give back
1414 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1415 * instead. This works out okay because the only time we
1416 * should, in fact, get this, is in the case that we've
1419 xpt_print_path(tptr->owner);
1420 isp_prt(isp, ISP_LOGWARN,
1421 "no ATIOS for lun %d from initiator %d on channel %d",
1422 aep->at_lun, GET_IID_VAL(aep->at_iid), bus);
1423 if (aep->at_flags & AT_TQAE)
1424 isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
1426 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1427 rls_lun_statep(isp, tptr);
/* Populate the dequeued accept-TIO CCB from the firmware entry. */
1430 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1432 atiop->ccb_h.target_id = aep->at_tgt;
1433 atiop->ccb_h.target_lun = aep->at_lun;
1435 if (aep->at_flags & AT_NODISC) {
1436 atiop->ccb_h.flags = CAM_DIS_DISCONNECT;
1438 atiop->ccb_h.flags = 0;
1441 if (status & QLTM_SVALID) {
1442 size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
1443 atiop->sense_len = amt;
1444 MEMCPY(&atiop->sense_data, aep->at_sense, amt);
1446 atiop->sense_len = 0;
1449 atiop->init_id = GET_IID_VAL(aep->at_iid);
1450 atiop->cdb_len = aep->at_cdblen;
1451 MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen);
1452 atiop->ccb_h.status = CAM_CDB_RECVD;
1454 * Construct a tag 'id' based upon tag value (which may be 0..255)
1455 * and the handle (which we have to preserve).
1457 AT_MAKE_TAGID(atiop->tag_id, aep);
1458 if (aep->at_flags & AT_TQAE) {
1459 atiop->tag_action = aep->at_tag_type;
1460 atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
1462 xpt_done((union ccb*)atiop);
1463 isp_prt(isp, ISP_LOGTDEBUG0,
1464 "ATIO[%x] CDB=0x%x bus %d iid%d->lun%d tag 0x%x ttype 0x%x %s",
1465 aep->at_handle, aep->at_cdb[0] & 0xff, GET_BUS_VAL(aep->at_iid),
1466 GET_IID_VAL(aep->at_iid), aep->at_lun, aep->at_tag_val & 0xff,
1467 aep->at_tag_type, (aep->at_flags & AT_NODISC)?
1468 "nondisc" : "disconnecting");
1469 rls_lun_statep(isp, tptr);
/*
 * NOTE(review): the leading decimal on each line below is the original
 * file's own line number; the gaps in that numbering show that interior
 * lines (braces, else-arms, breaks, returns) were elided in this extract.
 * Comments describe only what the visible lines establish.
 */
/*
 * isp_handle_platform_atio2 - handle an inbound ATIO2 (Accept Target I/O,
 * the Fibre Channel flavor) from the firmware.
 *
 * Visible flow: validate the firmware status word, derive the lun (SCC
 * lun if the firmware advertises ISP_FW_ATTR_SCCLUN), look up per-lun
 * state (falling back to the wildcard lun), pair the request with a free
 * accept-tio CCB and a private "atp" adjunct, copy CDB/tag/task-attribute
 * data into the CCB, and pass it up to CAM with xpt_done().
 */
1474 isp_handle_platform_atio2(struct ispsoftc *isp, at2_entry_t *aep)
1478 struct ccb_accept_tio *atiop;
1479 atio_private_data_t *atp;
1482 * The firmware status (except for the QLTM_SVALID bit)
1483 * indicates why this ATIO was sent to us.
1485 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
/* Anything other than a plain CDB-bearing ATIO is bogus: log and bounce
 * it back to the initiator with BUSY status. */
1487 if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) {
1488 isp_prt(isp, ISP_LOGWARN,
1489 "bogus atio (0x%x) leaked to platform", aep->at_status);
1490 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
/* SCC-lun capable firmware reports the lun in at_scclun. */
1494 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
1495 lun = aep->at_scclun;
1499 tptr = get_lun_statep(isp, 0, lun);
1501 isp_prt(isp, ISP_LOGWARN, "no state pointer for lun %d", lun);
1502 tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD);
1507 * What we'd like to know is whether or not we have a listener
1508 * upstream that really hasn't configured yet. If we do, then
1509 * we can give a more sensible reply here. If not, then we can
1510 * reject this out of hand.
1512 * Choices for what to send were
1514 * Not Ready, Unit Not Self-Configured Yet
1517 * for the former and
1519 * Illegal Request, Logical Unit Not Supported
1524 * We used to decide whether there was at least one listener
1525 * based upon whether the black hole driver was configured.
1526 * However, recent config(8) changes have made this hard to do
/* No lun state at all: reply BUSY and give up on this ATIO. */
1530 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
/* Need both a free private adjunct and a free accept-tio CCB. */
1534 atp = isp_get_atpd(isp, 0);
1535 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1536 if (atiop == NULL || atp == NULL) {
1538 * Because we can't autofeed sense data back with
1539 * a command for parallel SCSI, we can't give back
1540 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1541 * instead. This works out okay because the only time we
1542 * should, in fact, get this, is in the case that we've
1545 xpt_print_path(tptr->owner);
1546 isp_prt(isp, ISP_LOGWARN,
1547 "no %s for lun %d from initiator %d",
1548 (atp == NULL && atiop == NULL)? "ATIO2s *or* ATPS" :
1549 ((atp == NULL)? "ATPs" : "ATIO2s"), lun, aep->at_iid);
1550 rls_lun_statep(isp, tptr);
1551 isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
/* Claim the resources and start filling in the CCB for CAM. */
1554 atp->state = ATPD_STATE_ATIO;
1555 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1557 isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO2 lun %d, count now %d",
1558 lun, tptr->atio_count);
/* Default (wildcard) lun state: address the CCB with our own loop id. */
1560 if (tptr == &isp->isp_osinfo.tsdflt[0]) {
1561 atiop->ccb_h.target_id =
1562 ((fcparam *)isp->isp_param)->isp_loopid;
1563 atiop->ccb_h.target_lun = lun;
1566 * We don't get 'suggested' sense data as we do with SCSI cards.
1568 atiop->sense_len = 0;
1570 atiop->init_id = aep->at_iid;
1571 atiop->cdb_len = ATIO2_CDBLEN;
1572 MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN);
1573 atiop->ccb_h.status = CAM_CDB_RECVD;
1574 atiop->tag_id = aep->at_rxid;
/* Map the ATIO2 task attribute onto a CAM tag message type. */
1575 switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) {
1576 case ATIO2_TC_ATTR_SIMPLEQ:
1577 atiop->tag_action = MSG_SIMPLE_Q_TAG;
1579 case ATIO2_TC_ATTR_HEADOFQ:
1580 atiop->tag_action = MSG_HEAD_OF_Q_TAG;
1582 case ATIO2_TC_ATTR_ORDERED:
1583 atiop->tag_action = MSG_ORDERED_Q_TAG;
1585 case ATIO2_TC_ATTR_ACAQ: /* ?? */
1586 case ATIO2_TC_ATTR_UNTAGGED:
1588 atiop->tag_action = 0;
1591 atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
/* Seed the private adjunct that tracks data transfer progress for
 * the later CTIO2 completions (see isp_handle_platform_ctio). */
1593 atp->tag = atiop->tag_id;
1595 atp->orig_datalen = aep->at_datalen;
1596 atp->last_xframt = 0;
1597 atp->bytes_xfered = 0;
1598 atp->state = ATPD_STATE_CAM;
1599 xpt_done((union ccb*)atiop);
1601 isp_prt(isp, ISP_LOGTDEBUG0,
1602 "ATIO2[%x] CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u",
1603 aep->at_rxid, aep->at_cdb[0] & 0xff, aep->at_iid,
1604 lun, aep->at_taskflags, aep->at_datalen);
1605 rls_lun_statep(isp, tptr);
/*
 * NOTE(review): original line numbers prefix each line; gaps mean interior
 * lines were elided in this extract.  Comments reflect visible lines only.
 */
/*
 * isp_handle_platform_ctio - completion handler for a CTIO/CTIO2
 * (Continue Target I/O) response entry.
 *
 * Recovers the CCB from the firmware handle, accumulates residuals and
 * sense data, and — when this entry is the final one for the exchange
 * (rqs_seqno bit set) — replenishes the ATIO pool and completes the CCB.
 * The two branches handle the FC (ct2_entry_t) and parallel-SCSI
 * (ct_entry_t) layouts, which are, per the original comment, "close
 * enough" to share one function.
 */
1610 isp_handle_platform_ctio(struct ispsoftc *isp, void *arg)
1613 int sentstatus, ok, notify_cam, resid = 0;
1617 * CTIO and CTIO2 are close enough....
/* Both entry layouts keep the system handle at the same offset, so a
 * ct_entry_t cast is safe here regardless of the actual entry type. */
1620 ccb = (union ccb *) isp_find_xs(isp, ((ct_entry_t *)arg)->ct_syshandle);
1621 KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio"));
1622 isp_destroy_handle(isp, ((ct_entry_t *)arg)->ct_syshandle);
/* --- Fibre Channel (CTIO2) branch --- */
1625 ct2_entry_t *ct = arg;
1626 atio_private_data_t *atp = isp_get_atpd(isp, ct->ct_rxid);
1628 isp_prt(isp, ISP_LOGERR,
1629 "cannot find adjunct for %x after I/O",
1633 sentstatus = ct->ct_flags & CT2_SENDSTATUS;
1634 ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
1635 if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) {
1636 ccb->ccb_h.status |= CAM_SENT_SENSE;
/* Low bit of rqs_seqno marks the last entry for this exchange. */
1638 notify_cam = ct->ct_header.rqs_seqno & 0x1;
1639 if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) {
1640 resid = ct->ct_resid;
/* Credit the adjunct with the bytes actually moved this round. */
1641 atp->bytes_xfered += (atp->last_xframt - resid);
1642 atp->last_xframt = 0;
1644 if (sentstatus || !ok) {
1647 isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN,
1648 "CTIO2[%x] sts 0x%x flg 0x%x sns %d resid %d %s",
1649 ct->ct_rxid, ct->ct_status, ct->ct_flags,
1650 (ccb->ccb_h.status & CAM_SENT_SENSE) != 0,
1651 resid, sentstatus? "FIN" : "MID");
1654 /* XXX: should really come after isp_complete_ctio */
1655 atp->state = ATPD_STATE_PDON;
/* --- parallel SCSI (CTIO) branch --- */
1657 ct_entry_t *ct = arg;
1658 sentstatus = ct->ct_flags & CT_SENDSTATUS;
1659 ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
1661 * We *ought* to be able to get back to the original ATIO
1662 * here, but for some reason this gets lost. It's just as
1663 * well because it's squirrelled away as part of periph
1666 * We can live without it as long as we continue to use
1667 * the auto-replenish feature for CTIOs.
1669 notify_cam = ct->ct_header.rqs_seqno & 0x1;
/* Firmware supplied sense data inline in the entry: copy it out. */
1670 if (ct->ct_status & QLTM_SVALID) {
1671 char *sp = (char *)ct;
1672 sp += CTIO_SENSE_OFFSET;
1673 ccb->csio.sense_len =
1674 min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN);
1675 MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len);
1676 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1678 if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) {
1679 resid = ct->ct_resid;
1681 isp_prt(isp, ISP_LOGTDEBUG0,
1682 "CTIO[%x] tag %x iid %d lun %d sts %x flg %x resid %d %s",
1683 ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_lun,
1684 ct->ct_status, ct->ct_flags, resid,
1685 sentstatus? "FIN" : "MID");
1686 tval = ct->ct_fwhandle;
1688 ccb->csio.resid += resid;
1691 * We're here either because intermediate data transfers are done
1692 * and/or the final status CTIO (which may have joined with a
1693 * Data Transfer) is done.
1695 * In any case, for this platform, the upper layers figure out
1696 * what to do next, so all we do here is collect status and
1697 * pass information along. Any DMA handles have already been
/* Intermediate entry: log and return without notifying CAM. */
1700 if (notify_cam == 0) {
1701 isp_prt(isp, ISP_LOGTDEBUG0, " INTER CTIO[0x%x] done", tval);
1705 isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done",
1706 (sentstatus)? " FINAL " : "MIDTERM ", tval);
/* Final entry: put a fresh ATIO back to the firmware, then finish. */
1709 isp_target_putback_atio(ccb);
1711 isp_complete_ctio(ccb);
/*
 * isp_handle_platform_notify_scsi - platform hook for parallel-SCSI
 * immediate-notify entries.  Currently just returns 0; the XXXX marker
 * in the original flags this as unfinished work.
 */
1718 isp_handle_platform_notify_scsi(struct ispsoftc *isp, in_entry_t *inp)
1720 return (0); /* XXXX */
/*
 * NOTE(review): original line numbers prefix each line; gaps mean interior
 * lines were elided in this extract.
 */
/*
 * isp_handle_platform_notify_fc - platform hook for Fibre Channel
 * immediate-notify entries.
 *
 * Port logout/change and global logout are simply logged.  The remaining
 * visible case builds an ABORT TAG immediate-notify CCB for CAM from the
 * affected exchange's RX_ID (inp->in_seqid), pulling a queued notify CCB
 * from the lun's inot list when the exchange's adjunct is still known.
 */
1724 isp_handle_platform_notify_fc(struct ispsoftc *isp, in_fcentry_t *inp)
1727 switch (inp->in_status) {
1728 case IN_PORT_LOGOUT:
1729 isp_prt(isp, ISP_LOGWARN, "port logout of iid %d",
1732 case IN_PORT_CHANGED:
1733 isp_prt(isp, ISP_LOGWARN, "port changed for iid %d",
1736 case IN_GLOBAL_LOGO:
1737 isp_prt(isp, ISP_LOGINFO, "all ports logged out");
/* Abort-task case: locate the exchange's adjunct by RX_ID. */
1741 atio_private_data_t *atp = isp_get_atpd(isp, inp->in_seqid);
1742 struct ccb_immed_notify *inot = NULL;
1745 tstate_t *tptr = get_lun_statep(isp, 0, atp->lun);
1747 inot = (struct ccb_immed_notify *)
1748 SLIST_FIRST(&tptr->inots);
1750 SLIST_REMOVE_HEAD(&tptr->inots,
1754 isp_prt(isp, ISP_LOGWARN,
1755 "abort task RX_ID %x IID %d state %d",
1756 inp->in_seqid, inp->in_iid, atp->state);
1758 isp_prt(isp, ISP_LOGWARN,
1759 "abort task RX_ID %x from iid %d, state unknown",
1760 inp->in_seqid, inp->in_iid);
/* Deliver MSG_ABORT_TAG (tag in args[1..2], little-end first) to CAM. */
1763 inot->initiator_id = inp->in_iid;
1764 inot->sense_len = 0;
1765 inot->message_args[0] = MSG_ABORT_TAG;
1766 inot->message_args[1] = inp->in_seqid & 0xff;
1767 inot->message_args[2] = (inp->in_seqid >> 8) & 0xff;
1768 inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
1769 xpt_done((union ccb *)inot);
/*
 * NOTE(review): original line numbers prefix each line; gaps mean interior
 * lines were elided in this extract.
 */
/*
 * isp_cam_async - CAM asynchronous event callback registered for this SIM.
 *
 * Visible handling: on AC_LOST_DEVICE (parallel SCSI), reset the lost
 * target's negotiation goal flags back toward NVRAM/safe defaults and
 * kick isp_control(ISPCTL_UPDATE_PARAMS) so the chip renegotiates; the
 * previous goal flags are restored into goal_flags afterwards.  Unknown
 * codes are just logged.
 */
1781 isp_cam_async(void *cbarg, u_int32_t code, struct cam_path *path, void *arg)
1783 struct cam_sim *sim;
1784 struct ispsoftc *isp;
1786 sim = (struct cam_sim *)cbarg;
1787 isp = (struct ispsoftc *) cam_sim_softc(sim);
1789 case AC_LOST_DEVICE:
1791 u_int16_t oflags, nflags;
1792 sdparam *sdp = isp->isp_param;
1795 tgt = xpt_path_target_id(path);
/* sdparam array is per-bus; index by the SIM's bus number. */
1797 sdp += cam_sim_bus(sim);
1799 nflags = sdp->isp_devparam[tgt].nvrm_flags;
1800 #ifndef ISP_TARGET_MODE
1801 nflags &= DPARM_SAFE_DFLT;
/* With loaded firmware, force narrow/async as the safe fallback. */
1802 if (isp->isp_loaded_fw) {
1803 nflags |= DPARM_NARROW | DPARM_ASYNC;
1806 nflags = DPARM_DEFAULT;
1808 oflags = sdp->isp_devparam[tgt].goal_flags;
1809 sdp->isp_devparam[tgt].goal_flags = nflags;
1810 sdp->isp_devparam[tgt].dev_update = 1;
1811 isp->isp_update |= (1 << cam_sim_bus(sim));
1812 (void) isp_control(isp,
1813 ISPCTL_UPDATE_PARAMS, NULL);
1814 sdp->isp_devparam[tgt].goal_flags = oflags;
1820 isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code);
/*
 * isp_poll - CAM SIM poll entry point: read the chip's interrupt
 * status registers and, if an interrupt is pending, service it by
 * calling isp_intr() directly (used when interrupts are unavailable).
 */
1826 isp_poll(struct cam_sim *sim)
1828 struct ispsoftc *isp = cam_sim_softc(sim);
1829 u_int16_t isr, sema, mbox;
1832 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
1833 isp_intr(isp, isr, sema, mbox);
/*
 * NOTE(review): original line numbers prefix each line; gaps mean interior
 * lines were elided in this extract.
 */
/*
 * isp_watchdog - per-command timeout handler (scheduled via timeout(9)
 * when the command was started in isp_action).
 *
 * With interrupts suppressed (intsok cleared), re-look-up the command's
 * handle to make sure it is still outstanding, drain any pending chip
 * interrupt, then: complete it if it finished meanwhile; abort it for
 * real if it already had its grace period (XS_CMD_GRACE), releasing DMA
 * and the handle and failing the CCB with CAM_CMD_TIMEOUT; otherwise
 * re-arm the watchdog for one more second and push a SYNC_ALL marker
 * request at the firmware to flush things out.
 */
1840 isp_watchdog(void *arg)
1843 struct ispsoftc *isp = XS_ISP(xs);
1848 * We've decided this command is dead. Make sure we're not trying
1849 * to kill a command that's already dead by getting its handle
1850 * and seeing whether it's still alive.
1853 iok = isp->isp_osinfo.intsok;
1854 isp->isp_osinfo.intsok = 0;
1855 handle = isp_find_handle(isp, xs);
1857 u_int16_t isr, sema, mbox;
1859 if (XS_CMD_DONE_P(xs)) {
1860 isp_prt(isp, ISP_LOGDEBUG1,
1861 "watchdog found done cmd (handle 0x%x)", handle);
/* Guard against the watchdog re-entering itself for this command. */
1866 if (XS_CMD_WDOG_P(xs)) {
1867 isp_prt(isp, ISP_LOGDEBUG2,
1868 "recursive watchdog (handle 0x%x)", handle);
/* Service any pending interrupt: the command may complete right now. */
1874 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
1875 isp_intr(isp, isr, sema, mbox);
1877 if (XS_CMD_DONE_P(xs)) {
1878 isp_prt(isp, ISP_LOGDEBUG2,
1879 "watchdog cleanup for handle 0x%x", handle);
1880 xpt_done((union ccb *) xs);
1881 } else if (XS_CMD_GRACE_P(xs)) {
1883 * Make sure the command is *really* dead before we
1884 * release the handle (and DMA resources) for reuse.
1886 (void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
1889 * After this point, the command is really dead.
1891 if (XS_XFRLEN(xs)) {
1892 ISP_DMAFREE(isp, xs, handle);
1894 isp_destroy_handle(isp, handle);
1895 xpt_print_path(xs->ccb_h.path);
1896 isp_prt(isp, ISP_LOGWARN,
1897 "watchdog timeout for handle 0x%x", handle);
1898 XS_SETERR(xs, CAM_CMD_TIMEOUT);
/* First expiry: grant a one-second grace period and nudge the
 * firmware with a SYNC_ALL marker request. */
1902 u_int16_t nxti, optr;
1903 ispreq_t local, *mp= &local, *qe;
1906 xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz);
1907 if (isp_getrqentry(isp, &nxti, &optr, (void **) &qe)) {
1912 MEMZERO((void *) mp, sizeof (*mp));
1913 mp->req_header.rqs_entry_count = 1;
1914 mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
1915 mp->req_modifier = SYNC_ALL;
/* Bus number is encoded in bit 7 of req_target for marker entries. */
1916 mp->req_target = XS_CHANNEL(xs) << 7;
1917 isp_put_request(isp, mp, qe);
1918 ISP_ADD_REQUEST(isp, nxti);
1921 isp_prt(isp, ISP_LOGDEBUG2, "watchdog with no command");
/* Restore the interrupt-ok state saved on entry. */
1923 isp->isp_osinfo.intsok = iok;
/*
 * NOTE(review): original line numbers prefix each line; gaps mean interior
 * lines were elided in this extract.
 */
/*
 * isp_kthread - Fibre Channel housekeeping kernel thread.
 *
 * Loops forever: drives isp_fc_runstate() until the FC loop reaches a
 * good state (sleeping between retries while firmware/loop state is not
 * ready), then marks the thread "mature", releases the SIM queue if it
 * was frozen for loop-down, and sleeps on isp_osinfo.kproc until someone
 * (e.g. a Change Notify in isp_async) wakes it to re-evaluate.
 */
1928 isp_kthread(void *arg)
1930 struct ispsoftc *isp = arg;
1934 isp->isp_osinfo.intsok = 1;
1937 * The first loop is for our usage where we have yet to have
1938 * gotten good fibre channel state.
1943 isp_prt(isp, ISP_LOGDEBUG0, "kthread: checking FC state");
1944 while (isp_fc_runstate(isp, 2 * 1000000) != 0) {
1945 isp_prt(isp, ISP_LOGDEBUG0, "kthread: FC state ungood");
1946 if (FCPARAM(isp)->isp_fwstate != FW_READY ||
1947 FCPARAM(isp)->isp_loopstate < LOOP_PDB_RCVD) {
/* Before we've ever seen loop (or matured), just poll once a second. */
1948 if (FCPARAM(isp)->loop_seen_once == 0 ||
1949 isp->isp_osinfo.ktmature == 0) {
1953 tsleep(isp_kthread, PRIBIO, "isp_fcthrd", hz);
1958 * Even if we didn't get good loop state we may be
1959 * unfreezing the SIMQ so that we can kill off
1960 * commands (if we've never seen loop before, for example).
1962 isp->isp_osinfo.ktmature = 1;
1963 wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN;
1964 isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN;
/* Release the SIMQ only when no other freeze reason remains. */
1965 if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
1966 isp_prt(isp, ISP_LOGDEBUG0, "kthread: releasing simq");
1967 ISPLOCK_2_CAMLOCK(isp);
1968 xpt_release_simq(isp->isp_sim, 1);
1969 CAMLOCK_2_ISPLOCK(isp);
/* Park until woken (see wakeup(&isp->isp_osinfo.kproc) callers). */
1971 tsleep(&isp->isp_osinfo.kproc, PRIBIO, "isp_fc_worker", 0);
1972 isp_prt(isp, ISP_LOGDEBUG0, "kthread: waiting until called");
/*
 * NOTE(review): the leading decimal on each line below is the original
 * file's own line number; gaps in that numbering mean interior lines
 * (braces, breaks, else-arms) were elided in this extract.
 */
/*
 * isp_action - main CAM SIM action entry point: dispatch on the CCB
 * function code and carry out the requested operation.
 *
 * Handles XPT_SCSI_IO (start I/O + arm watchdog), target-mode resource
 * CCBs (under ISP_TARGET_MODE), device/bus resets, aborts, transfer
 * setting get/set, geometry calculation, and path inquiry.  Lock
 * transitions between the CAM and ISP locks bracket each hardware call.
 */
1977 isp_action(struct cam_sim *sim, union ccb *ccb)
1979 int bus, tgt, error;
1980 struct ispsoftc *isp;
1981 struct ccb_trans_settings *cts;
1983 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n"));
1985 isp = (struct ispsoftc *)cam_sim_softc(sim);
1986 ccb->ccb_h.sim_priv.entries[0].field = 0;
1987 ccb->ccb_h.sim_priv.entries[1].ptr = isp;
/* Lazily push the chip into RUNSTATE on the first real I/O. */
1988 if (isp->isp_state != ISP_RUNSTATE &&
1989 ccb->ccb_h.func_code == XPT_SCSI_IO) {
1990 CAMLOCK_2_ISPLOCK(isp);
1992 if (isp->isp_state != ISP_INITSTATE) {
1995 * Lie. Say it was a selection timeout.
1997 ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN;
1998 xpt_freeze_devq(ccb->ccb_h.path, 1);
2002 isp->isp_state = ISP_RUNSTATE;
2003 ISPLOCK_2_CAMLOCK(isp);
2005 isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code);
2008 switch (ccb->ccb_h.func_code) {
2009 case XPT_SCSI_IO: /* Execute the requested I/O operation */
2011 * Do a couple of preliminary checks...
/* Physical-address CDB pointers are not supported. */
2013 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2014 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
2015 ccb->ccb_h.status = CAM_REQ_INVALID;
/* Range-check target and lun against this chip's limits. */
2021 if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) {
2022 ccb->ccb_h.status = CAM_PATH_INVALID;
2023 } else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) {
2024 ccb->ccb_h.status = CAM_PATH_INVALID;
2026 if (ccb->ccb_h.status == CAM_PATH_INVALID) {
2027 isp_prt(isp, ISP_LOGERR,
2028 "invalid tgt/lun (%d.%d) in XPT_SCSI_IO",
2029 ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
2034 ((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK;
2035 CAMLOCK_2_ISPLOCK(isp);
/* Hand the command to the core; branches below react to its result. */
2036 error = isp_start((XS_T *) ccb);
2039 ccb->ccb_h.status |= CAM_SIM_QUEUED;
/* Arm the watchdog: convert the CCB timeout (ms) to ticks, padded
 * by two seconds, guarding against 32-bit overflow. */
2040 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
2041 u_int64_t ticks = (u_int64_t) hz;
2042 if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
2043 ticks = 60 * 1000 * ticks;
2045 ticks = ccb->ccb_h.timeout * hz;
2046 ticks = ((ticks + 999) / 1000) + hz + hz;
2047 if (ticks >= 0x80000000) {
2048 isp_prt(isp, ISP_LOGERR,
2049 "timeout overflow");
2052 ccb->ccb_h.timeout_ch = timeout(isp_watchdog,
2053 (caddr_t)ccb, (int)ticks);
2055 callout_handle_init(&ccb->ccb_h.timeout_ch);
2057 ISPLOCK_2_CAMLOCK(isp);
/* CMD_RQLATER path: loop not up yet, requeue or fail the command. */
2061 * This can only happen for Fibre Channel
2063 KASSERT((IS_FC(isp)), ("CMD_RQLATER for FC only"));
2064 if (FCPARAM(isp)->loop_seen_once == 0 &&
2065 isp->isp_osinfo.ktmature) {
2066 ISPLOCK_2_CAMLOCK(isp);
2067 XS_SETERR(ccb, CAM_SEL_TIMEOUT);
/* Wake the FC kthread to re-probe loop state, freeze meanwhile. */
2071 wakeup(&isp->isp_osinfo.kproc);
2072 isp_freeze_loopdown(isp, "isp_action(RQLATER)");
2073 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
2074 XS_SETERR(ccb, CAM_REQUEUE_REQ);
2075 ISPLOCK_2_CAMLOCK(isp);
2079 XS_SETERR(ccb, CAM_REQUEUE_REQ);
2080 ISPLOCK_2_CAMLOCK(isp);
/* CMD_COMPLETE path: finished (or failed) synchronously. */
2084 isp_done((struct ccb_scsiio *) ccb);
2085 ISPLOCK_2_CAMLOCK(isp);
2088 isp_prt(isp, ISP_LOGERR,
2089 "What's this? 0x%x at %d in file %s",
2090 error, __LINE__, __FILE__);
2091 XS_SETERR(ccb, CAM_REQ_CMP_ERR);
2093 ISPLOCK_2_CAMLOCK(isp);
2097 #ifdef ISP_TARGET_MODE
2098 case XPT_EN_LUN: /* Enable LUN as a target */
/* isp_en_lun must run with interrupts "not ok"; save/restore intsok. */
2101 CAMLOCK_2_ISPLOCK(isp);
2102 iok = isp->isp_osinfo.intsok;
2103 isp->isp_osinfo.intsok = 0;
2104 isp_en_lun(isp, ccb);
2105 isp->isp_osinfo.intsok = iok;
2106 ISPLOCK_2_CAMLOCK(isp);
/* Queue resource CCBs (ATIOs/INOTs) onto the lun's free lists. */
2110 case XPT_NOTIFY_ACK: /* recycle notify ack */
2111 case XPT_IMMED_NOTIFY: /* Add Immediate Notify Resource */
2112 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */
2115 get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun);
2117 ccb->ccb_h.status = CAM_LUN_INVALID;
2121 ccb->ccb_h.sim_priv.entries[0].field = 0;
2122 ccb->ccb_h.sim_priv.entries[1].ptr = isp;
2123 ccb->ccb_h.flags = 0;
2125 CAMLOCK_2_ISPLOCK(isp);
2126 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
2128 * Note that the command itself may not be done-
2129 * it may not even have had the first CTIO sent.
2132 isp_prt(isp, ISP_LOGTDEBUG0,
2133 "Put FREE ATIO2, lun %d, count now %d",
2134 ccb->ccb_h.target_lun, tptr->atio_count);
2135 SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h,
2137 } else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
2138 SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h,
2143 rls_lun_statep(isp, tptr);
2144 ccb->ccb_h.status = CAM_REQ_INPROG;
2145 ISPLOCK_2_CAMLOCK(isp);
2148 case XPT_CONT_TARGET_IO:
2150 CAMLOCK_2_ISPLOCK(isp);
2151 ccb->ccb_h.status = isp_target_start_ctio(isp, ccb);
2152 if (ccb->ccb_h.status != CAM_REQ_INPROG) {
2153 isp_prt(isp, ISP_LOGWARN,
2154 "XPT_CONT_TARGET_IO: status 0x%x",
2156 XS_SETERR(ccb, CAM_REQUEUE_REQ);
2157 ISPLOCK_2_CAMLOCK(isp);
2160 ISPLOCK_2_CAMLOCK(isp);
2161 ccb->ccb_h.status |= CAM_SIM_QUEUED;
2166 case XPT_RESET_DEV: /* BDR the specified SCSI device */
2168 bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
2169 tgt = ccb->ccb_h.target_id;
2172 CAMLOCK_2_ISPLOCK(isp);
2173 error = isp_control(isp, ISPCTL_RESET_DEV, &tgt);
2174 ISPLOCK_2_CAMLOCK(isp);
2176 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2178 ccb->ccb_h.status = CAM_REQ_CMP;
2182 case XPT_ABORT: /* Abort the specified CCB */
2184 union ccb *accb = ccb->cab.abort_ccb;
2185 CAMLOCK_2_ISPLOCK(isp);
2186 switch (accb->ccb_h.func_code) {
2187 #ifdef ISP_TARGET_MODE
2188 case XPT_ACCEPT_TARGET_IO:
2189 case XPT_IMMED_NOTIFY:
2190 ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb);
2192 case XPT_CONT_TARGET_IO:
2193 isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet");
2194 ccb->ccb_h.status = CAM_UA_ABORT;
2198 error = isp_control(isp, ISPCTL_ABORT_CMD, ccb);
2200 ccb->ccb_h.status = CAM_UA_ABORT;
2202 ccb->ccb_h.status = CAM_REQ_CMP;
2206 ccb->ccb_h.status = CAM_REQ_INVALID;
2209 ISPLOCK_2_CAMLOCK(isp);
2213 #define IS_CURRENT_SETTINGS(c) (c->flags & CCB_TRANS_CURRENT_SETTINGS)
2214 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */
2216 if (!IS_CURRENT_SETTINGS(cts)) {
2217 ccb->ccb_h.status = CAM_REQ_INVALID;
2221 tgt = cts->ccb_h.target_id;
2222 CAMLOCK_2_ISPLOCK(isp);
2224 sdparam *sdp = isp->isp_param;
2227 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2231 * We always update (internally) from goal_flags
2232 * so any request to change settings just gets
2233 * vectored to that location.
2235 dptr = &sdp->isp_devparam[tgt].goal_flags;
2238 * Note that these operations affect
2239 * the goal flags (goal_flags)- not
2240 * the current state flags. Then we mark
2241 * things so that the next operation to
2242 * this HBA will cause the update to occur.
2244 if (cts->valid & CCB_TRANS_DISC_VALID) {
2245 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
2246 *dptr |= DPARM_DISC;
2248 *dptr &= ~DPARM_DISC;
2251 if (cts->valid & CCB_TRANS_TQ_VALID) {
2252 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
2253 *dptr |= DPARM_TQING;
2255 *dptr &= ~DPARM_TQING;
2258 if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) {
2259 switch (cts->bus_width) {
2260 case MSG_EXT_WDTR_BUS_16_BIT:
2261 *dptr |= DPARM_WIDE;
2264 *dptr &= ~DPARM_WIDE;
2268 * Any SYNC RATE of nonzero and SYNC_OFFSET
2269 * of nonzero will cause us to go to the
2270 * selected (from NVRAM) maximum value for
2271 * this device. At a later point, we'll
2272 * allow finer control.
2274 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
2275 (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) &&
2276 (cts->sync_offset > 0)) {
2277 *dptr |= DPARM_SYNC;
2279 *dptr &= ~DPARM_SYNC;
2281 *dptr |= DPARM_SAFE_DFLT;
2282 isp_prt(isp, ISP_LOGDEBUG0,
2283 "SET bus %d targ %d to flags %x off %x per %x",
2284 bus, tgt, sdp->isp_devparam[tgt].goal_flags,
2285 sdp->isp_devparam[tgt].goal_offset,
2286 sdp->isp_devparam[tgt].goal_period);
2287 sdp->isp_devparam[tgt].dev_update = 1;
2288 isp->isp_update |= (1 << bus);
2290 ISPLOCK_2_CAMLOCK(isp);
2291 ccb->ccb_h.status = CAM_REQ_CMP;
2294 case XPT_GET_TRAN_SETTINGS:
2296 tgt = cts->ccb_h.target_id;
2297 CAMLOCK_2_ISPLOCK(isp);
/* FC branch: report fixed tagged/disconnect capability. */
2300 * a lot of normal SCSI things don't make sense.
2302 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
2303 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2305 * How do you measure the width of a high
2306 * speed serial bus? Well, in bytes.
2308 * Offset and period make no sense, though, so we set
2309 * (above) a 'base' transfer speed to be gigabit.
2311 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
/* Parallel-SCSI branch: report active or NVRAM settings. */
2313 sdparam *sdp = isp->isp_param;
2314 int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2315 u_int16_t dval, pval, oval;
2319 if (IS_CURRENT_SETTINGS(cts)) {
/* Force a refresh of active parameters from the chip first. */
2320 sdp->isp_devparam[tgt].dev_refresh = 1;
2321 isp->isp_update |= (1 << bus);
2322 (void) isp_control(isp, ISPCTL_UPDATE_PARAMS,
2324 dval = sdp->isp_devparam[tgt].actv_flags;
2325 oval = sdp->isp_devparam[tgt].actv_offset;
2326 pval = sdp->isp_devparam[tgt].actv_period;
2328 dval = sdp->isp_devparam[tgt].nvrm_flags;
2329 oval = sdp->isp_devparam[tgt].nvrm_offset;
2330 pval = sdp->isp_devparam[tgt].nvrm_period;
2333 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
2335 if (dval & DPARM_DISC) {
2336 cts->flags |= CCB_TRANS_DISC_ENB;
2338 if (dval & DPARM_TQING) {
2339 cts->flags |= CCB_TRANS_TAG_ENB;
2341 if (dval & DPARM_WIDE) {
2342 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2344 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2346 cts->valid = CCB_TRANS_BUS_WIDTH_VALID |
2347 CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2349 if ((dval & DPARM_SYNC) && oval != 0) {
2350 cts->sync_period = pval;
2351 cts->sync_offset = oval;
2353 CCB_TRANS_SYNC_RATE_VALID |
2354 CCB_TRANS_SYNC_OFFSET_VALID;
2356 isp_prt(isp, ISP_LOGDEBUG0,
2357 "GET %s bus %d targ %d to flags %x off %x per %x",
2358 IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM",
2359 bus, tgt, dval, oval, pval);
2361 ISPLOCK_2_CAMLOCK(isp);
2362 ccb->ccb_h.status = CAM_REQ_CMP;
2366 case XPT_CALC_GEOMETRY:
2368 struct ccb_calc_geometry *ccg;
2369 u_int32_t secs_per_cylinder;
2373 if (ccg->block_size == 0) {
2374 isp_prt(isp, ISP_LOGERR,
2375 "%d.%d XPT_CALC_GEOMETRY block size 0?",
2376 ccg->ccb_h.target_id, ccg->ccb_h.target_lun);
2377 ccb->ccb_h.status = CAM_REQ_INVALID;
/* Classic BIOS-style geometry: >1GB gets 63 s/t, else 32 s/t. */
2381 size_mb = ccg->volume_size /((1024L * 1024L) / ccg->block_size);
2382 if (size_mb > 1024) {
2384 ccg->secs_per_track = 63;
2387 ccg->secs_per_track = 32;
2389 secs_per_cylinder = ccg->heads * ccg->secs_per_track;
2390 ccg->cylinders = ccg->volume_size / secs_per_cylinder;
2391 ccb->ccb_h.status = CAM_REQ_CMP;
2395 case XPT_RESET_BUS: /* Reset the specified bus */
2396 bus = cam_sim_bus(sim);
2397 CAMLOCK_2_ISPLOCK(isp);
2398 error = isp_control(isp, ISPCTL_RESET_BUS, &bus);
2399 ISPLOCK_2_CAMLOCK(isp);
2401 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
/* Announce the reset on whichever path corresponds to this bus. */
2403 if (cam_sim_bus(sim) && isp->isp_path2 != NULL)
2404 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2405 else if (isp->isp_path != NULL)
2406 xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2407 ccb->ccb_h.status = CAM_REQ_CMP;
2412 case XPT_TERM_IO: /* Terminate the I/O process */
2413 ccb->ccb_h.status = CAM_REQ_INVALID;
2417 case XPT_PATH_INQ: /* Path routing inquiry */
2419 struct ccb_pathinq *cpi = &ccb->cpi;
2421 cpi->version_num = 1;
2422 #ifdef ISP_TARGET_MODE
2423 cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
2425 cpi->target_sprt = 0;
2427 cpi->hba_eng_cnt = 0;
2428 cpi->max_target = ISP_MAX_TARGETS(isp) - 1;
2429 cpi->max_lun = ISP_MAX_LUNS(isp) - 1;
2430 cpi->bus_id = cam_sim_bus(sim);
2432 cpi->hba_misc = PIM_NOBUSRESET;
2434 * Because our loop ID can shift from time to time,
2435 * make our initiator ID out of range of our bus.
2437 cpi->initiator_id = cpi->max_target + 1;
2440 * Set base transfer capabilities for Fibre Channel.
2441 * Technically not correct because we don't know
2442 * what media we're running on top of- but we'll
2443 * look good if we always say 100MB/s.
2445 if (FCPARAM(isp)->isp_gbspeed == 2)
2446 cpi->base_transfer_speed = 200000;
2448 cpi->base_transfer_speed = 100000;
2449 cpi->hba_inquiry = PI_TAG_ABLE;
/* Parallel-SCSI path-inquiry values come from per-bus sdparam. */
2451 sdparam *sdp = isp->isp_param;
2452 sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path));
2453 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
2455 cpi->initiator_id = sdp->isp_initiator_id;
2456 cpi->base_transfer_speed = 3300;
2458 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2459 strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN);
2460 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2461 cpi->unit_number = cam_sim_unit(sim);
2462 cpi->ccb_h.status = CAM_REQ_CMP;
2467 ccb->ccb_h.status = CAM_REQ_INVALID;
/* Debug mask used below to decide whether to log failed completions. */
2473 #define ISPDDB (CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB)
/*
 * NOTE(review): original line numbers prefix each line; gaps mean interior
 * lines were elided in this extract.
 */
/*
 * isp_done - common command completion: translate SCSI status into CAM
 * status bits, freeze the device queue on failure, cancel the watchdog
 * timeout, and hand the CCB back to CAM via xpt_done().
 */
2475 isp_done(struct ccb_scsiio *sccb)
2477 struct ispsoftc *isp = XS_ISP(sccb);
2480 XS_SETERR(sccb, CAM_REQ_CMP);
/* Command "succeeded" at the transport level but SCSI status is bad:
 * rewrite the CAM status accordingly. */
2482 if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
2483 (sccb->scsi_status != SCSI_STATUS_OK)) {
2484 sccb->ccb_h.status &= ~CAM_STATUS_MASK;
2485 if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) &&
2486 (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) {
2487 sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
2489 sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
2493 sccb->ccb_h.status &= ~CAM_SIM_QUEUED;
/* Any failure freezes the device queue (once) so CAM can recover. */
2494 if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2495 if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
2496 sccb->ccb_h.status |= CAM_DEV_QFRZN;
2497 xpt_freeze_devq(sccb->ccb_h.path, 1);
2498 isp_prt(isp, ISP_LOGDEBUG0,
2499 "freeze devq %d.%d cam sts %x scsi sts %x",
2500 sccb->ccb_h.target_id, sccb->ccb_h.target_lun,
2501 sccb->ccb_h.status, sccb->scsi_status);
2505 if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) &&
2506 (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2507 xpt_print_path(sccb->ccb_h.path);
2508 isp_prt(isp, ISP_LOGINFO,
2509 "cam completion status 0x%x", sccb->ccb_h.status);
/* Mark done, cancel the watchdog (unless it is currently running),
 * and complete the CCB under the CAM lock. */
2512 XS_CMD_S_DONE(sccb);
2513 if (XS_CMD_WDOG_P(sccb) == 0) {
2514 untimeout(isp_watchdog, (caddr_t)sccb, sccb->ccb_h.timeout_ch);
2515 if (XS_CMD_GRACE_P(sccb)) {
2516 isp_prt(isp, ISP_LOGDEBUG2,
2517 "finished command on borrowed time");
2519 XS_CMD_S_CLEAR(sccb);
2520 ISPLOCK_2_CAMLOCK(isp);
2521 xpt_done((union ccb *) sccb);
2522 CAMLOCK_2_ISPLOCK(isp);
/*
 * NOTE(review): the leading decimal on each line below is the original
 * file's own line number; gaps in that numbering mean interior lines
 * were elided in this extract.  Comments reflect visible lines only.
 */
/*
 * isp_async - platform callback for asynchronous events raised by the
 * ISP core (negotiation changes, bus/loop resets, fabric arrivals,
 * target-mode actions, firmware crashes, ...).
 */
2527 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
/* New negotiated parameters for a target: build a transfer-settings
 * CCB from actv_* values and announce AC_TRANSFER_NEG to CAM. */
2531 case ISPASYNC_NEW_TGT_PARAMS:
2534 sdparam *sdp = isp->isp_param;
2535 struct ccb_trans_settings cts;
2536 struct cam_path *tmppath;
2538 bzero(&cts, sizeof (struct ccb_trans_settings));
/* arg packs bus in the high 16 bits and target in the low bits. */
2540 tgt = *((int *)arg);
2541 bus = (tgt >> 16) & 0xffff;
2544 ISPLOCK_2_CAMLOCK(isp);
2545 if (xpt_create_path(&tmppath, NULL,
2546 cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim),
2547 tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2548 CAMLOCK_2_ISPLOCK(isp);
2549 isp_prt(isp, ISP_LOGWARN,
2550 "isp_async cannot make temp path for %d.%d",
2555 CAMLOCK_2_ISPLOCK(isp);
2556 flags = sdp->isp_devparam[tgt].actv_flags;
2557 cts.flags = CCB_TRANS_CURRENT_SETTINGS;
2558 cts.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2559 if (flags & DPARM_DISC) {
2560 cts.flags |= CCB_TRANS_DISC_ENB;
2562 if (flags & DPARM_TQING) {
2563 cts.flags |= CCB_TRANS_TAG_ENB;
2565 cts.valid |= CCB_TRANS_BUS_WIDTH_VALID;
/* NOTE(review): wide flag maps to the 8-bit constant and vice versa
 * here — looks inverted relative to XPT_GET_TRAN_SETTINGS above;
 * left untouched, worth confirming against upstream history. */
2566 cts.bus_width = (flags & DPARM_WIDE)?
2567 MSG_EXT_WDTR_BUS_8_BIT : MSG_EXT_WDTR_BUS_16_BIT;
2568 cts.sync_period = sdp->isp_devparam[tgt].actv_period;
2569 cts.sync_offset = sdp->isp_devparam[tgt].actv_offset;
2570 if (flags & DPARM_SYNC) {
2572 CCB_TRANS_SYNC_RATE_VALID |
2573 CCB_TRANS_SYNC_OFFSET_VALID;
2575 isp_prt(isp, ISP_LOGDEBUG2,
2576 "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x",
2577 bus, tgt, sdp->isp_devparam[tgt].actv_period,
2578 sdp->isp_devparam[tgt].actv_offset, flags);
2579 xpt_setup_ccb(&cts.ccb_h, tmppath, 1);
2580 ISPLOCK_2_CAMLOCK(isp);
2581 xpt_async(AC_TRANSFER_NEG, tmppath, &cts);
2582 xpt_free_path(tmppath);
2583 CAMLOCK_2_ISPLOCK(isp);
/* Bus reset: propagate AC_BUS_RESET on the matching CAM path. */
2586 case ISPASYNC_BUS_RESET:
2587 bus = *((int *)arg);
2588 isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected",
2590 if (bus > 0 && isp->isp_path2) {
2591 ISPLOCK_2_CAMLOCK(isp);
2592 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2593 CAMLOCK_2_ISPLOCK(isp);
2594 } else if (isp->isp_path) {
2595 ISPLOCK_2_CAMLOCK(isp);
2596 xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2597 CAMLOCK_2_ISPLOCK(isp);
/* Loop-state transitions: freeze the SIMQ until the loop recovers. */
2601 if (isp->isp_path) {
2602 isp_freeze_loopdown(isp, "ISPASYNC_LIP");
2604 isp_prt(isp, ISP_LOGINFO, "LIP Received");
2606 case ISPASYNC_LOOP_RESET:
2607 if (isp->isp_path) {
2608 isp_freeze_loopdown(isp, "ISPASYNC_LOOP_RESET");
2610 isp_prt(isp, ISP_LOGINFO, "Loop Reset Received");
2612 case ISPASYNC_LOOP_DOWN:
2613 if (isp->isp_path) {
2614 isp_freeze_loopdown(isp, "ISPASYNC_LOOP_DOWN");
2616 isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
2618 case ISPASYNC_LOOP_UP:
2620 * Now we just note that Loop has come up. We don't
2621 * actually do anything because we're waiting for a
2622 * Change Notify before activating the FC cleanup
2623 * thread to look at the state of the loop again.
2625 isp_prt(isp, ISP_LOGINFO, "Loop UP");
/* Device arrival/departure on the loop: purely informational log. */
2627 case ISPASYNC_PROMENADE:
2629 const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x "
2630 "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
2631 static const char *roles[4] = {
2632 "(none)", "Target", "Initiator", "Target/Initiator"
2634 fcparam *fcp = isp->isp_param;
2635 int tgt = *((int *) arg);
2636 struct lportdb *lp = &fcp->portdb[tgt];
2638 isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
2639 roles[lp->roles & 0x3],
2640 (lp->valid)? "Arrived" : "Departed",
2641 (u_int32_t) (lp->port_wwn >> 32),
2642 (u_int32_t) (lp->port_wwn & 0xffffffffLL),
2643 (u_int32_t) (lp->node_wwn >> 32),
2644 (u_int32_t) (lp->node_wwn & 0xffffffffLL));
/* Change Notify (PDB or SNS): log and wake the FC kthread so it
 * re-runs isp_fc_runstate (see isp_kthread). */
2648 case ISPASYNC_CHANGE_NOTIFY:
2649 if (arg == ISPASYNC_CHANGE_PDB) {
2650 isp_prt(isp, ISP_LOGINFO,
2651 "Port Database Changed");
2652 } else if (arg == ISPASYNC_CHANGE_SNS) {
2653 isp_prt(isp, ISP_LOGINFO,
2654 "Name Server Database Changed");
2656 wakeup(&isp->isp_osinfo.kproc);
/* Fabric device announcement: file it into the local port database
 * (skipping reserved loop ids) unless it's our own port id. */
2658 case ISPASYNC_FABRIC_DEV:
2660 int target, base, lim;
2661 fcparam *fcp = isp->isp_param;
2662 struct lportdb *lp = NULL;
2663 struct lportdb *clp = (struct lportdb *) arg;
2666 switch (clp->port_type) {
2693 isp_prt(isp, ISP_LOGINFO,
2694 "%s Fabric Device @ PortID 0x%x", pt, clp->portid);
2697 * If we don't have an initiator role we bail.
2699 * We just use ISPASYNC_FABRIC_DEV for announcement purposes.
2702 if ((isp->isp_role & ISP_ROLE_INITIATOR) == 0) {
2707 * Is this entry for us? If so, we bail.
2710 if (fcp->isp_portid == clp->portid) {
2715 * Else, the default policy is to find room for it in
2716 * our local port database. Later, when we execute
2717 * the call to isp_pdb_sync either this newly arrived
2718 * or already logged in device will be (re)announced.
2721 if (fcp->isp_topo == TOPO_FL_PORT)
2726 if (fcp->isp_topo == TOPO_N_PORT)
2732 * Is it already in our list?
2734 for (target = base; target < lim; target++) {
2735 if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
2738 lp = &fcp->portdb[target];
2739 if (lp->port_wwn == clp->port_wwn &&
2740 lp->node_wwn == clp->node_wwn) {
/* Not found: take the first free slot (port_wwn == 0). */
2748 for (target = base; target < lim; target++) {
2749 if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
2752 lp = &fcp->portdb[target];
2753 if (lp->port_wwn == 0) {
2757 if (target == lim) {
2758 isp_prt(isp, ISP_LOGWARN,
2759 "out of space for fabric devices");
2762 lp->port_type = clp->port_type;
2763 lp->fc4_type = clp->fc4_type;
2764 lp->node_wwn = clp->node_wwn;
2765 lp->port_wwn = clp->port_wwn;
2766 lp->portid = clp->portid;
2770 #ifdef ISP_TARGET_MODE
2771 case ISPASYNC_TARGET_MESSAGE:
2773 tmd_msg_t *mp = arg;
2774 isp_prt(isp, ISP_LOGALL,
2775 "bus %d iid %d tgt %d lun %d ttype %x tval %x msg[0]=%x",
2776 mp->nt_bus, (int) mp->nt_iid, (int) mp->nt_tgt,
2777 (int) mp->nt_lun, mp->nt_tagtype, mp->nt_tagval,
2781 case ISPASYNC_TARGET_EVENT:
2783 tmd_event_t *ep = arg;
2784 isp_prt(isp, ISP_LOGALL,
2785 "bus %d event code 0x%x", ep->ev_bus, ep->ev_event);
/* Target-mode response entries: dispatch to the platform handlers
 * defined earlier in this file based on the entry type. */
2788 case ISPASYNC_TARGET_ACTION:
2789 switch (((isphdr_t *)arg)->rqs_entry_type) {
2791 isp_prt(isp, ISP_LOGWARN,
2792 "event 0x%x for unhandled target action",
2793 ((isphdr_t *)arg)->rqs_entry_type);
2795 case RQSTYPE_NOTIFY:
2797 rv = isp_handle_platform_notify_scsi(isp,
2798 (in_entry_t *) arg);
2800 rv = isp_handle_platform_notify_fc(isp,
2801 (in_fcentry_t *) arg);
2805 rv = isp_handle_platform_atio(isp, (at_entry_t *) arg);
2808 rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg);
2812 rv = isp_handle_platform_ctio(isp, arg);
2814 case RQSTYPE_ENABLE_LUN:
2815 case RQSTYPE_MODIFY_LUN:
2816 if (IS_DUALBUS(isp)) {
2818 GET_BUS_VAL(((lun_entry_t *)arg)->le_rsvd);
2822 isp_cv_signal_rqe(isp, bus,
2823 ((lun_entry_t *)arg)->le_status);
/* Firmware crash: report mailbox state, optionally arm a crash dump
 * (ISP_FW_CRASH_DUMP), otherwise restart via ISPASYNC_FW_RESTARTED. */
2828 case ISPASYNC_FW_CRASH:
2830 u_int16_t mbox1, mbox6;
2831 mbox1 = ISP_READ(isp, OUTMAILBOX1);
2832 if (IS_DUALBUS(isp)) {
2833 mbox6 = ISP_READ(isp, OUTMAILBOX6);
2837 isp_prt(isp, ISP_LOGERR,
2838 "Internal Firmware Error on bus %d @ RISC Address 0x%x",
2840 #ifdef ISP_FW_CRASH_DUMP
2842 * XXX: really need a thread to do this right.
2845 FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT;
2846 FCPARAM(isp)->isp_loopstate = LOOP_NIL;
2847 isp_freeze_loopdown(isp, "f/w crash");
2851 isp_async(isp, ISPASYNC_FW_RESTARTED, NULL);
2855 case ISPASYNC_UNHANDLED_RESPONSE:
2858 isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd);
2866 * Locks are held before coming here.
2869 isp_uninit(struct ispsoftc *isp)
2871 ISP_WRITE(isp, HCCR, HCCR_CMD_RESET);
2876 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
2879 if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
2882 printf("%s: ", device_get_nameunit(isp->isp_dev));