2 * Bus independent FreeBSD shim for the aic7xxx based adaptec SCSI controllers
4 * Copyright (c) 1994-2002 Justin T. Gibbs.
5 * Copyright (c) 2001-2002 Adaptec Inc.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions, and the following disclaimer,
13 * without modification.
14 * 2. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
17 * Alternatively, this software may be distributed under the terms of the
18 * GNU Public License ("GPL").
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
24 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * $Id: //depot/aic7xxx/freebsd/dev/aic7xxx/aic79xx_osm.c#27 $
34 * $FreeBSD: src/sys/dev/aic7xxx/aic79xx_osm.c,v 1.3.2.4 2003/06/10 03:26:07 gibbs Exp $
35 * $DragonFly: src/sys/dev/disk/aic7xxx/aic79xx_osm.c,v 1.11 2006/12/22 23:26:15 swildner Exp $
38 #include "aic79xx_osm.h"
39 #include "aic79xx_inline.h"
46 #ifndef AHD_TMODE_ENABLE
47 #define AHD_TMODE_ENABLE 0
50 #define ccb_scb_ptr spriv_ptr0
53 static void ahd_dump_targcmd(struct target_cmd *cmd);
55 static int ahd_modevent(module_t mod, int type, void *data);
56 static void ahd_action(struct cam_sim *sim, union ccb *ccb);
57 static void ahd_set_tran_settings(struct ahd_softc *ahd,
58 int our_id, char channel,
59 struct ccb_trans_settings *cts);
60 static void ahd_get_tran_settings(struct ahd_softc *ahd,
61 int our_id, char channel,
62 struct ccb_trans_settings *cts);
63 static void ahd_async(void *callback_arg, uint32_t code,
64 struct cam_path *path, void *arg);
65 static void ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
66 int nsegments, int error);
67 static void ahd_poll(struct cam_sim *sim);
68 static void ahd_setup_data(struct ahd_softc *ahd, struct cam_sim *sim,
69 struct ccb_scsiio *csio, struct scb *scb);
70 static void ahd_abort_ccb(struct ahd_softc *ahd, struct cam_sim *sim,
72 static int ahd_create_path(struct ahd_softc *ahd,
73 char channel, u_int target, u_int lun,
74 struct cam_path **path);
77 static void ahd_set_recoveryscb(struct ahd_softc *ahd, struct scb *scb);
/*
 * ahd_create_path:
 *	Build a CAM path to the given target/lun on this controller.
 *	The path id is taken from sim_b (secondary channel) or the primary
 *	sim, and handed to xpt_create_path() with no peripheral binding.
 *	NOTE(review): this excerpt elides lines (the channel test selecting
 *	between the two path_id assignments, the local path_id declaration
 *	and braces are not visible) -- confirm against the full source.
 */
81 ahd_create_path(struct ahd_softc *ahd, char channel, u_int target,
82 u_int lun, struct cam_path **path)
/* Secondary channel presumably selects sim_b -- TODO confirm condition. */
87 path_id = cam_sim_path(ahd->platform_data->sim_b);
89 path_id = cam_sim_path(ahd->platform_data->sim);
91 return (xpt_create_path(path, /*periph*/NULL,
92 path_id, target, lun));
/*
 * ahd_map_int:
 *	Register ahd_platform_intr as the interrupt handler for this
 *	controller's IRQ resource and report any bus_setup_intr() failure
 *	via device_printf.
 *	NOTE(review): the error-check and return lines are elided in this
 *	excerpt.
 */
96 ahd_map_int(struct ahd_softc *ahd)
100 /* Hook up our interrupt handler */
101 error = bus_setup_intr(ahd->dev_softc, ahd->platform_data->irq,
102 0, ahd_platform_intr, ahd,
103 &ahd->platform_data->ih, NULL);
105 device_printf(ahd->dev_softc, "bus_setup_intr() failed: %d\n",
/*
 * ahd_attach:
 *	CAM-side attach: print controller info, allocate and register a
 *	SIM (queue depth AHD_MAX_QUEUE), create a wildcard path, register
 *	an AC_LOST_DEVICE async callback, hook a shutdown_final event
 *	handler, and enable controller interrupts.
 *	NOTE(review): locking, error unwinding and the return value are
 *	elided in this excerpt.
 */
111 * Attach all the sub-devices we can find
114 ahd_attach(struct ahd_softc *ahd)
117 struct ccb_setasync csa;
119 struct cam_path *path;
126 ahd_controller_info(ahd, ahd_info);
127 kprintf("%s\n", ahd_info);
131 * Construct our SIM entry
133 sim = cam_sim_alloc(ahd_action, ahd_poll, "ahd", ahd,
134 device_get_unit(ahd->dev_softc),
135 1, AHD_MAX_QUEUE, NULL);
139 if (xpt_bus_register(sim, /*bus_id*/0) != CAM_SUCCESS) {
145 if (xpt_create_path(&path, /*periph*/NULL,
146 cam_sim_path(sim), CAM_TARGET_WILDCARD,
147 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
/* Path creation failed: undo the bus registration before bailing out. */
148 xpt_bus_deregister(cam_sim_path(sim));
/* Ask CAM to call ahd_async() when a device on this bus is lost. */
154 xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
155 csa.ccb_h.func_code = XPT_SASYNC_CB;
156 csa.event_enable = AC_LOST_DEVICE;
157 csa.callback = ahd_async;
158 csa.callback_arg = sim;
159 xpt_action((union ccb *)&csa);
163 ahd->platform_data->sim = sim;
164 ahd->platform_data->path = path;
166 /* We have to wait until after any system dumps... */
167 ahd->platform_data->eh =
168 EVENTHANDLER_REGISTER(shutdown_final, ahd_shutdown,
169 ahd, SHUTDOWN_PRI_DEFAULT);
170 ahd_intr_enable(ahd, TRUE);
/*
 * ahd_platform_intr:
 *	Bus interrupt entry point; recovers the softc from the opaque
 *	handler argument.  The body that actually services the interrupt
 *	(locking and the call into the core) is elided in this excerpt.
 */
179 * Catch an interrupt from the adapter
182 ahd_platform_intr(void *arg)
184 struct ahd_softc *ahd;
186 ahd = (struct ahd_softc *)arg;
/*
 * ahd_done:
 *	Complete an SCB that the adapter has finished with: remove it from
 *	the pending list, stop its timeout callout, post-sync and unload
 *	its data DMA map, then translate controller status into CAM status
 *	and free the SCB.  Handles target-mode continuation CCBs, recovery
 *	SCB completion (reinstating peer timeouts), and the bounce of
 *	autosense data (normal and packetized) into the client's csio.
 *	NOTE(review): many lines (braces, else arms, xpt_done calls) are
 *	elided in this excerpt; comments describe only the visible code.
 */
191 * We have an scb which has been processed by the
192 * adaptor, now we look to see how the operation
196 ahd_done(struct ahd_softc *ahd, struct scb *scb)
200 CAM_DEBUG(scb->io_ctx->ccb_h.path, CAM_DEBUG_TRACE,
201 ("ahd_done - scb %d\n", SCB_GET_TAG(scb)));
/* The SCB is no longer outstanding: unlink it and cancel its timeout. */
204 LIST_REMOVE(scb, pending_links);
206 callout_stop(&ccb->ccb_h.timeout_ch);
/* Finish the data-phase DMA before anyone looks at the buffer. */
208 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
211 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
212 op = BUS_DMASYNC_POSTREAD;
214 op = BUS_DMASYNC_POSTWRITE;
215 bus_dmamap_sync(ahd->buffer_dmat, scb->dmamap, op);
216 bus_dmamap_unload(ahd->buffer_dmat, scb->dmamap);
219 #ifdef AHD_TARGET_MODE
220 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
221 struct cam_path *ccb_path;
224 * If we have finally disconnected, clean up our
225 * pending device state.
226 * XXX - There may be error states that cause where
227 * we will remain connected.
229 ccb_path = ccb->ccb_h.path;
230 if (ahd->pending_device != NULL
231 && xpt_path_comp(ahd->pending_device->path, ccb_path) == 0) {
233 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
234 ahd->pending_device = NULL;
236 xpt_print_path(ccb->ccb_h.path);
237 kprintf("Still disconnected\n");
242 if (ahd_get_transaction_status(scb) == CAM_REQ_INPROG)
243 ccb->ccb_h.status |= CAM_REQ_CMP;
244 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
245 ahd_free_scb(ahd, scb);
252 * If the recovery SCB completes, we have to be
253 * out of our timeout.
255 if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
256 struct scb *list_scb;
259 * We were able to complete the command successfully,
260 * so reinstate the timeouts for all other pending
/* Re-arm the timeout callout for every still-pending SCB. */
263 LIST_FOREACH(list_scb, &ahd->pending_scbs, pending_links) {
267 ccb = list_scb->io_ctx;
268 if (ccb->ccb_h.timeout == CAM_TIME_INFINITY)
271 time = ccb->ccb_h.timeout;
274 callout_reset(&ccb->ccb_h.timeout_ch, time,
275 ahd_timeout, list_scb);
/* Report the recovery action as a timeout rather than BDR/abort. */
278 if (ahd_get_transaction_status(scb) == CAM_BDR_SENT
279 || ahd_get_transaction_status(scb) == CAM_REQ_ABORTED)
280 ahd_set_transaction_status(scb, CAM_CMD_TIMEOUT);
281 ahd_print_path(ahd, scb);
282 kprintf("no longer in timeout, status = %x\n",
286 /* Don't clobber any existing error state */
287 if (ahd_get_transaction_status(scb) == CAM_REQ_INPROG) {
288 ccb->ccb_h.status |= CAM_REQ_CMP;
289 } else if ((scb->flags & SCB_SENSE) != 0) {
291 * We performed autosense retrieval.
293 * Zero any sense not transferred by the
294 * device. The SCSI spec mandates that any
295 * untransfered data should be assumed to be
296 * zero. Complete the 'bounce' of sense information
297 * through buffers accessible via bus-space by
298 * copying it into the clients csio.
300 memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data));
301 memcpy(&ccb->csio.sense_data,
302 ahd_get_sense_buf(ahd, scb),
303 /* XXX What size do we want to use??? */
304 sizeof(ccb->csio.sense_data)
305 - ccb->csio.sense_resid);
306 scb->io_ctx->ccb_h.status |= CAM_AUTOSNS_VALID;
307 } else if ((scb->flags & SCB_PKT_SENSE) != 0) {
/* Packetized sense arrives wrapped in a status IU; unwrap it. */
308 struct scsi_status_iu_header *siu;
313 * Copy only the sense data into the provided buffer.
315 siu = (struct scsi_status_iu_header *)scb->sense_data;
316 sense_len = MIN(scsi_4btoul(siu->sense_length),
317 sizeof(ccb->csio.sense_data));
318 memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data));
319 memcpy(&ccb->csio.sense_data,
320 ahd_get_sense_buf(ahd, scb) + SIU_SENSE_OFFSET(siu),
322 kprintf("Copied %d bytes of sense data offset %d:", sense_len,
323 SIU_SENSE_OFFSET(siu));
324 for (i = 0; i < sense_len; i++)
325 kprintf(" 0x%x", ((uint8_t *)&ccb->csio.sense_data)[i]);
327 scb->io_ctx->ccb_h.status |= CAM_AUTOSNS_VALID;
329 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
330 ahd_free_scb(ahd, scb);
/*
 * ahd_action:
 *	CAM SIM action entry point: dispatch on ccb_h.func_code.  Handles
 *	target-mode ATIO/CTIO queuing, initiator SCSI I/O and device reset
 *	(SCB allocation, hardware SCB setup, then ahd_setup_data /
 *	ahd_execute_scb), immediate notifies, LUN enable, aborts,
 *	transfer-setting get/set, geometry calculation, bus reset,
 *	and path inquiry.
 *	NOTE(review): braces, breaks, else arms and locking lines are
 *	elided in this excerpt; comments describe only the visible code.
 */
335 ahd_action(struct cam_sim *sim, union ccb *ccb)
337 struct ahd_softc *ahd;
338 #ifdef AHD_TARGET_MODE
339 struct ahd_tmode_lstate *lstate;
345 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahd_action\n"));
347 ahd = (struct ahd_softc *)cam_sim_softc(sim);
349 target_id = ccb->ccb_h.target_id;
350 our_id = SIM_SCSI_ID(ahd, sim);
352 switch (ccb->ccb_h.func_code) {
353 /* Common cases first */
354 #ifdef AHD_TARGET_MODE
355 case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */
356 case XPT_CONT_TARGET_IO:/* Continue Host Target I/O Connection*/
358 struct ahd_tmode_tstate *tstate;
361 status = ahd_find_tmode_devs(ahd, sim, ccb, &tstate,
364 if (status != CAM_REQ_CMP) {
365 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
366 /* Response from the black hole device */
368 lstate = ahd->black_hole;
370 ccb->ccb_h.status = status;
/* ATIOs are queued until a command arrives to consume them. */
375 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
378 SLIST_INSERT_HEAD(&lstate->accept_tios, &ccb->ccb_h,
380 ccb->ccb_h.status = CAM_REQ_INPROG;
381 if ((ahd->flags & AHD_TQINFIFO_BLOCKED) != 0)
382 ahd_run_tqinfifo(ahd, /*paused*/FALSE);
388 * The target_id represents the target we attempt to
389 * select. In target mode, this is the initiator of
390 * the original command.
393 target_id = ccb->csio.init_id;
397 case XPT_SCSI_IO: /* Execute the requested I/O operation */
398 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */
401 struct hardware_scb *hscb;
402 struct ahd_initiator_tinfo *tinfo;
403 struct ahd_tmode_tstate *tstate;
/* Refuse initiator-role requests if we are not an initiator. */
406 if ((ahd->flags & AHD_INITIATORROLE) == 0
407 && (ccb->ccb_h.func_code == XPT_SCSI_IO
408 || ccb->ccb_h.func_code == XPT_RESET_DEV)) {
409 ccb->ccb_h.status = CAM_PROVIDE_FAIL;
418 tinfo = ahd_fetch_transinfo(ahd, 'A', our_id,
/*
 * Choose an SCB collision index: untagged, packetized (IU) and
 * target-mode requests must never share a collision slot.
 */
420 if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) == 0
421 || (tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0
422 || ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
423 col_idx = AHD_NEVER_COL_IDX;
425 col_idx = AHD_BUILD_COL_IDX(target_id,
426 ccb->ccb_h.target_lun);
/* Out of SCBs: freeze the queue and ask CAM to requeue this CCB. */
428 if ((scb = ahd_get_scb(ahd, col_idx)) == NULL) {
430 xpt_freeze_simq(sim, /*count*/1);
431 ahd->flags |= AHD_RESOURCE_SHORTAGE;
433 ccb->ccb_h.status = CAM_REQUEUE_REQ;
441 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_SUBTRACE,
442 ("start scb(%p)\n", scb));
445 * So we can find the SCB when an abort is requested
447 ccb->ccb_h.ccb_scb_ptr = scb;
450 * Put all the arguments for the xfer in the scb
453 hscb->scsiid = BUILD_SCSIID(ahd, sim, target_id, our_id);
454 hscb->lun = ccb->ccb_h.target_lun;
455 if (ccb->ccb_h.func_code == XPT_RESET_DEV) {
/* Device reset: no data phase, send a LUN-reset task management. */
457 scb->flags |= SCB_DEVICE_RESET;
458 hscb->control |= MK_MESSAGE;
459 hscb->task_management = SIU_TASKMGMT_LUN_RESET;
460 ahd_execute_scb(scb, NULL, 0, 0);
462 #ifdef AHD_TARGET_MODE
463 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
464 struct target_data *tdata;
466 tdata = &hscb->shared_data.tdata;
467 if (ahd->pending_device == lstate)
468 scb->flags |= SCB_TARGET_IMMEDIATE;
469 hscb->control |= TARGET_SCB;
470 tdata->target_phases = 0;
471 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
472 tdata->target_phases |= SPHASE_PENDING;
474 ccb->csio.scsi_status;
476 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT)
477 tdata->target_phases |= NO_DISCONNECT;
479 tdata->initiator_tag =
480 ahd_htole16(ccb->csio.tag_id);
483 hscb->task_management = 0;
484 if (ccb->ccb_h.flags & CAM_TAG_ACTION_VALID)
485 hscb->control |= ccb->csio.tag_action;
487 ahd_setup_data(ahd, sim, &ccb->csio, scb);
491 #ifdef AHD_TARGET_MODE
493 case XPT_IMMED_NOTIFY:
495 struct ahd_tmode_tstate *tstate;
496 struct ahd_tmode_lstate *lstate;
499 status = ahd_find_tmode_devs(ahd, sim, ccb, &tstate,
502 if (status != CAM_REQ_CMP) {
503 ccb->ccb_h.status = status;
507 SLIST_INSERT_HEAD(&lstate->immed_notifies, &ccb->ccb_h,
509 ccb->ccb_h.status = CAM_REQ_INPROG;
510 ahd_send_lstate_events(ahd, lstate);
513 case XPT_EN_LUN: /* Enable LUN as a target */
514 ahd_handle_en_lun(ahd, sim, ccb);
518 case XPT_ABORT: /* Abort the specified CCB */
520 ahd_abort_ccb(ahd, sim, ccb);
523 case XPT_SET_TRAN_SETTINGS:
526 ahd_set_tran_settings(ahd, SIM_SCSI_ID(ahd, sim),
527 SIM_CHANNEL(ahd, sim), &ccb->cts);
532 case XPT_GET_TRAN_SETTINGS:
533 /* Get default/user set transfer settings for the target */
536 ahd_get_tran_settings(ahd, SIM_SCSI_ID(ahd, sim),
537 SIM_CHANNEL(ahd, sim), &ccb->cts);
542 case XPT_CALC_GEOMETRY:
544 struct ccb_calc_geometry *ccg;
546 uint32_t secs_per_cylinder;
550 size_mb = ccg->volume_size
551 / ((1024L * 1024L) / ccg->block_size);
552 extended = ahd->flags & AHD_EXTENDED_TRANS_A;
/* Classic BIOS geometry: >1GB with extended translation gets 63 spt. */
554 if (size_mb > 1024 && extended) {
556 ccg->secs_per_track = 63;
559 ccg->secs_per_track = 32;
561 secs_per_cylinder = ccg->heads * ccg->secs_per_track;
562 ccg->cylinders = ccg->volume_size / secs_per_cylinder;
563 ccb->ccb_h.status = CAM_REQ_CMP;
567 case XPT_RESET_BUS: /* Reset the specified SCSI bus */
572 found = ahd_reset_channel(ahd, SIM_CHANNEL(ahd, sim),
573 /*initiate reset*/TRUE);
576 xpt_print_path(SIM_PATH(ahd, sim));
577 kprintf("SCSI bus reset delivered. "
578 "%d SCBs aborted.\n", found);
580 ccb->ccb_h.status = CAM_REQ_CMP;
584 case XPT_TERM_IO: /* Terminate the I/O process */
586 ccb->ccb_h.status = CAM_REQ_INVALID;
589 case XPT_PATH_INQ: /* Path routing inquiry */
591 struct ccb_pathinq *cpi = &ccb->cpi;
593 cpi->version_num = 1; /* XXX??? */
594 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
595 if ((ahd->features & AHD_WIDE) != 0)
596 cpi->hba_inquiry |= PI_WIDE_16;
597 if ((ahd->features & AHD_TARGETMODE) != 0) {
598 cpi->target_sprt = PIT_PROCESSOR
602 cpi->target_sprt = 0;
605 cpi->hba_eng_cnt = 0;
606 cpi->max_target = (ahd->features & AHD_WIDE) ? 15 : 7;
607 cpi->max_lun = AHD_NUM_LUNS - 1;
608 cpi->initiator_id = ahd->our_id;
609 if ((ahd->flags & AHD_RESET_BUS_A) == 0) {
610 cpi->hba_misc |= PIM_NOBUSRESET;
612 cpi->bus_id = cam_sim_bus(sim);
613 cpi->base_transfer_speed = 3300;
614 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
615 strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
616 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
617 cpi->unit_number = cam_sim_unit(sim);
618 #ifdef AHD_NEW_TRAN_SETTINGS
619 cpi->protocol = PROTO_SCSI;
620 cpi->protocol_version = SCSI_REV_2;
621 cpi->transport = XPORT_SPI;
/*
 * NOTE(review): both transport_version/ppr_options pairs appear here;
 * presumably an elided conditional (DT capability) selects between
 * versions 2/ST and 4/DT-ST -- confirm against the full source.
 */
622 cpi->transport_version = 2;
623 cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_ST;
624 cpi->transport_version = 4;
625 cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_DT_ST;
627 cpi->ccb_h.status = CAM_REQ_CMP;
/* Unknown function codes are rejected. */
632 ccb->ccb_h.status = CAM_PROVIDE_FAIL;
/*
 * ahd_set_tran_settings:
 *	Apply XPT_SET_TRAN_SETTINGS: update goal (current-settings request)
 *	or user defaults for disconnect, tagged queuing, bus width and
 *	sync rate/offset/PPR options, validating each against controller
 *	limits before committing via ahd_set_width()/ahd_set_syncrate().
 *	Two compile-time variants: the AHD_NEW_TRAN_SETTINGS (spi/scsi
 *	sub-structures) path and the legacy CCB_TRANS_* flags path.
 *	NOTE(review): braces, else arms, locking and early returns are
 *	elided in this excerpt.
 */
640 ahd_set_tran_settings(struct ahd_softc *ahd, int our_id, char channel,
641 struct ccb_trans_settings *cts)
643 #ifdef AHD_NEW_TRAN_SETTINGS
644 struct ahd_devinfo devinfo;
645 struct ccb_trans_settings_scsi *scsi;
646 struct ccb_trans_settings_spi *spi;
647 struct ahd_initiator_tinfo *tinfo;
648 struct ahd_tmode_tstate *tstate;
649 uint16_t *discenable;
653 scsi = &cts->proto_specific.scsi;
654 spi = &cts->xport_specific.spi;
655 ahd_compile_devinfo(&devinfo, SIM_SCSI_ID(ahd, sim),
656 cts->ccb_h.target_id,
657 cts->ccb_h.target_lun,
658 SIM_CHANNEL(ahd, sim),
660 tinfo = ahd_fetch_transinfo(ahd, devinfo.channel,
662 devinfo.target, &tstate);
/* Select which settings bank (goal vs. user) this request updates. */
664 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
665 update_type |= AHD_TRANS_GOAL;
666 discenable = &tstate->discenable;
667 tagenable = &tstate->tagenable;
668 tinfo->curr.protocol_version = cts->protocol_version;
669 tinfo->curr.transport_version = cts->transport_version;
670 tinfo->goal.protocol_version = cts->protocol_version;
671 tinfo->goal.transport_version = cts->transport_version;
672 } else if (cts->type == CTS_TYPE_USER_SETTINGS) {
673 update_type |= AHD_TRANS_USER;
674 discenable = &ahd->user_discenable;
675 tagenable = &ahd->user_tagenable;
676 tinfo->user.protocol_version = cts->protocol_version;
677 tinfo->user.transport_version = cts->transport_version;
679 cts->ccb_h.status = CAM_REQ_INVALID;
683 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
684 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
685 *discenable |= devinfo.target_mask;
687 *discenable &= ~devinfo.target_mask;
690 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
691 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
692 *tagenable |= devinfo.target_mask;
694 *tagenable &= ~devinfo.target_mask;
697 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
698 ahd_validate_width(ahd, /*tinfo limit*/NULL,
699 &spi->bus_width, ROLE_UNKNOWN);
700 ahd_set_width(ahd, &devinfo, spi->bus_width,
701 update_type, /*paused*/FALSE);
/* Fields the caller did not supply are filled from stored settings. */
704 if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0) {
705 if (update_type == AHD_TRANS_USER)
706 spi->ppr_options = tinfo->user.ppr_options;
708 spi->ppr_options = tinfo->goal.ppr_options;
711 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) {
712 if (update_type == AHD_TRANS_USER)
713 spi->sync_offset = tinfo->user.offset;
715 spi->sync_offset = tinfo->goal.offset;
718 if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
719 if (update_type == AHD_TRANS_USER)
720 spi->sync_period = tinfo->user.period;
722 spi->sync_period = tinfo->goal.period;
725 if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0)
726 || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) {
729 maxsync = AHD_SYNCRATE_MAX;
/* DT requires a 16-bit bus; IU requires disconnect to be enabled. */
731 if (spi->bus_width != MSG_EXT_WDTR_BUS_16_BIT)
732 spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;
734 if ((*discenable & devinfo.target_mask) == 0)
735 spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;
737 ahd_find_syncrate(ahd, &spi->sync_period,
738 &spi->ppr_options, maxsync);
739 ahd_validate_offset(ahd, /*tinfo limit*/NULL,
740 spi->sync_period, &spi->sync_offset,
741 spi->bus_width, ROLE_UNKNOWN);
743 /* We use a period of 0 to represent async */
744 if (spi->sync_offset == 0) {
745 spi->sync_period = 0;
746 spi->ppr_options = 0;
749 ahd_set_syncrate(ahd, &devinfo, spi->sync_period,
750 spi->sync_offset, spi->ppr_options,
751 update_type, /*paused*/FALSE);
753 cts->ccb_h.status = CAM_REQ_CMP;
/* Legacy (pre-AHD_NEW_TRAN_SETTINGS) CCB_TRANS_* variant. */
755 struct ahd_devinfo devinfo;
756 struct ahd_initiator_tinfo *tinfo;
757 struct ahd_tmode_tstate *tstate;
758 uint16_t *discenable;
762 ahd_compile_devinfo(&devinfo, SIM_SCSI_ID(ahd, sim),
763 cts->ccb_h.target_id,
764 cts->ccb_h.target_lun,
765 SIM_CHANNEL(ahd, sim),
767 tinfo = ahd_fetch_transinfo(ahd, devinfo.channel,
769 devinfo.target, &tstate);
771 if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
772 update_type |= AHD_TRANS_GOAL;
773 discenable = &tstate->discenable;
774 tagenable = &tstate->tagenable;
775 } else if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
776 update_type |= AHD_TRANS_USER;
777 discenable = &ahd->user_discenable;
778 tagenable = &ahd->user_tagenable;
780 cts->ccb_h.status = CAM_REQ_INVALID;
784 if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
785 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
786 *discenable |= devinfo.target_mask;
788 *discenable &= ~devinfo.target_mask;
791 if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
792 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
793 *tagenable |= devinfo.target_mask;
795 *tagenable &= ~devinfo.target_mask;
798 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
799 ahd_validate_width(ahd, /*tinfo limit*/NULL,
800 &cts->bus_width, ROLE_UNKNOWN);
801 ahd_set_width(ahd, &devinfo, cts->bus_width,
802 update_type, /*paused*/FALSE);
805 if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0) {
806 if (update_type == AHD_TRANS_USER)
807 cts->sync_offset = tinfo->user.offset;
809 cts->sync_offset = tinfo->goal.offset;
812 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) {
813 if (update_type == AHD_TRANS_USER)
814 cts->sync_period = tinfo->user.period;
816 cts->sync_period = tinfo->goal.period;
819 if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0)
820 || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)
821 || ((cts->valid & CCB_TRANS_TQ_VALID) != 0)
822 || ((cts->valid & CCB_TRANS_DISC_VALID) != 0)) {
826 maxsync = AHD_SYNCRATE_MAX;
/* DT-capable requests need a 16-bit bus and a DT-range period. */
828 if (cts->sync_period <= AHD_SYNCRATE_DT
829 && cts->bus_width == MSG_EXT_WDTR_BUS_16_BIT) {
830 ppr_options = tinfo->user.ppr_options
831 | MSG_EXT_PPR_DT_REQ;
/* IU mode requires both tagged queuing and disconnect. */
834 if ((*tagenable & devinfo.target_mask) == 0
835 || (*discenable & devinfo.target_mask) == 0)
836 ppr_options &= ~MSG_EXT_PPR_IU_REQ;
838 ahd_find_syncrate(ahd, &cts->sync_period,
839 &ppr_options, maxsync);
840 ahd_validate_offset(ahd, /*tinfo limit*/NULL,
841 cts->sync_period, &cts->sync_offset,
842 MSG_EXT_WDTR_BUS_8_BIT,
845 /* We use a period of 0 to represent async */
846 if (cts->sync_offset == 0) {
847 cts->sync_period = 0;
/* Presumably gated on PPR being negotiated -- condition elided here. */
852 && tinfo->user.transport_version >= 3) {
853 tinfo->goal.transport_version =
854 tinfo->user.transport_version;
855 tinfo->curr.transport_version =
856 tinfo->user.transport_version;
859 ahd_set_syncrate(ahd, &devinfo, cts->sync_period,
860 cts->sync_offset, ppr_options,
861 update_type, /*paused*/FALSE);
863 cts->ccb_h.status = CAM_REQ_CMP;
/*
 * ahd_get_tran_settings:
 *	Fill an XPT_GET_TRAN_SETTINGS request with either the current
 *	(negotiated) or user-default transfer settings for the addressed
 *	target: disconnect/tag-queuing flags, sync period/offset, bus
 *	width and (new-style only) PPR options and protocol/transport
 *	versions.  Wildcard-lun requests omit the per-device TQ/disc bits.
 *	Two compile-time variants, as in ahd_set_tran_settings.
 *	NOTE(review): braces and else arms are elided in this excerpt.
 */
868 ahd_get_tran_settings(struct ahd_softc *ahd, int our_id, char channel,
869 struct ccb_trans_settings *cts)
871 #ifdef AHD_NEW_TRAN_SETTINGS
872 struct ahd_devinfo devinfo;
873 struct ccb_trans_settings_scsi *scsi;
874 struct ccb_trans_settings_spi *spi;
875 struct ahd_initiator_tinfo *targ_info;
876 struct ahd_tmode_tstate *tstate;
877 struct ahd_transinfo *tinfo;
879 scsi = &cts->proto_specific.scsi;
880 spi = &cts->xport_specific.spi;
881 ahd_compile_devinfo(&devinfo, our_id,
882 cts->ccb_h.target_id,
883 cts->ccb_h.target_lun,
884 channel, ROLE_UNKNOWN);
885 targ_info = ahd_fetch_transinfo(ahd, devinfo.channel,
887 devinfo.target, &tstate);
/* Pick the settings bank the caller asked to read. */
889 if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
890 tinfo = &targ_info->curr;
892 tinfo = &targ_info->user;
894 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
895 spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
896 if (cts->type == CTS_TYPE_USER_SETTINGS) {
897 if ((ahd->user_discenable & devinfo.target_mask) != 0)
898 spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
900 if ((ahd->user_tagenable & devinfo.target_mask) != 0)
901 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
903 if ((tstate->discenable & devinfo.target_mask) != 0)
904 spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
906 if ((tstate->tagenable & devinfo.target_mask) != 0)
907 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
909 cts->protocol_version = tinfo->protocol_version;
910 cts->transport_version = tinfo->transport_version;
912 spi->sync_period = tinfo->period;
913 spi->sync_offset = tinfo->offset;
914 spi->bus_width = tinfo->width;
915 spi->ppr_options = tinfo->ppr_options;
917 cts->protocol = PROTO_SCSI;
918 cts->transport = XPORT_SPI;
919 spi->valid = CTS_SPI_VALID_SYNC_RATE
920 | CTS_SPI_VALID_SYNC_OFFSET
921 | CTS_SPI_VALID_BUS_WIDTH
922 | CTS_SPI_VALID_PPR_OPTIONS;
/* TQ/disconnect are per-lun; meaningless on a wildcard path. */
924 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
925 scsi->valid = CTS_SCSI_VALID_TQ;
926 spi->valid |= CTS_SPI_VALID_DISC;
931 cts->ccb_h.status = CAM_REQ_CMP;
/* Legacy (pre-AHD_NEW_TRAN_SETTINGS) CCB_TRANS_* variant. */
933 struct ahd_devinfo devinfo;
934 struct ahd_initiator_tinfo *targ_info;
935 struct ahd_tmode_tstate *tstate;
936 struct ahd_transinfo *tinfo;
938 ahd_compile_devinfo(&devinfo, our_id,
939 cts->ccb_h.target_id,
940 cts->ccb_h.target_lun,
941 channel, ROLE_UNKNOWN);
942 targ_info = ahd_fetch_transinfo(ahd, devinfo.channel,
944 devinfo.target, &tstate);
946 if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0)
947 tinfo = &targ_info->curr;
949 tinfo = &targ_info->user;
951 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
952 if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) == 0) {
953 if ((ahd->user_discenable & devinfo.target_mask) != 0)
954 cts->flags |= CCB_TRANS_DISC_ENB;
956 if ((ahd->user_tagenable & devinfo.target_mask) != 0)
957 cts->flags |= CCB_TRANS_TAG_ENB;
959 if ((tstate->discenable & devinfo.target_mask) != 0)
960 cts->flags |= CCB_TRANS_DISC_ENB;
962 if ((tstate->tagenable & devinfo.target_mask) != 0)
963 cts->flags |= CCB_TRANS_TAG_ENB;
965 cts->sync_period = tinfo->period;
966 cts->sync_offset = tinfo->offset;
967 cts->bus_width = tinfo->width;
969 cts->valid = CCB_TRANS_SYNC_RATE_VALID
970 | CCB_TRANS_SYNC_OFFSET_VALID
971 | CCB_TRANS_BUS_WIDTH_VALID;
973 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD)
974 cts->valid |= CCB_TRANS_DISC_VALID|CCB_TRANS_TQ_VALID;
976 cts->ccb_h.status = CAM_REQ_CMP;
/*
 * ahd_async:
 *	CAM async event callback (registered in ahd_attach for
 *	AC_LOST_DEVICE).  On device loss, revert the device's negotiated
 *	transfer settings to async/narrow so the next device at that id
 *	starts from a safe baseline.
 *	NOTE(review): the switch on 'code', locking, and trailing
 *	arguments of some calls are elided in this excerpt.
 */
981 ahd_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
983 struct ahd_softc *ahd;
986 sim = (struct cam_sim *)callback_arg;
987 ahd = (struct ahd_softc *)cam_sim_softc(sim);
991 struct ahd_devinfo devinfo;
994 ahd_compile_devinfo(&devinfo, SIM_SCSI_ID(ahd, sim),
995 xpt_path_target_id(path),
996 xpt_path_lun_id(path),
997 SIM_CHANNEL(ahd, sim),
1001 * Revert to async/narrow transfers
1002 * for the next device.
1005 ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
1006 AHD_TRANS_GOAL|AHD_TRANS_CUR, /*paused*/FALSE);
1007 ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0,
1008 /*ppr_options*/0, AHD_TRANS_GOAL|AHD_TRANS_CUR,
1010 ahd_unlock(ahd, &s);
/*
 * ahd_execute_scb:
 *	bus_dmamap_load() callback (also called directly with pre-built or
 *	empty segment lists): copy the DMA segments into the SCB's S/G
 *	list, pre-sync the data map, finalize negotiation/disconnect bits
 *	in the hardware SCB, add the SCB to the pending list, arm its
 *	timeout, and queue it to the controller (or kick the sequencer
 *	directly for immediate target-mode SCBs).  On mapping error or a
 *	CCB already marked failed, unload/free and complete the CCB.
 *	NOTE(review): braces, else arms, xpt_done calls and some locking
 *	are elided in this excerpt.
 */
1019 ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments,
1024 struct ahd_softc *ahd;
1025 struct ahd_initiator_tinfo *tinfo;
1026 struct ahd_tmode_tstate *tstate;
1030 scb = (struct scb *)arg;
1032 ahd = scb->ahd_softc;
/* Mapping failed: EFBIG maps to CAM_REQ_TOO_BIG, others to CMP_ERR. */
1036 ahd_set_transaction_status(scb, CAM_REQ_TOO_BIG);
1038 ahd_set_transaction_status(scb, CAM_REQ_CMP_ERR);
1040 bus_dmamap_unload(ahd->buffer_dmat, scb->dmamap);
1042 ahd_free_scb(ahd, scb);
1043 ahd_unlock(ahd, &s);
1048 if (nsegments != 0) {
1050 bus_dmasync_op_t op;
1053 /* Copy the segments into our SG list */
1054 for (i = nsegments, sg = scb->sg_list; i > 0; i--) {
1056 sg = ahd_sg_setup(ahd, scb, sg, dm_segs->ds_addr,
/* Pre-sync the buffer for the upcoming DMA in the proper direction. */
1062 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1063 op = BUS_DMASYNC_PREREAD;
1065 op = BUS_DMASYNC_PREWRITE;
1067 bus_dmamap_sync(ahd->buffer_dmat, scb->dmamap, op);
1069 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
1070 struct target_data *tdata;
1072 tdata = &scb->hscb->shared_data.tdata;
1073 tdata->target_phases |= DPHASE_PENDING;
1074 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
1075 tdata->data_phase = P_DATAOUT;
1077 tdata->data_phase = P_DATAIN;
1084 * Last time we need to check if this SCB needs to
/* The CCB may have been aborted while the mapping was in flight. */
1087 if (ahd_get_transaction_status(scb) != CAM_REQ_INPROG) {
1089 bus_dmamap_unload(ahd->buffer_dmat,
1091 ahd_free_scb(ahd, scb);
1092 ahd_unlock(ahd, &s);
1097 tinfo = ahd_fetch_transinfo(ahd, SCSIID_CHANNEL(ahd, scb->hscb->scsiid),
1098 SCSIID_OUR_ID(scb->hscb->scsiid),
1099 SCSIID_TARGET(ahd, scb->hscb->scsiid),
1102 mask = SCB_GET_TARGET_MASK(ahd, scb);
1104 if ((tstate->discenable & mask) != 0
1105 && (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
1106 scb->hscb->control |= DISCENB;
/* Packetized (IU) transfers carry task management in the IU, not a msg. */
1108 if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
1109 scb->flags |= SCB_PACKETIZED;
1110 if (scb->hscb->task_management != 0)
1111 scb->hscb->control &= ~MK_MESSAGE;
1114 if ((ccb->ccb_h.flags & CAM_NEGOTIATE) != 0
1115 && (tinfo->goal.width != 0
1116 || tinfo->goal.period != 0
1117 || tinfo->goal.ppr_options != 0)) {
1118 scb->flags |= SCB_NEGOTIATE;
1119 scb->hscb->control |= MK_MESSAGE;
1120 } else if ((tstate->auto_negotiate & mask) != 0) {
1121 scb->flags |= SCB_AUTO_NEGOTIATE;
1122 scb->hscb->control |= MK_MESSAGE;
1125 LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links);
1127 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1129 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
/* CAM_TIME_DEFAULT becomes a 5 second timeout. */
1132 if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
1133 ccb->ccb_h.timeout = 5 * 1000;
1135 time = ccb->ccb_h.timeout;
1138 callout_reset(&ccb->ccb_h.timeout_ch, time, ahd_timeout, scb);
1141 if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
1142 /* Define a mapping from our tag to the SCB. */
1143 ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
1145 ahd_set_scbptr(ahd, SCB_GET_TAG(scb));
1146 ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG);
1149 ahd_queue_scb(ahd, scb);
1152 ahd_unlock(ahd, &s);
/*
 * ahd_poll:
 *	Polled-mode SIM entry point -- run the core interrupt handler
 *	against this SIM's softc.
 */
1156 ahd_poll(struct cam_sim *sim)
1158 ahd_intr(cam_sim_softc(sim));
/*
 * ahd_setup_data:
 *	Prepare an SCSI I/O CCB for execution: copy or reference the CDB
 *	into the hardware SCB (rejecting CDBs larger than MAX_CDB_LEN
 *	unless passed by physical pointer), then hand the data buffer to
 *	ahd_execute_scb() -- via bus_dmamap_load() for a virtual buffer,
 *	via a synthesized single segment for a physical buffer, or using
 *	the caller's physical S/G list directly.  No-data CCBs go straight
 *	to ahd_execute_scb() with zero segments.
 *	NOTE(review): braces, else arms, returns and locking are elided
 *	in this excerpt.
 */
1162 ahd_setup_data(struct ahd_softc *ahd, struct cam_sim *sim,
1163 struct ccb_scsiio *csio, struct scb *scb)
1165 struct hardware_scb *hscb;
1166 struct ccb_hdr *ccb_h;
1169 ccb_h = &csio->ccb_h;
1172 csio->sense_resid = 0;
1173 if (ccb_h->func_code == XPT_SCSI_IO) {
1174 hscb->cdb_len = csio->cdb_len;
1175 if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {
/* Oversized virtual CDBs cannot be embedded: fail the request. */
1177 if (hscb->cdb_len > MAX_CDB_LEN
1178 && (ccb_h->flags & CAM_CDB_PHYS) == 0) {
1182 * Should CAM start to support CDB sizes
1183 * greater than 16 bytes, we could use
1184 * the sense buffer to store the CDB.
1186 ahd_set_transaction_status(scb,
1189 ahd_free_scb(ahd, scb);
1190 ahd_unlock(ahd, &s);
1191 xpt_done((union ccb *)csio);
/* Physical CDB pointer: let the controller fetch it from host memory. */
1194 if ((ccb_h->flags & CAM_CDB_PHYS) != 0) {
1195 hscb->shared_data.idata.cdb_from_host.cdbptr =
1196 ahd_htole64((uintptr_t)csio->cdb_io.cdb_ptr);
1197 hscb->shared_data.idata.cdb_from_host.cdblen =
1199 hscb->cdb_len |= SCB_CDB_LEN_PTR;
1201 memcpy(hscb->shared_data.idata.cdb,
1202 csio->cdb_io.cdb_ptr,
/* Inline CDB bytes supplied directly in the CCB. */
1206 if (hscb->cdb_len > MAX_CDB_LEN) {
1209 ahd_set_transaction_status(scb,
1212 ahd_free_scb(ahd, scb);
1213 ahd_unlock(ahd, &s);
1214 xpt_done((union ccb *)csio);
1217 memcpy(hscb->shared_data.idata.cdb,
1218 csio->cdb_io.cdb_bytes, hscb->cdb_len);
1222 /* Only use S/G if there is a transfer */
1223 if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1224 if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
1225 /* We've been given a pointer to a single buffer */
1226 if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
1230 error = bus_dmamap_load(ahd->buffer_dmat,
/* Deferred mapping: freeze the queue to preserve ordering. */
1236 if (error == EINPROGRESS) {
1238 * So as to maintain ordering,
1239 * freeze the controller queue
1240 * until our mapping is
1243 xpt_freeze_simq(sim,
1245 scb->io_ctx->ccb_h.status |=
1250 struct bus_dma_segment seg;
1252 /* Pointer to physical buffer */
1253 if (csio->dxfer_len > AHD_MAXTRANSFER_SIZE)
1254 panic("ahd_setup_data - Transfer size "
1255 "larger than can device max");
1258 (bus_addr_t)(vm_offset_t)csio->data_ptr;
1259 seg.ds_len = csio->dxfer_len;
1260 ahd_execute_scb(scb, &seg, 1, 0);
1263 struct bus_dma_segment *segs;
1265 if ((ccb_h->flags & CAM_DATA_PHYS) != 0)
1266 panic("ahd_setup_data - Physical segment "
1267 "pointers unsupported");
1269 if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0)
1270 panic("ahd_setup_data - Virtual segment "
1271 "addresses unsupported");
1273 /* Just use the segments provided */
1274 segs = (struct bus_dma_segment *)csio->data_ptr;
1275 ahd_execute_scb(scb, segs, csio->sglist_cnt, 0);
/* No data phase at all: run the SCB with an empty segment list. */
1278 ahd_execute_scb(scb, NULL, 0, 0);
/*
 * ahd_set_recoveryscb:
 *	Mark an SCB as the recovery SCB (first time only): freeze the SIM
 *	queue so no new CCBs arrive while recovery is in progress (noting
 *	CAM_RELEASE_SIMQ so the freeze is undone at completion), and stop
 *	the timeout callout on every pending SCB -- ahd_done() re-arms
 *	them once recovery succeeds.
 */
1284 ahd_set_recoveryscb(struct ahd_softc *ahd, struct scb *scb) {
1286 if ((scb->flags & SCB_RECOVERY_SCB) == 0) {
1287 struct scb *list_scb;
1289 scb->flags |= SCB_RECOVERY_SCB;
1292 * Take all queued, but not sent SCBs out of the equation.
1293 * Also ensure that no new CCBs are queued to us while we
1294 * try to fix this problem.
/* Only freeze once; CAM_RELEASE_SIMQ records that we owe a release. */
1296 if ((scb->io_ctx->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
1297 xpt_freeze_simq(SCB_GET_SIM(ahd, scb), /*count*/1);
1298 scb->io_ctx->ccb_h.status |= CAM_RELEASE_SIMQ;
1302 * Go through all of our pending SCBs and remove
1303 * any scheduled timeouts for them. We will reschedule
1304 * them after we've successfully fixed this problem.
1306 LIST_FOREACH(list_scb, &ahd->pending_scbs, pending_links) {
1309 ccb = list_scb->io_ctx;
1310 callout_stop(&ccb->ccb_h.timeout_ch);
/*
 * Command (CCB) timeout handler, scheduled via callout_reset() when an SCB
 * is queued.  Escalation strategy, in order:
 *   - If the SCB already completed, just log (interrupts may be broken).
 *   - If another SCB owns the bus, extend our timeout and wait for it.
 *   - If we are acting as the initiator and the bus is active, stuff a
 *     Bus Device Reset message and assert ATN.
 *   - If the device is disconnected, requeue this SCB at the head of the
 *     qinfifo with MK_MESSAGE set so a BDR goes out on (re)selection.
 *   - As a last resort (or on repeat timeouts), reset the whole channel.
 * NOTE(review): many declarations (target, channel, lun, i, last_phase,
 * found, disconnected, saved_scbptr, s, sim), braces, and return/break
 * statements are elided in this excerpt.
 */
1317 ahd_timeout(void *arg)
1320 struct ahd_softc *ahd;
1321 ahd_mode_state saved_modes;
1333 scb = (struct scb *)arg;
1334 ahd = (struct ahd_softc *)scb->ahd_softc;
/* Quiesce the sequencer and drain any posted completions first. */
1338 ahd_pause_and_flushwork(ahd);
1340 saved_modes = ahd_save_modes(ahd);
1342 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
/*
 * NOTE(review): this ACK pulse on SCSISIGO plus the two kprintfs looks
 * like leftover bring-up/debug instrumentation — confirm before keeping.
 */
1343 ahd_outb(ahd, SCSISIGO, ACKO);
1344 kprintf("set ACK\n");
1345 ahd_outb(ahd, SCSISIGO, 0);
1346 kprintf("clearing Ack\n");
1347 ahd_restore_modes(ahd, saved_modes);
1349 if ((scb->flags & SCB_ACTIVE) == 0) {
1350 /* Previous timeout took care of me already */
1351 kprintf("%s: Timedout SCB already complete. "
1352 "Interrupts may not be functioning.\n", ahd_name(ahd));
1354 ahd_unlock(ahd, &s);
1358 target = SCB_GET_TARGET(ahd, scb);
1359 channel = SCB_GET_CHANNEL(ahd, scb);
1360 lun = SCB_GET_LUN(scb);
1362 ahd_print_path(ahd, scb);
1363 kprintf("SCB 0x%x - timed out\n", SCB_GET_TAG(scb));
1364 ahd_dump_card_state(ahd);
1365 ahd_reset_channel(ahd, SIM_CHANNEL(ahd, sim),
1366 /*initiate reset*/TRUE);
1367 ahd_unlock(ahd, &s);
1370 last_phase = ahd_inb(ahd, LASTPHASE);
/* Dump the S/G list of the timed-out SCB for post-mortem analysis. */
1371 if (scb->sg_count > 0) {
1372 for (i = 0; i < scb->sg_count; i++) {
1373 kprintf("sg[%d] - Addr 0x%x : Length %d\n",
1375 ((struct ahd_dma_seg *)scb->sg_list)[i].addr,
1376 ((struct ahd_dma_seg *)scb->sg_list)[i].len
/* Second timeout on the same SCB: escalate straight to a bus reset. */
1380 if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) {
1382 * Been down this road before.
1383 * Do a full bus reset.
1386 ahd_set_transaction_status(scb, CAM_CMD_TIMEOUT);
1387 found = ahd_reset_channel(ahd, channel, /*Initiate Reset*/TRUE);
1388 kprintf("%s: Issued Channel %c Bus Reset. "
1389 "%d SCBs aborted\n", ahd_name(ahd), channel, found);
1392 * If we are a target, transition to bus free and report
1395 * The target/initiator that is holding up the bus may not
1396 * be the same as the one that triggered this timeout
1397 * (different commands have different timeout lengths).
1398 * If the bus is idle and we are acting as the initiator
1399 * for this request, queue a BDR message to the timed out
1400 * target. Otherwise, if the timed out transaction is
1402 * Initiator transaction:
1403 * Stuff the message buffer with a BDR message and assert
1404 * ATN in the hopes that the target will let go of the bus
1405 * and go to the mesgout phase. If this fails, we'll
1406 * get another timeout 2 seconds later which will attempt
1409 * Target transaction:
1410 * Transition to BUS FREE and report the error.
1411 * It's good to be the target!
1413 u_int active_scb_index;
1416 saved_scbptr = ahd_get_scbptr(ahd);
1417 active_scb_index = saved_scbptr;
/* Bus is busy with an identified connection and a valid SCB index. */
1419 if (last_phase != P_BUSFREE
1420 && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) == 0
1421 && (active_scb_index < ahd->scb_data.numscbs)) {
1422 struct scb *active_scb;
1425 * If the active SCB is not us, assume that
1426 * the active SCB has a longer timeout than
1427 * the timedout SCB, and wait for the active
1430 active_scb = ahd_lookup_scb(ahd, active_scb_index);
1431 if (active_scb != scb) {
1432 struct ccb_hdr *ccbh;
1433 uint64_t newtimeout;
1435 ahd_print_path(ahd, scb);
1436 kprintf("Other SCB Timeout%s",
1437 (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0
1438 ? " again\n" : "\n");
1439 scb->flags |= SCB_OTHERTCL_TIMEOUT;
/* Extend our deadline to at least the bus owner's timeout. */
1441 MAX(active_scb->io_ctx->ccb_h.timeout,
1442 scb->io_ctx->ccb_h.timeout);
1445 ccbh = &scb->io_ctx->ccb_h;
1446 callout_reset(&scb->io_ctx->ccb_h.timeout_ch,
1447 newtimeout, ahd_timeout, scb);
1449 ahd_unlock(ahd, &s);
/* We own the bus.  Target-role SCB: abort and go bus-free. */
1454 if ((scb->hscb->control & TARGET_SCB) != 0) {
1457 * Send back any queued up transactions
1458 * and properly record the error condition.
1460 ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb),
1461 SCB_GET_CHANNEL(ahd, scb),
1467 /* Will clear us from the bus */
1469 ahd_unlock(ahd, &s);
/* Initiator role: queue a BDR message and assert ATN. */
1473 ahd_set_recoveryscb(ahd, active_scb);
1474 ahd_outb(ahd, MSG_OUT, HOST_MSG);
1475 ahd_outb(ahd, SCSISIGO, last_phase|ATNO);
1476 ahd_print_path(ahd, active_scb);
1477 kprintf("BDR message in message buffer\n");
1478 active_scb->flags |= SCB_DEVICE_RESET;
/* Give the BDR two seconds before escalating further. */
1479 callout_reset(&active_scb->io_ctx->ccb_h.timeout_ch,
1480 2 * hz, ahd_timeout, active_scb);
1485 /* XXX Shouldn't panic. Just punt instead? */
1486 if ((scb->hscb->control & TARGET_SCB) != 0)
1487 panic("Timed-out target SCB but bus idle");
1489 if (last_phase != P_BUSFREE
1490 && (ahd_inb(ahd, SSTAT0) & TARGET) != 0) {
1491 /* XXX What happened to the SCB? */
1492 /* Hung target selection. Goto busfree */
1493 kprintf("%s: Hung target selection\n",
1496 ahd_unlock(ahd, &s);
/* Still queued (never sent) vs. disconnected from the bus. */
1500 if (ahd_search_qinfifo(ahd, target, channel, lun,
1501 SCB_GET_TAG(scb), ROLE_INITIATOR,
1502 /*status*/0, SEARCH_COUNT) > 0) {
1503 disconnected = FALSE;
1505 disconnected = TRUE;
1510 ahd_set_recoveryscb(ahd, scb);
1512 * Actually re-queue this SCB in an attempt
1513 * to select the device before it reconnects.
1514 * In either case (selection or reselection),
1515 * we will now issue a target reset to the
1518 * Set the MK_MESSAGE control bit indicating
1519 * that we desire to send a message. We
1520 * also set the disconnected flag since
1521 * in the paging case there is no guarantee
1522 * that our SCB control byte matches the
1523 * version on the card. We don't want the
1524 * sequencer to abort the command thinking
1525 * an unsolicited reselection occurred.
1527 scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
1528 scb->flags |= SCB_DEVICE_RESET;
1531 * The sequencer will never re-reference the
1532 * in-core SCB. To make sure we are notified
1533 * during reselection, set the MK_MESSAGE flag
1534 * in the card's copy of the SCB.
1536 ahd_set_scbptr(ahd, SCB_GET_TAG(scb));
1537 ahd_outb(ahd, SCB_CONTROL,
1538 ahd_inb(ahd, SCB_CONTROL)|MK_MESSAGE);
1541 * Clear out any entries in the QINFIFO first
1542 * so we are the next SCB for this target
1545 ahd_search_qinfifo(ahd,
1546 SCB_GET_TARGET(ahd, scb),
1547 channel, SCB_GET_LUN(scb),
1552 ahd_print_path(ahd, scb);
1553 kprintf("Queuing a BDR SCB\n");
1554 ahd_qinfifo_requeue_tail(ahd, scb);
1555 ahd_set_scbptr(ahd, saved_scbptr);
1556 callout_reset(&scb->io_ctx->ccb_h.timeout_ch,
1557 2 * hz, ahd_timeout, scb);
1560 /* Go "immediately" to the bus reset */
1561 /* This shouldn't happen */
1562 ahd_set_recoveryscb(ahd, scb);
1563 ahd_print_path(ahd, scb);
1564 kprintf("SCB %d: Immediate reset. "
1565 "Flags = 0x%x\n", SCB_GET_TAG(scb),
1571 ahd_unlock(ahd, &s);
/*
 * Handle an XPT_ABORT request: try to abort the CCB referenced by
 * ccb->cab.abort_ccb.  Target-mode accept-TIO / immediate-notify CCBs
 * that are still sitting on our internal SLISTs can be removed and
 * completed with CAM_REQ_ABORTED; anything already in flight falls
 * through to the "hard" cases, which are not implemented (CAM_UA_ABORT).
 * NOTE(review): several case labels, braces, and the "status" declaration
 * are elided in this excerpt.
 */
1576 ahd_abort_ccb(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb)
1578 union ccb *abort_ccb;
1580 abort_ccb = ccb->cab.abort_ccb;
1581 switch (abort_ccb->ccb_h.func_code) {
1582 #ifdef AHD_TARGET_MODE
1583 case XPT_ACCEPT_TARGET_IO:
1584 case XPT_IMMED_NOTIFY:
1585 case XPT_CONT_TARGET_IO:
1587 struct ahd_tmode_tstate *tstate;
1588 struct ahd_tmode_lstate *lstate;
1589 struct ccb_hdr_slist *list;
/* Resolve the target/lun state the CCB to be aborted belongs to. */
1592 status = ahd_find_tmode_devs(ahd, sim, abort_ccb, &tstate,
1595 if (status != CAM_REQ_CMP) {
1596 ccb->ccb_h.status = status;
/* Pick the queue the CCB would be waiting on. */
1600 if (abort_ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO)
1601 list = &lstate->accept_tios;
1602 else if (abort_ccb->ccb_h.func_code == XPT_IMMED_NOTIFY)
1603 list = &lstate->immed_notifies;
1608 struct ccb_hdr *curelm;
/* Singly-linked list removal: special-case the head, then walk. */
1611 curelm = SLIST_FIRST(list);
1613 if (curelm == &abort_ccb->ccb_h) {
1615 SLIST_REMOVE_HEAD(list, sim_links.sle);
1617 while(curelm != NULL) {
1618 struct ccb_hdr *nextelm;
1621 SLIST_NEXT(curelm, sim_links.sle);
1623 if (nextelm == &abort_ccb->ccb_h) {
/* Found on a queue: complete it as aborted. */
1636 abort_ccb->ccb_h.status = CAM_REQ_ABORTED;
1637 xpt_done(abort_ccb);
1638 ccb->ccb_h.status = CAM_REQ_CMP;
1640 xpt_print_path(abort_ccb->ccb_h.path);
1641 kprintf("Not found\n");
1642 ccb->ccb_h.status = CAM_PATH_INVALID;
1650 /* XXX Fully implement the hard ones */
1651 ccb->ccb_h.status = CAM_UA_ABORT;
1654 ccb->ccb_h.status = CAM_REQ_INVALID;
1661 ahd_send_async(struct ahd_softc *ahd, char channel, u_int target,
1662 u_int lun, ac_code code, void *opt_arg)
1664 struct ccb_trans_settings cts;
1665 struct cam_path *path;
1670 error = ahd_create_path(ahd, channel, target, lun, &path);
1672 if (error != CAM_REQ_CMP)
1676 case AC_TRANSFER_NEG:
1678 #ifdef AHD_NEW_TRAN_SETTINGS
1679 struct ccb_trans_settings_scsi *scsi;
1681 cts.type = CTS_TYPE_CURRENT_SETTINGS;
1682 scsi = &cts.proto_specific.scsi;
1684 cts.flags = CCB_TRANS_CURRENT_SETTINGS;
1686 cts.ccb_h.path = path;
1687 cts.ccb_h.target_id = target;
1688 cts.ccb_h.target_lun = lun;
1689 ahd_get_tran_settings(ahd, ahd->our_id, channel, &cts);
1691 #ifdef AHD_NEW_TRAN_SETTINGS
1692 scsi->valid &= ~CTS_SCSI_VALID_TQ;
1693 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
1695 cts.valid &= ~CCB_TRANS_TQ_VALID;
1696 cts.flags &= ~CCB_TRANS_TAG_ENB;
1698 if (opt_arg == NULL)
1700 if (*((ahd_queue_alg *)opt_arg) == AHD_QUEUE_TAGGED)
1701 #ifdef AHD_NEW_TRAN_SETTINGS
1702 scsi->flags |= ~CTS_SCSI_FLAGS_TAG_ENB;
1703 scsi->valid |= CTS_SCSI_VALID_TQ;
1705 cts.flags |= CCB_TRANS_TAG_ENB;
1706 cts.valid |= CCB_TRANS_TQ_VALID;
1714 panic("ahd_send_async: Unexpected async event");
1716 xpt_async(code, path, arg);
1717 xpt_free_path(path);
/*
 * Platform hook called by the core when tagged queuing is enabled or
 * disabled for "devinfo".  NOTE(review): the body is elided in this
 * excerpt — presumably a no-op on this platform; confirm against the
 * full source.
 */
1721 ahd_platform_set_tags(struct ahd_softc *ahd,
1722 struct ahd_devinfo *devinfo, int enable)
/*
 * Allocate the zero-initialized per-controller platform data.
 * M_INTWAIT is the DragonFly blocking-allocation flag — presumably this
 * cannot return NULL here; confirm.  The return statement is elided in
 * this excerpt.
 */
1727 ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg)
1729 ahd->platform_data = kmalloc(sizeof(struct ahd_platform_data), M_DEVBUF,
1730 M_INTWAIT | M_ZERO);
/*
 * Tear down all platform-level resources attached to the softc:
 * register windows (BARs 0 and 1), the IRQ resource, both CAM SIMs
 * (secondary "b" first) with their paths and bus registrations, the
 * shutdown event handler, and finally the platform data itself.
 * NOTE(review): trailing arguments of the bus_release_resource() calls
 * and several closing braces are elided in this excerpt.
 */
1735 ahd_platform_free(struct ahd_softc *ahd)
1737 struct ahd_platform_data *pdata;
1739 pdata = ahd->platform_data;
1740 if (pdata != NULL) {
1741 if (pdata->regs[0] != NULL)
1742 bus_release_resource(ahd->dev_softc,
1743 pdata->regs_res_type[0],
1744 pdata->regs_res_id[0],
1747 if (pdata->regs[1] != NULL)
1748 bus_release_resource(ahd->dev_softc,
1749 pdata->regs_res_type[1],
1750 pdata->regs_res_id[1],
1753 if (pdata->irq != NULL)
1754 bus_release_resource(ahd->dev_softc,
1755 pdata->irq_res_type,
/* Detach the secondary SIM (if any) from CAM before the primary. */
1758 if (pdata->sim_b != NULL) {
1759 xpt_async(AC_LOST_DEVICE, pdata->path_b, NULL);
1760 xpt_free_path(pdata->path_b);
1761 xpt_bus_deregister(cam_sim_path(pdata->sim_b));
1762 cam_sim_free(pdata->sim_b);
1764 if (pdata->sim != NULL) {
1765 xpt_async(AC_LOST_DEVICE, pdata->path, NULL);
1766 xpt_free_path(pdata->path);
1767 xpt_bus_deregister(cam_sim_path(pdata->sim));
1768 cam_sim_free(pdata->sim);
1770 if (pdata->eh != NULL)
1771 EVENTHANDLER_DEREGISTER(shutdown_final, pdata->eh);
1772 kfree(ahd->platform_data, M_DEVBUF);
/*
 * Softc comparison callback used when inserting controllers into the
 * global list.  This platform keeps no ordering among softcs, so every
 * pair compares as equal.
 */
int
ahd_softc_comp(struct ahd_softc *lahd, struct ahd_softc *rahd)
{
	return (0);
}
/*
 * Device detach method: validate that the softc is still on the global
 * list (bail out if already detached), disable controller interrupts,
 * and tear down the bus interrupt handler.  NOTE(review): the locking
 * calls, error-path braces, and the final ahd_free()/return are elided
 * in this excerpt.
 */
1784 ahd_detach(device_t dev)
1786 struct ahd_softc *ahd;
1791 device_printf(dev, "detaching device\n");
1792 ahd = device_get_softc(dev);
/* Re-resolve through the global list to detect a double detach. */
1793 ahd = ahd_find_softc(ahd);
1795 device_printf(dev, "aic7xxx already detached\n");
1796 ahd_list_unlock(&l);
1800 ahd_intr_enable(ahd, FALSE);
1801 bus_teardown_intr(dev, ahd->platform_data->irq, ahd->platform_data->ih);
1802 ahd_unlock(ahd, &s);
1804 ahd_list_unlock(&l);
/*
 * Debug helper: hex-dump a received target-mode command, byte by byte,
 * from its initiator_channel field up to the start of the next
 * target_cmd slot (&cmd[1]).
 */
1810 ahd_dump_targcmd(struct target_cmd *cmd)
1816 byte = &cmd->initiator_channel;
1817 /* Debugging info for received commands */
1818 last_byte = &cmd[1].initiator_channel;
1821 while (byte < last_byte) {
1824 kprintf("%#x", *byte++);
/*
 * Kernel module event handler for the ahd driver.  The body (elided in
 * this excerpt) handles load/unload events; the XXX notes that refusing
 * unload while controllers are busy is not yet implemented.
 */
1837 ahd_modevent(module_t mod, int type, void *data)
1839 /* XXX Deal with busy status on unload. */
1843 static moduledata_t ahd_mod = {
1849 /********************************** DDB Hooks *********************************/
/* Controller currently targeted by the ahd_* DDB commands below. */
1851 static struct ahd_softc *ahd_ddb_softc;
/* Nonzero while ahd_pause has the selected controller paused. */
1852 static int ahd_ddb_paused;
/* Nonzero if the controller was already paused when ahd_pause ran. */
1853 static int ahd_ddb_paused_on_entry;
/*
 * DDB command: select the controller (by unit number in "addr") that
 * subsequent ahd_pause/ahd_unpause/ahd_in/ahd_out commands operate on.
 */
1854 DB_COMMAND(ahd_set_unit, ahd_ddb_set_unit)
1856 struct ahd_softc *list_ahd;
1858 ahd_ddb_softc = NULL;
1859 TAILQ_FOREACH(list_ahd, &ahd_tailq, links) {
1860 if (list_ahd->unit == addr)
1861 ahd_ddb_softc = list_ahd;
1863 if (ahd_ddb_softc == NULL)
1864 db_error("No matching softc found!\n");
/*
 * DDB command: pause the selected controller's sequencer.  Remembers
 * whether it was already paused so ahd_unpause can restore the original
 * state.  Requires a prior ahd_set_unit.
 */
1867 DB_COMMAND(ahd_pause, ahd_ddb_pause)
1869 if (ahd_ddb_softc == NULL) {
1870 db_error("Must set unit with ahd_set_unit first!\n")
1873 if (ahd_ddb_paused == 0) {
1875 if (ahd_is_paused(ahd_ddb_softc)) {
1876 ahd_ddb_paused_on_entry++;
1879 ahd_pause(ahd_ddb_softc);
/*
 * DDB command: undo ahd_pause.  If the controller was paused on entry
 * to ahd_pause, a second ahd_unpause is required to actually release it
 * (clears the paused-on-entry latch first).
 */
1883 DB_COMMAND(ahd_unpause, ahd_ddb_unpause)
1885 if (ahd_ddb_softc == NULL) {
1886 db_error("Must set unit with ahd_set_unit first!\n");
1889 if (ahd_ddb_paused != 0) {
/* Only unpause if we were the ones who paused it. */
1891 if (ahd_ddb_paused_on_entry)
1893 ahd_unpause(ahd_ddb_softc);
1894 } else if (ahd_ddb_paused_on_entry != 0) {
1895 /* Two unpauses to clear a paused on entry. */
1896 ahd_ddb_paused_on_entry = 0;
1897 ahd_unpause(ahd_ddb_softc);
/*
 * DDB command: read a controller register at "addr" and print it along
 * with the current MODE_PTR.  Modifier characters select the access
 * width (byte/word/long); the modifier-parsing switch body is elided in
 * this excerpt.
 */
1901 DB_COMMAND(ahd_in, ahd_ddb_in)
1906 if (ahd_ddb_softc == NULL) {
1907 db_error("Must set unit with ahd_set_unit first!\n");
1914 while ((c = *modif++) != '\0') {
1930 while (--count >= 0) {
1931 db_printf("%04lx (M)%x: \t", (u_long)addr,
1932 ahd_inb(ahd_ddb_softc, MODE_PTR));
1935 db_printf("%02x\n", ahd_inb(ahd_ddb_softc, addr));
1938 db_printf("%04x\n", ahd_inw(ahd_ddb_softc, addr));
1941 db_printf("%08x\n", ahd_inl(ahd_ddb_softc, addr));
/*
 * DDB command: write successive expression values to a controller
 * register at "addr", printing old and new values for each write.  The
 * access width is chosen by modifier (size-selection switch elided in
 * this excerpt); CS_MORE lets DDB keep parsing value expressions.
 */
1947 DB_SET(ahd_out, ahd_ddb_out, db_cmd_set, CS_MORE, NULL)
1949 db_expr_t old_value;
1950 db_expr_t new_value;
1953 if (ahd_ddb_softc == NULL) {
1954 db_error("Must set unit with ahd_set_unit first!\n");
1970 db_error("Unknown size\n");
/* One read-modify-print cycle per value expression supplied. */
1974 while (db_expression(&new_value)) {
1978 old_value = ahd_inb(ahd_ddb_softc, addr);
1979 ahd_outb(ahd_ddb_softc, addr, new_value);
1982 old_value = ahd_inw(ahd_ddb_softc, addr);
1983 ahd_outw(ahd_ddb_softc, addr, new_value);
1986 old_value = ahd_inl(ahd_ddb_softc, addr);
1987 ahd_outl(ahd_ddb_softc, addr, new_value);
1990 db_printf("%04lx (M)%x: \t0x%lx\t=\t0x%lx",
1991 (u_long)addr, ahd_inb(ahd_ddb_softc, MODE_PTR),
1992 (u_long)old_value, (u_long)new_value);
/* Register the driver module with the kernel; it requires CAM. */
2001 DECLARE_MODULE(ahd, ahd_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
2002 MODULE_DEPEND(ahd, cam, 1, 1, 1);
2003 MODULE_VERSION(ahd, 1);