2 *********************************************************************
4 * BY : C.L. Huang (ching@tekram.com.tw)
5 * Erich Chen (erich@tekram.com.tw)
6 * Description: Device Driver for the amd53c974 PCI Bus Master
7 * SCSI Host adapter found on cards such as
8 * the Tekram DC-390(T).
9 * (C)Copyright 1995-1999 Tekram Technology Co., Ltd.
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. The name of the author may not be used to endorse or promote products
20 * derived from this software without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *********************************************************************
33 * $FreeBSD: src/sys/pci/amd.c,v 1.3.2.2 2001/06/02 04:32:50 nyan Exp $
34 * $DragonFly: src/sys/dev/disk/amd/amd.c,v 1.14 2007/12/23 07:00:56 pavalos Exp $
38 *********************************************************************
41 * REV# DATE NAME DESCRIPTION
42 * 1.00 07/02/96 CLH First release for RELEASE-2.1.0
43 * 1.01 08/20/96 CLH Update for RELEASE-2.1.5
44 * 1.02 11/06/96 CLH Fixed more than 1 LUN scanning
45 * 1.03 12/20/96 CLH Modify to support 2.2-ALPHA
46 * 1.04 12/26/97 CLH Modify to support RELEASE-2.2.5
47 * 1.05 01/01/99 ERICH CHEN Modify to support RELEASE-3.0.x (CAM)
48 *********************************************************************
51 /* #define AMD_DEBUG0 */
52 /* #define AMD_DEBUG_SCSI_PHASE */
54 #include <sys/param.h>
55 #include <sys/systm.h>
56 #include <sys/kernel.h>
57 #include <sys/malloc.h>
58 #include <sys/queue.h>
62 #include <sys/thread2.h>
67 #include <machine/clock.h>
69 #include <bus/cam/cam.h>
70 #include <bus/cam/cam_ccb.h>
71 #include <bus/cam/cam_sim.h>
72 #include <bus/cam/cam_xpt_sim.h>
73 #include <bus/cam/cam_debug.h>
75 #include <bus/cam/scsi/scsi_all.h>
76 #include <bus/cam/scsi/scsi_message.h>
78 #include <bus/pci/pcivar.h>
79 #include <bus/pci/pcireg.h>
82 #define PCI_DEVICE_ID_AMD53C974 0x20201022ul
83 #define PCI_BASE_ADDR0 0x10
85 typedef u_int (phase_handler_t)(struct amd_softc *, struct amd_srb *, u_int);
86 typedef phase_handler_t *phase_handler_func_t;
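/*
 * Each SCSI bus phase has a pair of handlers: the "phase0" routine does
 * post-processing for the phase that just finished, and the "phase1"
 * routine sets the chip up for the phase being entered (see the state
 * engine dispatch in amd_intr()).
 */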
88 static void amd_intr(void *vamd);
89 static int amdstart(struct amd_softc *amd, struct amd_srb * pSRB);
90 static phase_handler_t amd_NopPhase;
92 static phase_handler_t amd_DataOutPhase0;
93 static phase_handler_t amd_DataInPhase0;
94 #define amd_CommandPhase0 amd_NopPhase
95 static phase_handler_t amd_StatusPhase0;
96 static phase_handler_t amd_MsgOutPhase0;
97 static phase_handler_t amd_MsgInPhase0;
98 static phase_handler_t amd_DataOutPhase1;
99 static phase_handler_t amd_DataInPhase1;
100 static phase_handler_t amd_CommandPhase1;
101 static phase_handler_t amd_StatusPhase1;
102 static phase_handler_t amd_MsgOutPhase1;
103 static phase_handler_t amd_MsgInPhase1;
105 static void amdsetupcommand(struct amd_softc *amd, struct amd_srb *srb);
106 static int amdparsemsg(struct amd_softc *amd);
107 static int amdhandlemsgreject(struct amd_softc *amd);
108 static void amdconstructsdtr(struct amd_softc *amd,
109 u_int period, u_int offset);
110 static u_int amdfindclockrate(struct amd_softc *amd, u_int *period);
111 static int amdsentmsg(struct amd_softc *amd, u_int msgtype, int full);
113 static void DataIO_Comm(struct amd_softc *amd, struct amd_srb *pSRB, u_int dir);
114 static void amd_Disconnect(struct amd_softc *amd);
115 static void amd_Reselect(struct amd_softc *amd);
116 static void SRBdone(struct amd_softc *amd, struct amd_srb *pSRB);
117 static void amd_ScsiRstDetect(struct amd_softc *amd);
118 static void amd_ResetSCSIBus(struct amd_softc *amd);
119 static void RequestSense(struct amd_softc *amd, struct amd_srb *pSRB);
120 static void amd_InvalidCmd(struct amd_softc *amd);
123 static void amd_timeout(void *arg1);
124 static void amd_reset(struct amd_softc *amd);
126 static u_int8_t * phystovirt(struct amd_srb *pSRB, u_int32_t xferCnt);
128 void amd_linkSRB(struct amd_softc *amd);
129 static int amd_init(device_t);
130 static void amd_load_defaults(struct amd_softc *amd);
131 static void amd_load_eeprom_or_defaults(struct amd_softc *amd);
132 static int amd_EEpromInDO(struct amd_softc *amd);
133 static u_int16_t EEpromGetData1(struct amd_softc *amd);
134 static void amd_EnDisableCE(struct amd_softc *amd, int mode, int *regval);
135 static void amd_EEpromOutDI(struct amd_softc *amd, int *regval, int Carry);
136 static void amd_Prepare(struct amd_softc *amd, int *regval, u_int8_t EEpromCmd);
137 static void amd_ReadEEprom(struct amd_softc *amd);
139 static int amd_probe(device_t);
140 static int amd_attach(device_t);
141 static void amdcompletematch(struct amd_softc *amd, target_id_t target,
142 lun_id_t lun, u_int tag, struct srb_queue *queue,
144 static void amdsetsync(struct amd_softc *amd, u_int target, u_int clockrate,
145 u_int period, u_int offset, u_int type);
146 static void amdsettags(struct amd_softc *amd, u_int target, int tagenb);
148 static __inline void amd_clear_msg_state(struct amd_softc *amd);
151 amd_clear_msg_state(struct amd_softc *amd)
154 amd->msgout_index = 0;
155 amd->msgin_index = 0;
158 /* CAM SIM entry points */
159 #define ccb_srb_ptr spriv_ptr0
160 #define ccb_amd_ptr spriv_ptr1
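/*
 * The SRB servicing a CCB and the owning softc are stashed in the CCB's
 * SIM-private pointers so later stages (e.g. amdexecutesrb()) can get
 * back to them from the CCB alone.
 */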
161 static void amd_action(struct cam_sim *sim, union ccb *ccb);
162 static void amd_poll(struct cam_sim *sim);
165 * State engine function tables indexed by SCSI phase number
167 phase_handler_func_t amd_SCSI_phase0[] = {
178 phase_handler_func_t amd_SCSI_phase1[] = {
190 * EEProm/BIOS negotiation periods
192 u_int8_t eeprom_period[] = {
204 * chip clock setting to SCSI specified sync parameter table.
206 u_int8_t tinfo_sync_period[] = {
219 static __inline struct amd_srb *
220 amdgetsrb(struct amd_softc * amd)
222 struct amd_srb * pSRB;
225 pSRB = TAILQ_FIRST(&amd->free_srbs);
227 TAILQ_REMOVE(&amd->free_srbs, pSRB, links);
233 amdsetupcommand(struct amd_softc *amd, struct amd_srb *srb)
235 struct scsi_request_sense sense_cmd;
236 struct ccb_scsiio *csio;
240 csio = &srb->pccb->csio;
242 if (srb->SRBFlag & AUTO_REQSENSE) {
243 sense_cmd.opcode = REQUEST_SENSE;
244 sense_cmd.byte2 = srb->pccb->ccb_h.target_lun << 5;
245 sense_cmd.unused[0] = 0;
246 sense_cmd.unused[1] = 0;
247 sense_cmd.length = csio->sense_len;
248 sense_cmd.control = 0;
249 cdb = &sense_cmd.opcode;
250 cdb_len = sizeof(sense_cmd);
252 cdb = &srb->CmdBlock[0];
253 cdb_len = srb->ScsiCmdLen;
255 amd_write8_multi(amd, SCSIFIFOREG, cdb, cdb_len);
259 * Attempt to start a waiting transaction. Interrupts must be disabled
260 * upon entry to this function.
263 amdrunwaiting(struct amd_softc *amd) {
266 if (amd->last_phase != SCSI_BUS_FREE)
269 srb = TAILQ_FIRST(&amd->waiting_srbs);
273 if (amdstart(amd, srb) == 0) {
274 TAILQ_REMOVE(&amd->waiting_srbs, srb, links);
275 TAILQ_INSERT_HEAD(&amd->running_srbs, srb, links);
280 amdexecutesrb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
284 struct amd_softc *amd;
286 srb = (struct amd_srb *)arg;
288 amd = (struct amd_softc *)ccb->ccb_h.ccb_amd_ptr;
292 kprintf("amd%d: Unexpected error 0x%x returned from "
293 "bus_dmamap_load\n", amd->unit, error);
294 if (ccb->ccb_h.status == CAM_REQ_INPROG) {
295 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
296 ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
298 TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
305 bus_dma_segment_t *end_seg;
308 end_seg = dm_segs + nseg;
310 /* Copy the segments into our SG list */
311 srb->pSGlist = &srb->SGsegment[0];
313 while (dm_segs < end_seg) {
314 sg->SGXLen = dm_segs->ds_len;
315 sg->SGXPtr = dm_segs->ds_addr;
320 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
321 op = BUS_DMASYNC_PREREAD;
323 op = BUS_DMASYNC_PREWRITE;
325 bus_dmamap_sync(amd->buffer_dmat, srb->dmamap, op);
330 srb->AdaptStatus = 0;
331 srb->TargetStatus = 0;
336 srb->TotalXferredLen = 0;
338 srb->SGToBeXferLen = 0;
344 * Last time we need to check if this CCB needs to be aborted.
347 if (ccb->ccb_h.status != CAM_REQ_INPROG) {
349 bus_dmamap_unload(amd->buffer_dmat, srb->dmamap);
350 TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
355 ccb->ccb_h.status |= CAM_SIM_QUEUED;
357 /* XXX Need a timeout handler */
358 callout_reset(&ccb->ccb_h.timeout_ch, (ccb->ccb_h.timeout * hz) / 1000,
361 TAILQ_INSERT_TAIL(&amd->waiting_srbs, srb, links);
367 amd_action(struct cam_sim * psim, union ccb * pccb)
369 struct amd_softc * amd;
370 u_int target_id, target_lun;
372 CAM_DEBUG(pccb->ccb_h.path, CAM_DEBUG_TRACE, ("amd_action\n"));
374 amd = (struct amd_softc *) cam_sim_softc(psim);
375 target_id = pccb->ccb_h.target_id;
376 target_lun = pccb->ccb_h.target_lun;
378 switch (pccb->ccb_h.func_code) {
381 struct amd_srb * pSRB;
382 struct ccb_scsiio *pcsio;
387 * Assign an SRB and connect it with this ccb.
389 pSRB = amdgetsrb(amd);
393 pccb->ccb_h.status = CAM_RESRC_UNAVAIL;
398 pccb->ccb_h.ccb_srb_ptr = pSRB;
399 pccb->ccb_h.ccb_amd_ptr = amd;
400 pSRB->ScsiCmdLen = pcsio->cdb_len;
401 bcopy(pcsio->cdb_io.cdb_bytes, pSRB->CmdBlock, pcsio->cdb_len);
402 if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
403 if ((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
405 * We've been given a pointer
406 * to a single buffer.
408 if ((pccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
413 bus_dmamap_load(amd->buffer_dmat,
419 if (error == EINPROGRESS) {
422 * ordering, freeze the
424 * until our mapping is
427 xpt_freeze_simq(amd->psim, 1);
428 pccb->ccb_h.status |=
433 struct bus_dma_segment seg;
435 /* Pointer to physical buffer */
437 (bus_addr_t)pcsio->data_ptr;
438 seg.ds_len = pcsio->dxfer_len;
439 amdexecutesrb(pSRB, &seg, 1, 0);
442 struct bus_dma_segment *segs;
444 if ((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0
445 || (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
446 TAILQ_INSERT_HEAD(&amd->free_srbs,
448 pccb->ccb_h.status = CAM_PROVIDE_FAIL;
453 /* Just use the segments provided */
455 (struct bus_dma_segment *)pcsio->data_ptr;
456 amdexecutesrb(pSRB, segs, pcsio->sglist_cnt, 0);
459 amdexecutesrb(pSRB, NULL, 0, 0);
464 struct ccb_pathinq *cpi = &pccb->cpi;
466 cpi->version_num = 1;
467 cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE;
468 cpi->target_sprt = 0;
470 cpi->hba_eng_cnt = 0;
472 cpi->max_lun = amd->max_lun; /* 7 or 0 */
473 cpi->initiator_id = amd->AdaptSCSIID;
474 cpi->bus_id = cam_sim_bus(psim);
475 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
476 strncpy(cpi->hba_vid, "TRM-AMD", HBA_IDLEN);
477 strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN);
478 cpi->unit_number = cam_sim_unit(psim);
479 #ifdef CAM_NEW_TRAN_CODE
480 cpi->transport = XPORT_SPI;
481 cpi->transport_version = 2;
482 cpi->protocol = PROTO_SCSI;
483 cpi->protocol_version = SCSI_REV_2;
485 cpi->ccb_h.status = CAM_REQ_CMP;
490 pccb->ccb_h.status = CAM_REQ_INVALID;
498 amd_ResetSCSIBus(amd);
501 for (i = 0; i < 500; i++) {
502 DELAY(1000); /* Wait until our interrupt handler sees it */
506 pccb->ccb_h.status = CAM_REQ_CMP;
511 pccb->ccb_h.status = CAM_REQ_INVALID;
515 pccb->ccb_h.status = CAM_REQ_INVALID;
518 case XPT_GET_TRAN_SETTINGS:
520 struct ccb_trans_settings *cts = &pccb->cts;
521 struct amd_target_info *targ_info = &amd->tinfo[target_id];
522 struct amd_transinfo *tinfo;
523 #ifdef CAM_NEW_TRAN_CODE
524 struct ccb_trans_settings_scsi *scsi =
525 &cts->proto_specific.scsi;
526 struct ccb_trans_settings_spi *spi =
527 &cts->xport_specific.spi;
529 cts->protocol = PROTO_SCSI;
530 cts->protocol_version = SCSI_REV_2;
531 cts->transport = XPORT_SPI;
532 cts->transport_version = 2;
535 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
536 /* current transfer settings */
537 if (targ_info->disc_tag & AMD_CUR_DISCENB) {
538 spi->flags = CTS_SPI_FLAGS_DISC_ENB;
542 if (targ_info->disc_tag & AMD_CUR_TAGENB) {
543 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
547 tinfo = &targ_info->current;
549 /* default(user) transfer settings */
550 if (targ_info->disc_tag & AMD_USR_DISCENB) {
551 spi->flags = CTS_SPI_FLAGS_DISC_ENB;
555 if (targ_info->disc_tag & AMD_USR_TAGENB) {
556 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
560 tinfo = &targ_info->user;
562 spi->sync_period = tinfo->period;
563 spi->sync_offset = tinfo->offset;
566 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
567 spi->valid = CTS_SPI_VALID_SYNC_RATE
568 | CTS_SPI_VALID_SYNC_OFFSET
569 | CTS_SPI_VALID_BUS_WIDTH
570 | CTS_SPI_VALID_DISC;
571 scsi->valid = CTS_SCSI_VALID_TQ;
574 if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
575 /* current transfer settings */
576 if (targ_info->disc_tag & AMD_CUR_DISCENB) {
577 cts->flags = CCB_TRANS_DISC_ENB;
579 cts->flags = 0; /* no tag & disconnect */
581 if (targ_info->disc_tag & AMD_CUR_TAGENB) {
582 cts->flags |= CCB_TRANS_TAG_ENB;
584 tinfo = &targ_info->current;
586 /* default(user) transfer settings */
587 if (targ_info->disc_tag & AMD_USR_DISCENB) {
588 cts->flags = CCB_TRANS_DISC_ENB;
592 if (targ_info->disc_tag & AMD_USR_TAGENB) {
593 cts->flags |= CCB_TRANS_TAG_ENB;
595 tinfo = &targ_info->user;
598 cts->sync_period = tinfo->period;
599 cts->sync_offset = tinfo->offset;
600 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
603 cts->valid = CCB_TRANS_SYNC_RATE_VALID
604 | CCB_TRANS_SYNC_OFFSET_VALID
605 | CCB_TRANS_BUS_WIDTH_VALID
606 | CCB_TRANS_DISC_VALID
607 | CCB_TRANS_TQ_VALID;
609 pccb->ccb_h.status = CAM_REQ_CMP;
613 #ifdef CAM_NEW_TRAN_CODE
614 #define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS)
615 #define IS_USER_SETTINGS(c) (c->type == CTS_TYPE_USER_SETTINGS)
617 #define IS_CURRENT_SETTINGS(c) (c->flags & CCB_TRANS_CURRENT_SETTINGS)
618 #define IS_USER_SETTINGS(c) (c->flags & CCB_TRANS_USER_SETTINGS)
620 case XPT_SET_TRAN_SETTINGS:
622 struct ccb_trans_settings *cts = &pccb->cts;
623 struct amd_target_info *targ_info;
624 u_int update_type = 0;
627 #ifdef CAM_NEW_TRAN_CODE
628 struct ccb_trans_settings_scsi *scsi =
629 &cts->proto_specific.scsi;
630 struct ccb_trans_settings_spi *spi =
631 &cts->xport_specific.spi;
633 if (IS_CURRENT_SETTINGS(cts)) {
634 update_type |= AMD_TRANS_GOAL;
635 } else if (IS_USER_SETTINGS(cts)) {
636 update_type |= AMD_TRANS_USER;
639 || update_type == (AMD_TRANS_USER|AMD_TRANS_GOAL)) {
640 cts->ccb_h.status = CAM_REQ_INVALID;
644 #ifdef CAM_NEW_TRAN_CODE
646 targ_info = &amd->tinfo[target_id];
648 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
649 if (update_type & AMD_TRANS_GOAL) {
650 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB)
652 targ_info->disc_tag |= AMD_CUR_DISCENB;
654 targ_info->disc_tag &= ~AMD_CUR_DISCENB;
657 if (update_type & AMD_TRANS_USER) {
658 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB)
660 targ_info->disc_tag |= AMD_USR_DISCENB;
662 targ_info->disc_tag &= ~AMD_USR_DISCENB;
666 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
667 if (update_type & AMD_TRANS_GOAL) {
668 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB)
670 targ_info->disc_tag |= AMD_CUR_TAGENB;
672 targ_info->disc_tag &= ~AMD_CUR_TAGENB;
675 if (update_type & AMD_TRANS_USER) {
676 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB)
678 targ_info->disc_tag |= AMD_USR_TAGENB;
680 targ_info->disc_tag &= ~AMD_USR_TAGENB;
685 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) {
686 if (update_type & AMD_TRANS_GOAL)
687 spi->sync_offset = targ_info->goal.offset;
689 spi->sync_offset = targ_info->user.offset;
692 if (spi->sync_offset > AMD_MAX_SYNC_OFFSET)
693 spi->sync_offset = AMD_MAX_SYNC_OFFSET;
695 if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
696 if (update_type & AMD_TRANS_GOAL)
697 spi->sync_period = targ_info->goal.period;
699 spi->sync_period = targ_info->user.period;
702 last_entry = sizeof(tinfo_sync_period) - 1;
703 if ((spi->sync_period != 0)
704 && (spi->sync_period < tinfo_sync_period[0]))
705 spi->sync_period = tinfo_sync_period[0];
706 if (spi->sync_period > tinfo_sync_period[last_entry])
707 spi->sync_period = 0;
708 if (spi->sync_offset == 0)
709 spi->sync_period = 0;
711 if ((update_type & AMD_TRANS_USER) != 0) {
712 targ_info->user.period = spi->sync_period;
713 targ_info->user.offset = spi->sync_offset;
715 if ((update_type & AMD_TRANS_GOAL) != 0) {
716 targ_info->goal.period = spi->sync_period;
717 targ_info->goal.offset = spi->sync_offset;
721 targ_info = &amd->tinfo[target_id];
723 if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
724 if (update_type & AMD_TRANS_GOAL) {
725 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
726 targ_info->disc_tag |= AMD_CUR_DISCENB;
728 targ_info->disc_tag &= ~AMD_CUR_DISCENB;
731 if (update_type & AMD_TRANS_USER) {
732 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
733 targ_info->disc_tag |= AMD_USR_DISCENB;
735 targ_info->disc_tag &= ~AMD_USR_DISCENB;
739 if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
740 if (update_type & AMD_TRANS_GOAL) {
741 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
742 targ_info->disc_tag |= AMD_CUR_TAGENB;
744 targ_info->disc_tag &= ~AMD_CUR_TAGENB;
747 if (update_type & AMD_TRANS_USER) {
748 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
749 targ_info->disc_tag |= AMD_USR_TAGENB;
751 targ_info->disc_tag &= ~AMD_USR_TAGENB;
756 if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0) {
757 if (update_type & AMD_TRANS_GOAL)
758 cts->sync_offset = targ_info->goal.offset;
760 cts->sync_offset = targ_info->user.offset;
763 if (cts->sync_offset > AMD_MAX_SYNC_OFFSET)
764 cts->sync_offset = AMD_MAX_SYNC_OFFSET;
766 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) {
767 if (update_type & AMD_TRANS_GOAL)
768 cts->sync_period = targ_info->goal.period;
770 cts->sync_period = targ_info->user.period;
773 last_entry = sizeof(tinfo_sync_period) - 1;
774 if ((cts->sync_period != 0)
775 && (cts->sync_period < tinfo_sync_period[0]))
776 cts->sync_period = tinfo_sync_period[0];
777 if (cts->sync_period > tinfo_sync_period[last_entry])
778 cts->sync_period = 0;
779 if (cts->sync_offset == 0)
780 cts->sync_period = 0;
782 if ((update_type & AMD_TRANS_USER) != 0) {
783 targ_info->user.period = cts->sync_period;
784 targ_info->user.offset = cts->sync_offset;
786 if ((update_type & AMD_TRANS_GOAL) != 0) {
787 targ_info->goal.period = cts->sync_period;
788 targ_info->goal.offset = cts->sync_offset;
792 pccb->ccb_h.status = CAM_REQ_CMP;
796 case XPT_CALC_GEOMETRY:
798 struct ccb_calc_geometry *ccg;
800 u_int32_t secs_per_cylinder;
804 size_mb = ccg->volume_size/((1024L * 1024L)/ccg->block_size);
805 extended = (amd->eepromBuf[EE_MODE2] & GREATER_1G) != 0;
807 if (size_mb > 1024 && extended) {
809 ccg->secs_per_track = 63;
812 ccg->secs_per_track = 32;
814 secs_per_cylinder = ccg->heads * ccg->secs_per_track;
815 ccg->cylinders = ccg->volume_size / secs_per_cylinder;
816 pccb->ccb_h.status = CAM_REQ_CMP;
821 pccb->ccb_h.status = CAM_REQ_INVALID;
828 amd_poll(struct cam_sim * psim)
830 amd_intr(cam_sim_softc(psim));
834 phystovirt(struct amd_srb * pSRB, u_int32_t xferCnt)
837 struct ccb_scsiio *pcsio;
839 struct amd_sg * pseg;
842 pcsio = &pSRB->pccb->csio;
844 dataPtr = (int) pcsio->data_ptr;
845 pseg = pSRB->SGsegment;
846 for (i = 0; i < pSRB->SGIndex; i++) {
847 dataPtr += (int) pseg->SGXLen;
850 dataPtr += (int) xferCnt;
851 return ((u_int8_t *) dataPtr);
855 ResetDevParam(struct amd_softc * amd)
859 for (target = 0; target <= amd->max_id; target++) {
860 if (amd->AdaptSCSIID != target) {
861 amdsetsync(amd, target, /*clockrate*/0,
862 /*period*/0, /*offset*/0, AMD_TRANS_CUR);
868 amdcompletematch(struct amd_softc *amd, target_id_t target, lun_id_t lun,
869 u_int tag, struct srb_queue *queue, cam_status status)
872 struct amd_srb *next_srb;
874 for (srb = TAILQ_FIRST(queue); srb != NULL; srb = next_srb) {
877 next_srb = TAILQ_NEXT(srb, links);
878 if (srb->pccb->ccb_h.target_id != target
879 && target != CAM_TARGET_WILDCARD)
882 if (srb->pccb->ccb_h.target_lun != lun
883 && lun != CAM_LUN_WILDCARD)
886 if (srb->TagNumber != tag
887 && tag != AMD_TAG_WILDCARD)
891 TAILQ_REMOVE(queue, srb, links);
892 TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
893 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0
894 && (status & CAM_DEV_QFRZN) != 0)
895 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
896 ccb->ccb_h.status = status;
903 amdsetsync(struct amd_softc *amd, u_int target, u_int clockrate,
904 u_int period, u_int offset, u_int type)
906 struct amd_target_info *tinfo;
910 tinfo = &amd->tinfo[target];
911 old_period = tinfo->current.period;
912 old_offset = tinfo->current.offset;
913 if ((type & AMD_TRANS_CUR) != 0
914 && (old_period != period || old_offset != offset)) {
915 struct cam_path *path;
917 tinfo->current.period = period;
918 tinfo->current.offset = offset;
919 tinfo->sync_period_reg = clockrate;
920 tinfo->sync_offset_reg = offset;
921 tinfo->CtrlR3 &= ~FAST_SCSI;
922 tinfo->CtrlR4 &= ~EATER_25NS;
924 tinfo->CtrlR4 |= EATER_25NS;
926 tinfo->CtrlR3 |= FAST_SCSI;
928 if ((type & AMD_TRANS_ACTIVE) == AMD_TRANS_ACTIVE) {
929 amd_write8(amd, SYNCPERIOREG, tinfo->sync_period_reg);
930 amd_write8(amd, SYNCOFFREG, tinfo->sync_offset_reg);
931 amd_write8(amd, CNTLREG3, tinfo->CtrlR3);
932 amd_write8(amd, CNTLREG4, tinfo->CtrlR4);
934 /* If possible, update the XPT's notion of our transfer rate */
935 if (xpt_create_path(&path, /*periph*/NULL,
936 cam_sim_path(amd->psim), target,
937 CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
938 struct ccb_trans_settings neg;
939 #ifdef CAM_NEW_TRAN_CODE
940 struct ccb_trans_settings_spi *spi =
941 &neg.xport_specific.spi;
943 xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
944 memset(&neg, 0, sizeof (neg));
945 #ifdef CAM_NEW_TRAN_CODE
946 spi->sync_period = period;
947 spi->sync_offset = offset;
948 spi->valid = CTS_SPI_VALID_SYNC_RATE
949 | CTS_SPI_VALID_SYNC_OFFSET;
951 neg.sync_period = period;
952 neg.sync_offset = offset;
953 neg.valid = CCB_TRANS_SYNC_RATE_VALID
954 | CCB_TRANS_SYNC_OFFSET_VALID;
956 xpt_async(AC_TRANSFER_NEG, path, &neg);
960 if ((type & AMD_TRANS_GOAL) != 0) {
961 tinfo->goal.period = period;
962 tinfo->goal.offset = offset;
965 if ((type & AMD_TRANS_USER) != 0) {
966 tinfo->user.period = period;
967 tinfo->user.offset = offset;
972 amdsettags(struct amd_softc *amd, u_int target, int tagenb)
974 panic("Implement me!\n");
980 **********************************************************************
981 * Function : amd_reset (struct amd_softc * amd)
982 * Purpose : perform a hard reset on the SCSI bus (and AMD chip).
983 * Inputs : amd - softc of the adapter to reset
984 **********************************************************************
987 amd_reset(struct amd_softc * amd)
994 kprintf("DC390: RESET");
998 bval = amd_read8(amd, CNTLREG1);
999 bval |= DIS_INT_ON_SCSI_RST;
1000 amd_write8(amd, CNTLREG1, bval); /* disable interrupt */
1001 amd_ResetSCSIBus(amd);
1003 for (i = 0; i < 500; i++) {
1007 bval = amd_read8(amd, CNTLREG1);
1008 bval &= ~DIS_INT_ON_SCSI_RST;
1009 amd_write8(amd, CNTLREG1, bval); /* re-enable interrupt */
1011 amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
1012 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
1015 amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
1016 AMD_TAG_WILDCARD, &amd->running_srbs,
1017 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
1018 amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
1019 AMD_TAG_WILDCARD, &amd->waiting_srbs,
1020 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
1021 amd->active_srb = NULL;
1028 amd_timeout(void *arg1)
1030 struct amd_srb * pSRB;
1032 pSRB = (struct amd_srb *) arg1;
1037 amdstart(struct amd_softc *amd, struct amd_srb *pSRB)
1040 struct ccb_scsiio *pcsio;
1041 struct amd_target_info *targ_info;
1049 pcsio = &pccb->csio;
1050 target = pccb->ccb_h.target_id;
1051 lun = pccb->ccb_h.target_lun;
1052 targ_info = &amd->tinfo[target];
1054 amd_clear_msg_state(amd);
1055 amd_write8(amd, SCSIDESTIDREG, target);
1056 amd_write8(amd, SYNCPERIOREG, targ_info->sync_period_reg);
1057 amd_write8(amd, SYNCOFFREG, targ_info->sync_offset_reg);
1058 amd_write8(amd, CNTLREG1, targ_info->CtrlR1);
1059 amd_write8(amd, CNTLREG3, targ_info->CtrlR3);
1060 amd_write8(amd, CNTLREG4, targ_info->CtrlR4);
1061 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
1063 identify_msg = MSG_IDENTIFYFLAG | lun;
1064 if ((targ_info->disc_tag & AMD_CUR_DISCENB) != 0
1065 && (pccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0
1066 && (pSRB->CmdBlock[0] != REQUEST_SENSE)
1067 && (pSRB->SRBFlag & AUTO_REQSENSE) == 0)
1068 identify_msg |= MSG_IDENTIFY_DISCFLAG;
1070 amd_write8(amd, SCSIFIFOREG, identify_msg);
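/*
 * Tagged queuing only makes sense when the target is allowed to
 * disconnect; if the IDENTIFY message does not grant disconnect
 * privilege, drop the tag action for this command.
 */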
1072 if ((targ_info->disc_tag & AMD_CUR_TAGENB) == 0
1073 || (identify_msg & MSG_IDENTIFY_DISCFLAG) == 0)
1074 pccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
1075 if (targ_info->current.period != targ_info->goal.period
1076 || targ_info->current.offset != targ_info->goal.offset) {
1077 command = SEL_W_ATN_STOP;
1078 amdconstructsdtr(amd, targ_info->goal.period,
1079 targ_info->goal.offset);
1080 } else if ((pccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
1081 command = SEL_W_ATN2;
1082 pSRB->SRBState = SRB_START;
1083 amd_write8(amd, SCSIFIFOREG, pcsio->tag_action);
1084 amd_write8(amd, SCSIFIFOREG, pSRB->TagNumber);
1087 command = SEL_W_ATN;
1088 pSRB->SRBState = SRB_START;
1090 if (command != SEL_W_ATN_STOP)
1091 amdsetupcommand(amd, pSRB);
1093 if (amd_read8(amd, SCSISTATREG) & INTERRUPT) {
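/*
 * The chip already has an interrupt pending (most likely a reselection
 * or disconnect raced with our selection attempt), so do not issue the
 * select command; mark the SRB ready and let amd_intr() clear the
 * pending condition first.
 */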
1094 pSRB->SRBState = SRB_READY;
1097 amd->last_phase = SCSI_ARBITRATING;
1098 amd_write8(amd, SCSICMDREG, command);
1099 amd->active_srb = pSRB;
1100 amd->cur_target = target;
1107 * Catch an interrupt from the adapter.
1108 * Process pending device interrupts.
1113 struct amd_softc *amd;
1114 struct amd_srb *pSRB;
1115 u_int internstat = 0;
1119 amd = (struct amd_softc *)arg;
1123 kprintf("amd_intr: amd NULL return......");
1128 scsistat = amd_read8(amd, SCSISTATREG);
1129 if (!(scsistat & INTERRUPT)) {
1131 kprintf("amd_intr: scsistat = NULL ,return......");
1135 #ifdef AMD_DEBUG_SCSI_PHASE
1136 kprintf("scsistat=%2x,", scsistat);
1139 internstat = amd_read8(amd, INTERNSTATREG);
1140 intstat = amd_read8(amd, INTSTATREG);
1142 #ifdef AMD_DEBUG_SCSI_PHASE
1143 kprintf("intstat=%2x,", intstat);
1146 if (intstat & DISCONNECTED) {
1147 amd_Disconnect(amd);
1150 if (intstat & RESELECTED) {
1154 if (intstat & INVALID_CMD) {
1155 amd_InvalidCmd(amd);
1158 if (intstat & SCSI_RESET_) {
1159 amd_ScsiRstDetect(amd);
1162 if (intstat & (SUCCESSFUL_OP + SERVICE_REQUEST)) {
1163 pSRB = amd->active_srb;
1165 * Run our state engine. First perform
1166 * post processing for the last phase we
1167 * were in, followed by any processing
1168 * required to handle the current phase.
1171 amd_SCSI_phase0[amd->last_phase](amd, pSRB, scsistat);
1172 amd->last_phase = scsistat & SCSI_PHASE_MASK;
1173 (void)amd_SCSI_phase1[amd->last_phase](amd, pSRB, scsistat);
1178 amd_DataOutPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1180 struct amd_sg *psgl;
1181 u_int32_t ResidCnt, xferCnt;
1183 if (!(pSRB->SRBState & SRB_XFERPAD)) {
1184 if (scsistat & PARITY_ERR) {
1185 pSRB->SRBStatus |= PARITY_ERROR;
1187 if (scsistat & COUNT_2_ZERO) {
1188 while ((amd_read8(amd, DMA_Status)&DMA_XFER_DONE) == 0)
1190 pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
1192 if (pSRB->SGIndex < pSRB->SGcount) {
1194 psgl = pSRB->pSGlist;
1195 pSRB->SGPhysAddr = psgl->SGXPtr;
1196 pSRB->SGToBeXferLen = psgl->SGXLen;
1198 pSRB->SGToBeXferLen = 0;
1201 ResidCnt = amd_read8(amd, CURRENTFIFOREG) & 0x1f;
1202 ResidCnt += amd_read8(amd, CTCREG_LOW)
1203 | (amd_read8(amd, CTCREG_MID) << 8)
1204 | (amd_read8(amd, CURTXTCNTREG) << 16);
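/*
 * ResidCnt is what the chip never transferred (FIFO leftovers plus the
 * 24-bit transfer counter); the difference from the segment length is
 * what actually moved, so advance the S/G bookkeeping by that amount.
 */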
1206 xferCnt = pSRB->SGToBeXferLen - ResidCnt;
1207 pSRB->SGPhysAddr += xferCnt;
1208 pSRB->TotalXferredLen += xferCnt;
1209 pSRB->SGToBeXferLen = ResidCnt;
1212 amd_write8(amd, DMA_Cmd, WRITE_DIRECTION | DMA_IDLE_CMD);
1217 amd_DataInPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1220 u_int16_t i, residual;
1221 struct amd_sg *psgl;
1222 u_int32_t ResidCnt, xferCnt;
1225 if (!(pSRB->SRBState & SRB_XFERPAD)) {
1226 if (scsistat & PARITY_ERR) {
1227 pSRB->SRBStatus |= PARITY_ERROR;
1229 if (scsistat & COUNT_2_ZERO) {
1231 bval = amd_read8(amd, DMA_Status);
1232 if ((bval & DMA_XFER_DONE) != 0)
1235 amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_IDLE_CMD);
1237 pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
1239 if (pSRB->SGIndex < pSRB->SGcount) {
1241 psgl = pSRB->pSGlist;
1242 pSRB->SGPhysAddr = psgl->SGXPtr;
1243 pSRB->SGToBeXferLen = psgl->SGXLen;
1245 pSRB->SGToBeXferLen = 0;
1247 } else { /* phase changed */
1249 bval = amd_read8(amd, CURRENTFIFOREG);
1250 while (bval & 0x1f) {
1251 if ((bval & 0x1f) == 1) {
1252 for (i = 0; i < 0x100; i++) {
1253 bval = amd_read8(amd, CURRENTFIFOREG);
1254 if (!(bval & 0x1f)) {
1256 } else if (i == 0x0ff) {
1262 bval = amd_read8(amd, CURRENTFIFOREG);
1266 amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_BLAST_CMD);
1267 for (i = 0; i < 0x8000; i++) {
1268 if ((amd_read8(amd, DMA_Status)&BLAST_COMPLETE))
1271 amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_IDLE_CMD);
1273 ResidCnt = amd_read8(amd, CTCREG_LOW)
1274 | (amd_read8(amd, CTCREG_MID) << 8)
1275 | (amd_read8(amd, CURTXTCNTREG) << 16);
1276 xferCnt = pSRB->SGToBeXferLen - ResidCnt;
1277 pSRB->SGPhysAddr += xferCnt;
1278 pSRB->TotalXferredLen += xferCnt;
1279 pSRB->SGToBeXferLen = ResidCnt;
1281 /* get residual byte */
1282 bval = amd_read8(amd, SCSIFIFOREG);
1283 ptr = phystovirt(pSRB, xferCnt);
1286 pSRB->TotalXferredLen++;
1287 pSRB->SGToBeXferLen--;
1295 amd_StatusPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1297 pSRB->TargetStatus = amd_read8(amd, SCSIFIFOREG);
1299 pSRB->EndMessage = amd_read8(amd, SCSIFIFOREG);
1300 pSRB->SRBState = SRB_COMPLETED;
1301 amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);
1306 amd_MsgOutPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1308 if (pSRB->SRBState & (SRB_UNEXPECT_RESEL + SRB_ABORT_SENT)) {
1309 scsistat = SCSI_NOP0;
1315 amd_MsgInPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1319 amd->msgin_buf[amd->msgin_index] = amd_read8(amd, SCSIFIFOREG);
1321 done = amdparsemsg(amd);
1323 amd->msgin_index = 0;
1330 amdparsemsg(struct amd_softc *amd)
1332 struct amd_target_info *targ_info;
1341 targ_info = &amd->tinfo[amd->cur_target];
1344 * Parse as much of the message as is available,
1345 * rejecting it if we don't support it. When
1346 * the entire message is available and has been
1347 * handled, return TRUE indicating that we have
1348 * parsed an entire message.
1350 switch (amd->msgin_buf[0]) {
1351 case MSG_DISCONNECT:
1352 amd->active_srb->SRBState = SRB_DISCONNECT;
1353 amd->disc_count[amd->cur_target][amd->cur_lun]++;
1356 case MSG_SIMPLE_Q_TAG:
1358 struct amd_srb *disc_srb;
1360 if (amd->msgin_index < 1)
1362 disc_srb = &amd->SRB_array[amd->msgin_buf[1]];
1363 if (amd->active_srb != NULL
1364 || disc_srb->SRBState != SRB_DISCONNECT
1365 || disc_srb->pccb->ccb_h.target_id != amd->cur_target
1366 || disc_srb->pccb->ccb_h.target_lun != amd->cur_lun) {
1367 kprintf("amd%d: Unexpected tagged reselection "
1368 "for target %d, Issuing Abort\n", amd->unit,
1370 amd->msgout_buf[0] = MSG_ABORT;
1371 amd->msgout_len = 1;
1375 amd->active_srb = disc_srb;
1376 amd->disc_count[amd->cur_target][amd->cur_lun]--;
1380 case MSG_MESSAGE_REJECT:
1381 response = amdhandlemsgreject(amd);
1382 if (response == FALSE)
1383 amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
1395 /* Wait for enough of the message to begin validation */
1396 if (amd->msgin_index < 1)
1398 if (amd->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
1403 /* Wait for opcode */
1404 if (amd->msgin_index < 2)
1407 if (amd->msgin_buf[2] != MSG_EXT_SDTR) {
1413 * Wait until we have both args before validating
1414 * and acting on this message.
1416 * Add one to MSG_EXT_SDTR_LEN to account for
1417 * the extended message preamble.
1419 if (amd->msgin_index < (MSG_EXT_SDTR_LEN + 1))
1422 period = amd->msgin_buf[3];
1423 saved_offset = offset = amd->msgin_buf[4];
1424 clockrate = amdfindclockrate(amd, &period);
1425 if (offset > AMD_MAX_SYNC_OFFSET)
1426 offset = AMD_MAX_SYNC_OFFSET;
1427 if (period == 0 || offset == 0) {
1432 amdsetsync(amd, amd->cur_target, clockrate, period, offset,
1433 AMD_TRANS_ACTIVE|AMD_TRANS_GOAL);
1436 * See if we initiated Sync Negotiation
1437 * and didn't have to fall down to async
1440 if (amdsentmsg(amd, MSG_EXT_SDTR, /*full*/TRUE)) {
1442 if (saved_offset != offset) {
1443 /* Went too low - force async */
1448 * Send our own SDTR in reply
1451 kprintf("Sending SDTR!\n");
1452 amd->msgout_index = 0;
1453 amd->msgout_len = 0;
1454 amdconstructsdtr(amd, period, offset);
1455 amd->msgout_index = 0;
1461 case MSG_SAVEDATAPOINTER:
1462 case MSG_RESTOREPOINTERS:
1463 /* XXX Implement!!! */
1472 amd->msgout_index = 0;
1473 amd->msgout_len = 1;
1474 amd->msgout_buf[0] = MSG_MESSAGE_REJECT;
1480 amd_write8(amd, SCSICMDREG, SET_ATN_CMD);
1482 if (done && !response)
1483 /* Clear the outgoing message buffer */
1484 amd->msgout_len = 0;
1487 amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);
1493 amdfindclockrate(struct amd_softc *amd, u_int *period)
1498 for (i = 0; i < sizeof(tinfo_sync_period); i++) {
1499 u_int8_t *table_entry;
1501 table_entry = &tinfo_sync_period[i];
1502 if (*period <= *table_entry) {
1504 * When responding to a target that requests
1505 * sync, the requested rate may fall between
1506 * two rates that we can output, but still be
1507 * a rate that we can receive. Because of this,
1508 * we want to respond to the target with
1509 * the same rate that it sent to us even
1510 * if the period we use to send data to it
1511 * is lower. Only lower the response period
1515 *period = *table_entry;
1521 if (i == sizeof(tinfo_sync_period)) {
1522 /* Too slow for us. Use async transfers. */
1532 * See if we sent a particular extended message to the target.
1533 * If "full" is true, the target saw the full message.
1534 * If "full" is false, the target saw at least the first
1535 * byte of the message.
1538 amdsentmsg(struct amd_softc *amd, u_int msgtype, int full)
1546 while (index < amd->msgout_len) {
1547 if ((amd->msgout_buf[index] & MSG_IDENTIFYFLAG) != 0
1548 || amd->msgout_buf[index] == MSG_MESSAGE_REJECT)
1550 else if (amd->msgout_buf[index] >= MSG_SIMPLE_Q_TAG
1551 && amd->msgout_buf[index] < MSG_IGN_WIDE_RESIDUE) {
1552 /* Skip tag type and tag id */
1554 } else if (amd->msgout_buf[index] == MSG_EXTENDED) {
1555 /* Found a candidate */
1556 if (amd->msgout_buf[index+2] == msgtype) {
1559 end_index = index + 1
1560 + amd->msgout_buf[index + 1];
1562 if (amd->msgout_index > end_index)
1564 } else if (amd->msgout_index > index)
1569 panic("amdsentmsg: Inconsistent msg buffer");
1576 amdconstructsdtr(struct amd_softc *amd, u_int period, u_int offset)
1578 amd->msgout_buf[amd->msgout_index++] = MSG_EXTENDED;
1579 amd->msgout_buf[amd->msgout_index++] = MSG_EXT_SDTR_LEN;
1580 amd->msgout_buf[amd->msgout_index++] = MSG_EXT_SDTR;
1581 amd->msgout_buf[amd->msgout_index++] = period;
1582 amd->msgout_buf[amd->msgout_index++] = offset;
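/*
 * The bytes queued above form a standard SCSI-2 extended SDTR message:
 * 0x01 (EXTENDED MESSAGE), 0x03 (length), 0x01 (SDTR), the transfer
 * period factor in 4ns units, and the REQ/ACK offset.
 */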
1583 amd->msgout_len += 5;
1587 amdhandlemsgreject(struct amd_softc *amd)
1590 * If we had an outstanding SDTR for this
1591 * target, this is a signal that the target
1592 * is refusing negotiation. Also watch out
1593 * for rejected tag messages.
1595 struct amd_srb *srb;
1596 struct amd_target_info *targ_info;
1597 int response = FALSE;
1599 srb = amd->active_srb;
1600 targ_info = &amd->tinfo[amd->cur_target];
1601 if (amdsentmsg(amd, MSG_EXT_SDTR, /*full*/FALSE)) {
1602 /* note asynch xfers and clear flag */
1603 amdsetsync(amd, amd->cur_target, /*clockrate*/0,
1604 /*period*/0, /*offset*/0,
1605 AMD_TRANS_ACTIVE|AMD_TRANS_GOAL);
1606 kprintf("amd%d:%d: refuses synchronous negotiation. "
1607 "Using asynchronous transfers\n",
1608 amd->unit, amd->cur_target);
1609 } else if ((srb != NULL)
1610 && (srb->pccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
1611 struct ccb_trans_settings neg;
1612 #ifdef CAM_NEW_TRAN_CODE
1613 struct ccb_trans_settings_scsi *scsi = &neg.proto_specific.scsi;
1616 kprintf("amd%d:%d: refuses tagged commands. Performing "
1617 "non-tagged I/O\n", amd->unit, amd->cur_target);
1619 amdsettags(amd, amd->cur_target, FALSE);
1620 memset(&neg, 0, sizeof (neg));
1621 #ifdef CAM_NEW_TRAN_CODE
1622 scsi->valid = CTS_SCSI_VALID_TQ;
1625 neg.valid = CCB_TRANS_TQ_VALID;
1627 xpt_setup_ccb(&neg.ccb_h, srb->pccb->ccb_h.path, /*priority*/1);
1628 xpt_async(AC_TRANSFER_NEG, srb->pccb->ccb_h.path, &neg);
1631 * Resend the identify for this CCB as the target
1632 * may believe that the selection is invalid otherwise.
1634 if (amd->msgout_len != 0)
1635 bcopy(&amd->msgout_buf[0], &amd->msgout_buf[1],
1637 amd->msgout_buf[0] = MSG_IDENTIFYFLAG
1638 | srb->pccb->ccb_h.target_lun;
1640 if ((targ_info->disc_tag & AMD_CUR_DISCENB) != 0
1641 && (srb->pccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
1642 amd->msgout_buf[0] |= MSG_IDENTIFY_DISCFLAG;
1644 srb->pccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
1647 * Requeue all tagged commands for this target
1648 * currently in our possession so they can be
1649 * converted to untagged commands.
1651 amdcompletematch(amd, amd->cur_target, amd->cur_lun,
1652 AMD_TAG_WILDCARD, &amd->waiting_srbs,
1653 CAM_DEV_QFRZN|CAM_REQUEUE_REQ);
1656 * Otherwise, we ignore it.
1658 kprintf("amd%d:%d: Message reject received -- ignored\n",
1659 amd->unit, amd->cur_target);
1665 if (!(pSRB->SRBState & SRB_MSGIN_MULTI)) {
1666 if (bval == MSG_DISCONNECT) {
1667 pSRB->SRBState = SRB_DISCONNECT;
1668 } else if (bval == MSG_SAVEDATAPOINTER) {
1670 } else if ((bval == MSG_EXTENDED)
1671 || ((bval >= MSG_SIMPLE_Q_TAG)
1672 && (bval <= MSG_ORDERED_Q_TAG))) {
1673 pSRB->SRBState |= SRB_MSGIN_MULTI;
1674 pSRB->MsgInBuf[0] = bval;
1676 pSRB->pMsgPtr = &pSRB->MsgInBuf[1];
1677 } else if (bval == MSG_MESSAGE_REJECT) {
1678 amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
1680 if (pSRB->SRBState & DO_SYNC_NEGO) {
1683 } else if (bval == MSG_RESTOREPOINTERS) {
1688 } else { /* minx: */
1689 *pSRB->pMsgPtr = bval;
1692 if ((pSRB->MsgInBuf[0] >= MSG_SIMPLE_Q_TAG)
1693 && (pSRB->MsgInBuf[0] <= MSG_ORDERED_Q_TAG)) {
1694 if (pSRB->MsgCnt == 2) {
1696 pSRB = &amd->SRB_array[pSRB->MsgInBuf[1]];
1697 if ((pSRB->SRBState & SRB_DISCONNECT) == 0) {
1698 pSRB = amd->pTmpSRB;
1699 pSRB->SRBState = SRB_UNEXPECT_RESEL;
1700 pDCB->pActiveSRB = pSRB;
1701 pSRB->MsgOutBuf[0] = MSG_ABORT_TAG;
1702 EnableMsgOut2(amd, pSRB);
1704 if (pDCB->DCBFlag & ABORT_DEV_) {
1705 pSRB->SRBState = SRB_ABORT_SENT;
1706 EnableMsgOut1(amd, pSRB);
1708 pDCB->pActiveSRB = pSRB;
1709 pSRB->SRBState = SRB_DATA_XFER;
1712 } else if ((pSRB->MsgInBuf[0] == MSG_EXTENDED)
1713 && (pSRB->MsgCnt == 5)) {
1714 pSRB->SRBState &= ~(SRB_MSGIN_MULTI + DO_SYNC_NEGO);
1715 if ((pSRB->MsgInBuf[1] != 3)
1716 || (pSRB->MsgInBuf[2] != 1)) { /* reject_msg: */
1718 pSRB->MsgInBuf[0] = MSG_MESSAGE_REJECT;
1719 amd_write8(amd, SCSICMDREG, SET_ATN_CMD);
1720 } else if (!(pSRB->MsgInBuf[3])
1721 || !(pSRB->MsgInBuf[4])) {
1722 set_async: /* set async */
1724 pDCB = pSRB->pSRBDCB;
1725 /* disable sync & sync nego */
1726 pDCB->SyncMode &= ~(SYNC_ENABLE|SYNC_NEGO_DONE);
1727 pDCB->SyncPeriod = 0;
1728 pDCB->SyncOffset = 0;
1730 pDCB->tinfo.goal.period = 0;
1731 pDCB->tinfo.goal.offset = 0;
1733 pDCB->tinfo.current.period = 0;
1734 pDCB->tinfo.current.offset = 0;
1735 pDCB->tinfo.current.width =
1736 MSG_EXT_WDTR_BUS_8_BIT;
1738 pDCB->CtrlR3 = FAST_CLK; /* non_fast */
1739 pDCB->CtrlR4 &= 0x3f;
1740 pDCB->CtrlR4 |= EATER_25NS;
1742 } else {/* set sync */
1744 pDCB = pSRB->pSRBDCB;
1745 /* enable sync & sync nego */
1746 pDCB->SyncMode |= SYNC_ENABLE|SYNC_NEGO_DONE;
1748 /* set sync offset */
1749 pDCB->SyncOffset &= 0x0f0;
1750 pDCB->SyncOffset |= pSRB->MsgInBuf[4];
1752 /* set sync period */
1753 pDCB->MaxNegoPeriod = pSRB->MsgInBuf[3];
1755 wval = (u_int16_t) pSRB->MsgInBuf[3];
1759 if ((wval1 * 25) != wval) {
1762 bval = FAST_CLK|FAST_SCSI;
1763 pDCB->CtrlR4 &= 0x3f;
1768 pDCB->CtrlR4 |= EATER_25NS;
1770 pDCB->CtrlR3 = bval;
1771 pDCB->SyncPeriod = (u_int8_t) wval1;
1773 pDCB->tinfo.goal.period =
1774 tinfo_sync_period[pDCB->SyncPeriod - 4];
1775 pDCB->tinfo.goal.offset = pDCB->SyncOffset;
1776 pDCB->tinfo.current.period =
1777 tinfo_sync_period[pDCB->SyncPeriod - 4];
1778 pDCB->tinfo.current.offset = pDCB->SyncOffset;
1781 * program SCSI control register
1784 amd_write8(amd, SYNCPERIOREG, pDCB->SyncPeriod);
1785 amd_write8(amd, SYNCOFFREG, pDCB->SyncOffset);
1786 amd_write8(amd, CNTLREG3, pDCB->CtrlR3);
1787 amd_write8(amd, CNTLREG4, pDCB->CtrlR4);
1792 amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);
1798 amd_DataOutPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1800 DataIO_Comm(amd, pSRB, WRITE_DIRECTION);
1805 amd_DataInPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1807 DataIO_Comm(amd, pSRB, READ_DIRECTION);
1812 DataIO_Comm(struct amd_softc *amd, struct amd_srb *pSRB, u_int ioDir)
1814 struct amd_sg * psgl;
1817 if (pSRB->SGIndex < pSRB->SGcount) {
1818 amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD|ioDir);/* |EN_DMA_INT */
1820 if (!pSRB->SGToBeXferLen) {
1821 psgl = pSRB->pSGlist;
1822 pSRB->SGPhysAddr = psgl->SGXPtr;
1823 pSRB->SGToBeXferLen = psgl->SGXLen;
1825 lval = pSRB->SGToBeXferLen;
1826 amd_write8(amd, CTCREG_LOW, lval);
1827 amd_write8(amd, CTCREG_MID, lval >> 8);
1828 amd_write8(amd, CURTXTCNTREG, lval >> 16);
1830 amd_write32(amd, DMA_XferCnt, pSRB->SGToBeXferLen);
1832 amd_write32(amd, DMA_XferAddr, pSRB->SGPhysAddr);
1834 pSRB->SRBState = SRB_DATA_XFER;
1836 amd_write8(amd, SCSICMDREG, DMA_COMMAND|INFO_XFER_CMD);
1838 amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD|ioDir); /* |EN_DMA_INT */
1840 amd_write8(amd, DMA_Cmd, DMA_START_CMD|ioDir);/* |EN_DMA_INT */
1841 } else { /* xfer pad */
1842 if (pSRB->SGcount) {
1843 pSRB->AdaptStatus = H_OVER_UNDER_RUN;
1844 pSRB->SRBStatus |= OVER_RUN;
1846 amd_write8(amd, CTCREG_LOW, 0);
1847 amd_write8(amd, CTCREG_MID, 0);
1848 amd_write8(amd, CURTXTCNTREG, 0);
1850 pSRB->SRBState |= SRB_XFERPAD;
1851 amd_write8(amd, SCSICMDREG, DMA_COMMAND|XFER_PAD_BYTE);
1856 amd_CommandPhase1(struct amd_softc *amd, struct amd_srb *srb, u_int scsistat)
1858 amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
1859 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
1861 amdsetupcommand(amd, srb);
1863 srb->SRBState = SRB_COMMAND;
1864 amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
1869 amd_StatusPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1871 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
1872 pSRB->SRBState = SRB_STATUS;
1873 amd_write8(amd, SCSICMDREG, INITIATOR_CMD_CMPLTE);
1878 amd_MsgOutPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1880 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
1882 if (amd->msgout_len == 0) {
1883 amd->msgout_buf[0] = MSG_NOOP;
1884 amd->msgout_len = 1;
1886 amd_write8_multi(amd, SCSIFIFOREG, amd->msgout_buf, amd->msgout_len);
1887 amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
1892 amd_MsgInPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1894 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
1895 amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
1900 amd_NopPhase(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1906 amd_Disconnect(struct amd_softc * amd)
1908 struct amd_srb *srb;
1912 srb = amd->active_srb;
1913 amd->active_srb = NULL;
1914 amd->last_phase = SCSI_BUS_FREE;
1915 amd_write8(amd, SCSICMDREG, EN_SEL_RESEL);
1916 target = amd->cur_target;
1920 /* Invalid reselection */
1922 } else if (srb->SRBState & SRB_ABORT_SENT) {
1923 /* Clean up and complete this srb */
1925 while ((pSRB = TAILQ_FIRST(&amd->running_srbs)) != NULL) {
1926 /* XXX What about "done'ing" these srbs??? */
1927 if (pSRB->pSRBDCB == pDCB) {
1928 TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
1929 TAILQ_INSERT_HEAD(&amd->free_srbs, pSRB, links);
1935 if ((srb->SRBState & (SRB_START | SRB_MSGOUT))
1936 || !(srb->SRBState & (SRB_DISCONNECT | SRB_COMPLETED))) {
1937 srb->TargetStatus = AMD_SCSI_STAT_SEL_TIMEOUT;
1939 } else if (srb->SRBState & SRB_DISCONNECT) {
1940 if (!(srb->pccb->ccb_h.flags & CAM_TAG_ACTION_VALID))
1941 amd->untagged_srbs[target][lun] = srb;
1943 } else if (srb->SRBState & SRB_COMPLETED) {
1945 srb->SRBState = SRB_FREE;
1953 amd_Reselect(struct amd_softc *amd)
1955 struct amd_target_info *tinfo;
1956 u_int16_t disc_count;
1958 amd_clear_msg_state(amd);
1959 if (amd->active_srb != NULL) {
1960 /* Requeue the SRB for our attempted Selection */
1961 TAILQ_REMOVE(&amd->running_srbs, amd->active_srb, links);
1962 TAILQ_INSERT_HEAD(&amd->waiting_srbs, amd->active_srb, links);
1963 amd->active_srb = NULL;
1966 amd->cur_target = amd_read8(amd, SCSIFIFOREG);
1967 amd->cur_target ^= amd->HostID_Bit;
1968 amd->cur_target = ffs(amd->cur_target) - 1;
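/*
 * The first FIFO byte of a reselection has two ID bits set: our own and
 * the reselecting target's. XORing away HostID_Bit and taking ffs()-1
 * leaves the target's SCSI ID in cur_target.
 */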
1969 amd->cur_lun = amd_read8(amd, SCSIFIFOREG) & 7;
1970 tinfo = &amd->tinfo[amd->cur_target];
1971 amd->active_srb = amd->untagged_srbs[amd->cur_target][amd->cur_lun];
1972 disc_count = amd->disc_count[amd->cur_target][amd->cur_lun];
1973 if (disc_count == 0) {
1974 kprintf("amd%d: Unexpected reselection for target %d, "
1975 "Issuing Abort\n", amd->unit, amd->cur_target);
1976 amd->msgout_buf[0] = MSG_ABORT;
1977 amd->msgout_len = 1;
1978 amd_write8(amd, SCSICMDREG, SET_ATN_CMD);
1980 if (amd->active_srb != NULL) {
1981 amd->disc_count[amd->cur_target][amd->cur_lun]--;
1982 amd->untagged_srbs[amd->cur_target][amd->cur_lun] = NULL;
1985 amd_write8(amd, SCSIDESTIDREG, amd->cur_target);
1986 amd_write8(amd, SYNCPERIOREG, tinfo->sync_period_reg);
1987 amd_write8(amd, SYNCOFFREG, tinfo->sync_offset_reg);
1988 amd_write8(amd, CNTLREG1, tinfo->CtrlR1);
1989 amd_write8(amd, CNTLREG3, tinfo->CtrlR3);
1990 amd_write8(amd, CNTLREG4, tinfo->CtrlR4);
1991 amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);/* drop /ACK */
1992 amd->last_phase = SCSI_NOP0;
1996 SRBdone(struct amd_softc *amd, struct amd_srb *pSRB)
1998 u_int8_t bval, i, status;
2000 struct ccb_scsiio *pcsio;
2001 struct amd_sg *ptr2;
2003 u_int target_id, target_lun;
2006 pcsio = &pccb->csio;
2007 target_id = pSRB->pccb->ccb_h.target_id;
2008 target_lun = pSRB->pccb->ccb_h.target_lun;
2010 CAM_DEBUG(pccb->ccb_h.path, CAM_DEBUG_TRACE,
2011 ("SRBdone - TagNumber %d\n", pSRB->TagNumber));
2013 if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2014 bus_dmasync_op_t op;
2016 if ((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
2017 op = BUS_DMASYNC_POSTREAD;
2019 op = BUS_DMASYNC_POSTWRITE;
2020 bus_dmamap_sync(amd->buffer_dmat, pSRB->dmamap, op);
2021 bus_dmamap_unload(amd->buffer_dmat, pSRB->dmamap);
2024 status = pSRB->TargetStatus;
2025 pccb->ccb_h.status = CAM_REQ_CMP;
2027 if (pSRB->SRBFlag & AUTO_REQSENSE) {
2028 pSRB->SRBFlag &= ~AUTO_REQSENSE;
2029 pSRB->AdaptStatus = 0;
2030 pSRB->TargetStatus = SCSI_STATUS_CHECK_COND;
2032 if (status == SCSI_STATUS_CHECK_COND) {
2033 pccb->ccb_h.status = CAM_SEL_TIMEOUT;
2036 *((u_int32_t *)&(pSRB->CmdBlock[0])) = pSRB->Segment0[0];
2038 pcsio->sense_resid = pcsio->sense_len
2039 - pSRB->TotalXferredLen;
2040 pSRB->TotalXferredLen = pSRB->Segment1[1];
2041 if (pSRB->TotalXferredLen) {
2043 pcsio->resid = pcsio->dxfer_len
2044 - pSRB->TotalXferredLen;
2045 /* The resid field contains valid data */
2046 /* Flush resid bytes on complete */
2048 pcsio->scsi_status = SCSI_STATUS_CHECK_COND;
2050 pccb->ccb_h.status = CAM_AUTOSNS_VALID|CAM_SCSI_STATUS_ERROR;
2054 if (status == SCSI_STATUS_CHECK_COND) {
2056 if ((pSRB->SGIndex < pSRB->SGcount)
2057 && (pSRB->SGcount) && (pSRB->SGToBeXferLen)) {
2058 bval = pSRB->SGcount;
2059 swlval = pSRB->SGToBeXferLen;
2060 ptr2 = pSRB->pSGlist;
2062 for (i = pSRB->SGIndex + 1; i < bval; i++) {
2063 swlval += ptr2->SGXLen;
2067 pcsio->resid = (u_int32_t) swlval;
2070 kprintf("XferredLen=%8x,NotYetXferLen=%8x,",
2071 pSRB->TotalXferredLen, swlval);
2074 if ((pcsio->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0) {
2076 kprintf("RequestSense..................\n");
2078 RequestSense(amd, pSRB);
2081 pcsio->scsi_status = SCSI_STATUS_CHECK_COND;
2082 pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
2084 } else if (status == SCSI_STATUS_QUEUE_FULL) {
2085 pSRB->AdaptStatus = 0;
2086 pSRB->TargetStatus = 0;
2087 pcsio->scsi_status = SCSI_STATUS_QUEUE_FULL;
2088 pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
2090 } else if (status == AMD_SCSI_STAT_SEL_TIMEOUT) {
2091 pSRB->AdaptStatus = H_SEL_TIMEOUT;
2092 pSRB->TargetStatus = 0;
2094 pcsio->scsi_status = AMD_SCSI_STAT_SEL_TIMEOUT;
2095 pccb->ccb_h.status = CAM_SEL_TIMEOUT;
2096 } else if (status == SCSI_STATUS_BUSY) {
2098 kprintf("DC390: target busy at %s %d\n",
2099 __FILE__, __LINE__);
2101 pcsio->scsi_status = SCSI_STATUS_BUSY;
2102 pccb->ccb_h.status = CAM_SCSI_BUSY;
2103 } else if (status == SCSI_STATUS_RESERV_CONFLICT) {
2105 kprintf("DC390: target reserved at %s %d\n",
2106 __FILE__, __LINE__);
2108 pcsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT;
2109 pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; /* XXX */
2111 pSRB->AdaptStatus = 0;
2113 kprintf("DC390: driver stuffup at %s %d\n",
2114 __FILE__, __LINE__);
2116 pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
2119 status = pSRB->AdaptStatus;
2120 if (status & H_OVER_UNDER_RUN) {
2121 pSRB->TargetStatus = 0;
2123 pccb->ccb_h.status = CAM_DATA_RUN_ERR;
2124 } else if (pSRB->SRBStatus & PARITY_ERROR) {
2126 kprintf("DC390: driver stuffup %s %d\n",
2127 __FILE__, __LINE__);
2129 /* Driver failed to perform operation */
2130 pccb->ccb_h.status = CAM_UNCOR_PARITY;
2131 } else { /* No error */
2132 pSRB->AdaptStatus = 0;
2133 pSRB->TargetStatus = 0;
2135 /* there is no error, (sense is invalid) */
2140 if ((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2141 /* CAM request not yet complete => freeze the device queue */
2142 xpt_freeze_devq(pccb->ccb_h.path, 1);
2143 pccb->ccb_h.status |= CAM_DEV_QFRZN;
2145 TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
2146 TAILQ_INSERT_HEAD(&amd->free_srbs, pSRB, links);
2154 amd_ResetSCSIBus(struct amd_softc * amd)
2157 amd->ACBFlag |= RESET_DEV;
2158 amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
2159 amd_write8(amd, SCSICMDREG, RST_SCSI_BUS_CMD);
2165 amd_ScsiRstDetect(struct amd_softc * amd)
2170 kprintf("amd_ScsiRstDetect \n");
2174 while (--wlval) { /* delay 1 sec */
2179 amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
2180 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
2182 if (amd->ACBFlag & RESET_DEV) {
2183 amd->ACBFlag |= RESET_DONE;
2185 amd->ACBFlag |= RESET_DETECT;
2187 amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
2188 AMD_TAG_WILDCARD, &amd->running_srbs,
2189 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
2190 amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
2191 AMD_TAG_WILDCARD, &amd->waiting_srbs,
2192 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
2193 amd->active_srb = NULL;
2202 RequestSense(struct amd_softc *amd, struct amd_srb *pSRB)
2205 struct ccb_scsiio *pcsio;
2208 pcsio = &pccb->csio;
2210 pSRB->SRBFlag |= AUTO_REQSENSE;
2211 pSRB->Segment0[0] = *((u_int32_t *) & (pSRB->CmdBlock[0]));
2212 pSRB->Segment0[1] = *((u_int32_t *) & (pSRB->CmdBlock[4]));
2213 pSRB->Segment1[0] = (pSRB->ScsiCmdLen << 8) + pSRB->SGcount;
2214 pSRB->Segment1[1] = pSRB->TotalXferredLen;
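/*
 * Stash the original CDB and transfer bookkeeping in Segment0/Segment1
 * so SRBdone() can restore them once the automatic REQUEST SENSE
 * completes.
 */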
2216 pSRB->AdaptStatus = 0;
2217 pSRB->TargetStatus = 0;
2219 pSRB->Segmentx.SGXPtr = (u_int32_t) vtophys(&pcsio->sense_data);
2220 pSRB->Segmentx.SGXLen = (u_int32_t) pcsio->sense_len;
2222 pSRB->pSGlist = &pSRB->Segmentx;
2226 *((u_int32_t *) & (pSRB->CmdBlock[0])) = 0x00000003;
2227 pSRB->CmdBlock[1] = pSRB->pccb->ccb_h.target_lun << 5;
2228 *((u_int16_t *) & (pSRB->CmdBlock[4])) = pcsio->sense_len;
2229 pSRB->ScsiCmdLen = 6;
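/*
 * On the little-endian machines this driver targets, the stores above
 * build the 6-byte REQUEST SENSE CDB: opcode 0x03 in byte 0, the LUN in
 * bits 5-7 of byte 1, and the allocation length in byte 4 (byte 5, the
 * control byte, stays zero since sense_len fits in one byte).
 */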
2231 pSRB->TotalXferredLen = 0;
2232 pSRB->SGToBeXferLen = 0;
2233 if (amdstart(amd, pSRB) != 0) {
2234 TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
2235 TAILQ_INSERT_HEAD(&amd->waiting_srbs, pSRB, links);
2240 amd_InvalidCmd(struct amd_softc * amd)
2242 struct amd_srb *srb;
2244 srb = amd->active_srb;
2245 if (srb->SRBState & (SRB_START|SRB_MSGOUT))
2246 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
2250 amd_linkSRB(struct amd_softc *amd)
2253 struct amd_srb *psrb;
2256 count = amd->SRBCount;
2258 for (i = 0; i < count; i++) {
2259 psrb = (struct amd_srb *)&amd->SRB_array[i];
2260 psrb->TagNumber = i;
2263 * Create the dmamap. This is no longer optional!
2265 error = bus_dmamap_create(amd->buffer_dmat, 0, &psrb->dmamap);
2267 device_printf(amd->dev, "Error %d creating buffer "
2268 "dmamap!\n", error);
2271 TAILQ_INSERT_TAIL(&amd->free_srbs, psrb, links);
2276 amd_EnDisableCE(struct amd_softc *amd, int mode, int *regval)
2278 if (mode == ENABLE_CE) {
2283 pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
2284 if (mode == DISABLE_CE) {
2285 pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
2291 amd_EEpromOutDI(struct amd_softc *amd, int *regval, int Carry)
2299 pci_write_config(amd->dev, *regval, bval, /*bytes*/1);
2303 pci_write_config(amd->dev, *regval, bval, /*bytes*/1);
2305 pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
2310 amd_EEpromInDO(struct amd_softc *amd)
2312 pci_write_config(amd->dev, 0x80, 0x80, /*bytes*/1);
2314 pci_write_config(amd->dev, 0x80, 0x40, /*bytes*/1);
2316 if (pci_read_config(amd->dev, 0, /*bytes*/1) == 0x22)
2322 EEpromGetData1(struct amd_softc *amd)
2329 for (i = 0; i < 16; i++) {
2331 carryFlag = amd_EEpromInDO(amd);
2338 amd_Prepare(struct amd_softc *amd, int *regval, u_int8_t EEpromCmd)
2345 for (i = 0; i < 9; i++) {
2346 amd_EEpromOutDI(amd, regval, carryFlag);
2347 carryFlag = (EEpromCmd & j) ? 1 : 0;
2353 amd_ReadEEprom(struct amd_softc *amd)
2360 ptr = (u_int16_t *)&amd->eepromBuf[0];
2362 for (i = 0; i < 0x40; i++) {
2363 amd_EnDisableCE(amd, ENABLE_CE, &regval);
2364 amd_Prepare(amd, &regval, cmd);
2365 *ptr = EEpromGetData1(amd);
2368 amd_EnDisableCE(amd, DISABLE_CE, &regval);
2373 amd_load_defaults(struct amd_softc *amd)
2377 bzero(&amd->eepromBuf, sizeof amd->eepromBuf);
2378 for (target = 0; target < MAX_SCSI_ID; target++)
2379 amd->eepromBuf[target << 2] =
2380 (TAG_QUEUING|EN_DISCONNECT|SYNC_NEGO|PARITY_CHK);
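/*
 * The EEPROM image reserves four bytes per target; byte 0 holds the
 * mode-1 feature flags seeded here, and amd_init() later parses the
 * same records through the PEEprom overlay.
 */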
2381 amd->eepromBuf[EE_ADAPT_SCSI_ID] = 7;
2382 amd->eepromBuf[EE_MODE2] = ACTIVE_NEGATION|LUN_CHECK|GREATER_1G;
2383 amd->eepromBuf[EE_TAG_CMD_NUM] = 4;
2387 amd_load_eeprom_or_defaults(struct amd_softc *amd)
2389 u_int16_t wval, *ptr;
2392 amd_ReadEEprom(amd);
2394 ptr = (u_int16_t *) & amd->eepromBuf[0];
2395 for (i = 0; i < EE_DATA_SIZE; i += 2, ptr++)
2398 if (wval != EE_CHECKSUM) {
2400 kprintf("amd%d: SEEPROM data unavailable. "
2401 "Using default device parameters.\n",
2403 amd_load_defaults(amd);
2408 **********************************************************************
2409 * Function : static int amd_init (device_t dev)
2410 * Purpose : initialize the internal structures for a given SCSI host
2411 * Inputs : dev - the PCI device for this host adapter
2412 **********************************************************************
2415 amd_init(device_t dev)
2417 struct amd_softc *amd = device_get_softc(dev);
2418 struct resource *iores;
2422 rid = PCI_BASE_ADDR0;
2423 iores = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0, ~0, 1,
2425 if (iores == NULL) {
2427 kprintf("amd_init: bus_alloc_resource failure!\n");
2430 amd->tag = rman_get_bustag(iores);
2431 amd->bsh = rman_get_bushandle(iores);
2433 /* DMA tag for mapping buffers into device visible space. */
2434 if (bus_dma_tag_create(/*parent_dmat*/NULL, /*alignment*/1,
2436 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
2437 /*highaddr*/BUS_SPACE_MAXADDR,
2438 /*filter*/NULL, /*filterarg*/NULL,
2439 /*maxsize*/MAXBSIZE, /*nsegments*/AMD_NSEG,
2440 /*maxsegsz*/AMD_MAXTRANSFER_SIZE,
2441 /*flags*/BUS_DMA_ALLOCNOW,
2442 &amd->buffer_dmat) != 0) {
2444 kprintf("amd_init: bus_dma_tag_create failure!\n");
2447 TAILQ_INIT(&amd->free_srbs);
2448 TAILQ_INIT(&amd->running_srbs);
2449 TAILQ_INIT(&amd->waiting_srbs);
2450 amd->last_phase = SCSI_BUS_FREE;
2452 amd->unit = device_get_unit(dev);
2453 amd->SRBCount = MAX_SRB_CNT;
2455 amd_load_eeprom_or_defaults(amd);
2457 if (amd->eepromBuf[EE_MODE2] & LUN_CHECK) {
2462 amd->AdaptSCSIID = amd->eepromBuf[EE_ADAPT_SCSI_ID];
2463 amd->HostID_Bit = (1 << amd->AdaptSCSIID);
2464 amd->AdaptSCSILUN = 0;
2465 /* (eepromBuf[EE_TAG_CMD_NUM]) << 2; */
2467 amd->Gmode2 = amd->eepromBuf[EE_MODE2];
2469 for (i = 0; i <= amd->max_id; i++) {
2471 if (amd->AdaptSCSIID != i) {
2472 struct amd_target_info *tinfo;
2475 tinfo = &amd->tinfo[i];
2476 prom = (PEEprom)&amd->eepromBuf[i << 2];
2477 if ((prom->EE_MODE1 & EN_DISCONNECT) != 0) {
2478 tinfo->disc_tag |= AMD_USR_DISCENB;
2479 if ((prom->EE_MODE1 & TAG_QUEUING) != 0)
2480 tinfo->disc_tag |= AMD_USR_TAGENB;
2482 if ((prom->EE_MODE1 & SYNC_NEGO) != 0) {
2483 tinfo->user.period =
2484 eeprom_period[prom->EE_SPEED];
2485 tinfo->user.offset = AMD_MAX_SYNC_OFFSET;
2487 tinfo->CtrlR1 = amd->AdaptSCSIID;
2488 if ((prom->EE_MODE1 & PARITY_CHK) != 0)
2489 tinfo->CtrlR1 |= PARITY_ERR_REPO;
2490 tinfo->CtrlR3 = FAST_CLK;
2491 tinfo->CtrlR4 = EATER_25NS;
2492 if ((amd->eepromBuf[EE_MODE2] & ACTIVE_NEGATION) != 0)
2493 tinfo->CtrlR4 |= NEGATE_REQACKDATA;
2496 amd_write8(amd, SCSITIMEOUTREG, 153); /* 250ms selection timeout */
2497 /* Conversion factor = 0, 40MHz clock */
2498 amd_write8(amd, CLKFACTREG, CLK_FREQ_40MHZ);
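/*
 * Rough check of the 250ms figure, assuming the usual Am53C974/53C9x
 * relation time = reg * 8192 * CCF / clock: with a 40MHz clock the
 * conversion factor is 8, so 153 * 8192 * 8 / 40MHz is roughly 251ms.
 */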
2499 /* NOP cmd - clear command register */
2500 amd_write8(amd, SCSICMDREG, NOP_CMD);
2501 amd_write8(amd, CNTLREG2, EN_FEATURE|EN_SCSI2_CMD);
2502 amd_write8(amd, CNTLREG3, FAST_CLK);
2504 if (amd->eepromBuf[EE_MODE2] & ACTIVE_NEGATION) {
2505 bval |= NEGATE_REQACKDATA;
2507 amd_write8(amd, CNTLREG4, bval);
2509 /* Disable SCSI bus reset interrupt */
2510 amd_write8(amd, CNTLREG1, DIS_INT_ON_SCSI_RST);
2516 * attach and init a host adapter
2519 amd_attach(device_t dev)
2521 struct cam_devq *devq; /* Device Queue to use for this SIM */
2523 struct amd_softc *amd = device_get_softc(dev);
2524 int unit = device_get_unit(dev);
2527 struct resource *irqres;
2529 if (amd_init(dev)) {
2531 kprintf("amd_attach: amd_init failure!\n");
2535 /* Reset Pending INT */
2536 intstat = amd_read8(amd, INTSTATREG);
2538 /* After setting up the adapter, map our interrupt */
2540 irqres = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
2541 RF_SHAREABLE | RF_ACTIVE);
2542 if (irqres == NULL ||
2543 bus_setup_intr(dev, irqres, 0, amd_intr, amd, &ih, NULL)
2546 kprintf("amd%d: unable to register interrupt handler!\n",
2552 * Now let the CAM generic SCSI layer find the SCSI devices on
2553 * the bus and start the request queue running.
2554 * Create the device queue for our SIM; MAX_START_JOB bounds the
2555 * number of simultaneous transactions.
2557 devq = cam_simq_alloc(MAX_START_JOB);
2560 kprintf("amd_attach: cam_simq_alloc failure!\n");
2564 amd->psim = cam_sim_alloc(amd_action, amd_poll, "amd",
2565 amd, amd->unit, 1, MAX_TAGS_CMD_QUEUE,
2567 cam_simq_release(devq);
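/*
 * The devq reference is dropped here unconditionally; a successfully
 * allocated SIM is expected to hold its own reference to the queue.
 */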
2568 if (amd->psim == NULL) {
2570 kprintf("amd_attach: cam_sim_alloc failure!\n");
2574 if (xpt_bus_register(amd->psim, 0) != CAM_SUCCESS) {
2575 cam_sim_free(amd->psim);
2577 kprintf("amd_attach: xpt_bus_register failure!\n");
2581 if (xpt_create_path(&amd->ppath, /* periph */ NULL,
2582 cam_sim_path(amd->psim), CAM_TARGET_WILDCARD,
2583 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2584 xpt_bus_deregister(cam_sim_path(amd->psim));
2585 cam_sim_free(amd->psim);
2587 kprintf("amd_attach: xpt_create_path failure!\n");
2595 amd_probe(device_t dev)
2597 if (pci_get_devid(dev) == PCI_DEVICE_ID_AMD53C974) {
2598 device_set_desc(dev,
2599 "Tekram DC390(T)/AMD53c974 SCSI Host Adapter");
2605 static device_method_t amd_methods[] = {
2606 /* Device interface */
2607 DEVMETHOD(device_probe, amd_probe),
2608 DEVMETHOD(device_attach, amd_attach),
2612 static driver_t amd_driver = {
2613 "amd", amd_methods, sizeof(struct amd_softc)
2616 static devclass_t amd_devclass;
2617 DRIVER_MODULE(amd, pci, amd_driver, amd_devclass, 0, 0);