/*********************************************************************
 * BY : C.L. Huang	(ching@tekram.com.tw)
 *	Erich Chen	(erich@tekram.com.tw)
 * Description: Device Driver for the amd53c974 PCI Bus Master
 *		SCSI Host adapter found on cards such as
 *		the Tekram DC-390(T).
 * (C)Copyright 1995-1999 Tekram Technology Co., Ltd.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *********************************************************************
 * $FreeBSD: src/sys/pci/amd.c,v 1.3.2.2 2001/06/02 04:32:50 nyan Exp $
 * $DragonFly: src/sys/dev/disk/amd/amd.c,v 1.9 2005/10/12 17:35:50 dillon Exp $
 *********************************************************************
 * REV#   DATE	    NAME	DESCRIPTION
 * 1.00  07/02/96   CLH		First release for RELEASE-2.1.0
 * 1.01  08/20/96   CLH		Update for RELEASE-2.1.5
 * 1.02  11/06/96   CLH		Fixed more than 1 LUN scanning
 * 1.03  12/20/96   CLH		Modify to support 2.2-ALPHA
 * 1.04  12/26/97   CLH		Modify to support RELEASE-2.2.5
 * 1.05  01/01/99   ERICH CHEN	Modify to support RELEASE-3.0.x (CAM)
 *********************************************************************
 */
/* #define AMD_DEBUG0		*/
/* #define AMD_DEBUG_SCSI_PHASE	*/

#include <sys/param.h>

#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/queue.h>

#include <sys/kernel.h>
#include <sys/thread2.h>

#include <machine/bus_pio.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/resource.h>

#include <bus/cam/cam.h>
#include <bus/cam/cam_ccb.h>
#include <bus/cam/cam_sim.h>
#include <bus/cam/cam_xpt_sim.h>
#include <bus/cam/cam_debug.h>

#include <bus/cam/scsi/scsi_all.h>
#include <bus/cam/scsi/scsi_message.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#define PCI_DEVICE_ID_AMD53C974		0x20201022ul
#define PCI_BASE_ADDR0			0x10

typedef u_int (phase_handler_t)(struct amd_softc *, struct amd_srb *, u_int);
typedef phase_handler_t *phase_handler_func_t;
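
/*
 * The core of this driver is a per-phase state engine.  Two tables of
 * phase_handler_func_t, amd_SCSI_phase0[] and amd_SCSI_phase1[], are
 * indexed by the SCSI phase bits of the chip's status register: the
 * phase0 handler post-processes the phase just left, and the phase1
 * handler sets up the phase just entered.  The interrupt handler
 * drives the engine roughly like this (see amd_intr() below):
 *
 *	scsistat = amd_SCSI_phase0[amd->last_phase](amd, pSRB, scsistat);
 *	amd->last_phase = scsistat & SCSI_PHASE_MASK;
 *	amd_SCSI_phase1[amd->last_phase](amd, pSRB, scsistat);
 */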
static void amd_intr(void *vamd);
static int amdstart(struct amd_softc *amd, struct amd_srb *pSRB);
static phase_handler_t amd_NopPhase;

static phase_handler_t amd_DataOutPhase0;
static phase_handler_t amd_DataInPhase0;
#define amd_CommandPhase0 amd_NopPhase
static phase_handler_t amd_StatusPhase0;
static phase_handler_t amd_MsgOutPhase0;
static phase_handler_t amd_MsgInPhase0;
static phase_handler_t amd_DataOutPhase1;
static phase_handler_t amd_DataInPhase1;
static phase_handler_t amd_CommandPhase1;
static phase_handler_t amd_StatusPhase1;
static phase_handler_t amd_MsgOutPhase1;
static phase_handler_t amd_MsgInPhase1;

static void	amdsetupcommand(struct amd_softc *amd, struct amd_srb *srb);
static int	amdparsemsg(struct amd_softc *amd);
static int	amdhandlemsgreject(struct amd_softc *amd);
static void	amdconstructsdtr(struct amd_softc *amd,
				 u_int period, u_int offset);
static u_int	amdfindclockrate(struct amd_softc *amd, u_int *period);
static int	amdsentmsg(struct amd_softc *amd, u_int msgtype, int full);

static void DataIO_Comm(struct amd_softc *amd, struct amd_srb *pSRB, u_int dir);
static void amd_Disconnect(struct amd_softc *amd);
static void amd_Reselect(struct amd_softc *amd);
static void SRBdone(struct amd_softc *amd, struct amd_srb *pSRB);
static void amd_ScsiRstDetect(struct amd_softc *amd);
static void amd_ResetSCSIBus(struct amd_softc *amd);
static void RequestSense(struct amd_softc *amd, struct amd_srb *pSRB);
static void amd_InvalidCmd(struct amd_softc *amd);

static void amd_timeout(void *arg1);
static void amd_reset(struct amd_softc *amd);
static u_int8_t *phystovirt(struct amd_srb *pSRB, u_int32_t xferCnt);

void    amd_linkSRB(struct amd_softc *amd);
static int amd_init(device_t);
static void amd_load_defaults(struct amd_softc *amd);
static void amd_load_eeprom_or_defaults(struct amd_softc *amd);
static int amd_EEpromInDO(struct amd_softc *amd);
static u_int16_t EEpromGetData1(struct amd_softc *amd);
static void amd_EnDisableCE(struct amd_softc *amd, int mode, int *regval);
static void amd_EEpromOutDI(struct amd_softc *amd, int *regval, int Carry);
static void amd_Prepare(struct amd_softc *amd, int *regval, u_int8_t EEpromCmd);
static void amd_ReadEEprom(struct amd_softc *amd);

static int amd_probe(device_t);
static int amd_attach(device_t);
static void amdcompletematch(struct amd_softc *amd, target_id_t target,
			     lun_id_t lun, u_int tag, struct srb_queue *queue,
			     cam_status status);
static void amdsetsync(struct amd_softc *amd, u_int target, u_int clockrate,
		       u_int period, u_int offset, u_int type);
static void amdsettags(struct amd_softc *amd, u_int target, int tagenb);

static __inline void amd_clear_msg_state(struct amd_softc *amd);

static __inline void
amd_clear_msg_state(struct amd_softc *amd)
{
	amd->msgout_len = 0;
	amd->msgout_index = 0;
	amd->msgin_index = 0;
}
/* CAM SIM entry points */
#define ccb_srb_ptr	spriv_ptr0
#define ccb_amd_ptr	spriv_ptr1
static void amd_action(struct cam_sim *sim, union ccb *ccb);
static void amd_poll(struct cam_sim *sim);

/*
 * State engine function tables indexed by SCSI phase number
 */
phase_handler_func_t amd_SCSI_phase0[] = {
	amd_DataOutPhase0,
	amd_DataInPhase0,
	amd_CommandPhase0,
	amd_StatusPhase0,
	amd_NopPhase,
	amd_NopPhase,
	amd_MsgOutPhase0,
	amd_MsgInPhase0
};

phase_handler_func_t amd_SCSI_phase1[] = {
	amd_DataOutPhase1,
	amd_DataInPhase1,
	amd_CommandPhase1,
	amd_StatusPhase1,
	amd_NopPhase,
	amd_NopPhase,
	amd_MsgOutPhase1,
	amd_MsgInPhase1
};

/*
 * EEProm/BIOS negotiation periods
 */
u_int8_t eeprom_period[] = {
	 25,	/* 10.0MHz */
	 32,	/*  8.0MHz */
	 38,	/*  6.6MHz */
	 44,	/*  5.7MHz */
	 50,	/*  5.0MHz */
	 63,	/*  4.0MHz */
	 83,	/*  3.0MHz */
	125,	/*  2.0MHz */
};

/*
 * chip clock setting to SCSI specified sync parameter table.
 */
u_int8_t tinfo_sync_period[] = {
	25,	/* 10.0 */
	32,	/*  8.0 */
	38,	/*  6.6 */
	44,	/*  5.7 */
	50,	/*  5.0 */
	57,	/*  4.4 */
	63,	/*  4.0 */
	70,	/*  3.6 */
	76,	/*  3.3 */
	83,	/*  3.0 */
};
static __inline struct amd_srb *
amdgetsrb(struct amd_softc *amd)
{
	struct amd_srb *pSRB;

	crit_enter();
	pSRB = TAILQ_FIRST(&amd->free_srbs);
	if (pSRB)
		TAILQ_REMOVE(&amd->free_srbs, pSRB, links);
	crit_exit();
	return (pSRB);
}
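
/*
 * Load the CDB for the current request into the chip's SCSI FIFO.
 * When the SRB is in AUTO_REQSENSE mode, a REQUEST SENSE CDB is built
 * locally instead of using the original command block.
 */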
static void
amdsetupcommand(struct amd_softc *amd, struct amd_srb *srb)
{
	struct scsi_request_sense sense_cmd;
	struct ccb_scsiio *csio;
	u_int8_t *cdb;
	u_int cdb_len;

	csio = &srb->pccb->csio;

	if (srb->SRBFlag & AUTO_REQSENSE) {
		sense_cmd.opcode = REQUEST_SENSE;
		sense_cmd.byte2 = srb->pccb->ccb_h.target_lun << 5;
		sense_cmd.unused[0] = 0;
		sense_cmd.unused[1] = 0;
		sense_cmd.length = csio->sense_len;
		sense_cmd.control = 0;
		cdb = &sense_cmd.opcode;
		cdb_len = sizeof(sense_cmd);
	} else {
		cdb = &srb->CmdBlock[0];
		cdb_len = srb->ScsiCmdLen;
	}
	amd_write8_multi(amd, SCSIFIFOREG, cdb, cdb_len);
}
/*
 * Attempt to start a waiting transaction.  Interrupts must be disabled
 * upon entry to this function.
 */
static void
amdrunwaiting(struct amd_softc *amd) {
	struct amd_srb *srb;

	if (amd->last_phase != SCSI_BUS_FREE)
		return;

	srb = TAILQ_FIRST(&amd->waiting_srbs);
	if (srb == NULL)
		return;

	if (amdstart(amd, srb) == 0) {
		TAILQ_REMOVE(&amd->waiting_srbs, srb, links);
		TAILQ_INSERT_HEAD(&amd->running_srbs, srb, links);
	}
}
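
/*
 * bus_dmamap_load() callback: copy the returned DMA segments into the
 * SRB's scatter/gather list, sync the buffer map, and queue the SRB.
 * On a mapping error the SRB is returned to the free list and the CCB
 * is completed with an error status.
 */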
static void
amdexecutesrb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	struct amd_srb *srb;
	union ccb *ccb;
	struct amd_softc *amd;

	srb = (struct amd_srb *)arg;
	ccb = srb->pccb;
	amd = (struct amd_softc *)ccb->ccb_h.ccb_amd_ptr;

	if (error != 0) {
		if (error != EFBIG)
			printf("amd%d: Unexpected error 0x%x returned from "
			       "bus_dmamap_load\n", amd->unit, error);
		if (ccb->ccb_h.status == CAM_REQ_INPROG) {
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
			ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
		}
		TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
		xpt_done(ccb);
		return;
	}

	if (nseg != 0) {
		struct amd_sg *sg;
		bus_dma_segment_t *end_seg;
		bus_dmasync_op_t op;

		end_seg = dm_segs + nseg;

		/* Copy the segments into our SG list */
		srb->pSGlist = &srb->SGsegment[0];
		sg = srb->pSGlist;
		while (dm_segs < end_seg) {
			sg->SGXLen = dm_segs->ds_len;
			sg->SGXPtr = dm_segs->ds_addr;
			sg++;
			dm_segs++;
		}

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(amd->buffer_dmat, srb->dmamap, op);
	}
	srb->SGcount = nseg;
	srb->SGIndex = 0;
	srb->AdaptStatus = 0;
	srb->TargetStatus = 0;
	srb->TotalXferredLen = 0;
	srb->SGToBeXferLen = 0;

	crit_enter();
	/*
	 * Last time we need to check if this CCB needs to
	 * be aborted.
	 */
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		if (nseg != 0)
			bus_dmamap_unload(amd->buffer_dmat, srb->dmamap);
		TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
		xpt_done(ccb);
		crit_exit();
		return;
	}
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	/* XXX Need a timeout handler */
	callout_reset(&ccb->ccb_h.timeout_ch, (ccb->ccb_h.timeout * hz) / 1000,
		      amd_timeout, srb);

	TAILQ_INSERT_TAIL(&amd->waiting_srbs, srb, links);
	amdrunwaiting(amd);
	crit_exit();
}
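
/*
 * CAM SIM action entry point: dispatch on the CCB function code.
 * XPT_SCSI_IO requests are bound to an SRB and handed to the DMA
 * engine; the remaining cases service path inquiry, transfer-setting,
 * reset and geometry requests inline.
 */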
static void
amd_action(struct cam_sim *psim, union ccb *pccb)
{
	struct amd_softc *amd;
	u_int target_id, target_lun;

	CAM_DEBUG(pccb->ccb_h.path, CAM_DEBUG_TRACE, ("amd_action\n"));

	amd = (struct amd_softc *) cam_sim_softc(psim);
	target_id = pccb->ccb_h.target_id;
	target_lun = pccb->ccb_h.target_lun;

	switch (pccb->ccb_h.func_code) {
	case XPT_SCSI_IO:
	{
		struct amd_srb *pSRB;
		struct ccb_scsiio *pcsio;

		pcsio = &pccb->csio;

		/*
		 * Assign an SRB and connect it with this ccb.
		 */
		pSRB = amdgetsrb(amd);

		if (!pSRB) {
			pccb->ccb_h.status = CAM_RESRC_UNAVAIL;
			xpt_done(pccb);
			return;
		}
		pSRB->pccb = pccb;
		pccb->ccb_h.ccb_srb_ptr = pSRB;
		pccb->ccb_h.ccb_amd_ptr = amd;
		pSRB->ScsiCmdLen = pcsio->cdb_len;
		bcopy(pcsio->cdb_io.cdb_bytes, pSRB->CmdBlock, pcsio->cdb_len);
		if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			if ((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
				/*
				 * We've been given a pointer
				 * to a single buffer.
				 */
				if ((pccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
					int error;

					crit_enter();
					error =
					    bus_dmamap_load(amd->buffer_dmat,
							    pSRB->dmamap,
							    pcsio->data_ptr,
							    pcsio->dxfer_len,
							    amdexecutesrb,
							    pSRB, /*flags*/0);
					if (error == EINPROGRESS) {
						/*
						 * So as to maintain
						 * ordering, freeze the
						 * controller queue
						 * until our mapping is
						 * returned.
						 */
						xpt_freeze_simq(amd->psim, 1);
						pccb->ccb_h.status |=
						    CAM_RELEASE_SIMQ;
					}
					crit_exit();
				} else {
					struct bus_dma_segment seg;

					/* Pointer to physical buffer */
					seg.ds_addr =
					    (bus_addr_t)pcsio->data_ptr;
					seg.ds_len = pcsio->dxfer_len;
					amdexecutesrb(pSRB, &seg, 1, 0);
				}
			} else {
				struct bus_dma_segment *segs;

				if ((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0
				 || (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
					TAILQ_INSERT_HEAD(&amd->free_srbs,
							  pSRB, links);
					pccb->ccb_h.status = CAM_PROVIDE_FAIL;
					xpt_done(pccb);
					return;
				}

				/* Just use the segments provided */
				segs =
				    (struct bus_dma_segment *)pcsio->data_ptr;
				amdexecutesrb(pSRB, segs, pcsio->sglist_cnt, 0);
			}
		} else {
			amdexecutesrb(pSRB, NULL, 0, 0);
		}
		break;
	}
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &pccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = amd->max_id;
		cpi->max_lun = amd->max_lun;	/* 7 or 0 */
		cpi->initiator_id = amd->AdaptSCSIID;
		cpi->bus_id = cam_sim_bus(psim);
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "TRM-AMD", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(psim);
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	case XPT_ABORT:
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	case XPT_RESET_BUS:
	{
		int i;

		amd_ResetSCSIBus(amd);

		for (i = 0; i < 500; i++) {
			DELAY(1000);	/* Wait until our interrupt
					 * handler sees it */
		}

		pccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	case XPT_RESET_DEV:
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	case XPT_TERM_IO:
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts;
		struct amd_target_info *targ_info;
		struct amd_transinfo *tinfo;

		cts = &pccb->cts;
		crit_enter();
		targ_info = &amd->tinfo[target_id];
		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
			/* current transfer settings */
			if (targ_info->disc_tag & AMD_CUR_DISCENB) {
				cts->flags = CCB_TRANS_DISC_ENB;
			} else {
				cts->flags = 0;	/* no tag & disconnect */
			}
			if (targ_info->disc_tag & AMD_CUR_TAGENB) {
				cts->flags |= CCB_TRANS_TAG_ENB;
			}
			tinfo = &targ_info->current;
		} else {
			/* default (user) transfer settings */
			if (targ_info->disc_tag & AMD_USR_DISCENB) {
				cts->flags = CCB_TRANS_DISC_ENB;
			} else {
				cts->flags = 0;
			}
			if (targ_info->disc_tag & AMD_USR_TAGENB) {
				cts->flags |= CCB_TRANS_TAG_ENB;
			}
			tinfo = &targ_info->user;
		}

		cts->sync_period = tinfo->period;
		cts->sync_offset = tinfo->offset;
		cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
		crit_exit();
		cts->valid = CCB_TRANS_SYNC_RATE_VALID
			   | CCB_TRANS_SYNC_OFFSET_VALID
			   | CCB_TRANS_BUS_WIDTH_VALID
			   | CCB_TRANS_DISC_VALID
			   | CCB_TRANS_TQ_VALID;
		pccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	case XPT_SET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts;
		struct amd_target_info *targ_info;
		u_int update_type;
		int last_entry;

		cts = &pccb->cts;
		update_type = 0;
		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
			update_type |= AMD_TRANS_GOAL;
		} else if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
			update_type |= AMD_TRANS_USER;
		}
		if (update_type == 0
		 || update_type == (AMD_TRANS_USER|AMD_TRANS_GOAL)) {
			cts->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(pccb);
			break;
		}

		crit_enter();
		targ_info = &amd->tinfo[target_id];

		if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
			if (update_type & AMD_TRANS_GOAL) {
				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
					targ_info->disc_tag |= AMD_CUR_DISCENB;
				} else {
					targ_info->disc_tag &= ~AMD_CUR_DISCENB;
				}
			}
			if (update_type & AMD_TRANS_USER) {
				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
					targ_info->disc_tag |= AMD_USR_DISCENB;
				} else {
					targ_info->disc_tag &= ~AMD_USR_DISCENB;
				}
			}
		}
		if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
			if (update_type & AMD_TRANS_GOAL) {
				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
					targ_info->disc_tag |= AMD_CUR_TAGENB;
				} else {
					targ_info->disc_tag &= ~AMD_CUR_TAGENB;
				}
			}
			if (update_type & AMD_TRANS_USER) {
				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
					targ_info->disc_tag |= AMD_USR_TAGENB;
				} else {
					targ_info->disc_tag &= ~AMD_USR_TAGENB;
				}
			}
		}

		if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0) {
			if (update_type & AMD_TRANS_GOAL)
				cts->sync_offset = targ_info->goal.offset;
			else
				cts->sync_offset = targ_info->user.offset;
		}

		if (cts->sync_offset > AMD_MAX_SYNC_OFFSET)
			cts->sync_offset = AMD_MAX_SYNC_OFFSET;

		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) {
			if (update_type & AMD_TRANS_GOAL)
				cts->sync_period = targ_info->goal.period;
			else
				cts->sync_period = targ_info->user.period;
		}

		last_entry = sizeof(tinfo_sync_period) - 1;
		if ((cts->sync_period != 0)
		 && (cts->sync_period < tinfo_sync_period[0]))
			cts->sync_period = tinfo_sync_period[0];
		if (cts->sync_period > tinfo_sync_period[last_entry])
			cts->sync_period = 0;
		if (cts->sync_offset == 0)
			cts->sync_period = 0;

		if ((update_type & AMD_TRANS_USER) != 0) {
			targ_info->user.period = cts->sync_period;
			targ_info->user.offset = cts->sync_offset;
		}
		if ((update_type & AMD_TRANS_GOAL) != 0) {
			targ_info->goal.period = cts->sync_period;
			targ_info->goal.offset = cts->sync_offset;
		}
		crit_exit();
		pccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		struct ccb_calc_geometry *ccg;
		u_int32_t size_mb;
		u_int32_t secs_per_cylinder;
		int extended;

		ccg = &pccb->ccg;
		size_mb = ccg->volume_size/((1024L * 1024L)/ccg->block_size);
		extended = (amd->eepromBuf[EE_MODE2] & GREATER_1G) != 0;

		if (size_mb > 1024 && extended) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		pccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	default:
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	}
}
static void
amd_poll(struct cam_sim *psim)
{
	amd_intr(cam_sim_softc(psim));
}
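
/*
 * Translate the physical progress of the active transfer (completed
 * S/G elements plus bytes moved within the current element) back into
 * a kernel virtual address inside the CCB's data buffer.
 */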
static u_int8_t *
phystovirt(struct amd_srb *pSRB, u_int32_t xferCnt)
{
	int dataPtr;
	struct ccb_scsiio *pcsio;
	u_int8_t i;
	struct amd_sg *pseg;

	pcsio = &pSRB->pccb->csio;

	dataPtr = (int) pcsio->data_ptr;
	pseg = pSRB->SGsegment;
	for (i = 0; i < pSRB->SGIndex; i++) {
		dataPtr += (int) pseg->SGXLen;
		pseg++;
	}
	dataPtr += (int) xferCnt;
	return ((u_int8_t *) dataPtr);
}
static void
ResetDevParam(struct amd_softc *amd)
{
	u_int target;

	for (target = 0; target <= amd->max_id; target++) {
		if (amd->AdaptSCSIID != target) {
			amdsetsync(amd, target, /*clockrate*/0,
				   /*period*/0, /*offset*/0, AMD_TRANS_CUR);
		}
	}
}
static void
amdcompletematch(struct amd_softc *amd, target_id_t target, lun_id_t lun,
		 u_int tag, struct srb_queue *queue, cam_status status)
{
	struct amd_srb *srb;
	struct amd_srb *next_srb;

	for (srb = TAILQ_FIRST(queue); srb != NULL; srb = next_srb) {
		union ccb *ccb;

		next_srb = TAILQ_NEXT(srb, links);
		if (srb->pccb->ccb_h.target_id != target
		 && target != CAM_TARGET_WILDCARD)
			continue;

		if (srb->pccb->ccb_h.target_lun != lun
		 && lun != CAM_LUN_WILDCARD)
			continue;

		if (srb->TagNumber != tag
		 && tag != AMD_TAG_WILDCARD)
			continue;

		ccb = srb->pccb;
		TAILQ_REMOVE(queue, srb, links);
		TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0
		 && (status & CAM_DEV_QFRZN) != 0)
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status = status;
		xpt_done(ccb);
	}
}
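
/*
 * Record negotiated synchronous transfer parameters for a target.
 * "type" selects which views to update (current/goal/user); when the
 * negotiation is active on the bus, the chip registers are
 * reprogrammed immediately and the XPT is notified of the new rate.
 */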
static void
amdsetsync(struct amd_softc *amd, u_int target, u_int clockrate,
	   u_int period, u_int offset, u_int type)
{
	struct amd_target_info *tinfo;
	u_int old_period;
	u_int old_offset;

	tinfo = &amd->tinfo[target];
	old_period = tinfo->current.period;
	old_offset = tinfo->current.offset;
	if ((type & AMD_TRANS_CUR) != 0
	 && (old_period != period || old_offset != offset)) {
		struct cam_path *path;

		tinfo->current.period = period;
		tinfo->current.offset = offset;
		tinfo->sync_period_reg = clockrate;
		tinfo->sync_offset_reg = offset;
		tinfo->CtrlR3 &= ~FAST_SCSI;
		tinfo->CtrlR4 &= ~EATER_25NS;
		if (clockrate > 7)
			tinfo->CtrlR4 |= EATER_25NS;
		else
			tinfo->CtrlR3 |= FAST_SCSI;

		if ((type & AMD_TRANS_ACTIVE) == AMD_TRANS_ACTIVE) {
			amd_write8(amd, SYNCPERIOREG, tinfo->sync_period_reg);
			amd_write8(amd, SYNCOFFREG, tinfo->sync_offset_reg);
			amd_write8(amd, CNTLREG3, tinfo->CtrlR3);
			amd_write8(amd, CNTLREG4, tinfo->CtrlR4);
		}
		/* If possible, update the XPT's notion of our transfer rate */
		if (xpt_create_path(&path, /*periph*/NULL,
				    cam_sim_path(amd->psim), target,
				    CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
			struct ccb_trans_settings neg;

			xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
			neg.sync_period = period;
			neg.sync_offset = offset;
			neg.valid = CCB_TRANS_SYNC_RATE_VALID
				  | CCB_TRANS_SYNC_OFFSET_VALID;
			xpt_async(AC_TRANSFER_NEG, path, &neg);
			xpt_free_path(path);
		}
	}
	if ((type & AMD_TRANS_GOAL) != 0) {
		tinfo->goal.period = period;
		tinfo->goal.offset = offset;
	}

	if ((type & AMD_TRANS_USER) != 0) {
		tinfo->user.period = period;
		tinfo->user.offset = offset;
	}
}
static void
amdsettags(struct amd_softc *amd, u_int target, int tagenb)
{
	panic("Implement me!\n");
}

/*
 **********************************************************************
 * Function : amd_reset (struct amd_softc * amd)
 * Purpose  : perform a hard reset on the SCSI bus (and AMD chip).
 * Inputs   : cmd - command which caused the SCSI RESET
 **********************************************************************
 */
static void
amd_reset(struct amd_softc *amd)
{
	u_int8_t bval;
	u_int16_t i;

#ifdef AMD_DEBUG0
	printf("DC390: RESET");
#endif

	bval = amd_read8(amd, CNTLREG1);
	bval |= DIS_INT_ON_SCSI_RST;
	amd_write8(amd, CNTLREG1, bval);	/* disable interrupt */
	amd_ResetSCSIBus(amd);

	for (i = 0; i < 500; i++) {
		DELAY(1000);
	}

	bval = amd_read8(amd, CNTLREG1);
	bval &= ~DIS_INT_ON_SCSI_RST;
	amd_write8(amd, CNTLREG1, bval);	/* re-enable interrupt */

	amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	ResetDevParam(amd);
	amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
			 AMD_TAG_WILDCARD, &amd->running_srbs,
			 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
	amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
			 AMD_TAG_WILDCARD, &amd->waiting_srbs,
			 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
	amd->active_srb = NULL;
}
static void
amd_timeout(void *arg1)
{
	struct amd_srb *pSRB;

	pSRB = (struct amd_srb *)arg1;
}
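
/*
 * Attempt to select the target for pSRB and set up the message-out
 * bytes (IDENTIFY, optional tag bytes, optional SDTR).  Returns 0 if
 * the selection was issued, nonzero if the bus was busy and the SRB
 * must remain on the waiting queue.
 */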
static int
amdstart(struct amd_softc *amd, struct amd_srb *pSRB)
{
	union ccb *pccb;
	struct ccb_scsiio *pcsio;
	struct amd_target_info *targ_info;
	u_int identify_msg;
	u_int command;
	u_int target;
	u_int lun;

	pccb = pSRB->pccb;
	pcsio = &pccb->csio;
	target = pccb->ccb_h.target_id;
	lun = pccb->ccb_h.target_lun;
	targ_info = &amd->tinfo[target];

	amd_clear_msg_state(amd);
	amd_write8(amd, SCSIDESTIDREG, target);
	amd_write8(amd, SYNCPERIOREG, targ_info->sync_period_reg);
	amd_write8(amd, SYNCOFFREG, targ_info->sync_offset_reg);
	amd_write8(amd, CNTLREG1, targ_info->CtrlR1);
	amd_write8(amd, CNTLREG3, targ_info->CtrlR3);
	amd_write8(amd, CNTLREG4, targ_info->CtrlR4);
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	identify_msg = MSG_IDENTIFYFLAG | lun;
	if ((targ_info->disc_tag & AMD_CUR_DISCENB) != 0
	 && (pccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0
	 && (pSRB->CmdBlock[0] != REQUEST_SENSE)
	 && (pSRB->SRBFlag & AUTO_REQSENSE) == 0)
		identify_msg |= MSG_IDENTIFY_DISCFLAG;

	amd_write8(amd, SCSIFIFOREG, identify_msg);
	if ((targ_info->disc_tag & AMD_CUR_TAGENB) == 0
	 || (identify_msg & MSG_IDENTIFY_DISCFLAG) == 0)
		pccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
	if (targ_info->current.period != targ_info->goal.period
	 || targ_info->current.offset != targ_info->goal.offset) {
		command = SEL_W_ATN_STOP;
		amdconstructsdtr(amd, targ_info->goal.period,
				 targ_info->goal.offset);
	} else if ((pccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
		command = SEL_W_ATN2;
		pSRB->SRBState = SRB_START;
		amd_write8(amd, SCSIFIFOREG, pcsio->tag_action);
		amd_write8(amd, SCSIFIFOREG, pSRB->TagNumber);
	} else {
		command = SEL_W_ATN;
		pSRB->SRBState = SRB_START;
	}
	if (command != SEL_W_ATN_STOP)
		amdsetupcommand(amd, pSRB);

	if (amd_read8(amd, SCSISTATREG) & INTERRUPT) {
		pSRB->SRBState = SRB_READY;
		return (1);
	} else {
		amd->last_phase = SCSI_ARBITRATING;
		amd_write8(amd, SCSICMDREG, command);
		amd->active_srb = pSRB;
		amd->cur_target = target;
		amd->cur_lun = lun;
		return (0);
	}
}
/*
 * Catch an interrupt from the adapter.
 * Process pending device interrupts.
 */
static void
amd_intr(void *arg)
{
	struct amd_softc *amd;
	struct amd_srb *pSRB;
	u_int internstat = 0;
	u_int scsistat;
	u_int intstat;

	amd = (struct amd_softc *)arg;

	if (amd == NULL) {
#ifdef AMD_DEBUG0
		printf("amd_intr: amd NULL, return......");
#endif
		return;
	}

	scsistat = amd_read8(amd, SCSISTATREG);
	if (!(scsistat & INTERRUPT)) {
#ifdef AMD_DEBUG0
		printf("amd_intr: scsistat = NULL, return......");
#endif
		return;
	}
#ifdef AMD_DEBUG_SCSI_PHASE
	printf("scsistat=%2x,", scsistat);
#endif

	internstat = amd_read8(amd, INTERNSTATREG);
	intstat = amd_read8(amd, INTSTATREG);

#ifdef AMD_DEBUG_SCSI_PHASE
	printf("intstat=%2x,", intstat);
#endif

	if (intstat & DISCONNECTED) {
		amd_Disconnect(amd);
		return;
	}
	if (intstat & RESELECTED) {
		amd_Reselect(amd);
		return;
	}
	if (intstat & INVALID_CMD) {
		amd_InvalidCmd(amd);
		return;
	}
	if (intstat & SCSI_RESET_) {
		amd_ScsiRstDetect(amd);
		return;
	}
	if (intstat & (SUCCESSFUL_OP + SERVICE_REQUEST)) {
		pSRB = amd->active_srb;
		/*
		 * Run our state engine.  First perform
		 * post processing for the last phase we
		 * were in, followed by any processing
		 * required to handle the current phase.
		 */
		scsistat =
		    amd_SCSI_phase0[amd->last_phase](amd, pSRB, scsistat);
		amd->last_phase = scsistat & SCSI_PHASE_MASK;
		(void)amd_SCSI_phase1[amd->last_phase](amd, pSRB, scsistat);
	}
}
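
/*
 * Post-process a data-out phase: account for how much of the current
 * S/G element actually reached the target.  If the transfer counter
 * hit zero the element completed and we advance the S/G list;
 * otherwise the residual is recomputed from the chip's FIFO and
 * transfer-counter registers.
 */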
static u_int
amd_DataOutPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	struct amd_sg *psgl;
	u_int32_t ResidCnt, xferCnt;

	if (!(pSRB->SRBState & SRB_XFERPAD)) {
		if (scsistat & PARITY_ERR) {
			pSRB->SRBStatus |= PARITY_ERROR;
		}
		if (scsistat & COUNT_2_ZERO) {
			while ((amd_read8(amd, DMA_Status)&DMA_XFER_DONE) == 0)
				;
			pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
			pSRB->SGIndex++;
			if (pSRB->SGIndex < pSRB->SGcount) {
				pSRB->pSGlist++;
				psgl = pSRB->pSGlist;
				pSRB->SGPhysAddr = psgl->SGXPtr;
				pSRB->SGToBeXferLen = psgl->SGXLen;
			} else {
				pSRB->SGToBeXferLen = 0;
			}
		} else {
			ResidCnt = amd_read8(amd, CURRENTFIFOREG) & 0x1f;
			ResidCnt += amd_read8(amd, CTCREG_LOW)
				  | (amd_read8(amd, CTCREG_MID) << 8)
				  | (amd_read8(amd, CURTXTCNTREG) << 16);

			xferCnt = pSRB->SGToBeXferLen - ResidCnt;
			pSRB->SGPhysAddr += xferCnt;
			pSRB->TotalXferredLen += xferCnt;
			pSRB->SGToBeXferLen = ResidCnt;
		}
	}
	amd_write8(amd, DMA_Cmd, WRITE_DIRECTION | DMA_IDLE_CMD);
	return (scsistat);
}
static u_int
amd_DataInPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	u_int8_t bval;
	u_int16_t i, residual;
	struct amd_sg *psgl;
	u_int32_t ResidCnt, xferCnt;
	u_int8_t *ptr;

	if (!(pSRB->SRBState & SRB_XFERPAD)) {
		if (scsistat & PARITY_ERR) {
			pSRB->SRBStatus |= PARITY_ERROR;
		}
		if (scsistat & COUNT_2_ZERO) {
			while (1) {
				bval = amd_read8(amd, DMA_Status);
				if ((bval & DMA_XFER_DONE) != 0)
					break;
			}
			amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_IDLE_CMD);

			pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
			pSRB->SGIndex++;
			if (pSRB->SGIndex < pSRB->SGcount) {
				pSRB->pSGlist++;
				psgl = pSRB->pSGlist;
				pSRB->SGPhysAddr = psgl->SGXPtr;
				pSRB->SGToBeXferLen = psgl->SGXLen;
			} else {
				pSRB->SGToBeXferLen = 0;
			}
		} else {	/* phase changed */
			residual = 0;
			bval = amd_read8(amd, CURRENTFIFOREG);
			while (bval & 0x1f) {
				if ((bval & 0x1f) == 1) {
					for (i = 0; i < 0x100; i++) {
						bval = amd_read8(amd,
						    CURRENTFIFOREG);
						if (!(bval & 0x1f)) {
							goto din_1;
						} else if (i == 0x0ff) {
							residual = 1;
							goto din_1;
						}
					}
				} else {
					bval = amd_read8(amd, CURRENTFIFOREG);
				}
			}
	din_1:
			amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_BLAST_CMD);
			for (i = 0; i < 0x8000; i++) {
				if ((amd_read8(amd, DMA_Status)&BLAST_COMPLETE))
					break;
			}
			amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_IDLE_CMD);

			ResidCnt = amd_read8(amd, CTCREG_LOW)
				 | (amd_read8(amd, CTCREG_MID) << 8)
				 | (amd_read8(amd, CURTXTCNTREG) << 16);
			xferCnt = pSRB->SGToBeXferLen - ResidCnt;
			pSRB->SGPhysAddr += xferCnt;
			pSRB->TotalXferredLen += xferCnt;
			pSRB->SGToBeXferLen = ResidCnt;

			if (residual) {
				/* get residual byte */
				bval = amd_read8(amd, SCSIFIFOREG);
				ptr = phystovirt(pSRB, xferCnt);
				*ptr = bval;
				pSRB->SGPhysAddr++;
				pSRB->TotalXferredLen++;
				pSRB->SGToBeXferLen--;
			}
		}
	}
	return (scsistat);
}
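
/*
 * Status phase complete: the FIFO now holds the status byte followed
 * by the command-complete message byte.  Latch both and ACK the
 * message.
 */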
static u_int
amd_StatusPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	pSRB->TargetStatus = amd_read8(amd, SCSIFIFOREG);
	pSRB->EndMessage = amd_read8(amd, SCSIFIFOREG);
	pSRB->SRBState = SRB_COMPLETED;
	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);
	return (SCSI_NOP0);
}

static u_int
amd_MsgOutPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	if (pSRB->SRBState & (SRB_UNEXPECT_RESEL + SRB_ABORT_SENT)) {
		scsistat = SCSI_NOP0;
	}
	return (scsistat);
}

static u_int
amd_MsgInPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	int done;

	amd->msgin_buf[amd->msgin_index] = amd_read8(amd, SCSIFIFOREG);

	done = amdparsemsg(amd);
	if (done)
		amd->msgin_index = 0;
	else
		amd->msgin_index++;
	return (SCSI_NOP0);
}
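
/*
 * Incrementally parse the incoming message bytes collected so far.
 * Returns TRUE once a complete message has been handled (or rejected)
 * so the caller can restart message collection.
 */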
static int
amdparsemsg(struct amd_softc *amd)
{
	struct amd_target_info *targ_info;
	int reject;
	int done;
	int response;

	done = FALSE;
	response = FALSE;
	reject = FALSE;

	targ_info = &amd->tinfo[amd->cur_target];

	/*
	 * Parse as much of the message as is available,
	 * rejecting it if we don't support it.  When
	 * the entire message is available and has been
	 * handled, return TRUE indicating that we have
	 * parsed an entire message.
	 */
	switch (amd->msgin_buf[0]) {
	case MSG_DISCONNECT:
		amd->active_srb->SRBState = SRB_DISCONNECT;
		amd->disc_count[amd->cur_target][amd->cur_lun]++;
		done = TRUE;
		break;
	case MSG_SIMPLE_Q_TAG:
	{
		struct amd_srb *disc_srb;

		if (amd->msgin_index < 1)
			break;
		disc_srb = &amd->SRB_array[amd->msgin_buf[1]];
		if (amd->active_srb != NULL
		 || disc_srb->SRBState != SRB_DISCONNECT
		 || disc_srb->pccb->ccb_h.target_id != amd->cur_target
		 || disc_srb->pccb->ccb_h.target_lun != amd->cur_lun) {
			printf("amd%d: Unexpected tagged reselection "
			       "for target %d, Issuing Abort\n", amd->unit,
			       amd->cur_target);
			amd->msgout_buf[0] = MSG_ABORT;
			amd->msgout_len = 1;
			response = TRUE;
			break;
		}
		amd->active_srb = disc_srb;
		amd->disc_count[amd->cur_target][amd->cur_lun]--;
		done = TRUE;
		break;
	}
	case MSG_MESSAGE_REJECT:
		response = amdhandlemsgreject(amd);
		if (response == FALSE)
			amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
		/* FALLTHROUGH */
	case MSG_NOOP:
		done = TRUE;
		break;
	case MSG_EXTENDED:
	{
		u_int clockrate;
		u_int period;
		u_int offset;
		u_int saved_offset;

		/* Wait for enough of the message to begin validation */
		if (amd->msgin_index < 1)
			break;
		if (amd->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
			reject = TRUE;
			break;
		}

		/* Wait for opcode */
		if (amd->msgin_index < 2)
			break;

		if (amd->msgin_buf[2] != MSG_EXT_SDTR) {
			reject = TRUE;
			break;
		}

		/*
		 * Wait until we have both args before validating
		 * and acting on this message.
		 *
		 * Add one to MSG_EXT_SDTR_LEN to account for
		 * the extended message preamble.
		 */
		if (amd->msgin_index < (MSG_EXT_SDTR_LEN + 1))
			break;

		period = amd->msgin_buf[3];
		saved_offset = offset = amd->msgin_buf[4];
		clockrate = amdfindclockrate(amd, &period);
		if (offset > AMD_MAX_SYNC_OFFSET)
			offset = AMD_MAX_SYNC_OFFSET;
		if (period == 0 || offset == 0) {
			offset = 0;
			period = 0;
			clockrate = 0;
		}
		amdsetsync(amd, amd->cur_target, clockrate, period, offset,
			   AMD_TRANS_ACTIVE|AMD_TRANS_GOAL);

		/*
		 * See if we initiated Sync Negotiation
		 * and didn't have to fall down to async
		 * transfers.
		 */
		if (amdsentmsg(amd, MSG_EXT_SDTR, /*full*/TRUE)) {
			/* We started it */
			if (saved_offset != offset) {
				/* Went too low - force async */
				reject = TRUE;
			}
		} else {
			/*
			 * Send our own SDTR in reply
			 */
			if (bootverbose)
				printf("Sending SDTR!\n");
			amd->msgout_index = 0;
			amd->msgout_len = 0;
			amdconstructsdtr(amd, period, offset);
			amd->msgout_index = 0;
			response = TRUE;
		}
		done = TRUE;
		break;
	}
	case MSG_SAVEDATAPOINTER:
	case MSG_RESTOREPOINTERS:
		/* XXX Implement!!! */
		done = TRUE;
		break;
	default:
		reject = TRUE;
		break;
	}

	if (reject) {
		amd->msgout_index = 0;
		amd->msgout_len = 1;
		amd->msgout_buf[0] = MSG_MESSAGE_REJECT;
		done = TRUE;
		response = TRUE;
	}

	if (response)
		amd_write8(amd, SCSICMDREG, SET_ATN_CMD);

	if (done && !response)
		/* Clear the outgoing message buffer */
		amd->msgout_len = 0;

	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);

	return (done);
}
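
/*
 * Find the chip clock-rate setting for a requested synchronous
 * period, rounding the period up to the next rate we support.  The
 * returned value is the table index biased by 4, the smallest clock
 * divisor the chip accepts; a period slower than our slowest entry
 * yields 0 (asynchronous).
 */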
static u_int
amdfindclockrate(struct amd_softc *amd, u_int *period)
{
	u_int i;
	u_int clockrate;

	for (i = 0; i < sizeof(tinfo_sync_period); i++) {
		u_int8_t *table_entry;

		table_entry = &tinfo_sync_period[i];
		if (*period <= *table_entry) {
			/*
			 * When responding to a target that requests
			 * sync, the requested rate may fall between
			 * two rates that we can output, but still be
			 * a rate that we can receive.  Because of this,
			 * we want to respond to the target with
			 * the same rate that it sent to us even
			 * if the period we use to send data to it
			 * is lower.  Only lower the response period
			 * if we must.
			 */
			if (i == 0)
				*period = *table_entry;
			break;
		}
	}

	if (i == sizeof(tinfo_sync_period)) {
		/* Too slow for us.  Use async transfers. */
		*period = 0;
		clockrate = 0;
	} else {
		clockrate = i + 4;
	}
	return (clockrate);
}
/*
 * See if we sent a particular extended message to the target.
 * If "full" is true, the target saw the full message.
 * If "full" is false, the target saw at least the first
 * byte of the message.
 */
static int
amdsentmsg(struct amd_softc *amd, u_int msgtype, int full)
{
	int found;
	int index;

	found = FALSE;
	index = 0;

	while (index < amd->msgout_len) {
		if ((amd->msgout_buf[index] & MSG_IDENTIFYFLAG) != 0
		 || amd->msgout_buf[index] == MSG_MESSAGE_REJECT)
			index++;
		else if (amd->msgout_buf[index] >= MSG_SIMPLE_Q_TAG
		      && amd->msgout_buf[index] < MSG_IGN_WIDE_RESIDUE) {
			/* Skip tag type and tag id */
			index += 2;
		} else if (amd->msgout_buf[index] == MSG_EXTENDED) {
			/* Found a candidate */
			if (amd->msgout_buf[index+2] == msgtype) {
				u_int end_index;

				end_index = index + 1
					  + amd->msgout_buf[index + 1];
				if (full) {
					if (amd->msgout_index > end_index)
						found = TRUE;
				} else if (amd->msgout_index > index)
					found = TRUE;
			}
			break;
		} else {
			panic("amdsentmsg: Inconsistent msg buffer");
		}
	}
	return (found);
}
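
/*
 * Queue an SDTR message in the outgoing message buffer.  The five
 * bytes follow the SCSI extended-message format; e.g. a request for
 * a 100ns period (period factor 0x19) at offset 15 goes out as:
 *
 *	0x01 0x03 0x01 0x19 0x0f
 *	(MSG_EXTENDED, length, MSG_EXT_SDTR, period, offset)
 */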
static void
amdconstructsdtr(struct amd_softc *amd, u_int period, u_int offset)
{
	amd->msgout_buf[amd->msgout_index++] = MSG_EXTENDED;
	amd->msgout_buf[amd->msgout_index++] = MSG_EXT_SDTR_LEN;
	amd->msgout_buf[amd->msgout_index++] = MSG_EXT_SDTR;
	amd->msgout_buf[amd->msgout_index++] = period;
	amd->msgout_buf[amd->msgout_index++] = offset;
	amd->msgout_len += 5;
}
static int
amdhandlemsgreject(struct amd_softc *amd)
{
	/*
	 * If we had an outstanding SDTR for this
	 * target, this is a signal that the target
	 * is refusing negotiation.  Also watch out
	 * for rejected tag messages.
	 */
	struct amd_srb *srb;
	struct amd_target_info *targ_info;
	int response = FALSE;

	srb = amd->active_srb;
	targ_info = &amd->tinfo[amd->cur_target];
	if (amdsentmsg(amd, MSG_EXT_SDTR, /*full*/FALSE)) {
		/* note asynch xfers and clear flag */
		amdsetsync(amd, amd->cur_target, /*clockrate*/0,
			   /*period*/0, /*offset*/0,
			   AMD_TRANS_ACTIVE|AMD_TRANS_GOAL);
		printf("amd%d:%d: refuses synchronous negotiation. "
		       "Using asynchronous transfers\n",
		       amd->unit, amd->cur_target);
	} else if ((srb != NULL)
		&& (srb->pccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
		struct ccb_trans_settings neg;

		printf("amd%d:%d: refuses tagged commands.  Performing "
		       "non-tagged I/O\n", amd->unit, amd->cur_target);

		amdsettags(amd, amd->cur_target, FALSE);
		neg.valid = CCB_TRANS_TQ_VALID;
		xpt_setup_ccb(&neg.ccb_h, srb->pccb->ccb_h.path, /*priority*/1);
		xpt_async(AC_TRANSFER_NEG, srb->pccb->ccb_h.path, &neg);

		/*
		 * Resend the identify for this CCB as the target
		 * may believe that the selection is invalid otherwise.
		 */
		if (amd->msgout_len != 0)
			bcopy(&amd->msgout_buf[0], &amd->msgout_buf[1],
			      amd->msgout_len);
		amd->msgout_buf[0] = MSG_IDENTIFYFLAG
				   | srb->pccb->ccb_h.target_lun;
		amd->msgout_len++;
		if ((targ_info->disc_tag & AMD_CUR_DISCENB) != 0
		 && (srb->pccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
			amd->msgout_buf[0] |= MSG_IDENTIFY_DISCFLAG;

		srb->pccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;

		/*
		 * Requeue all tagged commands for this target
		 * currently in our possession so they can be
		 * converted to untagged commands.
		 */
		amdcompletematch(amd, amd->cur_target, amd->cur_lun,
				 AMD_TAG_WILDCARD, &amd->waiting_srbs,
				 CAM_DEV_QFRZN|CAM_REQUEUE_REQ);
		response = TRUE;
	} else {
		/*
		 * Otherwise, we ignore it.
		 */
		printf("amd%d:%d: Message reject received -- ignored\n",
		       amd->unit, amd->cur_target);
	}
	return (response);
}
#if 0
	if (!(pSRB->SRBState & SRB_MSGIN_MULTI)) {
		if (bval == MSG_DISCONNECT) {
			pSRB->SRBState = SRB_DISCONNECT;
		} else if (bval == MSG_SAVEDATAPOINTER) {
			;
		} else if ((bval == MSG_EXTENDED)
			|| ((bval >= MSG_SIMPLE_Q_TAG)
			 && (bval <= MSG_ORDERED_Q_TAG))) {
			pSRB->SRBState |= SRB_MSGIN_MULTI;
			pSRB->MsgInBuf[0] = bval;
			pSRB->MsgCnt = 1;
			pSRB->pMsgPtr = &pSRB->MsgInBuf[1];
		} else if (bval == MSG_MESSAGE_REJECT) {
			amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);

			if (pSRB->SRBState & DO_SYNC_NEGO) {
				goto set_async;
			}
		} else if (bval == MSG_RESTOREPOINTERS) {
			;
		}
	} else {		/* minx: */
		*pSRB->pMsgPtr = bval;
		pSRB->MsgCnt++;
		pSRB->pMsgPtr++;
		if ((pSRB->MsgInBuf[0] >= MSG_SIMPLE_Q_TAG)
		 && (pSRB->MsgInBuf[0] <= MSG_ORDERED_Q_TAG)) {
			if (pSRB->MsgCnt == 2) {
				pSRB->SRBState = 0;
				pSRB = &amd->SRB_array[pSRB->MsgInBuf[1]];
				if ((pSRB->SRBState & SRB_DISCONNECT) == 0) {
					pSRB = amd->pTmpSRB;
					pSRB->SRBState = SRB_UNEXPECT_RESEL;
					pDCB->pActiveSRB = pSRB;
					pSRB->MsgOutBuf[0] = MSG_ABORT_TAG;
					EnableMsgOut2(amd, pSRB);
				} else {
					if (pDCB->DCBFlag & ABORT_DEV_) {
						pSRB->SRBState = SRB_ABORT_SENT;
						EnableMsgOut1(amd, pSRB);
					}
					pDCB->pActiveSRB = pSRB;
					pSRB->SRBState = SRB_DATA_XFER;
				}
			}
		} else if ((pSRB->MsgInBuf[0] == MSG_EXTENDED)
			&& (pSRB->MsgCnt == 5)) {
			pSRB->SRBState &= ~(SRB_MSGIN_MULTI + DO_SYNC_NEGO);
			if ((pSRB->MsgInBuf[1] != 3)
			 || (pSRB->MsgInBuf[2] != 1)) {	/* reject_msg: */
				pSRB->MsgCnt = 1;
				pSRB->MsgInBuf[0] = MSG_MESSAGE_REJECT;
				amd_write8(amd, SCSICMDREG, SET_ATN_CMD);
			} else if (!(pSRB->MsgInBuf[3])
				|| !(pSRB->MsgInBuf[4])) {
		set_async:	/* set async */

				pDCB = pSRB->pSRBDCB;
				/* disable sync & sync nego */
				pDCB->SyncMode &= ~(SYNC_ENABLE|SYNC_NEGO_DONE);
				pDCB->SyncPeriod = 0;
				pDCB->SyncOffset = 0;

				pDCB->tinfo.goal.period = 0;
				pDCB->tinfo.goal.offset = 0;

				pDCB->tinfo.current.period = 0;
				pDCB->tinfo.current.offset = 0;
				pDCB->tinfo.current.width =
				    MSG_EXT_WDTR_BUS_8_BIT;

				pDCB->CtrlR3 = FAST_CLK;	/* non_fast */
				pDCB->CtrlR4 &= 0x3f;
				pDCB->CtrlR4 |= EATER_25NS;
				goto re_prog;
			} else {	/* set sync */

				pDCB = pSRB->pSRBDCB;
				/* enable sync & sync nego */
				pDCB->SyncMode |= SYNC_ENABLE|SYNC_NEGO_DONE;

				/* set sync offset */
				pDCB->SyncOffset &= 0x0f0;
				pDCB->SyncOffset |= pSRB->MsgInBuf[4];

				/* set sync period */
				pDCB->MaxNegoPeriod = pSRB->MsgInBuf[3];

				wval = (u_int16_t) pSRB->MsgInBuf[3];
				wval = wval << 2;
				wval1 = wval / 25;
				if ((wval1 * 25) != wval) {
					wval1++;
				}
				bval = FAST_CLK|FAST_SCSI;
				pDCB->CtrlR4 &= 0x3f;
				if (wval1 >= 8) {
					bval = FAST_CLK;
					pDCB->CtrlR4 |= EATER_25NS;
				}
				pDCB->CtrlR3 = bval;
				pDCB->SyncPeriod = (u_int8_t) wval1;

				pDCB->tinfo.goal.period =
				    tinfo_sync_period[pDCB->SyncPeriod - 4];
				pDCB->tinfo.goal.offset = pDCB->SyncOffset;
				pDCB->tinfo.current.period =
				    tinfo_sync_period[pDCB->SyncPeriod - 4];
				pDCB->tinfo.current.offset = pDCB->SyncOffset;

		re_prog:	/*
				 * program SCSI control register
				 */
				amd_write8(amd, SYNCPERIOREG, pDCB->SyncPeriod);
				amd_write8(amd, SYNCOFFREG, pDCB->SyncOffset);
				amd_write8(amd, CNTLREG3, pDCB->CtrlR3);
				amd_write8(amd, CNTLREG4, pDCB->CtrlR4);
			}
		}
	}
	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);
#endif
static u_int
amd_DataOutPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	DataIO_Comm(amd, pSRB, WRITE_DIRECTION);
	return (scsistat);
}

static u_int
amd_DataInPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	DataIO_Comm(amd, pSRB, READ_DIRECTION);
	return (scsistat);
}

static void
DataIO_Comm(struct amd_softc *amd, struct amd_srb *pSRB, u_int ioDir)
{
	struct amd_sg *psgl;
	u_int32_t lval;

	if (pSRB->SGIndex < pSRB->SGcount) {
		amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD|ioDir);	/* |EN_DMA_INT */

		if (!pSRB->SGToBeXferLen) {
			psgl = pSRB->pSGlist;
			pSRB->SGPhysAddr = psgl->SGXPtr;
			pSRB->SGToBeXferLen = psgl->SGXLen;
		}
		lval = pSRB->SGToBeXferLen;
		amd_write8(amd, CTCREG_LOW, lval);
		amd_write8(amd, CTCREG_MID, lval >> 8);
		amd_write8(amd, CURTXTCNTREG, lval >> 16);

		amd_write32(amd, DMA_XferCnt, pSRB->SGToBeXferLen);

		amd_write32(amd, DMA_XferAddr, pSRB->SGPhysAddr);

		pSRB->SRBState = SRB_DATA_XFER;

		amd_write8(amd, SCSICMDREG, DMA_COMMAND|INFO_XFER_CMD);

		amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD|ioDir);	/* |EN_DMA_INT */

		amd_write8(amd, DMA_Cmd, DMA_START_CMD|ioDir);	/* |EN_DMA_INT */
	} else {		/* xfer pad */
		if (pSRB->SGcount) {
			pSRB->AdaptStatus = H_OVER_UNDER_RUN;
			pSRB->SRBStatus |= OVER_RUN;
		}
		amd_write8(amd, CTCREG_LOW, 0);
		amd_write8(amd, CTCREG_MID, 0);
		amd_write8(amd, CURTXTCNTREG, 0);

		pSRB->SRBState |= SRB_XFERPAD;
		amd_write8(amd, SCSICMDREG, DMA_COMMAND|XFER_PAD_BYTE);
	}
}
static u_int
amd_CommandPhase1(struct amd_softc *amd, struct amd_srb *srb, u_int scsistat)
{
	amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	amdsetupcommand(amd, srb);

	srb->SRBState = SRB_COMMAND;
	amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
	return (scsistat);
}

static u_int
amd_StatusPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
	pSRB->SRBState = SRB_STATUS;
	amd_write8(amd, SCSICMDREG, INITIATOR_CMD_CMPLTE);
	return (scsistat);
}

static u_int
amd_MsgOutPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	if (amd->msgout_len == 0) {
		amd->msgout_buf[0] = MSG_NOOP;
		amd->msgout_len = 1;
	}
	amd_write8_multi(amd, SCSIFIFOREG, amd->msgout_buf, amd->msgout_len);
	amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
	return (scsistat);
}

static u_int
amd_MsgInPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
	amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
	return (scsistat);
}

static u_int
amd_NopPhase(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	return (scsistat);
}
static void
amd_Disconnect(struct amd_softc *amd)
{
	struct amd_srb *srb;
	int target;
	int lun;

	srb = amd->active_srb;
	amd->active_srb = NULL;
	amd->last_phase = SCSI_BUS_FREE;
	amd_write8(amd, SCSICMDREG, EN_SEL_RESEL);
	target = amd->cur_target;
	lun = amd->cur_lun;

	if (srb == NULL) {
		/* Invalid reselection */
		amdrunwaiting(amd);
	} else if (srb->SRBState & SRB_ABORT_SENT) {
		/* Clean up and done this srb */
#if 0
		while ((pSRB = TAILQ_FIRST(&amd->running_srbs)) != NULL) {
			/* XXX What about "done'ing" these srbs??? */
			if (pSRB->pSRBDCB == pDCB) {
				TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
				TAILQ_INSERT_HEAD(&amd->free_srbs, pSRB, links);
			}
		}
		amdrunwaiting(amd);
#endif
	} else {
		if ((srb->SRBState & (SRB_START | SRB_MSGOUT))
		 || !(srb->SRBState & (SRB_DISCONNECT | SRB_COMPLETED))) {
			srb->TargetStatus = AMD_SCSI_STAT_SEL_TIMEOUT;
			goto disc1;
		} else if (srb->SRBState & SRB_DISCONNECT) {
			if (!(srb->pccb->ccb_h.flags & CAM_TAG_ACTION_VALID))
				amd->untagged_srbs[target][lun] = srb;
			amdrunwaiting(amd);
		} else if (srb->SRBState & SRB_COMPLETED) {
	disc1:
			srb->SRBState = SRB_FREE;
			SRBdone(amd, srb);
		}
	}
}
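
/*
 * A target has reselected us: decode the reselection ID byte (our own
 * ID bit is masked off, the remaining bit is the target's ID) and the
 * IDENTIFY message to find the disconnected SRB, then reload the
 * target's transfer settings.
 */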
static void
amd_Reselect(struct amd_softc *amd)
{
	struct amd_target_info *tinfo;
	u_int16_t disc_count;

	amd_clear_msg_state(amd);
	if (amd->active_srb != NULL) {
		/* Requeue the SRB for our attempted Selection */
		TAILQ_REMOVE(&amd->running_srbs, amd->active_srb, links);
		TAILQ_INSERT_HEAD(&amd->waiting_srbs, amd->active_srb, links);
		amd->active_srb = NULL;
	}
	/* get ID */
	amd->cur_target = amd_read8(amd, SCSIFIFOREG);
	amd->cur_target ^= amd->HostID_Bit;
	amd->cur_target = ffs(amd->cur_target) - 1;
	amd->cur_lun = amd_read8(amd, SCSIFIFOREG) & 7;
	tinfo = &amd->tinfo[amd->cur_target];
	amd->active_srb = amd->untagged_srbs[amd->cur_target][amd->cur_lun];
	disc_count = amd->disc_count[amd->cur_target][amd->cur_lun];
	if (disc_count == 0) {
		printf("amd%d: Unexpected reselection for target %d, "
		       "Issuing Abort\n", amd->unit, amd->cur_target);
		amd->msgout_buf[0] = MSG_ABORT;
		amd->msgout_len = 1;
		amd_write8(amd, SCSICMDREG, SET_ATN_CMD);
	}
	if (amd->active_srb != NULL) {
		amd->disc_count[amd->cur_target][amd->cur_lun]--;
		amd->untagged_srbs[amd->cur_target][amd->cur_lun] = NULL;
	}

	amd_write8(amd, SCSIDESTIDREG, amd->cur_target);
	amd_write8(amd, SYNCPERIOREG, tinfo->sync_period_reg);
	amd_write8(amd, SYNCOFFREG, tinfo->sync_offset_reg);
	amd_write8(amd, CNTLREG1, tinfo->CtrlR1);
	amd_write8(amd, CNTLREG3, tinfo->CtrlR3);
	amd_write8(amd, CNTLREG4, tinfo->CtrlR4);
	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);	/* drop /ACK */
	amd->last_phase = SCSI_NOP0;
}
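
/*
 * Final disposition of a completed SRB: translate the adapter and
 * SCSI status into a CAM status, run automatic REQUEST SENSE when a
 * CHECK CONDITION allows it, then return the CCB to CAM and the SRB
 * to the free list.
 */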
static void
SRBdone(struct amd_softc *amd, struct amd_srb *pSRB)
{
	u_int8_t bval, i, status;
	union ccb *pccb;
	struct ccb_scsiio *pcsio;
	struct amd_sg *ptr2;
	u_int32_t swlval;
	u_int target_id, target_lun;

	pccb = pSRB->pccb;
	pcsio = &pccb->csio;
	target_id = pSRB->pccb->ccb_h.target_id;
	target_lun = pSRB->pccb->ccb_h.target_lun;

	CAM_DEBUG(pccb->ccb_h.path, CAM_DEBUG_TRACE,
		  ("SRBdone - TagNumber %d\n", pSRB->TagNumber));

	if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;

		if ((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(amd->buffer_dmat, pSRB->dmamap, op);
		bus_dmamap_unload(amd->buffer_dmat, pSRB->dmamap);
	}

	status = pSRB->TargetStatus;
	pccb->ccb_h.status = CAM_REQ_CMP;
	if (pSRB->SRBFlag & AUTO_REQSENSE) {
		pSRB->SRBFlag &= ~AUTO_REQSENSE;
		pSRB->AdaptStatus = 0;
		pSRB->TargetStatus = SCSI_STATUS_CHECK_COND;

		if (status == SCSI_STATUS_CHECK_COND) {
			pccb->ccb_h.status = CAM_SEL_TIMEOUT;
			goto ckc_e;
		}
		*((u_int32_t *)&(pSRB->CmdBlock[0])) = pSRB->Segment0[0];
		*((u_int32_t *)&(pSRB->CmdBlock[4])) = pSRB->Segment0[1];

		pcsio->sense_resid = pcsio->sense_len
				   - pSRB->TotalXferredLen;
		pSRB->TotalXferredLen = pSRB->Segment1[1];
		if (pSRB->TotalXferredLen) {
			pcsio->resid = pcsio->dxfer_len
				     - pSRB->TotalXferredLen;
			/* The resid field contains valid data	*/
			/* Flush resid bytes on complete	*/
		} else {
			pcsio->scsi_status = SCSI_STATUS_CHECK_COND;
		}
		pccb->ccb_h.status = CAM_AUTOSNS_VALID|CAM_SCSI_STATUS_ERROR;
		goto ckc_e;
	}
	if (status) {
		if (status == SCSI_STATUS_CHECK_COND) {
			if ((pSRB->SGIndex < pSRB->SGcount)
			 && (pSRB->SGcount) && (pSRB->SGToBeXferLen)) {
				bval = pSRB->SGcount;
				swlval = pSRB->SGToBeXferLen;
				ptr2 = pSRB->pSGlist;
				ptr2++;
				for (i = pSRB->SGIndex + 1; i < bval; i++) {
					swlval += ptr2->SGXLen;
					ptr2++;
				}
				pcsio->resid = (u_int32_t) swlval;

#ifdef AMD_DEBUG0
				printf("XferredLen=%8x,NotYetXferLen=%8x,",
				       pSRB->TotalXferredLen, swlval);
#endif
			}
			if ((pcsio->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0) {
#ifdef AMD_DEBUG0
				printf("RequestSense..................\n");
#endif
				RequestSense(amd, pSRB);
				return;
			}
			pcsio->scsi_status = SCSI_STATUS_CHECK_COND;
			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			goto ckc_e;
		} else if (status == SCSI_STATUS_QUEUE_FULL) {
			pSRB->AdaptStatus = 0;
			pSRB->TargetStatus = 0;
			pcsio->scsi_status = SCSI_STATUS_QUEUE_FULL;
			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			goto ckc_e;
		} else if (status == AMD_SCSI_STAT_SEL_TIMEOUT) {
			pSRB->AdaptStatus = H_SEL_TIMEOUT;
			pSRB->TargetStatus = 0;

			pcsio->scsi_status = AMD_SCSI_STAT_SEL_TIMEOUT;
			pccb->ccb_h.status = CAM_SEL_TIMEOUT;
		} else if (status == SCSI_STATUS_BUSY) {
#ifdef AMD_DEBUG0
			printf("DC390: target busy at %s %d\n",
			       __FILE__, __LINE__);
#endif
			pcsio->scsi_status = SCSI_STATUS_BUSY;
			pccb->ccb_h.status = CAM_SCSI_BUSY;
		} else if (status == SCSI_STATUS_RESERV_CONFLICT) {
#ifdef AMD_DEBUG0
			printf("DC390: target reserved at %s %d\n",
			       __FILE__, __LINE__);
#endif
			pcsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT;
			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;	/* XXX */
		} else {
			pSRB->AdaptStatus = 0;
#ifdef AMD_DEBUG0
			printf("DC390: driver stuffup at %s %d\n",
			       __FILE__, __LINE__);
#endif
			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
		}
	} else {
		status = pSRB->AdaptStatus;
		if (status & H_OVER_UNDER_RUN) {
			pSRB->TargetStatus = 0;

			pccb->ccb_h.status = CAM_DATA_RUN_ERR;
		} else if (pSRB->SRBStatus & PARITY_ERROR) {
#ifdef AMD_DEBUG0
			printf("DC390: driver stuffup %s %d\n",
			       __FILE__, __LINE__);
#endif
			/* Driver failed to perform operation */
			pccb->ccb_h.status = CAM_UNCOR_PARITY;
		} else {	/* No error */
			pSRB->AdaptStatus = 0;
			pSRB->TargetStatus = 0;

			/* there is no error, (sense is invalid) */
		}
	}
ckc_e:
	if ((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		/* CAM request not yet complete => device_Q frozen */
		xpt_freeze_devq(pccb->ccb_h.path, 1);
		pccb->ccb_h.status |= CAM_DEV_QFRZN;
	}
	TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
	TAILQ_INSERT_HEAD(&amd->free_srbs, pSRB, links);
	amdrunwaiting(amd);
	xpt_done(pccb);
}
static void
amd_ResetSCSIBus(struct amd_softc *amd)
{
	crit_enter();
	amd->ACBFlag |= RESET_DEV;
	amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
	amd_write8(amd, SCSICMDREG, RST_SCSI_BUS_CMD);
	crit_exit();
}

static void
amd_ScsiRstDetect(struct amd_softc *amd)
{
	u_int32_t wlval;

#ifdef AMD_DEBUG0
	printf("amd_ScsiRstDetect \n");
#endif

	wlval = 1000;
	while (--wlval) {	/* delay 1 sec */
		DELAY(1000);
	}
	crit_enter();

	amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	if (amd->ACBFlag & RESET_DEV) {
		amd->ACBFlag |= RESET_DONE;
	} else {
		amd->ACBFlag |= RESET_DETECT;

		amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
				 AMD_TAG_WILDCARD, &amd->running_srbs,
				 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
		amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
				 AMD_TAG_WILDCARD, &amd->waiting_srbs,
				 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
		amd->active_srb = NULL;
		amd->ACBFlag = 0;
	}
	crit_exit();
}
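
/*
 * Issue an automatic REQUEST SENSE for a command that returned CHECK
 * CONDITION.  The original CDB and transfer counts are stashed in
 * Segment0/Segment1 so SRBdone can restore them when the sense data
 * arrives, and the SRB is restarted with a locally built 6-byte CDB.
 */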
static void
RequestSense(struct amd_softc *amd, struct amd_srb *pSRB)
{
	union ccb *pccb;
	struct ccb_scsiio *pcsio;

	pccb = pSRB->pccb;
	pcsio = &pccb->csio;

	pSRB->SRBFlag |= AUTO_REQSENSE;
	pSRB->Segment0[0] = *((u_int32_t *) & (pSRB->CmdBlock[0]));
	pSRB->Segment0[1] = *((u_int32_t *) & (pSRB->CmdBlock[4]));
	pSRB->Segment1[0] = (pSRB->ScsiCmdLen << 8) + pSRB->SGcount;
	pSRB->Segment1[1] = pSRB->TotalXferredLen;

	pSRB->AdaptStatus = 0;
	pSRB->TargetStatus = 0;

	pSRB->Segmentx.SGXPtr = (u_int32_t) vtophys(&pcsio->sense_data);
	pSRB->Segmentx.SGXLen = (u_int32_t) pcsio->sense_len;

	pSRB->pSGlist = &pSRB->Segmentx;
	pSRB->SGcount = 1;
	pSRB->SGIndex = 0;

	/* REQUEST SENSE (opcode 0x03) in the low byte */
	*((u_int32_t *) & (pSRB->CmdBlock[0])) = 0x00000003;
	pSRB->CmdBlock[1] = pSRB->pccb->ccb_h.target_lun << 5;
	*((u_int16_t *) & (pSRB->CmdBlock[4])) = pcsio->sense_len;
	pSRB->ScsiCmdLen = 6;

	pSRB->TotalXferredLen = 0;
	pSRB->SGToBeXferLen = 0;
	if (amdstart(amd, pSRB) != 0) {
		TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
		TAILQ_INSERT_HEAD(&amd->waiting_srbs, pSRB, links);
	}
}
static void
amd_InvalidCmd(struct amd_softc *amd)
{
	struct amd_srb *srb;

	srb = amd->active_srb;
	if (srb->SRBState & (SRB_START|SRB_MSGOUT))
		amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
}

void
amd_linkSRB(struct amd_softc *amd)
{
	u_int16_t count, i;
	struct amd_srb *psrb;
	int error;

	count = amd->SRBCount;

	for (i = 0; i < count; i++) {
		psrb = (struct amd_srb *)&amd->SRB_array[i];
		psrb->TagNumber = i;

		/*
		 * Create the dmamap.  This is no longer optional!
		 */
		error = bus_dmamap_create(amd->buffer_dmat, 0, &psrb->dmamap);
		if (error) {
			device_printf(amd->dev, "Error %d creating buffer "
				      "dmamap!\n", error);
			break;
		}
		TAILQ_INSERT_TAIL(&amd->free_srbs, psrb, links);
	}
}
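
/*
 * The configuration EEPROM (a 93C46-style serial part) is not mapped
 * into I/O space; it is bit-banged through a vendor register in PCI
 * configuration space.  amd_EnDisableCE() raises and drops chip
 * select, amd_EEpromOutDI() clocks one command/address bit out, and
 * amd_EEpromInDO() clocks one data bit in.  amd_Prepare() shifts out
 * the 9-bit read command (start bit, opcode, address) and
 * EEpromGetData1() then collects a 16-bit word, MSB first.
 */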
static void
amd_EnDisableCE(struct amd_softc *amd, int mode, int *regval)
{
	if (mode == ENABLE_CE) {
		*regval = 0xc0;
	} else {
		*regval = 0x80;
	}
	pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
	if (mode == DISABLE_CE) {
		pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
	}
	DELAY(160);
}

static void
amd_EEpromOutDI(struct amd_softc *amd, int *regval, int Carry)
{
	u_int8_t bval;

	bval = 0;
	if (Carry) {
		bval = 0x40;
		*regval = 0x80;
		pci_write_config(amd->dev, *regval, bval, /*bytes*/1);
	}
	DELAY(160);
	bval |= 0x80;
	pci_write_config(amd->dev, *regval, bval, /*bytes*/1);
	DELAY(160);
	pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
	DELAY(160);
}

static int
amd_EEpromInDO(struct amd_softc *amd)
{
	pci_write_config(amd->dev, 0x80, 0x80, /*bytes*/1);
	DELAY(160);
	pci_write_config(amd->dev, 0x80, 0x40, /*bytes*/1);
	DELAY(160);
	if (pci_read_config(amd->dev, 0, /*bytes*/1) == 0x22)
		return (1);
	else
		return (0);
}

static u_int16_t
EEpromGetData1(struct amd_softc *amd)
{
	u_int8_t i;
	u_int8_t carryFlag;
	u_int16_t wval;

	wval = 0;
	for (i = 0; i < 16; i++) {
		wval <<= 1;
		carryFlag = amd_EEpromInDO(amd);
		wval |= carryFlag;
	}
	return (wval);
}

static void
amd_Prepare(struct amd_softc *amd, int *regval, u_int8_t EEpromCmd)
{
	u_int8_t i, j;
	int carryFlag;

	carryFlag = 1;
	j = 0x80;
	for (i = 0; i < 9; i++) {
		amd_EEpromOutDI(amd, regval, carryFlag);
		carryFlag = (EEpromCmd & j) ? 1 : 0;
		j >>= 1;
	}
}

static void
amd_ReadEEprom(struct amd_softc *amd)
{
	int	regval;
	u_int	i;
	u_int16_t *ptr;
	u_int8_t cmd;

	cmd = EEPROM_READ;
	ptr = (u_int16_t *)&amd->eepromBuf[0];
	for (i = 0; i < 0x40; i++) {
		amd_EnDisableCE(amd, ENABLE_CE, &regval);
		amd_Prepare(amd, &regval, cmd);
		*ptr = EEpromGetData1(amd);
		ptr++;
		cmd++;
		amd_EnDisableCE(amd, DISABLE_CE, &regval);
	}
}
static void
amd_load_defaults(struct amd_softc *amd)
{
	int target;

	bzero(&amd->eepromBuf, sizeof amd->eepromBuf);
	for (target = 0; target < MAX_SCSI_ID; target++)
		amd->eepromBuf[target << 2] =
		    (TAG_QUEUING|EN_DISCONNECT|SYNC_NEGO|PARITY_CHK);
	amd->eepromBuf[EE_ADAPT_SCSI_ID] = 7;
	amd->eepromBuf[EE_MODE2] = ACTIVE_NEGATION|LUN_CHECK|GREATER_1G;
	amd->eepromBuf[EE_TAG_CMD_NUM] = 4;
}

static void
amd_load_eeprom_or_defaults(struct amd_softc *amd)
{
	u_int16_t wval, *ptr;
	u_int8_t i;

	amd_ReadEEprom(amd);
	wval = 0;
	ptr = (u_int16_t *) &amd->eepromBuf[0];
	for (i = 0; i < EE_DATA_SIZE; i += 2, ptr++)
		wval += *ptr;

	if (wval != EE_CHECKSUM) {
		if (bootverbose)
			printf("amd%d: SEEPROM data unavailable.  "
			       "Using default device parameters.\n",
			       amd->unit);
		amd_load_defaults(amd);
	}
}
/*
 **********************************************************************
 * Function : static int amd_init (struct Scsi_Host *host)
 * Purpose  : initialize the internal structures for a given SCSI host
 * Inputs   : host - pointer to this host adapter's structure
 **********************************************************************
 */
static int
amd_init(device_t dev)
{
	struct amd_softc *amd = device_get_softc(dev);
	struct resource	*iores;
	int	i, rid;
	u_int8_t bval;

	rid = PCI_BASE_ADDR0;
	iores = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0, ~0, 1,
				   RF_ACTIVE);
	if (iores == NULL) {
		if (bootverbose)
			printf("amd_init: bus_alloc_resource failure!\n");
		return ENXIO;
	}
	amd->tag = rman_get_bustag(iores);
	amd->bsh = rman_get_bushandle(iores);

	/* DMA tag for mapping buffers into device visible space. */
	if (bus_dma_tag_create(/*parent_dmat*/NULL, /*alignment*/1,
			       /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       /*maxsize*/MAXBSIZE, /*nsegments*/AMD_NSEG,
			       /*maxsegsz*/AMD_MAXTRANSFER_SIZE,
			       /*flags*/BUS_DMA_ALLOCNOW,
			       &amd->buffer_dmat) != 0) {
		if (bootverbose)
			printf("amd_init: bus_dma_tag_create failure!\n");
		return ENXIO;
	}

	TAILQ_INIT(&amd->free_srbs);
	TAILQ_INIT(&amd->running_srbs);
	TAILQ_INIT(&amd->waiting_srbs);
	amd->last_phase = SCSI_BUS_FREE;
	amd->dev = dev;
	amd->unit = device_get_unit(dev);
	amd->SRBCount = MAX_SRB_CNT;

	amd_load_eeprom_or_defaults(amd);
	amd->max_id = 7;
	if (amd->eepromBuf[EE_MODE2] & LUN_CHECK) {
		amd->max_lun = 7;
	} else {
		amd->max_lun = 0;
	}
	amd->AdaptSCSIID = amd->eepromBuf[EE_ADAPT_SCSI_ID];
	amd->HostID_Bit = (1 << amd->AdaptSCSIID);
	amd->AdaptSCSILUN = 0;
	/* (eepromBuf[EE_TAG_CMD_NUM]) << 2; */
	amd->ACBFlag = 0;
	amd->Gmode2 = amd->eepromBuf[EE_MODE2];
	amd_linkSRB(amd);
	for (i = 0; i <= amd->max_id; i++) {

		if (amd->AdaptSCSIID != i) {
			struct amd_target_info *tinfo;
			PEEprom prom;

			tinfo = &amd->tinfo[i];
			prom = (PEEprom)&amd->eepromBuf[i << 2];
			if ((prom->EE_MODE1 & EN_DISCONNECT) != 0) {
				tinfo->disc_tag |= AMD_USR_DISCENB;
				if ((prom->EE_MODE1 & TAG_QUEUING) != 0)
					tinfo->disc_tag |= AMD_USR_TAGENB;
			}
			if ((prom->EE_MODE1 & SYNC_NEGO) != 0) {
				tinfo->user.period =
				    eeprom_period[prom->EE_SPEED];
				tinfo->user.offset = AMD_MAX_SYNC_OFFSET;
			}
			tinfo->CtrlR1 = amd->AdaptSCSIID;
			if ((prom->EE_MODE1 & PARITY_CHK) != 0)
				tinfo->CtrlR1 |= PARITY_ERR_REPO;
			tinfo->CtrlR3 = FAST_CLK;
			tinfo->CtrlR4 = EATER_25NS;
			if ((amd->eepromBuf[EE_MODE2] & ACTIVE_NEGATION) != 0)
				tinfo->CtrlR4 |= NEGATE_REQACKDATA;
		}
	}
	amd_write8(amd, SCSITIMEOUTREG, 153);	/* 250ms selection timeout */
	/* Conversion factor = 0, 40MHz clock */
	amd_write8(amd, CLKFACTREG, CLK_FREQ_40MHZ);
	/* NOP cmd - clear command register */
	amd_write8(amd, SCSICMDREG, NOP_CMD);
	amd_write8(amd, CNTLREG2, EN_FEATURE|EN_SCSI2_CMD);
	amd_write8(amd, CNTLREG3, FAST_CLK);
	bval = EATER_25NS;
	if (amd->eepromBuf[EE_MODE2] & ACTIVE_NEGATION) {
		bval |= NEGATE_REQACKDATA;
	}
	amd_write8(amd, CNTLREG4, bval);

	/* Disable SCSI bus reset interrupt */
	amd_write8(amd, CNTLREG1, DIS_INT_ON_SCSI_RST);

	return 0;
}
/*
 * attach and init a host adapter
 */
static int
amd_attach(device_t dev)
{
	struct cam_devq	*devq;	/* Device Queue to use for this SIM */
	u_int8_t	intstat;
	struct amd_softc *amd = device_get_softc(dev);
	int		unit = device_get_unit(dev);
	int		rid;
	void		*ih;
	struct resource	*irqres;

	if (amd_init(dev)) {
		if (bootverbose)
			printf("amd_attach: amd_init failure!\n");
		return ENXIO;
	}

	/* Reset Pending INT */
	intstat = amd_read8(amd, INTSTATREG);

	/* After setting up the adapter, map our interrupt */
	rid = 0;
	irqres = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
				    RF_SHAREABLE | RF_ACTIVE);
	if (irqres == NULL ||
	    bus_setup_intr(dev, irqres, 0, amd_intr, amd, &ih, NULL)
	    != 0) {
		if (bootverbose)
			printf("amd%d: unable to register interrupt handler!\n",
			       unit);
		return ENXIO;
	}

	/*
	 * Now let the CAM generic SCSI layer find the SCSI devices on
	 * the bus and start the queues running.  Create the device
	 * queue for our SIM; MAX_START_JOB - 1 is the maximum number
	 * of simultaneous SIM transactions.
	 */
	devq = cam_simq_alloc(MAX_START_JOB);
	if (devq == NULL) {
		if (bootverbose)
			printf("amd_attach: cam_simq_alloc failure!\n");
		return ENXIO;
	}

	amd->psim = cam_sim_alloc(amd_action, amd_poll, "amd",
				  amd, amd->unit, 1, MAX_TAGS_CMD_QUEUE,
				  devq);
	cam_simq_release(devq);
	if (amd->psim == NULL) {
		if (bootverbose)
			printf("amd_attach: cam_sim_alloc failure!\n");
		return ENXIO;
	}

	if (xpt_bus_register(amd->psim, 0) != CAM_SUCCESS) {
		cam_sim_free(amd->psim);
		if (bootverbose)
			printf("amd_attach: xpt_bus_register failure!\n");
		return ENXIO;
	}

	if (xpt_create_path(&amd->ppath, /* periph */ NULL,
			    cam_sim_path(amd->psim), CAM_TARGET_WILDCARD,
			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(amd->psim));
		cam_sim_free(amd->psim);
		if (bootverbose)
			printf("amd_attach: xpt_create_path failure!\n");
		return ENXIO;
	}

	return 0;
}
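
/*
 * Match the single PCI ID this driver supports: device 0x2020,
 * vendor 0x1022 (AMD), i.e. the AM53C974 as found on the DC-390.
 */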
static int
amd_probe(device_t dev)
{
	if (pci_get_devid(dev) == PCI_DEVICE_ID_AMD53C974) {
		device_set_desc(dev,
			"Tekram DC390(T)/AMD53c974 SCSI Host Adapter");
		return 0;
	}
	return ENXIO;
}

static device_method_t amd_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		amd_probe),
	DEVMETHOD(device_attach,	amd_attach),
	{ 0, 0 }
};

static driver_t amd_driver = {
	"amd", amd_methods, sizeof(struct amd_softc)
};

static devclass_t amd_devclass;
DRIVER_MODULE(amd, pci, amd_driver, amd_devclass, 0, 0);