/*
 *********************************************************************
 *      FILE NAME  : amd.c
 *           BY    : C.L. Huang         (ching@tekram.com.tw)
 *                   Erich Chen     (erich@tekram.com.tw)
 *      Description: Device Driver for the amd53c974 PCI Bus Master
 *                   SCSI Host adapter found on cards such as
 *                   the Tekram DC-390(T).
 * (C)Copyright 1995-1999 Tekram Technology Co., Ltd.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *********************************************************************
 * $FreeBSD: src/sys/pci/amd.c,v 1.3.2.2 2001/06/02 04:32:50 nyan Exp $
 * $DragonFly: src/sys/dev/disk/amd/amd.c,v 1.3 2003/08/07 21:16:51 dillon Exp $
 */

/*
 *********************************************************************
 *      HISTORY:
 *
 *      REV#    DATE    NAME            DESCRIPTION
 *      1.00  07/02/96  CLH             First release for RELEASE-2.1.0
 *      1.01  08/20/96  CLH             Update for RELEASE-2.1.5
 *      1.02  11/06/96  CLH             Fixed more than 1 LUN scanning
 *      1.03  12/20/96  CLH             Modify to support 2.2-ALPHA
 *      1.04  12/26/97  CLH             Modify to support RELEASE-2.2.5
 *      1.05  01/01/99  ERICH CHEN      Modify to support RELEASE-3.0.x (CAM)
 *********************************************************************
 */

/* #define AMD_DEBUG0           */
/* #define AMD_DEBUG_SCSI_PHASE */

#include <sys/param.h>

#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/buf.h>
#include <sys/kernel.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus_pio.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <bus/cam/cam.h>
#include <bus/cam/cam_ccb.h>
#include <bus/cam/cam_sim.h>
#include <bus/cam/cam_xpt_sim.h>
#include <bus/cam/cam_debug.h>

#include <bus/cam/scsi/scsi_all.h>
#include <bus/cam/scsi/scsi_message.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>
#include "amd.h"

#define PCI_DEVICE_ID_AMD53C974         0x20201022ul
#define PCI_BASE_ADDR0                  0x10

typedef u_int (phase_handler_t)(struct amd_softc *, struct amd_srb *, u_int);
typedef phase_handler_t *phase_handler_func_t;

static void amd_intr(void *vamd);
static int amdstart(struct amd_softc *amd, struct amd_srb * pSRB);
static phase_handler_t amd_NopPhase;

static phase_handler_t amd_DataOutPhase0;
static phase_handler_t amd_DataInPhase0;
#define amd_CommandPhase0 amd_NopPhase
static phase_handler_t amd_StatusPhase0;
static phase_handler_t amd_MsgOutPhase0;
static phase_handler_t amd_MsgInPhase0;
static phase_handler_t amd_DataOutPhase1;
static phase_handler_t amd_DataInPhase1;
static phase_handler_t amd_CommandPhase1;
static phase_handler_t amd_StatusPhase1;
static phase_handler_t amd_MsgOutPhase1;
static phase_handler_t amd_MsgInPhase1;

static void     amdsetupcommand(struct amd_softc *amd, struct amd_srb *srb);
static int      amdparsemsg(struct amd_softc *amd);
static int      amdhandlemsgreject(struct amd_softc *amd);
static void     amdconstructsdtr(struct amd_softc *amd,
                                 u_int period, u_int offset);
static u_int    amdfindclockrate(struct amd_softc *amd, u_int *period);
static int      amdsentmsg(struct amd_softc *amd, u_int msgtype, int full);

static void DataIO_Comm(struct amd_softc *amd, struct amd_srb *pSRB, u_int dir);
static void amd_Disconnect(struct amd_softc *amd);
static void amd_Reselect(struct amd_softc *amd);
static void SRBdone(struct amd_softc *amd, struct amd_srb *pSRB);
static void amd_ScsiRstDetect(struct amd_softc *amd);
static void amd_ResetSCSIBus(struct amd_softc *amd);
static void RequestSense(struct amd_softc *amd, struct amd_srb *pSRB);
static void amd_InvalidCmd(struct amd_softc *amd);

#if 0
static void amd_timeout(void *arg1);
static void amd_reset(struct amd_softc *amd);
#endif
static u_int8_t * phystovirt(struct amd_srb *pSRB, u_int32_t xferCnt);

void    amd_linkSRB(struct amd_softc *amd);
static int amd_init(device_t);
static void amd_load_defaults(struct amd_softc *amd);
static void amd_load_eeprom_or_defaults(struct amd_softc *amd);
static int amd_EEpromInDO(struct amd_softc *amd);
static u_int16_t EEpromGetData1(struct amd_softc *amd);
static void amd_EnDisableCE(struct amd_softc *amd, int mode, int *regval);
static void amd_EEpromOutDI(struct amd_softc *amd, int *regval, int Carry);
static void amd_Prepare(struct amd_softc *amd, int *regval, u_int8_t EEpromCmd);
static void amd_ReadEEprom(struct amd_softc *amd);

static int amd_probe(device_t);
static int amd_attach(device_t);
static void amdcompletematch(struct amd_softc *amd, target_id_t target,
                             lun_id_t lun, u_int tag, struct srb_queue *queue,
                             cam_status status);
static void amdsetsync(struct amd_softc *amd, u_int target, u_int clockrate,
                       u_int period, u_int offset, u_int type);
static void amdsettags(struct amd_softc *amd, u_int target, int tagenb);

static __inline void amd_clear_msg_state(struct amd_softc *amd);

static __inline void
amd_clear_msg_state(struct amd_softc *amd)
{
        amd->msgout_len = 0;
        amd->msgout_index = 0;
        amd->msgin_index = 0;
}

/* CAM SIM entry points */
#define ccb_srb_ptr spriv_ptr0
#define ccb_amd_ptr spriv_ptr1
static void     amd_action(struct cam_sim *sim, union ccb *ccb);
static void     amd_poll(struct cam_sim *sim);

/*
 * State engine function tables indexed by SCSI phase number
 */
phase_handler_func_t amd_SCSI_phase0[] = {
        amd_DataOutPhase0,
        amd_DataInPhase0,
        amd_CommandPhase0,
        amd_StatusPhase0,
        amd_NopPhase,
        amd_NopPhase,
        amd_MsgOutPhase0,
        amd_MsgInPhase0
};

phase_handler_func_t amd_SCSI_phase1[] = {
        amd_DataOutPhase1,
        amd_DataInPhase1,
        amd_CommandPhase1,
        amd_StatusPhase1,
        amd_NopPhase,
        amd_NopPhase,
        amd_MsgOutPhase1,
        amd_MsgInPhase1
};
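
/*
 * Dispatch sketch (see amd_intr() below): the low three bits of
 * SCSISTATREG encode the current bus phase, so the interrupt handler
 * does, in effect:
 *
 *      scsistat = amd_SCSI_phase0[amd->last_phase](amd, pSRB, scsistat);
 *      amd->last_phase = scsistat & SCSI_PHASE_MASK;
 *      (void)amd_SCSI_phase1[amd->last_phase](amd, pSRB, scsistat);
 *
 * i.e. the phase0 handler post-processes the phase just left and the
 * phase1 handler sets up the phase just entered.
 */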

/*
 * EEProm/BIOS negotiation periods
 */
u_int8_t   eeprom_period[] = {
         25,    /* 10.0MHz */
         32,    /*  8.0MHz */
         38,    /*  6.6MHz */
         44,    /*  5.7MHz */
         50,    /*  5.0MHz */
         63,    /*  4.0MHz */
         83,    /*  3.0MHz */
        125     /*  2.0MHz */
};
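/*
 * Note: each entry is the synchronous transfer period expressed in 4ns
 * units (e.g. 25 * 4ns = 100ns, i.e. 10.0MHz), indexed by the EEProm
 * speed setting.
 */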

/*
 * Chip clock setting to SCSI-specified synchronous parameter table.
 */
u_int8_t tinfo_sync_period[] = {
        25,     /* 10.0 */
        32,     /* 8.0 */
        38,     /* 6.6 */
        44,     /* 5.7 */
        50,     /* 5.0 */
        57,     /* 4.4 */
        63,     /* 4.0 */
        70,     /* 3.6 */
        76,     /* 3.3 */
        83      /* 3.0 */
};
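/*
 * Note: same 4ns units as above; entry i corresponds to chip
 * clock-rate setting i + 4 (see amdfindclockrate() below).
 */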

static __inline struct amd_srb *
amdgetsrb(struct amd_softc * amd)
{
        int     intflag;
        struct amd_srb *    pSRB;

        intflag = splcam();
        pSRB = TAILQ_FIRST(&amd->free_srbs);
        if (pSRB)
                TAILQ_REMOVE(&amd->free_srbs, pSRB, links);
        splx(intflag);
        return (pSRB);
}

static void
amdsetupcommand(struct amd_softc *amd, struct amd_srb *srb)
{
        struct scsi_request_sense sense_cmd;
        struct ccb_scsiio *csio;
        u_int8_t *cdb;
        u_int cdb_len;

        csio = &srb->pccb->csio;

        if (srb->SRBFlag & AUTO_REQSENSE) {
                sense_cmd.opcode = REQUEST_SENSE;
                sense_cmd.byte2 = srb->pccb->ccb_h.target_lun << 5;
                sense_cmd.unused[0] = 0;
                sense_cmd.unused[1] = 0;
                sense_cmd.length = csio->sense_len;
                sense_cmd.control = 0;
                cdb = &sense_cmd.opcode;
                cdb_len = sizeof(sense_cmd);
        } else {
                cdb = &srb->CmdBlock[0];
                cdb_len = srb->ScsiCmdLen;
        }
        amd_write8_multi(amd, SCSIFIFOREG, cdb, cdb_len);
}

/*
 * Attempt to start a waiting transaction.  Interrupts must be disabled
 * upon entry to this function.
 */
static void
amdrunwaiting(struct amd_softc *amd)
{
        struct amd_srb *srb;

        if (amd->last_phase != SCSI_BUS_FREE)
                return;

        srb = TAILQ_FIRST(&amd->waiting_srbs);
        if (srb == NULL)
                return;

        if (amdstart(amd, srb) == 0) {
                TAILQ_REMOVE(&amd->waiting_srbs, srb, links);
                TAILQ_INSERT_HEAD(&amd->running_srbs, srb, links);
        }
}

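/*
 * bus_dmamap_load() callback: copy the returned DMA segments into the
 * SRB's scatter/gather list, pre-sync the buffers, and queue the SRB
 * for execution (unless the CCB was aborted or the load failed).
 */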
static void
amdexecutesrb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
        struct   amd_srb *srb;
        union    ccb *ccb;
        struct   amd_softc *amd;
        int      s;

        srb = (struct amd_srb *)arg;
        ccb = srb->pccb;
        amd = (struct amd_softc *)ccb->ccb_h.ccb_amd_ptr;

        if (error != 0) {
                if (error != EFBIG)
                        printf("amd%d: Unexpected error 0x%x returned from "
                               "bus_dmamap_load\n", amd->unit, error);
                if (ccb->ccb_h.status == CAM_REQ_INPROG) {
                        xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
                        ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
                }
                TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
                xpt_done(ccb);
                return;
        }

        if (nseg != 0) {
                struct amd_sg *sg;
                bus_dma_segment_t *end_seg;
                bus_dmasync_op_t op;

                end_seg = dm_segs + nseg;

                /* Copy the segments into our SG list */
                srb->pSGlist = &srb->SGsegment[0];
                sg = srb->pSGlist;
                while (dm_segs < end_seg) {
                        sg->SGXLen = dm_segs->ds_len;
                        sg->SGXPtr = dm_segs->ds_addr;
                        sg++;
                        dm_segs++;
                }

                if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
                        op = BUS_DMASYNC_PREREAD;
                else
                        op = BUS_DMASYNC_PREWRITE;

                bus_dmamap_sync(amd->buffer_dmat, srb->dmamap, op);

        }
        srb->SGcount = nseg;
        srb->SGIndex = 0;
        srb->AdaptStatus = 0;
        srb->TargetStatus = 0;
        srb->MsgCnt = 0;
        srb->SRBStatus = 0;
        srb->SRBFlag = 0;
        srb->SRBState = 0;
        srb->TotalXferredLen = 0;
        srb->SGPhysAddr = 0;
        srb->SGToBeXferLen = 0;
        srb->EndMessage = 0;

        s = splcam();

        /*
         * Last chance to check whether this CCB needs to be aborted.
         */
        if (ccb->ccb_h.status != CAM_REQ_INPROG) {
                if (nseg != 0)
                        bus_dmamap_unload(amd->buffer_dmat, srb->dmamap);
                TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
                xpt_done(ccb);
                splx(s);
                return;
        }
        ccb->ccb_h.status |= CAM_SIM_QUEUED;
#if 0
        /* XXX Need a timeout handler */
        ccb->ccb_h.timeout_ch =
            timeout(amdtimeout, (caddr_t)srb,
                    (ccb->ccb_h.timeout * hz) / 1000);
#endif
        TAILQ_INSERT_TAIL(&amd->waiting_srbs, srb, links);
        amdrunwaiting(amd);
        splx(s);
}

static void
amd_action(struct cam_sim * psim, union ccb * pccb)
{
        struct amd_softc *    amd;
        u_int   target_id, target_lun;

        CAM_DEBUG(pccb->ccb_h.path, CAM_DEBUG_TRACE, ("amd_action\n"));

        amd = (struct amd_softc *) cam_sim_softc(psim);
        target_id = pccb->ccb_h.target_id;
        target_lun = pccb->ccb_h.target_lun;

        switch (pccb->ccb_h.func_code) {
        case XPT_SCSI_IO:
        {
                struct amd_srb *    pSRB;
                struct ccb_scsiio *pcsio;

                pcsio = &pccb->csio;

                /*
                 * Assign an SRB and connect it with this ccb.
                 */
                pSRB = amdgetsrb(amd);

                if (!pSRB) {
                        /* Freeze SIMQ */
                        pccb->ccb_h.status = CAM_RESRC_UNAVAIL;
                        xpt_done(pccb);
                        return;
                }
                pSRB->pccb = pccb;
                pccb->ccb_h.ccb_srb_ptr = pSRB;
                pccb->ccb_h.ccb_amd_ptr = amd;
                pSRB->ScsiCmdLen = pcsio->cdb_len;
                bcopy(pcsio->cdb_io.cdb_bytes, pSRB->CmdBlock, pcsio->cdb_len);
                if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
                        if ((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
                                /*
                                 * We've been given a pointer
                                 * to a single buffer.
                                 */
                                if ((pccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
                                        int s;
                                        int error;

                                        s = splsoftvm();
                                        error =
                                            bus_dmamap_load(amd->buffer_dmat,
                                                            pSRB->dmamap,
                                                            pcsio->data_ptr,
                                                            pcsio->dxfer_len,
                                                            amdexecutesrb,
                                                            pSRB, /*flags*/0);
                                        if (error == EINPROGRESS) {
                                                /*
                                                 * So as to maintain
                                                 * ordering, freeze the
                                                 * controller queue
                                                 * until our mapping is
                                                 * returned.
                                                 */
                                                xpt_freeze_simq(amd->psim, 1);
                                                pccb->ccb_h.status |=
                                                    CAM_RELEASE_SIMQ;
                                        }
                                        splx(s);
                                } else {
                                        struct bus_dma_segment seg;

                                        /* Pointer to physical buffer */
                                        seg.ds_addr =
                                            (bus_addr_t)pcsio->data_ptr;
                                        seg.ds_len = pcsio->dxfer_len;
                                        amdexecutesrb(pSRB, &seg, 1, 0);
                                }
                        } else {
                                struct bus_dma_segment *segs;

                                if ((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0
                                 || (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
                                        TAILQ_INSERT_HEAD(&amd->free_srbs,
                                                          pSRB, links);
                                        pccb->ccb_h.status = CAM_PROVIDE_FAIL;
                                        xpt_done(pccb);
                                        return;
                                }

                                /* Just use the segments provided */
                                segs =
                                    (struct bus_dma_segment *)pcsio->data_ptr;
                                amdexecutesrb(pSRB, segs, pcsio->sglist_cnt, 0);
                        }
                } else
                        amdexecutesrb(pSRB, NULL, 0, 0);
                break;
        }
        case XPT_PATH_INQ:
        {
                struct ccb_pathinq *cpi = &pccb->cpi;

                cpi->version_num = 1;
                cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE;
                cpi->target_sprt = 0;
                cpi->hba_misc = 0;
                cpi->hba_eng_cnt = 0;
                cpi->max_target = 7;
                cpi->max_lun = amd->max_lun;    /* 7 or 0 */
                cpi->initiator_id = amd->AdaptSCSIID;
                cpi->bus_id = cam_sim_bus(psim);
                strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
                strncpy(cpi->hba_vid, "TRM-AMD", HBA_IDLEN);
                strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN);
                cpi->unit_number = cam_sim_unit(psim);
                cpi->ccb_h.status = CAM_REQ_CMP;
                xpt_done(pccb);
                break;
        }
        case XPT_ABORT:
                pccb->ccb_h.status = CAM_REQ_INVALID;
                xpt_done(pccb);
                break;
        case XPT_RESET_BUS:
        {
                int     i;

                amd_ResetSCSIBus(amd);
                amd->ACBFlag = 0;

                for (i = 0; i < 500; i++) {
                        DELAY(1000);    /* Wait until our interrupt
                                         * handler sees it */
                }

                pccb->ccb_h.status = CAM_REQ_CMP;
                xpt_done(pccb);
                break;
        }
        case XPT_RESET_DEV:
                pccb->ccb_h.status = CAM_REQ_INVALID;
                xpt_done(pccb);
                break;
        case XPT_TERM_IO:
                pccb->ccb_h.status = CAM_REQ_INVALID;
                xpt_done(pccb);
                break;
        case XPT_GET_TRAN_SETTINGS:
        {
                struct ccb_trans_settings *cts;
                struct amd_target_info *targ_info;
                struct amd_transinfo *tinfo;
                int     intflag;

                cts = &pccb->cts;
                intflag = splcam();
                targ_info = &amd->tinfo[target_id];
                if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
                        /* current transfer settings */
                        if (targ_info->disc_tag & AMD_CUR_DISCENB) {
                                cts->flags = CCB_TRANS_DISC_ENB;
                        } else {
                                cts->flags = 0; /* no tag & disconnect */
                        }
                        if (targ_info->disc_tag & AMD_CUR_TAGENB) {
                                cts->flags |= CCB_TRANS_TAG_ENB;
                        }
                        tinfo = &targ_info->current;
                } else {
                        /* default (user) transfer settings */
                        if (targ_info->disc_tag & AMD_USR_DISCENB) {
                                cts->flags = CCB_TRANS_DISC_ENB;
                        } else {
                                cts->flags = 0;
                        }
                        if (targ_info->disc_tag & AMD_USR_TAGENB) {
                                cts->flags |= CCB_TRANS_TAG_ENB;
                        }
                        tinfo = &targ_info->user;
                }

                cts->sync_period = tinfo->period;
                cts->sync_offset = tinfo->offset;
                cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
                splx(intflag);
                cts->valid = CCB_TRANS_SYNC_RATE_VALID
                           | CCB_TRANS_SYNC_OFFSET_VALID
                           | CCB_TRANS_BUS_WIDTH_VALID
                           | CCB_TRANS_DISC_VALID
                           | CCB_TRANS_TQ_VALID;
                pccb->ccb_h.status = CAM_REQ_CMP;
                xpt_done(pccb);
                break;
        }
        case XPT_SET_TRAN_SETTINGS:
        {
                struct ccb_trans_settings *cts;
                struct amd_target_info *targ_info;
                u_int  update_type;
                int    intflag;
                int    last_entry;

                cts = &pccb->cts;
                update_type = 0;
                if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
                        update_type |= AMD_TRANS_GOAL;
                } else if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
                        update_type |= AMD_TRANS_USER;
                }
                if (update_type == 0
                 || update_type == (AMD_TRANS_USER|AMD_TRANS_GOAL)) {
                        cts->ccb_h.status = CAM_REQ_INVALID;
                        xpt_done(pccb);
                        break;
                }

                intflag = splcam();
                targ_info = &amd->tinfo[target_id];

                if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
                        if (update_type & AMD_TRANS_GOAL) {
                                if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
                                        targ_info->disc_tag |= AMD_CUR_DISCENB;
                                } else {
                                        targ_info->disc_tag &= ~AMD_CUR_DISCENB;
                                }
                        }
                        if (update_type & AMD_TRANS_USER) {
                                if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
                                        targ_info->disc_tag |= AMD_USR_DISCENB;
                                } else {
                                        targ_info->disc_tag &= ~AMD_USR_DISCENB;
                                }
                        }
                }
                if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
                        if (update_type & AMD_TRANS_GOAL) {
                                if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
                                        targ_info->disc_tag |= AMD_CUR_TAGENB;
                                } else {
                                        targ_info->disc_tag &= ~AMD_CUR_TAGENB;
                                }
                        }
                        if (update_type & AMD_TRANS_USER) {
                                if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
                                        targ_info->disc_tag |= AMD_USR_TAGENB;
                                } else {
                                        targ_info->disc_tag &= ~AMD_USR_TAGENB;
                                }
                        }
                }

                if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0) {
                        if (update_type & AMD_TRANS_GOAL)
                                cts->sync_offset = targ_info->goal.offset;
                        else
                                cts->sync_offset = targ_info->user.offset;
                }

                if (cts->sync_offset > AMD_MAX_SYNC_OFFSET)
                        cts->sync_offset = AMD_MAX_SYNC_OFFSET;

                if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) {
                        if (update_type & AMD_TRANS_GOAL)
                                cts->sync_period = targ_info->goal.period;
                        else
                                cts->sync_period = targ_info->user.period;
                }

                last_entry = sizeof(tinfo_sync_period) - 1;
                if ((cts->sync_period != 0)
                 && (cts->sync_period < tinfo_sync_period[0]))
                        cts->sync_period = tinfo_sync_period[0];
                if (cts->sync_period > tinfo_sync_period[last_entry])
                        cts->sync_period = 0;
                if (cts->sync_offset == 0)
                        cts->sync_period = 0;

                if ((update_type & AMD_TRANS_USER) != 0) {
                        targ_info->user.period = cts->sync_period;
                        targ_info->user.offset = cts->sync_offset;
                }
                if ((update_type & AMD_TRANS_GOAL) != 0) {
                        targ_info->goal.period = cts->sync_period;
                        targ_info->goal.offset = cts->sync_offset;
                }
                splx(intflag);
                pccb->ccb_h.status = CAM_REQ_CMP;
                xpt_done(pccb);
                break;
        }
        case XPT_CALC_GEOMETRY:
        {
                struct ccb_calc_geometry *ccg;
                u_int32_t size_mb;
                u_int32_t secs_per_cylinder;
                int     extended;

                ccg = &pccb->ccg;
                size_mb = ccg->volume_size/((1024L * 1024L)/ccg->block_size);
                extended = (amd->eepromBuf[EE_MODE2] & GREATER_1G) != 0;

                if (size_mb > 1024 && extended) {
                        ccg->heads = 255;
                        ccg->secs_per_track = 63;
                } else {
                        ccg->heads = 64;
                        ccg->secs_per_track = 32;
                }
                secs_per_cylinder = ccg->heads * ccg->secs_per_track;
                ccg->cylinders = ccg->volume_size / secs_per_cylinder;
                pccb->ccb_h.status = CAM_REQ_CMP;
                xpt_done(pccb);
                break;
        }
        default:
                pccb->ccb_h.status = CAM_REQ_INVALID;
                xpt_done(pccb);
                break;
        }
}

static void
amd_poll(struct cam_sim * psim)
{
        amd_intr(cam_sim_softc(psim));
}

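
/*
 * Translate the current transfer position of an SRB back into a kernel
 * virtual address, by walking the already-completed S/G entries from
 * the CCB's original data pointer.  Used by amd_DataInPhase0() to store
 * a residual byte left in the SCSI FIFO.  Note: the pointer arithmetic
 * via "int" assumes a 32-bit virtual address space (i386-era code).
 */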
static u_int8_t *
phystovirt(struct amd_srb * pSRB, u_int32_t xferCnt)
{
        int     dataPtr;
        struct ccb_scsiio *pcsio;
        u_int8_t   i;
        struct amd_sg *    pseg;

        dataPtr = 0;
        pcsio = &pSRB->pccb->csio;

        dataPtr = (int) pcsio->data_ptr;
        pseg = pSRB->SGsegment;
        for (i = 0; i < pSRB->SGIndex; i++) {
                dataPtr += (int) pseg->SGXLen;
                pseg++;
        }
        dataPtr += (int) xferCnt;
        return ((u_int8_t *) dataPtr);
}

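
/*
 * After a bus reset, drop every target (other than the adapter itself)
 * back to asynchronous transfers; any sync agreement died with the reset.
 */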
static void
ResetDevParam(struct amd_softc * amd)
{
        u_int target;

        for (target = 0; target <= amd->max_id; target++) {
                if (amd->AdaptSCSIID != target) {
                        amdsetsync(amd, target, /*clockrate*/0,
                                   /*period*/0, /*offset*/0, AMD_TRANS_CUR);
                }
        }
}

static void
amdcompletematch(struct amd_softc *amd, target_id_t target, lun_id_t lun,
                 u_int tag, struct srb_queue *queue, cam_status status)
{
        struct amd_srb *srb;
        struct amd_srb *next_srb;

        for (srb = TAILQ_FIRST(queue); srb != NULL; srb = next_srb) {
                union ccb *ccb;

                next_srb = TAILQ_NEXT(srb, links);
                if (srb->pccb->ccb_h.target_id != target
                 && target != CAM_TARGET_WILDCARD)
                        continue;

                if (srb->pccb->ccb_h.target_lun != lun
                 && lun != CAM_LUN_WILDCARD)
                        continue;

                if (srb->TagNumber != tag
                 && tag != AMD_TAG_WILDCARD)
                        continue;

                ccb = srb->pccb;
                TAILQ_REMOVE(queue, srb, links);
                TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
                if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0
                 && (status & CAM_DEV_QFRZN) != 0)
                        xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
                ccb->ccb_h.status = status;
                xpt_done(ccb);
        }
}

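
/*
 * Record new sync parameters for a target.  AMD_TRANS_CUR updates the
 * chip register shadow (and the live registers if AMD_TRANS_ACTIVE is
 * also set) and tells CAM about the new rate via an AC_TRANSFER_NEG
 * async event; AMD_TRANS_GOAL/AMD_TRANS_USER just update the
 * negotiation targets.
 */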
static void
amdsetsync(struct amd_softc *amd, u_int target, u_int clockrate,
           u_int period, u_int offset, u_int type)
{
        struct amd_target_info *tinfo;
        u_int old_period;
        u_int old_offset;

        tinfo = &amd->tinfo[target];
        old_period = tinfo->current.period;
        old_offset = tinfo->current.offset;
        if ((type & AMD_TRANS_CUR) != 0
         && (old_period != period || old_offset != offset)) {
                struct cam_path *path;

                tinfo->current.period = period;
                tinfo->current.offset = offset;
                tinfo->sync_period_reg = clockrate;
                tinfo->sync_offset_reg = offset;
                tinfo->CtrlR3 &= ~FAST_SCSI;
                tinfo->CtrlR4 &= ~EATER_25NS;
                if (clockrate > 7)
                        tinfo->CtrlR4 |= EATER_25NS;
                else
                        tinfo->CtrlR3 |= FAST_SCSI;

                if ((type & AMD_TRANS_ACTIVE) == AMD_TRANS_ACTIVE) {
                        amd_write8(amd, SYNCPERIOREG, tinfo->sync_period_reg);
                        amd_write8(amd, SYNCOFFREG, tinfo->sync_offset_reg);
                        amd_write8(amd, CNTLREG3, tinfo->CtrlR3);
                        amd_write8(amd, CNTLREG4, tinfo->CtrlR4);
                }
                /* If possible, update the XPT's notion of our transfer rate */
                if (xpt_create_path(&path, /*periph*/NULL,
                                    cam_sim_path(amd->psim), target,
                                    CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
                        struct ccb_trans_settings neg;

                        xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
                        neg.sync_period = period;
                        neg.sync_offset = offset;
                        neg.valid = CCB_TRANS_SYNC_RATE_VALID
                                  | CCB_TRANS_SYNC_OFFSET_VALID;
                        xpt_async(AC_TRANSFER_NEG, path, &neg);
                        xpt_free_path(path);
                }
        }
        if ((type & AMD_TRANS_GOAL) != 0) {
                tinfo->goal.period = period;
                tinfo->goal.offset = offset;
        }

        if ((type & AMD_TRANS_USER) != 0) {
                tinfo->user.period = period;
                tinfo->user.offset = offset;
        }
}

static void
amdsettags(struct amd_softc *amd, u_int target, int tagenb)
{
        panic("Implement me!\n");
}

#if 0
/*
 **********************************************************************
 * Function : amd_reset (struct amd_softc * amd)
 * Purpose  : perform a hard reset on the SCSI bus (and AMD chip).
 * Inputs   : amd - the adapter to reset
 **********************************************************************
 */
static void
amd_reset(struct amd_softc * amd)
{
        int        intflag;
        u_int8_t   bval;
        u_int16_t  i;

#ifdef AMD_DEBUG0
        printf("DC390: RESET");
#endif

        intflag = splcam();
        bval = amd_read8(amd, CNTLREG1);
        bval |= DIS_INT_ON_SCSI_RST;
        amd_write8(amd, CNTLREG1, bval);        /* disable interrupt */
        amd_ResetSCSIBus(amd);

        for (i = 0; i < 500; i++) {
                DELAY(1000);
        }

        bval = amd_read8(amd, CNTLREG1);
        bval &= ~DIS_INT_ON_SCSI_RST;
        amd_write8(amd, CNTLREG1, bval);        /* re-enable interrupt */

        amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
        amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

        ResetDevParam(amd);
        amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
                         AMD_TAG_WILDCARD, &amd->running_srbs,
                         CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
        amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
                         AMD_TAG_WILDCARD, &amd->waiting_srbs,
                         CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
        amd->active_srb = NULL;
        amd->ACBFlag = 0;
        splx(intflag);
        return;
}

void
amd_timeout(void *arg1)
{
        struct amd_srb *    pSRB;

        pSRB = (struct amd_srb *) arg1;
}
#endif

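
/*
 * Try to win selection for an SRB.  Returns 0 if the select command
 * was issued (the SRB becomes the active one) and 1 if the chip
 * already has an interrupt pending, in which case the SRB is left
 * SRB_READY and the caller keeps it on the waiting queue.
 */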
static int
amdstart(struct amd_softc *amd, struct amd_srb *pSRB)
{
        union ccb *pccb;
        struct ccb_scsiio *pcsio;
        struct amd_target_info *targ_info;
        u_int identify_msg;
        u_int command;
        u_int target;
        u_int lun;
        int tagged;

        pccb = pSRB->pccb;
        pcsio = &pccb->csio;
        target = pccb->ccb_h.target_id;
        lun = pccb->ccb_h.target_lun;
        targ_info = &amd->tinfo[target];

        amd_clear_msg_state(amd);
        amd_write8(amd, SCSIDESTIDREG, target);
        amd_write8(amd, SYNCPERIOREG, targ_info->sync_period_reg);
        amd_write8(amd, SYNCOFFREG, targ_info->sync_offset_reg);
        amd_write8(amd, CNTLREG1, targ_info->CtrlR1);
        amd_write8(amd, CNTLREG3, targ_info->CtrlR3);
        amd_write8(amd, CNTLREG4, targ_info->CtrlR4);
        amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

        identify_msg = MSG_IDENTIFYFLAG | lun;
        if ((targ_info->disc_tag & AMD_CUR_DISCENB) != 0
          && (pccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0
          && (pSRB->CmdBlock[0] != REQUEST_SENSE)
          && (pSRB->SRBFlag & AUTO_REQSENSE) == 0)
                identify_msg |= MSG_IDENTIFY_DISCFLAG;

        amd_write8(amd, SCSIFIFOREG, identify_msg);
        tagged = 0;
        if ((targ_info->disc_tag & AMD_CUR_TAGENB) == 0
          || (identify_msg & MSG_IDENTIFY_DISCFLAG) == 0)
                pccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
        if (targ_info->current.period != targ_info->goal.period
         || targ_info->current.offset != targ_info->goal.offset) {
                command = SEL_W_ATN_STOP;
                amdconstructsdtr(amd, targ_info->goal.period,
                                 targ_info->goal.offset);
        } else if ((pccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
                command = SEL_W_ATN2;
                pSRB->SRBState = SRB_START;
                amd_write8(amd, SCSIFIFOREG, pcsio->tag_action);
                amd_write8(amd, SCSIFIFOREG, pSRB->TagNumber);
                tagged++;
        } else {
                command = SEL_W_ATN;
                pSRB->SRBState = SRB_START;
        }
        if (command != SEL_W_ATN_STOP)
                amdsetupcommand(amd, pSRB);

        if (amd_read8(amd, SCSISTATREG) & INTERRUPT) {
                pSRB->SRBState = SRB_READY;
                return (1);
        } else {
                amd->last_phase = SCSI_ARBITRATING;
                amd_write8(amd, SCSICMDREG, command);
                amd->active_srb = pSRB;
                amd->cur_target = target;
                amd->cur_lun = lun;
                return (0);
        }
}

/*
 *  Catch an interrupt from the adapter.
 *  Process pending device interrupts.
 */
static void
amd_intr(void *arg)
{
        struct amd_softc *amd;
        struct amd_srb *pSRB;
        u_int  internstat = 0;
        u_int  scsistat;
        u_int  intstat;

        amd = (struct amd_softc *)arg;

        if (amd == NULL) {
#ifdef AMD_DEBUG0
                printf("amd_intr: amd NULL return......");
#endif
                return;
        }

        scsistat = amd_read8(amd, SCSISTATREG);
        if (!(scsistat & INTERRUPT)) {
#ifdef AMD_DEBUG0
                printf("amd_intr: scsistat = NULL ,return......");
#endif
                return;
        }
#ifdef AMD_DEBUG_SCSI_PHASE
        printf("scsistat=%2x,", scsistat);
#endif

        internstat = amd_read8(amd, INTERNSTATREG);
        intstat = amd_read8(amd, INTSTATREG);

#ifdef AMD_DEBUG_SCSI_PHASE
        printf("intstat=%2x,", intstat);
#endif

        if (intstat & DISCONNECTED) {
                amd_Disconnect(amd);
                return;
        }
        if (intstat & RESELECTED) {
                amd_Reselect(amd);
                return;
        }
        if (intstat & INVALID_CMD) {
                amd_InvalidCmd(amd);
                return;
        }
        if (intstat & SCSI_RESET_) {
                amd_ScsiRstDetect(amd);
                return;
        }
        if (intstat & (SUCCESSFUL_OP | SERVICE_REQUEST)) {
                pSRB = amd->active_srb;
                /*
                 * Run our state engine.  First perform
                 * post processing for the last phase we
                 * were in, followed by any processing
                 * required to handle the current phase.
                 */
                scsistat =
                    amd_SCSI_phase0[amd->last_phase](amd, pSRB, scsistat);
                amd->last_phase = scsistat & SCSI_PHASE_MASK;
                (void)amd_SCSI_phase1[amd->last_phase](amd, pSRB, scsistat);
        }
}

static u_int
amd_DataOutPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
        struct amd_sg *psgl;
        u_int32_t   ResidCnt, xferCnt;

        if (!(pSRB->SRBState & SRB_XFERPAD)) {
                if (scsistat & PARITY_ERR) {
                        pSRB->SRBStatus |= PARITY_ERROR;
                }
                if (scsistat & COUNT_2_ZERO) {
                        while ((amd_read8(amd, DMA_Status)&DMA_XFER_DONE) == 0)
                                ;
                        pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
                        pSRB->SGIndex++;
                        if (pSRB->SGIndex < pSRB->SGcount) {
                                pSRB->pSGlist++;
                                psgl = pSRB->pSGlist;
                                pSRB->SGPhysAddr = psgl->SGXPtr;
                                pSRB->SGToBeXferLen = psgl->SGXLen;
                        } else {
                                pSRB->SGToBeXferLen = 0;
                        }
                } else {
                        ResidCnt = amd_read8(amd, CURRENTFIFOREG) & 0x1f;
                        ResidCnt += amd_read8(amd, CTCREG_LOW)
                                  | (amd_read8(amd, CTCREG_MID) << 8)
                                  | (amd_read8(amd, CURTXTCNTREG) << 16);

                        xferCnt = pSRB->SGToBeXferLen - ResidCnt;
                        pSRB->SGPhysAddr += xferCnt;
                        pSRB->TotalXferredLen += xferCnt;
                        pSRB->SGToBeXferLen = ResidCnt;
                }
        }
        amd_write8(amd, DMA_Cmd, WRITE_DIRECTION | DMA_IDLE_CMD);
        return (scsistat);
}

static u_int
amd_DataInPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
        u_int8_t bval;
        u_int16_t  i, residual;
        struct amd_sg *psgl;
        u_int32_t   ResidCnt, xferCnt;
        u_int8_t *  ptr;

        if (!(pSRB->SRBState & SRB_XFERPAD)) {
                if (scsistat & PARITY_ERR) {
                        pSRB->SRBStatus |= PARITY_ERROR;
                }
                if (scsistat & COUNT_2_ZERO) {
                        while (1) {
                                bval = amd_read8(amd, DMA_Status);
                                if ((bval & DMA_XFER_DONE) != 0)
                                        break;
                        }
                        amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_IDLE_CMD);

                        pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
                        pSRB->SGIndex++;
                        if (pSRB->SGIndex < pSRB->SGcount) {
                                pSRB->pSGlist++;
                                psgl = pSRB->pSGlist;
                                pSRB->SGPhysAddr = psgl->SGXPtr;
                                pSRB->SGToBeXferLen = psgl->SGXLen;
                        } else {
                                pSRB->SGToBeXferLen = 0;
                        }
                } else {        /* phase changed */
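                        /*
                         * The target changed phase mid-transfer.  Wait
                         * for the SCSI FIFO to drain (a stuck single
                         * byte is flagged as a residual), flush the
                         * DMA engine with a BLAST command, then compute
                         * how much data actually moved from the
                         * transfer counter registers.
                         */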
                        residual = 0;
                        bval = amd_read8(amd, CURRENTFIFOREG);
                        while (bval & 0x1f) {
                                if ((bval & 0x1f) == 1) {
                                        for (i = 0; i < 0x100; i++) {
                                                bval = amd_read8(amd, CURRENTFIFOREG);
                                                if (!(bval & 0x1f)) {
                                                        goto din_1;
                                                } else if (i == 0x0ff) {
                                                        residual = 1;
                                                        goto din_1;
                                                }
                                        }
                                } else {
                                        bval = amd_read8(amd, CURRENTFIFOREG);
                                }
                        }
        din_1:
                        amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_BLAST_CMD);
                        for (i = 0; i < 0x8000; i++) {
                                if ((amd_read8(amd, DMA_Status)&BLAST_COMPLETE))
                                        break;
                        }
                        amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_IDLE_CMD);

                        ResidCnt = amd_read8(amd, CTCREG_LOW)
                                 | (amd_read8(amd, CTCREG_MID) << 8)
                                 | (amd_read8(amd, CURTXTCNTREG) << 16);
                        xferCnt = pSRB->SGToBeXferLen - ResidCnt;
                        pSRB->SGPhysAddr += xferCnt;
                        pSRB->TotalXferredLen += xferCnt;
                        pSRB->SGToBeXferLen = ResidCnt;
                        if (residual) {
                                /* get residual byte */
                                bval = amd_read8(amd, SCSIFIFOREG);
                                ptr = phystovirt(pSRB, xferCnt);
                                *ptr = bval;
                                pSRB->SGPhysAddr++;
                                pSRB->TotalXferredLen++;
                                pSRB->SGToBeXferLen--;
                        }
                }
        }
        return (scsistat);
}

static u_int
amd_StatusPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
        pSRB->TargetStatus = amd_read8(amd, SCSIFIFOREG);
        /* get message */
        pSRB->EndMessage = amd_read8(amd, SCSIFIFOREG);
        pSRB->SRBState = SRB_COMPLETED;
        amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);
        return (SCSI_NOP0);
}

static u_int
amd_MsgOutPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
        if (pSRB->SRBState & (SRB_UNEXPECT_RESEL | SRB_ABORT_SENT)) {
                scsistat = SCSI_NOP0;
        }
        return (scsistat);
}

static u_int
amd_MsgInPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
        int done;

        amd->msgin_buf[amd->msgin_index] = amd_read8(amd, SCSIFIFOREG);

        done = amdparsemsg(amd);
        if (done)
                amd->msgin_index = 0;
        else
                amd->msgin_index++;
        return (SCSI_NOP0);
}

static int
amdparsemsg(struct amd_softc *amd)
{
        struct  amd_target_info *targ_info;
        int     reject;
        int     done;
        int     response;

        done = FALSE;
        response = FALSE;
        reject = FALSE;

        targ_info = &amd->tinfo[amd->cur_target];

        /*
         * Parse as much of the message as is available,
         * rejecting it if we don't support it.  When
         * the entire message is available and has been
         * handled, return TRUE indicating that we have
         * parsed an entire message.
         */
        switch (amd->msgin_buf[0]) {
        case MSG_DISCONNECT:
                amd->active_srb->SRBState = SRB_DISCONNECT;
                amd->disc_count[amd->cur_target][amd->cur_lun]++;
                done = TRUE;
                break;
        case MSG_SIMPLE_Q_TAG:
        {
                struct amd_srb *disc_srb;

                if (amd->msgin_index < 1)
                        break;
                disc_srb = &amd->SRB_array[amd->msgin_buf[1]];
                if (amd->active_srb != NULL
                 || disc_srb->SRBState != SRB_DISCONNECT
                 || disc_srb->pccb->ccb_h.target_id != amd->cur_target
                 || disc_srb->pccb->ccb_h.target_lun != amd->cur_lun) {
                        printf("amd%d: Unexpected tagged reselection "
                               "for target %d, Issuing Abort\n", amd->unit,
                               amd->cur_target);
                        amd->msgout_buf[0] = MSG_ABORT;
                        amd->msgout_len = 1;
                        response = TRUE;
                        break;
                }
                amd->active_srb = disc_srb;
                amd->disc_count[amd->cur_target][amd->cur_lun]--;
                done = TRUE;
                break;
        }
        case MSG_MESSAGE_REJECT:
                response = amdhandlemsgreject(amd);
                if (response == FALSE)
                        amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
                /* FALLTHROUGH */
        case MSG_NOOP:
                done = TRUE;
                break;
        case MSG_EXTENDED:
        {
                u_int clockrate;
                u_int period;
                u_int offset;
                u_int saved_offset;

                /* Wait for enough of the message to begin validation */
                if (amd->msgin_index < 1)
                        break;
                if (amd->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
                        reject = TRUE;
                        break;
                }

                /* Wait for opcode */
                if (amd->msgin_index < 2)
                        break;

                if (amd->msgin_buf[2] != MSG_EXT_SDTR) {
                        reject = TRUE;
                        break;
                }

                /*
                 * Wait until we have both args before validating
                 * and acting on this message.
                 *
                 * Add one to MSG_EXT_SDTR_LEN to account for
                 * the extended message preamble.
                 */
                if (amd->msgin_index < (MSG_EXT_SDTR_LEN + 1))
                        break;

                period = amd->msgin_buf[3];
                saved_offset = offset = amd->msgin_buf[4];
                clockrate = amdfindclockrate(amd, &period);
                if (offset > AMD_MAX_SYNC_OFFSET)
                        offset = AMD_MAX_SYNC_OFFSET;
                if (period == 0 || offset == 0) {
                        offset = 0;
                        period = 0;
                        clockrate = 0;
                }
                amdsetsync(amd, amd->cur_target, clockrate, period, offset,
                           AMD_TRANS_ACTIVE|AMD_TRANS_GOAL);

                /*
                 * See if we initiated Sync Negotiation
                 * and didn't have to fall down to async
                 * transfers.
                 */
                if (amdsentmsg(amd, MSG_EXT_SDTR, /*full*/TRUE)) {
                        /* We started it */
                        if (saved_offset != offset) {
                                /* Went too low - force async */
                                reject = TRUE;
                        }
                } else {
                        /*
                         * Send our own SDTR in reply
                         */
                        if (bootverbose)
                                printf("Sending SDTR!\n");
                        amd->msgout_index = 0;
                        amd->msgout_len = 0;
                        amdconstructsdtr(amd, period, offset);
                        amd->msgout_index = 0;
                        response = TRUE;
                }
                done = TRUE;
                break;
        }
        case MSG_SAVEDATAPOINTER:
        case MSG_RESTOREPOINTERS:
                /* XXX Implement!!! */
                done = TRUE;
                break;
        default:
                reject = TRUE;
                break;
        }

        if (reject) {
                amd->msgout_index = 0;
                amd->msgout_len = 1;
                amd->msgout_buf[0] = MSG_MESSAGE_REJECT;
                done = TRUE;
                response = TRUE;
        }

        if (response)
                amd_write8(amd, SCSICMDREG, SET_ATN_CMD);

        if (done && !response)
                /* Clear the outgoing message buffer */
                amd->msgout_len = 0;

        /* Drop Ack */
        amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);

        return (done);
}
1347 static u_int
1348 amdfindclockrate(struct amd_softc *amd, u_int *period)
1349 {
1350         u_int i;
1351         u_int clockrate;
1352
1353         for (i = 0; i < sizeof(tinfo_sync_period); i++) {
1354                 u_int8_t *table_entry;
1355
1356                 table_entry = &tinfo_sync_period[i];
1357                 if (*period <= *table_entry) {
1358                         /*
1359                          * When responding to a target that requests
1360                          * sync, the requested rate may fall between
1361                          * two rates that we can output, but still be
1362                          * a rate that we can receive.  Because of this,
1363                          * we want to respond to the target with
1364                          * the same rate that it sent to us even
1365                          * if the period we use to send data to it
1366                          * is lower.  Only lower the response period
1367                          * if we must.
1368                          */ 
1369                         if (i == 0) {
1370                                 *period = *table_entry;
1371                         }
1372                         break;
1373                 }
1374         }
1375
1376         if (i == sizeof(tinfo_sync_period)) {
                /* Too slow for us.  Use async transfers. */
1378                 *period = 0;
1379                 clockrate = 0;
1380         } else
1381                 clockrate = i + 4;
1382
1383         return (clockrate);
1384 }
1385
1386 /*
1387  * See if we sent a particular extended message to the target.
1388  * If "full" is true, the target saw the full message.
1389  * If "full" is false, the target saw at least the first
1390  * byte of the message.
1391  */
1392 static int
1393 amdsentmsg(struct amd_softc *amd, u_int msgtype, int full)
1394 {
1395         int found;
1396         int index;
1397
1398         found = FALSE;
1399         index = 0;
1400
1401         while (index < amd->msgout_len) {
1402                 if ((amd->msgout_buf[index] & MSG_IDENTIFYFLAG) != 0
1403                  || amd->msgout_buf[index] == MSG_MESSAGE_REJECT)
1404                         index++;
1405                 else if (amd->msgout_buf[index] >= MSG_SIMPLE_Q_TAG
1406                       && amd->msgout_buf[index] < MSG_IGN_WIDE_RESIDUE) {
1407                         /* Skip tag type and tag id */
1408                         index += 2;
1409                 } else if (amd->msgout_buf[index] == MSG_EXTENDED) {
1410                         /* Found a candidate */
1411                         if (amd->msgout_buf[index+2] == msgtype) {
1412                                 u_int end_index;
1413
1414                                 end_index = index + 1
1415                                           + amd->msgout_buf[index + 1];
1416                                 if (full) {
1417                                         if (amd->msgout_index > end_index)
1418                                                 found = TRUE;
1419                                 } else if (amd->msgout_index > index)
1420                                         found = TRUE;
1421                         }
1422                         break;
1423                 } else {
1424                         panic("amdsentmsg: Inconsistent msg buffer");
1425                 }
1426         }
1427         return (found);
1428 }
1429
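     /*
      * Append a five byte extended SDTR message (MSG_EXTENDED, length,
      * MSG_EXT_SDTR, period, offset) to the outgoing message buffer.
      */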
1430 static void
1431 amdconstructsdtr(struct amd_softc *amd, u_int period, u_int offset)
1432 {
1433         amd->msgout_buf[amd->msgout_index++] = MSG_EXTENDED;
1434         amd->msgout_buf[amd->msgout_index++] = MSG_EXT_SDTR_LEN;
1435         amd->msgout_buf[amd->msgout_index++] = MSG_EXT_SDTR;
1436         amd->msgout_buf[amd->msgout_index++] = period;
1437         amd->msgout_buf[amd->msgout_index++] = offset;
1438         amd->msgout_len += 5;
1439 }
1440
1441 static int
1442 amdhandlemsgreject(struct amd_softc *amd)
1443 {
1444         /*
1445          * If we had an outstanding SDTR for this
1446          * target, this is a signal that the target
1447          * is refusing negotiation.  Also watch out
1448          * for rejected tag messages.
1449          */
1450         struct  amd_srb *srb;
1451         struct  amd_target_info *targ_info;
1452         int     response = FALSE;
1453
1454         srb = amd->active_srb;
1455         targ_info = &amd->tinfo[amd->cur_target];
1456         if (amdsentmsg(amd, MSG_EXT_SDTR, /*full*/FALSE)) {
1457                 /* note asynch xfers and clear flag */
1458                 amdsetsync(amd, amd->cur_target, /*clockrate*/0,
1459                            /*period*/0, /*offset*/0,
1460                            AMD_TRANS_ACTIVE|AMD_TRANS_GOAL);
1461                 printf("amd%d:%d: refuses synchronous negotiation. "
1462                        "Using asynchronous transfers\n",
1463                        amd->unit, amd->cur_target);
1464         } else if ((srb != NULL)
1465                 && (srb->pccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
1466                 struct  ccb_trans_settings neg;
1467
1468                 printf("amd%d:%d: refuses tagged commands.  Performing "
1469                        "non-tagged I/O\n", amd->unit, amd->cur_target);
1470
1471                 amdsettags(amd, amd->cur_target, FALSE);
1472                 neg.flags = 0;
1473                 neg.valid = CCB_TRANS_TQ_VALID;
1474                 xpt_setup_ccb(&neg.ccb_h, srb->pccb->ccb_h.path, /*priority*/1);
1475                 xpt_async(AC_TRANSFER_NEG, srb->pccb->ccb_h.path, &neg);
1476
1477                 /*
1478                  * Resend the identify for this CCB as the target
1479                  * may believe that the selection is invalid otherwise.
1480                  */
1481                 if (amd->msgout_len != 0)
1482                         bcopy(&amd->msgout_buf[0], &amd->msgout_buf[1],
1483                               amd->msgout_len);
1484                 amd->msgout_buf[0] = MSG_IDENTIFYFLAG
1485                                     | srb->pccb->ccb_h.target_lun;
1486                 amd->msgout_len++;
1487                 if ((targ_info->disc_tag & AMD_CUR_DISCENB) != 0
1488                   && (srb->pccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
1489                         amd->msgout_buf[0] |= MSG_IDENTIFY_DISCFLAG;
1490
1491                 srb->pccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
1492
1493                 /*
1494                  * Requeue all tagged commands for this target
1495                          * currently in our possession so they can be
1496                  * converted to untagged commands.
1497                  */
1498                 amdcompletematch(amd, amd->cur_target, amd->cur_lun,
1499                                  AMD_TAG_WILDCARD, &amd->waiting_srbs,
1500                                  CAM_DEV_QFRZN|CAM_REQUEUE_REQ);
1501         } else {
1502                 /*
1503                  * Otherwise, we ignore it.
1504                  */
1505                 printf("amd%d:%d: Message reject received -- ignored\n",
1506                        amd->unit, amd->cur_target);
1507         }
1508         return (response);
1509 }
1510
1511 #if 0
1512         if (!(pSRB->SRBState & SRB_MSGIN_MULTI)) {
1513                 if (bval == MSG_DISCONNECT) {
1514                         pSRB->SRBState = SRB_DISCONNECT;
1515                 } else if (bval == MSG_SAVEDATAPOINTER) {
1516                         goto min6;
1517                 } else if ((bval == MSG_EXTENDED)
1518                         || ((bval >= MSG_SIMPLE_Q_TAG)
1519                          && (bval <= MSG_ORDERED_Q_TAG))) {
1520                         pSRB->SRBState |= SRB_MSGIN_MULTI;
1521                         pSRB->MsgInBuf[0] = bval;
1522                         pSRB->MsgCnt = 1;
1523                         pSRB->pMsgPtr = &pSRB->MsgInBuf[1];
1524                 } else if (bval == MSG_MESSAGE_REJECT) {
1525                         amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
1526
1527                         if (pSRB->SRBState & DO_SYNC_NEGO) {
1528                                 goto set_async;
1529                         }
1530                 } else if (bval == MSG_RESTOREPOINTERS) {
1531                         goto min6;
1532                 } else {
1533                         goto min6;
1534                 }
1535         } else {                /* minx: */
1536                 *pSRB->pMsgPtr = bval;
1537                 pSRB->MsgCnt++;
1538                 pSRB->pMsgPtr++;
1539                 if ((pSRB->MsgInBuf[0] >= MSG_SIMPLE_Q_TAG)
1540                  && (pSRB->MsgInBuf[0] <= MSG_ORDERED_Q_TAG)) {
1541                         if (pSRB->MsgCnt == 2) {
1542                                 pSRB->SRBState = 0;
1543                                 pSRB = &amd->SRB_array[pSRB->MsgInBuf[1]];
1544                                 if ((pSRB->SRBState & SRB_DISCONNECT) == 0) {
1545                                         pSRB = amd->pTmpSRB;
1546                                         pSRB->SRBState = SRB_UNEXPECT_RESEL;
1547                                         pDCB->pActiveSRB = pSRB;
1548                                         pSRB->MsgOutBuf[0] = MSG_ABORT_TAG;
1549                                         EnableMsgOut2(amd, pSRB);
1550                                 } else {
1551                                         if (pDCB->DCBFlag & ABORT_DEV_) {
1552                                                 pSRB->SRBState = SRB_ABORT_SENT;
1553                                                 EnableMsgOut1(amd, pSRB);
1554                                         }
1555                                         pDCB->pActiveSRB = pSRB;
1556                                         pSRB->SRBState = SRB_DATA_XFER;
1557                                 }
1558                         }
1559                 } else if ((pSRB->MsgInBuf[0] == MSG_EXTENDED)
1560                         && (pSRB->MsgCnt == 5)) {
1561                         pSRB->SRBState &= ~(SRB_MSGIN_MULTI | DO_SYNC_NEGO);
1562                         if ((pSRB->MsgInBuf[1] != 3)
1563                          || (pSRB->MsgInBuf[2] != 1)) { /* reject_msg: */
1564                                 pSRB->MsgCnt = 1;
1565                                 pSRB->MsgInBuf[0] = MSG_MESSAGE_REJECT;
1566                                 amd_write8(amd, SCSICMDREG, SET_ATN_CMD);
1567                         } else if (!(pSRB->MsgInBuf[3])
1568                                 || !(pSRB->MsgInBuf[4])) {
1569                 set_async:      /* set async */
1570
1571                                 pDCB = pSRB->pSRBDCB;
1572                                 /* disable sync & sync nego */
1573                                 pDCB->SyncMode &= ~(SYNC_ENABLE|SYNC_NEGO_DONE);
1574                                 pDCB->SyncPeriod = 0;
1575                                 pDCB->SyncOffset = 0;
1576
1577                                 pDCB->tinfo.goal.period = 0;
1578                                 pDCB->tinfo.goal.offset = 0;
1579
1580                                 pDCB->tinfo.current.period = 0;
1581                                 pDCB->tinfo.current.offset = 0;
1582                                 pDCB->tinfo.current.width =
1583                                     MSG_EXT_WDTR_BUS_8_BIT;
1584
1585                                 pDCB->CtrlR3 = FAST_CLK; /* non_fast */
1586                                 pDCB->CtrlR4 &= 0x3f;
1587                                 pDCB->CtrlR4 |= EATER_25NS; 
1588                                 goto re_prog;
1589                         } else {/* set sync */
1590
1591                                 pDCB = pSRB->pSRBDCB;
1592                                 /* enable sync & sync nego */
1593                                 pDCB->SyncMode |= SYNC_ENABLE|SYNC_NEGO_DONE;
1594
1595                                 /* set sync offset */
1596                                 pDCB->SyncOffset &= 0x0f0;
1597                                 pDCB->SyncOffset |= pSRB->MsgInBuf[4];
1598
1599                                 /* set sync period */
1600                                 pDCB->MaxNegoPeriod = pSRB->MsgInBuf[3];
1601
1602                                 wval = (u_int16_t) pSRB->MsgInBuf[3];
1603                                 wval = wval << 2;
1604                                 wval--;
1605                                 wval1 = wval / 25;
1606                                 if ((wval1 * 25) != wval) {
1607                                         wval1++;
1608                                 }
1609                                 bval = FAST_CLK|FAST_SCSI;
1610                                 pDCB->CtrlR4 &= 0x3f;
1611                                 if (wval1 >= 8) {
1612                                         /* Fast SCSI */
1613                                         wval1--;
1614                                         bval = FAST_CLK;
1615                                         pDCB->CtrlR4 |= EATER_25NS;
1616                                 }
1617                                 pDCB->CtrlR3 = bval;
1618                                 pDCB->SyncPeriod = (u_int8_t) wval1;
1619
1620                                 pDCB->tinfo.goal.period =
1621                                     tinfo_sync_period[pDCB->SyncPeriod - 4];
1622                                 pDCB->tinfo.goal.offset = pDCB->SyncOffset;
1623                                 pDCB->tinfo.current.period =
1624                                     tinfo_sync_period[pDCB->SyncPeriod - 4];
1625                                 pDCB->tinfo.current.offset = pDCB->SyncOffset;
1626
1627                                 /*
1628                                  * program SCSI control register
1629                                  */
1630                 re_prog:
1631                                 amd_write8(amd, SYNCPERIOREG, pDCB->SyncPeriod);
1632                                 amd_write8(amd, SYNCOFFREG, pDCB->SyncOffset);
1633                                 amd_write8(amd, CNTLREG3, pDCB->CtrlR3);
1634                                 amd_write8(amd, CNTLREG4, pDCB->CtrlR4);
1635                         }
1636                 }
1637         }
1638 min6:
1639         amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);
1640         return (SCSI_NOP0);
1641 }
1642 #endif
1643
1644 static u_int
1645 amd_DataOutPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1646 {
1647         DataIO_Comm(amd, pSRB, WRITE_DIRECTION);
1648         return (scsistat);
1649 }
1650
1651 static u_int 
1652 amd_DataInPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1653 {
1654         DataIO_Comm(amd, pSRB, READ_DIRECTION);
1655         return (scsistat);
1656 }
1657
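     /*
      * Set up the next data transfer: load the current scatter/gather
      * segment into the SCSI transfer counters and the DMA engine and
      * start an information transfer.  Once the S/G list is exhausted,
      * fall back to byte padding and record an over/under-run.
      */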
1658 static void
1659 DataIO_Comm(struct amd_softc *amd, struct amd_srb *pSRB, u_int ioDir)
1660 {
1661         struct amd_sg *    psgl;
1662         u_int32_t   lval;
1663
1664         if (pSRB->SGIndex < pSRB->SGcount) {
1665                 amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD|ioDir);/* |EN_DMA_INT */
1666
1667                 if (!pSRB->SGToBeXferLen) {
1668                         psgl = pSRB->pSGlist;
1669                         pSRB->SGPhysAddr = psgl->SGXPtr;
1670                         pSRB->SGToBeXferLen = psgl->SGXLen;
1671                 }
1672                 lval = pSRB->SGToBeXferLen;
1673                 amd_write8(amd, CTCREG_LOW, lval);
1674                 amd_write8(amd, CTCREG_MID, lval >> 8);
1675                 amd_write8(amd, CURTXTCNTREG, lval >> 16);
1676
1677                 amd_write32(amd, DMA_XferCnt, pSRB->SGToBeXferLen);
1678
1679                 amd_write32(amd, DMA_XferAddr, pSRB->SGPhysAddr);
1680
1681                 pSRB->SRBState = SRB_DATA_XFER;
1682
1683                 amd_write8(amd, SCSICMDREG, DMA_COMMAND|INFO_XFER_CMD);
1684
1685                 amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD|ioDir); /* |EN_DMA_INT */
1686
1687                 amd_write8(amd, DMA_Cmd, DMA_START_CMD|ioDir);/* |EN_DMA_INT */
1688         } else {                /* xfer pad */
1689                 if (pSRB->SGcount) {
1690                         pSRB->AdaptStatus = H_OVER_UNDER_RUN;
1691                         pSRB->SRBStatus |= OVER_RUN;
1692                 }
1693                 amd_write8(amd, CTCREG_LOW, 0);
1694                 amd_write8(amd, CTCREG_MID, 0);
1695                 amd_write8(amd, CURTXTCNTREG, 0);
1696
1697                 pSRB->SRBState |= SRB_XFERPAD;
1698                 amd_write8(amd, SCSICMDREG, DMA_COMMAND|XFER_PAD_BYTE);
1699         }
1700 }
1701
1702 static u_int
1703 amd_CommandPhase1(struct amd_softc *amd, struct amd_srb *srb, u_int scsistat)
1704 {
1705         amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
1706         amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
1707
1708         amdsetupcommand(amd, srb);
1709
1710         srb->SRBState = SRB_COMMAND;
1711         amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
1712         return (scsistat);
1713 }
1714
1715 static u_int
1716 amd_StatusPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1717 {
1718         amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
1719         pSRB->SRBState = SRB_STATUS;
1720         amd_write8(amd, SCSICMDREG, INITIATOR_CMD_CMPLTE);
1721         return (scsistat);
1722 }
1723
1724 static u_int
1725 amd_MsgOutPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1726 {
1727         amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
1728
1729         if (amd->msgout_len == 0) {
1730                 amd->msgout_buf[0] = MSG_NOOP;
1731                 amd->msgout_len = 1;
1732         }
1733         amd_write8_multi(amd, SCSIFIFOREG, amd->msgout_buf, amd->msgout_len);
1734         amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
1735         return (scsistat);
1736 }
1737
1738 static u_int
1739 amd_MsgInPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1740 {
1741         amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
1742         amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
1743         return (scsistat);
1744 }
1745
1746 static u_int
1747 amd_NopPhase(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1748 {
1749         return (scsistat);
1750 }
1751
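     /*
      * Handle a bus free condition: re-enable selection/reselection
      * and dispose of the active SRB according to its state (selection
      * timeout, legitimate disconnect, or command completion).
      */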
1752 static void
1753 amd_Disconnect(struct amd_softc * amd)
1754 {
1755         struct  amd_srb *srb;
1756         int     target;
1757         int     lun;
1758
1759         srb = amd->active_srb;
1760         amd->active_srb = NULL;
1761         amd->last_phase = SCSI_BUS_FREE;
1762         amd_write8(amd, SCSICMDREG, EN_SEL_RESEL);
1763         target = amd->cur_target;
1764         lun = amd->cur_lun;
1765
1766         if (srb == NULL) {
1767                 /* Invalid reselection */
1768                 amdrunwaiting(amd);
1769         } else if (srb->SRBState & SRB_ABORT_SENT) {
1770                 /* Clean up and complete this SRB */
1771 #if 0
1772                 while ((pSRB = TAILQ_FIRST(&amd->running_srbs)) != NULL) {
1773                         /* XXX What about "done'ing" these srbs??? */
1774                         if (pSRB->pSRBDCB == pDCB) {
1775                                 TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
1776                                 TAILQ_INSERT_HEAD(&amd->free_srbs, pSRB, links);
1777                         }
1778                 }
1779                 amdrunwaiting(amd);
1780 #endif
1781         } else {
1782                 if ((srb->SRBState & (SRB_START | SRB_MSGOUT))
1783                  || !(srb->SRBState & (SRB_DISCONNECT | SRB_COMPLETED))) {
1784                         srb->TargetStatus = AMD_SCSI_STAT_SEL_TIMEOUT;
1785                         goto disc1;
1786                 } else if (srb->SRBState & SRB_DISCONNECT) {
1787                         if (!(srb->pccb->ccb_h.flags & CAM_TAG_ACTION_VALID))
1788                                 amd->untagged_srbs[target][lun] = srb;
1789                         amdrunwaiting(amd);
1790                 } else if (srb->SRBState & SRB_COMPLETED) {
1791         disc1:
1792                         srb->SRBState = SRB_FREE;
1793                         SRBdone(amd, srb);
1794                 }
1795         }
1796         return;
1797 }
1798
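     /*
      * Handle reselection by a target: pull the reselecting target's
      * ID and LUN out of the SCSI FIFO, restore that target's sync and
      * control register settings, and resume its disconnected untagged
      * SRB.  A reselection with no disconnected command outstanding is
      * answered with MSG_ABORT.
      */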
1799 static void
1800 amd_Reselect(struct amd_softc *amd)
1801 {
1802         struct amd_target_info *tinfo;
1803         u_int16_t disc_count;
1804
1805         amd_clear_msg_state(amd);
1806         if (amd->active_srb != NULL) {
1807                 /* Requeue the SRB for our attempted Selection */
1808                 TAILQ_REMOVE(&amd->running_srbs, amd->active_srb, links);
1809                 TAILQ_INSERT_HEAD(&amd->waiting_srbs, amd->active_srb, links);
1810                 amd->active_srb = NULL;
1811         }
1812         /* get ID */
1813         amd->cur_target = amd_read8(amd, SCSIFIFOREG);
1814         amd->cur_target ^= amd->HostID_Bit;
1815         amd->cur_target = ffs(amd->cur_target) - 1;
1816         amd->cur_lun = amd_read8(amd, SCSIFIFOREG) & 7;
1817         tinfo = &amd->tinfo[amd->cur_target];
1818         amd->active_srb = amd->untagged_srbs[amd->cur_target][amd->cur_lun];
1819         disc_count = amd->disc_count[amd->cur_target][amd->cur_lun];
1820         if (disc_count == 0) {
1821                 printf("amd%d: Unexpected reselection for target %d, "
1822                        "Issuing Abort\n", amd->unit, amd->cur_target);
1823                 amd->msgout_buf[0] = MSG_ABORT;
1824                 amd->msgout_len = 1;
1825                 amd_write8(amd, SCSICMDREG, SET_ATN_CMD);
1826         }
1827         if (amd->active_srb != NULL) {
1828                 amd->disc_count[amd->cur_target][amd->cur_lun]--;
1829                 amd->untagged_srbs[amd->cur_target][amd->cur_lun] = NULL;
1830         }
1831         
1832         amd_write8(amd, SCSIDESTIDREG, amd->cur_target);
1833         amd_write8(amd, SYNCPERIOREG, tinfo->sync_period_reg);
1834         amd_write8(amd, SYNCOFFREG, tinfo->sync_offset_reg);
1835         amd_write8(amd, CNTLREG1, tinfo->CtrlR1);
1836         amd_write8(amd, CNTLREG3, tinfo->CtrlR3);
1837         amd_write8(amd, CNTLREG4, tinfo->CtrlR4);
1838         amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);/* drop /ACK */
1839         amd->last_phase = SCSI_NOP0;
1840 }
1841
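     /*
      * Post-process a completed SRB: sync and unload its DMA map,
      * translate the SCSI and adapter status into a CAM ccb status
      * (starting an automatic REQUEST SENSE on a check condition),
      * return the SRB to the free list, and hand the ccb back to CAM.
      */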
1842 static void
1843 SRBdone(struct amd_softc *amd, struct amd_srb *pSRB)
1844 {
1845         u_int8_t   bval, i, status;
1846         union ccb *pccb;
1847         struct ccb_scsiio *pcsio;
1848         int        intflag;
1849         struct amd_sg *ptr2;
1850         u_int32_t   swlval;
1851         u_int   target_id, target_lun;
1852
1853         pccb = pSRB->pccb;
1854         pcsio = &pccb->csio;
1855         target_id = pSRB->pccb->ccb_h.target_id;
1856         target_lun = pSRB->pccb->ccb_h.target_lun;
1857
1858         CAM_DEBUG(pccb->ccb_h.path, CAM_DEBUG_TRACE,
1859                   ("SRBdone - TagNumber %d\n", pSRB->TagNumber));
1860
1861         if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1862                 bus_dmasync_op_t op;
1863
1864                 if ((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1865                         op = BUS_DMASYNC_POSTREAD;
1866                 else
1867                         op = BUS_DMASYNC_POSTWRITE;
1868                 bus_dmamap_sync(amd->buffer_dmat, pSRB->dmamap, op);
1869                 bus_dmamap_unload(amd->buffer_dmat, pSRB->dmamap);
1870         }
1871
1872         status = pSRB->TargetStatus;
1873         pccb->ccb_h.status = CAM_REQ_CMP;
1875         if (pSRB->SRBFlag & AUTO_REQSENSE) {
1876                 pSRB->SRBFlag &= ~AUTO_REQSENSE;
1877                 pSRB->AdaptStatus = 0;
1878                 pSRB->TargetStatus = SCSI_STATUS_CHECK_COND;
1879
1880                 if (status == SCSI_STATUS_CHECK_COND) {
1881                         pccb->ccb_h.status = CAM_SEL_TIMEOUT;
1882                         goto ckc_e;
1883                 }
1884                 *((u_int32_t *)&(pSRB->CmdBlock[0])) = pSRB->Segment0[0];
1885
1886                 pcsio->sense_resid = pcsio->sense_len
1887                                    - pSRB->TotalXferredLen;
1888                 pSRB->TotalXferredLen = pSRB->Segment1[1];
1889                 if (pSRB->TotalXferredLen) {
1890                         /* ???? */
1891                         pcsio->resid = pcsio->dxfer_len
1892                                      - pSRB->TotalXferredLen;
1893                         /* The resid field contains valid data   */
1894                         /* Flush resid bytes on complete        */
1895                 } else {
1896                         pcsio->scsi_status = SCSI_STATUS_CHECK_COND;
1897                 }
1898                 pccb->ccb_h.status = CAM_AUTOSNS_VALID|CAM_SCSI_STATUS_ERROR;
1899                 goto ckc_e;
1900         }
1901         if (status) {
1902                 if (status == SCSI_STATUS_CHECK_COND) {
1903
1904                         if ((pSRB->SGIndex < pSRB->SGcount)
1905                          && (pSRB->SGcount) && (pSRB->SGToBeXferLen)) {
1906                                 bval = pSRB->SGcount;
1907                                 swlval = pSRB->SGToBeXferLen;
1908                                 ptr2 = pSRB->pSGlist;
1909                                 ptr2++;
1910                                 for (i = pSRB->SGIndex + 1; i < bval; i++) {
1911                                         swlval += ptr2->SGXLen;
1912                                         ptr2++;
1913                                 }
1914                                 /* ??????? */
1915                                 pcsio->resid = (u_int32_t) swlval;
1916
1917 #ifdef  AMD_DEBUG0
1918                                 printf("XferredLen=%8x,NotYetXferLen=%8x,",
1919                                         pSRB->TotalXferredLen, swlval);
1920 #endif
1921                         }
1922                         if ((pcsio->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0) {
1923 #ifdef  AMD_DEBUG0
1924                                 printf("RequestSense..................\n");
1925 #endif
1926                                 RequestSense(amd, pSRB);
1927                                 return;
1928                         }
1929                         pcsio->scsi_status = SCSI_STATUS_CHECK_COND;
1930                         pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1931                         goto ckc_e;
1932                 } else if (status == SCSI_STATUS_QUEUE_FULL) {
1933                         pSRB->AdaptStatus = 0;
1934                         pSRB->TargetStatus = 0;
1935                         pcsio->scsi_status = SCSI_STATUS_QUEUE_FULL;
1936                         pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1937                         goto ckc_e;
1938                 } else if (status == AMD_SCSI_STAT_SEL_TIMEOUT) {
1939                         pSRB->AdaptStatus = H_SEL_TIMEOUT;
1940                         pSRB->TargetStatus = 0;
1941
1942                         pcsio->scsi_status = AMD_SCSI_STAT_SEL_TIMEOUT;
1943                         pccb->ccb_h.status = CAM_SEL_TIMEOUT;
1944                 } else if (status == SCSI_STATUS_BUSY) {
1945 #ifdef AMD_DEBUG0
1946                         printf("DC390: target busy at %s %d\n",
1947                                __FILE__, __LINE__);
1948 #endif
1949                         pcsio->scsi_status = SCSI_STATUS_BUSY;
1950                         pccb->ccb_h.status = CAM_SCSI_BUSY;
1951                 } else if (status == SCSI_STATUS_RESERV_CONFLICT) {
1952 #ifdef AMD_DEBUG0
1953                         printf("DC390: target reserved at %s %d\n",
1954                                __FILE__, __LINE__);
1955 #endif
1956                         pcsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT;
1957                         pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; /* XXX */
1958                 } else {
1959                         pSRB->AdaptStatus = 0;
1960 #ifdef AMD_DEBUG0
1961                         printf("DC390: driver stuffup at %s %d\n",
1962                                __FILE__, __LINE__);
1963 #endif
1964                         pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1965                 }
1966         } else {
1967                 status = pSRB->AdaptStatus;
1968                 if (status & H_OVER_UNDER_RUN) {
1969                         pSRB->TargetStatus = 0;
1970
1971                         pccb->ccb_h.status = CAM_DATA_RUN_ERR;  
1972                 } else if (pSRB->SRBStatus & PARITY_ERROR) {
1973 #ifdef AMD_DEBUG0
1974                         printf("DC390: driver stuffup %s %d\n",
1975                                __FILE__, __LINE__);
1976 #endif
1977                         /* Driver failed to perform operation     */
1978                         pccb->ccb_h.status = CAM_UNCOR_PARITY;
1979                 } else {        /* No error */
1980                         pSRB->AdaptStatus = 0;
1981                         pSRB->TargetStatus = 0;
1982                         pcsio->resid = 0;
1983                         /* there is no error, (sense is invalid)  */
1984                 }
1985         }
1986 ckc_e:
1987         intflag = splcam();
1988         if ((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1989                 /* CAM request not yet complete => freeze the device queue */
1990                 xpt_freeze_devq(pccb->ccb_h.path, 1);
1991                 pccb->ccb_h.status |= CAM_DEV_QFRZN;
1992         }
1993         TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
1994         TAILQ_INSERT_HEAD(&amd->free_srbs, pSRB, links);
1995         amdrunwaiting(amd);
1996         splx(intflag);
1997         xpt_done(pccb);
1998
1999 }
2000
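     /*
      * Force a SCSI bus reset.  RESET_DEV is noted in ACBFlag so that
      * amd_ScsiRstDetect() can tell our own reset from an external one.
      */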
2001 static void
2002 amd_ResetSCSIBus(struct amd_softc * amd)
2003 {
2004         int     intflag;
2005
2006         intflag = splcam();
2007         amd->ACBFlag |= RESET_DEV;
2008         amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
2009         amd_write8(amd, SCSICMDREG, RST_SCSI_BUS_CMD);
2010         splx(intflag);
2011         return;
2012 }
2013
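     /*
      * A SCSI reset was detected on the bus.  After letting the bus
      * settle, either note that our own reset completed or, for an
      * externally generated reset, restore default transfer parameters
      * and complete all queued SRBs with CAM_SCSI_BUS_RESET status.
      */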
2014 static void
2015 amd_ScsiRstDetect(struct amd_softc * amd)
2016 {
2017         int     intflag;
2018         u_int32_t   wlval;
2019
2020 #ifdef AMD_DEBUG0
2021         printf("amd_ScsiRstDetect \n");
2022 #endif
2023
2024         wlval = 1000;
2025         while (--wlval) {       /* delay 1 sec */
2026                 DELAY(1000);
2027         }
2028         intflag = splcam();
2029
2030         amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
2031         amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
2032
2033         if (amd->ACBFlag & RESET_DEV) {
2034                 amd->ACBFlag |= RESET_DONE;
2035         } else {
2036                 amd->ACBFlag |= RESET_DETECT;
2037                 ResetDevParam(amd);
2038                 amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
2039                                  AMD_TAG_WILDCARD, &amd->running_srbs,
2040                                  CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
2041                 amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
2042                                  AMD_TAG_WILDCARD, &amd->waiting_srbs,
2043                                  CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
2044                 amd->active_srb = NULL;
2045                 amd->ACBFlag = 0;
2046                 amdrunwaiting(amd);
2047         }
2048         splx(intflag);
2049         return;
2050 }
2051
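     /*
      * Issue an automatic REQUEST SENSE for pSRB: save the original
      * CDB and transfer state in Segment0/Segment1, point the spare
      * S/G element at the ccb's sense buffer, build a six byte
      * REQUEST SENSE CDB, and restart the SRB.
      */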
2052 static void
2053 RequestSense(struct amd_softc *amd, struct amd_srb *pSRB)
2054 {
2055         union ccb *pccb;
2056         struct ccb_scsiio *pcsio;
2057
2058         pccb = pSRB->pccb;
2059         pcsio = &pccb->csio;
2060
2061         pSRB->SRBFlag |= AUTO_REQSENSE;
2062         pSRB->Segment0[0] = *((u_int32_t *) & (pSRB->CmdBlock[0]));
2063         pSRB->Segment0[1] = *((u_int32_t *) & (pSRB->CmdBlock[4]));
2064         pSRB->Segment1[0] = (pSRB->ScsiCmdLen << 8) + pSRB->SGcount;
2065         pSRB->Segment1[1] = pSRB->TotalXferredLen;
2066
2067         pSRB->AdaptStatus = 0;
2068         pSRB->TargetStatus = 0;
2069
2070         pSRB->Segmentx.SGXPtr = (u_int32_t) vtophys(&pcsio->sense_data);
2071         pSRB->Segmentx.SGXLen = (u_int32_t) pcsio->sense_len;
2072
2073         pSRB->pSGlist = &pSRB->Segmentx;
2074         pSRB->SGcount = 1;
2075         pSRB->SGIndex = 0;
2076
2077         *((u_int32_t *) & (pSRB->CmdBlock[0])) = 0x00000003;
2078         pSRB->CmdBlock[1] = pSRB->pccb->ccb_h.target_lun << 5;
2079         *((u_int16_t *) & (pSRB->CmdBlock[4])) = pcsio->sense_len;
2080         pSRB->ScsiCmdLen = 6;
2081
2082         pSRB->TotalXferredLen = 0;
2083         pSRB->SGToBeXferLen = 0;
2084         if (amdstart(amd, pSRB) != 0) {
2085                 TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
2086                 TAILQ_INSERT_HEAD(&amd->waiting_srbs, pSRB, links);
2087         }
2088 }
2089
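     /*
      * The chip reported an invalid command sequence; if we were
      * selecting or sending a message, clear the FIFO.
      */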
2090 static void
2091 amd_InvalidCmd(struct amd_softc * amd)
2092 {
2093         struct amd_srb *srb;
2094
2095         srb = amd->active_srb;
2096         if (srb->SRBState & (SRB_START|SRB_MSGOUT))
2097                 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
2098 }
2099
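     /* Assign tag numbers to the SRB pool and put each SRB on the free list. */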
2100 void 
2101 amd_linkSRB(struct amd_softc *amd)
2102 {
2103         u_int16_t  count, i;
2104         struct amd_srb *psrb;
2105
2106         count = amd->SRBCount;
2107
2108         for (i = 0; i < count; i++) {
2109                 psrb = (struct amd_srb *)&amd->SRB_array[i];
2110                 psrb->TagNumber = i;
2111                 TAILQ_INSERT_TAIL(&amd->free_srbs, psrb, links);
2112         }
2113 }
2114
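     /*
      * The routines below bit-bang the board's serial EEPROM through
      * device-specific registers in PCI configuration space (offsets
      * 0x80 and 0xc0).  amd_EnDisableCE() asserts or deasserts the
      * EEPROM chip enable.
      */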
2115 void
2116 amd_EnDisableCE(struct amd_softc *amd, int mode, int *regval)
2117 {
2118         if (mode == ENABLE_CE) {
2119                 *regval = 0xc0;
2120         } else {
2121                 *regval = 0x80;
2122         }
2123         pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
2124         if (mode == DISABLE_CE) {
2125                 pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
2126         }
2127         DELAY(160);
2128 }
2129
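     /* Clock one bit (Carry) out to the EEPROM's data-in line. */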
2130 void
2131 amd_EEpromOutDI(struct amd_softc *amd, int *regval, int Carry)
2132 {
2133         u_int bval;
2134
2135         bval = 0;
2136         if (Carry) {
2137                 bval = 0x40;
2138                 *regval = 0x80;
2139                 pci_write_config(amd->dev, *regval, bval, /*bytes*/1);
2140         }
2141         DELAY(160);
2142         bval |= 0x80;
2143         pci_write_config(amd->dev, *regval, bval, /*bytes*/1);
2144         DELAY(160);
2145         pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
2146         DELAY(160);
2147 }
2148
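     /* Pulse the EEPROM clock and sample its data-out line. */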
2149 static int
2150 amd_EEpromInDO(struct amd_softc *amd)
2151 {
2152         pci_write_config(amd->dev, 0x80, 0x80, /*bytes*/1);
2153         DELAY(160);
2154         pci_write_config(amd->dev, 0x80, 0x40, /*bytes*/1);
2155         DELAY(160);
2156         if (pci_read_config(amd->dev, 0, /*bytes*/1) == 0x22)
2157                 return (1);
2158         return (0);
2159 }
2160
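     /* Shift one 16-bit word in from the EEPROM, MSB first. */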
2161 static u_int16_t
2162 EEpromGetData1(struct amd_softc *amd)
2163 {
2164         u_int     i;
2165         u_int     carryFlag;
2166         u_int16_t wval;
2167
2168         wval = 0;
2169         for (i = 0; i < 16; i++) {
2170                 wval <<= 1;
2171                 carryFlag = amd_EEpromInDO(amd);
2172                 wval |= carryFlag;
2173         }
2174         return (wval);
2175 }
2176
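     /* Clock out the start bit plus the 8-bit EEPROM command, MSB first. */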
2177 static void
2178 amd_Prepare(struct amd_softc *amd, int *regval, u_int8_t EEpromCmd)
2179 {
2180         u_int i, j;
2181         int carryFlag;
2182
2183         carryFlag = 1;
2184         j = 0x80;
2185         for (i = 0; i < 9; i++) {
2186                 amd_EEpromOutDI(amd, regval, carryFlag);
2187                 carryFlag = (EEpromCmd & j) ? 1 : 0;
2188                 j >>= 1;
2189         }
2190 }
2191
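     /* Read all 0x40 16-bit words of the EEPROM into eepromBuf. */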
2192 static void
2193 amd_ReadEEprom(struct amd_softc *amd)
2194 {
2195         int        regval;
2196         u_int      i;
2197         u_int16_t *ptr;
2198         u_int8_t   cmd;
2199
2200         ptr = (u_int16_t *)&amd->eepromBuf[0];
2201         cmd = EEPROM_READ;
2202         for (i = 0; i < 0x40; i++) {
2203                 amd_EnDisableCE(amd, ENABLE_CE, &regval);
2204                 amd_Prepare(amd, &regval, cmd);
2205                 *ptr = EEpromGetData1(amd);
2206                 ptr++;
2207                 cmd++;
2208                 amd_EnDisableCE(amd, DISABLE_CE, &regval);
2209         }
2210 }
2211
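     /*
      * Build default settings in eepromBuf: tagged queuing,
      * disconnection, sync negotiation and parity checking enabled for
      * every target, adapter SCSI ID 7.
      */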
2212 static void
2213 amd_load_defaults(struct amd_softc *amd)
2214 {
2215         int target;
2216
2217         bzero(&amd->eepromBuf, sizeof amd->eepromBuf);
2218         for (target = 0; target < MAX_SCSI_ID; target++)
2219                 amd->eepromBuf[target << 2] =
2220                     (TAG_QUEUING|EN_DISCONNECT|SYNC_NEGO|PARITY_CHK);
2221         amd->eepromBuf[EE_ADAPT_SCSI_ID] = 7;
2222         amd->eepromBuf[EE_MODE2] = ACTIVE_NEGATION|LUN_CHECK|GREATER_1G;
2223         amd->eepromBuf[EE_TAG_CMD_NUM] = 4;
2224 }
2225
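     /*
      * Read the EEPROM and verify that the 16-bit word sum of its
      * contents matches EE_CHECKSUM; if not, fall back to the built-in
      * defaults.
      */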
2226 static void
2227 amd_load_eeprom_or_defaults(struct amd_softc *amd)
2228 {
2229         u_int16_t  wval, *ptr;
2230         u_int8_t   i;
2231
2232         amd_ReadEEprom(amd);
2233         wval = 0;
2234         ptr = (u_int16_t *) & amd->eepromBuf[0];
2235         for (i = 0; i < EE_DATA_SIZE; i += 2, ptr++)
2236                 wval += *ptr;
2237
2238         if (wval != EE_CHECKSUM) {
2239                 if (bootverbose)
2240                         printf("amd%d: SEEPROM data unavailable.  "
2241                                "Using default device parameters.\n",
2242                                amd->unit);
2243                 amd_load_defaults(amd);
2244         }
2245 }
2246
2247 /*
2248  **********************************************************************
2249  * Function      : static int amd_init (device_t dev)
2250  * Purpose       : initialize the internal structures for a given SCSI host
2251  * Inputs        : dev - this host adapter's bus device handle
2252  **********************************************************************
2253  */
2254 static int
2255 amd_init(device_t dev)
2256 {
2257         struct amd_softc *amd = device_get_softc(dev);
2258         struct resource *iores;
2259         int     i, rid;
2260         u_int   bval;
2261
2262         rid = PCI_BASE_ADDR0;
2263         iores = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0, ~0, 1,
2264                                    RF_ACTIVE);
2265         if (iores == NULL) {
2266                 if (bootverbose)
2267                         printf("amd_init: bus_alloc_resource failure!\n");
2268                 return ENXIO;
2269         }
2270         amd->tag = rman_get_bustag(iores);
2271         amd->bsh = rman_get_bushandle(iores);
2272
2273         /* DMA tag for mapping buffers into device visible space. */
2274         if (bus_dma_tag_create(/*parent_dmat*/NULL, /*alignment*/1,
2275                                /*boundary*/0,
2276                                /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
2277                                /*highaddr*/BUS_SPACE_MAXADDR,
2278                                /*filter*/NULL, /*filterarg*/NULL,
2279                                /*maxsize*/MAXBSIZE, /*nsegments*/AMD_NSEG,
2280                                /*maxsegsz*/AMD_MAXTRANSFER_SIZE,
2281                                /*flags*/BUS_DMA_ALLOCNOW,
2282                                &amd->buffer_dmat) != 0) {
2283                 if (bootverbose)
2284                         printf("amd_init: bus_dma_tag_create failure!\n");
2285                 return ENXIO;
2286         }
2287         TAILQ_INIT(&amd->free_srbs);
2288         TAILQ_INIT(&amd->running_srbs);
2289         TAILQ_INIT(&amd->waiting_srbs);
2290         amd->last_phase = SCSI_BUS_FREE;
2291         amd->dev = dev;
2292         amd->unit = device_get_unit(dev);
2293         amd->SRBCount = MAX_SRB_CNT;
2294         amd->status = 0;
2295         amd_load_eeprom_or_defaults(amd);
2296         amd->max_id = 7;
2297         if (amd->eepromBuf[EE_MODE2] & LUN_CHECK) {
2298                 amd->max_lun = 7;
2299         } else {
2300                 amd->max_lun = 0;
2301         }
2302         amd->AdaptSCSIID = amd->eepromBuf[EE_ADAPT_SCSI_ID];
2303         amd->HostID_Bit = (1 << amd->AdaptSCSIID);
2304         amd->AdaptSCSILUN = 0;
2305         /* (eepromBuf[EE_TAG_CMD_NUM]) << 2; */
2306         amd->ACBFlag = 0;
2307         amd->Gmode2 = amd->eepromBuf[EE_MODE2];
2308         amd_linkSRB(amd);
2309         for (i = 0; i <= amd->max_id; i++) {
2310
2311                 if (amd->AdaptSCSIID != i) {
2312                         struct amd_target_info *tinfo;
2313                         PEEprom prom;
2314
2315                         tinfo = &amd->tinfo[i];
2316                         prom = (PEEprom)&amd->eepromBuf[i << 2];
2317                         if ((prom->EE_MODE1 & EN_DISCONNECT) != 0) {
2318                                 tinfo->disc_tag |= AMD_USR_DISCENB;
2319                                 if ((prom->EE_MODE1 & TAG_QUEUING) != 0)
2320                                         tinfo->disc_tag |= AMD_USR_TAGENB;
2321                         }
2322                         if ((prom->EE_MODE1 & SYNC_NEGO) != 0) {
2323                                 tinfo->user.period =
2324                                     eeprom_period[prom->EE_SPEED];
2325                                 tinfo->user.offset = AMD_MAX_SYNC_OFFSET;
2326                         }
2327                         tinfo->CtrlR1 = amd->AdaptSCSIID;
2328                         if ((prom->EE_MODE1 & PARITY_CHK) != 0)
2329                                 tinfo->CtrlR1 |= PARITY_ERR_REPO;
2330                         tinfo->CtrlR3 = FAST_CLK;
2331                         tinfo->CtrlR4 = EATER_25NS;
2332                         if ((amd->eepromBuf[EE_MODE2] & ACTIVE_NEGATION) != 0)
2333                                 tinfo->CtrlR4 |= NEGATE_REQACKDATA;
2334                 }
2335         }
2336         amd_write8(amd, SCSITIMEOUTREG, 153); /* 250ms selection timeout */
2337         /* Conversion factor = 0 , 40MHz clock */
2338         amd_write8(amd, CLKFACTREG, CLK_FREQ_40MHZ);
2339         /* NOP cmd - clear command register */
2340         amd_write8(amd, SCSICMDREG, NOP_CMD);   
2341         amd_write8(amd, CNTLREG2, EN_FEATURE|EN_SCSI2_CMD);
2342         amd_write8(amd, CNTLREG3, FAST_CLK);
2343         bval = EATER_25NS;
2344         if (amd->eepromBuf[EE_MODE2] & ACTIVE_NEGATION) {
2345                 bval |= NEGATE_REQACKDATA;
2346         }
2347         amd_write8(amd, CNTLREG4, bval);
2348
2349         /* Disable SCSI bus reset interrupt */
2350         amd_write8(amd, CNTLREG1, DIS_INT_ON_SCSI_RST);
2351
2352         return 0;
2353 }
2354
2355 /*
2356  * attach and init a host adapter
2357  */
2358 static int
2359 amd_attach(device_t dev)
2360 {
2361         struct cam_devq *devq;  /* Device Queue to use for this SIM */
2362         u_int8_t        intstat;
2363         struct amd_softc *amd = device_get_softc(dev);
2364         int             unit = device_get_unit(dev);
2365         int             rid;
2366         void            *ih;
2367         struct resource *irqres;
2368
2369         if (amd_init(dev)) {
2370                 if (bootverbose)
2371                         printf("amd_attach: amd_init failure!\n");
2372                 return ENXIO;
2373         }
2374
2375         /* Reset Pending INT */
2376         intstat = amd_read8(amd, INTSTATREG);
2377
2378         /* After setting up the adapter, map our interrupt */
2379         rid = 0;
2380         irqres = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
2381                                     RF_SHAREABLE | RF_ACTIVE);
2382         if (irqres == NULL ||
2383             bus_setup_intr(dev, irqres, INTR_TYPE_CAM, amd_intr, amd, &ih)) {
2384                 if (bootverbose)
2385                         printf("amd%d: unable to register interrupt handler!\n",
2386                                unit);
2387                 return ENXIO;
2388         }
2389
2390         /*
2391          * Now let the CAM generic SCSI layer find the SCSI devices on
2392          * the bus and start the queues.  Create the device queue for
2393          * our SIM; (MAX_START_JOB - 1) bounds the number of
2394          * simultaneous SIM transactions.
2395          */
2396         devq = cam_simq_alloc(MAX_START_JOB);
2397         if (devq == NULL) {
2398                 if (bootverbose)
2399                         printf("amd_attach: cam_simq_alloc failure!\n");
2400                 return ENXIO;
2401         }
2402
2403         amd->psim = cam_sim_alloc(amd_action, amd_poll, "amd",
2404                                   amd, amd->unit, 1, MAX_TAGS_CMD_QUEUE,
2405                                   devq);
2406         if (amd->psim == NULL) {
2407                 cam_simq_free(devq);
2408                 if (bootverbose)
2409                         printf("amd_attach: cam_sim_alloc failure!\n");
2410                 return ENXIO;
2411         }
2412
2413         if (xpt_bus_register(amd->psim, 0) != CAM_SUCCESS) {
2414                 cam_sim_free(amd->psim, /*free_devq*/TRUE);
2415                 if (bootverbose)
2416                         printf("amd_attach: xpt_bus_register failure!\n");
2417                 return ENXIO;
2418         }
2419
2420         if (xpt_create_path(&amd->ppath, /* periph */ NULL,
2421                             cam_sim_path(amd->psim), CAM_TARGET_WILDCARD,
2422                             CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2423                 xpt_bus_deregister(cam_sim_path(amd->psim));
2424                 cam_sim_free(amd->psim, /* free_simq */ TRUE);
2425                 if (bootverbose)
2426                         printf("amd_attach: xpt_create_path failure!\n");
2427                 return ENXIO;
2428         }
2429
2430         return 0;
2431 }
2432
2433 static int
2434 amd_probe(device_t dev)
2435 {
2436         if (pci_get_devid(dev) == PCI_DEVICE_ID_AMD53C974) {
2437                 device_set_desc(dev,
2438                         "Tekram DC390(T)/AMD53c974 SCSI Host Adapter");
2439                 return 0;
2440         }
2441         return ENXIO;
2442 }
2443
2444 static device_method_t amd_methods[] = {
2445         /* Device interface */
2446         DEVMETHOD(device_probe,         amd_probe),
2447         DEVMETHOD(device_attach,        amd_attach),
2448         { 0, 0 }
2449 };
2450
2451 static driver_t amd_driver = {
2452         "amd", amd_methods, sizeof(struct amd_softc)
2453 };
2454
2455 static devclass_t amd_devclass;
2456 DRIVER_MODULE(amd, pci, amd_driver, amd_devclass, 0, 0);