/*
 *********************************************************************
 * FILE NAME : amd.c
 * BY : C.L. Huang (ching@tekram.com.tw)
 *      Erich Chen (erich@tekram.com.tw)
 * Description: Device Driver for the amd53c974 PCI Bus Master
 *              SCSI Host adapter found on cards such as
 *              the Tekram DC-390(T).
 * (C)Copyright 1995-1999 Tekram Technology Co., Ltd.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *********************************************************************
 * $FreeBSD: src/sys/pci/amd.c,v 1.3.2.2 2001/06/02 04:32:50 nyan Exp $
 * $DragonFly: src/sys/dev/disk/amd/amd.c,v 1.11 2006/10/25 20:55:53 dillon Exp $
 */

/*
 *********************************************************************
 * HISTORY:
 *
 * REV#	DATE		NAME		DESCRIPTION
 * 1.00	07/02/96	CLH		First release for RELEASE-2.1.0
 * 1.01	08/20/96	CLH		Update for RELEASE-2.1.5
 * 1.02	11/06/96	CLH		Fixed more than 1 LUN scanning
 * 1.03	12/20/96	CLH		Modify to support 2.2-ALPHA
 * 1.04	12/26/97	CLH		Modify to support RELEASE-2.2.5
 * 1.05	01/01/99	ERICH CHEN	Modify to support RELEASE-3.0.x (CAM)
 *********************************************************************
 */

/* #define AMD_DEBUG0 */
/* #define AMD_DEBUG_SCSI_PHASE */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/thread2.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/clock.h>

#include <bus/cam/cam.h>
#include <bus/cam/cam_ccb.h>
#include <bus/cam/cam_sim.h>
#include <bus/cam/cam_xpt_sim.h>
#include <bus/cam/cam_debug.h>

#include <bus/cam/scsi/scsi_all.h>
#include <bus/cam/scsi/scsi_message.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>
#include "amd.h"

#define PCI_DEVICE_ID_AMD53C974		0x20201022ul
#define PCI_BASE_ADDR0			0x10

typedef u_int (phase_handler_t)(struct amd_softc *, struct amd_srb *, u_int);
typedef phase_handler_t *phase_handler_func_t;

static void amd_intr(void *vamd);
static int amdstart(struct amd_softc *amd, struct amd_srb *pSRB);
static phase_handler_t amd_NopPhase;

static phase_handler_t amd_DataOutPhase0;
static phase_handler_t amd_DataInPhase0;
#define amd_CommandPhase0 amd_NopPhase
static phase_handler_t amd_StatusPhase0;
static phase_handler_t amd_MsgOutPhase0;
static phase_handler_t amd_MsgInPhase0;
static phase_handler_t amd_DataOutPhase1;
static phase_handler_t amd_DataInPhase1;
static phase_handler_t amd_CommandPhase1;
static phase_handler_t amd_StatusPhase1;
static phase_handler_t amd_MsgOutPhase1;
static phase_handler_t amd_MsgInPhase1;

static void	amdsetupcommand(struct amd_softc *amd, struct amd_srb *srb);
static int	amdparsemsg(struct amd_softc *amd);
static int	amdhandlemsgreject(struct amd_softc *amd);
static void	amdconstructsdtr(struct amd_softc *amd,
				 u_int period, u_int offset);
static u_int	amdfindclockrate(struct amd_softc *amd, u_int *period);
static int	amdsentmsg(struct amd_softc *amd, u_int msgtype, int full);

static void DataIO_Comm(struct amd_softc *amd, struct amd_srb *pSRB, u_int dir);
static void amd_Disconnect(struct amd_softc *amd);
static void amd_Reselect(struct amd_softc *amd);
static void SRBdone(struct amd_softc *amd, struct amd_srb *pSRB);
static void amd_ScsiRstDetect(struct amd_softc *amd);
static void amd_ResetSCSIBus(struct amd_softc *amd);
static void RequestSense(struct amd_softc *amd, struct amd_srb *pSRB);
static void amd_InvalidCmd(struct amd_softc *amd);

#if 0
static void amd_timeout(void *arg1);
static void amd_reset(struct amd_softc *amd);
#endif
static u_int8_t *phystovirt(struct amd_srb *pSRB, u_int32_t xferCnt);

void amd_linkSRB(struct amd_softc *amd);
static int amd_init(device_t);
static void amd_load_defaults(struct amd_softc *amd);
static void amd_load_eeprom_or_defaults(struct amd_softc *amd);
static int amd_EEpromInDO(struct amd_softc *amd);
static u_int16_t EEpromGetData1(struct amd_softc *amd);
static void amd_EnDisableCE(struct amd_softc *amd, int mode, int *regval);
static void amd_EEpromOutDI(struct amd_softc *amd, int *regval, int Carry);
static void amd_Prepare(struct amd_softc *amd, int *regval, u_int8_t EEpromCmd);
static void amd_ReadEEprom(struct amd_softc *amd);

static int amd_probe(device_t);
static int amd_attach(device_t);
static void amdcompletematch(struct amd_softc *amd, target_id_t target,
			     lun_id_t lun, u_int tag, struct srb_queue *queue,
			     cam_status status);
static void amdsetsync(struct amd_softc *amd, u_int target, u_int clockrate,
		       u_int period, u_int offset, u_int type);
static void amdsettags(struct amd_softc *amd, u_int target, int tagenb);

static __inline void amd_clear_msg_state(struct amd_softc *amd);

static __inline void
amd_clear_msg_state(struct amd_softc *amd)
{
	amd->msgout_len = 0;
	amd->msgout_index = 0;
	amd->msgin_index = 0;
}

/* CAM SIM entry points */
#define ccb_srb_ptr	spriv_ptr0
#define ccb_amd_ptr	spriv_ptr1
static void amd_action(struct cam_sim *sim, union ccb *ccb);
static void amd_poll(struct cam_sim *sim);

/*
 * State engine function tables indexed by SCSI phase number
 */
phase_handler_func_t amd_SCSI_phase0[] = {
	amd_DataOutPhase0,
	amd_DataInPhase0,
	amd_CommandPhase0,
	amd_StatusPhase0,
	amd_NopPhase,
	amd_NopPhase,
	amd_MsgOutPhase0,
	amd_MsgInPhase0
};

phase_handler_func_t amd_SCSI_phase1[] = {
	amd_DataOutPhase1,
	amd_DataInPhase1,
	amd_CommandPhase1,
	amd_StatusPhase1,
	amd_NopPhase,
	amd_NopPhase,
	amd_MsgOutPhase1,
	amd_MsgInPhase1
};

/*
 * EEProm/BIOS negotiation periods
 */
u_int8_t eeprom_period[] = {
	25,	/* 10.0MHz */
	32,	/*  8.0MHz */
	38,	/*  6.6MHz */
	44,	/*  5.7MHz */
	50,	/*  5.0MHz */
	63,	/*  4.0MHz */
	83,	/*  3.0MHz */
	125	/*  2.0MHz */
};

/*
 * Chip clock setting to SCSI specified sync parameter table.
 */
u_int8_t tinfo_sync_period[] = {
	25,	/* 10.0 */
	32,	/*  8.0 */
	38,	/*  6.6 */
	44,	/*  5.7 */
	50,	/*  5.0 */
	57,	/*  4.4 */
	63,	/*  4.0 */
	70,	/*  3.6 */
	76,	/*  3.3 */
	83	/*  3.0 */
};

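/*
 * Pull a free SRB off the free list. The critical section guards the
 * list against concurrent manipulation by the interrupt path.
 */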
static __inline struct amd_srb *
amdgetsrb(struct amd_softc * amd)
{
	struct amd_srb *pSRB;

	crit_enter();
	pSRB = TAILQ_FIRST(&amd->free_srbs);
	if (pSRB)
		TAILQ_REMOVE(&amd->free_srbs, pSRB, links);
	crit_exit();
	return (pSRB);
}

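/*
 * Load the CDB for this transaction into the chip's SCSI FIFO. For
 * auto request-sense we build a REQUEST SENSE CDB on the fly instead of
 * using the original command block.
 */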
static void
amdsetupcommand(struct amd_softc *amd, struct amd_srb *srb)
{
	struct scsi_request_sense sense_cmd;
	struct ccb_scsiio *csio;
	u_int8_t *cdb;
	u_int cdb_len;

	csio = &srb->pccb->csio;

	if (srb->SRBFlag & AUTO_REQSENSE) {
		sense_cmd.opcode = REQUEST_SENSE;
		sense_cmd.byte2 = srb->pccb->ccb_h.target_lun << 5;
		sense_cmd.unused[0] = 0;
		sense_cmd.unused[1] = 0;
		sense_cmd.length = csio->sense_len;
		sense_cmd.control = 0;
		cdb = &sense_cmd.opcode;
		cdb_len = sizeof(sense_cmd);
	} else {
		cdb = &srb->CmdBlock[0];
		cdb_len = srb->ScsiCmdLen;
	}
	amd_write8_multi(amd, SCSIFIFOREG, cdb, cdb_len);
}

/*
 * Attempt to start a waiting transaction. Interrupts must be disabled
 * upon entry to this function.
 */
static void
amdrunwaiting(struct amd_softc *amd) {
	struct amd_srb *srb;

	if (amd->last_phase != SCSI_BUS_FREE)
		return;

	srb = TAILQ_FIRST(&amd->waiting_srbs);
	if (srb == NULL)
		return;

	if (amdstart(amd, srb) == 0) {
		TAILQ_REMOVE(&amd->waiting_srbs, srb, links);
		TAILQ_INSERT_HEAD(&amd->running_srbs, srb, links);
	}
}

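/*
 * bus_dmamap_load() callback: copy the returned DMA segments into the
 * SRB's scatter/gather list, initialize the SRB state, and queue it for
 * execution (unless the CCB was aborted while the mapping was pending).
 */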
static void
amdexecutesrb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	struct amd_srb *srb;
	union ccb *ccb;
	struct amd_softc *amd;

	srb = (struct amd_srb *)arg;
	ccb = srb->pccb;
	amd = (struct amd_softc *)ccb->ccb_h.ccb_amd_ptr;

	if (error != 0) {
		if (error != EFBIG)
			printf("amd%d: Unexpected error 0x%x returned from "
			       "bus_dmamap_load\n", amd->unit, error);
		if (ccb->ccb_h.status == CAM_REQ_INPROG) {
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
			ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
		}
		TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
		xpt_done(ccb);
		return;
	}

	if (nseg != 0) {
		struct amd_sg *sg;
		bus_dma_segment_t *end_seg;
		bus_dmasync_op_t op;

		end_seg = dm_segs + nseg;

		/* Copy the segments into our SG list */
		srb->pSGlist = &srb->SGsegment[0];
		sg = srb->pSGlist;
		while (dm_segs < end_seg) {
			sg->SGXLen = dm_segs->ds_len;
			sg->SGXPtr = dm_segs->ds_addr;
			sg++;
			dm_segs++;
		}

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(amd->buffer_dmat, srb->dmamap, op);

	}
	srb->SGcount = nseg;
	srb->SGIndex = 0;
	srb->AdaptStatus = 0;
	srb->TargetStatus = 0;
	srb->MsgCnt = 0;
	srb->SRBStatus = 0;
	srb->SRBFlag = 0;
	srb->SRBState = 0;
	srb->TotalXferredLen = 0;
	srb->SGPhysAddr = 0;
	srb->SGToBeXferLen = 0;
	srb->EndMessage = 0;

	crit_enter();

	/*
	 * Last chance to check whether this CCB needs to be aborted.
	 */
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		if (nseg != 0)
			bus_dmamap_unload(amd->buffer_dmat, srb->dmamap);
		TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
		xpt_done(ccb);
		crit_exit();
		return;
	}
	ccb->ccb_h.status |= CAM_SIM_QUEUED;
#if 0
	/* XXX Need a timeout handler */
	callout_reset(&ccb->ccb_h.timeout_ch, (ccb->ccb_h.timeout * hz) / 1000,
	    amdtimeout, srb);
#endif
	TAILQ_INSERT_TAIL(&amd->waiting_srbs, srb, links);
	amdrunwaiting(amd);
	crit_exit();
}

static void
amd_action(struct cam_sim * psim, union ccb * pccb)
{
	struct amd_softc * amd;
	u_int target_id, target_lun;

	CAM_DEBUG(pccb->ccb_h.path, CAM_DEBUG_TRACE, ("amd_action\n"));

	amd = (struct amd_softc *) cam_sim_softc(psim);
	target_id = pccb->ccb_h.target_id;
	target_lun = pccb->ccb_h.target_lun;

	switch (pccb->ccb_h.func_code) {
	case XPT_SCSI_IO:
	{
		struct amd_srb * pSRB;
		struct ccb_scsiio *pcsio;

		pcsio = &pccb->csio;

		/*
		 * Assign an SRB and connect it with this ccb.
		 */
		pSRB = amdgetsrb(amd);

		if (!pSRB) {
			/* Freeze SIMQ */
			pccb->ccb_h.status = CAM_RESRC_UNAVAIL;
			xpt_done(pccb);
			return;
		}
		pSRB->pccb = pccb;
		pccb->ccb_h.ccb_srb_ptr = pSRB;
		pccb->ccb_h.ccb_amd_ptr = amd;
		pSRB->ScsiCmdLen = pcsio->cdb_len;
		bcopy(pcsio->cdb_io.cdb_bytes, pSRB->CmdBlock, pcsio->cdb_len);
		if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			if ((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
				/*
				 * We've been given a pointer
				 * to a single buffer.
				 */
				if ((pccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
					int error;

					crit_enter();
					error =
					    bus_dmamap_load(amd->buffer_dmat,
							    pSRB->dmamap,
							    pcsio->data_ptr,
							    pcsio->dxfer_len,
							    amdexecutesrb,
							    pSRB, /*flags*/0);
					if (error == EINPROGRESS) {
						/*
						 * So as to maintain
						 * ordering, freeze the
						 * controller queue
						 * until our mapping is
						 * returned.
						 */
						xpt_freeze_simq(amd->psim, 1);
						pccb->ccb_h.status |=
						    CAM_RELEASE_SIMQ;
					}
					crit_exit();
				} else {
					struct bus_dma_segment seg;

					/* Pointer to physical buffer */
					seg.ds_addr =
					    (bus_addr_t)pcsio->data_ptr;
					seg.ds_len = pcsio->dxfer_len;
					amdexecutesrb(pSRB, &seg, 1, 0);
				}
			} else {
				struct bus_dma_segment *segs;

				if ((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0
				 || (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
					TAILQ_INSERT_HEAD(&amd->free_srbs,
							  pSRB, links);
					pccb->ccb_h.status = CAM_PROVIDE_FAIL;
					xpt_done(pccb);
					return;
				}

				/* Just use the segments provided */
				segs =
				    (struct bus_dma_segment *)pcsio->data_ptr;
				amdexecutesrb(pSRB, segs, pcsio->sglist_cnt, 0);
			}
		} else
			amdexecutesrb(pSRB, NULL, 0, 0);
		break;
	}
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &pccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 7;
		cpi->max_lun = amd->max_lun;	/* 7 or 0 */
		cpi->initiator_id = amd->AdaptSCSIID;
		cpi->bus_id = cam_sim_bus(psim);
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "TRM-AMD", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(psim);
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	case XPT_ABORT:
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	case XPT_RESET_BUS:
	{
		int i;

		amd_ResetSCSIBus(amd);
		amd->ACBFlag = 0;

		for (i = 0; i < 500; i++) {
			DELAY(1000);	/* Wait until our interrupt
					 * handler sees it */
		}

		pccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	case XPT_RESET_DEV:
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	case XPT_TERM_IO:
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts;
		struct amd_target_info *targ_info;
		struct amd_transinfo *tinfo;

		cts = &pccb->cts;
		crit_enter();
		targ_info = &amd->tinfo[target_id];
		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
			/* current transfer settings */
			if (targ_info->disc_tag & AMD_CUR_DISCENB) {
				cts->flags = CCB_TRANS_DISC_ENB;
			} else {
				cts->flags = 0;	/* no tag & disconnect */
			}
			if (targ_info->disc_tag & AMD_CUR_TAGENB) {
				cts->flags |= CCB_TRANS_TAG_ENB;
			}
			tinfo = &targ_info->current;
		} else {
			/* default (user) transfer settings */
			if (targ_info->disc_tag & AMD_USR_DISCENB) {
				cts->flags = CCB_TRANS_DISC_ENB;
			} else {
				cts->flags = 0;
			}
			if (targ_info->disc_tag & AMD_USR_TAGENB) {
				cts->flags |= CCB_TRANS_TAG_ENB;
			}
			tinfo = &targ_info->user;
		}

		cts->sync_period = tinfo->period;
		cts->sync_offset = tinfo->offset;
		cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
		crit_exit();
		cts->valid = CCB_TRANS_SYNC_RATE_VALID
			   | CCB_TRANS_SYNC_OFFSET_VALID
			   | CCB_TRANS_BUS_WIDTH_VALID
			   | CCB_TRANS_DISC_VALID
			   | CCB_TRANS_TQ_VALID;
		pccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	case XPT_SET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts;
		struct amd_target_info *targ_info;
		u_int update_type;
		int last_entry;

		cts = &pccb->cts;
		update_type = 0;
		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
			update_type |= AMD_TRANS_GOAL;
		} else if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
			update_type |= AMD_TRANS_USER;
		}
		if (update_type == 0
		 || update_type == (AMD_TRANS_USER|AMD_TRANS_GOAL)) {
			cts->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(pccb);
			break;
		}

		crit_enter();
		targ_info = &amd->tinfo[target_id];

		if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
			if (update_type & AMD_TRANS_GOAL) {
				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
					targ_info->disc_tag |= AMD_CUR_DISCENB;
				} else {
					targ_info->disc_tag &= ~AMD_CUR_DISCENB;
				}
			}
			if (update_type & AMD_TRANS_USER) {
				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
					targ_info->disc_tag |= AMD_USR_DISCENB;
				} else {
					targ_info->disc_tag &= ~AMD_USR_DISCENB;
				}
			}
		}
		if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
			if (update_type & AMD_TRANS_GOAL) {
				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
					targ_info->disc_tag |= AMD_CUR_TAGENB;
				} else {
					targ_info->disc_tag &= ~AMD_CUR_TAGENB;
				}
			}
			if (update_type & AMD_TRANS_USER) {
				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
					targ_info->disc_tag |= AMD_USR_TAGENB;
				} else {
					targ_info->disc_tag &= ~AMD_USR_TAGENB;
				}
			}
		}

		if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0) {
			if (update_type & AMD_TRANS_GOAL)
				cts->sync_offset = targ_info->goal.offset;
			else
				cts->sync_offset = targ_info->user.offset;
		}

		if (cts->sync_offset > AMD_MAX_SYNC_OFFSET)
			cts->sync_offset = AMD_MAX_SYNC_OFFSET;

		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) {
			if (update_type & AMD_TRANS_GOAL)
				cts->sync_period = targ_info->goal.period;
			else
				cts->sync_period = targ_info->user.period;
		}

		last_entry = sizeof(tinfo_sync_period) - 1;
		if ((cts->sync_period != 0)
		 && (cts->sync_period < tinfo_sync_period[0]))
			cts->sync_period = tinfo_sync_period[0];
		if (cts->sync_period > tinfo_sync_period[last_entry])
			cts->sync_period = 0;
		if (cts->sync_offset == 0)
			cts->sync_period = 0;

		if ((update_type & AMD_TRANS_USER) != 0) {
			targ_info->user.period = cts->sync_period;
			targ_info->user.offset = cts->sync_offset;
		}
		if ((update_type & AMD_TRANS_GOAL) != 0) {
			targ_info->goal.period = cts->sync_period;
			targ_info->goal.offset = cts->sync_offset;
		}
		crit_exit();
		pccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		struct ccb_calc_geometry *ccg;
		u_int32_t size_mb;
		u_int32_t secs_per_cylinder;
		int extended;

		ccg = &pccb->ccg;
		size_mb = ccg->volume_size/((1024L * 1024L)/ccg->block_size);
		extended = (amd->eepromBuf[EE_MODE2] & GREATER_1G) != 0;

		if (size_mb > 1024 && extended) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		pccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	default:
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	}
}

static void
amd_poll(struct cam_sim * psim)
{
	amd_intr(cam_sim_softc(psim));
}

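/*
 * Translate a byte count within the current transfer back into a kernel
 * virtual address by walking the scatter/gather entries that have
 * already been consumed.
 */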
static u_int8_t *
phystovirt(struct amd_srb * pSRB, u_int32_t xferCnt)
{
	int dataPtr;
	struct ccb_scsiio *pcsio;
	u_int8_t i;
	struct amd_sg *pseg;

	dataPtr = 0;
	pcsio = &pSRB->pccb->csio;

	dataPtr = (int) pcsio->data_ptr;
	pseg = pSRB->SGsegment;
	for (i = 0; i < pSRB->SGIndex; i++) {
		dataPtr += (int) pseg->SGXLen;
		pseg++;
	}
	dataPtr += (int) xferCnt;
	return ((u_int8_t *) dataPtr);
}

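/*
 * After a bus reset, revert every target other than ourselves to
 * asynchronous transfers.
 */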
static void
ResetDevParam(struct amd_softc * amd)
{
	u_int target;

	for (target = 0; target <= amd->max_id; target++) {
		if (amd->AdaptSCSIID != target) {
			amdsetsync(amd, target, /*clockrate*/0,
				   /*period*/0, /*offset*/0, AMD_TRANS_CUR);
		}
	}
}

static void
amdcompletematch(struct amd_softc *amd, target_id_t target, lun_id_t lun,
		 u_int tag, struct srb_queue *queue, cam_status status)
{
	struct amd_srb *srb;
	struct amd_srb *next_srb;

	for (srb = TAILQ_FIRST(queue); srb != NULL; srb = next_srb) {
		union ccb *ccb;

		next_srb = TAILQ_NEXT(srb, links);
		if (srb->pccb->ccb_h.target_id != target
		 && target != CAM_TARGET_WILDCARD)
			continue;

		if (srb->pccb->ccb_h.target_lun != lun
		 && lun != CAM_LUN_WILDCARD)
			continue;

		if (srb->TagNumber != tag
		 && tag != AMD_TAG_WILDCARD)
			continue;

		ccb = srb->pccb;
		TAILQ_REMOVE(queue, srb, links);
		TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0
		 && (status & CAM_DEV_QFRZN) != 0)
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status = status;
		xpt_done(ccb);
	}
}

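/*
 * Update our transfer-rate bookkeeping for a target and, when the change
 * applies to the active connection, reprogram the chip's synchronous
 * transfer registers. The XPT is also notified of the new rate.
 */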
static void
amdsetsync(struct amd_softc *amd, u_int target, u_int clockrate,
	   u_int period, u_int offset, u_int type)
{
	struct amd_target_info *tinfo;
	u_int old_period;
	u_int old_offset;

	tinfo = &amd->tinfo[target];
	old_period = tinfo->current.period;
	old_offset = tinfo->current.offset;
	if ((type & AMD_TRANS_CUR) != 0
	 && (old_period != period || old_offset != offset)) {
		struct cam_path *path;

		tinfo->current.period = period;
		tinfo->current.offset = offset;
		tinfo->sync_period_reg = clockrate;
		tinfo->sync_offset_reg = offset;
		tinfo->CtrlR3 &= ~FAST_SCSI;
		tinfo->CtrlR4 &= ~EATER_25NS;
		if (clockrate > 7)
			tinfo->CtrlR4 |= EATER_25NS;
		else
			tinfo->CtrlR3 |= FAST_SCSI;

		if ((type & AMD_TRANS_ACTIVE) == AMD_TRANS_ACTIVE) {
			amd_write8(amd, SYNCPERIOREG, tinfo->sync_period_reg);
			amd_write8(amd, SYNCOFFREG, tinfo->sync_offset_reg);
			amd_write8(amd, CNTLREG3, tinfo->CtrlR3);
			amd_write8(amd, CNTLREG4, tinfo->CtrlR4);
		}
		/* If possible, update the XPT's notion of our transfer rate */
		if (xpt_create_path(&path, /*periph*/NULL,
				    cam_sim_path(amd->psim), target,
				    CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
			struct ccb_trans_settings neg;

			xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
			neg.sync_period = period;
			neg.sync_offset = offset;
			neg.valid = CCB_TRANS_SYNC_RATE_VALID
				  | CCB_TRANS_SYNC_OFFSET_VALID;
			xpt_async(AC_TRANSFER_NEG, path, &neg);
			xpt_free_path(path);
		}
	}
	if ((type & AMD_TRANS_GOAL) != 0) {
		tinfo->goal.period = period;
		tinfo->goal.offset = offset;
	}

	if ((type & AMD_TRANS_USER) != 0) {
		tinfo->user.period = period;
		tinfo->user.offset = offset;
	}
}

static void
amdsettags(struct amd_softc *amd, u_int target, int tagenb)
{
	panic("Implement me!\n");
}


#if 0
/*
 **********************************************************************
 * Function : amd_reset (struct amd_softc * amd)
 * Purpose  : perform a hard reset on the SCSI bus (and AMD chip).
 * Inputs   : cmd - command which caused the SCSI RESET
 **********************************************************************
 */
static void
amd_reset(struct amd_softc * amd)
{
	u_int8_t bval;
	u_int16_t i;

#ifdef AMD_DEBUG0
	printf("DC390: RESET");
#endif

	crit_enter();
	bval = amd_read8(amd, CNTLREG1);
	bval |= DIS_INT_ON_SCSI_RST;
	amd_write8(amd, CNTLREG1, bval);	/* disable interrupt */
	amd_ResetSCSIBus(amd);

	for (i = 0; i < 500; i++) {
		DELAY(1000);
	}

	bval = amd_read8(amd, CNTLREG1);
	bval &= ~DIS_INT_ON_SCSI_RST;
	amd_write8(amd, CNTLREG1, bval);	/* re-enable interrupt */

	amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	ResetDevParam(amd);
	amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
			 AMD_TAG_WILDCARD, &amd->running_srbs,
			 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
	amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
			 AMD_TAG_WILDCARD, &amd->waiting_srbs,
			 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
	amd->active_srb = NULL;
	amd->ACBFlag = 0;
	crit_exit();
	return;
}

void
amd_timeout(void *arg1)
{
	struct amd_srb *pSRB;

	pSRB = (struct amd_srb *) arg1;
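	/*
	 * XXX Stub only; this whole block is compiled out. A real handler
	 * would presumably abort the timed-out SRB (or reset the bus) and
	 * complete its CCB with CAM_CMD_TIMEOUT.
	 */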
}
#endif

static int
amdstart(struct amd_softc *amd, struct amd_srb *pSRB)
{
	union ccb *pccb;
	struct ccb_scsiio *pcsio;
	struct amd_target_info *targ_info;
	u_int identify_msg;
	u_int command;
	u_int target;
	u_int lun;
	int tagged;

	pccb = pSRB->pccb;
	pcsio = &pccb->csio;
	target = pccb->ccb_h.target_id;
	lun = pccb->ccb_h.target_lun;
	targ_info = &amd->tinfo[target];

	amd_clear_msg_state(amd);
	amd_write8(amd, SCSIDESTIDREG, target);
	amd_write8(amd, SYNCPERIOREG, targ_info->sync_period_reg);
	amd_write8(amd, SYNCOFFREG, targ_info->sync_offset_reg);
	amd_write8(amd, CNTLREG1, targ_info->CtrlR1);
	amd_write8(amd, CNTLREG3, targ_info->CtrlR3);
	amd_write8(amd, CNTLREG4, targ_info->CtrlR4);
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	identify_msg = MSG_IDENTIFYFLAG | lun;
	if ((targ_info->disc_tag & AMD_CUR_DISCENB) != 0
	 && (pccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0
	 && (pSRB->CmdBlock[0] != REQUEST_SENSE)
	 && (pSRB->SRBFlag & AUTO_REQSENSE) == 0)
		identify_msg |= MSG_IDENTIFY_DISCFLAG;

	amd_write8(amd, SCSIFIFOREG, identify_msg);
	tagged = 0;
	if ((targ_info->disc_tag & AMD_CUR_TAGENB) == 0
	 || (identify_msg & MSG_IDENTIFY_DISCFLAG) == 0)
		pccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
	if (targ_info->current.period != targ_info->goal.period
	 || targ_info->current.offset != targ_info->goal.offset) {
		command = SEL_W_ATN_STOP;
		amdconstructsdtr(amd, targ_info->goal.period,
				 targ_info->goal.offset);
	} else if ((pccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
		command = SEL_W_ATN2;
		pSRB->SRBState = SRB_START;
		amd_write8(amd, SCSIFIFOREG, pcsio->tag_action);
		amd_write8(amd, SCSIFIFOREG, pSRB->TagNumber);
		tagged++;
	} else {
		command = SEL_W_ATN;
		pSRB->SRBState = SRB_START;
	}
	if (command != SEL_W_ATN_STOP)
		amdsetupcommand(amd, pSRB);

	if (amd_read8(amd, SCSISTATREG) & INTERRUPT) {
		pSRB->SRBState = SRB_READY;
		return (1);
	} else {
		amd->last_phase = SCSI_ARBITRATING;
		amd_write8(amd, SCSICMDREG, command);
		amd->active_srb = pSRB;
		amd->cur_target = target;
		amd->cur_lun = lun;
		return (0);
	}
}

/*
 * Catch an interrupt from the adapter.
 * Process pending device interrupts.
 */
static void
amd_intr(void *arg)
{
	struct amd_softc *amd;
	struct amd_srb *pSRB;
	u_int internstat = 0;
	u_int scsistat;
	u_int intstat;

	amd = (struct amd_softc *)arg;

	if (amd == NULL) {
#ifdef AMD_DEBUG0
		printf("amd_intr: amd NULL return......");
#endif
		return;
	}

	scsistat = amd_read8(amd, SCSISTATREG);
	if (!(scsistat & INTERRUPT)) {
#ifdef AMD_DEBUG0
		printf("amd_intr: scsistat = NULL ,return......");
#endif
		return;
	}
#ifdef AMD_DEBUG_SCSI_PHASE
	printf("scsistat=%2x,", scsistat);
#endif

	internstat = amd_read8(amd, INTERNSTATREG);
	intstat = amd_read8(amd, INTSTATREG);

#ifdef AMD_DEBUG_SCSI_PHASE
	printf("intstat=%2x,", intstat);
#endif

	if (intstat & DISCONNECTED) {
		amd_Disconnect(amd);
		return;
	}
	if (intstat & RESELECTED) {
		amd_Reselect(amd);
		return;
	}
	if (intstat & INVALID_CMD) {
		amd_InvalidCmd(amd);
		return;
	}
	if (intstat & SCSI_RESET_) {
		amd_ScsiRstDetect(amd);
		return;
	}
	if (intstat & (SUCCESSFUL_OP + SERVICE_REQUEST)) {
		pSRB = amd->active_srb;
		/*
		 * Run our state engine. First perform
		 * post processing for the last phase we
		 * were in, followed by any processing
		 * required to handle the current phase.
		 */
		scsistat =
		    amd_SCSI_phase0[amd->last_phase](amd, pSRB, scsistat);
		amd->last_phase = scsistat & SCSI_PHASE_MASK;
		(void)amd_SCSI_phase1[amd->last_phase](amd, pSRB, scsistat);
	}
}

static u_int
amd_DataOutPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	struct amd_sg *psgl;
	u_int32_t ResidCnt, xferCnt;

	if (!(pSRB->SRBState & SRB_XFERPAD)) {
		if (scsistat & PARITY_ERR) {
			pSRB->SRBStatus |= PARITY_ERROR;
		}
		if (scsistat & COUNT_2_ZERO) {
			while ((amd_read8(amd, DMA_Status)&DMA_XFER_DONE) == 0)
				;
			pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
			pSRB->SGIndex++;
			if (pSRB->SGIndex < pSRB->SGcount) {
				pSRB->pSGlist++;
				psgl = pSRB->pSGlist;
				pSRB->SGPhysAddr = psgl->SGXPtr;
				pSRB->SGToBeXferLen = psgl->SGXLen;
			} else {
				pSRB->SGToBeXferLen = 0;
			}
		} else {
			ResidCnt = amd_read8(amd, CURRENTFIFOREG) & 0x1f;
			ResidCnt += amd_read8(amd, CTCREG_LOW)
				  | (amd_read8(amd, CTCREG_MID) << 8)
				  | (amd_read8(amd, CURTXTCNTREG) << 16);

			xferCnt = pSRB->SGToBeXferLen - ResidCnt;
			pSRB->SGPhysAddr += xferCnt;
			pSRB->TotalXferredLen += xferCnt;
			pSRB->SGToBeXferLen = ResidCnt;
		}
	}
	amd_write8(amd, DMA_Cmd, WRITE_DIRECTION | DMA_IDLE_CMD);
	return (scsistat);
}

static u_int
amd_DataInPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	u_int8_t bval;
	u_int16_t i, residual;
	struct amd_sg *psgl;
	u_int32_t ResidCnt, xferCnt;
	u_int8_t *ptr;

	if (!(pSRB->SRBState & SRB_XFERPAD)) {
		if (scsistat & PARITY_ERR) {
			pSRB->SRBStatus |= PARITY_ERROR;
		}
		if (scsistat & COUNT_2_ZERO) {
			while (1) {
				bval = amd_read8(amd, DMA_Status);
				if ((bval & DMA_XFER_DONE) != 0)
					break;
			}
			amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_IDLE_CMD);

			pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
			pSRB->SGIndex++;
			if (pSRB->SGIndex < pSRB->SGcount) {
				pSRB->pSGlist++;
				psgl = pSRB->pSGlist;
				pSRB->SGPhysAddr = psgl->SGXPtr;
				pSRB->SGToBeXferLen = psgl->SGXLen;
			} else {
				pSRB->SGToBeXferLen = 0;
			}
		} else {	/* phase changed */
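			/*
			 * The target changed phase mid-transfer, so data
			 * may still be sitting in the SCSI FIFO. Wait for
			 * it to drain (flagging a residual byte if it will
			 * not), blast out the DMA pipeline, and recompute
			 * the residual from the transfer counters.
			 */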
			residual = 0;
			bval = amd_read8(amd, CURRENTFIFOREG);
			while (bval & 0x1f) {
				if ((bval & 0x1f) == 1) {
					for (i = 0; i < 0x100; i++) {
						bval = amd_read8(amd,
						    CURRENTFIFOREG);
						if (!(bval & 0x1f)) {
							goto din_1;
						} else if (i == 0x0ff) {
							residual = 1;
							goto din_1;
						}
					}
				} else {
					bval = amd_read8(amd, CURRENTFIFOREG);
				}
			}
	din_1:
			amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_BLAST_CMD);
			for (i = 0; i < 0x8000; i++) {
				if ((amd_read8(amd, DMA_Status)&BLAST_COMPLETE))
					break;
			}
			amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_IDLE_CMD);

			ResidCnt = amd_read8(amd, CTCREG_LOW)
				 | (amd_read8(amd, CTCREG_MID) << 8)
				 | (amd_read8(amd, CURTXTCNTREG) << 16);
			xferCnt = pSRB->SGToBeXferLen - ResidCnt;
			pSRB->SGPhysAddr += xferCnt;
			pSRB->TotalXferredLen += xferCnt;
			pSRB->SGToBeXferLen = ResidCnt;
			if (residual) {
				/* get residual byte */
				bval = amd_read8(amd, SCSIFIFOREG);
				ptr = phystovirt(pSRB, xferCnt);
				*ptr = bval;
				pSRB->SGPhysAddr++;
				pSRB->TotalXferredLen++;
				pSRB->SGToBeXferLen--;
			}
		}
	}
	return (scsistat);
}

static u_int
amd_StatusPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	pSRB->TargetStatus = amd_read8(amd, SCSIFIFOREG);
	/* get message */
	pSRB->EndMessage = amd_read8(amd, SCSIFIFOREG);
	pSRB->SRBState = SRB_COMPLETED;
	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);
	return (SCSI_NOP0);
}

static u_int
amd_MsgOutPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	if (pSRB->SRBState & (SRB_UNEXPECT_RESEL + SRB_ABORT_SENT)) {
		scsistat = SCSI_NOP0;
	}
	return (scsistat);
}

static u_int
amd_MsgInPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	int done;

	amd->msgin_buf[amd->msgin_index] = amd_read8(amd, SCSIFIFOREG);

	done = amdparsemsg(amd);
	if (done)
		amd->msgin_index = 0;
	else
		amd->msgin_index++;
	return (SCSI_NOP0);
}

static int
amdparsemsg(struct amd_softc *amd)
{
	struct amd_target_info *targ_info;
	int reject;
	int done;
	int response;

	done = FALSE;
	response = FALSE;
	reject = FALSE;

	targ_info = &amd->tinfo[amd->cur_target];

	/*
	 * Parse as much of the message as is available,
	 * rejecting it if we don't support it. When
	 * the entire message is available and has been
	 * handled, return TRUE indicating that we have
	 * parsed an entire message.
	 */
	switch (amd->msgin_buf[0]) {
	case MSG_DISCONNECT:
		amd->active_srb->SRBState = SRB_DISCONNECT;
		amd->disc_count[amd->cur_target][amd->cur_lun]++;
		done = TRUE;
		break;
	case MSG_SIMPLE_Q_TAG:
	{
		struct amd_srb *disc_srb;

		if (amd->msgin_index < 1)
			break;
		disc_srb = &amd->SRB_array[amd->msgin_buf[1]];
		if (amd->active_srb != NULL
		 || disc_srb->SRBState != SRB_DISCONNECT
		 || disc_srb->pccb->ccb_h.target_id != amd->cur_target
		 || disc_srb->pccb->ccb_h.target_lun != amd->cur_lun) {
			printf("amd%d: Unexpected tagged reselection "
			       "for target %d, Issuing Abort\n", amd->unit,
			       amd->cur_target);
			amd->msgout_buf[0] = MSG_ABORT;
			amd->msgout_len = 1;
			response = TRUE;
			break;
		}
		amd->active_srb = disc_srb;
		amd->disc_count[amd->cur_target][amd->cur_lun]--;
		done = TRUE;
		break;
	}
	case MSG_MESSAGE_REJECT:
		response = amdhandlemsgreject(amd);
		if (response == FALSE)
			amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
		/* FALLTHROUGH */
	case MSG_NOOP:
		done = TRUE;
		break;
	case MSG_EXTENDED:
	{
		u_int clockrate;
		u_int period;
		u_int offset;
		u_int saved_offset;

		/* Wait for enough of the message to begin validation */
		if (amd->msgin_index < 1)
			break;
		if (amd->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
			reject = TRUE;
			break;
		}

		/* Wait for opcode */
		if (amd->msgin_index < 2)
			break;

		if (amd->msgin_buf[2] != MSG_EXT_SDTR) {
			reject = TRUE;
			break;
		}

		/*
		 * Wait until we have both args before validating
		 * and acting on this message.
		 *
		 * Add one to MSG_EXT_SDTR_LEN to account for
		 * the extended message preamble.
		 */
		if (amd->msgin_index < (MSG_EXT_SDTR_LEN + 1))
			break;

		period = amd->msgin_buf[3];
		saved_offset = offset = amd->msgin_buf[4];
		clockrate = amdfindclockrate(amd, &period);
		if (offset > AMD_MAX_SYNC_OFFSET)
			offset = AMD_MAX_SYNC_OFFSET;
		if (period == 0 || offset == 0) {
			offset = 0;
			period = 0;
			clockrate = 0;
		}
		amdsetsync(amd, amd->cur_target, clockrate, period, offset,
			   AMD_TRANS_ACTIVE|AMD_TRANS_GOAL);

		/*
		 * See if we initiated Sync Negotiation
		 * and didn't have to fall down to async
		 * transfers.
		 */
		if (amdsentmsg(amd, MSG_EXT_SDTR, /*full*/TRUE)) {
			/* We started it */
			if (saved_offset != offset) {
				/* Went too low - force async */
				reject = TRUE;
			}
		} else {
			/*
			 * Send our own SDTR in reply
			 */
			if (bootverbose)
				printf("Sending SDTR!\n");
			amd->msgout_index = 0;
			amd->msgout_len = 0;
			amdconstructsdtr(amd, period, offset);
			amd->msgout_index = 0;
			response = TRUE;
		}
		done = TRUE;
		break;
	}
	case MSG_SAVEDATAPOINTER:
	case MSG_RESTOREPOINTERS:
		/* XXX Implement!!! */
		done = TRUE;
		break;
	default:
		reject = TRUE;
		break;
	}

	if (reject) {
		amd->msgout_index = 0;
		amd->msgout_len = 1;
		amd->msgout_buf[0] = MSG_MESSAGE_REJECT;
		done = TRUE;
		response = TRUE;
	}

	if (response)
		amd_write8(amd, SCSICMDREG, SET_ATN_CMD);

	if (done && !response)
		/* Clear the outgoing message buffer */
		amd->msgout_len = 0;

	/* Drop Ack */
	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);

	return (done);
}

static u_int
amdfindclockrate(struct amd_softc *amd, u_int *period)
{
	u_int i;
	u_int clockrate;

	for (i = 0; i < sizeof(tinfo_sync_period); i++) {
		u_int8_t *table_entry;

		table_entry = &tinfo_sync_period[i];
		if (*period <= *table_entry) {
			/*
			 * When responding to a target that requests
			 * sync, the requested rate may fall between
			 * two rates that we can output, but still be
			 * a rate that we can receive. Because of this,
			 * we want to respond to the target with
			 * the same rate that it sent to us even
			 * if the period we use to send data to it
			 * is lower. Only lower the response period
			 * if we must.
			 */
			if (i == 0) {
				*period = *table_entry;
			}
			break;
		}
	}

	if (i == sizeof(tinfo_sync_period)) {
		/* Too slow for us. Use async transfers. */
		*period = 0;
		clockrate = 0;
	} else
		clockrate = i + 4;

	return (clockrate);
}

/*
 * See if we sent a particular extended message to the target.
 * If "full" is true, the target saw the full message.
 * If "full" is false, the target saw at least the first
 * byte of the message.
 */
static int
amdsentmsg(struct amd_softc *amd, u_int msgtype, int full)
{
	int found;
	int index;

	found = FALSE;
	index = 0;

	while (index < amd->msgout_len) {
		if ((amd->msgout_buf[index] & MSG_IDENTIFYFLAG) != 0
		 || amd->msgout_buf[index] == MSG_MESSAGE_REJECT)
			index++;
		else if (amd->msgout_buf[index] >= MSG_SIMPLE_Q_TAG
		      && amd->msgout_buf[index] < MSG_IGN_WIDE_RESIDUE) {
			/* Skip tag type and tag id */
			index += 2;
		} else if (amd->msgout_buf[index] == MSG_EXTENDED) {
			/* Found a candidate */
			if (amd->msgout_buf[index+2] == msgtype) {
				u_int end_index;

				end_index = index + 1
					  + amd->msgout_buf[index + 1];
				if (full) {
					if (amd->msgout_index > end_index)
						found = TRUE;
				} else if (amd->msgout_index > index)
					found = TRUE;
			}
			break;
		} else {
			panic("amdsentmsg: Inconsistent msg buffer");
		}
	}
	return (found);
}

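/*
 * Append a five-byte extended SDTR message (preamble, length, opcode,
 * period, offset) to the outgoing message buffer.
 */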
static void
amdconstructsdtr(struct amd_softc *amd, u_int period, u_int offset)
{
	amd->msgout_buf[amd->msgout_index++] = MSG_EXTENDED;
	amd->msgout_buf[amd->msgout_index++] = MSG_EXT_SDTR_LEN;
	amd->msgout_buf[amd->msgout_index++] = MSG_EXT_SDTR;
	amd->msgout_buf[amd->msgout_index++] = period;
	amd->msgout_buf[amd->msgout_index++] = offset;
	amd->msgout_len += 5;
}

static int
amdhandlemsgreject(struct amd_softc *amd)
{
	/*
	 * If we had an outstanding SDTR for this
	 * target, this is a signal that the target
	 * is refusing negotiation. Also watch out
	 * for rejected tag messages.
	 */
	struct amd_srb *srb;
	struct amd_target_info *targ_info;
	int response = FALSE;

	srb = amd->active_srb;
	targ_info = &amd->tinfo[amd->cur_target];
	if (amdsentmsg(amd, MSG_EXT_SDTR, /*full*/FALSE)) {
		/* note asynch xfers and clear flag */
		amdsetsync(amd, amd->cur_target, /*clockrate*/0,
			   /*period*/0, /*offset*/0,
			   AMD_TRANS_ACTIVE|AMD_TRANS_GOAL);
		printf("amd%d:%d: refuses synchronous negotiation. "
		       "Using asynchronous transfers\n",
		       amd->unit, amd->cur_target);
	} else if ((srb != NULL)
		&& (srb->pccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
		struct ccb_trans_settings neg;

		printf("amd%d:%d: refuses tagged commands. Performing "
		       "non-tagged I/O\n", amd->unit, amd->cur_target);

		amdsettags(amd, amd->cur_target, FALSE);
		neg.flags = 0;
		neg.valid = CCB_TRANS_TQ_VALID;
		xpt_setup_ccb(&neg.ccb_h, srb->pccb->ccb_h.path, /*priority*/1);
		xpt_async(AC_TRANSFER_NEG, srb->pccb->ccb_h.path, &neg);

		/*
		 * Resend the identify for this CCB as the target
		 * may believe that the selection is invalid otherwise.
		 */
		if (amd->msgout_len != 0)
			bcopy(&amd->msgout_buf[0], &amd->msgout_buf[1],
			      amd->msgout_len);
		amd->msgout_buf[0] = MSG_IDENTIFYFLAG
				   | srb->pccb->ccb_h.target_lun;
		amd->msgout_len++;
		if ((targ_info->disc_tag & AMD_CUR_DISCENB) != 0
		 && (srb->pccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
			amd->msgout_buf[0] |= MSG_IDENTIFY_DISCFLAG;

		srb->pccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;

		/*
		 * Requeue all tagged commands for this target
		 * currently in our possession so they can be
		 * converted to untagged commands.
		 */
		amdcompletematch(amd, amd->cur_target, amd->cur_lun,
				 AMD_TAG_WILDCARD, &amd->waiting_srbs,
				 CAM_DEV_QFRZN|CAM_REQUEUE_REQ);
	} else {
		/*
		 * Otherwise, we ignore it.
		 */
		printf("amd%d:%d: Message reject received -- ignored\n",
		       amd->unit, amd->cur_target);
	}
	return (response);
}
1501#if 0
1502 if (!(pSRB->SRBState & SRB_MSGIN_MULTI)) {
1503 if (bval == MSG_DISCONNECT) {
1504 pSRB->SRBState = SRB_DISCONNECT;
1505 } else if (bval == MSG_SAVEDATAPOINTER) {
1506 goto min6;
1507 } else if ((bval == MSG_EXTENDED)
1508 || ((bval >= MSG_SIMPLE_Q_TAG)
1509 && (bval <= MSG_ORDERED_Q_TAG))) {
1510 pSRB->SRBState |= SRB_MSGIN_MULTI;
1511 pSRB->MsgInBuf[0] = bval;
1512 pSRB->MsgCnt = 1;
1513 pSRB->pMsgPtr = &pSRB->MsgInBuf[1];
1514 } else if (bval == MSG_MESSAGE_REJECT) {
1515 amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
1516
1517 if (pSRB->SRBState & DO_SYNC_NEGO) {
1518 goto set_async;
1519 }
1520 } else if (bval == MSG_RESTOREPOINTERS) {
1521 goto min6;
1522 } else {
1523 goto min6;
1524 }
1525 } else { /* minx: */
1526 *pSRB->pMsgPtr = bval;
1527 pSRB->MsgCnt++;
1528 pSRB->pMsgPtr++;
1529 if ((pSRB->MsgInBuf[0] >= MSG_SIMPLE_Q_TAG)
1530 && (pSRB->MsgInBuf[0] <= MSG_ORDERED_Q_TAG)) {
1531 if (pSRB->MsgCnt == 2) {
1532 pSRB->SRBState = 0;
1533 pSRB = &amd->SRB_array[pSRB->MsgInBuf[1]];
				if ((pSRB->SRBState & SRB_DISCONNECT) == 0) {
					pSRB = amd->pTmpSRB;
					pSRB->SRBState = SRB_UNEXPECT_RESEL;
					pDCB->pActiveSRB = pSRB;
					pSRB->MsgOutBuf[0] = MSG_ABORT_TAG;
					EnableMsgOut2(amd, pSRB);
				} else {
					if (pDCB->DCBFlag & ABORT_DEV_) {
						pSRB->SRBState = SRB_ABORT_SENT;
						EnableMsgOut1(amd, pSRB);
					}
					pDCB->pActiveSRB = pSRB;
					pSRB->SRBState = SRB_DATA_XFER;
				}
			}
		} else if ((pSRB->MsgInBuf[0] == MSG_EXTENDED)
			&& (pSRB->MsgCnt == 5)) {
			pSRB->SRBState &= ~(SRB_MSGIN_MULTI + DO_SYNC_NEGO);
			if ((pSRB->MsgInBuf[1] != 3)
			 || (pSRB->MsgInBuf[2] != 1)) {	/* reject_msg: */
				pSRB->MsgCnt = 1;
				pSRB->MsgInBuf[0] = MSG_MESSAGE_REJECT;
				amd_write8(amd, SCSICMDREG, SET_ATN_CMD);
			} else if (!(pSRB->MsgInBuf[3])
				|| !(pSRB->MsgInBuf[4])) {
		set_async:	/* set async */

				pDCB = pSRB->pSRBDCB;
				/* disable sync & sync nego */
				pDCB->SyncMode &= ~(SYNC_ENABLE|SYNC_NEGO_DONE);
				pDCB->SyncPeriod = 0;
				pDCB->SyncOffset = 0;

				pDCB->tinfo.goal.period = 0;
				pDCB->tinfo.goal.offset = 0;

				pDCB->tinfo.current.period = 0;
				pDCB->tinfo.current.offset = 0;
				pDCB->tinfo.current.width =
				    MSG_EXT_WDTR_BUS_8_BIT;

				pDCB->CtrlR3 = FAST_CLK;	/* non_fast */
				pDCB->CtrlR4 &= 0x3f;
				pDCB->CtrlR4 |= EATER_25NS;
				goto re_prog;
			} else {	/* set sync */

				pDCB = pSRB->pSRBDCB;
				/* enable sync & sync nego */
				pDCB->SyncMode |= SYNC_ENABLE|SYNC_NEGO_DONE;

				/* set sync offset */
				pDCB->SyncOffset &= 0x0f0;
				pDCB->SyncOffset |= pSRB->MsgInBuf[4];

				/* set sync period */
				pDCB->MaxNegoPeriod = pSRB->MsgInBuf[3];

				wval = (u_int16_t) pSRB->MsgInBuf[3];
				wval = wval << 2;
				wval--;
				wval1 = wval / 25;
				if ((wval1 * 25) != wval) {
					wval1++;
				}
				bval = FAST_CLK|FAST_SCSI;
				pDCB->CtrlR4 &= 0x3f;
				if (wval1 >= 8) {
					/* Fast SCSI */
					wval1--;
					bval = FAST_CLK;
					pDCB->CtrlR4 |= EATER_25NS;
				}
				pDCB->CtrlR3 = bval;
				pDCB->SyncPeriod = (u_int8_t) wval1;

				pDCB->tinfo.goal.period =
				    tinfo_sync_period[pDCB->SyncPeriod - 4];
				pDCB->tinfo.goal.offset = pDCB->SyncOffset;
				pDCB->tinfo.current.period =
				    tinfo_sync_period[pDCB->SyncPeriod - 4];
				pDCB->tinfo.current.offset = pDCB->SyncOffset;

				/*
				 * program SCSI control register
				 */
		re_prog:
				amd_write8(amd, SYNCPERIOREG, pDCB->SyncPeriod);
				amd_write8(amd, SYNCOFFREG, pDCB->SyncOffset);
				amd_write8(amd, CNTLREG3, pDCB->CtrlR3);
				amd_write8(amd, CNTLREG4, pDCB->CtrlR4);
			}
		}
	}
min6:
	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);
	return (SCSI_NOP0);
}
#endif

static u_int
amd_DataOutPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	DataIO_Comm(amd, pSRB, WRITE_DIRECTION);
	return (scsistat);
}

static u_int
amd_DataInPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	DataIO_Comm(amd, pSRB, READ_DIRECTION);
	return (scsistat);
}

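/*
 * Program the chip's transfer counters and the DMA engine for the next
 * scatter/gather element, or issue a transfer-pad command once the S/G
 * list has been exhausted.
 */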
static void
DataIO_Comm(struct amd_softc *amd, struct amd_srb *pSRB, u_int ioDir)
{
	struct amd_sg *psgl;
	u_int32_t lval;

	if (pSRB->SGIndex < pSRB->SGcount) {
		amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD|ioDir);	/* |EN_DMA_INT */

		if (!pSRB->SGToBeXferLen) {
			psgl = pSRB->pSGlist;
			pSRB->SGPhysAddr = psgl->SGXPtr;
			pSRB->SGToBeXferLen = psgl->SGXLen;
		}
		lval = pSRB->SGToBeXferLen;
		amd_write8(amd, CTCREG_LOW, lval);
		amd_write8(amd, CTCREG_MID, lval >> 8);
		amd_write8(amd, CURTXTCNTREG, lval >> 16);

		amd_write32(amd, DMA_XferCnt, pSRB->SGToBeXferLen);

		amd_write32(amd, DMA_XferAddr, pSRB->SGPhysAddr);

		pSRB->SRBState = SRB_DATA_XFER;

		amd_write8(amd, SCSICMDREG, DMA_COMMAND|INFO_XFER_CMD);

		amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD|ioDir);	/* |EN_DMA_INT */

		amd_write8(amd, DMA_Cmd, DMA_START_CMD|ioDir);	/* |EN_DMA_INT */
	} else {		/* xfer pad */
		if (pSRB->SGcount) {
			pSRB->AdaptStatus = H_OVER_UNDER_RUN;
			pSRB->SRBStatus |= OVER_RUN;
		}
		amd_write8(amd, CTCREG_LOW, 0);
		amd_write8(amd, CTCREG_MID, 0);
		amd_write8(amd, CURTXTCNTREG, 0);

		pSRB->SRBState |= SRB_XFERPAD;
		amd_write8(amd, SCSICMDREG, DMA_COMMAND|XFER_PAD_BYTE);
	}
}

static u_int
amd_CommandPhase1(struct amd_softc *amd, struct amd_srb *srb, u_int scsistat)
{
	amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	amdsetupcommand(amd, srb);

	srb->SRBState = SRB_COMMAND;
	amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
	return (scsistat);
}

static u_int
amd_StatusPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
	pSRB->SRBState = SRB_STATUS;
	amd_write8(amd, SCSICMDREG, INITIATOR_CMD_CMPLTE);
	return (scsistat);
}

static u_int
amd_MsgOutPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	if (amd->msgout_len == 0) {
		amd->msgout_buf[0] = MSG_NOOP;
		amd->msgout_len = 1;
	}
	amd_write8_multi(amd, SCSIFIFOREG, amd->msgout_buf, amd->msgout_len);
	amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
	return (scsistat);
}

static u_int
amd_MsgInPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
	amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
	return (scsistat);
}

static u_int
amd_NopPhase(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	return (scsistat);
}

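/*
 * Handle a bus-free condition. Depending on the state of the active SRB
 * this is either a selection timeout, a legitimate disconnect (remember
 * untagged SRBs for the eventual reselection), or command completion.
 */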
static void
amd_Disconnect(struct amd_softc * amd)
{
	struct amd_srb *srb;
	int target;
	int lun;

	srb = amd->active_srb;
	amd->active_srb = NULL;
	amd->last_phase = SCSI_BUS_FREE;
	amd_write8(amd, SCSICMDREG, EN_SEL_RESEL);
	target = amd->cur_target;
	lun = amd->cur_lun;

	if (srb == NULL) {
		/* Invalid reselection */
		amdrunwaiting(amd);
	} else if (srb->SRBState & SRB_ABORT_SENT) {
		/* Clean up and finish this srb */
#if 0
		while ((pSRB = TAILQ_FIRST(&amd->running_srbs)) != NULL) {
			/* XXX What about "done'ing" these srbs??? */
			if (pSRB->pSRBDCB == pDCB) {
				TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
				TAILQ_INSERT_HEAD(&amd->free_srbs, pSRB, links);
			}
		}
		amdrunwaiting(amd);
#endif
	} else {
		if ((srb->SRBState & (SRB_START | SRB_MSGOUT))
		 || !(srb->SRBState & (SRB_DISCONNECT | SRB_COMPLETED))) {
			srb->TargetStatus = AMD_SCSI_STAT_SEL_TIMEOUT;
			goto disc1;
		} else if (srb->SRBState & SRB_DISCONNECT) {
			if (!(srb->pccb->ccb_h.flags & CAM_TAG_ACTION_VALID))
				amd->untagged_srbs[target][lun] = srb;
			amdrunwaiting(amd);
		} else if (srb->SRBState & SRB_COMPLETED) {
	disc1:
			srb->SRBState = SRB_FREE;
			SRBdone(amd, srb);
		}
	}
	return;
}

static void
amd_Reselect(struct amd_softc *amd)
{
	struct amd_target_info *tinfo;
	u_int16_t disc_count;

	amd_clear_msg_state(amd);
	if (amd->active_srb != NULL) {
		/* Requeue the SRB for our attempted Selection */
		TAILQ_REMOVE(&amd->running_srbs, amd->active_srb, links);
		TAILQ_INSERT_HEAD(&amd->waiting_srbs, amd->active_srb, links);
		amd->active_srb = NULL;
	}
	/*
	 * Get the reselecting target's ID: the FIFO byte holds our own ID
	 * bit ORed with the target's, so strip ours and find the
	 * remaining bit.
	 */
	amd->cur_target = amd_read8(amd, SCSIFIFOREG);
	amd->cur_target ^= amd->HostID_Bit;
	amd->cur_target = ffs(amd->cur_target) - 1;
	amd->cur_lun = amd_read8(amd, SCSIFIFOREG) & 7;
	tinfo = &amd->tinfo[amd->cur_target];
	amd->active_srb = amd->untagged_srbs[amd->cur_target][amd->cur_lun];
	disc_count = amd->disc_count[amd->cur_target][amd->cur_lun];
	if (disc_count == 0) {
		printf("amd%d: Unexpected reselection for target %d, "
		       "Issuing Abort\n", amd->unit, amd->cur_target);
		amd->msgout_buf[0] = MSG_ABORT;
		amd->msgout_len = 1;
		amd_write8(amd, SCSICMDREG, SET_ATN_CMD);
	}
	if (amd->active_srb != NULL) {
		amd->disc_count[amd->cur_target][amd->cur_lun]--;
		amd->untagged_srbs[amd->cur_target][amd->cur_lun] = NULL;
	}

	amd_write8(amd, SCSIDESTIDREG, amd->cur_target);
	amd_write8(amd, SYNCPERIOREG, tinfo->sync_period_reg);
	amd_write8(amd, SYNCOFFREG, tinfo->sync_offset_reg);
	amd_write8(amd, CNTLREG1, tinfo->CtrlR1);
	amd_write8(amd, CNTLREG3, tinfo->CtrlR3);
	amd_write8(amd, CNTLREG4, tinfo->CtrlR4);
	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);	/* drop /ACK */
	amd->last_phase = SCSI_NOP0;
}

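/*
 * Post-process a completed SRB: sync and unload its DMA map, translate
 * the SCSI status into a CAM status (kicking off auto request-sense on a
 * CHECK CONDITION), then recycle the SRB and complete the CCB.
 */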
static void
SRBdone(struct amd_softc *amd, struct amd_srb *pSRB)
{
	u_int8_t bval, i, status;
	union ccb *pccb;
	struct ccb_scsiio *pcsio;
	struct amd_sg *ptr2;
	u_int32_t swlval;
	u_int target_id, target_lun;

	pccb = pSRB->pccb;
	pcsio = &pccb->csio;
	target_id = pSRB->pccb->ccb_h.target_id;
	target_lun = pSRB->pccb->ccb_h.target_lun;

	CAM_DEBUG(pccb->ccb_h.path, CAM_DEBUG_TRACE,
		  ("SRBdone - TagNumber %d\n", pSRB->TagNumber));

	if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;

		if ((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(amd->buffer_dmat, pSRB->dmamap, op);
		bus_dmamap_unload(amd->buffer_dmat, pSRB->dmamap);
	}

	status = pSRB->TargetStatus;
	pccb->ccb_h.status = CAM_REQ_CMP;
1864 if (pSRB->SRBFlag & AUTO_REQSENSE) {
1865 pSRB->SRBFlag &= ~AUTO_REQSENSE;
1866 pSRB->AdaptStatus = 0;
1867 pSRB->TargetStatus = SCSI_STATUS_CHECK_COND;
1868
1869 if (status == SCSI_STATUS_CHECK_COND) {
1870 pccb->ccb_h.status = CAM_SEL_TIMEOUT;
1871 goto ckc_e;
1872 }
1873 *((u_int32_t *)&(pSRB->CmdBlock[0])) = pSRB->Segment0[0];
1874
1875 pcsio->sense_resid = pcsio->sense_len
1876 - pSRB->TotalXferredLen;
1877 pSRB->TotalXferredLen = pSRB->Segment1[1];
1878 if (pSRB->TotalXferredLen) {
1879 /* ???? */
1880 pcsio->resid = pcsio->dxfer_len
1881 - pSRB->TotalXferredLen;
1882 /* The resid field contains valid data */
1883 /* Flush resid bytes on complete */
1884 } else {
1885 pcsio->scsi_status = SCSI_STATUS_CHECK_COND;
1886 }
1887 pccb->ccb_h.status = CAM_AUTOSNS_VALID|CAM_SCSI_STATUS_ERROR;
1888 goto ckc_e;
1889 }
1890 if (status) {
1891 if (status == SCSI_STATUS_CHECK_COND) {
1892
1893 if ((pSRB->SGIndex < pSRB->SGcount)
1894 && (pSRB->SGcount) && (pSRB->SGToBeXferLen)) {
1895 bval = pSRB->SGcount;
1896 swlval = pSRB->SGToBeXferLen;
1897 ptr2 = pSRB->pSGlist;
1898 ptr2++;
1899 for (i = pSRB->SGIndex + 1; i < bval; i++) {
1900 swlval += ptr2->SGXLen;
1901 ptr2++;
1902 }
1903 /* ??????? */
1904 pcsio->resid = (u_int32_t) swlval;
1905
1906#ifdef AMD_DEBUG0
1907 printf("XferredLen=%8x,NotYetXferLen=%8x,",
1908 pSRB->TotalXferredLen, swlval);
1909#endif
1910 }
1911 if ((pcsio->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0) {
1912#ifdef AMD_DEBUG0
1913 printf("RequestSense..................\n");
1914#endif
1915 RequestSense(amd, pSRB);
1916 return;
1917 }
1918 pcsio->scsi_status = SCSI_STATUS_CHECK_COND;
1919 pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1920 goto ckc_e;
1921 } else if (status == SCSI_STATUS_QUEUE_FULL) {
1922 pSRB->AdaptStatus = 0;
1923 pSRB->TargetStatus = 0;
1924 pcsio->scsi_status = SCSI_STATUS_QUEUE_FULL;
1925 pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1926 goto ckc_e;
1927 } else if (status == AMD_SCSI_STAT_SEL_TIMEOUT) {
1928 pSRB->AdaptStatus = H_SEL_TIMEOUT;
1929 pSRB->TargetStatus = 0;
1930
1931 pcsio->scsi_status = AMD_SCSI_STAT_SEL_TIMEOUT;
1932 pccb->ccb_h.status = CAM_SEL_TIMEOUT;
1933 } else if (status == SCSI_STATUS_BUSY) {
1934#ifdef AMD_DEBUG0
1935 printf("DC390: target busy at %s %d\n",
1936 __FILE__, __LINE__);
1937#endif
1938 pcsio->scsi_status = SCSI_STATUS_BUSY;
1939 pccb->ccb_h.status = CAM_SCSI_BUSY;
1940 } else if (status == SCSI_STATUS_RESERV_CONFLICT) {
1941#ifdef AMD_DEBUG0
1942 printf("DC390: target reserved at %s %d\n",
1943 __FILE__, __LINE__);
1944#endif
1945 pcsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT;
1946 pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; /* XXX */
1947 } else {
1948 pSRB->AdaptStatus = 0;
1949#ifdef AMD_DEBUG0
1950 printf("DC390: driver stuffup at %s %d\n",
1951 __FILE__, __LINE__);
1952#endif
1953 pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1954 }
1955 } else {
1956 status = pSRB->AdaptStatus;
1957 if (status & H_OVER_UNDER_RUN) {
1958 pSRB->TargetStatus = 0;
1959
1960 pccb->ccb_h.status = CAM_DATA_RUN_ERR;
1961 } else if (pSRB->SRBStatus & PARITY_ERROR) {
1962#ifdef AMD_DEBUG0
1963 printf("DC390: driver stuffup %s %d\n",
1964 __FILE__, __LINE__);
1965#endif
1966 /* Driver failed to perform operation */
1967 pccb->ccb_h.status = CAM_UNCOR_PARITY;
1968 } else { /* No error */
1969 pSRB->AdaptStatus = 0;
1970 pSRB->TargetStatus = 0;
1971 pcsio->resid = 0;
1972 /* there is no error, (sense is invalid) */
1973 }
1974 }
1975ckc_e:
c1139c5e 1976 crit_enter();
1977 if ((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
 1978 		/* Request failed: CAM expects the device queue frozen (CAM_DEV_QFRZN) */
1979 xpt_freeze_devq(pccb->ccb_h.path, 1);
1980 pccb->ccb_h.status |= CAM_DEV_QFRZN;
1981 }
1982 TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
1983 TAILQ_INSERT_HEAD(&amd->free_srbs, pSRB, links);
1984 amdrunwaiting(amd);
c1139c5e 1985 crit_exit();
1986 xpt_done(pccb);
1987
1988}
1989
1990static void
1991amd_ResetSCSIBus(struct amd_softc * amd)
1992{
c1139c5e 1993 crit_enter();
1994 amd->ACBFlag |= RESET_DEV;
1995 amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
1996 amd_write8(amd, SCSICMDREG, RST_SCSI_BUS_CMD);
c1139c5e 1997 crit_exit();
1998 return;
1999}
2000
2001static void
2002amd_ScsiRstDetect(struct amd_softc * amd)
2003{
2004 u_int32_t wlval;
2005
2006#ifdef AMD_DEBUG0
2007 printf("amd_ScsiRstDetect \n");
2008#endif
2009
2010 wlval = 1000;
 2011 	while (--wlval) {	/* spin ~1 second (999 x 1 ms) to let the bus settle */
2012 DELAY(1000);
2013 }
c1139c5e 2014 crit_enter();
2015
2016 amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
2017 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
2018
2019 if (amd->ACBFlag & RESET_DEV) {
2020 amd->ACBFlag |= RESET_DONE;
2021 } else {
2022 amd->ACBFlag |= RESET_DETECT;
2023 ResetDevParam(amd);
2024 amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
2025 AMD_TAG_WILDCARD, &amd->running_srbs,
2026 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
2027 amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
2028 AMD_TAG_WILDCARD, &amd->waiting_srbs,
2029 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
2030 amd->active_srb = NULL;
2031 amd->ACBFlag = 0;
2032 amdrunwaiting(amd);
2033 }
c1139c5e 2034 crit_exit();
2035 return;
2036}
2037
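/*
 * Fetch sense data by reusing the failing SRB: the original CDB and
 * transfer counters are stashed in Segment0/Segment1, a single-entry
 * S/G list is pointed at the CCB's sense buffer, and a REQUEST SENSE
 * command is started.  SRBdone() recognizes AUTO_REQSENSE on completion.
 */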
2038static void
2039RequestSense(struct amd_softc *amd, struct amd_srb *pSRB)
2040{
2041 union ccb *pccb;
2042 struct ccb_scsiio *pcsio;
2043
2044 pccb = pSRB->pccb;
2045 pcsio = &pccb->csio;
2046
2047 pSRB->SRBFlag |= AUTO_REQSENSE;
2048 pSRB->Segment0[0] = *((u_int32_t *) & (pSRB->CmdBlock[0]));
2049 pSRB->Segment0[1] = *((u_int32_t *) & (pSRB->CmdBlock[4]));
2050 pSRB->Segment1[0] = (pSRB->ScsiCmdLen << 8) + pSRB->SGcount;
2051 pSRB->Segment1[1] = pSRB->TotalXferredLen;
2052
2053 pSRB->AdaptStatus = 0;
2054 pSRB->TargetStatus = 0;
2055
2056 pSRB->Segmentx.SGXPtr = (u_int32_t) vtophys(&pcsio->sense_data);
2057 pSRB->Segmentx.SGXLen = (u_int32_t) pcsio->sense_len;
2058
2059 pSRB->pSGlist = &pSRB->Segmentx;
2060 pSRB->SGcount = 1;
2061 pSRB->SGIndex = 0;
2062
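	/*
	 * Hand-build the 6-byte REQUEST SENSE CDB in place: opcode 0x03,
	 * LUN in bits 5-7 of byte 1, allocation length in byte 4.  The
	 * 32-bit store zeroes bytes 1-3 and effectively assumes a
	 * little-endian host.
	 */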
2063 *((u_int32_t *) & (pSRB->CmdBlock[0])) = 0x00000003;
2064 pSRB->CmdBlock[1] = pSRB->pccb->ccb_h.target_lun << 5;
2065 *((u_int16_t *) & (pSRB->CmdBlock[4])) = pcsio->sense_len;
2066 pSRB->ScsiCmdLen = 6;
2067
2068 pSRB->TotalXferredLen = 0;
2069 pSRB->SGToBeXferLen = 0;
2070 if (amdstart(amd, pSRB) != 0) {
2071 TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
2072 TAILQ_INSERT_HEAD(&amd->waiting_srbs, pSRB, links);
2073 }
2074}
2075
2076static void
2077amd_InvalidCmd(struct amd_softc * amd)
2078{
2079 struct amd_srb *srb;
2080
2081 srb = amd->active_srb;
2082 if (srb->SRBState & (SRB_START|SRB_MSGOUT))
2083 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
2084}
2085
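/*
 * Carve the softc's SRB array into the free SRB pool: assign each SRB
 * its tag number, create its per-transaction DMA map, and chain it
 * onto the free_srbs list.
 */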
2086void
2087amd_linkSRB(struct amd_softc *amd)
2088{
05498f57 2089 u_int16_t count, i;
984263bc 2090 struct amd_srb *psrb;
05498f57 2091 int error;
2092
2093 count = amd->SRBCount;
2094
2095 for (i = 0; i < count; i++) {
2096 psrb = (struct amd_srb *)&amd->SRB_array[i];
2097 psrb->TagNumber = i;
2098
2099 /*
2100 * Create the dmamap. This is no longer optional!
2101 */
2102 error = bus_dmamap_create(amd->buffer_dmat, 0, &psrb->dmamap);
2103 if (error) {
2104 device_printf(amd->dev, "Error %d creating buffer "
2105 "dmamap!\n", error);
2106 break;
2107 }
2108 TAILQ_INSERT_TAIL(&amd->free_srbs, psrb, links);
2109 }
2110}
2111
2112void
2113amd_EnDisableCE(struct amd_softc *amd, int mode, int *regval)
2114{
2115 if (mode == ENABLE_CE) {
2116 *regval = 0xc0;
2117 } else {
2118 *regval = 0x80;
2119 }
2120 pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
2121 if (mode == DISABLE_CE) {
2122 pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
2123 }
2124 DELAY(160);
2125}
2126
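/*
 * The configuration EEPROM is accessed by bit-banging through PCI
 * config space: amd_EnDisableCE() raises and drops chip enable,
 * amd_EEpromOutDI() clocks one command/address bit out, amd_Prepare()
 * shifts out a 9-bit read command, and amd_EEpromInDO() /
 * EEpromGetData1() clock the 16 data bits back in, paced by DELAY(160).
 */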
2127void
2128amd_EEpromOutDI(struct amd_softc *amd, int *regval, int Carry)
2129{
2130 u_int bval;
2131
2132 bval = 0;
2133 if (Carry) {
2134 bval = 0x40;
2135 *regval = 0x80;
2136 pci_write_config(amd->dev, *regval, bval, /*bytes*/1);
2137 }
2138 DELAY(160);
2139 bval |= 0x80;
2140 pci_write_config(amd->dev, *regval, bval, /*bytes*/1);
2141 DELAY(160);
2142 pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
2143 DELAY(160);
2144}
2145
2146static int
2147amd_EEpromInDO(struct amd_softc *amd)
2148{
2149 pci_write_config(amd->dev, 0x80, 0x80, /*bytes*/1);
2150 DELAY(160);
2151 pci_write_config(amd->dev, 0x80, 0x40, /*bytes*/1);
2152 DELAY(160);
2153 if (pci_read_config(amd->dev, 0, /*bytes*/1) == 0x22)
2154 return (1);
2155 return (0);
2156}
2157
2158static u_int16_t
2159EEpromGetData1(struct amd_softc *amd)
2160{
2161 u_int i;
2162 u_int carryFlag;
2163 u_int16_t wval;
2164
2165 wval = 0;
2166 for (i = 0; i < 16; i++) {
2167 wval <<= 1;
2168 carryFlag = amd_EEpromInDO(amd);
2169 wval |= carryFlag;
2170 }
2171 return (wval);
2172}
2173
2174static void
2175amd_Prepare(struct amd_softc *amd, int *regval, u_int8_t EEpromCmd)
2176{
2177 u_int i, j;
2178 int carryFlag;
2179
2180 carryFlag = 1;
2181 j = 0x80;
2182 for (i = 0; i < 9; i++) {
2183 amd_EEpromOutDI(amd, regval, carryFlag);
2184 carryFlag = (EEpromCmd & j) ? 1 : 0;
2185 j >>= 1;
2186 }
2187}
2188
2189static void
2190amd_ReadEEprom(struct amd_softc *amd)
2191{
2192 int regval;
2193 u_int i;
2194 u_int16_t *ptr;
2195 u_int8_t cmd;
2196
2197 ptr = (u_int16_t *)&amd->eepromBuf[0];
2198 cmd = EEPROM_READ;
2199 for (i = 0; i < 0x40; i++) {
2200 amd_EnDisableCE(amd, ENABLE_CE, &regval);
2201 amd_Prepare(amd, &regval, cmd);
2202 *ptr = EEpromGetData1(amd);
2203 ptr++;
2204 cmd++;
2205 amd_EnDisableCE(amd, DISABLE_CE, &regval);
2206 }
2207}
2208
2209static void
2210amd_load_defaults(struct amd_softc *amd)
2211{
2212 int target;
2213
2214 bzero(&amd->eepromBuf, sizeof amd->eepromBuf);
2215 for (target = 0; target < MAX_SCSI_ID; target++)
2216 amd->eepromBuf[target << 2] =
2217 (TAG_QUEUING|EN_DISCONNECT|SYNC_NEGO|PARITY_CHK);
2218 amd->eepromBuf[EE_ADAPT_SCSI_ID] = 7;
2219 amd->eepromBuf[EE_MODE2] = ACTIVE_NEGATION|LUN_CHECK|GREATER_1G;
2220 amd->eepromBuf[EE_TAG_CMD_NUM] = 4;
2221}
2222
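/*
 * Validate the EEPROM image by summing its 16-bit words against
 * EE_CHECKSUM; on mismatch fall back to the defaults above (disconnect,
 * tagged queueing, sync negotiation and parity checking enabled for
 * all targets, host ID 7).
 */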
2223static void
2224amd_load_eeprom_or_defaults(struct amd_softc *amd)
2225{
2226 u_int16_t wval, *ptr;
2227 u_int8_t i;
2228
2229 amd_ReadEEprom(amd);
2230 wval = 0;
2231 ptr = (u_int16_t *) & amd->eepromBuf[0];
2232 for (i = 0; i < EE_DATA_SIZE; i += 2, ptr++)
2233 wval += *ptr;
2234
2235 if (wval != EE_CHECKSUM) {
2236 if (bootverbose)
2237 printf("amd%d: SEEPROM data unavailable. "
2238 "Using default device parameters.\n",
2239 amd->unit);
2240 amd_load_defaults(amd);
2241 }
2242}
2243
2244/*
2245 **********************************************************************
 2246  * Function : static int amd_init (device_t dev)
 2247  * Purpose : initialize the internal structures for a given SCSI host
 2248  * Inputs : dev - newbus device handle for this host adapter
2249 **********************************************************************
2250 */
2251static int
2252amd_init(device_t dev)
2253{
2254 struct amd_softc *amd = device_get_softc(dev);
2255 struct resource *iores;
2256 int i, rid;
2257 u_int bval;
2258
2259 rid = PCI_BASE_ADDR0;
2260 iores = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0, ~0, 1,
2261 RF_ACTIVE);
2262 if (iores == NULL) {
2263 if (bootverbose)
2264 printf("amd_init: bus_alloc_resource failure!\n");
2265 return ENXIO;
2266 }
2267 amd->tag = rman_get_bustag(iores);
2268 amd->bsh = rman_get_bushandle(iores);
2269
2270 /* DMA tag for mapping buffers into device visible space. */
2271 if (bus_dma_tag_create(/*parent_dmat*/NULL, /*alignment*/1,
2272 /*boundary*/0,
2273 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
2274 /*highaddr*/BUS_SPACE_MAXADDR,
2275 /*filter*/NULL, /*filterarg*/NULL,
2276 /*maxsize*/MAXBSIZE, /*nsegments*/AMD_NSEG,
2277 /*maxsegsz*/AMD_MAXTRANSFER_SIZE,
2278 /*flags*/BUS_DMA_ALLOCNOW,
2279 &amd->buffer_dmat) != 0) {
2280 if (bootverbose)
2281 printf("amd_init: bus_dma_tag_create failure!\n");
2282 return ENXIO;
2283 }
2284 TAILQ_INIT(&amd->free_srbs);
2285 TAILQ_INIT(&amd->running_srbs);
2286 TAILQ_INIT(&amd->waiting_srbs);
2287 amd->last_phase = SCSI_BUS_FREE;
2288 amd->dev = dev;
2289 amd->unit = device_get_unit(dev);
2290 amd->SRBCount = MAX_SRB_CNT;
2291 amd->status = 0;
2292 amd_load_eeprom_or_defaults(amd);
2293 amd->max_id = 7;
2294 if (amd->eepromBuf[EE_MODE2] & LUN_CHECK) {
2295 amd->max_lun = 7;
2296 } else {
2297 amd->max_lun = 0;
2298 }
2299 amd->AdaptSCSIID = amd->eepromBuf[EE_ADAPT_SCSI_ID];
2300 amd->HostID_Bit = (1 << amd->AdaptSCSIID);
2301 amd->AdaptSCSILUN = 0;
2302 /* (eepromBuf[EE_TAG_CMD_NUM]) << 2; */
2303 amd->ACBFlag = 0;
2304 amd->Gmode2 = amd->eepromBuf[EE_MODE2];
2305 amd_linkSRB(amd);
2306 for (i = 0; i <= amd->max_id; i++) {
2307
2308 if (amd->AdaptSCSIID != i) {
2309 struct amd_target_info *tinfo;
2310 PEEprom prom;
2311
2312 tinfo = &amd->tinfo[i];
2313 prom = (PEEprom)&amd->eepromBuf[i << 2];
2314 if ((prom->EE_MODE1 & EN_DISCONNECT) != 0) {
2315 tinfo->disc_tag |= AMD_USR_DISCENB;
2316 if ((prom->EE_MODE1 & TAG_QUEUING) != 0)
2317 tinfo->disc_tag |= AMD_USR_TAGENB;
2318 }
2319 if ((prom->EE_MODE1 & SYNC_NEGO) != 0) {
2320 tinfo->user.period =
2321 eeprom_period[prom->EE_SPEED];
2322 tinfo->user.offset = AMD_MAX_SYNC_OFFSET;
2323 }
2324 tinfo->CtrlR1 = amd->AdaptSCSIID;
2325 if ((prom->EE_MODE1 & PARITY_CHK) != 0)
2326 tinfo->CtrlR1 |= PARITY_ERR_REPO;
2327 tinfo->CtrlR3 = FAST_CLK;
2328 tinfo->CtrlR4 = EATER_25NS;
2329 if ((amd->eepromBuf[EE_MODE2] & ACTIVE_NEGATION) != 0)
2330 tinfo->CtrlR4 |= NEGATE_REQACKDATA;
2331 }
2332 }
2333 amd_write8(amd, SCSITIMEOUTREG, 153); /* 250ms selection timeout */
2334 /* Conversion factor = 0 , 40MHz clock */
2335 amd_write8(amd, CLKFACTREG, CLK_FREQ_40MHZ);
2336 /* NOP cmd - clear command register */
2337 amd_write8(amd, SCSICMDREG, NOP_CMD);
2338 amd_write8(amd, CNTLREG2, EN_FEATURE|EN_SCSI2_CMD);
2339 amd_write8(amd, CNTLREG3, FAST_CLK);
2340 bval = EATER_25NS;
2341 if (amd->eepromBuf[EE_MODE2] & ACTIVE_NEGATION) {
2342 bval |= NEGATE_REQACKDATA;
2343 }
2344 amd_write8(amd, CNTLREG4, bval);
2345
2346 /* Disable SCSI bus reset interrupt */
2347 amd_write8(amd, CNTLREG1, DIS_INT_ON_SCSI_RST);
2348
2349 return 0;
2350}
2351
2352/*
2353 * attach and init a host adapter
2354 */
2355static int
2356amd_attach(device_t dev)
2357{
2358 struct cam_devq *devq; /* Device Queue to use for this SIM */
2359 u_int8_t intstat;
2360 struct amd_softc *amd = device_get_softc(dev);
2361 int unit = device_get_unit(dev);
2362 int rid;
2363 void *ih;
2364 struct resource *irqres;
2365
2366 if (amd_init(dev)) {
2367 if (bootverbose)
2368 printf("amd_attach: amd_init failure!\n");
2369 return ENXIO;
2370 }
2371
2372 /* Reset Pending INT */
2373 intstat = amd_read8(amd, INTSTATREG);
2374
2375 /* After setting up the adapter, map our interrupt */
2376 rid = 0;
2377 irqres = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
2378 RF_SHAREABLE | RF_ACTIVE);
2379 if (irqres == NULL ||
ee61f228 2380 bus_setup_intr(dev, irqres, 0, amd_intr, amd, &ih, NULL)
e9cb6d99 2381 ) {
2382 if (bootverbose)
2383 printf("amd%d: unable to register interrupt handler!\n",
2384 unit);
2385 return ENXIO;
2386 }
2387
2388 /*
 2389 	 * Create the device queue for our SIM, then let the CAM generic
 2390 	 * SCSI layer find the SCSI devices on the bus.  (MAX_START_JOB - 1)
 2391 	 * is the maximum number of simultaneous transactions the SIM
 2392 	 * will accept.
2393 */
2394 devq = cam_simq_alloc(MAX_START_JOB);
2395 if (devq == NULL) {
2396 if (bootverbose)
2397 printf("amd_attach: cam_simq_alloc failure!\n");
2398 return ENXIO;
2399 }
2400
2401 amd->psim = cam_sim_alloc(amd_action, amd_poll, "amd",
2402 amd, amd->unit, 1, MAX_TAGS_CMD_QUEUE,
2403 devq);
3aed1355 2404 cam_simq_release(devq);
984263bc 2405 if (amd->psim == NULL) {
2406 if (bootverbose)
2407 printf("amd_attach: cam_sim_alloc failure!\n");
2408 return ENXIO;
2409 }
2410
2411 if (xpt_bus_register(amd->psim, 0) != CAM_SUCCESS) {
3aed1355 2412 cam_sim_free(amd->psim);
2413 if (bootverbose)
2414 printf("amd_attach: xpt_bus_register failure!\n");
2415 return ENXIO;
2416 }
2417
2418 if (xpt_create_path(&amd->ppath, /* periph */ NULL,
2419 cam_sim_path(amd->psim), CAM_TARGET_WILDCARD,
2420 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2421 xpt_bus_deregister(cam_sim_path(amd->psim));
3aed1355 2422 cam_sim_free(amd->psim);
2423 if (bootverbose)
2424 printf("amd_attach: xpt_create_path failure!\n");
2425 return ENXIO;
2426 }
2427
2428 return 0;
2429}
2430
2431static int
2432amd_probe(device_t dev)
2433{
2434 if (pci_get_devid(dev) == PCI_DEVICE_ID_AMD53C974) {
2435 device_set_desc(dev,
2436 "Tekram DC390(T)/AMD53c974 SCSI Host Adapter");
2437 return 0;
2438 }
2439 return ENXIO;
2440}
2441
2442static device_method_t amd_methods[] = {
2443 /* Device interface */
2444 DEVMETHOD(device_probe, amd_probe),
2445 DEVMETHOD(device_attach, amd_attach),
2446 { 0, 0 }
2447};
2448
2449static driver_t amd_driver = {
2450 "amd", amd_methods, sizeof(struct amd_softc)
2451};
2452
2453static devclass_t amd_devclass;
2454DRIVER_MODULE(amd, pci, amd_driver, amd_devclass, 0, 0);