kernel: Remove some unused variables in RAID and disk drivers.
[dragonfly.git] sys/dev/disk/aic7xxx/aic79xx.c
1/*
2 * Core routines and tables shareable across OS platforms.
3 *
4 * Copyright (c) 1994-2002, 2004 Justin T. Gibbs.
5 * Copyright (c) 2000-2003 Adaptec Inc.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions, and the following disclaimer,
13 * without modification.
14 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
15 * substantially similar to the "NO WARRANTY" disclaimer below
16 * ("Disclaimer") and any redistribution must be conditioned upon
17 * including a substantially similar Disclaimer requirement for further
18 * binary redistribution.
19 * 3. Neither the names of the above-listed copyright holders nor the names
20 * of any contributors may be used to endorse or promote products derived
21 * from this software without specific prior written permission.
22 *
23 * Alternatively, this software may be distributed under the terms of the
24 * GNU General Public License ("GPL") version 2 as published by the Free
25 * Software Foundation.
26 *
27 * NO WARRANTY
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
36 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
37 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38 * POSSIBILITY OF SUCH DAMAGES.
39 *
40 * $Id: //depot/aic7xxx/aic7xxx/aic79xx.c#246 $
41 *
42 * $FreeBSD: src/sys/dev/aic7xxx/aic79xx.c,v 1.40 2007/04/19 18:53:52 scottl Exp $
43 */
44
45#include "aic79xx_osm.h"
46#include "aic79xx_inline.h"
47#include "aicasm/aicasm_insformat.h"
48
49/******************************** Globals *************************************/
50struct ahd_softc_tailq ahd_tailq = TAILQ_HEAD_INITIALIZER(ahd_tailq);
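/*
 * Tunable: when non-zero the core will also claim controllers strapped for
 * HostRAID operation (descriptive note inferred from the variable name; the
 * check that consumes it lives outside this file).
 */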
51uint32_t ahd_attach_to_HostRAID_controllers = 1;
52
53/***************************** Lookup Tables **********************************/
54char *ahd_chip_names[] =
55{
56 "NONE",
57 "aic7901",
58 "aic7902",
59 "aic7901A"
60};
61static const u_int num_chip_names = NUM_ELEMENTS(ahd_chip_names);
62
63/*
64 * Hardware error codes.
65 */
66struct ahd_hard_error_entry {
67 uint8_t error;
68 char *errmesg;
69};
70
71static struct ahd_hard_error_entry ahd_hard_errors[] = {
72 { DSCTMOUT, "Discard Timer has timed out" },
73 { ILLOPCODE, "Illegal Opcode in sequencer program" },
74 { SQPARERR, "Sequencer Parity Error" },
75 { DPARERR, "Data-path Parity Error" },
76 { MPARERR, "Scratch or SCB Memory Parity Error" },
77 { CIOPARERR, "CIOBUS Parity Error" },
78};
79static const u_int num_errors = NUM_ELEMENTS(ahd_hard_errors);
80
81static struct ahd_phase_table_entry ahd_phase_table[] =
82{
83 { P_DATAOUT, MSG_NOOP, "in Data-out phase" },
84 { P_DATAIN, MSG_INITIATOR_DET_ERR, "in Data-in phase" },
85 { P_DATAOUT_DT, MSG_NOOP, "in DT Data-out phase" },
86 { P_DATAIN_DT, MSG_INITIATOR_DET_ERR, "in DT Data-in phase" },
87 { P_COMMAND, MSG_NOOP, "in Command phase" },
88 { P_MESGOUT, MSG_NOOP, "in Message-out phase" },
89 { P_STATUS, MSG_INITIATOR_DET_ERR, "in Status phase" },
90 { P_MESGIN, MSG_PARITY_ERROR, "in Message-in phase" },
91 { P_BUSFREE, MSG_NOOP, "while idle" },
92 { 0, MSG_NOOP, "in unknown phase" }
93};
94
95/*
96 * In most cases we only wish to iterate over real phases, so
97 * exclude the last element from the count.
98 */
99static const u_int num_phases = NUM_ELEMENTS(ahd_phase_table) - 1;
100
101/* Our Sequencer Program */
102#include "aic79xx_seq.h"
103
104/**************************** Function Declarations ***************************/
105static void ahd_handle_transmission_error(struct ahd_softc *ahd);
106static void ahd_handle_lqiphase_error(struct ahd_softc *ahd,
107 u_int lqistat1);
108static int ahd_handle_pkt_busfree(struct ahd_softc *ahd,
109 u_int busfreetime);
110static int ahd_handle_nonpkt_busfree(struct ahd_softc *ahd);
111static void ahd_handle_proto_violation(struct ahd_softc *ahd);
112static void ahd_force_renegotiation(struct ahd_softc *ahd,
113 struct ahd_devinfo *devinfo);
114
115static struct ahd_tmode_tstate*
116 ahd_alloc_tstate(struct ahd_softc *ahd,
117 u_int scsi_id, char channel);
118#ifdef AHD_TARGET_MODE
119static void ahd_free_tstate(struct ahd_softc *ahd,
120 u_int scsi_id, char channel, int force);
121#endif
122static void ahd_devlimited_syncrate(struct ahd_softc *ahd,
123 struct ahd_initiator_tinfo *,
124 u_int *period,
125 u_int *ppr_options,
126 role_t role);
127static void ahd_update_neg_table(struct ahd_softc *ahd,
128 struct ahd_devinfo *devinfo,
129 struct ahd_transinfo *tinfo);
130static void ahd_update_pending_scbs(struct ahd_softc *ahd);
131static void ahd_fetch_devinfo(struct ahd_softc *ahd,
132 struct ahd_devinfo *devinfo);
133static void ahd_scb_devinfo(struct ahd_softc *ahd,
134 struct ahd_devinfo *devinfo,
135 struct scb *scb);
136static void ahd_setup_initiator_msgout(struct ahd_softc *ahd,
137 struct ahd_devinfo *devinfo,
138 struct scb *scb);
139static void ahd_build_transfer_msg(struct ahd_softc *ahd,
140 struct ahd_devinfo *devinfo);
141static void ahd_construct_sdtr(struct ahd_softc *ahd,
142 struct ahd_devinfo *devinfo,
143 u_int period, u_int offset);
144static void ahd_construct_wdtr(struct ahd_softc *ahd,
145 struct ahd_devinfo *devinfo,
146 u_int bus_width);
147static void ahd_construct_ppr(struct ahd_softc *ahd,
148 struct ahd_devinfo *devinfo,
149 u_int period, u_int offset,
150 u_int bus_width, u_int ppr_options);
151static void ahd_clear_msg_state(struct ahd_softc *ahd);
152static void ahd_handle_message_phase(struct ahd_softc *ahd);
153typedef enum {
154 AHDMSG_1B,
155 AHDMSG_2B,
156 AHDMSG_EXT
157} ahd_msgtype;
158static int ahd_sent_msg(struct ahd_softc *ahd, ahd_msgtype type,
159 u_int msgval, int full);
160static int ahd_parse_msg(struct ahd_softc *ahd,
161 struct ahd_devinfo *devinfo);
162static int ahd_handle_msg_reject(struct ahd_softc *ahd,
163 struct ahd_devinfo *devinfo);
164static void ahd_handle_ign_wide_residue(struct ahd_softc *ahd,
165 struct ahd_devinfo *devinfo);
166static void ahd_reinitialize_dataptrs(struct ahd_softc *ahd);
167static void ahd_handle_devreset(struct ahd_softc *ahd,
168 struct ahd_devinfo *devinfo,
169 u_int lun, cam_status status,
170 char *message, int verbose_level);
171#ifdef AHD_TARGET_MODE
172static void ahd_setup_target_msgin(struct ahd_softc *ahd,
173 struct ahd_devinfo *devinfo,
174 struct scb *scb);
175#endif
176
177static u_int ahd_sglist_size(struct ahd_softc *ahd);
178static u_int ahd_sglist_allocsize(struct ahd_softc *ahd);
179static bus_dmamap_callback_t
180 ahd_dmamap_cb;
181static void ahd_initialize_hscbs(struct ahd_softc *ahd);
182static int ahd_init_scbdata(struct ahd_softc *ahd);
183static void ahd_fini_scbdata(struct ahd_softc *ahd);
184static void ahd_setup_iocell_workaround(struct ahd_softc *ahd);
185static void ahd_iocell_first_selection(struct ahd_softc *ahd);
186static void ahd_add_col_list(struct ahd_softc *ahd,
187 struct scb *scb, u_int col_idx);
188static void ahd_rem_col_list(struct ahd_softc *ahd,
189 struct scb *scb);
190static void ahd_chip_init(struct ahd_softc *ahd);
191static void ahd_qinfifo_requeue(struct ahd_softc *ahd,
192 struct scb *prev_scb,
193 struct scb *scb);
194static int ahd_qinfifo_count(struct ahd_softc *ahd);
195static int ahd_search_scb_list(struct ahd_softc *ahd, int target,
196 char channel, int lun, u_int tag,
197 role_t role, uint32_t status,
198 ahd_search_action action,
199 u_int *list_head, u_int *list_tail,
200 u_int tid);
201static void ahd_stitch_tid_list(struct ahd_softc *ahd,
202 u_int tid_prev, u_int tid_cur,
203 u_int tid_next);
204static void ahd_add_scb_to_free_list(struct ahd_softc *ahd,
205 u_int scbid);
206static u_int ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid,
207 u_int prev, u_int next, u_int tid);
208static void ahd_reset_current_bus(struct ahd_softc *ahd);
209static ahd_callback_t ahd_reset_poll;
210static ahd_callback_t ahd_stat_timer;
211#ifdef AHD_DUMP_SEQ
212static void ahd_dumpseq(struct ahd_softc *ahd);
213#endif
214static void ahd_loadseq(struct ahd_softc *ahd);
215static int ahd_check_patch(struct ahd_softc *ahd,
216 struct patch **start_patch,
217 u_int start_instr, u_int *skip_addr);
218static u_int ahd_resolve_seqaddr(struct ahd_softc *ahd,
219 u_int address);
220static void ahd_download_instr(struct ahd_softc *ahd,
221 u_int instrptr, uint8_t *dconsts);
222static int ahd_probe_stack_size(struct ahd_softc *ahd);
223static int ahd_other_scb_timeout(struct ahd_softc *ahd,
224 struct scb *scb,
225 struct scb *other_scb);
226static int ahd_scb_active_in_fifo(struct ahd_softc *ahd,
227 struct scb *scb);
228static void ahd_run_data_fifo(struct ahd_softc *ahd,
229 struct scb *scb);
230
231#ifdef AHD_TARGET_MODE
232static void ahd_queue_lstate_event(struct ahd_softc *ahd,
233 struct ahd_tmode_lstate *lstate,
234 u_int initiator_id,
235 u_int event_type,
236 u_int event_arg);
237static void ahd_update_scsiid(struct ahd_softc *ahd,
238 u_int targid_mask);
239static int ahd_handle_target_cmd(struct ahd_softc *ahd,
240 struct target_cmd *cmd);
241#endif
242
243/******************************** Private Inlines *****************************/
244static __inline void ahd_assert_atn(struct ahd_softc *ahd);
245static __inline int ahd_currently_packetized(struct ahd_softc *ahd);
246static __inline int ahd_set_active_fifo(struct ahd_softc *ahd);
247
248static __inline void
249ahd_assert_atn(struct ahd_softc *ahd)
250{
251 ahd_outb(ahd, SCSISIGO, ATNO);
252}
253
254/*
255 * Determine if the current connection has a packetized
256 * agreement. This does not necessarily mean that we
257 * are currently in a packetized transfer. We could
258 * just as easily be sending or receiving a message.
259 */
260static __inline int
261ahd_currently_packetized(struct ahd_softc *ahd)
262{
263 ahd_mode_state saved_modes;
264 int packetized;
265
266 saved_modes = ahd_save_modes(ahd);
267 if ((ahd->bugs & AHD_PKTIZED_STATUS_BUG) != 0) {
268 /*
269 * The packetized bit refers to the last
270 * connection, not the current one. Check
271 * for non-zero LQISTATE instead.
272 */
273 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
274 packetized = ahd_inb(ahd, LQISTATE) != 0;
275 } else {
276 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
277 packetized = ahd_inb(ahd, LQISTAT2) & PACKETIZED;
278 }
279 ahd_restore_modes(ahd, saved_modes);
280 return (packetized);
281}
282
283static __inline int
284ahd_set_active_fifo(struct ahd_softc *ahd)
285{
286 u_int active_fifo;
287
288 AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
289 active_fifo = ahd_inb(ahd, DFFSTAT) & CURRFIFO;
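	/*
	 * The two data FIFOs are selected by mode values 0 and 1
	 * (AHD_MODE_DFF0/AHD_MODE_DFF1), so the FIFO number read from
	 * DFFSTAT can be handed to ahd_set_modes() unchanged.
	 */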
290 switch (active_fifo) {
291 case 0:
292 case 1:
293 ahd_set_modes(ahd, active_fifo, active_fifo);
294 return (1);
295 default:
296 return (0);
297 }
298}
299
300/************************* Sequencer Execution Control ************************/
301/*
302 * Restart the sequencer program from address zero
303 */
304void
305ahd_restart(struct ahd_softc *ahd)
306{
307
308 ahd_pause(ahd);
309
310 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
311
312 /* No more pending messages */
313 ahd_clear_msg_state(ahd);
314 ahd_outb(ahd, SCSISIGO, 0); /* De-assert BSY */
315 ahd_outb(ahd, MSG_OUT, MSG_NOOP); /* No message to send */
316 ahd_outb(ahd, SXFRCTL1, ahd_inb(ahd, SXFRCTL1) & ~BITBUCKET);
317 ahd_outb(ahd, SEQINTCTL, 0);
318 ahd_outb(ahd, LASTPHASE, P_BUSFREE);
319 ahd_outb(ahd, SEQ_FLAGS, 0);
320 ahd_outb(ahd, SAVED_SCSIID, 0xFF);
321 ahd_outb(ahd, SAVED_LUN, 0xFF);
322
323 /*
324 * Ensure that the sequencer's idea of TQINPOS
325 * matches our own. The sequencer increments TQINPOS
326 * only after it sees a DMA complete and a reset could
327 * occur before the increment, leaving the kernel believing
328 * the command arrived while the sequencer does not.
329 */
330 ahd_outb(ahd, TQINPOS, ahd->tqinfifonext);
331
332 /* Always allow reselection */
333 ahd_outb(ahd, SCSISEQ1,
334 ahd_inb(ahd, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP));
335 ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
336
337 /*
338 * Clear any pending sequencer interrupt. It is no
339 * longer relevant since we're resetting the Program
340 * Counter.
341 */
342 ahd_outb(ahd, CLRINT, CLRSEQINT);
343
344 ahd_outb(ahd, SEQCTL0, FASTMODE|SEQRESET);
345 ahd_unpause(ahd);
346}
347
348void
349ahd_clear_fifo(struct ahd_softc *ahd, u_int fifo)
350{
351 ahd_mode_state saved_modes;
352
353#ifdef AHD_DEBUG
354 if ((ahd_debug & AHD_SHOW_FIFOS) != 0)
355 kprintf("%s: Clearing FIFO %d\n", ahd_name(ahd), fifo);
356#endif
357 saved_modes = ahd_save_modes(ahd);
358 ahd_set_modes(ahd, fifo, fifo);
359 ahd_outb(ahd, DFFSXFRCTL, RSTCHN|CLRSHCNT);
360 if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0)
361 ahd_outb(ahd, CCSGCTL, CCSGRESET);
362 ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR);
363 ahd_outb(ahd, SG_STATE, 0);
364 ahd_restore_modes(ahd, saved_modes);
365}
366
367/************************* Input/Output Queues ********************************/
368/*
369 * Flush any completed commands that are sitting in the command
370 * complete queues down on the chip but have yet to be dma'ed back up.
371 */
372void
373ahd_flush_qoutfifo(struct ahd_softc *ahd)
374{
375 struct scb *scb;
376 ahd_mode_state saved_modes;
377 u_int saved_scbptr;
378 u_int ccscbctl;
379 u_int scbid;
380 u_int next_scbid;
381
382 saved_modes = ahd_save_modes(ahd);
383
384 /*
385 * Flush the good status FIFO for completed packetized commands.
386 */
387 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
388 saved_scbptr = ahd_get_scbptr(ahd);
389 while ((ahd_inb(ahd, LQISTAT2) & LQIGSAVAIL) != 0) {
390 u_int fifo_mode;
391 u_int i;
392
393 scbid = ahd_inw(ahd, GSFIFO);
394 scb = ahd_lookup_scb(ahd, scbid);
395 if (scb == NULL) {
396 kprintf("%s: Warning - GSFIFO SCB %d invalid\n",
397 ahd_name(ahd), scbid);
398 continue;
399 }
400 /*
401 * Determine if this transaction is still active in
402 * any FIFO. If it is, we must flush that FIFO to
403 * the host before completing the command.
404 */
405 fifo_mode = 0;
406rescan_fifos:
407 for (i = 0; i < 2; i++) {
408 /* Toggle to the other mode. */
409 fifo_mode ^= 1;
410 ahd_set_modes(ahd, fifo_mode, fifo_mode);
411
412 if (ahd_scb_active_in_fifo(ahd, scb) == 0)
413 continue;
414
415 ahd_run_data_fifo(ahd, scb);
416
417 /*
418 * Running this FIFO may cause a CFG4DATA for
419 * this same transaction to assert in the other
420 * FIFO or a new snapshot SAVEPTRS interrupt
421 * in this FIFO. Even running a FIFO may not
422 * clear the transaction if we are still waiting
423 * for data to drain to the host. We must loop
424 * until the transaction is not active in either
425 * FIFO just to be sure. Reset our loop counter
426 * so we will visit both FIFOs again before
427 * declaring this transaction finished. We
428 * also delay a bit so that status has a chance
429 * to change before we look at this FIFO again.
430 */
431 aic_delay(200);
432 goto rescan_fifos;
433 }
434 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
435 ahd_set_scbptr(ahd, scbid);
436 if ((ahd_inb_scbram(ahd, SCB_SGPTR) & SG_LIST_NULL) == 0
437 && ((ahd_inb_scbram(ahd, SCB_SGPTR) & SG_FULL_RESID) != 0
438 || (ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR)
439 & SG_LIST_NULL) != 0)) {
440 u_int comp_head;
441
442 /*
443 * The transfer completed with a residual.
444 * Place this SCB on the complete DMA list
445 * so that we update our in-core copy of the
446 * SCB before completing the command.
447 */
448 ahd_outb(ahd, SCB_SCSI_STATUS, 0);
449 ahd_outb(ahd, SCB_SGPTR,
450 ahd_inb_scbram(ahd, SCB_SGPTR)
451 | SG_STATUS_VALID);
452 ahd_outw(ahd, SCB_TAG, scbid);
453 ahd_outw(ahd, SCB_NEXT_COMPLETE, SCB_LIST_NULL);
454 comp_head = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD);
455 if (SCBID_IS_NULL(comp_head)) {
456 ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, scbid);
457 ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, scbid);
458 } else {
459 u_int tail;
460
461 tail = ahd_inw(ahd, COMPLETE_DMA_SCB_TAIL);
462 ahd_set_scbptr(ahd, tail);
463 ahd_outw(ahd, SCB_NEXT_COMPLETE, scbid);
464 ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, scbid);
465 ahd_set_scbptr(ahd, scbid);
466 }
467 } else
468 ahd_complete_scb(ahd, scb);
469 }
470 ahd_set_scbptr(ahd, saved_scbptr);
471
472 /*
473 * Setup for command channel portion of flush.
474 */
475 ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
476
477 /*
478 * Wait for any in-progress DMA to complete and clear DMA state
479 * if this is for an SCB in the qinfifo.
480 */
481 while (((ccscbctl = ahd_inb(ahd, CCSCBCTL)) & (CCARREN|CCSCBEN)) != 0) {
482
483 if ((ccscbctl & (CCSCBDIR|CCARREN)) == (CCSCBDIR|CCARREN)) {
484 if ((ccscbctl & ARRDONE) != 0)
485 break;
486 } else if ((ccscbctl & CCSCBDONE) != 0)
487 break;
488 aic_delay(200);
489 }
490 /*
491 * We leave the sequencer to cleanup in the case of DMA's to
492 * update the qoutfifo. In all other cases (DMA's to the
493 * chip or a push of an SCB from the COMPLETE_DMA_SCB list),
494 * we disable the DMA engine so that the sequencer will not
495 * attempt to handle the DMA completion.
496 */
497 if ((ccscbctl & CCSCBDIR) != 0 || (ccscbctl & ARRDONE) != 0)
498 ahd_outb(ahd, CCSCBCTL, ccscbctl & ~(CCARREN|CCSCBEN));
499
500 /*
501 * Complete any SCBs that just finished
502 * being DMA'ed into the qoutfifo.
503 */
504 ahd_run_qoutfifo(ahd);
505
506 saved_scbptr = ahd_get_scbptr(ahd);
507 /*
508 * Manually update/complete any completed SCBs that are waiting to be
509 * DMA'ed back up to the host.
510 */
511 scbid = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD);
512 while (!SCBID_IS_NULL(scbid)) {
513 uint8_t *hscb_ptr;
514 u_int i;
515
516 ahd_set_scbptr(ahd, scbid);
517 next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
518 scb = ahd_lookup_scb(ahd, scbid);
519 if (scb == NULL) {
520 kprintf("%s: Warning - DMA-up and complete "
521 "SCB %d invalid\n", ahd_name(ahd), scbid);
522 continue;
523 }
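		/*
		 * Copy the hardware SCB back from chip SCB RAM a byte at
		 * a time so the in-core copy reflects the final residual
		 * and status before the command is completed.
		 */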
524 hscb_ptr = (uint8_t *)scb->hscb;
525 for (i = 0; i < sizeof(struct hardware_scb); i++)
526 *hscb_ptr++ = ahd_inb_scbram(ahd, SCB_BASE + i);
527
528 ahd_complete_scb(ahd, scb);
529 scbid = next_scbid;
530 }
531 ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, SCB_LIST_NULL);
532 ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, SCB_LIST_NULL);
533
534 scbid = ahd_inw(ahd, COMPLETE_ON_QFREEZE_HEAD);
535 while (!SCBID_IS_NULL(scbid)) {
536
537 ahd_set_scbptr(ahd, scbid);
538 next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
539 scb = ahd_lookup_scb(ahd, scbid);
540 if (scb == NULL) {
541 kprintf("%s: Warning - Complete Qfrz SCB %d invalid\n",
542 ahd_name(ahd), scbid);
543 continue;
544 }
545
546 ahd_complete_scb(ahd, scb);
547 scbid = next_scbid;
548 }
549 ahd_outw(ahd, COMPLETE_ON_QFREEZE_HEAD, SCB_LIST_NULL);
550
551 scbid = ahd_inw(ahd, COMPLETE_SCB_HEAD);
552 while (!SCBID_IS_NULL(scbid)) {
553
554 ahd_set_scbptr(ahd, scbid);
555 next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
556 scb = ahd_lookup_scb(ahd, scbid);
557 if (scb == NULL) {
558 kprintf("%s: Warning - Complete SCB %d invalid\n",
559 ahd_name(ahd), scbid);
560 continue;
561 }
562
563 ahd_complete_scb(ahd, scb);
564 scbid = next_scbid;
565 }
566 ahd_outw(ahd, COMPLETE_SCB_HEAD, SCB_LIST_NULL);
567
568 /*
569 * Restore state.
570 */
571 ahd_set_scbptr(ahd, saved_scbptr);
572 ahd_restore_modes(ahd, saved_modes);
573 ahd->flags |= AHD_UPDATE_PEND_CMDS;
574}
575
576/*
577 * Determine if an SCB for a packetized transaction
578 * is active in a FIFO.
579 */
580static int
581ahd_scb_active_in_fifo(struct ahd_softc *ahd, struct scb *scb)
582{
583
584 /*
585 * The FIFO is only active for our transaction if
586 * the SCBPTR matches the SCB's ID and the firmware
587 * has installed a handler for the FIFO or we have
588 * a pending SAVEPTRS or CFG4DATA interrupt.
589 */
590 if (ahd_get_scbptr(ahd) != SCB_GET_TAG(scb)
591 || ((ahd_inb(ahd, LONGJMP_ADDR+1) & INVALID_ADDR) != 0
592 && (ahd_inb(ahd, SEQINTSRC) & (CFG4DATA|SAVEPTRS)) == 0))
593 return (0);
594
595 return (1);
596}
597
598/*
599 * Run a data fifo to completion for a transaction we know
600 * has completed across the SCSI bus (good status has been
601 * received). We are already set to the correct FIFO mode
602 * on entry to this routine.
603 *
604 * This function attempts to operate exactly as the firmware
605 * would when running this FIFO. Care must be taken to update
606 * this routine any time the firmware's FIFO algorithm is
607 * changed.
608 */
609static void
610ahd_run_data_fifo(struct ahd_softc *ahd, struct scb *scb)
611{
612 u_int seqintsrc;
613
614 seqintsrc = ahd_inb(ahd, SEQINTSRC);
615 if ((seqintsrc & CFG4DATA) != 0) {
616 uint32_t datacnt;
617 uint32_t sgptr;
618
619 /*
620 * Clear full residual flag.
621 */
622 sgptr = ahd_inl_scbram(ahd, SCB_SGPTR) & ~SG_FULL_RESID;
623 ahd_outb(ahd, SCB_SGPTR, sgptr);
624
625 /*
626 * Load datacnt and address.
627 */
628 datacnt = ahd_inl_scbram(ahd, SCB_DATACNT);
629 if ((datacnt & AHD_DMA_LAST_SEG) != 0) {
630 sgptr |= LAST_SEG;
631 ahd_outb(ahd, SG_STATE, 0);
632 } else
633 ahd_outb(ahd, SG_STATE, LOADING_NEEDED);
634 ahd_outq(ahd, HADDR, ahd_inq_scbram(ahd, SCB_DATAPTR));
635 ahd_outl(ahd, HCNT, datacnt & AHD_SG_LEN_MASK);
636 ahd_outb(ahd, SG_CACHE_PRE, sgptr);
637 ahd_outb(ahd, DFCNTRL, PRELOADEN|SCSIEN|HDMAEN);
638
639 /*
640 * Initialize Residual Fields.
641 */
642 ahd_outb(ahd, SCB_RESIDUAL_DATACNT+3, datacnt >> 24);
643 ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr & SG_PTR_MASK);
644
645 /*
646 * Mark the SCB as having a FIFO in use.
647 */
648 ahd_outb(ahd, SCB_FIFO_USE_COUNT,
649 ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT) + 1);
650
651 /*
652 * Install a "fake" handler for this FIFO.
653 */
654 ahd_outw(ahd, LONGJMP_ADDR, 0);
655
656 /*
657 * Notify the hardware that we have satisfied
658 * this sequencer interrupt.
659 */
660 ahd_outb(ahd, CLRSEQINTSRC, CLRCFG4DATA);
661 } else if ((seqintsrc & SAVEPTRS) != 0) {
662 uint32_t sgptr;
663 uint32_t resid;
664
665 if ((ahd_inb(ahd, LONGJMP_ADDR+1)&INVALID_ADDR) != 0) {
666 /*
667 * Snapshot Save Pointers. All that
668 * is necessary to clear the snapshot
669 * is a CLRCHN.
670 */
671 goto clrchn;
672 }
673
674 /*
675 * Disable S/G fetch so the DMA engine
676 * is available to future users.
677 */
678 if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0)
679 ahd_outb(ahd, CCSGCTL, 0);
680 ahd_outb(ahd, SG_STATE, 0);
681
682 /*
683 * Flush the data FIFO. Strictly only
684 * necessary for Rev A parts.
685 */
686 ahd_outb(ahd, DFCNTRL, ahd_inb(ahd, DFCNTRL) | FIFOFLUSH);
687
688 /*
689 * Calculate residual.
690 */
691 sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR);
692 resid = ahd_inl(ahd, SHCNT);
693 resid |= ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT+3) << 24;
694 ahd_outl(ahd, SCB_RESIDUAL_DATACNT, resid);
695 if ((ahd_inb(ahd, SG_CACHE_SHADOW) & LAST_SEG) == 0) {
696 /*
697 * Must back up to the correct S/G element.
698 * Typically this just means resetting our
699 * low byte to the offset in the SG_CACHE,
700 * but if we wrapped, we have to correct
701 * the other bytes of the sgptr too.
702 */
703 if ((ahd_inb(ahd, SG_CACHE_SHADOW) & 0x80) != 0
704 && (sgptr & 0x80) == 0)
705 sgptr -= 0x100;
706 sgptr &= ~0xFF;
707 sgptr |= ahd_inb(ahd, SG_CACHE_SHADOW)
708 & SG_ADDR_MASK;
709 ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr);
710 ahd_outb(ahd, SCB_RESIDUAL_DATACNT + 3, 0);
711 } else if ((resid & AHD_SG_LEN_MASK) == 0) {
712 ahd_outb(ahd, SCB_RESIDUAL_SGPTR,
713 sgptr | SG_LIST_NULL);
714 }
715 /*
716 * Save Pointers.
717 */
718 ahd_outq(ahd, SCB_DATAPTR, ahd_inq(ahd, SHADDR));
719 ahd_outl(ahd, SCB_DATACNT, resid);
720 ahd_outl(ahd, SCB_SGPTR, sgptr);
721 ahd_outb(ahd, CLRSEQINTSRC, CLRSAVEPTRS);
722 ahd_outb(ahd, SEQIMODE,
723 ahd_inb(ahd, SEQIMODE) | ENSAVEPTRS);
724 /*
725 * If the data is to the SCSI bus, we are
726 * done, otherwise wait for FIFOEMP.
727 */
728 if ((ahd_inb(ahd, DFCNTRL) & DIRECTION) != 0)
729 goto clrchn;
730 } else if ((ahd_inb(ahd, SG_STATE) & LOADING_NEEDED) != 0) {
731 uint32_t sgptr;
732 uint64_t data_addr;
733 uint32_t data_len;
734 u_int dfcntrl;
735
736 /*
737 * Disable S/G fetch so the DMA engine
738 * is available to future users. We won't
739 * be using the DMA engine to load segments.
740 */
741 if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0) {
742 ahd_outb(ahd, CCSGCTL, 0);
743 ahd_outb(ahd, SG_STATE, LOADING_NEEDED);
744 }
745
746 /*
747 * Wait for the DMA engine to notice that the
748 * host transfer is enabled and that there is
749 * space in the S/G FIFO for new segments before
750 * loading more segments.
751 */
752 if ((ahd_inb(ahd, DFSTATUS) & PRELOAD_AVAIL) != 0
753 && (ahd_inb(ahd, DFCNTRL) & HDMAENACK) != 0) {
754
755 /*
756 * Determine the offset of the next S/G
757 * element to load.
758 */
759 sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR);
760 sgptr &= SG_PTR_MASK;
761 if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
762 struct ahd_dma64_seg *sg;
763
764 sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);
765 data_addr = sg->addr;
766 data_len = sg->len;
767 sgptr += sizeof(*sg);
768 } else {
769 struct ahd_dma_seg *sg;
770
771 sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);
772 data_addr = sg->len & AHD_SG_HIGH_ADDR_MASK;
773 data_addr <<= 8;
774 data_addr |= sg->addr;
775 data_len = sg->len;
776 sgptr += sizeof(*sg);
777 }
778
779 /*
780 * Update residual information.
781 */
782 ahd_outb(ahd, SCB_RESIDUAL_DATACNT+3, data_len >> 24);
783 ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr);
784
785 /*
786 * Load the S/G.
787 */
788 if (data_len & AHD_DMA_LAST_SEG) {
789 sgptr |= LAST_SEG;
790 ahd_outb(ahd, SG_STATE, 0);
791 }
792 ahd_outq(ahd, HADDR, data_addr);
793 ahd_outl(ahd, HCNT, data_len & AHD_SG_LEN_MASK);
794 ahd_outb(ahd, SG_CACHE_PRE, sgptr & 0xFF);
795
796 /*
797 * Advertise the segment to the hardware.
798 */
799 dfcntrl = ahd_inb(ahd, DFCNTRL)|PRELOADEN|HDMAEN;
800 if ((ahd->features & AHD_NEW_DFCNTRL_OPTS) != 0) {
801 /*
802 * Use SCSIENWRDIS so that SCSIEN
803 * is never modified by this
804 * operation.
805 */
806 dfcntrl |= SCSIENWRDIS;
807 }
808 ahd_outb(ahd, DFCNTRL, dfcntrl);
809 }
810 } else if ((ahd_inb(ahd, SG_CACHE_SHADOW) & LAST_SEG_DONE) != 0) {
811
812 /*
813 * Transfer completed to the end of SG list
814 * and has flushed to the host.
815 */
816 ahd_outb(ahd, SCB_SGPTR,
817 ahd_inb_scbram(ahd, SCB_SGPTR) | SG_LIST_NULL);
818 goto clrchn;
819 } else if ((ahd_inb(ahd, DFSTATUS) & FIFOEMP) != 0) {
820clrchn:
821 /*
822 * Clear any handler for this FIFO, decrement
823 * the FIFO use count for the SCB, and release
824 * the FIFO.
825 */
826 ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR);
827 ahd_outb(ahd, SCB_FIFO_USE_COUNT,
828 ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT) - 1);
829 ahd_outb(ahd, DFFSXFRCTL, CLRCHN);
830 }
831}
832
833/*
834 * Look for entries in the QoutFIFO that have completed.
835 * The valid_tag completion field indicates the validity
836 * of the entry - the valid value toggles each time through
837 * the queue. We use the sg_status field in the completion
838 * entry to avoid referencing the hscb if the completion
839 * occurred with no errors and no residual. sg_status is
840 * a copy of the first byte (little endian) of the sgptr
841 * hscb field.
842 */
843void
844ahd_run_qoutfifo(struct ahd_softc *ahd)
845{
846 struct ahd_completion *completion;
847 struct scb *scb;
848 u_int scb_index;
849
850 if ((ahd->flags & AHD_RUNNING_QOUTFIFO) != 0)
851 panic("ahd_run_qoutfifo recursion");
852 ahd->flags |= AHD_RUNNING_QOUTFIFO;
853 ahd_sync_qoutfifo(ahd, BUS_DMASYNC_POSTREAD);
854 for (;;) {
855 completion = &ahd->qoutfifo[ahd->qoutfifonext];
856
857 if (completion->valid_tag != ahd->qoutfifonext_valid_tag)
858 break;
859
860 scb_index = aic_le16toh(completion->tag);
861 scb = ahd_lookup_scb(ahd, scb_index);
862 if (scb == NULL) {
863 kprintf("%s: WARNING no command for scb %d "
864 "(cmdcmplt)\nQOUTPOS = %d\n",
865 ahd_name(ahd), scb_index,
866 ahd->qoutfifonext);
867 ahd_dump_card_state(ahd);
868 } else if ((completion->sg_status & SG_STATUS_VALID) != 0) {
869 ahd_handle_scb_status(ahd, scb);
870 } else {
871 ahd_done(ahd, scb);
872 }
873
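		/*
		 * Advance the ring index; the mask works because
		 * AHD_QOUT_SIZE is a power of two, and the expected
		 * valid tag is flipped each time the index wraps.
		 */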
874 ahd->qoutfifonext = (ahd->qoutfifonext+1) & (AHD_QOUT_SIZE-1);
875 if (ahd->qoutfifonext == 0)
876 ahd->qoutfifonext_valid_tag ^= QOUTFIFO_ENTRY_VALID;
877 }
878 ahd->flags &= ~AHD_RUNNING_QOUTFIFO;
879}
880
881/************************* Interrupt Handling *********************************/
882void
883ahd_handle_hwerrint(struct ahd_softc *ahd)
884{
885 /*
886 * Some catastrophic hardware error has occurred.
887 * Print it for the user and disable the controller.
888 */
889 int i;
890 int error;
891
892 error = ahd_inb(ahd, ERROR);
893 for (i = 0; i < num_errors; i++) {
894 if ((error & ahd_hard_errors[i].error) != 0)
895 kprintf("%s: hwerrint, %s\n",
896 ahd_name(ahd), ahd_hard_errors[i].errmesg);
897 }
898
899 ahd_dump_card_state(ahd);
900 panic("BRKADRINT");
901
902 /* Tell everyone that this HBA is no longer available */
903 ahd_abort_scbs(ahd, CAM_TARGET_WILDCARD, ALL_CHANNELS,
904 CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN,
905 CAM_NO_HBA);
906
907 /* Tell the system that this controller has gone away. */
908 ahd_free(ahd);
909}
910
911void
912ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
913{
914 u_int seqintcode;
915
916 /*
917 * Save the sequencer interrupt code and clear the SEQINT
918 * bit. We will unpause the sequencer, if appropriate,
919 * after servicing the request.
920 */
921 seqintcode = ahd_inb(ahd, SEQINTCODE);
922 ahd_outb(ahd, CLRINT, CLRSEQINT);
923 if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
924 /*
925 * Unpause the sequencer and let it clear
926 * SEQINT by writing NO_SEQINT to it. This
927 * will cause the sequencer to be paused again,
928 * which is the expected state of this routine.
929 */
930 ahd_unpause(ahd);
931 while (!ahd_is_paused(ahd))
932 ;
933 ahd_outb(ahd, CLRINT, CLRSEQINT);
934 }
935 ahd_update_modes(ahd);
936#ifdef AHD_DEBUG
937 if ((ahd_debug & AHD_SHOW_MISC) != 0)
938 kprintf("%s: Handle Seqint Called for code %d\n",
939 ahd_name(ahd), seqintcode);
940#endif
941 switch (seqintcode) {
942 case ENTERING_NONPACK:
943 {
944 struct scb *scb;
945 u_int scbid;
946
947 AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
948 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
949 scbid = ahd_get_scbptr(ahd);
950 scb = ahd_lookup_scb(ahd, scbid);
951 if (scb == NULL) {
952 /*
953 * Somehow need to know if this
954 * is from a selection or reselection.
955 * From that, we can determine target
956 * ID so we at least have an I_T nexus.
957 */
958 } else {
959 ahd_outb(ahd, SAVED_SCSIID, scb->hscb->scsiid);
960 ahd_outb(ahd, SAVED_LUN, scb->hscb->lun);
961 ahd_outb(ahd, SEQ_FLAGS, 0x0);
962 }
963 if ((ahd_inb(ahd, LQISTAT2) & LQIPHASE_OUTPKT) != 0
964 && (ahd_inb(ahd, SCSISIGO) & ATNO) != 0) {
965 /*
966 * Phase change after read stream with
967 * CRC error with P0 asserted on last
968 * packet.
969 */
970#ifdef AHD_DEBUG
971 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
972 kprintf("%s: Assuming LQIPHASE_NLQ with "
973 "P0 assertion\n", ahd_name(ahd));
974#endif
975 }
976#ifdef AHD_DEBUG
977 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
978 kprintf("%s: Entering NONPACK\n", ahd_name(ahd));
979#endif
980 break;
981 }
982 case INVALID_SEQINT:
983 kprintf("%s: Invalid Sequencer interrupt occurred.\n",
984 ahd_name(ahd));
985 ahd_dump_card_state(ahd);
986 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
987 break;
988 case STATUS_OVERRUN:
989 {
990 struct scb *scb;
991 u_int scbid;
992
993 scbid = ahd_get_scbptr(ahd);
994 scb = ahd_lookup_scb(ahd, scbid);
995 if (scb != NULL)
996 ahd_print_path(ahd, scb);
997 else
998 kprintf("%s: ", ahd_name(ahd));
999 kprintf("SCB %d Packetized Status Overrun", scbid);
1000 ahd_dump_card_state(ahd);
1001 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
1002 break;
1003 }
1004 case CFG4ISTAT_INTR:
1005 {
1006 struct scb *scb;
1007 u_int scbid;
1008
1009 scbid = ahd_get_scbptr(ahd);
1010 scb = ahd_lookup_scb(ahd, scbid);
1011 if (scb == NULL) {
1012 ahd_dump_card_state(ahd);
1013 kprintf("CFG4ISTAT: Free SCB %d referenced", scbid);
1014 panic("For safety");
1015 }
1016 ahd_outq(ahd, HADDR, scb->sense_busaddr);
1017 ahd_outw(ahd, HCNT, AHD_SENSE_BUFSIZE);
1018 ahd_outb(ahd, HCNT + 2, 0);
1019 ahd_outb(ahd, SG_CACHE_PRE, SG_LAST_SEG);
1020 ahd_outb(ahd, DFCNTRL, PRELOADEN|SCSIEN|HDMAEN);
1021 break;
1022 }
1023 case ILLEGAL_PHASE:
1024 {
1025 u_int bus_phase;
1026
1027 bus_phase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
1028 kprintf("%s: ILLEGAL_PHASE 0x%x\n",
1029 ahd_name(ahd), bus_phase);
1030
1031 switch (bus_phase) {
1032 case P_DATAOUT:
1033 case P_DATAIN:
1034 case P_DATAOUT_DT:
1035 case P_DATAIN_DT:
1036 case P_MESGOUT:
1037 case P_STATUS:
1038 case P_MESGIN:
1039 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
1040 kprintf("%s: Issued Bus Reset.\n", ahd_name(ahd));
1041 break;
1042 case P_COMMAND:
1043 {
1044 struct ahd_devinfo devinfo;
1045 struct scb *scb;
1046 struct ahd_initiator_tinfo *targ_info;
1047 struct ahd_tmode_tstate *tstate;
1048 u_int scbid;
1049
1050 /*
1051 * If a target takes us into the command phase
1052 * assume that it has been externally reset and
1053 * has thus lost our previous packetized negotiation
1054 * agreement. Since we have not sent an identify
1055 * message and may not have fully qualified the
1056 * connection, we change our command to TUR, assert
1057 * ATN and ABORT the task when we go to message in
1058 * phase. The OSM will see the REQUEUE_REQUEST
1059 * status and retry the command.
1060 */
1061 scbid = ahd_get_scbptr(ahd);
1062 scb = ahd_lookup_scb(ahd, scbid);
1063 if (scb == NULL) {
1064 kprintf("Invalid phase with no valid SCB. "
1065 "Resetting bus.\n");
1066 ahd_reset_channel(ahd, 'A',
1067 /*Initiate Reset*/TRUE);
1068 break;
1069 }
1070 ahd_compile_devinfo(&devinfo, SCB_GET_OUR_ID(scb),
1071 SCB_GET_TARGET(ahd, scb),
1072 SCB_GET_LUN(scb),
1073 SCB_GET_CHANNEL(ahd, scb),
1074 ROLE_INITIATOR);
1075 targ_info = ahd_fetch_transinfo(ahd,
1076 devinfo.channel,
1077 devinfo.our_scsiid,
1078 devinfo.target,
1079 &tstate);
1080 ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
1081 AHD_TRANS_ACTIVE, /*paused*/TRUE);
1082 ahd_set_syncrate(ahd, &devinfo, /*period*/0,
1083 /*offset*/0, /*ppr_options*/0,
1084 AHD_TRANS_ACTIVE, /*paused*/TRUE);
1085 ahd_outb(ahd, SCB_CDB_STORE, 0);
1086 ahd_outb(ahd, SCB_CDB_STORE+1, 0);
1087 ahd_outb(ahd, SCB_CDB_STORE+2, 0);
1088 ahd_outb(ahd, SCB_CDB_STORE+3, 0);
1089 ahd_outb(ahd, SCB_CDB_STORE+4, 0);
1090 ahd_outb(ahd, SCB_CDB_STORE+5, 0);
1091 ahd_outb(ahd, SCB_CDB_LEN, 6);
1092 scb->hscb->control &= ~(TAG_ENB|SCB_TAG_TYPE);
1093 scb->hscb->control |= MK_MESSAGE;
1094 ahd_outb(ahd, SCB_CONTROL, scb->hscb->control);
1095 ahd_outb(ahd, MSG_OUT, HOST_MSG);
1096 ahd_outb(ahd, SAVED_SCSIID, scb->hscb->scsiid);
1097 /*
1098 * The lun is 0, regardless of the SCB's lun
1099 * as we have not sent an identify message.
1100 */
1101 ahd_outb(ahd, SAVED_LUN, 0);
1102 ahd_outb(ahd, SEQ_FLAGS, 0);
1103 ahd_assert_atn(ahd);
1104 scb->flags &= ~SCB_PACKETIZED;
1105 scb->flags |= SCB_ABORT|SCB_CMDPHASE_ABORT;
1106 ahd_freeze_devq(ahd, scb);
1107 aic_set_transaction_status(scb, CAM_REQUEUE_REQ);
1108 aic_freeze_scb(scb);
1109
1110 /*
1111 * Allow the sequencer to continue with
1112 * non-pack processing.
1113 */
1114 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
1115 ahd_outb(ahd, CLRLQOINT1, CLRLQOPHACHGINPKT);
1116 if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) {
1117 ahd_outb(ahd, CLRLQOINT1, 0);
1118 }
1119#ifdef AHD_DEBUG
1120 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
1121 ahd_print_path(ahd, scb);
1122 kprintf("Unexpected command phase from "
1123 "packetized target\n");
1124 }
1125#endif
1126 break;
1127 }
1128 }
1129 break;
1130 }
1131 case CFG4OVERRUN:
1132 {
1133 struct scb *scb;
1134 u_int scb_index;
1135
1136#ifdef AHD_DEBUG
1137 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
1138 kprintf("%s: CFG4OVERRUN mode = %x\n", ahd_name(ahd),
1139 ahd_inb(ahd, MODE_PTR));
1140 }
1141#endif
1142 scb_index = ahd_get_scbptr(ahd);
1143 scb = ahd_lookup_scb(ahd, scb_index);
1144 if (scb == NULL) {
1145 /*
1146 * Attempt to transfer to an SCB that is
1147 * not outstanding.
1148 */
1149 ahd_assert_atn(ahd);
1150 ahd_outb(ahd, MSG_OUT, HOST_MSG);
1151 ahd->msgout_buf[0] = MSG_ABORT_TASK;
1152 ahd->msgout_len = 1;
1153 ahd->msgout_index = 0;
1154 ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
1155 /*
1156 * Clear status received flag to prevent any
1157 * attempt to complete this bogus SCB.
1158 */
1159 ahd_outb(ahd, SCB_CONTROL,
1160 ahd_inb_scbram(ahd, SCB_CONTROL)
1161 & ~STATUS_RCVD);
1162 }
1163 break;
1164 }
1165 case DUMP_CARD_STATE:
1166 {
1167 ahd_dump_card_state(ahd);
1168 break;
1169 }
1170 case PDATA_REINIT:
1171 {
1172#ifdef AHD_DEBUG
1173 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
1174 kprintf("%s: PDATA_REINIT - DFCNTRL = 0x%x "
1175 "SG_CACHE_SHADOW = 0x%x\n",
1176 ahd_name(ahd), ahd_inb(ahd, DFCNTRL),
1177 ahd_inb(ahd, SG_CACHE_SHADOW));
1178 }
1179#endif
1180 ahd_reinitialize_dataptrs(ahd);
1181 break;
1182 }
1183 case HOST_MSG_LOOP:
1184 {
1185 struct ahd_devinfo devinfo;
1186
1187 /*
1188 * The sequencer has encountered a message phase
1189 * that requires host assistance for completion.
1190 * While handling the message phase(s), we will be
1191 * notified by the sequencer after each byte is
1192 * transferred so we can track bus phase changes.
1193 *
1194 * If this is the first time we've seen a HOST_MSG_LOOP
1195 * interrupt, initialize the state of the host message
1196 * loop.
1197 */
1198 ahd_fetch_devinfo(ahd, &devinfo);
1199 if (ahd->msg_type == MSG_TYPE_NONE) {
1200 struct scb *scb;
1201 u_int scb_index;
1202 u_int bus_phase;
1203
1204 bus_phase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
1205 if (bus_phase != P_MESGIN
1206 && bus_phase != P_MESGOUT) {
1207 kprintf("ahd_intr: HOST_MSG_LOOP bad "
1208 "phase 0x%x\n", bus_phase);
1209 /*
1210 * Probably transitioned to bus free before
1211 * we got here. Just punt the message.
1212 */
1213 ahd_dump_card_state(ahd);
1214 ahd_clear_intstat(ahd);
1215 ahd_restart(ahd);
1216 return;
1217 }
1218
1219 scb_index = ahd_get_scbptr(ahd);
1220 scb = ahd_lookup_scb(ahd, scb_index);
1221 if (devinfo.role == ROLE_INITIATOR) {
1222 if (bus_phase == P_MESGOUT)
1223 ahd_setup_initiator_msgout(ahd,
1224 &devinfo,
1225 scb);
1226 else {
1227 ahd->msg_type =
1228 MSG_TYPE_INITIATOR_MSGIN;
1229 ahd->msgin_index = 0;
1230 }
1231 }
1232#ifdef AHD_TARGET_MODE
1233 else {
1234 if (bus_phase == P_MESGOUT) {
1235 ahd->msg_type =
1236 MSG_TYPE_TARGET_MSGOUT;
1237 ahd->msgin_index = 0;
1238 }
1239 else
1240 ahd_setup_target_msgin(ahd,
1241 &devinfo,
1242 scb);
1243 }
1244#endif
1245 }
1246
1247 ahd_handle_message_phase(ahd);
1248 break;
1249 }
1250 case NO_MATCH:
1251 {
1252 /* Ensure we don't leave the selection hardware on */
1253 AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
1254 ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO);
1255
1256 kprintf("%s:%c:%d: no active SCB for reconnecting "
1257 "target - issuing BUS DEVICE RESET\n",
1258 ahd_name(ahd), 'A', ahd_inb(ahd, SELID) >> 4);
1259 kprintf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
1260 "REG0 == 0x%x ACCUM = 0x%x\n",
1261 ahd_inb(ahd, SAVED_SCSIID), ahd_inb(ahd, SAVED_LUN),
1262 ahd_inw(ahd, REG0), ahd_inb(ahd, ACCUM));
1263 kprintf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
1264 "SINDEX == 0x%x\n",
1265 ahd_inb(ahd, SEQ_FLAGS), ahd_get_scbptr(ahd),
1266 ahd_find_busy_tcl(ahd,
1267 BUILD_TCL(ahd_inb(ahd, SAVED_SCSIID),
1268 ahd_inb(ahd, SAVED_LUN))),
1269 ahd_inw(ahd, SINDEX));
1270 kprintf("SELID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
1271 "SCB_CONTROL == 0x%x\n",
1272 ahd_inb(ahd, SELID), ahd_inb_scbram(ahd, SCB_SCSIID),
1273 ahd_inb_scbram(ahd, SCB_LUN),
1274 ahd_inb_scbram(ahd, SCB_CONTROL));
1275 kprintf("SCSIBUS[0] == 0x%x, SCSISIGI == 0x%x\n",
1276 ahd_inb(ahd, SCSIBUS), ahd_inb(ahd, SCSISIGI));
1277 kprintf("SXFRCTL0 == 0x%x\n", ahd_inb(ahd, SXFRCTL0));
1278 kprintf("SEQCTL0 == 0x%x\n", ahd_inb(ahd, SEQCTL0));
1279 ahd_dump_card_state(ahd);
1280 ahd->msgout_buf[0] = MSG_BUS_DEV_RESET;
1281 ahd->msgout_len = 1;
1282 ahd->msgout_index = 0;
1283 ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
1284 ahd_outb(ahd, MSG_OUT, HOST_MSG);
1285 ahd_assert_atn(ahd);
1286 break;
1287 }
1288 case PROTO_VIOLATION:
1289 {
1290 ahd_handle_proto_violation(ahd);
1291 break;
1292 }
1293 case IGN_WIDE_RES:
1294 {
1295 struct ahd_devinfo devinfo;
1296
1297 ahd_fetch_devinfo(ahd, &devinfo);
1298 ahd_handle_ign_wide_residue(ahd, &devinfo);
1299 break;
1300 }
1301 case BAD_PHASE:
1302 {
1303 u_int lastphase;
1304
1305 lastphase = ahd_inb(ahd, LASTPHASE);
1306 kprintf("%s:%c:%d: unknown scsi bus phase %x, "
1307 "lastphase = 0x%x. Attempting to continue\n",
1308 ahd_name(ahd), 'A',
1309 SCSIID_TARGET(ahd, ahd_inb(ahd, SAVED_SCSIID)),
1310 lastphase, ahd_inb(ahd, SCSISIGI));
1311 break;
1312 }
1313 case MISSED_BUSFREE:
1314 {
1315 u_int lastphase;
1316
1317 lastphase = ahd_inb(ahd, LASTPHASE);
1318 kprintf("%s:%c:%d: Missed busfree. "
1319 "Lastphase = 0x%x, Curphase = 0x%x\n",
1320 ahd_name(ahd), 'A',
1321 SCSIID_TARGET(ahd, ahd_inb(ahd, SAVED_SCSIID)),
1322 lastphase, ahd_inb(ahd, SCSISIGI));
1323 ahd_restart(ahd);
1324 return;
1325 }
1326 case DATA_OVERRUN:
1327 {
1328 /*
1329 * When the sequencer detects an overrun, it
1330 * places the controller in "BITBUCKET" mode
1331 * and allows the target to complete its transfer.
1332 * Unfortunately, none of the counters get updated
1333 * when the controller is in this mode, so we have
1334 * no way of knowing how large the overrun was.
1335 */
1336 struct scb *scb;
1337 u_int scbindex;
1338#ifdef AHD_DEBUG
1339 u_int lastphase;
1340#endif
1341
1342 scbindex = ahd_get_scbptr(ahd);
1343 scb = ahd_lookup_scb(ahd, scbindex);
1344#ifdef AHD_DEBUG
1345 lastphase = ahd_inb(ahd, LASTPHASE);
1346 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
1347 ahd_print_path(ahd, scb);
1348 kprintf("data overrun detected %s. Tag == 0x%x.\n",
1349 ahd_lookup_phase_entry(lastphase)->phasemsg,
1350 SCB_GET_TAG(scb));
1351 ahd_print_path(ahd, scb);
1352 kprintf("%s seen Data Phase. Length = %ld. "
1353 "NumSGs = %d.\n",
1354 ahd_inb(ahd, SEQ_FLAGS) & DPHASE
1355 ? "Have" : "Haven't",
1356 aic_get_transfer_length(scb), scb->sg_count);
1357 ahd_dump_sglist(scb);
1358 }
1359#endif
1360
1361 /*
1362 * Set this and it will take effect when the
1363 * target does a command complete.
1364 */
1365 ahd_freeze_devq(ahd, scb);
1366 aic_set_transaction_status(scb, CAM_DATA_RUN_ERR);
1367 aic_freeze_scb(scb);
1368 break;
1369 }
1370 case MKMSG_FAILED:
1371 {
1372 struct ahd_devinfo devinfo;
1373 struct scb *scb;
1374 u_int scbid;
1375
1376 ahd_fetch_devinfo(ahd, &devinfo);
1377 kprintf("%s:%c:%d:%d: Attempt to issue message failed\n",
1378 ahd_name(ahd), devinfo.channel, devinfo.target,
1379 devinfo.lun);
1380 scbid = ahd_get_scbptr(ahd);
1381 scb = ahd_lookup_scb(ahd, scbid);
1382 if (scb != NULL
1383 && (scb->flags & SCB_RECOVERY_SCB) != 0)
1384 /*
1385 * Ensure that we didn't put a second instance of this
1386 * SCB into the QINFIFO.
1387 */
1388 ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb),
1389 SCB_GET_CHANNEL(ahd, scb),
1390 SCB_GET_LUN(scb), SCB_GET_TAG(scb),
1391 ROLE_INITIATOR, /*status*/0,
1392 SEARCH_REMOVE);
1393 ahd_outb(ahd, SCB_CONTROL,
1394 ahd_inb_scbram(ahd, SCB_CONTROL) & ~MK_MESSAGE);
1395 break;
1396 }
1397 case TASKMGMT_FUNC_COMPLETE:
1398 {
1399 u_int scbid;
1400 struct scb *scb;
1401
1402 scbid = ahd_get_scbptr(ahd);
1403 scb = ahd_lookup_scb(ahd, scbid);
1404 if (scb != NULL) {
1405 u_int lun;
1406 u_int tag;
1407 cam_status error;
1408
1409 ahd_print_path(ahd, scb);
1410 kprintf("Task Management Func 0x%x Complete\n",
1411 scb->hscb->task_management);
1412 lun = CAM_LUN_WILDCARD;
1413 tag = SCB_LIST_NULL;
1414
1415 switch (scb->hscb->task_management) {
1416 case SIU_TASKMGMT_ABORT_TASK:
1417 tag = SCB_GET_TAG(scb);
1418 case SIU_TASKMGMT_ABORT_TASK_SET:
1419 case SIU_TASKMGMT_CLEAR_TASK_SET:
1420 lun = scb->hscb->lun;
1421 error = CAM_REQ_ABORTED;
1422 ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb),
1423 'A', lun, tag, ROLE_INITIATOR,
1424 error);
1425 break;
1426 case SIU_TASKMGMT_LUN_RESET:
1427 lun = scb->hscb->lun;
1428 case SIU_TASKMGMT_TARGET_RESET:
1429 {
1430 struct ahd_devinfo devinfo;
1431
1432 ahd_scb_devinfo(ahd, &devinfo, scb);
1433 error = CAM_BDR_SENT;
1434 ahd_handle_devreset(ahd, &devinfo, lun,
1435 CAM_BDR_SENT,
1436 lun != CAM_LUN_WILDCARD
1437 ? "Lun Reset"
1438 : "Target Reset",
1439 /*verbose_level*/0);
1440 break;
1441 }
1442 default:
1443 panic("Unexpected TaskMgmt Func");
1444 break;
1445 }
1446 }
1447 break;
1448 }
1449 case TASKMGMT_CMD_CMPLT_OKAY:
1450 {
1451 u_int scbid;
1452 struct scb *scb;
1453
1454 /*
1455 * An ABORT TASK TMF failed to be delivered before
1456 * the targeted command completed normally.
1457 */
1458 scbid = ahd_get_scbptr(ahd);
1459 scb = ahd_lookup_scb(ahd, scbid);
1460 if (scb != NULL) {
1461 /*
1462 * Remove the second instance of this SCB from
1463 * the QINFIFO if it is still there.
1464 */
1465 ahd_print_path(ahd, scb);
1466 kprintf("SCB completes before TMF\n");
1467 /*
1468 * Handle losing the race. Wait until any
1469 * current selection completes. We will then
1470 * set the TMF back to zero in this SCB so that
1471 * the sequencer doesn't bother to issue another
1472 * sequencer interrupt for its completion.
1473 */
1474 while ((ahd_inb(ahd, SCSISEQ0) & ENSELO) != 0
1475 && (ahd_inb(ahd, SSTAT0) & SELDO) == 0
1476 && (ahd_inb(ahd, SSTAT1) & SELTO) == 0)
1477 ;
1478 ahd_outb(ahd, SCB_TASK_MANAGEMENT, 0);
1479 ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb),
1480 SCB_GET_CHANNEL(ahd, scb),
1481 SCB_GET_LUN(scb), SCB_GET_TAG(scb),
1482 ROLE_INITIATOR, /*status*/0,
1483 SEARCH_REMOVE);
1484 }
1485 break;
1486 }
1487 case TRACEPOINT0:
1488 case TRACEPOINT1:
1489 case TRACEPOINT2:
1490 case TRACEPOINT3:
1491 kprintf("%s: Tracepoint %d\n", ahd_name(ahd),
1492 seqintcode - TRACEPOINT0);
1493 break;
1494 case NO_SEQINT:
1495 break;
1496 case SAW_HWERR:
1497 ahd_handle_hwerrint(ahd);
1498 break;
1499 default:
1500 kprintf("%s: Unexpected SEQINTCODE %d\n", ahd_name(ahd),
1501 seqintcode);
1502 break;
1503 }
1504 /*
1505 * The sequencer is paused immediately on
1506 * a SEQINT, so we should restart it when
1507 * we're done.
1508 */
1509 ahd_unpause(ahd);
1510}
1511
1512void
1513ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat)
1514{
1515 struct scb *scb;
1516 u_int status0;
1517 u_int status3;
1518 u_int status;
1519 u_int lqistat1;
1520 u_int lqostat0;
1521 u_int scbid;
1522 u_int busfreetime;
1523
1524 ahd_update_modes(ahd);
1525 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
1526
1527 status3 = ahd_inb(ahd, SSTAT3) & (NTRAMPERR|OSRAMPERR);
1528 status0 = ahd_inb(ahd, SSTAT0) & (IOERR|OVERRUN|SELDI|SELDO);
1529 status = ahd_inb(ahd, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR);
1530 lqistat1 = ahd_inb(ahd, LQISTAT1);
1531 lqostat0 = ahd_inb(ahd, LQOSTAT0);
1532 busfreetime = ahd_inb(ahd, SSTAT2) & BUSFREETIME;
1533 if ((status0 & (SELDI|SELDO)) != 0) {
1534 u_int simode0;
1535
1536 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
1537 simode0 = ahd_inb(ahd, SIMODE0);
1538 status0 &= simode0 & (IOERR|OVERRUN|SELDI|SELDO);
1539 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
1540 }
1541 scbid = ahd_get_scbptr(ahd);
1542 scb = ahd_lookup_scb(ahd, scbid);
1543 if (scb != NULL
1544 && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) != 0)
1545 scb = NULL;
1546
1547 if ((status0 & IOERR) != 0) {
1548 u_int now_lvd;
1549
1550 now_lvd = ahd_inb(ahd, SBLKCTL) & ENAB40;
1551 kprintf("%s: Transceiver State Has Changed to %s mode\n",
1552 ahd_name(ahd), now_lvd ? "LVD" : "SE");
1553 ahd_outb(ahd, CLRSINT0, CLRIOERR);
1554 /*
1555 * A change in I/O mode is equivalent to a bus reset.
1556 */
1557 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
1558 ahd_pause(ahd);
1559 ahd_setup_iocell_workaround(ahd);
1560 ahd_unpause(ahd);
1561 } else if ((status0 & OVERRUN) != 0) {
1562
1563 kprintf("%s: SCSI offset overrun detected. Resetting bus.\n",
1564 ahd_name(ahd));
1565 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
1566 } else if ((status & SCSIRSTI) != 0) {
1567
1568 kprintf("%s: Someone reset channel A\n", ahd_name(ahd));
1569 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/FALSE);
1570 } else if ((status & SCSIPERR) != 0) {
1571
1572 /* Make sure the sequencer is in a safe location. */
1573 ahd_clear_critical_section(ahd);
1574
1575 ahd_handle_transmission_error(ahd);
1576 } else if (lqostat0 != 0) {
1577
1578 kprintf("%s: lqostat0 == 0x%x!\n", ahd_name(ahd), lqostat0);
1579 ahd_outb(ahd, CLRLQOINT0, lqostat0);
1580 if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0)
1581 ahd_outb(ahd, CLRLQOINT1, 0);
1582 } else if ((status & SELTO) != 0) {
1583 u_int scbid;
1584
1585 /* Stop the selection */
1586 ahd_outb(ahd, SCSISEQ0, 0);
1587
1588 /* Make sure the sequencer is in a safe location. */
1589 ahd_clear_critical_section(ahd);
1590
1591 /* No more pending messages */
1592 ahd_clear_msg_state(ahd);
1593
1594 /* Clear interrupt state */
1595 ahd_outb(ahd, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR);
1596
1597 /*
1598 * Although the driver does not care about the
1599 * 'Selection in Progress' status bit, the busy
1600 * LED does. SELINGO is only cleared by a successful
1601 * selection, so we must manually clear it to ensure
1602 * the LED turns off just in case no future successful
1603 * selections occur (e.g. no devices on the bus).
1604 */
1605 ahd_outb(ahd, CLRSINT0, CLRSELINGO);
1606
1607 scbid = ahd_inw(ahd, WAITING_TID_HEAD);
1608 scb = ahd_lookup_scb(ahd, scbid);
1609 if (scb == NULL) {
1610 kprintf("%s: ahd_intr - referenced scb not "
1611 "valid during SELTO scb(0x%x)\n",
1612 ahd_name(ahd), scbid);
1613 ahd_dump_card_state(ahd);
1614 } else {
1615 struct ahd_devinfo devinfo;
1616#ifdef AHD_DEBUG
1617 if ((ahd_debug & AHD_SHOW_SELTO) != 0) {
1618 ahd_print_path(ahd, scb);
1619 kprintf("Saw Selection Timeout for SCB 0x%x\n",
1620 scbid);
1621 }
1622#endif
1623 ahd_scb_devinfo(ahd, &devinfo, scb);
1624 aic_set_transaction_status(scb, CAM_SEL_TIMEOUT);
1625 ahd_freeze_devq(ahd, scb);
1626
1627 /*
1628 * Cancel any pending transactions on the device
1629 * now that it seems to be missing. This will
1630 * also revert us to async/narrow transfers until
1631 * we can renegotiate with the device.
1632 */
1633 ahd_handle_devreset(ahd, &devinfo,
1634 CAM_LUN_WILDCARD,
1635 CAM_SEL_TIMEOUT,
1636 "Selection Timeout",
1637 /*verbose_level*/1);
1638 }
1639 ahd_outb(ahd, CLRINT, CLRSCSIINT);
1640 ahd_iocell_first_selection(ahd);
1641 ahd_unpause(ahd);
1642 } else if ((status0 & (SELDI|SELDO)) != 0) {
1643
1644 ahd_iocell_first_selection(ahd);
1645 ahd_unpause(ahd);
1646 } else if (status3 != 0) {
1647 kprintf("%s: SCSI Cell parity error SSTAT3 == 0x%x\n",
1648 ahd_name(ahd), status3);
1649 ahd_outb(ahd, CLRSINT3, status3);
1650 } else if ((lqistat1 & (LQIPHASE_LQ|LQIPHASE_NLQ)) != 0) {
1651
1652 /* Make sure the sequencer is in a safe location. */
1653 ahd_clear_critical_section(ahd);
1654
1655 ahd_handle_lqiphase_error(ahd, lqistat1);
1656 } else if ((lqistat1 & LQICRCI_NLQ) != 0) {
1657 /*
1658 * This status can be delayed during some
1659 * streaming operations. The SCSIPHASE
1660 * handler has already dealt with this case
1661 * so just clear the error.
1662 */
1663 ahd_outb(ahd, CLRLQIINT1, CLRLQICRCI_NLQ);
1664 } else if ((status & BUSFREE) != 0
1665 || (lqistat1 & LQOBUSFREE) != 0) {
1666 u_int lqostat1;
1667 int restart;
1668 int clear_fifo;
1669 int packetized;
1670 u_int mode;
1671
1672 /*
1673 * Clear our selection hardware as soon as possible.
1674 * We may have an entry in the waiting Q for this target,
1675 * that is affected by this busfree and we don't want to
1676 * go about selecting the target while we handle the event.
1677 */
1678 ahd_outb(ahd, SCSISEQ0, 0);
1679
1680 /* Make sure the sequencer is in a safe location. */
1681 ahd_clear_critical_section(ahd);
1682
1683 /*
1684 * Determine what we were up to at the time of
1685 * the busfree.
1686 */
1687 mode = AHD_MODE_SCSI;
1688 busfreetime = ahd_inb(ahd, SSTAT2) & BUSFREETIME;
1689 lqostat1 = ahd_inb(ahd, LQOSTAT1);
1690 switch (busfreetime) {
1691 case BUSFREE_DFF0:
1692 case BUSFREE_DFF1:
1693 {
1694 u_int scbid;
1695 struct scb *scb;
1696
1697 mode = busfreetime == BUSFREE_DFF0
1698 ? AHD_MODE_DFF0 : AHD_MODE_DFF1;
1699 ahd_set_modes(ahd, mode, mode);
1700 scbid = ahd_get_scbptr(ahd);
1701 scb = ahd_lookup_scb(ahd, scbid);
1702 if (scb == NULL) {
1703 kprintf("%s: Invalid SCB %d in DFF%d "
1704 "during unexpected busfree\n",
1705 ahd_name(ahd), scbid, mode);
1706 packetized = 0;
1707 } else
1708 packetized = (scb->flags & SCB_PACKETIZED) != 0;
1709 clear_fifo = 1;
1710 break;
1711 }
1712 case BUSFREE_LQO:
1713 clear_fifo = 0;
1714 packetized = 1;
1715 break;
1716 default:
1717 clear_fifo = 0;
1718 packetized = (lqostat1 & LQOBUSFREE) != 0;
1719 if (!packetized
1720 && ahd_inb(ahd, LASTPHASE) == P_BUSFREE
1721 && (ahd_inb(ahd, SSTAT0) & SELDI) == 0
1722 && ((ahd_inb(ahd, SSTAT0) & SELDO) == 0
1723 || (ahd_inb(ahd, SCSISEQ0) & ENSELO) == 0))
1724 /*
1725 * Assume packetized if we are not
1726 * on the bus in a non-packetized
1727 * capacity and any pending selection
1728 * was a packetized selection.
1729 */
1730 packetized = 1;
1731 break;
1732 }
1733
1734#ifdef AHD_DEBUG
1735 if ((ahd_debug & AHD_SHOW_MISC) != 0)
1736 kprintf("Saw Busfree. Busfreetime = 0x%x.\n",
1737 busfreetime);
1738#endif
1739 /*
1740 * Busfrees that occur in non-packetized phases are
1741 * handled by the nonpkt_busfree handler.
1742 */
1743 if (packetized && ahd_inb(ahd, LASTPHASE) == P_BUSFREE) {
1744 restart = ahd_handle_pkt_busfree(ahd, busfreetime);
1745 } else {
1746 packetized = 0;
1747 restart = ahd_handle_nonpkt_busfree(ahd);
1748 }
1749 /*
1750 * Clear the busfree interrupt status. The setting of
1751 * the interrupt is a pulse, so in a perfect world, we
1752 * would not need to muck with the ENBUSFREE logic. This
1753 * would ensure that if the bus moves on to another
1754 * connection, busfree protection is still in force. If
1755 * BUSFREEREV is broken, however, we must manually clear
 1756 * ENBUSFREE if the busfree occurred during a non-packetized
 1757 * connection so that we don't get false positives during
 1758 * future packetized connections.
1759 */
1760 ahd_outb(ahd, CLRSINT1, CLRBUSFREE);
1761 if (packetized == 0
1762 && (ahd->bugs & AHD_BUSFREEREV_BUG) != 0)
1763 ahd_outb(ahd, SIMODE1,
1764 ahd_inb(ahd, SIMODE1) & ~ENBUSFREE);
1765
1766 if (clear_fifo)
1767 ahd_clear_fifo(ahd, mode);
1768
1769 ahd_clear_msg_state(ahd);
1770 ahd_outb(ahd, CLRINT, CLRSCSIINT);
1771 if (restart) {
1772 ahd_restart(ahd);
1773 } else {
1774 ahd_unpause(ahd);
1775 }
1776 } else {
e3869ec7 1777 kprintf("%s: Missing case in ahd_handle_scsiint. status = %x\n",
984263bc
MD
1778 ahd_name(ahd), status);
1779 ahd_dump_card_state(ahd);
1780 ahd_clear_intstat(ahd);
1781 ahd_unpause(ahd);
1782 }
1783}
1784
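/*
 * Handle a transmission error reported by the LQI manager (CRC, overrun,
 * or parity problems seen during packetized transfers).  Locate the SCB
 * involved where possible, print diagnostics unless the SCB is marked
 * silent, and either reset the bus or queue an initiator detected error
 * or parity error message for delivery to the target.
 */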
1785static void
1786ahd_handle_transmission_error(struct ahd_softc *ahd)
1787{
1788 struct scb *scb;
1789 u_int scbid;
1790 u_int lqistat1;
1791 u_int lqistat2;
1792 u_int msg_out;
1793 u_int curphase;
1794 u_int lastphase;
1795 u_int perrdiag;
1796 u_int cur_col;
1797 int silent;
1798
1799 scb = NULL;
1800 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
1801 lqistat1 = ahd_inb(ahd, LQISTAT1) & ~(LQIPHASE_LQ|LQIPHASE_NLQ);
1802 lqistat2 = ahd_inb(ahd, LQISTAT2);
1803 if ((lqistat1 & (LQICRCI_NLQ|LQICRCI_LQ)) == 0
1804 && (ahd->bugs & AHD_NLQICRC_DELAYED_BUG) != 0) {
1805 u_int lqistate;
1806
1807 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
1808 lqistate = ahd_inb(ahd, LQISTATE);
1809 if ((lqistate >= 0x1E && lqistate <= 0x24)
1810 || (lqistate == 0x29)) {
1811#ifdef AHD_DEBUG
1812 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
e3869ec7 1813 kprintf("%s: NLQCRC found via LQISTATE\n",
984263bc
MD
1814 ahd_name(ahd));
1815 }
1816#endif
1817 lqistat1 |= LQICRCI_NLQ;
1818 }
1819 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
1820 }
1821
1822 ahd_outb(ahd, CLRLQIINT1, lqistat1);
1823 lastphase = ahd_inb(ahd, LASTPHASE);
1824 curphase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
1825 perrdiag = ahd_inb(ahd, PERRDIAG);
1826 msg_out = MSG_INITIATOR_DET_ERR;
1827 ahd_outb(ahd, CLRSINT1, CLRSCSIPERR);
1828
1829 /*
1830 * Try to find the SCB associated with this error.
1831 */
1832 silent = FALSE;
1833 if (lqistat1 == 0
1834 || (lqistat1 & LQICRCI_NLQ) != 0) {
1835 if ((lqistat1 & (LQICRCI_NLQ|LQIOVERI_NLQ)) != 0)
1836 ahd_set_active_fifo(ahd);
1837 scbid = ahd_get_scbptr(ahd);
1838 scb = ahd_lookup_scb(ahd, scbid);
1839 if (scb != NULL && SCB_IS_SILENT(scb))
1840 silent = TRUE;
1841 }
1842
1843 cur_col = 0;
1844 if (silent == FALSE) {
e3869ec7 1845 kprintf("%s: Transmission error detected\n", ahd_name(ahd));
984263bc
MD
1846 ahd_lqistat1_print(lqistat1, &cur_col, 50);
1847 ahd_lastphase_print(lastphase, &cur_col, 50);
1848 ahd_scsisigi_print(curphase, &cur_col, 50);
1849 ahd_perrdiag_print(perrdiag, &cur_col, 50);
e3869ec7 1850 kprintf("\n");
984263bc
MD
1851 ahd_dump_card_state(ahd);
1852 }
1853
1854 if ((lqistat1 & (LQIOVERI_LQ|LQIOVERI_NLQ)) != 0) {
1855 if (silent == FALSE) {
e3869ec7 1856 kprintf("%s: Gross protocol error during incoming "
984263bc
MD
1857 "packet. lqistat1 == 0x%x. Resetting bus.\n",
1858 ahd_name(ahd), lqistat1);
1859 }
1860 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
1861 return;
1862 } else if ((lqistat1 & LQICRCI_LQ) != 0) {
1863 /*
1864 * A CRC error has been detected on an incoming LQ.
1865 * The bus is currently hung on the last ACK.
1866 * Hit LQIRETRY to release the last ack, and
1867 * wait for the sequencer to determine that ATNO
1868 * is asserted while in message out to take us
1869 * to our host message loop. No NONPACKREQ or
1870 * LQIPHASE type errors will occur in this
1871 * scenario. After this first LQIRETRY, the LQI
1872 * manager will be in ISELO where it will
1873 * happily sit until another packet phase begins.
1874 * Unexpected bus free detection is enabled
1875 * through any phases that occur after we release
1876 * this last ack until the LQI manager sees a
1877 * packet phase. This implies we may have to
 1878 * ignore a perfectly valid "unexpected busfree"
 1879 * after our "initiator detected error" message is
 1880 * sent. A busfree is the expected response after
 1881 * we tell the target that its L_Q was corrupted.
1882 * (SPI4R09 10.7.3.3.3)
1883 */
1884 ahd_outb(ahd, LQCTL2, LQIRETRY);
e3869ec7 1885 kprintf("LQIRetry for LQICRCI_LQ to release ACK\n");
984263bc
MD
1886 } else if ((lqistat1 & LQICRCI_NLQ) != 0) {
1887 /*
1888 * We detected a CRC error in a NON-LQ packet.
1889 * The hardware has varying behavior in this situation
1890 * depending on whether this packet was part of a
1891 * stream or not.
1892 *
1893 * PKT by PKT mode:
1894 * The hardware has already acked the complete packet.
1895 * If the target honors our outstanding ATN condition,
1896 * we should be (or soon will be) in MSGOUT phase.
1897 * This will trigger the LQIPHASE_LQ status bit as the
1898 * hardware was expecting another LQ. Unexpected
1899 * busfree detection is enabled. Once LQIPHASE_LQ is
1900 * true (first entry into host message loop is much
1901 * the same), we must clear LQIPHASE_LQ and hit
1902 * LQIRETRY so the hardware is ready to handle
1903 * a future LQ. NONPACKREQ will not be asserted again
1904 * once we hit LQIRETRY until another packet is
1905 * processed. The target may either go busfree
1906 * or start another packet in response to our message.
1907 *
1908 * Read Streaming P0 asserted:
1909 * If we raise ATN and the target completes the entire
1910 * stream (P0 asserted during the last packet), the
1911 * hardware will ack all data and return to the ISTART
 1912 * state. When the target responds to our ATN condition,
1913 * LQIPHASE_LQ will be asserted. We should respond to
1914 * this with an LQIRETRY to prepare for any future
1915 * packets. NONPACKREQ will not be asserted again
1916 * once we hit LQIRETRY until another packet is
1917 * processed. The target may either go busfree or
1918 * start another packet in response to our message.
1919 * Busfree detection is enabled.
1920 *
1921 * Read Streaming P0 not asserted:
1922 * If we raise ATN and the target transitions to
1923 * MSGOUT in or after a packet where P0 is not
1924 * asserted, the hardware will assert LQIPHASE_NLQ.
1925 * We should respond to the LQIPHASE_NLQ with an
1926 * LQIRETRY. Should the target stay in a non-pkt
1927 * phase after we send our message, the hardware
1928 * will assert LQIPHASE_LQ. Recovery is then just as
1929 * listed above for the read streaming with P0 asserted.
1930 * Busfree detection is enabled.
1931 */
1932 if (silent == FALSE)
e3869ec7 1933 kprintf("LQICRC_NLQ\n");
984263bc 1934 if (scb == NULL) {
e3869ec7 1935 kprintf("%s: No SCB valid for LQICRC_NLQ. "
984263bc
MD
1936 "Resetting bus\n", ahd_name(ahd));
1937 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
1938 return;
1939 }
1940 } else if ((lqistat1 & LQIBADLQI) != 0) {
e3869ec7 1941 kprintf("Need to handle BADLQI!\n");
984263bc
MD
1942 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
1943 return;
1944 } else if ((perrdiag & (PARITYERR|PREVPHASE)) == PARITYERR) {
1945 if ((curphase & ~P_DATAIN_DT) != 0) {
1946 /* Ack the byte. So we can continue. */
1947 if (silent == FALSE)
e3869ec7 1948 kprintf("Acking %s to clear perror\n",
984263bc
MD
1949 ahd_lookup_phase_entry(curphase)->phasemsg);
1950 ahd_inb(ahd, SCSIDAT);
1951 }
1952
1953 if (curphase == P_MESGIN)
1954 msg_out = MSG_PARITY_ERROR;
1955 }
1956
1957 /*
1958 * We've set the hardware to assert ATN if we
1959 * get a parity error on "in" phases, so all we
1960 * need to do is stuff the message buffer with
1961 * the appropriate message. "In" phases have set
 1962 * msg_out to something other than MSG_NOOP.
1963 */
1964 ahd->send_msg_perror = msg_out;
1965 if (scb != NULL && msg_out == MSG_INITIATOR_DET_ERR)
1966 scb->flags |= SCB_TRANSMISSION_ERROR;
1967 ahd_outb(ahd, MSG_OUT, HOST_MSG);
1968 ahd_outb(ahd, CLRINT, CLRSCSIINT);
1969 ahd_unpause(ahd);
1970}
1971
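/*
 * Recover from LQIPHASE_LQ/LQIPHASE_NLQ interrupts raised by the LQI
 * manager when it sees an unexpected phase on a packet boundary.
 */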
1972static void
1973ahd_handle_lqiphase_error(struct ahd_softc *ahd, u_int lqistat1)
1974{
1975 /*
1976 * Clear the sources of the interrupts.
1977 */
1978 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
1979 ahd_outb(ahd, CLRLQIINT1, lqistat1);
1980
1981 /*
1982 * If the "illegal" phase changes were in response
1983 * to our ATN to flag a CRC error, AND we ended up
1984 * on packet boundaries, clear the error, restart the
1985 * LQI manager as appropriate, and go on our merry
1986 * way toward sending the message. Otherwise, reset
1987 * the bus to clear the error.
1988 */
1989 ahd_set_active_fifo(ahd);
1990 if ((ahd_inb(ahd, SCSISIGO) & ATNO) != 0
1991 && (ahd_inb(ahd, MDFFSTAT) & DLZERO) != 0) {
1992 if ((lqistat1 & LQIPHASE_LQ) != 0) {
e3869ec7 1993 kprintf("LQIRETRY for LQIPHASE_LQ\n");
984263bc
MD
1994 ahd_outb(ahd, LQCTL2, LQIRETRY);
1995 } else if ((lqistat1 & LQIPHASE_NLQ) != 0) {
e3869ec7 1996 kprintf("LQIRETRY for LQIPHASE_NLQ\n");
984263bc
MD
1997 ahd_outb(ahd, LQCTL2, LQIRETRY);
1998 } else
ed20d0e3 1999 panic("ahd_handle_lqiphase_error: No phase errors");
984263bc
MD
2000 ahd_dump_card_state(ahd);
2001 ahd_outb(ahd, CLRINT, CLRSCSIINT);
2002 ahd_unpause(ahd);
2003 } else {
e3869ec7 2004 kprintf("Resetting Channel for LQI Phase error\n");
984263bc
MD
2005 ahd_dump_card_state(ahd);
2006 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
2007 }
2008}
2009
2010/*
2011 * Packetized unexpected or expected busfree.
2012 * Entered in mode based on busfreetime.
2013 */
2014static int
2015ahd_handle_pkt_busfree(struct ahd_softc *ahd, u_int busfreetime)
2016{
2017 u_int lqostat1;
2018
2019 AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
2020 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
2021 lqostat1 = ahd_inb(ahd, LQOSTAT1);
2022 if ((lqostat1 & LQOBUSFREE) != 0) {
2023 struct scb *scb;
2024 u_int scbid;
2025 u_int saved_scbptr;
2026 u_int waiting_h;
2027 u_int waiting_t;
2028 u_int next;
2029
984263bc
MD
2030 /*
2031 * The LQO manager detected an unexpected busfree
2032 * either:
2033 *
2034 * 1) During an outgoing LQ.
2035 * 2) After an outgoing LQ but before the first
2036 * REQ of the command packet.
2037 * 3) During an outgoing command packet.
2038 *
2039 * In all cases, CURRSCB is pointing to the
2040 * SCB that encountered the failure. Clean
2041 * up the queue, clear SELDO and LQOBUSFREE,
2042 * and allow the sequencer to restart the select
 2043 * out at its leisure.
2044 */
2045 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
2046 scbid = ahd_inw(ahd, CURRSCB);
2047 scb = ahd_lookup_scb(ahd, scbid);
2048 if (scb == NULL)
2049 panic("SCB not valid during LQOBUSFREE");
2050 /*
2051 * Clear the status.
2052 */
2053 ahd_outb(ahd, CLRLQOINT1, CLRLQOBUSFREE);
2054 if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0)
2055 ahd_outb(ahd, CLRLQOINT1, 0);
2056 ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO);
2057 ahd_flush_device_writes(ahd);
2058 ahd_outb(ahd, CLRSINT0, CLRSELDO);
2059
2060 /*
2061 * Return the LQO manager to its idle loop. It will
2062 * not do this automatically if the busfree occurs
2063 * after the first REQ of either the LQ or command
2064 * packet or between the LQ and command packet.
2065 */
2066 ahd_outb(ahd, LQCTL2, ahd_inb(ahd, LQCTL2) | LQOTOIDLE);
2067
2068 /*
2069 * Update the waiting for selection queue so
2070 * we restart on the correct SCB.
2071 */
2072 waiting_h = ahd_inw(ahd, WAITING_TID_HEAD);
2073 saved_scbptr = ahd_get_scbptr(ahd);
2074 if (waiting_h != scbid) {
2075
2076 ahd_outw(ahd, WAITING_TID_HEAD, scbid);
2077 waiting_t = ahd_inw(ahd, WAITING_TID_TAIL);
2078 if (waiting_t == waiting_h) {
2079 ahd_outw(ahd, WAITING_TID_TAIL, scbid);
2080 next = SCB_LIST_NULL;
2081 } else {
2082 ahd_set_scbptr(ahd, waiting_h);
2083 next = ahd_inw_scbram(ahd, SCB_NEXT2);
2084 }
2085 ahd_set_scbptr(ahd, scbid);
2086 ahd_outw(ahd, SCB_NEXT2, next);
2087 }
2088 ahd_set_scbptr(ahd, saved_scbptr);
2089 if (scb->crc_retry_count < AHD_MAX_LQ_CRC_ERRORS) {
2090 if (SCB_IS_SILENT(scb) == FALSE) {
2091 ahd_print_path(ahd, scb);
e3869ec7 2092 kprintf("Probable outgoing LQ CRC error. "
984263bc
MD
2093 "Retrying command\n");
2094 }
2095 scb->crc_retry_count++;
2096 } else {
750f3593
PA
2097 aic_set_transaction_status(scb, CAM_UNCOR_PARITY);
2098 aic_freeze_scb(scb);
984263bc
MD
2099 ahd_freeze_devq(ahd, scb);
2100 }
2101 /* Return unpausing the sequencer. */
2102 return (0);
2103 } else if ((ahd_inb(ahd, PERRDIAG) & PARITYERR) != 0) {
2104 /*
2105 * Ignore what are really parity errors that
 2106 * occur on the last REQ of a free-running
 2107 * clock prior to going busfree. Some drives
 2108 * do not properly active-negate just before
 2109 * going busfree, resulting in a parity glitch.
2110 */
2111 ahd_outb(ahd, CLRSINT1, CLRSCSIPERR|CLRBUSFREE);
2112#ifdef AHD_DEBUG
2113 if ((ahd_debug & AHD_SHOW_MASKED_ERRORS) != 0)
e3869ec7 2114 kprintf("%s: Parity on last REQ detected "
984263bc
MD
2115 "during busfree phase.\n",
2116 ahd_name(ahd));
2117#endif
2118 /* Return unpausing the sequencer. */
2119 return (0);
2120 }
2121 if (ahd->src_mode != AHD_MODE_SCSI) {
2122 u_int scbid;
2123 struct scb *scb;
2124
2125 scbid = ahd_get_scbptr(ahd);
2126 scb = ahd_lookup_scb(ahd, scbid);
2127 ahd_print_path(ahd, scb);
e3869ec7 2128 kprintf("Unexpected PKT busfree condition\n");
984263bc
MD
2129 ahd_dump_card_state(ahd);
2130 ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb), 'A',
2131 SCB_GET_LUN(scb), SCB_GET_TAG(scb),
2132 ROLE_INITIATOR, CAM_UNEXP_BUSFREE);
2133
2134 /* Return restarting the sequencer. */
2135 return (1);
2136 }
e3869ec7 2137 kprintf("%s: Unexpected PKT busfree condition\n", ahd_name(ahd));
984263bc
MD
2138 ahd_dump_card_state(ahd);
2139 /* Restart the sequencer. */
2140 return (1);
2141}
2142
2143/*
2144 * Non-packetized unexpected or expected busfree.
2145 */
2146static int
2147ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
2148{
2149 struct ahd_devinfo devinfo;
2150 struct scb *scb;
2151 u_int lastphase;
2152 u_int saved_scsiid;
2153 u_int saved_lun;
2154 u_int target;
2155 u_int initiator_role_id;
2156 u_int scbid;
2157 u_int ppr_busfree;
2158 int printerror;
2159
2160 /*
2161 * Look at what phase we were last in. If its message out,
2162 * chances are pretty good that the busfree was in response
2163 * to one of our abort requests.
2164 */
2165 lastphase = ahd_inb(ahd, LASTPHASE);
2166 saved_scsiid = ahd_inb(ahd, SAVED_SCSIID);
2167 saved_lun = ahd_inb(ahd, SAVED_LUN);
2168 target = SCSIID_TARGET(ahd, saved_scsiid);
2169 initiator_role_id = SCSIID_OUR_ID(saved_scsiid);
2170 ahd_compile_devinfo(&devinfo, initiator_role_id,
2171 target, saved_lun, 'A', ROLE_INITIATOR);
2172 printerror = 1;
2173
2174 scbid = ahd_get_scbptr(ahd);
2175 scb = ahd_lookup_scb(ahd, scbid);
2176 if (scb != NULL
2177 && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) != 0)
2178 scb = NULL;
2179
2180 ppr_busfree = (ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0;
2181 if (lastphase == P_MESGOUT) {
2182 u_int tag;
2183
2184 tag = SCB_LIST_NULL;
2185 if (ahd_sent_msg(ahd, AHDMSG_1B, MSG_ABORT_TAG, TRUE)
2186 || ahd_sent_msg(ahd, AHDMSG_1B, MSG_ABORT, TRUE)) {
2187 int found;
2188 int sent_msg;
2189
2190 if (scb == NULL) {
2191 ahd_print_devinfo(ahd, &devinfo);
e3869ec7 2192 kprintf("Abort for unidentified "
984263bc
MD
2193 "connection completed.\n");
2194 /* restart the sequencer. */
2195 return (1);
2196 }
2197 sent_msg = ahd->msgout_buf[ahd->msgout_index - 1];
2198 ahd_print_path(ahd, scb);
e3869ec7 2199 kprintf("SCB %d - Abort%s Completed.\n",
984263bc
MD
2200 SCB_GET_TAG(scb),
2201 sent_msg == MSG_ABORT_TAG ? "" : " Tag");
2202
2203 if (sent_msg == MSG_ABORT_TAG)
2204 tag = SCB_GET_TAG(scb);
2205
2206 if ((scb->flags & SCB_CMDPHASE_ABORT) != 0) {
2207 /*
2208 * This abort is in response to an
2209 * unexpected switch to command phase
2210 * for a packetized connection. Since
2211 * the identify message was never sent,
2212 * "saved lun" is 0. We really want to
2213 * abort only the SCB that encountered
2214 * this error, which could have a different
2215 * lun. The SCB will be retried so the OS
2216 * will see the UA after renegotiating to
2217 * packetized.
2218 */
2219 tag = SCB_GET_TAG(scb);
2220 saved_lun = scb->hscb->lun;
2221 }
2222 found = ahd_abort_scbs(ahd, target, 'A', saved_lun,
2223 tag, ROLE_INITIATOR,
2224 CAM_REQ_ABORTED);
e3869ec7 2225 kprintf("found == 0x%x\n", found);
984263bc
MD
2226 printerror = 0;
2227 } else if (ahd_sent_msg(ahd, AHDMSG_1B,
2228 MSG_BUS_DEV_RESET, TRUE)) {
84754cd0 2229#if defined(__DragonFly__) || defined(__FreeBSD__)
984263bc
MD
2230 /*
2231 * Don't mark the user's request for this BDR
2232 * as completing with CAM_BDR_SENT. CAM3
2233 * specifies CAM_REQ_CMP.
2234 */
2235 if (scb != NULL
2236 && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV
2237 && ahd_match_scb(ahd, scb, target, 'A',
2238 CAM_LUN_WILDCARD, SCB_LIST_NULL,
2239 ROLE_INITIATOR))
750f3593 2240 aic_set_transaction_status(scb, CAM_REQ_CMP);
984263bc
MD
2241#endif
2242 ahd_handle_devreset(ahd, &devinfo, CAM_LUN_WILDCARD,
2243 CAM_BDR_SENT, "Bus Device Reset",
2244 /*verbose_level*/0);
2245 printerror = 0;
2246 } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, FALSE)
2247 && ppr_busfree == 0) {
2248 struct ahd_initiator_tinfo *tinfo;
2249 struct ahd_tmode_tstate *tstate;
2250
2251 /*
fb5acdc8
PA
2252 * PPR Rejected.
2253 *
2254 * If the previous negotiation was packetized,
2255 * this could be because the device has been
2256 * reset without our knowledge. Force our
2257 * current negotiation to async and retry the
2258 * negotiation. Otherwise retry the command
2259 * with non-ppr negotiation.
984263bc
MD
2260 */
2261#ifdef AHD_DEBUG
2262 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
e3869ec7 2263 kprintf("PPR negotiation rejected busfree.\n");
984263bc
MD
2264#endif
2265 tinfo = ahd_fetch_transinfo(ahd, devinfo.channel,
2266 devinfo.our_scsiid,
2267 devinfo.target, &tstate);
fb5acdc8
PA
2268 if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ)!=0) {
2269 ahd_set_width(ahd, &devinfo,
2270 MSG_EXT_WDTR_BUS_8_BIT,
2271 AHD_TRANS_CUR,
2272 /*paused*/TRUE);
2273 ahd_set_syncrate(ahd, &devinfo,
2274 /*period*/0, /*offset*/0,
2275 /*ppr_options*/0,
2276 AHD_TRANS_CUR,
2277 /*paused*/TRUE);
2278 /*
2279 * The expect PPR busfree handler below
2280 * will effect the retry and necessary
2281 * abort.
2282 */
2283 } else {
2284 tinfo->curr.transport_version = 2;
2285 tinfo->goal.transport_version = 2;
2286 tinfo->goal.ppr_options = 0;
2287 /*
2288 * Remove any SCBs in the waiting for selection
2289 * queue that may also be for this target so
2290 * that command ordering is preserved.
2291 */
2292 ahd_freeze_devq(ahd, scb);
2293 ahd_qinfifo_requeue_tail(ahd, scb);
2294 printerror = 0;
2295 }
984263bc
MD
2296 } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, FALSE)
2297 && ppr_busfree == 0) {
2298 /*
2299 * Negotiation Rejected. Go-narrow and
2300 * retry command.
2301 */
2302#ifdef AHD_DEBUG
2303 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
e3869ec7 2304 kprintf("WDTR negotiation rejected busfree.\n");
984263bc
MD
2305#endif
2306 ahd_set_width(ahd, &devinfo,
2307 MSG_EXT_WDTR_BUS_8_BIT,
2308 AHD_TRANS_CUR|AHD_TRANS_GOAL,
2309 /*paused*/TRUE);
fb5acdc8
PA
2310 /*
2311 * Remove any SCBs in the waiting for selection
2312 * queue that may also be for this target so that
2313 * command ordering is preserved.
2314 */
2315 ahd_freeze_devq(ahd, scb);
984263bc
MD
2316 ahd_qinfifo_requeue_tail(ahd, scb);
2317 printerror = 0;
2318 } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, FALSE)
2319 && ppr_busfree == 0) {
2320 /*
2321 * Negotiation Rejected. Go-async and
2322 * retry command.
2323 */
2324#ifdef AHD_DEBUG
2325 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
e3869ec7 2326 kprintf("SDTR negotiation rejected busfree.\n");
984263bc
MD
2327#endif
2328 ahd_set_syncrate(ahd, &devinfo,
2329 /*period*/0, /*offset*/0,
2330 /*ppr_options*/0,
2331 AHD_TRANS_CUR|AHD_TRANS_GOAL,
2332 /*paused*/TRUE);
fb5acdc8
PA
2333 /*
2334 * Remove any SCBs in the waiting for selection
2335 * queue that may also be for this target so that
2336 * command ordering is preserved.
2337 */
2338 ahd_freeze_devq(ahd, scb);
984263bc
MD
2339 ahd_qinfifo_requeue_tail(ahd, scb);
2340 printerror = 0;
2341 } else if ((ahd->msg_flags & MSG_FLAG_EXPECT_IDE_BUSFREE) != 0
2342 && ahd_sent_msg(ahd, AHDMSG_1B,
2343 MSG_INITIATOR_DET_ERR, TRUE)) {
2344
2345#ifdef AHD_DEBUG
2346 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
e3869ec7 2347 kprintf("Expected IDE Busfree\n");
984263bc
MD
2348#endif
2349 printerror = 0;
2350 } else if ((ahd->msg_flags & MSG_FLAG_EXPECT_QASREJ_BUSFREE)
2351 && ahd_sent_msg(ahd, AHDMSG_1B,
2352 MSG_MESSAGE_REJECT, TRUE)) {
2353
2354#ifdef AHD_DEBUG
2355 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
e3869ec7 2356 kprintf("Expected QAS Reject Busfree\n");
984263bc
MD
2357#endif
2358 printerror = 0;
2359 }
2360 }
2361
2362 /*
2363 * The busfree required flag is honored at the end of
2364 * the message phases. We check it last in case we
2365 * had to send some other message that caused a busfree.
2366 */
2367 if (printerror != 0
2368 && (lastphase == P_MESGIN || lastphase == P_MESGOUT)
2369 && ((ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0)) {
2370
2371 ahd_freeze_devq(ahd, scb);
750f3593
PA
2372 aic_set_transaction_status(scb, CAM_REQUEUE_REQ);
2373 aic_freeze_scb(scb);
984263bc
MD
2374 if ((ahd->msg_flags & MSG_FLAG_IU_REQ_CHANGED) != 0) {
2375 ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb),
2376 SCB_GET_CHANNEL(ahd, scb),
2377 SCB_GET_LUN(scb), SCB_LIST_NULL,
2378 ROLE_INITIATOR, CAM_REQ_ABORTED);
2379 } else {
2380#ifdef AHD_DEBUG
2381 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
e3869ec7 2382 kprintf("PPR Negotiation Busfree.\n");
984263bc
MD
2383#endif
2384 ahd_done(ahd, scb);
2385 }
2386 printerror = 0;
2387 }
2388 if (printerror != 0) {
2389 int aborted;
2390
2391 aborted = 0;
2392 if (scb != NULL) {
2393 u_int tag;
2394
2395 if ((scb->hscb->control & TAG_ENB) != 0)
2396 tag = SCB_GET_TAG(scb);
2397 else
2398 tag = SCB_LIST_NULL;
2399 ahd_print_path(ahd, scb);
2400 aborted = ahd_abort_scbs(ahd, target, 'A',
2401 SCB_GET_LUN(scb), tag,
2402 ROLE_INITIATOR,
2403 CAM_UNEXP_BUSFREE);
2404 } else {
2405 /*
2406 * We had not fully identified this connection,
2407 * so we cannot abort anything.
2408 */
e3869ec7 2409 kprintf("%s: ", ahd_name(ahd));
984263bc 2410 }
e3869ec7 2411 kprintf("Unexpected busfree %s, %d SCBs aborted, "
984263bc
MD
2412 "PRGMCNT == 0x%x\n",
2413 ahd_lookup_phase_entry(lastphase)->phasemsg,
2414 aborted,
750f3593 2415 ahd_inw(ahd, PRGMCNT));
984263bc 2416 ahd_dump_card_state(ahd);
fb5acdc8
PA
2417 if (lastphase != P_BUSFREE)
2418 ahd_force_renegotiation(ahd, &devinfo);
984263bc
MD
2419 }
2420 /* Always restart the sequencer. */
2421 return (1);
2422}
2423
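/*
 * Handle a protocol violation detected by the sequencer.  Classify the
 * failure (no IDENTIFY received, no matching SCB, missing CDB, or a
 * command completed without status) and either reset the bus or assert
 * ATN and queue an abort for the offending connection.
 */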
2424static void
2425ahd_handle_proto_violation(struct ahd_softc *ahd)
2426{
2427 struct ahd_devinfo devinfo;
2428 struct scb *scb;
2429 u_int scbid;
2430 u_int seq_flags;
2431 u_int curphase;
2432 u_int lastphase;
2433 int found;
2434
2435 ahd_fetch_devinfo(ahd, &devinfo);
2436 scbid = ahd_get_scbptr(ahd);
2437 scb = ahd_lookup_scb(ahd, scbid);
2438 seq_flags = ahd_inb(ahd, SEQ_FLAGS);
2439 curphase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
2440 lastphase = ahd_inb(ahd, LASTPHASE);
2441 if ((seq_flags & NOT_IDENTIFIED) != 0) {
2442
2443 /*
2444 * The reconnecting target either did not send an
2445 * identify message, or did, but we didn't find an SCB
2446 * to match.
2447 */
2448 ahd_print_devinfo(ahd, &devinfo);
e3869ec7 2449 kprintf("Target did not send an IDENTIFY message. "
984263bc
MD
2450 "LASTPHASE = 0x%x.\n", lastphase);
2451 scb = NULL;
2452 } else if (scb == NULL) {
2453 /*
2454 * We don't seem to have an SCB active for this
2455 * transaction. Print an error and reset the bus.
2456 */
2457 ahd_print_devinfo(ahd, &devinfo);
e3869ec7 2458 kprintf("No SCB found during protocol violation\n");
984263bc
MD
2459 goto proto_violation_reset;
2460 } else {
750f3593 2461 aic_set_transaction_status(scb, CAM_SEQUENCE_FAIL);
984263bc
MD
2462 if ((seq_flags & NO_CDB_SENT) != 0) {
2463 ahd_print_path(ahd, scb);
e3869ec7 2464 kprintf("No or incomplete CDB sent to device.\n");
984263bc
MD
2465 } else if ((ahd_inb_scbram(ahd, SCB_CONTROL)
2466 & STATUS_RCVD) == 0) {
2467 /*
2468 * The target never bothered to provide status to
2469 * us prior to completing the command. Since we don't
2470 * know the disposition of this command, we must attempt
2471 * to abort it. Assert ATN and prepare to send an abort
2472 * message.
2473 */
2474 ahd_print_path(ahd, scb);
e3869ec7 2475 kprintf("Completed command without status.\n");
984263bc
MD
2476 } else {
2477 ahd_print_path(ahd, scb);
e3869ec7 2478 kprintf("Unknown protocol violation.\n");
984263bc
MD
2479 ahd_dump_card_state(ahd);
2480 }
2481 }
2482 if ((lastphase & ~P_DATAIN_DT) == 0
2483 || lastphase == P_COMMAND) {
2484proto_violation_reset:
2485 /*
2486 * Target either went directly to data
2487 * phase or didn't respond to our ATN.
2488 * The only safe thing to do is to blow
2489 * it away with a bus reset.
2490 */
2491 found = ahd_reset_channel(ahd, 'A', TRUE);
e3869ec7 2492 kprintf("%s: Issued Channel %c Bus Reset. "
984263bc
MD
2493 "%d SCBs aborted\n", ahd_name(ahd), 'A', found);
2494 } else {
2495 /*
2496 * Leave the selection hardware off in case
2497 * this abort attempt will affect yet to
2498 * be sent commands.
2499 */
2500 ahd_outb(ahd, SCSISEQ0,
2501 ahd_inb(ahd, SCSISEQ0) & ~ENSELO);
2502 ahd_assert_atn(ahd);
2503 ahd_outb(ahd, MSG_OUT, HOST_MSG);
2504 if (scb == NULL) {
2505 ahd_print_devinfo(ahd, &devinfo);
2506 ahd->msgout_buf[0] = MSG_ABORT_TASK;
2507 ahd->msgout_len = 1;
2508 ahd->msgout_index = 0;
2509 ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
2510 } else {
2511 ahd_print_path(ahd, scb);
2512 scb->flags |= SCB_ABORT;
2513 }
e3869ec7 2514 kprintf("Protocol violation %s. Attempting to abort.\n",
984263bc
MD
2515 ahd_lookup_phase_entry(curphase)->phasemsg);
2516 }
2517}
2518
2519/*
2520 * Force renegotiation to occur the next time we initiate
2521 * a command to the current device.
2522 */
2523static void
2524ahd_force_renegotiation(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
2525{
2526 struct ahd_initiator_tinfo *targ_info;
2527 struct ahd_tmode_tstate *tstate;
2528
2529#ifdef AHD_DEBUG
2530 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
2531 ahd_print_devinfo(ahd, devinfo);
e3869ec7 2532 kprintf("Forcing renegotiation\n");
984263bc
MD
2533 }
2534#endif
2535 targ_info = ahd_fetch_transinfo(ahd,
2536 devinfo->channel,
2537 devinfo->our_scsiid,
2538 devinfo->target,
2539 &tstate);
2540 ahd_update_neg_request(ahd, devinfo, tstate,
2541 targ_info, AHD_NEG_IF_NON_ASYNC);
2542}
2543
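/*
 * If the sequencer is stopped inside one of its critical sections,
 * single step it out before the host modifies controller state.
 * Interrupt sources that would fire during stepping are masked and
 * restored once the sequencer is clear of the section.
 */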
2544#define AHD_MAX_STEPS 2000
2545void
2546ahd_clear_critical_section(struct ahd_softc *ahd)
2547{
2548 ahd_mode_state saved_modes;
2549 int stepping;
2550 int steps;
2551 int first_instr;
2552 u_int simode0;
2553 u_int simode1;
2554 u_int simode3;
2555 u_int lqimode0;
2556 u_int lqimode1;
2557 u_int lqomode0;
2558 u_int lqomode1;
2559
2560 if (ahd->num_critical_sections == 0)
2561 return;
2562
2563 stepping = FALSE;
2564 steps = 0;
2565 first_instr = 0;
2566 simode0 = 0;
2567 simode1 = 0;
2568 simode3 = 0;
2569 lqimode0 = 0;
2570 lqimode1 = 0;
2571 lqomode0 = 0;
2572 lqomode1 = 0;
2573 saved_modes = ahd_save_modes(ahd);
2574 for (;;) {
2575 struct cs *cs;
2576 u_int seqaddr;
2577 u_int i;
2578
2579 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
750f3593 2580 seqaddr = ahd_inw(ahd, CURADDR);
984263bc
MD
2581
2582 cs = ahd->critical_sections;
2583 for (i = 0; i < ahd->num_critical_sections; i++, cs++) {
2584
2585 if (cs->begin < seqaddr && cs->end >= seqaddr)
2586 break;
2587 }
2588
2589 if (i == ahd->num_critical_sections)
2590 break;
2591
2592 if (steps > AHD_MAX_STEPS) {
e3869ec7 2593 kprintf("%s: Infinite loop in critical section\n"
984263bc
MD
2594 "%s: First Instruction 0x%x now 0x%x\n",
2595 ahd_name(ahd), ahd_name(ahd), first_instr,
2596 seqaddr);
2597 ahd_dump_card_state(ahd);
2598 panic("critical section loop");
2599 }
2600
2601 steps++;
2602#ifdef AHD_DEBUG
2603 if ((ahd_debug & AHD_SHOW_MISC) != 0)
e3869ec7 2604 kprintf("%s: Single stepping at 0x%x\n", ahd_name(ahd),
984263bc
MD
2605 seqaddr);
2606#endif
2607 if (stepping == FALSE) {
2608
2609 first_instr = seqaddr;
2610 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
2611 simode0 = ahd_inb(ahd, SIMODE0);
2612 simode3 = ahd_inb(ahd, SIMODE3);
2613 lqimode0 = ahd_inb(ahd, LQIMODE0);
2614 lqimode1 = ahd_inb(ahd, LQIMODE1);
2615 lqomode0 = ahd_inb(ahd, LQOMODE0);
2616 lqomode1 = ahd_inb(ahd, LQOMODE1);
2617 ahd_outb(ahd, SIMODE0, 0);
2618 ahd_outb(ahd, SIMODE3, 0);
2619 ahd_outb(ahd, LQIMODE0, 0);
2620 ahd_outb(ahd, LQIMODE1, 0);
2621 ahd_outb(ahd, LQOMODE0, 0);
2622 ahd_outb(ahd, LQOMODE1, 0);
2623 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
b95ca0f4
PA
2624 simode1 = ahd_inb(ahd, SIMODE1);
2625 /*
2626 * We don't clear ENBUSFREE. Unfortunately
2627 * we cannot re-enable busfree detection within
2628 * the current connection, so we must leave it
2629 * on while single stepping.
2630 */
2631 ahd_outb(ahd, SIMODE1, simode1 & ENBUSFREE);
984263bc
MD
2632 ahd_outb(ahd, SEQCTL0, ahd_inb(ahd, SEQCTL0) | STEP);
2633 stepping = TRUE;
2634 }
2635 ahd_outb(ahd, CLRSINT1, CLRBUSFREE);
2636 ahd_outb(ahd, CLRINT, CLRSCSIINT);
2637 ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode);
2638 ahd_outb(ahd, HCNTRL, ahd->unpause);
b95ca0f4 2639 while (!ahd_is_paused(ahd))
750f3593 2640 aic_delay(200);
984263bc
MD
2641 ahd_update_modes(ahd);
2642 }
2643 if (stepping) {
2644 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
2645 ahd_outb(ahd, SIMODE0, simode0);
2646 ahd_outb(ahd, SIMODE3, simode3);
2647 ahd_outb(ahd, LQIMODE0, lqimode0);
2648 ahd_outb(ahd, LQIMODE1, lqimode1);
2649 ahd_outb(ahd, LQOMODE0, lqomode0);
2650 ahd_outb(ahd, LQOMODE1, lqomode1);
2651 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
2652 ahd_outb(ahd, SEQCTL0, ahd_inb(ahd, SEQCTL0) & ~STEP);
2653 ahd_outb(ahd, SIMODE1, simode1);
2654 /*
 2655 * SCSIINT seems to glitch occasionally when
2656 * the interrupt masks are restored. Clear SCSIINT
2657 * one more time so that only persistent errors
2658 * are seen as a real interrupt.
2659 */
2660 ahd_outb(ahd, CLRINT, CLRSCSIINT);
2661 }
2662 ahd_restore_modes(ahd, saved_modes);
2663}
2664
2665/*
2666 * Clear any pending interrupt status.
2667 */
2668void
2669ahd_clear_intstat(struct ahd_softc *ahd)
2670{
2671 AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
2672 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
2673 /* Clear any interrupt conditions this may have caused */
2674 ahd_outb(ahd, CLRLQIINT0, CLRLQIATNQAS|CLRLQICRCT1|CLRLQICRCT2
2675 |CLRLQIBADLQT|CLRLQIATNLQ|CLRLQIATNCMD);
2676 ahd_outb(ahd, CLRLQIINT1, CLRLQIPHASE_LQ|CLRLQIPHASE_NLQ|CLRLIQABORT
2677 |CLRLQICRCI_LQ|CLRLQICRCI_NLQ|CLRLQIBADLQI
2678 |CLRLQIOVERI_LQ|CLRLQIOVERI_NLQ|CLRNONPACKREQ);
2679 ahd_outb(ahd, CLRLQOINT0, CLRLQOTARGSCBPERR|CLRLQOSTOPT2|CLRLQOATNLQ
2680 |CLRLQOATNPKT|CLRLQOTCRC);
2681 ahd_outb(ahd, CLRLQOINT1, CLRLQOINITSCBPERR|CLRLQOSTOPI2|CLRLQOBADQAS
2682 |CLRLQOBUSFREE|CLRLQOPHACHGINPKT);
2683 if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) {
2684 ahd_outb(ahd, CLRLQOINT0, 0);
2685 ahd_outb(ahd, CLRLQOINT1, 0);
2686 }
2687 ahd_outb(ahd, CLRSINT3, CLRNTRAMPERR|CLROSRAMPERR);
2688 ahd_outb(ahd, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI
2689 |CLRBUSFREE|CLRSCSIPERR|CLRREQINIT);
2690 ahd_outb(ahd, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO
2691 |CLRIOERR|CLROVERRUN);
2692 ahd_outb(ahd, CLRINT, CLRSCSIINT);
2693}
2694
2695/**************************** Debugging Routines ******************************/
2696#ifdef AHD_DEBUG
2697uint32_t ahd_debug = AHD_DEBUG_OPTS;
2698#endif
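/*
 * Dump the interesting fields of an SCB's hardware SCB along with
 * its S/G list for debugging purposes.
 */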
2699void
2700ahd_print_scb(struct scb *scb)
2701{
2702 struct hardware_scb *hscb;
2703 int i;
2704
2705 hscb = scb->hscb;
e3869ec7 2706 kprintf("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n",
984263bc
MD
2707 (void *)scb,
2708 hscb->control,
2709 hscb->scsiid,
2710 hscb->lun,
2711 hscb->cdb_len);
e3869ec7 2712 kprintf("Shared Data: ");
984263bc 2713 for (i = 0; i < sizeof(hscb->shared_data.idata.cdb); i++)
e3869ec7
SW
2714 kprintf("%#02x", hscb->shared_data.idata.cdb[i]);
2715 kprintf(" dataptr:%#x%x datacnt:%#x sgptr:%#x tag:%#x\n",
750f3593
PA
2716 (uint32_t)((aic_le64toh(hscb->dataptr) >> 32) & 0xFFFFFFFF),
2717 (uint32_t)(aic_le64toh(hscb->dataptr) & 0xFFFFFFFF),
2718 aic_le32toh(hscb->datacnt),
2719 aic_le32toh(hscb->sgptr),
984263bc
MD
2720 SCB_GET_TAG(scb));
2721 ahd_dump_sglist(scb);
2722}
2723
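/*
 * Print every S/G element attached to an SCB, using the 64-bit or
 * 32-bit segment format depending on the controller's addressing mode.
 */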
2724void
2725ahd_dump_sglist(struct scb *scb)
2726{
2727 int i;
2728
2729 if (scb->sg_count > 0) {
2730 if ((scb->ahd_softc->flags & AHD_64BIT_ADDRESSING) != 0) {
2731 struct ahd_dma64_seg *sg_list;
2732
2733 sg_list = (struct ahd_dma64_seg*)scb->sg_list;
2734 for (i = 0; i < scb->sg_count; i++) {
2735 uint64_t addr;
984263bc 2736
750f3593 2737 addr = aic_le64toh(sg_list[i].addr);
e3869ec7 2738 kprintf("sg[%d] - Addr 0x%x%x : Length %d%s\n",
984263bc
MD
2739 i,
2740 (uint32_t)((addr >> 32) & 0xFFFFFFFF),
2741 (uint32_t)(addr & 0xFFFFFFFF),
2742 sg_list[i].len & AHD_SG_LEN_MASK,
2743 (sg_list[i].len & AHD_DMA_LAST_SEG)
2744 ? " Last" : "");
2745 }
2746 } else {
2747 struct ahd_dma_seg *sg_list;
2748
2749 sg_list = (struct ahd_dma_seg*)scb->sg_list;
2750 for (i = 0; i < scb->sg_count; i++) {
2751 uint32_t len;
2752
750f3593 2753 len = aic_le32toh(sg_list[i].len);
e3869ec7 2754 kprintf("sg[%d] - Addr 0x%x%x : Length %d%s\n",
984263bc 2755 i,
4b753d9e 2756 (len & AHD_SG_HIGH_ADDR_MASK) >> 24,
750f3593 2757 aic_le32toh(sg_list[i].addr),
984263bc
MD
2758 len & AHD_SG_LEN_MASK,
2759 len & AHD_DMA_LAST_SEG ? " Last" : "");
2760 }
2761 }
2762 }
2763}
2764
2765/************************* Transfer Negotiation *******************************/
2766/*
2767 * Allocate per target mode instance (ID we respond to as a target)
2768 * transfer negotiation data structures.
2769 */
2770static struct ahd_tmode_tstate *
2771ahd_alloc_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel)
2772{
2773 struct ahd_tmode_tstate *master_tstate;
2774 struct ahd_tmode_tstate *tstate;
2775 int i;
2776
2777 master_tstate = ahd->enabled_targets[ahd->our_id];
2778 if (ahd->enabled_targets[scsi_id] != NULL
2779 && ahd->enabled_targets[scsi_id] != master_tstate)
2780 panic("%s: ahd_alloc_tstate - Target already allocated",
2781 ahd_name(ahd));
efda3bd0 2782 tstate = kmalloc(sizeof(*tstate), M_DEVBUF, M_INTWAIT);
984263bc
MD
2783
2784 /*
2785 * If we have allocated a master tstate, copy user settings from
2786 * the master tstate (taken from SRAM or the EEPROM) for this
2787 * channel, but reset our current and goal settings to async/narrow
2788 * until an initiator talks to us.
2789 */
2790 if (master_tstate != NULL) {
2791 memcpy(tstate, master_tstate, sizeof(*tstate));
2792 memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns));
2793 for (i = 0; i < 16; i++) {
2794 memset(&tstate->transinfo[i].curr, 0,
2795 sizeof(tstate->transinfo[i].curr));
2796 memset(&tstate->transinfo[i].goal, 0,
2797 sizeof(tstate->transinfo[i].goal));
2798 }
2799 } else
2800 memset(tstate, 0, sizeof(*tstate));
2801 ahd->enabled_targets[scsi_id] = tstate;
2802 return (tstate);
2803}
2804
2805#ifdef AHD_TARGET_MODE
2806/*
2807 * Free per target mode instance (ID we respond to as a target)
2808 * transfer negotiation data structures.
2809 */
2810static void
2811ahd_free_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel, int force)
2812{
2813 struct ahd_tmode_tstate *tstate;
2814
2815 /*
2816 * Don't clean up our "master" tstate.
2817 * It has our default user settings.
2818 */
2819 if (scsi_id == ahd->our_id
2820 && force == FALSE)
2821 return;
2822
2823 tstate = ahd->enabled_targets[scsi_id];
2824 if (tstate != NULL)
efda3bd0 2825 kfree(tstate, M_DEVBUF);
984263bc
MD
2826 ahd->enabled_targets[scsi_id] = NULL;
2827}
2828#endif
2829
2830/*
2831 * Called when we have an active connection to a target on the bus,
 2832 * this function finds the nearest period to the input period, limited
 2833 * by the capabilities of the bus, the connectivity of the target, and
 2834 * the sync settings for the target.
2835 */
2836void
2837ahd_devlimited_syncrate(struct ahd_softc *ahd,
2838 struct ahd_initiator_tinfo *tinfo,
2839 u_int *period, u_int *ppr_options, role_t role)
2840{
2841 struct ahd_transinfo *transinfo;
2842 u_int maxsync;
2843
2844 if ((ahd_inb(ahd, SBLKCTL) & ENAB40) != 0
2845 && (ahd_inb(ahd, SSTAT2) & EXP_ACTIVE) == 0) {
2846 maxsync = AHD_SYNCRATE_PACED;
2847 } else {
2848 maxsync = AHD_SYNCRATE_ULTRA;
2849 /* Can't do DT related options on an SE bus */
2850 *ppr_options &= MSG_EXT_PPR_QAS_REQ;
2851 }
2852 /*
2853 * Never allow a value higher than our current goal
 2854 * period; otherwise we may allow a target-initiated
 2855 * negotiation to go above the limit as set by the
 2856 * user. In the case of an initiator-initiated
2857 * sync negotiation, we limit based on the user
2858 * setting. This allows the system to still accept
2859 * incoming negotiations even if target initiated
2860 * negotiation is not performed.
2861 */
2862 if (role == ROLE_TARGET)
2863 transinfo = &tinfo->user;
2864 else
2865 transinfo = &tinfo->goal;
2866 *ppr_options &= (transinfo->ppr_options|MSG_EXT_PPR_PCOMP_EN);
2867 if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) {
2868 maxsync = MAX(maxsync, AHD_SYNCRATE_ULTRA2);
2869 *ppr_options &= ~MSG_EXT_PPR_DT_REQ;
2870 }
2871 if (transinfo->period == 0) {
2872 *period = 0;
2873 *ppr_options = 0;
2874 } else {
2875 *period = MAX(*period, transinfo->period);
2876 ahd_find_syncrate(ahd, period, ppr_options, maxsync);
2877 }
2878}
2879
2880/*
2881 * Look up the valid period to SCSIRATE conversion in our table.
2882 * Return the period and offset that should be sent to the target
2883 * if this was the beginning of an SDTR.
2884 */
2885void
2886ahd_find_syncrate(struct ahd_softc *ahd, u_int *period,
2887 u_int *ppr_options, u_int maxsync)
2888{
2889 if (*period < maxsync)
2890 *period = maxsync;
2891
2892 if ((*ppr_options & MSG_EXT_PPR_DT_REQ) != 0
2893 && *period > AHD_SYNCRATE_MIN_DT)
2894 *ppr_options &= ~MSG_EXT_PPR_DT_REQ;
2895
2896 if (*period > AHD_SYNCRATE_MIN)
2897 *period = 0;
2898
2899 /* Honor PPR option conformance rules. */
2900 if (*period > AHD_SYNCRATE_PACED)
2901 *ppr_options &= ~MSG_EXT_PPR_RTI;
2902
2903 if ((*ppr_options & MSG_EXT_PPR_IU_REQ) == 0)
2904 *ppr_options &= (MSG_EXT_PPR_DT_REQ|MSG_EXT_PPR_QAS_REQ);
2905
2906 if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0)
2907 *ppr_options &= MSG_EXT_PPR_QAS_REQ;
2908
2909 /* Skip all PACED only entries if IU is not available */
2910 if ((*ppr_options & MSG_EXT_PPR_IU_REQ) == 0
2911 && *period < AHD_SYNCRATE_DT)
2912 *period = AHD_SYNCRATE_DT;
2913
2914 /* Skip all DT only entries if DT is not available */
2915 if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0
2916 && *period < AHD_SYNCRATE_ULTRA2)
2917 *period = AHD_SYNCRATE_ULTRA2;
2918}
2919
2920/*
2921 * Truncate the given synchronous offset to a value the
2922 * current adapter type and syncrate are capable of.
2923 */
2924void
2925ahd_validate_offset(struct ahd_softc *ahd,
2926 struct ahd_initiator_tinfo *tinfo,
2927 u_int period, u_int *offset, int wide,
2928 role_t role)
2929{
2930 u_int maxoffset;
2931
2932 /* Limit offset to what we can do */
2933 if (period == 0)
2934 maxoffset = 0;
2935 else if (period <= AHD_SYNCRATE_PACED) {
2936 if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0)
2937 maxoffset = MAX_OFFSET_PACED_BUG;
2938 else
2939 maxoffset = MAX_OFFSET_PACED;
2940 } else
2941 maxoffset = MAX_OFFSET_NON_PACED;
2942 *offset = MIN(*offset, maxoffset);
2943 if (tinfo != NULL) {
2944 if (role == ROLE_TARGET)
2945 *offset = MIN(*offset, tinfo->user.offset);
2946 else
2947 *offset = MIN(*offset, tinfo->goal.offset);
2948 }
2949}
2950
2951/*
2952 * Truncate the given transfer width parameter to a value the
2953 * current adapter type is capable of.
2954 */
2955void
2956ahd_validate_width(struct ahd_softc *ahd, struct ahd_initiator_tinfo *tinfo,
2957 u_int *bus_width, role_t role)
2958{
2959 switch (*bus_width) {
2960 default:
2961 if (ahd->features & AHD_WIDE) {
2962 /* Respond Wide */
2963 *bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2964 break;
2965 }
2966 /* FALLTHROUGH */
2967 case MSG_EXT_WDTR_BUS_8_BIT:
2968 *bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2969 break;
2970 }
2971 if (tinfo != NULL) {
2972 if (role == ROLE_TARGET)
2973 *bus_width = MIN(tinfo->user.width, *bus_width);
2974 else
2975 *bus_width = MIN(tinfo->goal.width, *bus_width);
2976 }
2977}
2978
2979/*
 2980 * Update the bitmask of targets with which the controller should
 2981 * negotiate at the next convenient opportunity. This currently
2982 * means the next time we send the initial identify messages for
2983 * a new transaction.
2984 */
2985int
2986ahd_update_neg_request(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
2987 struct ahd_tmode_tstate *tstate,
2988 struct ahd_initiator_tinfo *tinfo, ahd_neg_type neg_type)
2989{
2990 u_int auto_negotiate_orig;
2991
2992 auto_negotiate_orig = tstate->auto_negotiate;
2993 if (neg_type == AHD_NEG_ALWAYS) {
2994 /*
2995 * Force our "current" settings to be
2996 * unknown so that unless a bus reset
2997 * occurs the need to renegotiate is
2998 * recorded persistently.
2999 */
3000 if ((ahd->features & AHD_WIDE) != 0)
3001 tinfo->curr.width = AHD_WIDTH_UNKNOWN;
3002 tinfo->curr.period = AHD_PERIOD_UNKNOWN;
3003 tinfo->curr.offset = AHD_OFFSET_UNKNOWN;
3004 }
3005 if (tinfo->curr.period != tinfo->goal.period
3006 || tinfo->curr.width != tinfo->goal.width
3007 || tinfo->curr.offset != tinfo->goal.offset
3008 || tinfo->curr.ppr_options != tinfo->goal.ppr_options
3009 || (neg_type == AHD_NEG_IF_NON_ASYNC
3010 && (tinfo->goal.offset != 0
3011 || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT
3012 || tinfo->goal.ppr_options != 0)))
3013 tstate->auto_negotiate |= devinfo->target_mask;
3014 else
3015 tstate->auto_negotiate &= ~devinfo->target_mask;
3016
3017 return (auto_negotiate_orig != tstate->auto_negotiate);
3018}
3019
3020/*
3021 * Update the user/goal/curr tables of synchronous negotiation
3022 * parameters as well as, in the case of a current or active update,
3023 * any data structures on the host controller. In the case of an
3024 * active update, the specified target is currently talking to us on
3025 * the bus, so the transfer parameter update must take effect
3026 * immediately.
3027 */
3028void
3029ahd_set_syncrate(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
3030 u_int period, u_int offset, u_int ppr_options,
3031 u_int type, int paused)
3032{
3033 struct ahd_initiator_tinfo *tinfo;
3034 struct ahd_tmode_tstate *tstate;
3035 u_int old_period;
3036 u_int old_offset;
3037 u_int old_ppr;
3038 int active;
3039 int update_needed;
3040
3041 active = (type & AHD_TRANS_ACTIVE) == AHD_TRANS_ACTIVE;
3042 update_needed = 0;
3043
3044 if (period == 0 || offset == 0) {
3045 period = 0;
3046 offset = 0;
3047 }
3048
3049 tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
3050 devinfo->target, &tstate);
3051
3052 if ((type & AHD_TRANS_USER) != 0) {
3053 tinfo->user.period = period;
3054 tinfo->user.offset = offset;
3055 tinfo->user.ppr_options = ppr_options;
3056 }
3057
3058 if ((type & AHD_TRANS_GOAL) != 0) {
3059 tinfo->goal.period = period;
3060 tinfo->goal.offset = offset;
3061 tinfo->goal.ppr_options = ppr_options;
3062 }
3063
3064 old_period = tinfo->curr.period;
3065 old_offset = tinfo->curr.offset;
3066 old_ppr = tinfo->curr.ppr_options;
3067
3068 if ((type & AHD_TRANS_CUR) != 0
3069 && (old_period != period
3070 || old_offset != offset
3071 || old_ppr != ppr_options)) {
3072
3073 update_needed++;
3074
3075 tinfo->curr.period = period;
3076 tinfo->curr.offset = offset;
3077 tinfo->curr.ppr_options = ppr_options;
3078
3079 ahd_send_async(ahd, devinfo->channel, devinfo->target,
3080 CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL);
3081 if (bootverbose) {
3082 if (offset != 0) {
3083 int options;
3084
e3869ec7 3085 kprintf("%s: target %d synchronous with "
984263bc
MD
3086 "period = 0x%x, offset = 0x%x",
3087 ahd_name(ahd), devinfo->target,
3088 period, offset);
3089 options = 0;
3090 if ((ppr_options & MSG_EXT_PPR_RD_STRM) != 0) {
e3869ec7 3091 kprintf("(RDSTRM");
984263bc
MD
3092 options++;
3093 }
3094 if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0) {
e3869ec7 3095 kprintf("%s", options ? "|DT" : "(DT");
984263bc
MD
3096 options++;
3097 }
3098 if ((ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
e3869ec7 3099 kprintf("%s", options ? "|IU" : "(IU");
984263bc
MD
3100 options++;
3101 }
3102 if ((ppr_options & MSG_EXT_PPR_RTI) != 0) {
e3869ec7 3103 kprintf("%s", options ? "|RTI" : "(RTI");
984263bc
MD
3104 options++;
3105 }
3106 if ((ppr_options & MSG_EXT_PPR_QAS_REQ) != 0) {
e3869ec7 3107 kprintf("%s", options ? "|QAS" : "(QAS");
984263bc
MD
3108 options++;
3109 }
3110 if (options != 0)
e3869ec7 3111 kprintf(")\n");
984263bc 3112 else
e3869ec7 3113 kprintf("\n");
984263bc 3114 } else {
e3869ec7 3115 kprintf("%s: target %d using "
984263bc
MD
3116 "asynchronous transfers%s\n",
3117 ahd_name(ahd), devinfo->target,
3118 (ppr_options & MSG_EXT_PPR_QAS_REQ) != 0
3119 ? "(QAS)" : "");
3120 }
3121 }
3122 }
3123 /*
3124 * Always refresh the neg-table to handle the case of the
3125 * sequencer setting the ENATNO bit for a MK_MESSAGE request.
3126 * We will always renegotiate in that case if this is a
3127 * packetized request. Also manage the busfree expected flag
3128 * from this common routine so that we catch changes due to
3129 * WDTR or SDTR messages.
3130 */
3131 if ((type & AHD_TRANS_CUR) != 0) {
3132 if (!paused)
3133 ahd_pause(ahd);
3134 ahd_update_neg_table(ahd, devinfo, &tinfo->curr);
3135 if (!paused)
3136 ahd_unpause(ahd);
3137 if (ahd->msg_type != MSG_TYPE_NONE) {
3138 if ((old_ppr & MSG_EXT_PPR_IU_REQ)
3139 != (ppr_options & MSG_EXT_PPR_IU_REQ)) {
3140#ifdef AHD_DEBUG
3141 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
3142 ahd_print_devinfo(ahd, devinfo);
e3869ec7 3143 kprintf("Expecting IU Change busfree\n");
984263bc
MD
3144 }
3145#endif
3146 ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE
3147 | MSG_FLAG_IU_REQ_CHANGED;
3148 }
3149 if ((old_ppr & MSG_EXT_PPR_IU_REQ) != 0) {
3150#ifdef AHD_DEBUG
3151 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
e3869ec7 3152 kprintf("PPR with IU_REQ outstanding\n");
984263bc
MD
3153#endif
3154 ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE;
3155 }
3156 }
3157 }
3158
3159 update_needed += ahd_update_neg_request(ahd, devinfo, tstate,
3160 tinfo, AHD_NEG_TO_GOAL);
3161
3162 if (update_needed && active)
3163 ahd_update_pending_scbs(ahd);
3164}
3165
3166/*
3167 * Update the user/goal/curr tables of wide negotiation
3168 * parameters as well as, in the case of a current or active update,
3169 * any data structures on the host controller. In the case of an
3170 * active update, the specified target is currently talking to us on
3171 * the bus, so the transfer parameter update must take effect
3172 * immediately.
3173 */
3174void
3175ahd_set_width(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
3176 u_int width, u_int type, int paused)
3177{
3178 struct ahd_initiator_tinfo *tinfo;
3179 struct ahd_tmode_tstate *tstate;
3180 u_int oldwidth;
3181 int active;
3182 int update_needed;
3183
3184 active = (type & AHD_TRANS_ACTIVE) == AHD_TRANS_ACTIVE;
3185 update_needed = 0;
3186 tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
3187 devinfo->target, &tstate);
3188
3189 if ((type & AHD_TRANS_USER) != 0)
3190 tinfo->user.width = width;
3191
3192 if ((type & AHD_TRANS_GOAL) != 0)
3193 tinfo->goal.width = width;
3194
3195 oldwidth = tinfo->curr.width;
3196 if ((type & AHD_TRANS_CUR) != 0 && oldwidth != width) {
3197
3198 update_needed++;
3199
3200 tinfo->curr.width = width;
3201 ahd_send_async(ahd, devinfo->channel, devinfo->target,
3202 CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL);
3203 if (bootverbose) {
e3869ec7 3204 kprintf("%s: target %d using %dbit transfers\n",
984263bc
MD
3205 ahd_name(ahd), devinfo->target,
3206 8 * (0x01 << width));
3207 }
3208 }
3209
3210 if ((type & AHD_TRANS_CUR) != 0) {
3211 if (!paused)
3212 ahd_pause(ahd);
3213 ahd_update_neg_table(ahd, devinfo, &tinfo->curr);
3214 if (!paused)
3215 ahd_unpause(ahd);
3216 }
3217
3218 update_needed += ahd_update_neg_request(ahd, devinfo, tstate,
3219 tinfo, AHD_NEG_TO_GOAL);
3220 if (update_needed && active)
3221 ahd_update_pending_scbs(ahd);
3222
3223}
3224
3225/*
3226 * Update the current state of tagged queuing for a given target.
3227 */
3228void
3229ahd_set_tags(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
3230 ahd_queue_alg alg)
3231{
3232 ahd_platform_set_tags(ahd, devinfo, alg);
3233 ahd_send_async(ahd, devinfo->channel, devinfo->target,
3234 devinfo->lun, AC_TRANSFER_NEG, &alg);
3235}
3236
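/*
 * Program the controller's per-target negotiation table with the
 * given transfer parameters, applying any chip-specific workarounds
 * (paced transfer quirks, precomp/slew adjustments, and slow CRC
 * intervals) required for the selected rate.
 */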
3237static void
3238ahd_update_neg_table(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
3239 struct ahd_transinfo *tinfo)
3240{
3241 ahd_mode_state saved_modes;
3242 u_int period;
3243 u_int ppr_opts;
3244 u_int con_opts;
3245 u_int offset;
3246 u_int saved_negoaddr;
3247 uint8_t iocell_opts[sizeof(ahd->iocell_opts)];
3248
3249 saved_modes = ahd_save_modes(ahd);
3250 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
3251
3252 saved_negoaddr = ahd_inb(ahd, NEGOADDR);
3253 ahd_outb(ahd, NEGOADDR, devinfo->target);
3254 period = tinfo->period;
3255 offset = tinfo->offset;
3256 memcpy(iocell_opts, ahd->iocell_opts, sizeof(ahd->iocell_opts));
3257 ppr_opts = tinfo->ppr_options & (MSG_EXT_PPR_QAS_REQ|MSG_EXT_PPR_DT_REQ
3258 |MSG_EXT_PPR_IU_REQ|MSG_EXT_PPR_RTI);
3259 con_opts = 0;
3260 if (period == 0)
3261 period = AHD_SYNCRATE_ASYNC;
3262 if (period == AHD_SYNCRATE_160) {
3263
3264 if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0) {
3265 /*
 3266 * When the SPI4 spec was finalized, paced transfers
 3267 * were not made a configurable option in the PPR
 3268 * message. Instead they are assumed to be enabled for
 3269 * any syncrate faster than 80MHz. Nevertheless,
3270 * Harpoon2A4 allows this to be configurable.
3271 *
3272 * Harpoon2A4 also assumes at most 2 data bytes per
3273 * negotiated REQ/ACK offset. Paced transfers take
3274 * 4, so we must adjust our offset.
3275 */
3276 ppr_opts |= PPROPT_PACE;
3277 offset *= 2;
3278
3279 /*
3280 * Harpoon2A assumed that there would be a
 3281 * fallback rate between 160MHz and 80MHz,
3282 * so 7 is used as the period factor rather
3283 * than 8 for 160MHz.
3284 */
3285 period = AHD_SYNCRATE_REVA_160;
3286 }
3287 if ((tinfo->ppr_options & MSG_EXT_PPR_PCOMP_EN) == 0)
3288 iocell_opts[AHD_PRECOMP_SLEW_INDEX] &=
3289 ~AHD_PRECOMP_MASK;
3290 } else {
3291 /*
3292 * Precomp should be disabled for non-paced transfers.
3293 */
3294 iocell_opts[AHD_PRECOMP_SLEW_INDEX] &= ~AHD_PRECOMP_MASK;
3295
3296 if ((ahd->features & AHD_NEW_IOCELL_OPTS) != 0
750f3593
PA
3297 && (ppr_opts & MSG_EXT_PPR_DT_REQ) != 0
3298 && (ppr_opts & MSG_EXT_PPR_IU_REQ) == 0) {
984263bc
MD
3299 /*
3300 * Slow down our CRC interval to be
750f3593
PA
3301 * compatible with non-packetized
3302 * U160 devices that can't handle a
3303 * CRC at full speed.
984263bc
MD
3304 */
3305 con_opts |= ENSLOWCRC;
3306 }
750f3593
PA
3307
3308 if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0) {
3309 /*
3310 * On H2A4, revert to a slower slewrate
3311 * on non-paced transfers.
3312 */
3313 iocell_opts[AHD_PRECOMP_SLEW_INDEX] &=
3314 ~AHD_SLEWRATE_MASK;
3315 }
984263bc
MD
3316 }
3317
3318 ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_PRECOMP_SLEW);
3319 ahd_outb(ahd, ANNEXDAT, iocell_opts[AHD_PRECOMP_SLEW_INDEX]);
3320 ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_AMPLITUDE);
3321 ahd_outb(ahd, ANNEXDAT, iocell_opts[AHD_AMPLITUDE_INDEX]);
3322
3323 ahd_outb(ahd, NEGPERIOD, period);
3324 ahd_outb(ahd, NEGPPROPTS, ppr_opts);
3325 ahd_outb(ahd, NEGOFFSET, offset);
3326
3327 if (tinfo->width == MSG_EXT_WDTR_BUS_16_BIT)
3328 con_opts |= WIDEXFER;
3329
3330 /*
3331 * During packetized transfers, the target will
 3332 * give us the opportunity to send command packets
3333 * without us asserting attention.
3334 */
3335 if ((tinfo->ppr_options & MSG_EXT_PPR_IU_REQ) == 0)
3336 con_opts |= ENAUTOATNO;
3337 ahd_outb(ahd, NEGCONOPTS, con_opts);
3338 ahd_outb(ahd, NEGOADDR, saved_negoaddr);
3339 ahd_restore_modes(ahd, saved_modes);
3340}
3341
3342/*
3343 * When the transfer settings for a connection change, setup for
3344 * negotiation in pending SCBs to effect the change as quickly as
3345 * possible. We also cancel any negotiations that are scheduled
3346 * for inflight SCBs that have not been started yet.
3347 */
3348static void
3349ahd_update_pending_scbs(struct ahd_softc *ahd)
3350{
3351 struct scb *pending_scb;
3352 int pending_scb_count;
984263bc
MD
3353 int paused;
3354 u_int saved_scbptr;
3355 ahd_mode_state saved_modes;
3356
3357 /*
3358 * Traverse the pending SCB list and ensure that all of the
3359 * SCBs there have the proper settings. We can only safely
 3360 * clear the negotiation required flag (setting it requires the
 3361 * execution queue to be modified), and this is only possible
3362 * if we are not already attempting to select out for this
3363 * SCB. For this reason, all callers only call this routine
3364 * if we are changing the negotiation settings for the currently
3365 * active transaction on the bus.
3366 */
3367 pending_scb_count = 0;
3368 LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) {
3369 struct ahd_devinfo devinfo;
984263bc
MD
3370 struct ahd_initiator_tinfo *tinfo;
3371 struct ahd_tmode_tstate *tstate;
3372
3373 ahd_scb_devinfo(ahd, &devinfo, pending_scb);
3374 tinfo = ahd_fetch_transinfo(ahd, devinfo.channel,
3375 devinfo.our_scsiid,
3376 devinfo.target, &tstate);
984263bc
MD
3377 if ((tstate->auto_negotiate & devinfo.target_mask) == 0
3378 && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) {
3379 pending_scb->flags &= ~SCB_AUTO_NEGOTIATE;
fb5acdc8 3380 pending_scb->hscb->control &= ~MK_MESSAGE;
984263bc
MD
3381 }
3382 ahd_sync_scb(ahd, pending_scb,
3383 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3384 pending_scb_count++;
3385 }
3386
3387 if (pending_scb_count == 0)
3388 return;
3389
3390 if (ahd_is_paused(ahd)) {
3391 paused = 1;
3392 } else {
3393 paused = 0;
3394 ahd_pause(ahd);
3395 }
3396
3397 /*
3398 * Force the sequencer to reinitialize the selection for
3399 * the command at the head of the execution queue if it
 3400 * has already been set up. The negotiation changes may
f39dcdf3
PA
 3401 * affect whether we select-out with ATN. It is only
 3402 * safe to clear ENSELO when the bus is not free and no
 3403 * selection is in progress or completed.
984263bc
MD
3404 */
3405 saved_modes = ahd_save_modes(ahd);
3406 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
f39dcdf3
PA
3407 if ((ahd_inb(ahd, SCSISIGI) & BSYI) != 0
3408 && (ahd_inb(ahd, SSTAT0) & (SELDO|SELINGO)) == 0)
3409 ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO);
984263bc
MD
3410 saved_scbptr = ahd_get_scbptr(ahd);
3411 /* Ensure that the hscbs down on the card match the new information */
fb5acdc8
PA
3412 LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) {
3413 u_int scb_tag;
984263bc 3414 u_int control;
984263bc 3415
fb5acdc8 3416 scb_tag = SCB_GET_TAG(pending_scb);
4b753d9e 3417 ahd_set_scbptr(ahd, scb_tag);
984263bc
MD
3418 control = ahd_inb_scbram(ahd, SCB_CONTROL);
3419 control &= ~MK_MESSAGE;
fb5acdc8 3420 control |= pending_scb->hscb->control & MK_MESSAGE;
984263bc
MD
3421 ahd_outb(ahd, SCB_CONTROL, control);
3422 }
3423 ahd_set_scbptr(ahd, saved_scbptr);
3424 ahd_restore_modes(ahd, saved_modes);
3425
3426 if (paused == 0)
3427 ahd_unpause(ahd);
3428}
3429
3430/**************************** Pathing Information *****************************/
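/*
 * Build a devinfo structure describing the connection currently
 * established on the bus by reading our role and the saved SCSIID
 * and LUN from the controller.
 */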
3431static void
3432ahd_fetch_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
3433{
3434 ahd_mode_state saved_modes;
3435 u_int saved_scsiid;
3436 role_t role;
3437 int our_id;
3438
3439 saved_modes = ahd_save_modes(ahd);
3440 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
3441
3442 if (ahd_inb(ahd, SSTAT0) & TARGET)
3443 role = ROLE_TARGET;
3444 else
3445 role = ROLE_INITIATOR;
3446
3447 if (role == ROLE_TARGET
3448 && (ahd_inb(ahd, SEQ_FLAGS) & CMDPHASE_PENDING) != 0) {
3449 /* We were selected, so pull our id from TARGIDIN */
3450 our_id = ahd_inb(ahd, TARGIDIN) & OID;
3451 } else if (role == ROLE_TARGET)
3452 our_id = ahd_inb(ahd, TOWNID);
3453 else
3454 our_id = ahd_inb(ahd, IOWNID);
3455
3456 saved_scsiid = ahd_inb(ahd, SAVED_SCSIID);
3457 ahd_compile_devinfo(devinfo,
3458 our_id,
3459 SCSIID_TARGET(ahd, saved_scsiid),
3460 ahd_inb(ahd, SAVED_LUN),
3461 SCSIID_CHANNEL(ahd, saved_scsiid),
3462 role);
3463 ahd_restore_modes(ahd, saved_modes);
3464}
3465
3466void
3467ahd_print_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
3468{
e3869ec7 3469 kprintf("%s:%c:%d:%d: ", ahd_name(ahd), 'A',
984263bc
MD
3470 devinfo->target, devinfo->lun);
3471}
3472
3473struct ahd_phase_table_entry*
3474ahd_lookup_phase_entry(int phase)
3475{
3476 struct ahd_phase_table_entry *entry;
3477 struct ahd_phase_table_entry *last_entry;
3478
3479 /*
3480 * num_phases doesn't include the default entry which
3481 * will be returned if the phase doesn't match.
3482 */
3483 last_entry = &ahd_phase_table[num_phases];
3484 for (entry = ahd_phase_table; entry < last_entry; entry++) {
3485 if (phase == entry->phase)
3486 break;
3487 }
3488 return (entry);
3489}
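/*
 * Usage sketch (illustrative only; the helper below is hypothetical): the
 * table ends with a catch-all element, so the lookup never returns NULL.
 * A caller can tell that the default entry was returned by comparing the
 * phase stored in the entry with the phase it asked about.
 */
#if 0
static int
example_phase_is_known(int bus_phase)
{
	struct ahd_phase_table_entry *entry;

	entry = ahd_lookup_phase_entry(bus_phase);
	return (entry->phase == bus_phase);
}
#endif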
3490
3491void
3492ahd_compile_devinfo(struct ahd_devinfo *devinfo, u_int our_id, u_int target,
3493 u_int lun, char channel, role_t role)
3494{
3495 devinfo->our_scsiid = our_id;
3496 devinfo->target = target;
3497 devinfo->lun = lun;
3498 devinfo->target_offset = target;
3499 devinfo->channel = channel;
3500 devinfo->role = role;
3501 if (channel == 'B')
3502 devinfo->target_offset += 8;
3503 devinfo->target_mask = (0x01 << devinfo->target_offset);
3504}
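/*
 * Worked example (editorial sketch, not part of this file): compiling
 * devinfo for target 3 on channel 'B' as an initiator.  Channel 'B' bumps
 * target_offset by 8, so target_offset becomes 11 and target_mask becomes
 * (0x01 << 11).  The function name and the id values used here are
 * hypothetical.
 */
#if 0
static void
example_compile_devinfo(void)
{
	struct ahd_devinfo devinfo;

	ahd_compile_devinfo(&devinfo, /*our_id*/7, /*target*/3, /*lun*/0,
			    /*channel*/'B', ROLE_INITIATOR);
	/* devinfo.target_offset == 11, devinfo.target_mask == 0x0800 */
}
#endif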
3505
3506static void
3507ahd_scb_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
3508 struct scb *scb)
3509{
3510 role_t role;
3511 int our_id;
3512
3513 our_id = SCSIID_OUR_ID(scb->hscb->scsiid);
3514 role = ROLE_INITIATOR;
3515 if ((scb->hscb->control & TARGET_SCB) != 0)
3516 role = ROLE_TARGET;
3517 ahd_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahd, scb),
3518 SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahd, scb), role);
3519}
3520
3521
3522/************************ Message Phase Processing ****************************/
3523/*
3524 * When an initiator transaction with the MK_MESSAGE flag either reconnects
3525 * or enters the initial message out phase, we are interrupted. Fill our
3526 * outgoing message buffer with the appropriate message and begin handling
3527 * the message phase(s) manually.
3528 */
3529static void
3530ahd_setup_initiator_msgout(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
3531 struct scb *scb)
3532{
3533 /*
3534 * To facilitate adding multiple messages together,
3535 * each routine should increment the index and len
3536 * variables instead of setting them explicitly.
3537 */
3538 ahd->msgout_index = 0;
3539 ahd->msgout_len = 0;
3540
3541 if (ahd_currently_packetized(ahd))
3542 ahd->msg_flags |= MSG_FLAG_PACKETIZED;
3543
3544 if (ahd->send_msg_perror
3545 && ahd_inb(ahd, MSG_OUT) == HOST_MSG) {
3546 ahd->msgout_buf[ahd->msgout_index++] = ahd->send_msg_perror;
3547 ahd->msgout_len++;
3548 ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
3549#ifdef AHD_DEBUG
3550 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
e3869ec7 3551 kprintf("Setting up for Parity Error delivery\n");
984263bc
MD
3552#endif
3553 return;
3554 } else if (scb == NULL) {
e3869ec7 3555 kprintf("%s: WARNING. No pending message for "
984263bc
MD
3556 "I_T msgin. Issuing NO-OP\n", ahd_name(ahd));
3557 ahd->msgout_buf[ahd->msgout_index++] = MSG_NOOP;
3558 ahd->msgout_len++;
3559 ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
3560 return;
3561 }
3562
3563 if ((scb->flags & SCB_DEVICE_RESET) == 0
3564 && (scb->flags & SCB_PACKETIZED) == 0
3565 && ahd_inb(ahd, MSG_OUT) == MSG_IDENTIFYFLAG) {
3566 u_int identify_msg;
3567
3568 identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb);
3569 if ((scb->hscb->control & DISCENB) != 0)
3570 identify_msg |= MSG_IDENTIFY_DISCFLAG;
3571 ahd->msgout_buf[ahd->msgout_index++] = identify_msg;
3572 ahd->msgout_len++;
3573
3574 if ((scb->hscb->control & TAG_ENB) != 0) {
3575 ahd->msgout_buf[ahd->msgout_index++] =
3576 scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE);
3577 ahd->msgout_buf[ahd->msgout_index++] = SCB_GET_TAG(scb);
3578 ahd->msgout_len += 2;
3579 }
3580 }
3581
3582 if (scb->flags & SCB_DEVICE_RESET) {
3583 ahd->msgout_buf[ahd->msgout_index++] = MSG_BUS_DEV_RESET;
3584 ahd->msgout_len++;
3585 ahd_print_path(ahd, scb);
e3869ec7 3586 kprintf("Bus Device Reset Message Sent\n");
984263bc
MD
3587 /*
3588 * Clear our selection hardware in advance of
3589 * the busfree. We may have an entry in the waiting
3590 * Q for this target, and we don't want to go about
3591 * selecting while we handle the busfree and blow it
3592 * away.
3593 */
3594 ahd_outb(ahd, SCSISEQ0, 0);
3595 } else if ((scb->flags & SCB_ABORT) != 0) {
3596
3597 if ((scb->hscb->control & TAG_ENB) != 0) {
3598 ahd->msgout_buf[ahd->msgout_index++] = MSG_ABORT_TAG;
3599 } else {
3600 ahd->msgout_buf[ahd->msgout_index++] = MSG_ABORT;
3601 }
3602 ahd->msgout_len++;
3603 ahd_print_path(ahd, scb);
e3869ec7 3604 kprintf("Abort%s Message Sent\n",
984263bc
MD
3605 (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : "");
3606 /*
3607 * Clear our selection hardware in advance of
3608 * the busfree. We may have an entry in the waiting
3609 * Q for this target, and we don't want to go about
3610 * selecting while we handle the busfree and blow it
3611 * away.
3612 */
3613 ahd_outb(ahd, SCSISEQ0, 0);
3614 } else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) {
3615 ahd_build_transfer_msg(ahd, devinfo);
3616 /*
3617 * Clear our selection hardware in advance of potential
3618 * PPR IU status change busfree. We may have an entry in
3619 * the waiting Q for this target, and we don't want to go
3620 * about selecting while we handle the busfree and blow
3621 * it away.
3622 */
3623 ahd_outb(ahd, SCSISEQ0, 0);
3624 } else {
e3869ec7 3625 kprintf("ahd_intr: AWAITING_MSG for an SCB that "
984263bc 3626 "does not have a waiting message\n");
e3869ec7 3627 kprintf("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid,
984263bc
MD
3628 devinfo->target_mask);
3629 panic("SCB = %d, SCB Control = %x:%x, MSG_OUT = %x "
3630 "SCB flags = %x", SCB_GET_TAG(scb), scb->hscb->control,
975524e9 3631 ahd_inb_scbram(ahd, SCB_CONTROL), ahd_inb(ahd, MSG_OUT),
984263bc
MD
3632 scb->flags);
3633 }
3634
3635 /*
3636 * Clear the MK_MESSAGE flag from the SCB so we aren't
3637 * asked to send this message again.
3638 */
3639 ahd_outb(ahd, SCB_CONTROL,
3640 ahd_inb_scbram(ahd, SCB_CONTROL) & ~MK_MESSAGE);
3641 scb->hscb->control &= ~MK_MESSAGE;
3642 ahd->msgout_index = 0;
3643 ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
3644}
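/*
 * Editorial note, derived only from the routine above: for a tagged,
 * disconnect-enabled command that also has a negotiation flag set, the
 * outgoing buffer is laid out roughly as follows before the message-out
 * phase is driven manually:
 *
 *	msgout_buf[0]	MSG_IDENTIFYFLAG | MSG_IDENTIFY_DISCFLAG | lun
 *	msgout_buf[1]	queue tag message type (TAG_ENB/SCB_TAG_TYPE bits)
 *	msgout_buf[2]	SCB tag
 *	msgout_buf[3..]	transfer negotiation bytes appended by
 *			ahd_build_transfer_msg()
 */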
3645
3646/*
3647 * Build an appropriate transfer negotiation message for the
3648 * currently active target.
3649 */
3650static void
3651ahd_build_transfer_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
3652{
3653 /*
3654 * We need to initiate transfer negotiations.
3655 * If our current and goal settings are identical,
3656 * we want to renegotiate due to a check condition.
3657 */
3658 struct ahd_initiator_tinfo *tinfo;
3659 struct ahd_tmode_tstate *tstate;
3660 int dowide;
3661 int dosync;
3662 int doppr;
3663 u_int period;
3664 u_int ppr_options;
3665 u_int offset;