MPSAFE locking for the ahc/ahd drivers using lockmgr locks.
[dragonfly.git] / sys / dev / disk / aic7xxx / aic79xx.c
CommitLineData
984263bc
MD
1/*
2 * Core routines and tables shareable across OS platforms.
3 *
fb5acdc8 4 * Copyright (c) 1994-2002, 2004 Justin T. Gibbs.
984263bc
MD
5 * Copyright (c) 2000-2003 Adaptec Inc.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions, and the following disclaimer,
13 * without modification.
14 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
15 * substantially similar to the "NO WARRANTY" disclaimer below
16 * ("Disclaimer") and any redistribution must be conditioned upon
17 * including a substantially similar Disclaimer requirement for further
18 * binary redistribution.
19 * 3. Neither the names of the above-listed copyright holders nor the names
20 * of any contributors may be used to endorse or promote products derived
21 * from this software without specific prior written permission.
22 *
23 * Alternatively, this software may be distributed under the terms of the
24 * GNU General Public License ("GPL") version 2 as published by the Free
25 * Software Foundation.
26 *
27 * NO WARRANTY
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
36 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
37 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38 * POSSIBILITY OF SUCH DAMAGES.
39 *
f39dcdf3 40 * $Id: //depot/aic7xxx/aic7xxx/aic79xx.c#246 $
984263bc 41 *
2923a98d 42 * $FreeBSD: src/sys/dev/aic7xxx/aic79xx.c,v 1.40 2007/04/19 18:53:52 scottl Exp $
ef8ef949 43 * $DragonFly: src/sys/dev/disk/aic7xxx/aic79xx.c,v 1.30 2008/02/09 18:13:13 pavalos Exp $
984263bc
MD
44 */
45
984263bc
MD
46#include "aic79xx_osm.h"
47#include "aic79xx_inline.h"
48#include "aicasm/aicasm_insformat.h"
984263bc
MD
49
50/******************************** Globals *************************************/
51struct ahd_softc_tailq ahd_tailq = TAILQ_HEAD_INITIALIZER(ahd_tailq);
7009d94e 52uint32_t ahd_attach_to_HostRAID_controllers = 1;
984263bc
MD
53
54/***************************** Lookup Tables **********************************/
55char *ahd_chip_names[] =
56{
57 "NONE",
58 "aic7901",
59 "aic7902",
60 "aic7901A"
61};
62static const u_int num_chip_names = NUM_ELEMENTS(ahd_chip_names);
63
64/*
65 * Hardware error codes.
66 */
67struct ahd_hard_error_entry {
71f385dc 68 uint8_t error;
984263bc
MD
69 char *errmesg;
70};
71
72static struct ahd_hard_error_entry ahd_hard_errors[] = {
73 { DSCTMOUT, "Discard Timer has timed out" },
74 { ILLOPCODE, "Illegal Opcode in sequencer program" },
75 { SQPARERR, "Sequencer Parity Error" },
76 { DPARERR, "Data-path Parity Error" },
77 { MPARERR, "Scratch or SCB Memory Parity Error" },
78 { CIOPARERR, "CIOBUS Parity Error" },
79};
80static const u_int num_errors = NUM_ELEMENTS(ahd_hard_errors);
81
82static struct ahd_phase_table_entry ahd_phase_table[] =
83{
84 { P_DATAOUT, MSG_NOOP, "in Data-out phase" },
85 { P_DATAIN, MSG_INITIATOR_DET_ERR, "in Data-in phase" },
86 { P_DATAOUT_DT, MSG_NOOP, "in DT Data-out phase" },
87 { P_DATAIN_DT, MSG_INITIATOR_DET_ERR, "in DT Data-in phase" },
88 { P_COMMAND, MSG_NOOP, "in Command phase" },
89 { P_MESGOUT, MSG_NOOP, "in Message-out phase" },
90 { P_STATUS, MSG_INITIATOR_DET_ERR, "in Status phase" },
91 { P_MESGIN, MSG_PARITY_ERROR, "in Message-in phase" },
92 { P_BUSFREE, MSG_NOOP, "while idle" },
93 { 0, MSG_NOOP, "in unknown phase" }
94};
95
96/*
97 * In most cases we only wish to itterate over real phases, so
98 * exclude the last element from the count.
99 */
100static const u_int num_phases = NUM_ELEMENTS(ahd_phase_table) - 1;
101
102/* Our Sequencer Program */
103#include "aic79xx_seq.h"
104
105/**************************** Function Declarations ***************************/
106static void ahd_handle_transmission_error(struct ahd_softc *ahd);
107static void ahd_handle_lqiphase_error(struct ahd_softc *ahd,
108 u_int lqistat1);
109static int ahd_handle_pkt_busfree(struct ahd_softc *ahd,
110 u_int busfreetime);
111static int ahd_handle_nonpkt_busfree(struct ahd_softc *ahd);
112static void ahd_handle_proto_violation(struct ahd_softc *ahd);
113static void ahd_force_renegotiation(struct ahd_softc *ahd,
114 struct ahd_devinfo *devinfo);
115
116static struct ahd_tmode_tstate*
117 ahd_alloc_tstate(struct ahd_softc *ahd,
118 u_int scsi_id, char channel);
119#ifdef AHD_TARGET_MODE
120static void ahd_free_tstate(struct ahd_softc *ahd,
121 u_int scsi_id, char channel, int force);
122#endif
123static void ahd_devlimited_syncrate(struct ahd_softc *ahd,
124 struct ahd_initiator_tinfo *,
125 u_int *period,
126 u_int *ppr_options,
127 role_t role);
128static void ahd_update_neg_table(struct ahd_softc *ahd,
129 struct ahd_devinfo *devinfo,
130 struct ahd_transinfo *tinfo);
131static void ahd_update_pending_scbs(struct ahd_softc *ahd);
132static void ahd_fetch_devinfo(struct ahd_softc *ahd,
133 struct ahd_devinfo *devinfo);
134static void ahd_scb_devinfo(struct ahd_softc *ahd,
135 struct ahd_devinfo *devinfo,
136 struct scb *scb);
137static void ahd_setup_initiator_msgout(struct ahd_softc *ahd,
138 struct ahd_devinfo *devinfo,
139 struct scb *scb);
140static void ahd_build_transfer_msg(struct ahd_softc *ahd,
141 struct ahd_devinfo *devinfo);
142static void ahd_construct_sdtr(struct ahd_softc *ahd,
143 struct ahd_devinfo *devinfo,
144 u_int period, u_int offset);
145static void ahd_construct_wdtr(struct ahd_softc *ahd,
146 struct ahd_devinfo *devinfo,
147 u_int bus_width);
148static void ahd_construct_ppr(struct ahd_softc *ahd,
149 struct ahd_devinfo *devinfo,
150 u_int period, u_int offset,
151 u_int bus_width, u_int ppr_options);
152static void ahd_clear_msg_state(struct ahd_softc *ahd);
153static void ahd_handle_message_phase(struct ahd_softc *ahd);
154typedef enum {
155 AHDMSG_1B,
156 AHDMSG_2B,
157 AHDMSG_EXT
158} ahd_msgtype;
159static int ahd_sent_msg(struct ahd_softc *ahd, ahd_msgtype type,
160 u_int msgval, int full);
161static int ahd_parse_msg(struct ahd_softc *ahd,
162 struct ahd_devinfo *devinfo);
163static int ahd_handle_msg_reject(struct ahd_softc *ahd,
164 struct ahd_devinfo *devinfo);
165static void ahd_handle_ign_wide_residue(struct ahd_softc *ahd,
166 struct ahd_devinfo *devinfo);
167static void ahd_reinitialize_dataptrs(struct ahd_softc *ahd);
168static void ahd_handle_devreset(struct ahd_softc *ahd,
169 struct ahd_devinfo *devinfo,
170 u_int lun, cam_status status,
171 char *message, int verbose_level);
9f00895f 172#ifdef AHD_TARGET_MODE
984263bc
MD
173static void ahd_setup_target_msgin(struct ahd_softc *ahd,
174 struct ahd_devinfo *devinfo,
175 struct scb *scb);
176#endif
177
178static u_int ahd_sglist_size(struct ahd_softc *ahd);
179static u_int ahd_sglist_allocsize(struct ahd_softc *ahd);
180static bus_dmamap_callback_t
181 ahd_dmamap_cb;
182static void ahd_initialize_hscbs(struct ahd_softc *ahd);
183static int ahd_init_scbdata(struct ahd_softc *ahd);
184static void ahd_fini_scbdata(struct ahd_softc *ahd);
185static void ahd_setup_iocell_workaround(struct ahd_softc *ahd);
186static void ahd_iocell_first_selection(struct ahd_softc *ahd);
187static void ahd_add_col_list(struct ahd_softc *ahd,
188 struct scb *scb, u_int col_idx);
189static void ahd_rem_col_list(struct ahd_softc *ahd,
190 struct scb *scb);
191static void ahd_chip_init(struct ahd_softc *ahd);
192static void ahd_qinfifo_requeue(struct ahd_softc *ahd,
193 struct scb *prev_scb,
194 struct scb *scb);
195static int ahd_qinfifo_count(struct ahd_softc *ahd);
196static int ahd_search_scb_list(struct ahd_softc *ahd, int target,
197 char channel, int lun, u_int tag,
198 role_t role, uint32_t status,
199 ahd_search_action action,
fb5acdc8
PA
200 u_int *list_head, u_int *list_tail,
201 u_int tid);
984263bc
MD
202static void ahd_stitch_tid_list(struct ahd_softc *ahd,
203 u_int tid_prev, u_int tid_cur,
204 u_int tid_next);
205static void ahd_add_scb_to_free_list(struct ahd_softc *ahd,
206 u_int scbid);
207static u_int ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid,
208 u_int prev, u_int next, u_int tid);
209static void ahd_reset_current_bus(struct ahd_softc *ahd);
210static ahd_callback_t ahd_reset_poll;
211static ahd_callback_t ahd_stat_timer;
212#ifdef AHD_DUMP_SEQ
213static void ahd_dumpseq(struct ahd_softc *ahd);
214#endif
215static void ahd_loadseq(struct ahd_softc *ahd);
216static int ahd_check_patch(struct ahd_softc *ahd,
217 struct patch **start_patch,
218 u_int start_instr, u_int *skip_addr);
219static u_int ahd_resolve_seqaddr(struct ahd_softc *ahd,
220 u_int address);
221static void ahd_download_instr(struct ahd_softc *ahd,
222 u_int instrptr, uint8_t *dconsts);
223static int ahd_probe_stack_size(struct ahd_softc *ahd);
f39dcdf3 224static int ahd_other_scb_timeout(struct ahd_softc *ahd,
750f3593
PA
225 struct scb *scb,
226 struct scb *other_scb);
975524e9
PA
227static int ahd_scb_active_in_fifo(struct ahd_softc *ahd,
228 struct scb *scb);
229static void ahd_run_data_fifo(struct ahd_softc *ahd,
230 struct scb *scb);
231
984263bc
MD
232#ifdef AHD_TARGET_MODE
233static void ahd_queue_lstate_event(struct ahd_softc *ahd,
234 struct ahd_tmode_lstate *lstate,
235 u_int initiator_id,
236 u_int event_type,
237 u_int event_arg);
238static void ahd_update_scsiid(struct ahd_softc *ahd,
239 u_int targid_mask);
240static int ahd_handle_target_cmd(struct ahd_softc *ahd,
241 struct target_cmd *cmd);
242#endif
243
244/******************************** Private Inlines *****************************/
245static __inline void ahd_assert_atn(struct ahd_softc *ahd);
246static __inline int ahd_currently_packetized(struct ahd_softc *ahd);
247static __inline int ahd_set_active_fifo(struct ahd_softc *ahd);
248
249static __inline void
250ahd_assert_atn(struct ahd_softc *ahd)
251{
252 ahd_outb(ahd, SCSISIGO, ATNO);
253}
254
255/*
256 * Determine if the current connection has a packetized
257 * agreement. This does not necessarily mean that we
258 * are currently in a packetized transfer. We could
259 * just as easily be sending or receiving a message.
260 */
261static __inline int
262ahd_currently_packetized(struct ahd_softc *ahd)
263{
264 ahd_mode_state saved_modes;
265 int packetized;
266
267 saved_modes = ahd_save_modes(ahd);
268 if ((ahd->bugs & AHD_PKTIZED_STATUS_BUG) != 0) {
269 /*
270 * The packetized bit refers to the last
271 * connection, not the current one. Check
272 * for non-zero LQISTATE instead.
273 */
274 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
275 packetized = ahd_inb(ahd, LQISTATE) != 0;
276 } else {
277 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
278 packetized = ahd_inb(ahd, LQISTAT2) & PACKETIZED;
279 }
280 ahd_restore_modes(ahd, saved_modes);
281 return (packetized);
282}
283
284static __inline int
285ahd_set_active_fifo(struct ahd_softc *ahd)
286{
287 u_int active_fifo;
288
289 AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
290 active_fifo = ahd_inb(ahd, DFFSTAT) & CURRFIFO;
291 switch (active_fifo) {
292 case 0:
293 case 1:
294 ahd_set_modes(ahd, active_fifo, active_fifo);
295 return (1);
296 default:
297 return (0);
298 }
299}
300
301/************************* Sequencer Execution Control ************************/
302/*
303 * Restart the sequencer program from address zero
304 */
305void
306ahd_restart(struct ahd_softc *ahd)
307{
308
309 ahd_pause(ahd);
310
311 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
312
313 /* No more pending messages */
314 ahd_clear_msg_state(ahd);
315 ahd_outb(ahd, SCSISIGO, 0); /* De-assert BSY */
316 ahd_outb(ahd, MSG_OUT, MSG_NOOP); /* No message to send */
317 ahd_outb(ahd, SXFRCTL1, ahd_inb(ahd, SXFRCTL1) & ~BITBUCKET);
318 ahd_outb(ahd, SEQINTCTL, 0);
319 ahd_outb(ahd, LASTPHASE, P_BUSFREE);
320 ahd_outb(ahd, SEQ_FLAGS, 0);
321 ahd_outb(ahd, SAVED_SCSIID, 0xFF);
322 ahd_outb(ahd, SAVED_LUN, 0xFF);
323
324 /*
325 * Ensure that the sequencer's idea of TQINPOS
326 * matches our own. The sequencer increments TQINPOS
327 * only after it sees a DMA complete and a reset could
328 * occur before the increment leaving the kernel to believe
329 * the command arrived but the sequencer to not.
330 */
331 ahd_outb(ahd, TQINPOS, ahd->tqinfifonext);
332
333 /* Always allow reselection */
334 ahd_outb(ahd, SCSISEQ1,
335 ahd_inb(ahd, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP));
984263bc 336 ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
f39dcdf3
PA
337
338 /*
339 * Clear any pending sequencer interrupt. It is no
340 * longer relevant since we're resetting the Program
341 * Counter.
342 */
343 ahd_outb(ahd, CLRINT, CLRSEQINT);
344
984263bc
MD
345 ahd_outb(ahd, SEQCTL0, FASTMODE|SEQRESET);
346 ahd_unpause(ahd);
347}
348
349void
350ahd_clear_fifo(struct ahd_softc *ahd, u_int fifo)
351{
352 ahd_mode_state saved_modes;
353
354#ifdef AHD_DEBUG
355 if ((ahd_debug & AHD_SHOW_FIFOS) != 0)
e3869ec7 356 kprintf("%s: Clearing FIFO %d\n", ahd_name(ahd), fifo);
984263bc
MD
357#endif
358 saved_modes = ahd_save_modes(ahd);
359 ahd_set_modes(ahd, fifo, fifo);
360 ahd_outb(ahd, DFFSXFRCTL, RSTCHN|CLRSHCNT);
361 if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0)
362 ahd_outb(ahd, CCSGCTL, CCSGRESET);
363 ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR);
364 ahd_outb(ahd, SG_STATE, 0);
365 ahd_restore_modes(ahd, saved_modes);
366}
367
368/************************* Input/Output Queues ********************************/
369/*
370 * Flush and completed commands that are sitting in the command
371 * complete queues down on the chip but have yet to be dma'ed back up.
372 */
373void
374ahd_flush_qoutfifo(struct ahd_softc *ahd)
375{
376 struct scb *scb;
377 ahd_mode_state saved_modes;
378 u_int saved_scbptr;
379 u_int ccscbctl;
380 u_int scbid;
381 u_int next_scbid;
382
383 saved_modes = ahd_save_modes(ahd);
975524e9
PA
384
385 /*
750f3593 386 * Flush the good status FIFO for completed packetized commands.
975524e9
PA
387 */
388 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
984263bc 389 saved_scbptr = ahd_get_scbptr(ahd);
975524e9
PA
390 while ((ahd_inb(ahd, LQISTAT2) & LQIGSAVAIL) != 0) {
391 u_int fifo_mode;
392 u_int i;
393
750f3593 394 scbid = ahd_inw(ahd, GSFIFO);
975524e9
PA
395 scb = ahd_lookup_scb(ahd, scbid);
396 if (scb == NULL) {
397 kprintf("%s: Warning - GSFIFO SCB %d invalid\n",
398 ahd_name(ahd), scbid);
399 continue;
400 }
401 /*
402 * Determine if this transaction is still active in
403 * any FIFO. If it is, we must flush that FIFO to
404 * the host before completing the command.
405 */
406 fifo_mode = 0;
750f3593 407rescan_fifos:
975524e9
PA
408 for (i = 0; i < 2; i++) {
409 /* Toggle to the other mode. */
410 fifo_mode ^= 1;
411 ahd_set_modes(ahd, fifo_mode, fifo_mode);
750f3593 412
975524e9
PA
413 if (ahd_scb_active_in_fifo(ahd, scb) == 0)
414 continue;
415
416 ahd_run_data_fifo(ahd, scb);
417
418 /*
750f3593
PA
419 * Running this FIFO may cause a CFG4DATA for
420 * this same transaction to assert in the other
421 * FIFO or a new snapshot SAVEPTRS interrupt
422 * in this FIFO. Even running a FIFO may not
423 * clear the transaction if we are still waiting
424 * for data to drain to the host. We must loop
425 * until the transaction is not active in either
426 * FIFO just to be sure. Reset our loop counter
427 * so we will visit both FIFOs again before
428 * declaring this transaction finished. We
429 * also delay a bit so that status has a chance
430 * to change before we look at this FIFO again.
975524e9 431 */
750f3593
PA
432 aic_delay(200);
433 goto rescan_fifos;
975524e9
PA
434 }
435 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
436 ahd_set_scbptr(ahd, scbid);
437 if ((ahd_inb_scbram(ahd, SCB_SGPTR) & SG_LIST_NULL) == 0
438 && ((ahd_inb_scbram(ahd, SCB_SGPTR) & SG_FULL_RESID) != 0
439 || (ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR)
440 & SG_LIST_NULL) != 0)) {
441 u_int comp_head;
442
443 /*
444 * The transfer completed with a residual.
445 * Place this SCB on the complete DMA list
750f3593 446 * so that we update our in-core copy of the
975524e9
PA
447 * SCB before completing the command.
448 */
449 ahd_outb(ahd, SCB_SCSI_STATUS, 0);
450 ahd_outb(ahd, SCB_SGPTR,
451 ahd_inb_scbram(ahd, SCB_SGPTR)
452 | SG_STATUS_VALID);
7009d94e
PA
453 ahd_outw(ahd, SCB_TAG, scbid);
454 ahd_outw(ahd, SCB_NEXT_COMPLETE, SCB_LIST_NULL);
975524e9 455 comp_head = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD);
7009d94e
PA
456 if (SCBID_IS_NULL(comp_head)) {
457 ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, scbid);
458 ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, scbid);
459 } else {
460 u_int tail;
461
462 tail = ahd_inw(ahd, COMPLETE_DMA_SCB_TAIL);
463 ahd_set_scbptr(ahd, tail);
464 ahd_outw(ahd, SCB_NEXT_COMPLETE, scbid);
465 ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, scbid);
466 ahd_set_scbptr(ahd, scbid);
467 }
975524e9
PA
468 } else
469 ahd_complete_scb(ahd, scb);
470 }
471 ahd_set_scbptr(ahd, saved_scbptr);
472
473 /*
474 * Setup for command channel portion of flush.
475 */
476 ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
984263bc
MD
477
478 /*
479 * Wait for any inprogress DMA to complete and clear DMA state
480 * if this if for an SCB in the qinfifo.
481 */
593f3c4b 482 while (((ccscbctl = ahd_inb(ahd, CCSCBCTL)) & (CCARREN|CCSCBEN)) != 0) {
984263bc
MD
483
484 if ((ccscbctl & (CCSCBDIR|CCARREN)) == (CCSCBDIR|CCARREN)) {
485 if ((ccscbctl & ARRDONE) != 0)
486 break;
487 } else if ((ccscbctl & CCSCBDONE) != 0)
488 break;
750f3593 489 aic_delay(200);
984263bc 490 }
750f3593
PA
491 /*
492 * We leave the sequencer to cleanup in the case of DMA's to
493 * update the qoutfifo. In all other cases (DMA's to the
494 * chip or a push of an SCB from the COMPLETE_DMA_SCB list),
495 * we disable the DMA engine so that the sequencer will not
496 * attempt to handle the DMA completion.
497 */
498 if ((ccscbctl & CCSCBDIR) != 0 || (ccscbctl & ARRDONE) != 0)
984263bc
MD
499 ahd_outb(ahd, CCSCBCTL, ccscbctl & ~(CCARREN|CCSCBEN));
500
750f3593
PA
501 /*
502 * Complete any SCBs that just finished
503 * being DMA'ed into the qoutfifo.
504 */
505 ahd_run_qoutfifo(ahd);
506
975524e9 507 saved_scbptr = ahd_get_scbptr(ahd);
984263bc
MD
508 /*
509 * Manually update/complete any completed SCBs that are waiting to be
510 * DMA'ed back up to the host.
511 */
512 scbid = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD);
513 while (!SCBID_IS_NULL(scbid)) {
514 uint8_t *hscb_ptr;
515 u_int i;
516
517 ahd_set_scbptr(ahd, scbid);
518 next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
519 scb = ahd_lookup_scb(ahd, scbid);
520 if (scb == NULL) {
e3869ec7 521 kprintf("%s: Warning - DMA-up and complete "
984263bc
MD
522 "SCB %d invalid\n", ahd_name(ahd), scbid);
523 continue;
524 }
525 hscb_ptr = (uint8_t *)scb->hscb;
526 for (i = 0; i < sizeof(struct hardware_scb); i++)
527 *hscb_ptr++ = ahd_inb_scbram(ahd, SCB_BASE + i);
528
529 ahd_complete_scb(ahd, scb);
530 scbid = next_scbid;
531 }
532 ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, SCB_LIST_NULL);
7009d94e
PA
533 ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, SCB_LIST_NULL);
534
535 scbid = ahd_inw(ahd, COMPLETE_ON_QFREEZE_HEAD);
536 while (!SCBID_IS_NULL(scbid)) {
537
538 ahd_set_scbptr(ahd, scbid);
539 next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
540 scb = ahd_lookup_scb(ahd, scbid);
541 if (scb == NULL) {
542 kprintf("%s: Warning - Complete Qfrz SCB %d invalid\n",
543 ahd_name(ahd), scbid);
544 continue;
545 }
546
547 ahd_complete_scb(ahd, scb);
548 scbid = next_scbid;
549 }
550 ahd_outw(ahd, COMPLETE_ON_QFREEZE_HEAD, SCB_LIST_NULL);
984263bc
MD
551
552 scbid = ahd_inw(ahd, COMPLETE_SCB_HEAD);
553 while (!SCBID_IS_NULL(scbid)) {
554
555 ahd_set_scbptr(ahd, scbid);
556 next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
557 scb = ahd_lookup_scb(ahd, scbid);
558 if (scb == NULL) {
e3869ec7 559 kprintf("%s: Warning - Complete SCB %d invalid\n",
984263bc
MD
560 ahd_name(ahd), scbid);
561 continue;
562 }
563
564 ahd_complete_scb(ahd, scb);
565 scbid = next_scbid;
566 }
567 ahd_outw(ahd, COMPLETE_SCB_HEAD, SCB_LIST_NULL);
975524e9
PA
568
569 /*
570 * Restore state.
571 */
984263bc 572 ahd_set_scbptr(ahd, saved_scbptr);
975524e9
PA
573 ahd_restore_modes(ahd, saved_modes);
574 ahd->flags |= AHD_UPDATE_PEND_CMDS;
575}
576
577/*
578 * Determine if an SCB for a packetized transaction
579 * is active in a FIFO.
580 */
581static int
582ahd_scb_active_in_fifo(struct ahd_softc *ahd, struct scb *scb)
583{
984263bc
MD
584
585 /*
975524e9
PA
586 * The FIFO is only active for our transaction if
587 * the SCBPTR matches the SCB's ID and the firmware
588 * has installed a handler for the FIFO or we have
589 * a pending SAVEPTRS or CFG4DATA interrupt.
984263bc 590 */
975524e9
PA
591 if (ahd_get_scbptr(ahd) != SCB_GET_TAG(scb)
592 || ((ahd_inb(ahd, LONGJMP_ADDR+1) & INVALID_ADDR) != 0
593 && (ahd_inb(ahd, SEQINTSRC) & (CFG4DATA|SAVEPTRS)) == 0))
594 return (0);
595
596 return (1);
597}
598
599/*
600 * Run a data fifo to completion for a transaction we know
601 * has completed across the SCSI bus (good status has been
602 * received). We are already set to the correct FIFO mode
603 * on entry to this routine.
604 *
605 * This function attempts to operate exactly as the firmware
606 * would when running this FIFO. Care must be taken to update
607 * this routine any time the firmware's FIFO algorithm is
608 * changed.
609 */
610static void
611ahd_run_data_fifo(struct ahd_softc *ahd, struct scb *scb)
612{
613 u_int seqintsrc;
614
750f3593
PA
615 seqintsrc = ahd_inb(ahd, SEQINTSRC);
616 if ((seqintsrc & CFG4DATA) != 0) {
617 uint32_t datacnt;
618 uint32_t sgptr;
975524e9 619
750f3593
PA
620 /*
621 * Clear full residual flag.
622 */
623 sgptr = ahd_inl_scbram(ahd, SCB_SGPTR) & ~SG_FULL_RESID;
624 ahd_outb(ahd, SCB_SGPTR, sgptr);
975524e9 625
750f3593
PA
626 /*
627 * Load datacnt and address.
628 */
629 datacnt = ahd_inl_scbram(ahd, SCB_DATACNT);
630 if ((datacnt & AHD_DMA_LAST_SEG) != 0) {
631 sgptr |= LAST_SEG;
632 ahd_outb(ahd, SG_STATE, 0);
633 } else
634 ahd_outb(ahd, SG_STATE, LOADING_NEEDED);
635 ahd_outq(ahd, HADDR, ahd_inq_scbram(ahd, SCB_DATAPTR));
636 ahd_outl(ahd, HCNT, datacnt & AHD_SG_LEN_MASK);
637 ahd_outb(ahd, SG_CACHE_PRE, sgptr);
638 ahd_outb(ahd, DFCNTRL, PRELOADEN|SCSIEN|HDMAEN);
975524e9 639
750f3593
PA
640 /*
641 * Initialize Residual Fields.
642 */
643 ahd_outb(ahd, SCB_RESIDUAL_DATACNT+3, datacnt >> 24);
644 ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr & SG_PTR_MASK);
975524e9 645
750f3593
PA
646 /*
647 * Mark the SCB as having a FIFO in use.
648 */
649 ahd_outb(ahd, SCB_FIFO_USE_COUNT,
650 ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT) + 1);
975524e9 651
750f3593
PA
652 /*
653 * Install a "fake" handler for this FIFO.
654 */
655 ahd_outw(ahd, LONGJMP_ADDR, 0);
975524e9 656
750f3593
PA
657 /*
658 * Notify the hardware that we have satisfied
659 * this sequencer interrupt.
660 */
661 ahd_outb(ahd, CLRSEQINTSRC, CLRCFG4DATA);
662 } else if ((seqintsrc & SAVEPTRS) != 0) {
663 uint32_t sgptr;
664 uint32_t resid;
975524e9 665
750f3593 666 if ((ahd_inb(ahd, LONGJMP_ADDR+1)&INVALID_ADDR) != 0) {
975524e9 667 /*
750f3593
PA
668 * Snapshot Save Pointers. All that
669 * is necessary to clear the snapshot
670 * is a CLRCHN.
975524e9 671 */
750f3593
PA
672 goto clrchn;
673 }
975524e9 674
750f3593
PA
675 /*
676 * Disable S/G fetch so the DMA engine
677 * is available to future users.
678 */
679 if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0)
680 ahd_outb(ahd, CCSGCTL, 0);
681 ahd_outb(ahd, SG_STATE, 0);
975524e9 682
750f3593
PA
683 /*
684 * Flush the data FIFO. Strickly only
685 * necessary for Rev A parts.
686 */
687 ahd_outb(ahd, DFCNTRL, ahd_inb(ahd, DFCNTRL) | FIFOFLUSH);
975524e9 688
750f3593
PA
689 /*
690 * Calculate residual.
691 */
692 sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR);
693 resid = ahd_inl(ahd, SHCNT);
694 resid |= ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT+3) << 24;
695 ahd_outl(ahd, SCB_RESIDUAL_DATACNT, resid);
696 if ((ahd_inb(ahd, SG_CACHE_SHADOW) & LAST_SEG) == 0) {
975524e9 697 /*
750f3593
PA
698 * Must back up to the correct S/G element.
699 * Typically this just means resetting our
700 * low byte to the offset in the SG_CACHE,
701 * but if we wrapped, we have to correct
702 * the other bytes of the sgptr too.
975524e9 703 */
750f3593
PA
704 if ((ahd_inb(ahd, SG_CACHE_SHADOW) & 0x80) != 0
705 && (sgptr & 0x80) == 0)
706 sgptr -= 0x100;
707 sgptr &= ~0xFF;
708 sgptr |= ahd_inb(ahd, SG_CACHE_SHADOW)
709 & SG_ADDR_MASK;
710 ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr);
711 ahd_outb(ahd, SCB_RESIDUAL_DATACNT + 3, 0);
712 } else if ((resid & AHD_SG_LEN_MASK) == 0) {
713 ahd_outb(ahd, SCB_RESIDUAL_SGPTR,
714 sgptr | SG_LIST_NULL);
715 }
716 /*
717 * Save Pointers.
718 */
719 ahd_outq(ahd, SCB_DATAPTR, ahd_inq(ahd, SHADDR));
720 ahd_outl(ahd, SCB_DATACNT, resid);
721 ahd_outl(ahd, SCB_SGPTR, sgptr);
722 ahd_outb(ahd, CLRSEQINTSRC, CLRSAVEPTRS);
723 ahd_outb(ahd, SEQIMODE,
724 ahd_inb(ahd, SEQIMODE) | ENSAVEPTRS);
725 /*
726 * If the data is to the SCSI bus, we are
727 * done, otherwise wait for FIFOEMP.
728 */
729 if ((ahd_inb(ahd, DFCNTRL) & DIRECTION) != 0)
730 goto clrchn;
731 } else if ((ahd_inb(ahd, SG_STATE) & LOADING_NEEDED) != 0) {
732 uint32_t sgptr;
733 uint64_t data_addr;
734 uint32_t data_len;
735 u_int dfcntrl;
975524e9 736
750f3593
PA
737 /*
738 * Disable S/G fetch so the DMA engine
739 * is available to future users. We won't
740 * be using the DMA engine to load segments.
741 */
742 if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0) {
743 ahd_outb(ahd, CCSGCTL, 0);
744 ahd_outb(ahd, SG_STATE, LOADING_NEEDED);
745 }
746
747 /*
748 * Wait for the DMA engine to notice that the
749 * host transfer is enabled and that there is
750 * space in the S/G FIFO for new segments before
751 * loading more segments.
752 */
753 if ((ahd_inb(ahd, DFSTATUS) & PRELOAD_AVAIL) != 0
754 && (ahd_inb(ahd, DFCNTRL) & HDMAENACK) != 0) {
975524e9
PA
755
756 /*
757 * Determine the offset of the next S/G
758 * element to load.
759 */
760 sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR);
761 sgptr &= SG_PTR_MASK;
762 if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
763 struct ahd_dma64_seg *sg;
764
765 sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);
766 data_addr = sg->addr;
767 data_len = sg->len;
768 sgptr += sizeof(*sg);
769 } else {
770 struct ahd_dma_seg *sg;
771
772 sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);
773 data_addr = sg->len & AHD_SG_HIGH_ADDR_MASK;
774 data_addr <<= 8;
775 data_addr |= sg->addr;
776 data_len = sg->len;
777 sgptr += sizeof(*sg);
778 }
779
780 /*
781 * Update residual information.
782 */
783 ahd_outb(ahd, SCB_RESIDUAL_DATACNT+3, data_len >> 24);
784 ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr);
785
786 /*
787 * Load the S/G.
788 */
789 if (data_len & AHD_DMA_LAST_SEG) {
790 sgptr |= LAST_SEG;
791 ahd_outb(ahd, SG_STATE, 0);
792 }
793 ahd_outq(ahd, HADDR, data_addr);
794 ahd_outl(ahd, HCNT, data_len & AHD_SG_LEN_MASK);
795 ahd_outb(ahd, SG_CACHE_PRE, sgptr & 0xFF);
796
797 /*
798 * Advertise the segment to the hardware.
799 */
800 dfcntrl = ahd_inb(ahd, DFCNTRL)|PRELOADEN|HDMAEN;
750f3593 801 if ((ahd->features & AHD_NEW_DFCNTRL_OPTS) != 0) {
975524e9
PA
802 /*
803 * Use SCSIENWRDIS so that SCSIEN
804 * is never modified by this
805 * operation.
806 */
807 dfcntrl |= SCSIENWRDIS;
808 }
809 ahd_outb(ahd, DFCNTRL, dfcntrl);
984263bc 810 }
750f3593
PA
811 } else if ((ahd_inb(ahd, SG_CACHE_SHADOW) & LAST_SEG_DONE) != 0) {
812
813 /*
814 * Transfer completed to the end of SG list
815 * and has flushed to the host.
816 */
817 ahd_outb(ahd, SCB_SGPTR,
818 ahd_inb_scbram(ahd, SCB_SGPTR) | SG_LIST_NULL);
819 goto clrchn;
820 } else if ((ahd_inb(ahd, DFSTATUS) & FIFOEMP) != 0) {
821clrchn:
822 /*
823 * Clear any handler for this FIFO, decrement
824 * the FIFO use count for the SCB, and release
825 * the FIFO.
826 */
827 ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR);
828 ahd_outb(ahd, SCB_FIFO_USE_COUNT,
829 ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT) - 1);
830 ahd_outb(ahd, DFFSXFRCTL, CLRCHN);
984263bc 831 }
984263bc
MD
832}
833
7009d94e
PA
834/*
835 * Look for entries in the QoutFIFO that have completed.
836 * The valid_tag completion field indicates the validity
837 * of the entry - the valid value toggles each time through
838 * the queue. We use the sg_status field in the completion
839 * entry to avoid referencing the hscb if the completion
840 * occurred with no errors and no residual. sg_status is
841 * a copy of the first byte (little endian) of the sgptr
842 * hscb field.
843 */
984263bc
MD
844void
845ahd_run_qoutfifo(struct ahd_softc *ahd)
846{
7009d94e 847 struct ahd_completion *completion;
984263bc
MD
848 struct scb *scb;
849 u_int scb_index;
850
851 if ((ahd->flags & AHD_RUNNING_QOUTFIFO) != 0)
852 panic("ahd_run_qoutfifo recursion");
853 ahd->flags |= AHD_RUNNING_QOUTFIFO;
854 ahd_sync_qoutfifo(ahd, BUS_DMASYNC_POSTREAD);
7009d94e
PA
855 for (;;) {
856 completion = &ahd->qoutfifo[ahd->qoutfifonext];
857
858 if (completion->valid_tag != ahd->qoutfifonext_valid_tag)
859 break;
984263bc 860
7009d94e 861 scb_index = aic_le16toh(completion->tag);
984263bc
MD
862 scb = ahd_lookup_scb(ahd, scb_index);
863 if (scb == NULL) {
e3869ec7 864 kprintf("%s: WARNING no command for scb %d "
984263bc
MD
865 "(cmdcmplt)\nQOUTPOS = %d\n",
866 ahd_name(ahd), scb_index,
867 ahd->qoutfifonext);
868 ahd_dump_card_state(ahd);
7009d94e
PA
869 } else if ((completion->sg_status & SG_STATUS_VALID) != 0) {
870 ahd_handle_scb_status(ahd, scb);
871 } else {
872 ahd_done(ahd, scb);
873 }
984263bc
MD
874
875 ahd->qoutfifonext = (ahd->qoutfifonext+1) & (AHD_QOUT_SIZE-1);
876 if (ahd->qoutfifonext == 0)
7009d94e 877 ahd->qoutfifonext_valid_tag ^= QOUTFIFO_ENTRY_VALID;
984263bc
MD
878 }
879 ahd->flags &= ~AHD_RUNNING_QOUTFIFO;
880}
881
882/************************* Interrupt Handling *********************************/
883void
884ahd_handle_hwerrint(struct ahd_softc *ahd)
885{
886 /*
887 * Some catastrophic hardware error has occurred.
888 * Print it for the user and disable the controller.
889 */
890 int i;
891 int error;
892
893 error = ahd_inb(ahd, ERROR);
894 for (i = 0; i < num_errors; i++) {
71f385dc 895 if ((error & ahd_hard_errors[i].error) != 0)
e3869ec7 896 kprintf("%s: hwerrint, %s\n",
984263bc
MD
897 ahd_name(ahd), ahd_hard_errors[i].errmesg);
898 }
899
900 ahd_dump_card_state(ahd);
901 panic("BRKADRINT");
902
903 /* Tell everyone that this HBA is no longer available */
904 ahd_abort_scbs(ahd, CAM_TARGET_WILDCARD, ALL_CHANNELS,
905 CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN,
906 CAM_NO_HBA);
907
908 /* Tell the system that this controller has gone away. */
909 ahd_free(ahd);
910}
911
912void
913ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
914{
915 u_int seqintcode;
916
917 /*
918 * Save the sequencer interrupt code and clear the SEQINT
919 * bit. We will unpause the sequencer, if appropriate,
920 * after servicing the request.
921 */
922 seqintcode = ahd_inb(ahd, SEQINTCODE);
923 ahd_outb(ahd, CLRINT, CLRSEQINT);
924 if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
925 /*
926 * Unpause the sequencer and let it clear
927 * SEQINT by writing NO_SEQINT to it. This
928 * will cause the sequencer to be paused again,
929 * which is the expected state of this routine.
930 */
931 ahd_unpause(ahd);
932 while (!ahd_is_paused(ahd))
933 ;
934 ahd_outb(ahd, CLRINT, CLRSEQINT);
935 }
936 ahd_update_modes(ahd);
937#ifdef AHD_DEBUG
938 if ((ahd_debug & AHD_SHOW_MISC) != 0)
e3869ec7 939 kprintf("%s: Handle Seqint Called for code %d\n",
984263bc
MD
940 ahd_name(ahd), seqintcode);
941#endif
942 switch (seqintcode) {
984263bc
MD
943 case ENTERING_NONPACK:
944 {
945 struct scb *scb;
946 u_int scbid;
947
948 AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
949 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
950 scbid = ahd_get_scbptr(ahd);
951 scb = ahd_lookup_scb(ahd, scbid);
952 if (scb == NULL) {
953 /*
954 * Somehow need to know if this
955 * is from a selection or reselection.
b95ca0f4 956 * From that, we can determine target
984263bc
MD
957 * ID so we at least have an I_T nexus.
958 */
959 } else {
960 ahd_outb(ahd, SAVED_SCSIID, scb->hscb->scsiid);
961 ahd_outb(ahd, SAVED_LUN, scb->hscb->lun);
962 ahd_outb(ahd, SEQ_FLAGS, 0x0);
963 }
964 if ((ahd_inb(ahd, LQISTAT2) & LQIPHASE_OUTPKT) != 0
965 && (ahd_inb(ahd, SCSISIGO) & ATNO) != 0) {
966 /*
967 * Phase change after read stream with
968 * CRC error with P0 asserted on last
969 * packet.
970 */
971#ifdef AHD_DEBUG
972 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
e3869ec7 973 kprintf("%s: Assuming LQIPHASE_NLQ with "
984263bc
MD
974 "P0 assertion\n", ahd_name(ahd));
975#endif
976 }
977#ifdef AHD_DEBUG
978 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
e3869ec7 979 kprintf("%s: Entering NONPACK\n", ahd_name(ahd));
984263bc
MD
980#endif
981 break;
982 }
983 case INVALID_SEQINT:
e3869ec7 984 kprintf("%s: Invalid Sequencer interrupt occurred.\n",
984263bc
MD
985 ahd_name(ahd));
986 ahd_dump_card_state(ahd);
987 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
988 break;
989 case STATUS_OVERRUN:
990 {
991 struct scb *scb;
992 u_int scbid;
993
994 scbid = ahd_get_scbptr(ahd);
995 scb = ahd_lookup_scb(ahd, scbid);
996 if (scb != NULL)
997 ahd_print_path(ahd, scb);
998 else
e3869ec7
SW
999 kprintf("%s: ", ahd_name(ahd));
1000 kprintf("SCB %d Packetized Status Overrun", scbid);
984263bc
MD
1001 ahd_dump_card_state(ahd);
1002 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
1003 break;
1004 }
1005 case CFG4ISTAT_INTR:
1006 {
1007 struct scb *scb;
1008 u_int scbid;
1009
1010 scbid = ahd_get_scbptr(ahd);
1011 scb = ahd_lookup_scb(ahd, scbid);
1012 if (scb == NULL) {
1013 ahd_dump_card_state(ahd);
e3869ec7 1014 kprintf("CFG4ISTAT: Free SCB %d referenced", scbid);
984263bc
MD
1015 panic("For safety");
1016 }
1017 ahd_outq(ahd, HADDR, scb->sense_busaddr);
1018 ahd_outw(ahd, HCNT, AHD_SENSE_BUFSIZE);
1019 ahd_outb(ahd, HCNT + 2, 0);
1020 ahd_outb(ahd, SG_CACHE_PRE, SG_LAST_SEG);
1021 ahd_outb(ahd, DFCNTRL, PRELOADEN|SCSIEN|HDMAEN);
1022 break;
1023 }
1024 case ILLEGAL_PHASE:
1025 {
1026 u_int bus_phase;
1027
1028 bus_phase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
e3869ec7 1029 kprintf("%s: ILLEGAL_PHASE 0x%x\n",
984263bc
MD
1030 ahd_name(ahd), bus_phase);
1031
1032 switch (bus_phase) {
1033 case P_DATAOUT:
1034 case P_DATAIN:
1035 case P_DATAOUT_DT:
1036 case P_DATAIN_DT:
1037 case P_MESGOUT:
1038 case P_STATUS:
1039 case P_MESGIN:
1040 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
e3869ec7 1041 kprintf("%s: Issued Bus Reset.\n", ahd_name(ahd));
984263bc
MD
1042 break;
1043 case P_COMMAND:
1044 {
1045 struct ahd_devinfo devinfo;
1046 struct scb *scb;
1047 struct ahd_initiator_tinfo *targ_info;
1048 struct ahd_tmode_tstate *tstate;
1049 struct ahd_transinfo *tinfo;
1050 u_int scbid;
1051
1052 /*
1053 * If a target takes us into the command phase
1054 * assume that it has been externally reset and
1055 * has thus lost our previous packetized negotiation
1056 * agreement. Since we have not sent an identify
1057 * message and may not have fully qualified the
1058 * connection, we change our command to TUR, assert
1059 * ATN and ABORT the task when we go to message in
1060 * phase. The OSM will see the REQUEUE_REQUEST
1061 * status and retry the command.
1062 */
1063 scbid = ahd_get_scbptr(ahd);
1064 scb = ahd_lookup_scb(ahd, scbid);
1065 if (scb == NULL) {
e3869ec7 1066 kprintf("Invalid phase with no valid SCB. "
984263bc
MD
1067 "Resetting bus.\n");
1068 ahd_reset_channel(ahd, 'A',
1069 /*Initiate Reset*/TRUE);
1070 break;
1071 }
1072 ahd_compile_devinfo(&devinfo, SCB_GET_OUR_ID(scb),
1073 SCB_GET_TARGET(ahd, scb),
1074 SCB_GET_LUN(scb),
1075 SCB_GET_CHANNEL(ahd, scb),
1076 ROLE_INITIATOR);
1077 targ_info = ahd_fetch_transinfo(ahd,
1078 devinfo.channel,
1079 devinfo.our_scsiid,
1080 devinfo.target,
1081 &tstate);
1082 tinfo = &targ_info->curr;
1083 ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
1084 AHD_TRANS_ACTIVE, /*paused*/TRUE);
1085 ahd_set_syncrate(ahd, &devinfo, /*period*/0,
1086 /*offset*/0, /*ppr_options*/0,
1087 AHD_TRANS_ACTIVE, /*paused*/TRUE);
1088 ahd_outb(ahd, SCB_CDB_STORE, 0);
1089 ahd_outb(ahd, SCB_CDB_STORE+1, 0);
1090 ahd_outb(ahd, SCB_CDB_STORE+2, 0);
1091 ahd_outb(ahd, SCB_CDB_STORE+3, 0);
1092 ahd_outb(ahd, SCB_CDB_STORE+4, 0);
1093 ahd_outb(ahd, SCB_CDB_STORE+5, 0);
1094 ahd_outb(ahd, SCB_CDB_LEN, 6);
1095 scb->hscb->control &= ~(TAG_ENB|SCB_TAG_TYPE);
1096 scb->hscb->control |= MK_MESSAGE;
1097 ahd_outb(ahd, SCB_CONTROL, scb->hscb->control);
1098 ahd_outb(ahd, MSG_OUT, HOST_MSG);
1099 ahd_outb(ahd, SAVED_SCSIID, scb->hscb->scsiid);
1100 /*
1101 * The lun is 0, regardless of the SCB's lun
1102 * as we have not sent an identify message.
1103 */
1104 ahd_outb(ahd, SAVED_LUN, 0);
1105 ahd_outb(ahd, SEQ_FLAGS, 0);
1106 ahd_assert_atn(ahd);
750f3593 1107 scb->flags &= ~SCB_PACKETIZED;
984263bc
MD
1108 scb->flags |= SCB_ABORT|SCB_CMDPHASE_ABORT;
1109 ahd_freeze_devq(ahd, scb);
750f3593
PA
1110 aic_set_transaction_status(scb, CAM_REQUEUE_REQ);
1111 aic_freeze_scb(scb);
984263bc
MD
1112
1113 /*
1114 * Allow the sequencer to continue with
1115 * non-pack processing.
1116 */
1117 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
1118 ahd_outb(ahd, CLRLQOINT1, CLRLQOPHACHGINPKT);
1119 if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) {
1120 ahd_outb(ahd, CLRLQOINT1, 0);
1121 }
1122#ifdef AHD_DEBUG
1123 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
1124 ahd_print_path(ahd, scb);
e3869ec7 1125 kprintf("Unexpected command phase from "
984263bc
MD
1126 "packetized target\n");
1127 }
1128#endif
1129 break;
1130 }
1131 }
1132 break;
1133 }
1134 case CFG4OVERRUN:
1135 {
1136 struct scb *scb;
1137 u_int scb_index;
1138
1139#ifdef AHD_DEBUG
1140 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
e3869ec7 1141 kprintf("%s: CFG4OVERRUN mode = %x\n", ahd_name(ahd),
984263bc
MD
1142 ahd_inb(ahd, MODE_PTR));
1143 }
1144#endif
1145 scb_index = ahd_get_scbptr(ahd);
1146 scb = ahd_lookup_scb(ahd, scb_index);
1147 if (scb == NULL) {
1148 /*
1149 * Attempt to transfer to an SCB that is
1150 * not outstanding.
1151 */
1152 ahd_assert_atn(ahd);
1153 ahd_outb(ahd, MSG_OUT, HOST_MSG);
1154 ahd->msgout_buf[0] = MSG_ABORT_TASK;
1155 ahd->msgout_len = 1;
1156 ahd->msgout_index = 0;
1157 ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
1158 /*
1159 * Clear status received flag to prevent any
1160 * attempt to complete this bogus SCB.
1161 */
1162 ahd_outb(ahd, SCB_CONTROL,
975524e9
PA
1163 ahd_inb_scbram(ahd, SCB_CONTROL)
1164 & ~STATUS_RCVD);
984263bc
MD
1165 }
1166 break;
1167 }
1168 case DUMP_CARD_STATE:
1169 {
1170 ahd_dump_card_state(ahd);
1171 break;
1172 }
1173 case PDATA_REINIT:
1174 {
1175#ifdef AHD_DEBUG
1176 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
e3869ec7 1177 kprintf("%s: PDATA_REINIT - DFCNTRL = 0x%x "
984263bc
MD
1178 "SG_CACHE_SHADOW = 0x%x\n",
1179 ahd_name(ahd), ahd_inb(ahd, DFCNTRL),
1180 ahd_inb(ahd, SG_CACHE_SHADOW));
1181 }
1182#endif
1183 ahd_reinitialize_dataptrs(ahd);
1184 break;
1185 }
1186 case HOST_MSG_LOOP:
1187 {
1188 struct ahd_devinfo devinfo;
1189
1190 /*
1191 * The sequencer has encountered a message phase
1192 * that requires host assistance for completion.
1193 * While handling the message phase(s), we will be
1194 * notified by the sequencer after each byte is
1195 * transfered so we can track bus phase changes.
1196 *
1197 * If this is the first time we've seen a HOST_MSG_LOOP
1198 * interrupt, initialize the state of the host message
1199 * loop.
1200 */
1201 ahd_fetch_devinfo(ahd, &devinfo);
1202 if (ahd->msg_type == MSG_TYPE_NONE) {
1203 struct scb *scb;
1204 u_int scb_index;
1205 u_int bus_phase;
1206
1207 bus_phase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
1208 if (bus_phase != P_MESGIN
1209 && bus_phase != P_MESGOUT) {
e3869ec7 1210 kprintf("ahd_intr: HOST_MSG_LOOP bad "
984263bc
MD
1211 "phase 0x%x\n", bus_phase);
1212 /*
1213 * Probably transitioned to bus free before
1214 * we got here. Just punt the message.
1215 */
1216 ahd_dump_card_state(ahd);
1217 ahd_clear_intstat(ahd);
1218 ahd_restart(ahd);
1219 return;
1220 }
1221
1222 scb_index = ahd_get_scbptr(ahd);
1223 scb = ahd_lookup_scb(ahd, scb_index);
1224 if (devinfo.role == ROLE_INITIATOR) {
1225 if (bus_phase == P_MESGOUT)
1226 ahd_setup_initiator_msgout(ahd,
1227 &devinfo,
1228 scb);
1229 else {
1230 ahd->msg_type =
1231 MSG_TYPE_INITIATOR_MSGIN;
1232 ahd->msgin_index = 0;
1233 }
1234 }
9f00895f 1235#ifdef AHD_TARGET_MODE
984263bc
MD
1236 else {
1237 if (bus_phase == P_MESGOUT) {
1238 ahd->msg_type =
1239 MSG_TYPE_TARGET_MSGOUT;
1240 ahd->msgin_index = 0;
1241 }
1242 else
1243 ahd_setup_target_msgin(ahd,
1244 &devinfo,
1245 scb);
1246 }
1247#endif
1248 }
1249
1250 ahd_handle_message_phase(ahd);
1251 break;
1252 }
1253 case NO_MATCH:
1254 {
1255 /* Ensure we don't leave the selection hardware on */
1256 AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
1257 ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO);
1258
e3869ec7 1259 kprintf("%s:%c:%d: no active SCB for reconnecting "
984263bc
MD
1260 "target - issuing BUS DEVICE RESET\n",
1261 ahd_name(ahd), 'A', ahd_inb(ahd, SELID) >> 4);
e3869ec7 1262 kprintf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
984263bc
MD
1263 "REG0 == 0x%x ACCUM = 0x%x\n",
1264 ahd_inb(ahd, SAVED_SCSIID), ahd_inb(ahd, SAVED_LUN),
1265 ahd_inw(ahd, REG0), ahd_inb(ahd, ACCUM));
e3869ec7 1266 kprintf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
984263bc
MD
1267 "SINDEX == 0x%x\n",
1268 ahd_inb(ahd, SEQ_FLAGS), ahd_get_scbptr(ahd),
1269 ahd_find_busy_tcl(ahd,
1270 BUILD_TCL(ahd_inb(ahd, SAVED_SCSIID),
1271 ahd_inb(ahd, SAVED_LUN))),
1272 ahd_inw(ahd, SINDEX));
e3869ec7 1273 kprintf("SELID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
984263bc
MD
1274 "SCB_CONTROL == 0x%x\n",
1275 ahd_inb(ahd, SELID), ahd_inb_scbram(ahd, SCB_SCSIID),
1276 ahd_inb_scbram(ahd, SCB_LUN),
1277 ahd_inb_scbram(ahd, SCB_CONTROL));
e3869ec7 1278 kprintf("SCSIBUS[0] == 0x%x, SCSISIGI == 0x%x\n",
984263bc 1279 ahd_inb(ahd, SCSIBUS), ahd_inb(ahd, SCSISIGI));
e3869ec7
SW
1280 kprintf("SXFRCTL0 == 0x%x\n", ahd_inb(ahd, SXFRCTL0));
1281 kprintf("SEQCTL0 == 0x%x\n", ahd_inb(ahd, SEQCTL0));
984263bc
MD
1282 ahd_dump_card_state(ahd);
1283 ahd->msgout_buf[0] = MSG_BUS_DEV_RESET;
1284 ahd->msgout_len = 1;
1285 ahd->msgout_index = 0;
1286 ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
1287 ahd_outb(ahd, MSG_OUT, HOST_MSG);
1288 ahd_assert_atn(ahd);
1289 break;
1290 }
1291 case PROTO_VIOLATION:
1292 {
1293 ahd_handle_proto_violation(ahd);
1294 break;
1295 }
1296 case IGN_WIDE_RES:
1297 {
1298 struct ahd_devinfo devinfo;
1299
1300 ahd_fetch_devinfo(ahd, &devinfo);
1301 ahd_handle_ign_wide_residue(ahd, &devinfo);
1302 break;
1303 }
1304 case BAD_PHASE:
1305 {
1306 u_int lastphase;
1307
1308 lastphase = ahd_inb(ahd, LASTPHASE);
e3869ec7 1309 kprintf("%s:%c:%d: unknown scsi bus phase %x, "
984263bc
MD
1310 "lastphase = 0x%x. Attempting to continue\n",
1311 ahd_name(ahd), 'A',
1312 SCSIID_TARGET(ahd, ahd_inb(ahd, SAVED_SCSIID)),
1313 lastphase, ahd_inb(ahd, SCSISIGI));
1314 break;
1315 }
1316 case MISSED_BUSFREE:
1317 {
1318 u_int lastphase;
1319
1320 lastphase = ahd_inb(ahd, LASTPHASE);
e3869ec7 1321 kprintf("%s:%c:%d: Missed busfree. "
984263bc
MD
1322 "Lastphase = 0x%x, Curphase = 0x%x\n",
1323 ahd_name(ahd), 'A',
1324 SCSIID_TARGET(ahd, ahd_inb(ahd, SAVED_SCSIID)),
1325 lastphase, ahd_inb(ahd, SCSISIGI));
1326 ahd_restart(ahd);
1327 return;
1328 }
1329 case DATA_OVERRUN:
1330 {
1331 /*
1332 * When the sequencer detects an overrun, it
1333 * places the controller in "BITBUCKET" mode
1334 * and allows the target to complete its transfer.
1335 * Unfortunately, none of the counters get updated
1336 * when the controller is in this mode, so we have
1337 * no way of knowing how large the overrun was.
1338 */
1339 struct scb *scb;
1340 u_int scbindex;
1341#ifdef AHD_DEBUG
1342 u_int lastphase;
1343#endif
1344
1345 scbindex = ahd_get_scbptr(ahd);
1346 scb = ahd_lookup_scb(ahd, scbindex);
1347#ifdef AHD_DEBUG
1348 lastphase = ahd_inb(ahd, LASTPHASE);
1349 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
1350 ahd_print_path(ahd, scb);
e3869ec7 1351 kprintf("data overrun detected %s. Tag == 0x%x.\n",
984263bc
MD
1352 ahd_lookup_phase_entry(lastphase)->phasemsg,
1353 SCB_GET_TAG(scb));
1354 ahd_print_path(ahd, scb);
e3869ec7 1355 kprintf("%s seen Data Phase. Length = %ld. "
984263bc
MD
1356 "NumSGs = %d.\n",
1357 ahd_inb(ahd, SEQ_FLAGS) & DPHASE
1358 ? "Have" : "Haven't",
750f3593 1359 aic_get_transfer_length(scb), scb->sg_count);
984263bc
MD
1360 ahd_dump_sglist(scb);
1361 }
1362#endif
1363
1364 /*
1365 * Set this and it will take effect when the
1366 * target does a command complete.
1367 */
1368 ahd_freeze_devq(ahd, scb);
750f3593
PA
1369 aic_set_transaction_status(scb, CAM_DATA_RUN_ERR);
1370 aic_freeze_scb(scb);
984263bc
MD
1371 break;
1372 }
1373 case MKMSG_FAILED:
1374 {
1375 struct ahd_devinfo devinfo;
1376 struct scb *scb;
1377 u_int scbid;
1378
1379 ahd_fetch_devinfo(ahd, &devinfo);
e3869ec7 1380 kprintf("%s:%c:%d:%d: Attempt to issue message failed\n",
984263bc
MD
1381 ahd_name(ahd), devinfo.channel, devinfo.target,
1382 devinfo.lun);
1383 scbid = ahd_get_scbptr(ahd);
1384 scb = ahd_lookup_scb(ahd, scbid);
1385 if (scb != NULL
1386 && (scb->flags & SCB_RECOVERY_SCB) != 0)
1387 /*
1388 * Ensure that we didn't put a second instance of this
1389 * SCB into the QINFIFO.
1390 */
1391 ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb),
1392 SCB_GET_CHANNEL(ahd, scb),
1393 SCB_GET_LUN(scb), SCB_GET_TAG(scb),
1394 ROLE_INITIATOR, /*status*/0,
1395 SEARCH_REMOVE);
1396 ahd_outb(ahd, SCB_CONTROL,
975524e9 1397 ahd_inb_scbram(ahd, SCB_CONTROL) & ~MK_MESSAGE);
984263bc
MD
1398 break;
1399 }
1400 case TASKMGMT_FUNC_COMPLETE:
1401 {
1402 u_int scbid;
1403 struct scb *scb;
1404
1405 scbid = ahd_get_scbptr(ahd);
1406 scb = ahd_lookup_scb(ahd, scbid);
1407 if (scb != NULL) {
1408 u_int lun;
1409 u_int tag;
1410 cam_status error;
1411
1412 ahd_print_path(ahd, scb);
e3869ec7 1413 kprintf("Task Management Func 0x%x Complete\n",
984263bc
MD
1414 scb->hscb->task_management);
1415 lun = CAM_LUN_WILDCARD;
1416 tag = SCB_LIST_NULL;
1417
1418 switch (scb->hscb->task_management) {
1419 case SIU_TASKMGMT_ABORT_TASK:
1420 tag = SCB_GET_TAG(scb);
1421 case SIU_TASKMGMT_ABORT_TASK_SET:
1422 case SIU_TASKMGMT_CLEAR_TASK_SET:
1423 lun = scb->hscb->lun;
1424 error = CAM_REQ_ABORTED;
1425 ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb),
1426 'A', lun, tag, ROLE_INITIATOR,
1427 error);
1428 break;
1429 case SIU_TASKMGMT_LUN_RESET:
1430 lun = scb->hscb->lun;
1431 case SIU_TASKMGMT_TARGET_RESET:
1432 {
1433 struct ahd_devinfo devinfo;
1434
1435 ahd_scb_devinfo(ahd, &devinfo, scb);
1436 error = CAM_BDR_SENT;
1437 ahd_handle_devreset(ahd, &devinfo, lun,
1438 CAM_BDR_SENT,
1439 lun != CAM_LUN_WILDCARD
1440 ? "Lun Reset"
1441 : "Target Reset",
1442 /*verbose_level*/0);
1443 break;
1444 }
1445 default:
1446 panic("Unexpected TaskMgmt Func\n");
1447 break;
1448 }
1449 }
1450 break;
1451 }
1452 case TASKMGMT_CMD_CMPLT_OKAY:
1453 {
1454 u_int scbid;
1455 struct scb *scb;
1456
1457 /*
1458 * An ABORT TASK TMF failed to be delivered before
1459 * the targeted command completed normally.
1460 */
1461 scbid = ahd_get_scbptr(ahd);
1462 scb = ahd_lookup_scb(ahd, scbid);
1463 if (scb != NULL) {
1464 /*
1465 * Remove the second instance of this SCB from
1466 * the QINFIFO if it is still there.
1467 */
1468 ahd_print_path(ahd, scb);
e3869ec7 1469 kprintf("SCB completes before TMF\n");
984263bc
MD
1470 /*
1471 * Handle losing the race. Wait until any
1472 * current selection completes. We will then
1473 * set the TMF back to zero in this SCB so that
1474 * the sequencer doesn't bother to issue another
1475 * sequencer interrupt for its completion.
1476 */
1477 while ((ahd_inb(ahd, SCSISEQ0) & ENSELO) != 0
1478 && (ahd_inb(ahd, SSTAT0) & SELDO) == 0
1479 && (ahd_inb(ahd, SSTAT1) & SELTO) == 0)
1480 ;
1481 ahd_outb(ahd, SCB_TASK_MANAGEMENT, 0);
1482 ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb),
1483 SCB_GET_CHANNEL(ahd, scb),
1484 SCB_GET_LUN(scb), SCB_GET_TAG(scb),
1485 ROLE_INITIATOR, /*status*/0,
1486 SEARCH_REMOVE);
1487 }
1488 break;
1489 }
1490 case TRACEPOINT0:
1491 case TRACEPOINT1:
1492 case TRACEPOINT2:
1493 case TRACEPOINT3:
e3869ec7 1494 kprintf("%s: Tracepoint %d\n", ahd_name(ahd),
984263bc
MD
1495 seqintcode - TRACEPOINT0);
1496 break;
1497 case NO_SEQINT:
1498 break;
1499 case SAW_HWERR:
1500 ahd_handle_hwerrint(ahd);
1501 break;
1502 default:
e3869ec7 1503 kprintf("%s: Unexpected SEQINTCODE %d\n", ahd_name(ahd),
984263bc
MD
1504 seqintcode);
1505 break;
1506 }
1507 /*
1508 * The sequencer is paused immediately on
1509 * a SEQINT, so we should restart it when
1510 * we're done.
1511 */
1512 ahd_unpause(ahd);
1513}
1514
1515void
1516ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat)
1517{
1518 struct scb *scb;
1519 u_int status0;
1520 u_int status3;
1521 u_int status;
1522 u_int lqistat1;
1523 u_int lqostat0;
1524 u_int scbid;
1525 u_int busfreetime;
1526
1527 ahd_update_modes(ahd);
1528 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
1529
1530 status3 = ahd_inb(ahd, SSTAT3) & (NTRAMPERR|OSRAMPERR);
1531 status0 = ahd_inb(ahd, SSTAT0) & (IOERR|OVERRUN|SELDI|SELDO);
1532 status = ahd_inb(ahd, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR);
1533 lqistat1 = ahd_inb(ahd, LQISTAT1);
1534 lqostat0 = ahd_inb(ahd, LQOSTAT0);
1535 busfreetime = ahd_inb(ahd, SSTAT2) & BUSFREETIME;
1536 if ((status0 & (SELDI|SELDO)) != 0) {
1537 u_int simode0;
1538
1539 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
1540 simode0 = ahd_inb(ahd, SIMODE0);
1541 status0 &= simode0 & (IOERR|OVERRUN|SELDI|SELDO);
1542 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
1543 }
1544 scbid = ahd_get_scbptr(ahd);
1545 scb = ahd_lookup_scb(ahd, scbid);
1546 if (scb != NULL
1547 && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) != 0)
1548 scb = NULL;
1549
984263bc
MD
1550 if ((status0 & IOERR) != 0) {
1551 u_int now_lvd;
1552
1553 now_lvd = ahd_inb(ahd, SBLKCTL) & ENAB40;
e3869ec7 1554 kprintf("%s: Transceiver State Has Changed to %s mode\n",
984263bc
MD
1555 ahd_name(ahd), now_lvd ? "LVD" : "SE");
1556 ahd_outb(ahd, CLRSINT0, CLRIOERR);
1557 /*
1558 * A change in I/O mode is equivalent to a bus reset.
1559 */
1560 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
1561 ahd_pause(ahd);
1562 ahd_setup_iocell_workaround(ahd);
1563 ahd_unpause(ahd);
1564 } else if ((status0 & OVERRUN) != 0) {
f39dcdf3 1565
e3869ec7 1566 kprintf("%s: SCSI offset overrun detected. Resetting bus.\n",
984263bc
MD
1567 ahd_name(ahd));
1568 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
1569 } else if ((status & SCSIRSTI) != 0) {
f39dcdf3 1570
e3869ec7 1571 kprintf("%s: Someone reset channel A\n", ahd_name(ahd));
984263bc
MD
1572 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/FALSE);
1573 } else if ((status & SCSIPERR) != 0) {
f39dcdf3
PA
1574
1575 /* Make sure the sequencer is in a safe location. */
1576 ahd_clear_critical_section(ahd);
1577
984263bc
MD
1578 ahd_handle_transmission_error(ahd);
1579 } else if (lqostat0 != 0) {
f39dcdf3 1580
e3869ec7 1581 kprintf("%s: lqostat0 == 0x%x!\n", ahd_name(ahd), lqostat0);
984263bc 1582 ahd_outb(ahd, CLRLQOINT0, lqostat0);
f39dcdf3 1583 if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0)
984263bc 1584 ahd_outb(ahd, CLRLQOINT1, 0);
984263bc
MD
1585 } else if ((status & SELTO) != 0) {
1586 u_int scbid;
1587
1588 /* Stop the selection */
1589 ahd_outb(ahd, SCSISEQ0, 0);
1590
f39dcdf3
PA
1591 /* Make sure the sequencer is in a safe location. */
1592 ahd_clear_critical_section(ahd);
1593
984263bc
MD
1594 /* No more pending messages */
1595 ahd_clear_msg_state(ahd);
1596
1597 /* Clear interrupt state */
1598 ahd_outb(ahd, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR);
1599
1600 /*
1601 * Although the driver does not care about the
1602 * 'Selection in Progress' status bit, the busy
d0d91865 1603 * LED does. SELINGO is only cleared by a successful
984263bc 1604 * selection, so we must manually clear it to insure
d0d91865 1605 * the LED turns off just in case no future successful
984263bc
MD
1606 * selections occur (e.g. no devices on the bus).
1607 */
1608 ahd_outb(ahd, CLRSINT0, CLRSELINGO);
1609
1610 scbid = ahd_inw(ahd, WAITING_TID_HEAD);
1611 scb = ahd_lookup_scb(ahd, scbid);
1612 if (scb == NULL) {
e3869ec7 1613 kprintf("%s: ahd_intr - referenced scb not "
984263bc
MD
1614 "valid during SELTO scb(0x%x)\n",
1615 ahd_name(ahd), scbid);
1616 ahd_dump_card_state(ahd);
1617 } else {
1618 struct ahd_devinfo devinfo;
1619#ifdef AHD_DEBUG
1620 if ((ahd_debug & AHD_SHOW_SELTO) != 0) {
1621 ahd_print_path(ahd, scb);
e3869ec7 1622 kprintf("Saw Selection Timeout for SCB 0x%x\n",
984263bc
MD
1623 scbid);
1624 }
1625#endif
984263bc 1626 ahd_scb_devinfo(ahd, &devinfo, scb);
750f3593 1627 aic_set_transaction_status(scb, CAM_SEL_TIMEOUT);
984263bc 1628 ahd_freeze_devq(ahd, scb);
f39dcdf3
PA
1629
1630 /*
1631 * Cancel any pending transactions on the device
1632 * now that it seems to be missing. This will
1633 * also revert us to async/narrow transfers until
1634 * we can renegotiate with the device.
1635 */
1636 ahd_handle_devreset(ahd, &devinfo,
1637 CAM_LUN_WILDCARD,
1638 CAM_SEL_TIMEOUT,
1639 "Selection Timeout",
1640 /*verbose_level*/1);
984263bc
MD
1641 }
1642 ahd_outb(ahd, CLRINT, CLRSCSIINT);
1643 ahd_iocell_first_selection(ahd);
1644 ahd_unpause(ahd);
1645 } else if ((status0 & (SELDI|SELDO)) != 0) {
f39dcdf3 1646
984263bc
MD
1647 ahd_iocell_first_selection(ahd);
1648 ahd_unpause(ahd);
1649 } else if (status3 != 0) {
e3869ec7 1650 kprintf("%s: SCSI Cell parity error SSTAT3 == 0x%x\n",
984263bc
MD
1651 ahd_name(ahd), status3);
1652 ahd_outb(ahd, CLRSINT3, status3);
1653 } else if ((lqistat1 & (LQIPHASE_LQ|LQIPHASE_NLQ)) != 0) {
f39dcdf3
PA
1654
1655 /* Make sure the sequencer is in a safe location. */
1656 ahd_clear_critical_section(ahd);
1657
984263bc
MD
1658 ahd_handle_lqiphase_error(ahd, lqistat1);
1659 } else if ((lqistat1 & LQICRCI_NLQ) != 0) {
1660 /*
1661 * This status can be delayed during some
1662 * streaming operations. The SCSIPHASE
1663 * handler has already dealt with this case
1664 * so just clear the error.
1665 */
1666 ahd_outb(ahd, CLRLQIINT1, CLRLQICRCI_NLQ);
fb5acdc8
PA
1667 } else if ((status & BUSFREE) != 0
1668 || (lqistat1 & LQOBUSFREE) != 0) {
984263bc
MD
1669 u_int lqostat1;
1670 int restart;
1671 int clear_fifo;
1672 int packetized;
1673 u_int mode;
1674
1675 /*
1676 * Clear our selection hardware as soon as possible.
1677 * We may have an entry in the waiting Q for this target,
1678 * that is affected by this busfree and we don't want to
1679 * go about selecting the target while we handle the event.
1680 */
1681 ahd_outb(ahd, SCSISEQ0, 0);
1682
f39dcdf3
PA
1683 /* Make sure the sequencer is in a safe location. */
1684 ahd_clear_critical_section(ahd);
1685
984263bc
MD
1686 /*
1687 * Determine what we were up to at the time of
1688 * the busfree.
1689 */
1690 mode = AHD_MODE_SCSI;
1691 busfreetime = ahd_inb(ahd, SSTAT2) & BUSFREETIME;
1692 lqostat1 = ahd_inb(ahd, LQOSTAT1);
1693 switch (busfreetime) {
1694 case BUSFREE_DFF0:
1695 case BUSFREE_DFF1:
1696 {
1697 u_int scbid;
1698 struct scb *scb;
1699
1700 mode = busfreetime == BUSFREE_DFF0
1701 ? AHD_MODE_DFF0 : AHD_MODE_DFF1;
1702 ahd_set_modes(ahd, mode, mode);
1703 scbid = ahd_get_scbptr(ahd);
1704 scb = ahd_lookup_scb(ahd, scbid);
1705 if (scb == NULL) {
e3869ec7 1706 kprintf("%s: Invalid SCB %d in DFF%d "
984263bc
MD
1707 "during unexpected busfree\n",
1708 ahd_name(ahd), scbid, mode);
1709 packetized = 0;
1710 } else
1711 packetized = (scb->flags & SCB_PACKETIZED) != 0;
1712 clear_fifo = 1;
1713 break;
1714 }
1715 case BUSFREE_LQO:
1716 clear_fifo = 0;
1717 packetized = 1;
1718 break;
1719 default:
1720 clear_fifo = 0;
1721 packetized = (lqostat1 & LQOBUSFREE) != 0;
1722 if (!packetized
7009d94e 1723 && ahd_inb(ahd, LASTPHASE) == P_BUSFREE
f39dcdf3 1724 && (ahd_inb(ahd, SSTAT0) & SELDI) == 0
7009d94e
PA
1725 && ((ahd_inb(ahd, SSTAT0) & SELDO) == 0
1726 || (ahd_inb(ahd, SCSISEQ0) & ENSELO) == 0))
1727 /*
1728 * Assume packetized if we are not
1729 * on the bus in a non-packetized
1730 * capacity and any pending selection
1731 * was a packetized selection.
1732 */
984263bc
MD
1733 packetized = 1;
1734 break;
1735 }
1736
1737#ifdef AHD_DEBUG
1738 if ((ahd_debug & AHD_SHOW_MISC) != 0)
e3869ec7 1739 kprintf("Saw Busfree. Busfreetime = 0x%x.\n",
984263bc
MD
1740 busfreetime);
1741#endif
1742 /*
1743 * Busfrees that occur in non-packetized phases are
1744 * handled by the nonpkt_busfree handler.
1745 */
1746 if (packetized && ahd_inb(ahd, LASTPHASE) == P_BUSFREE) {
1747 restart = ahd_handle_pkt_busfree(ahd, busfreetime);
1748 } else {
1749 packetized = 0;
1750 restart = ahd_handle_nonpkt_busfree(ahd);
1751 }
1752 /*
1753 * Clear the busfree interrupt status. The setting of
1754 * the interrupt is a pulse, so in a perfect world, we
1755 * would not need to muck with the ENBUSFREE logic. This
1756 * would ensure that if the bus moves on to another
1757 * connection, busfree protection is still in force. If
1758 * BUSFREEREV is broken, however, we must manually clear
1759 * the ENBUSFREE if the busfree occurred during a non-pack
1760 * connection so that we don't get false positives during
1761 * future, packetized, connections.
1762 */
1763 ahd_outb(ahd, CLRSINT1, CLRBUSFREE);
1764 if (packetized == 0
1765 && (ahd->bugs & AHD_BUSFREEREV_BUG) != 0)
1766 ahd_outb(ahd, SIMODE1,
1767 ahd_inb(ahd, SIMODE1) & ~ENBUSFREE);
1768
1769 if (clear_fifo)
1770 ahd_clear_fifo(ahd, mode);
1771
1772 ahd_clear_msg_state(ahd);
1773 ahd_outb(ahd, CLRINT, CLRSCSIINT);
1774 if (restart) {
1775 ahd_restart(ahd);
1776 } else {
1777 ahd_unpause(ahd);
1778 }
1779 } else {
e3869ec7 1780 kprintf("%s: Missing case in ahd_handle_scsiint. status = %x\n",
1781 ahd_name(ahd), status);
1782 ahd_dump_card_state(ahd);
1783 ahd_clear_intstat(ahd);
1784 ahd_unpause(ahd);
1785 }
1786}
1787
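/*
 * Recover from a CRC or parity error reported during an incoming
 * packet or a non-packetized transfer.  Depending on the LQISTAT1
 * and PERRDIAG status we either retry the LQ via LQIRETRY, queue an
 * INITIATOR DETECTED ERROR or MESSAGE PARITY ERROR message for the
 * host message loop, or reset the bus when no safer recovery exists.
 */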
1788static void
1789ahd_handle_transmission_error(struct ahd_softc *ahd)
1790{
1791 struct scb *scb;
1792 u_int scbid;
1793 u_int lqistat1;
1794 u_int lqistat2;
1795 u_int msg_out;
1796 u_int curphase;
1797 u_int lastphase;
1798 u_int perrdiag;
1799 u_int cur_col;
1800 int silent;
1801
1802 scb = NULL;
1803 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
1804 lqistat1 = ahd_inb(ahd, LQISTAT1) & ~(LQIPHASE_LQ|LQIPHASE_NLQ);
1805 lqistat2 = ahd_inb(ahd, LQISTAT2);
1806 if ((lqistat1 & (LQICRCI_NLQ|LQICRCI_LQ)) == 0
1807 && (ahd->bugs & AHD_NLQICRC_DELAYED_BUG) != 0) {
1808 u_int lqistate;
1809
1810 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
1811 lqistate = ahd_inb(ahd, LQISTATE);
1812 if ((lqistate >= 0x1E && lqistate <= 0x24)
1813 || (lqistate == 0x29)) {
1814#ifdef AHD_DEBUG
1815 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
e3869ec7 1816 kprintf("%s: NLQCRC found via LQISTATE\n",
1817 ahd_name(ahd));
1818 }
1819#endif
1820 lqistat1 |= LQICRCI_NLQ;
1821 }
1822 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
1823 }
1824
1825 ahd_outb(ahd, CLRLQIINT1, lqistat1);
1826 lastphase = ahd_inb(ahd, LASTPHASE);
1827 curphase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
1828 perrdiag = ahd_inb(ahd, PERRDIAG);
1829 msg_out = MSG_INITIATOR_DET_ERR;
1830 ahd_outb(ahd, CLRSINT1, CLRSCSIPERR);
1831
1832 /*
1833 * Try to find the SCB associated with this error.
1834 */
1835 silent = FALSE;
1836 if (lqistat1 == 0
1837 || (lqistat1 & LQICRCI_NLQ) != 0) {
1838 if ((lqistat1 & (LQICRCI_NLQ|LQIOVERI_NLQ)) != 0)
1839 ahd_set_active_fifo(ahd);
1840 scbid = ahd_get_scbptr(ahd);
1841 scb = ahd_lookup_scb(ahd, scbid);
1842 if (scb != NULL && SCB_IS_SILENT(scb))
1843 silent = TRUE;
1844 }
1845
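	/* Dump the error state unless the SCB has requested silent recovery. */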
1846 cur_col = 0;
1847 if (silent == FALSE) {
e3869ec7 1848 kprintf("%s: Transmission error detected\n", ahd_name(ahd));
1849 ahd_lqistat1_print(lqistat1, &cur_col, 50);
1850 ahd_lastphase_print(lastphase, &cur_col, 50);
1851 ahd_scsisigi_print(curphase, &cur_col, 50);
1852 ahd_perrdiag_print(perrdiag, &cur_col, 50);
e3869ec7 1853 kprintf("\n");
1854 ahd_dump_card_state(ahd);
1855 }
1856
1857 if ((lqistat1 & (LQIOVERI_LQ|LQIOVERI_NLQ)) != 0) {
1858 if (silent == FALSE) {
e3869ec7 1859 kprintf("%s: Gross protocol error during incoming "
1860 "packet. lqistat1 == 0x%x. Resetting bus.\n",
1861 ahd_name(ahd), lqistat1);
1862 }
1863 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
1864 return;
1865 } else if ((lqistat1 & LQICRCI_LQ) != 0) {
1866 /*
1867 * A CRC error has been detected on an incoming LQ.
1868 * The bus is currently hung on the last ACK.
1869 * Hit LQIRETRY to release the last ack, and
1870 * wait for the sequencer to determine that ATNO
1871 * is asserted while in message out to take us
1872 * to our host message loop. No NONPACKREQ or
1873 * LQIPHASE type errors will occur in this
1874 * scenario. After this first LQIRETRY, the LQI
1875 * manager will be in ISELO where it will
1876 * happily sit until another packet phase begins.
1877 * Unexpected bus free detection is enabled
1878 * through any phases that occur after we release
1879 * this last ack until the LQI manager sees a
1880 * packet phase. This implies we may have to
 1881		 * ignore a perfectly valid "unexpected busfree"
 1882		 * after our "initiator detected error" message is
 1883		 * sent. A busfree is the expected response after
 1884		 * we tell the target that its L_Q was corrupted.
1885 * (SPI4R09 10.7.3.3.3)
1886 */
1887 ahd_outb(ahd, LQCTL2, LQIRETRY);
e3869ec7 1888 kprintf("LQIRetry for LQICRCI_LQ to release ACK\n");
1889 } else if ((lqistat1 & LQICRCI_NLQ) != 0) {
1890 /*
1891 * We detected a CRC error in a NON-LQ packet.
1892 * The hardware has varying behavior in this situation
1893 * depending on whether this packet was part of a
1894 * stream or not.
1895 *
1896 * PKT by PKT mode:
1897 * The hardware has already acked the complete packet.
1898 * If the target honors our outstanding ATN condition,
1899 * we should be (or soon will be) in MSGOUT phase.
1900 * This will trigger the LQIPHASE_LQ status bit as the
1901 * hardware was expecting another LQ. Unexpected
1902 * busfree detection is enabled. Once LQIPHASE_LQ is
1903 * true (first entry into host message loop is much
1904 * the same), we must clear LQIPHASE_LQ and hit
1905 * LQIRETRY so the hardware is ready to handle
1906 * a future LQ. NONPACKREQ will not be asserted again
1907 * once we hit LQIRETRY until another packet is
1908 * processed. The target may either go busfree
1909 * or start another packet in response to our message.
1910 *
1911 * Read Streaming P0 asserted:
1912 * If we raise ATN and the target completes the entire
1913 * stream (P0 asserted during the last packet), the
1914 * hardware will ack all data and return to the ISTART
 1915		 * state. When the target responds to our ATN condition,
1916 * LQIPHASE_LQ will be asserted. We should respond to
1917 * this with an LQIRETRY to prepare for any future
1918 * packets. NONPACKREQ will not be asserted again
1919 * once we hit LQIRETRY until another packet is
1920 * processed. The target may either go busfree or
1921 * start another packet in response to our message.
1922 * Busfree detection is enabled.
1923 *
1924 * Read Streaming P0 not asserted:
1925 * If we raise ATN and the target transitions to
1926 * MSGOUT in or after a packet where P0 is not
1927 * asserted, the hardware will assert LQIPHASE_NLQ.
1928 * We should respond to the LQIPHASE_NLQ with an
1929 * LQIRETRY. Should the target stay in a non-pkt
1930 * phase after we send our message, the hardware
1931 * will assert LQIPHASE_LQ. Recovery is then just as
1932 * listed above for the read streaming with P0 asserted.
1933 * Busfree detection is enabled.
1934 */
1935 if (silent == FALSE)
e3869ec7 1936 kprintf("LQICRC_NLQ\n");
984263bc 1937 if (scb == NULL) {
e3869ec7 1938 kprintf("%s: No SCB valid for LQICRC_NLQ. "
1939 "Resetting bus\n", ahd_name(ahd));
1940 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
1941 return;
1942 }
1943 } else if ((lqistat1 & LQIBADLQI) != 0) {
e3869ec7 1944 kprintf("Need to handle BADLQI!\n");
1945 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
1946 return;
1947 } else if ((perrdiag & (PARITYERR|PREVPHASE)) == PARITYERR) {
1948 if ((curphase & ~P_DATAIN_DT) != 0) {
1949 /* Ack the byte. So we can continue. */
1950 if (silent == FALSE)
e3869ec7 1951 kprintf("Acking %s to clear perror\n",
1952 ahd_lookup_phase_entry(curphase)->phasemsg);
1953 ahd_inb(ahd, SCSIDAT);
1954 }
1955
1956 if (curphase == P_MESGIN)
1957 msg_out = MSG_PARITY_ERROR;
1958 }
1959
1960 /*
1961 * We've set the hardware to assert ATN if we
1962 * get a parity error on "in" phases, so all we
1963 * need to do is stuff the message buffer with
1964 * the appropriate message. "In" phases have set
1965 * mesg_out to something other than MSG_NOP.
1966 */
1967 ahd->send_msg_perror = msg_out;
1968 if (scb != NULL && msg_out == MSG_INITIATOR_DET_ERR)
1969 scb->flags |= SCB_TRANSMISSION_ERROR;
1970 ahd_outb(ahd, MSG_OUT, HOST_MSG);
1971 ahd_outb(ahd, CLRINT, CLRSCSIINT);
1972 ahd_unpause(ahd);
1973}
1974
1975static void
1976ahd_handle_lqiphase_error(struct ahd_softc *ahd, u_int lqistat1)
1977{
1978 /*
1979 * Clear the sources of the interrupts.
1980 */
1981 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
1982 ahd_outb(ahd, CLRLQIINT1, lqistat1);
1983
1984 /*
1985 * If the "illegal" phase changes were in response
1986 * to our ATN to flag a CRC error, AND we ended up
1987 * on packet boundaries, clear the error, restart the
1988 * LQI manager as appropriate, and go on our merry
1989 * way toward sending the message. Otherwise, reset
1990 * the bus to clear the error.
1991 */
1992 ahd_set_active_fifo(ahd);
1993 if ((ahd_inb(ahd, SCSISIGO) & ATNO) != 0
1994 && (ahd_inb(ahd, MDFFSTAT) & DLZERO) != 0) {
1995 if ((lqistat1 & LQIPHASE_LQ) != 0) {
e3869ec7 1996 kprintf("LQIRETRY for LQIPHASE_LQ\n");
1997 ahd_outb(ahd, LQCTL2, LQIRETRY);
1998 } else if ((lqistat1 & LQIPHASE_NLQ) != 0) {
e3869ec7 1999 kprintf("LQIRETRY for LQIPHASE_NLQ\n");
2000 ahd_outb(ahd, LQCTL2, LQIRETRY);
2001 } else
2002 panic("ahd_handle_lqiphase_error: No phase errors\n");
2003 ahd_dump_card_state(ahd);
2004 ahd_outb(ahd, CLRINT, CLRSCSIINT);
2005 ahd_unpause(ahd);
2006 } else {
e3869ec7 2007		kprintf("Resetting Channel for LQI Phase error\n");
2008 ahd_dump_card_state(ahd);
2009 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
2010 }
2011}
2012
2013/*
2014 * Packetized unexpected or expected busfree.
2015 * Entered in mode based on busfreetime.
2016 */
2017static int
2018ahd_handle_pkt_busfree(struct ahd_softc *ahd, u_int busfreetime)
2019{
2020 u_int lqostat1;
2021
2022 AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
2023 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
2024 lqostat1 = ahd_inb(ahd, LQOSTAT1);
2025 if ((lqostat1 & LQOBUSFREE) != 0) {
2026 struct scb *scb;
2027 u_int scbid;
2028 u_int saved_scbptr;
2029 u_int waiting_h;
2030 u_int waiting_t;
2031 u_int next;
2032
2033 /*
2034 * The LQO manager detected an unexpected busfree
2035 * either:
2036 *
2037 * 1) During an outgoing LQ.
2038 * 2) After an outgoing LQ but before the first
2039 * REQ of the command packet.
2040 * 3) During an outgoing command packet.
2041 *
2042 * In all cases, CURRSCB is pointing to the
2043 * SCB that encountered the failure. Clean
2044 * up the queue, clear SELDO and LQOBUSFREE,
2045 * and allow the sequencer to restart the select
 2046		 * out at its leisure.
2047 */
2048 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
2049 scbid = ahd_inw(ahd, CURRSCB);
2050 scb = ahd_lookup_scb(ahd, scbid);
2051 if (scb == NULL)
2052 panic("SCB not valid during LQOBUSFREE");
2053 /*
2054 * Clear the status.
2055 */
2056 ahd_outb(ahd, CLRLQOINT1, CLRLQOBUSFREE);
2057 if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0)
2058 ahd_outb(ahd, CLRLQOINT1, 0);
2059 ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO);
2060 ahd_flush_device_writes(ahd);
2061 ahd_outb(ahd, CLRSINT0, CLRSELDO);
2062
2063 /*
2064 * Return the LQO manager to its idle loop. It will
2065 * not do this automatically if the busfree occurs
2066 * after the first REQ of either the LQ or command
2067 * packet or between the LQ and command packet.
2068 */
2069 ahd_outb(ahd, LQCTL2, ahd_inb(ahd, LQCTL2) | LQOTOIDLE);
2070
2071 /*
2072 * Update the waiting for selection queue so
2073 * we restart on the correct SCB.
2074 */
2075 waiting_h = ahd_inw(ahd, WAITING_TID_HEAD);
2076 saved_scbptr = ahd_get_scbptr(ahd);
2077 if (waiting_h != scbid) {
2078
2079 ahd_outw(ahd, WAITING_TID_HEAD, scbid);
2080 waiting_t = ahd_inw(ahd, WAITING_TID_TAIL);
2081 if (waiting_t == waiting_h) {
2082 ahd_outw(ahd, WAITING_TID_TAIL, scbid);
2083 next = SCB_LIST_NULL;
2084 } else {
2085 ahd_set_scbptr(ahd, waiting_h);
2086 next = ahd_inw_scbram(ahd, SCB_NEXT2);
2087 }
2088 ahd_set_scbptr(ahd, scbid);
2089 ahd_outw(ahd, SCB_NEXT2, next);
2090 }
2091 ahd_set_scbptr(ahd, saved_scbptr);
2092 if (scb->crc_retry_count < AHD_MAX_LQ_CRC_ERRORS) {
2093 if (SCB_IS_SILENT(scb) == FALSE) {
2094 ahd_print_path(ahd, scb);
e3869ec7 2095 kprintf("Probable outgoing LQ CRC error. "
2096 "Retrying command\n");
2097 }
2098 scb->crc_retry_count++;
2099 } else {
2100 aic_set_transaction_status(scb, CAM_UNCOR_PARITY);
2101 aic_freeze_scb(scb);
2102 ahd_freeze_devq(ahd, scb);
2103 }
2104 /* Return unpausing the sequencer. */
2105 return (0);
2106 } else if ((ahd_inb(ahd, PERRDIAG) & PARITYERR) != 0) {
2107 /*
2108 * Ignore what are really parity errors that
2109 * occur on the last REQ of a free running
2110 * clock prior to going busfree. Some drives
 2111		 * do not properly perform active negation just before
 2112		 * going busfree, resulting in a parity glitch.
2113 */
2114 ahd_outb(ahd, CLRSINT1, CLRSCSIPERR|CLRBUSFREE);
2115#ifdef AHD_DEBUG
2116 if ((ahd_debug & AHD_SHOW_MASKED_ERRORS) != 0)
e3869ec7 2117 kprintf("%s: Parity on last REQ detected "
2118 "during busfree phase.\n",
2119 ahd_name(ahd));
2120#endif
2121 /* Return unpausing the sequencer. */
2122 return (0);
2123 }
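	/*
	 * Any other packetized busfree is truly unexpected.  If a data
	 * FIFO is still active we know which SCB was on the bus and can
	 * abort it; otherwise all we can do is log the card state and
	 * restart the sequencer.
	 */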
2124 if (ahd->src_mode != AHD_MODE_SCSI) {
2125 u_int scbid;
2126 struct scb *scb;
2127
2128 scbid = ahd_get_scbptr(ahd);
2129 scb = ahd_lookup_scb(ahd, scbid);
2130 ahd_print_path(ahd, scb);
e3869ec7 2131 kprintf("Unexpected PKT busfree condition\n");
2132 ahd_dump_card_state(ahd);
2133 ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb), 'A',
2134 SCB_GET_LUN(scb), SCB_GET_TAG(scb),
2135 ROLE_INITIATOR, CAM_UNEXP_BUSFREE);
2136
2137 /* Return restarting the sequencer. */
2138 return (1);
2139 }
e3869ec7 2140 kprintf("%s: Unexpected PKT busfree condition\n", ahd_name(ahd));
2141 ahd_dump_card_state(ahd);
2142 /* Restart the sequencer. */
2143 return (1);
2144}
2145
2146/*
2147 * Non-packetized unexpected or expected busfree.
2148 */
2149static int
2150ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
2151{
2152 struct ahd_devinfo devinfo;
2153 struct scb *scb;
2154 u_int lastphase;
2155 u_int saved_scsiid;
2156 u_int saved_lun;
2157 u_int target;
2158 u_int initiator_role_id;
2159 u_int scbid;
2160 u_int ppr_busfree;
2161 int printerror;
2162
2163 /*
 2164	 * Look at what phase we were last in. If it was message out,
2165 * chances are pretty good that the busfree was in response
2166 * to one of our abort requests.
2167 */
2168 lastphase = ahd_inb(ahd, LASTPHASE);
2169 saved_scsiid = ahd_inb(ahd, SAVED_SCSIID);
2170 saved_lun = ahd_inb(ahd, SAVED_LUN);
2171 target = SCSIID_TARGET(ahd, saved_scsiid);
2172 initiator_role_id = SCSIID_OUR_ID(saved_scsiid);
2173 ahd_compile_devinfo(&devinfo, initiator_role_id,
2174 target, saved_lun, 'A', ROLE_INITIATOR);
2175 printerror = 1;
2176
2177 scbid = ahd_get_scbptr(ahd);
2178 scb = ahd_lookup_scb(ahd, scbid);
2179 if (scb != NULL
2180 && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) != 0)
2181 scb = NULL;
2182
2183 ppr_busfree = (ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0;
2184 if (lastphase == P_MESGOUT) {
2185 u_int tag;
2186
2187 tag = SCB_LIST_NULL;
2188 if (ahd_sent_msg(ahd, AHDMSG_1B, MSG_ABORT_TAG, TRUE)
2189 || ahd_sent_msg(ahd, AHDMSG_1B, MSG_ABORT, TRUE)) {
2190 int found;
2191 int sent_msg;
2192
2193 if (scb == NULL) {
2194 ahd_print_devinfo(ahd, &devinfo);
e3869ec7 2195 kprintf("Abort for unidentified "
2196 "connection completed.\n");
2197 /* restart the sequencer. */
2198 return (1);
2199 }
2200 sent_msg = ahd->msgout_buf[ahd->msgout_index - 1];
2201 ahd_print_path(ahd, scb);
e3869ec7 2202 kprintf("SCB %d - Abort%s Completed.\n",
2203 SCB_GET_TAG(scb),
2204 sent_msg == MSG_ABORT_TAG ? "" : " Tag");
2205
2206 if (sent_msg == MSG_ABORT_TAG)
2207 tag = SCB_GET_TAG(scb);
2208
2209 if ((scb->flags & SCB_CMDPHASE_ABORT) != 0) {
2210 /*
2211 * This abort is in response to an
2212 * unexpected switch to command phase
2213 * for a packetized connection. Since
2214 * the identify message was never sent,
2215 * "saved lun" is 0. We really want to
2216 * abort only the SCB that encountered
2217 * this error, which could have a different
2218 * lun. The SCB will be retried so the OS
2219 * will see the UA after renegotiating to
2220 * packetized.
2221 */
2222 tag = SCB_GET_TAG(scb);
2223 saved_lun = scb->hscb->lun;
2224 }
2225 found = ahd_abort_scbs(ahd, target, 'A', saved_lun,
2226 tag, ROLE_INITIATOR,
2227 CAM_REQ_ABORTED);
e3869ec7 2228 kprintf("found == 0x%x\n", found);
2229 printerror = 0;
2230 } else if (ahd_sent_msg(ahd, AHDMSG_1B,
2231 MSG_BUS_DEV_RESET, TRUE)) {
84754cd0 2232#if defined(__DragonFly__) || defined(__FreeBSD__)
2233 /*
2234 * Don't mark the user's request for this BDR
2235 * as completing with CAM_BDR_SENT. CAM3
2236 * specifies CAM_REQ_CMP.
2237 */
2238 if (scb != NULL
2239 && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV
2240 && ahd_match_scb(ahd, scb, target, 'A',
2241 CAM_LUN_WILDCARD, SCB_LIST_NULL,
2242 ROLE_INITIATOR))
750f3593 2243 aic_set_transaction_status(scb, CAM_REQ_CMP);
2244#endif
2245 ahd_handle_devreset(ahd, &devinfo, CAM_LUN_WILDCARD,
2246 CAM_BDR_SENT, "Bus Device Reset",
2247 /*verbose_level*/0);
2248 printerror = 0;
2249 } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, FALSE)
2250 && ppr_busfree == 0) {
2251 struct ahd_initiator_tinfo *tinfo;
2252 struct ahd_tmode_tstate *tstate;
2253
2254 /*
2255 * PPR Rejected.
2256 *
2257 * If the previous negotiation was packetized,
2258 * this could be because the device has been
2259 * reset without our knowledge. Force our
2260 * current negotiation to async and retry the
2261 * negotiation. Otherwise retry the command
2262 * with non-ppr negotiation.
2263 */
2264#ifdef AHD_DEBUG
2265 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
e3869ec7 2266 kprintf("PPR negotiation rejected busfree.\n");
2267#endif
2268 tinfo = ahd_fetch_transinfo(ahd, devinfo.channel,
2269 devinfo.our_scsiid,
2270 devinfo.target, &tstate);
2271 if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ)!=0) {
2272 ahd_set_width(ahd, &devinfo,
2273 MSG_EXT_WDTR_BUS_8_BIT,
2274 AHD_TRANS_CUR,
2275 /*paused*/TRUE);
2276 ahd_set_syncrate(ahd, &devinfo,
2277 /*period*/0, /*offset*/0,
2278 /*ppr_options*/0,
2279 AHD_TRANS_CUR,
2280 /*paused*/TRUE);
2281 /*
2282 * The expect PPR busfree handler below
2283 * will effect the retry and necessary
2284 * abort.
2285 */
2286 } else {
2287 tinfo->curr.transport_version = 2;
2288 tinfo->goal.transport_version = 2;
2289 tinfo->goal.ppr_options = 0;
2290 /*
2291 * Remove any SCBs in the waiting for selection
2292 * queue that may also be for this target so
2293 * that command ordering is preserved.
2294 */
2295 ahd_freeze_devq(ahd, scb);
2296 ahd_qinfifo_requeue_tail(ahd, scb);
2297 printerror = 0;
2298 }
2299 } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, FALSE)
2300 && ppr_busfree == 0) {
2301 /*
2302 * Negotiation Rejected. Go-narrow and
2303 * retry command.
2304 */
2305#ifdef AHD_DEBUG
2306 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
e3869ec7 2307 kprintf("WDTR negotiation rejected busfree.\n");
2308#endif
2309 ahd_set_width(ahd, &devinfo,
2310 MSG_EXT_WDTR_BUS_8_BIT,
2311 AHD_TRANS_CUR|AHD_TRANS_GOAL,
2312 /*paused*/TRUE);
2313 /*
2314 * Remove any SCBs in the waiting for selection
2315 * queue that may also be for this target so that
2316 * command ordering is preserved.
2317 */
2318 ahd_freeze_devq(ahd, scb);
2319 ahd_qinfifo_requeue_tail(ahd, scb);
2320 printerror = 0;
2321 } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, FALSE)
2322 && ppr_busfree == 0) {
2323 /*
2324 * Negotiation Rejected. Go-async and
2325 * retry command.
2326 */
2327#ifdef AHD_DEBUG
2328 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
e3869ec7 2329 kprintf("SDTR negotiation rejected busfree.\n");
2330#endif
2331 ahd_set_syncrate(ahd, &devinfo,
2332 /*period*/0, /*offset*/0,
2333 /*ppr_options*/0,
2334 AHD_TRANS_CUR|AHD_TRANS_GOAL,
2335 /*paused*/TRUE);
2336 /*
2337 * Remove any SCBs in the waiting for selection
2338 * queue that may also be for this target so that
2339 * command ordering is preserved.
2340 */
2341 ahd_freeze_devq(ahd, scb);
2342 ahd_qinfifo_requeue_tail(ahd, scb);
2343 printerror = 0;
2344 } else if ((ahd->msg_flags & MSG_FLAG_EXPECT_IDE_BUSFREE) != 0
2345 && ahd_sent_msg(ahd, AHDMSG_1B,
2346 MSG_INITIATOR_DET_ERR, TRUE)) {
2347
2348#ifdef AHD_DEBUG
2349 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
e3869ec7 2350 kprintf("Expected IDE Busfree\n");
2351#endif
2352 printerror = 0;
2353 } else if ((ahd->msg_flags & MSG_FLAG_EXPECT_QASREJ_BUSFREE)
2354 && ahd_sent_msg(ahd, AHDMSG_1B,
2355 MSG_MESSAGE_REJECT, TRUE)) {
2356
2357#ifdef AHD_DEBUG
2358 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
e3869ec7 2359 kprintf("Expected QAS Reject Busfree\n");
2360#endif
2361 printerror = 0;
2362 }
2363 }
2364
2365 /*
2366 * The busfree required flag is honored at the end of
2367 * the message phases. We check it last in case we
2368 * had to send some other message that caused a busfree.
2369 */
2370 if (printerror != 0
2371 && (lastphase == P_MESGIN || lastphase == P_MESGOUT)
2372 && ((ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0)) {
2373
2374 ahd_freeze_devq(ahd, scb);
2375 aic_set_transaction_status(scb, CAM_REQUEUE_REQ);
2376 aic_freeze_scb(scb);
2377 if ((ahd->msg_flags & MSG_FLAG_IU_REQ_CHANGED) != 0) {
2378 ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb),
2379 SCB_GET_CHANNEL(ahd, scb),
2380 SCB_GET_LUN(scb), SCB_LIST_NULL,
2381 ROLE_INITIATOR, CAM_REQ_ABORTED);
2382 } else {
2383#ifdef AHD_DEBUG
2384 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
e3869ec7 2385 kprintf("PPR Negotiation Busfree.\n");
2386#endif
2387 ahd_done(ahd, scb);
2388 }
2389 printerror = 0;
2390 }
2391 if (printerror != 0) {
2392 int aborted;
2393
2394 aborted = 0;
2395 if (scb != NULL) {
2396 u_int tag;
2397
2398 if ((scb->hscb->control & TAG_ENB) != 0)
2399 tag = SCB_GET_TAG(scb);
2400 else
2401 tag = SCB_LIST_NULL;
2402 ahd_print_path(ahd, scb);
2403 aborted = ahd_abort_scbs(ahd, target, 'A',
2404 SCB_GET_LUN(scb), tag,
2405 ROLE_INITIATOR,
2406 CAM_UNEXP_BUSFREE);
2407 } else {
2408 /*
2409 * We had not fully identified this connection,
2410 * so we cannot abort anything.
2411 */
e3869ec7 2412 kprintf("%s: ", ahd_name(ahd));
984263bc 2413 }
e3869ec7 2414 kprintf("Unexpected busfree %s, %d SCBs aborted, "
2415 "PRGMCNT == 0x%x\n",
2416 ahd_lookup_phase_entry(lastphase)->phasemsg,
2417 aborted,
750f3593 2418 ahd_inw(ahd, PRGMCNT));
984263bc 2419 ahd_dump_card_state(ahd);
2420 if (lastphase != P_BUSFREE)
2421 ahd_force_renegotiation(ahd, &devinfo);
2422 }
2423 /* Always restart the sequencer. */
2424 return (1);
2425}
2426
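/*
 * A target has violated the SCSI protocol (e.g. reconnected without an
 * IDENTIFY message, skipped status, or jumped straight to a data
 * phase).  Tag the active SCB, if any, with CAM_SEQUENCE_FAIL and
 * either abort the offending transaction with an ABORT message or,
 * when the bus state leaves no safe alternative, reset the channel.
 */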
2427static void
2428ahd_handle_proto_violation(struct ahd_softc *ahd)
2429{
2430 struct ahd_devinfo devinfo;
2431 struct scb *scb;
2432 u_int scbid;
2433 u_int seq_flags;
2434 u_int curphase;
2435 u_int lastphase;
2436 int found;
2437
2438 ahd_fetch_devinfo(ahd, &devinfo);
2439 scbid = ahd_get_scbptr(ahd);
2440 scb = ahd_lookup_scb(ahd, scbid);
2441 seq_flags = ahd_inb(ahd, SEQ_FLAGS);
2442 curphase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
2443 lastphase = ahd_inb(ahd, LASTPHASE);
2444 if ((seq_flags & NOT_IDENTIFIED) != 0) {
2445
2446 /*
2447 * The reconnecting target either did not send an
2448 * identify message, or did, but we didn't find an SCB
2449 * to match.
2450 */
2451 ahd_print_devinfo(ahd, &devinfo);
e3869ec7 2452 kprintf("Target did not send an IDENTIFY message. "
2453 "LASTPHASE = 0x%x.\n", lastphase);
2454 scb = NULL;
2455 } else if (scb == NULL) {
2456 /*
2457 * We don't seem to have an SCB active for this
2458 * transaction. Print an error and reset the bus.
2459 */
2460 ahd_print_devinfo(ahd, &devinfo);
e3869ec7 2461 kprintf("No SCB found during protocol violation\n");
2462 goto proto_violation_reset;
2463 } else {
750f3593 2464 aic_set_transaction_status(scb, CAM_SEQUENCE_FAIL);
2465 if ((seq_flags & NO_CDB_SENT) != 0) {
2466 ahd_print_path(ahd, scb);
e3869ec7 2467 kprintf("No or incomplete CDB sent to device.\n");
2468 } else if ((ahd_inb_scbram(ahd, SCB_CONTROL)
2469 & STATUS_RCVD) == 0) {
2470 /*
2471 * The target never bothered to provide status to
2472 * us prior to completing the command. Since we don't
2473 * know the disposition of this command, we must attempt
2474 * to abort it. Assert ATN and prepare to send an abort
2475 * message.
2476 */
2477 ahd_print_path(ahd, scb);
e3869ec7 2478 kprintf("Completed command without status.\n");
2479 } else {
2480 ahd_print_path(ahd, scb);
e3869ec7 2481 kprintf("Unknown protocol violation.\n");
2482 ahd_dump_card_state(ahd);
2483 }
2484 }
2485 if ((lastphase & ~P_DATAIN_DT) == 0
2486 || lastphase == P_COMMAND) {
2487proto_violation_reset:
2488 /*
2489 * Target either went directly to data
2490 * phase or didn't respond to our ATN.
2491 * The only safe thing to do is to blow
2492 * it away with a bus reset.
2493 */
2494 found = ahd_reset_channel(ahd, 'A', TRUE);
e3869ec7 2495 kprintf("%s: Issued Channel %c Bus Reset. "
2496 "%d SCBs aborted\n", ahd_name(ahd), 'A', found);
2497 } else {
2498 /*
2499 * Leave the selection hardware off in case
2500 * this abort attempt will affect yet to
2501 * be sent commands.
2502 */
2503 ahd_outb(ahd, SCSISEQ0,
2504 ahd_inb(ahd, SCSISEQ0) & ~ENSELO);
2505 ahd_assert_atn(ahd);
2506 ahd_outb(ahd, MSG_OUT, HOST_MSG);
2507 if (scb == NULL) {
2508 ahd_print_devinfo(ahd, &devinfo);
2509 ahd->msgout_buf[0] = MSG_ABORT_TASK;
2510 ahd->msgout_len = 1;
2511 ahd->msgout_index = 0;
2512 ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
2513 } else {
2514 ahd_print_path(ahd, scb);
2515 scb->flags |= SCB_ABORT;
2516 }
e3869ec7 2517 kprintf("Protocol violation %s. Attempting to abort.\n",
2518 ahd_lookup_phase_entry(curphase)->phasemsg);
2519 }
2520}
2521
2522/*
2523 * Force renegotiation to occur the next time we initiate
2524 * a command to the current device.
2525 */
2526static void
2527ahd_force_renegotiation(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
2528{
2529 struct ahd_initiator_tinfo *targ_info;
2530 struct ahd_tmode_tstate *tstate;
2531
2532#ifdef AHD_DEBUG
2533 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
2534 ahd_print_devinfo(ahd, devinfo);
e3869ec7 2535 kprintf("Forcing renegotiation\n");
2536 }
2537#endif
2538 targ_info = ahd_fetch_transinfo(ahd,
2539 devinfo->channel,
2540 devinfo->our_scsiid,
2541 devinfo->target,
2542 &tstate);
2543 ahd_update_neg_request(ahd, devinfo, tstate,
2544 targ_info, AHD_NEG_IF_NON_ASYNC);
2545}
2546
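/*
 * If the sequencer is executing within one of the firmware's critical
 * sections, single step it (with most SCSI/LQI/LQO interrupt sources
 * masked) until it reaches a safe address before the host modifies
 * controller state.  AHD_MAX_STEPS bounds the walk so that a wedged
 * sequencer produces a panic rather than a silent hang.
 */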
2547#define AHD_MAX_STEPS 2000
2548void
2549ahd_clear_critical_section(struct ahd_softc *ahd)
2550{
2551 ahd_mode_state saved_modes;
2552 int stepping;
2553 int steps;
2554 int first_instr;
2555 u_int simode0;
2556 u_int simode1;
2557 u_int simode3;
2558 u_int lqimode0;
2559 u_int lqimode1;
2560 u_int lqomode0;
2561 u_int lqomode1;
2562
2563 if (ahd->num_critical_sections == 0)
2564 return;
2565
2566 stepping = FALSE;
2567 steps = 0;
2568 first_instr = 0;
2569 simode0 = 0;
2570 simode1 = 0;
2571 simode3 = 0;
2572 lqimode0 = 0;
2573 lqimode1 = 0;
2574 lqomode0 = 0;
2575 lqomode1 = 0;
2576 saved_modes = ahd_save_modes(ahd);
2577 for (;;) {
2578 struct cs *cs;
2579 u_int seqaddr;
2580 u_int i;
2581
2582 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
750f3593 2583 seqaddr = ahd_inw(ahd, CURADDR);
2584
2585 cs = ahd->critical_sections;
2586 for (i = 0; i < ahd->num_critical_sections; i++, cs++) {
2587
2588 if (cs->begin < seqaddr && cs->end >= seqaddr)
2589 break;
2590 }
2591
2592 if (i == ahd->num_critical_sections)
2593 break;
2594
2595 if (steps > AHD_MAX_STEPS) {
e3869ec7 2596 kprintf("%s: Infinite loop in critical section\n"
2597 "%s: First Instruction 0x%x now 0x%x\n",
2598 ahd_name(ahd), ahd_name(ahd), first_instr,
2599 seqaddr);
2600 ahd_dump_card_state(ahd);
2601 panic("critical section loop");
2602 }
2603
2604 steps++;
2605#ifdef AHD_DEBUG
2606 if ((ahd_debug & AHD_SHOW_MISC) != 0)
e3869ec7 2607 kprintf("%s: Single stepping at 0x%x\n", ahd_name(ahd),
2608 seqaddr);
2609#endif
2610 if (stepping == FALSE) {
2611
2612 first_instr = seqaddr;
2613 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
2614 simode0 = ahd_inb(ahd, SIMODE0);
2615 simode3 = ahd_inb(ahd, SIMODE3);
2616 lqimode0 = ahd_inb(ahd, LQIMODE0);
2617 lqimode1 = ahd_inb(ahd, LQIMODE1);
2618 lqomode0 = ahd_inb(ahd, LQOMODE0);
2619 lqomode1 = ahd_inb(ahd, LQOMODE1);
2620 ahd_outb(ahd, SIMODE0, 0);
2621 ahd_outb(ahd, SIMODE3, 0);
2622 ahd_outb(ahd, LQIMODE0, 0);
2623 ahd_outb(ahd, LQIMODE1, 0);
2624 ahd_outb(ahd, LQOMODE0, 0);
2625 ahd_outb(ahd, LQOMODE1, 0);
2626 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
2627 simode1 = ahd_inb(ahd, SIMODE1);
2628 /*
2629 * We don't clear ENBUSFREE. Unfortunately
2630 * we cannot re-enable busfree detection within
2631 * the current connection, so we must leave it
2632 * on while single stepping.
2633 */
2634 ahd_outb(ahd, SIMODE1, simode1 & ENBUSFREE);
2635 ahd_outb(ahd, SEQCTL0, ahd_inb(ahd, SEQCTL0) | STEP);
2636 stepping = TRUE;
2637 }
2638 ahd_outb(ahd, CLRSINT1, CLRBUSFREE);
2639 ahd_outb(ahd, CLRINT, CLRSCSIINT);
2640 ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode);
2641 ahd_outb(ahd, HCNTRL, ahd->unpause);
b95ca0f4 2642 while (!ahd_is_paused(ahd))
750f3593 2643 aic_delay(200);
2644 ahd_update_modes(ahd);
2645 }
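	/* Undo the single-step setup and restore the saved interrupt masks. */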
2646 if (stepping) {
2647 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
2648 ahd_outb(ahd, SIMODE0, simode0);
2649 ahd_outb(ahd, SIMODE3, simode3);
2650 ahd_outb(ahd, LQIMODE0, lqimode0);
2651 ahd_outb(ahd, LQIMODE1, lqimode1);
2652 ahd_outb(ahd, LQOMODE0, lqomode0);
2653 ahd_outb(ahd, LQOMODE1, lqomode1);
2654 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
2655 ahd_outb(ahd, SEQCTL0, ahd_inb(ahd, SEQCTL0) & ~STEP);
2656 ahd_outb(ahd, SIMODE1, simode1);
2657 /*
 2658		 * SCSIINT seems to glitch occasionally when
2659 * the interrupt masks are restored. Clear SCSIINT
2660 * one more time so that only persistent errors
2661 * are seen as a real interrupt.
2662 */
2663 ahd_outb(ahd, CLRINT, CLRSCSIINT);
2664 }
2665 ahd_restore_modes(ahd, saved_modes);
2666}
2667
2668/*
2669 * Clear any pending interrupt status.
2670 */
2671void
2672ahd_clear_intstat(struct ahd_softc *ahd)
2673{
2674 AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
2675 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
2676 /* Clear any interrupt conditions this may have caused */
2677 ahd_outb(ahd, CLRLQIINT0, CLRLQIATNQAS|CLRLQICRCT1|CLRLQICRCT2
2678 |CLRLQIBADLQT|CLRLQIATNLQ|CLRLQIATNCMD);
2679 ahd_outb(ahd, CLRLQIINT1, CLRLQIPHASE_LQ|CLRLQIPHASE_NLQ|CLRLIQABORT
2680 |CLRLQICRCI_LQ|CLRLQICRCI_NLQ|CLRLQIBADLQI
2681 |CLRLQIOVERI_LQ|CLRLQIOVERI_NLQ|CLRNONPACKREQ);
2682 ahd_outb(ahd, CLRLQOINT0, CLRLQOTARGSCBPERR|CLRLQOSTOPT2|CLRLQOATNLQ
2683 |CLRLQOATNPKT|CLRLQOTCRC);
2684 ahd_outb(ahd, CLRLQOINT1, CLRLQOINITSCBPERR|CLRLQOSTOPI2|CLRLQOBADQAS
2685 |CLRLQOBUSFREE|CLRLQOPHACHGINPKT);
2686 if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) {
2687 ahd_outb(ahd, CLRLQOINT0, 0);
2688 ahd_outb(ahd, CLRLQOINT1, 0);
2689 }
2690 ahd_outb(ahd, CLRSINT3, CLRNTRAMPERR|CLROSRAMPERR);
2691 ahd_outb(ahd, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI
2692 |CLRBUSFREE|CLRSCSIPERR|CLRREQINIT);
2693 ahd_outb(ahd, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO
2694 |CLRIOERR|CLROVERRUN);
2695 ahd_outb(ahd, CLRINT, CLRSCSIINT);
2696}
2697
2698/**************************** Debugging Routines ******************************/
2699#ifdef AHD_DEBUG
2700uint32_t ahd_debug = AHD_DEBUG_OPTS;
2701#endif
2702void
2703ahd_print_scb(struct scb *scb)
2704{
2705 struct hardware_scb *hscb;
2706 int i;
2707
2708 hscb = scb->hscb;
e3869ec7 2709 kprintf("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n",
2710 (void *)scb,
2711 hscb->control,
2712 hscb->scsiid,
2713 hscb->lun,
2714 hscb->cdb_len);
e3869ec7 2715 kprintf("Shared Data: ");
984263bc 2716 for (i = 0; i < sizeof(hscb->shared_data.idata.cdb); i++)
2717 kprintf("%#02x", hscb->shared_data.idata.cdb[i]);
2718 kprintf(" dataptr:%#x%x datacnt:%#x sgptr:%#x tag:%#x\n",
2719 (uint32_t)((aic_le64toh(hscb->dataptr) >> 32) & 0xFFFFFFFF),
2720 (uint32_t)(aic_le64toh(hscb->dataptr) & 0xFFFFFFFF),
2721 aic_le32toh(hscb->datacnt),
2722 aic_le32toh(hscb->sgptr),
2723 SCB_GET_TAG(scb));
2724 ahd_dump_sglist(scb);
2725}
2726
2727void
2728ahd_dump_sglist(struct scb *scb)
2729{
2730 int i;
2731
2732 if (scb->sg_count > 0) {
2733 if ((scb->ahd_softc->flags & AHD_64BIT_ADDRESSING) != 0) {
2734 struct ahd_dma64_seg *sg_list;
2735
2736 sg_list = (struct ahd_dma64_seg*)scb->sg_list;
2737 for (i = 0; i < scb->sg_count; i++) {
2738 uint64_t addr;
2739 uint32_t len;
2740
2741 addr = aic_le64toh(sg_list[i].addr);
2742 len = aic_le32toh(sg_list[i].len);
e3869ec7 2743 kprintf("sg[%d] - Addr 0x%x%x : Length %d%s\n",
2744 i,
2745 (uint32_t)((addr >> 32) & 0xFFFFFFFF),
2746 (uint32_t)(addr & 0xFFFFFFFF),
2747 sg_list[i].len & AHD_SG_LEN_MASK,
2748 (sg_list[i].len & AHD_DMA_LAST_SEG)
2749 ? " Last" : "");
2750 }
2751 } else {
2752 struct ahd_dma_seg *sg_list;
2753
2754 sg_list = (struct ahd_dma_seg*)scb->sg_list;
2755 for (i = 0; i < scb->sg_count; i++) {
2756 uint32_t len;
2757
750f3593 2758 len = aic_le32toh(sg_list[i].len);
e3869ec7 2759 kprintf("sg[%d] - Addr 0x%x%x : Length %d%s\n",
984263bc 2760 i,
4b753d9e 2761 (len & AHD_SG_HIGH_ADDR_MASK) >> 24,
750f3593 2762 aic_le32toh(sg_list[i].addr),
2763 len & AHD_SG_LEN_MASK,
2764 len & AHD_DMA_LAST_SEG ? " Last" : "");
2765 }
2766 }
2767 }
2768}
2769
2770/************************* Transfer Negotiation *******************************/
2771/*
2772 * Allocate per target mode instance (ID we respond to as a target)
2773 * transfer negotiation data structures.
2774 */
2775static struct ahd_tmode_tstate *
2776ahd_alloc_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel)
2777{
2778 struct ahd_tmode_tstate *master_tstate;
2779 struct ahd_tmode_tstate *tstate;
2780 int i;
2781
2782 master_tstate = ahd->enabled_targets[ahd->our_id];
2783 if (ahd->enabled_targets[scsi_id] != NULL
2784 && ahd->enabled_targets[scsi_id] != master_tstate)
2785 panic("%s: ahd_alloc_tstate - Target already allocated",
2786 ahd_name(ahd));
efda3bd0 2787 tstate = kmalloc(sizeof(*tstate), M_DEVBUF, M_INTWAIT);
2788
2789 /*
2790 * If we have allocated a master tstate, copy user settings from
2791 * the master tstate (taken from SRAM or the EEPROM) for this
2792 * channel, but reset our current and goal settings to async/narrow
2793 * until an initiator talks to us.
2794 */
2795 if (master_tstate != NULL) {
2796 memcpy(tstate, master_tstate, sizeof(*tstate));
2797 memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns));
2798 for (i = 0; i < 16; i++) {
2799 memset(&tstate->transinfo[i].curr, 0,
2800 sizeof(tstate->transinfo[i].curr));
2801 memset(&tstate->transinfo[i].goal, 0,
2802 sizeof(tstate->transinfo[i].goal));
2803 }
2804 } else
2805 memset(tstate, 0, sizeof(*tstate));
2806 ahd->enabled_targets[scsi_id] = tstate;
2807 return (tstate);
2808}
2809
2810#ifdef AHD_TARGET_MODE
2811/*
2812 * Free per target mode instance (ID we respond to as a target)
2813 * transfer negotiation data structures.
2814 */
2815static void
2816ahd_free_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel, int force)
2817{
2818 struct ahd_tmode_tstate *tstate;
2819
2820 /*
2821 * Don't clean up our "master" tstate.
2822 * It has our default user settings.
2823 */
2824 if (scsi_id == ahd->our_id
2825 && force == FALSE)
2826 return;
2827
2828 tstate = ahd->enabled_targets[scsi_id];
2829 if (tstate != NULL)
efda3bd0 2830 kfree(tstate, M_DEVBUF);
2831 ahd->enabled_targets[scsi_id] = NULL;
2832}
2833#endif
2834
2835/*
2836 * Called when we have an active connection to a target on the bus,
2837 * this function finds the nearest period to the input period limited
 2838 * by the capabilities of the bus connectivity and the sync settings for
2839 * the target.
2840 */
2841void
2842ahd_devlimited_syncrate(struct ahd_softc *ahd,
2843 struct ahd_initiator_tinfo *tinfo,
2844 u_int *period, u_int *ppr_options, role_t role)
2845{
2846 struct ahd_transinfo *transinfo;
2847 u_int maxsync;
2848
2849 if ((ahd_inb(ahd, SBLKCTL) & ENAB40) != 0
2850 && (ahd_inb(ahd, SSTAT2) & EXP_ACTIVE) == 0) {
2851 maxsync = AHD_SYNCRATE_PACED;
2852 } else {
2853 maxsync = AHD_SYNCRATE_ULTRA;
2854 /* Can't do DT related options on an SE bus */
2855 *ppr_options &= MSG_EXT_PPR_QAS_REQ;
2856 }
2857 /*
2858 * Never allow a value higher than our current goal
2859 * period otherwise we may allow a target initiated
2860 * negotiation to go above the limit as set by the
2861 * user. In the case of an initiator initiated
2862 * sync negotiation, we limit based on the user
2863 * setting. This allows the system to still accept
2864 * incoming negotiations even if target initiated
2865 * negotiation is not performed.
2866 */
2867 if (role == ROLE_TARGET)
2868 transinfo = &tinfo->user;
2869 else
2870 transinfo = &tinfo->goal;
2871 *ppr_options &= (transinfo->ppr_options|MSG_EXT_PPR_PCOMP_EN);
2872 if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) {
2873 maxsync = MAX(maxsync, AHD_SYNCRATE_ULTRA2);
2874 *ppr_options &= ~MSG_EXT_PPR_DT_REQ;
2875 }
2876 if (transinfo->period == 0) {
2877 *period = 0;
2878 *ppr_options = 0;
2879 } else {
2880 *period = MAX(*period, transinfo->period);
2881 ahd_find_syncrate(ahd, period, ppr_options, maxsync);
2882 }
2883}
2884
2885/*
2886 * Look up the valid period to SCSIRATE conversion in our table.
2887 * Return the period and offset that should be sent to the target
2888 * if this was the beginning of an SDTR.
2889 */
2890void
2891ahd_find_syncrate(struct ahd_softc *ahd, u_int *period,
2892 u_int *ppr_options, u_int maxsync)
2893{
2894 if (*period < maxsync)
2895 *period = maxsync;
2896
2897 if ((*ppr_options & MSG_EXT_PPR_DT_REQ) != 0
2898 && *period > AHD_SYNCRATE_MIN_DT)
2899 *ppr_options &= ~MSG_EXT_PPR_DT_REQ;
2900
2901 if (*period > AHD_SYNCRATE_MIN)
2902 *period = 0;
2903
2904 /* Honor PPR option conformance rules. */
2905 if (*period > AHD_SYNCRATE_PACED)
2906 *ppr_options &= ~MSG_EXT_PPR_RTI;
2907
2908 if ((*ppr_options & MSG_EXT_PPR_IU_REQ) == 0)
2909 *ppr_options &= (MSG_EXT_PPR_DT_REQ|MSG_EXT_PPR_QAS_REQ);
2910
2911 if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0)
2912 *ppr_options &= MSG_EXT_PPR_QAS_REQ;
2913
2914 /* Skip all PACED only entries if IU is not available */
2915 if ((*ppr_options & MSG_EXT_PPR_IU_REQ) == 0
2916 && *period < AHD_SYNCRATE_DT)
2917 *period = AHD_SYNCRATE_DT;
2918
2919 /* Skip all DT only entries if DT is not available */
2920 if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0
2921 && *period < AHD_SYNCRATE_ULTRA2)
2922 *period = AHD_SYNCRATE_ULTRA2;
2923}
2924
2925/*
2926 * Truncate the given synchronous offset to a value the
2927 * current adapter type and syncrate are capable of.
2928 */
2929void
2930ahd_validate_offset(struct ahd_softc *ahd,
2931 struct ahd_initiator_tinfo *tinfo,
2932 u_int period, u_int *offset, int wide,
2933 role_t role)
2934{
2935 u_int maxoffset;
2936
2937 /* Limit offset to what we can do */
2938 if (period == 0)
2939 maxoffset = 0;
2940 else if (period <= AHD_SYNCRATE_PACED) {
2941 if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0)
2942 maxoffset = MAX_OFFSET_PACED_BUG;
2943 else
2944 maxoffset = MAX_OFFSET_PACED;
2945 } else
2946 maxoffset = MAX_OFFSET_NON_PACED;
2947 *offset = MIN(*offset, maxoffset);
2948 if (tinfo != NULL) {
2949 if (role == ROLE_TARGET)
2950 *offset = MIN(*offset, tinfo->user.offset);
2951 else
2952 *offset = MIN(*offset, tinfo->goal.offset);
2953 }
2954}
2955
2956/*
2957 * Truncate the given transfer width parameter to a value the
2958 * current adapter type is capable of.
2959 */
2960void
2961ahd_validate_width(struct ahd_softc *ahd, struct ahd_initiator_tinfo *tinfo,
2962 u_int *bus_width, role_t role)
2963{
2964 switch (*bus_width) {
2965 default:
2966 if (ahd->features & AHD_WIDE) {
2967 /* Respond Wide */
2968 *bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2969 break;
2970 }
2971 /* FALLTHROUGH */
2972 case MSG_EXT_WDTR_BUS_8_BIT:
2973 *bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2974 break;
2975 }
2976 if (tinfo != NULL) {
2977 if (role == ROLE_TARGET)
2978 *bus_width = MIN(tinfo->user.width, *bus_width);
2979 else
2980 *bus_width = MIN(tinfo->goal.width, *bus_width);
2981 }
2982}
2983
2984/*
 2985 * Update the bitmask of targets with which the controller should
 2986 * negotiate at the next convenient opportunity. This currently
2987 * means the next time we send the initial identify messages for
2988 * a new transaction.
2989 */
2990int
2991ahd_update_neg_request(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
2992 struct ahd_tmode_tstate *tstate,
2993 struct ahd_initiator_tinfo *tinfo, ahd_neg_type neg_type)
2994{
2995 u_int auto_negotiate_orig;
2996
2997 auto_negotiate_orig = tstate->auto_negotiate;
2998 if (neg_type == AHD_NEG_ALWAYS) {
2999 /*
3000 * Force our "current" settings to be
3001 * unknown so that unless a bus reset
3002 * occurs the need to renegotiate is
3003 * recorded persistently.
3004 */
3005 if ((ahd->features & AHD_WIDE) != 0)
3006 tinfo->curr.width = AHD_WIDTH_UNKNOWN;
3007 tinfo->curr.period = AHD_PERIOD_UNKNOWN;
3008 tinfo->curr.offset = AHD_OFFSET_UNKNOWN;
3009 }
3010 if (tinfo->curr.period != tinfo->goal.period
3011 || tinfo->curr.width != tinfo->goal.width
3012 || tinfo->curr.offset != tinfo->goal.offset
3013 || tinfo->curr.ppr_options != tinfo->goal.ppr_options
3014 || (neg_type == AHD_NEG_IF_NON_ASYNC
3015 && (tinfo->goal.offset != 0
3016 || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT
3017 || tinfo->goal.ppr_options != 0)))
3018 tstate->auto_negotiate |= devinfo->target_mask;
3019 else
3020 tstate->auto_negotiate &= ~devinfo->target_mask;
3021
3022 return (auto_negotiate_orig != tstate->auto_negotiate);
3023}
3024
3025/*
3026 * Update the user/goal/curr tables of synchronous negotiation
3027 * parameters as well as, in the case of a current or active update,
3028 * any data structures on the host controller. In the case of an
3029 * active update, the specified target is currently talking to us on
3030 * the bus, so the transfer parameter update must take effect
3031 * immediately.
3032 */
3033void
3034ahd_set_syncrate(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
3035 u_int period, u_int offset, u_int ppr_options,
3036 u_int type, int paused)
3037{
3038 struct ahd_initiator_tinfo *tinfo;
3039 struct ahd_tmode_tstate *tstate;
3040 u_int old_period;
3041 u_int old_offset;
3042 u_int old_ppr;
3043 int active;
3044 int update_needed;
3045
3046 active = (type & AHD_TRANS_ACTIVE) == AHD_TRANS_ACTIVE;
3047 update_needed = 0;
3048
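	/* A period or offset of zero is a request for asynchronous transfers. */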
3049 if (period == 0 || offset == 0) {
3050 period = 0;
3051 offset = 0;
3052 }
3053
3054 tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
3055 devinfo->target, &tstate);
3056
3057 if ((type & AHD_TRANS_USER) != 0) {
3058 tinfo->user.period = period;
3059 tinfo->user.offset = offset;
3060 tinfo->user.ppr_options = ppr_options;
3061 }
3062
3063 if ((type & AHD_TRANS_GOAL) != 0) {
3064 tinfo->goal.period = period;
3065 tinfo->goal.offset = offset;
3066 tinfo->goal.ppr_options = ppr_options;
3067 }
3068
3069 old_period = tinfo->curr.period;
3070 old_offset = tinfo->curr.offset;
3071 old_ppr = tinfo->curr.ppr_options;
3072
3073 if ((type & AHD_TRANS_CUR) != 0
3074 && (old_period != period
3075 || old_offset != offset
3076 || old_ppr != ppr_options)) {
3077
3078 update_needed++;
3079
3080 tinfo->curr.period = period;
3081 tinfo->curr.offset = offset;
3082 tinfo->curr.ppr_options = ppr_options;
3083
3084 ahd_send_async(ahd, devinfo->channel, devinfo->target,
3085 CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL);
3086 if (bootverbose) {
3087 if (offset != 0) {
3088 int options;
3089
e3869ec7 3090 kprintf("%s: target %d synchronous with "
3091 "period = 0x%x, offset = 0x%x",
3092 ahd_name(ahd), devinfo->target,
3093 period, offset);
3094 options = 0;
3095 if ((ppr_options & MSG_EXT_PPR_RD_STRM) != 0) {
e3869ec7 3096 kprintf("(RDSTRM");
3097 options++;
3098 }
3099 if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0) {
e3869ec7 3100 kprintf("%s", options ? "|DT" : "(DT");
3101 options++;
3102 }
3103 if ((ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
e3869ec7 3104 kprintf("%s", options ? "|IU" : "(IU");
3105 options++;
3106 }
3107 if ((ppr_options & MSG_EXT_PPR_RTI) != 0) {
e3869ec7 3108 kprintf("%s", options ? "|RTI" : "(RTI");
3109 options++;
3110 }
3111 if ((ppr_options & MSG_EXT_PPR_QAS_REQ) != 0) {
e3869ec7 3112 kprintf("%s", options ? "|QAS" : "(QAS");
3113 options++;
3114 }
3115 if (options != 0)
e3869ec7 3116 kprintf(")\n");
984263bc 3117 else
e3869ec7 3118 kprintf("\n");
984263bc 3119 } else {
e3869ec7 3120 kprintf("%s: target %d using "
3121 "asynchronous transfers%s\n",
3122 ahd_name(ahd), devinfo->target,
3123 (ppr_options & MSG_EXT_PPR_QAS_REQ) != 0
3124 ? "(QAS)" : "");
3125 }
3126 }
3127 }
3128 /*
3129 * Always refresh the neg-table to handle the case of the
3130 * sequencer setting the ENATNO bit for a MK_MESSAGE request.
3131 * We will always renegotiate in that case if this is a
3132 * packetized request. Also manage the busfree expected flag
3133 * from this common routine so that we catch changes due to
3134 * WDTR or SDTR messages.
3135 */
3136 if ((type & AHD_TRANS_CUR) != 0) {
3137 if (!paused)
3138 ahd_pause(ahd);
3139 ahd_update_neg_table(ahd, devinfo, &tinfo->curr);
3140 if (!paused)
3141 ahd_unpause(ahd);
3142 if (ahd->msg_type != MSG_TYPE_NONE) {
3143 if ((old_ppr & MSG_EXT_PPR_IU_REQ)
3144 != (ppr_options & MSG_EXT_PPR_IU_REQ)) {
3145#ifdef AHD_DEBUG
3146 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
3147 ahd_print_devinfo(ahd, devinfo);
e3869ec7 3148 kprintf("Expecting IU Change busfree\n");
3149 }
3150#endif
3151 ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE
3152 | MSG_FLAG_IU_REQ_CHANGED;
3153 }
3154 if ((old_ppr & MSG_EXT_PPR_IU_REQ) != 0) {
3155#ifdef AHD_DEBUG
3156 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
e3869ec7 3157 kprintf("PPR with IU_REQ outstanding\n");
3158#endif
3159 ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE;
3160 }
3161 }
3162 }
3163
3164 update_needed += ahd_update_neg_request(ahd, devinfo, tstate,
3165 tinfo, AHD_NEG_TO_GOAL);
3166
3167 if (update_needed && active)
3168 ahd_update_pending_scbs(ahd);
3169}
3170
3171/*
3172 * Update the user/goal/curr tables of wide negotiation
3173 * parameters as well as, in the case of a current or active update,
3174 * any data structures on the host controller. In the case of an
3175 * active update, the specified target is currently talking to us on
3176 * the bus, so the transfer parameter update must take effect
3177 * immediately.
3178 */
3179void
3180ahd_set_width(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
3181 u_int width, u_int type, int paused)
3182{
3183 struct ahd_initiator_tinfo *tinfo;
3184 struct ahd_tmode_tstate *tstate;
3185 u_int oldwidth;
3186 int active;
3187 int update_needed;
3188
3189 active = (type & AHD_TRANS_ACTIVE) == AHD_TRANS_ACTIVE;
3190 update_needed = 0;
3191 tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
3192 devinfo->target, &tstate);
3193
3194 if ((type & AHD_TRANS_USER) != 0)
3195 tinfo->user.width = width;
3196
3197 if ((type & AHD_TRANS_GOAL) != 0)
3198 tinfo->goal.width = width;
3199
3200 oldwidth = tinfo->curr.width;
3201 if ((type & AHD_TRANS_CUR) != 0 && oldwidth != width) {
3202
3203 update_needed++;
3204
3205 tinfo->curr.width = width;
3206 ahd_send_async(ahd, devinfo->channel, devinfo->target,
3207 CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL);
3208 if (bootverbose) {
e3869ec7 3209 kprintf("%s: target %d using %dbit transfers\n",
3210 ahd_name(ahd), devinfo->target,
3211 8 * (0x01 << width));
3212 }
3213 }
3214
3215 if ((type & AHD_TRANS_CUR) != 0) {
3216 if (!paused)
3217 ahd_pause(ahd);
3218 ahd_update_neg_table(ahd, devinfo, &tinfo->curr);
3219 if (!paused)
3220 ahd_unpause(ahd);
3221 }
3222
3223 update_needed += ahd_update_neg_request(ahd, devinfo, tstate,
3224 tinfo, AHD_NEG_TO_GOAL);
3225 if (update_needed && active)
3226 ahd_update_pending_scbs(ahd);
3227
3228}
3229
3230/*
3231 * Update the current state of tagged queuing for a given target.
3232 */
3233void
3234ahd_set_tags(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
3235 ahd_queue_alg alg)
3236{
3237 ahd_platform_set_tags(ahd, devinfo, alg);
3238 ahd_send_async(ahd, devinfo->channel, devinfo->target,
3239 devinfo->lun, AC_TRANSFER_NEG, &alg);
3240}
3241
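/*
 * Program the per-target negotiation table entry (NEGPERIOD, NEGPPROPTS,
 * NEGOFFSET and NEGCONOPTS) along with the precomp/slew and amplitude
 * annex settings from the supplied transfer parameters, applying the
 * chip-revision specific adjustments required for paced (160MHz)
 * transfers.
 */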
3242static void
3243ahd_update_neg_table(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
3244 struct ahd_transinfo *tinfo)
3245{
3246 ahd_mode_state saved_modes;
3247 u_int period;
3248 u_int ppr_opts;
3249 u_int con_opts;
3250 u_int offset;
3251 u_int saved_negoaddr;
3252 uint8_t iocell_opts[sizeof(ahd->iocell_opts)];
3253
3254 saved_modes = ahd_save_modes(ahd);
3255 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
3256
3257 saved_negoaddr = ahd_inb(ahd, NEGOADDR);
3258 ahd_outb(ahd, NEGOADDR, devinfo->target);
3259 period = tinfo->period;
3260 offset = tinfo->offset;
3261 memcpy(iocell_opts, ahd->iocell_opts, sizeof(ahd->iocell_opts));
3262 ppr_opts = tinfo->ppr_options & (MSG_EXT_PPR_QAS_REQ|MSG_EXT_PPR_DT_REQ
3263 |MSG_EXT_PPR_IU_REQ|MSG_EXT_PPR_RTI);
3264 con_opts = 0;
3265 if (period == 0)
3266 period = AHD_SYNCRATE_ASYNC;
3267 if (period == AHD_SYNCRATE_160) {
3268
3269 if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0) {
3270 /*
3271 * When the SPI4 spec was finalized, PACE transfers
 3272			 * were not made a configurable option in the PPR
3273 * message. Instead it is assumed to be enabled for
3274 * any syncrate faster than 80MHz. Nevertheless,
3275 * Harpoon2A4 allows this to be configurable.
3276 *
3277 * Harpoon2A4 also assumes at most 2 data bytes per
3278 * negotiated REQ/ACK offset. Paced transfers take
3279 * 4, so we must adjust our offset.
3280 */
3281 ppr_opts |= PPROPT_PACE;
3282 offset *= 2;
3283
3284 /*
3285 * Harpoon2A assumed that there would be a
 3286			 * fallback rate between 160MHz and 80MHz,
3287 * so 7 is used as the period factor rather
3288 * than 8 for 160MHz.
3289 */
3290 period = AHD_SYNCRATE_REVA_160;
3291 }
3292 if ((tinfo->ppr_options & MSG_EXT_PPR_PCOMP_EN) == 0)
3293 iocell_opts[AHD_PRECOMP_SLEW_INDEX] &=
3294 ~AHD_PRECOMP_MASK;
3295 } else {
3296 /*
3297 * Precomp should be disabled for non-paced transfers.
3298 */
3299 iocell_opts[AHD_PRECOMP_SLEW_INDEX] &= ~AHD_PRECOMP_MASK;
3300
3301 if ((ahd->features & AHD_NEW_IOCELL_OPTS) != 0
3302 && (ppr_opts & MSG_EXT_PPR_DT_REQ) != 0
3303 && (ppr_opts & MSG_EXT_PPR_IU_REQ) == 0) {
3304 /*
3305 * Slow down our CRC interval to be
3306 * compatible with non-packetized
3307 * U160 devices that can't handle a
3308 * CRC at full speed.
3309 */
3310 con_opts |= ENSLOWCRC;
3311 }
3312
3313 if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0) {
3314 /*
3315 * On H2A4, revert to a slower slewrate
3316 * on non-paced transfers.
3317 */
3318 iocell_opts[AHD_PRECOMP_SLEW_INDEX] &=
3319 ~AHD_SLEWRATE_MASK;
3320 }
3321 }
3322
3323 ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_PRECOMP_SLEW);
3324 ahd_outb(ahd, ANNEXDAT, iocell_opts[AHD_PRECOMP_SLEW_INDEX]);
3325 ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_AMPLITUDE);
3326 ahd_outb(ahd, ANNEXDAT, iocell_opts[AHD_AMPLITUDE_INDEX]);
3327
3328 ahd_outb(ahd, NEGPERIOD, period);
3329 ahd_outb(ahd, NEGPPROPTS, ppr_opts);
3330 ahd_outb(ahd, NEGOFFSET, offset);
3331
3332 if (tinfo->width == MSG_EXT_WDTR_BUS_16_BIT)
3333 con_opts |= WIDEXFER;
3334
3335 /*
3336 * During packetized transfers, the target will
 3337	 * give us the opportunity to send command packets
3338 * without us asserting attention.
3339 */
3340 if ((tinfo->ppr_options & MSG_EXT_PPR_IU_REQ) == 0)
3341 con_opts |= ENAUTOATNO;
3342 ahd_outb(ahd, NEGCONOPTS, con_opts);
3343 ahd_outb(ahd, NEGOADDR, saved_negoaddr);
3344 ahd_restore_modes(ahd, saved_modes);
3345}
3346
3347/*
3348 * When the transfer settings for a connection change, setup for
3349 * negotiation in pending SCBs to effect the change as quickly as
3350 * possible. We also cancel any negotiations that are scheduled
3351 * for inflight SCBs that have not been started yet.
3352 */
3353static void
3354ahd_update_pending_scbs(struct ahd_softc *ahd)
3355{
3356 struct scb *pending_scb;
3357 int pending_scb_count;
3358 int paused;
3359 u_int saved_scbptr;
3360 ahd_mode_state saved_modes;
3361
3362 /*
3363 * Traverse the pending SCB list and ensure that all of the
3364 * SCBs there have the proper settings. We can only safely
3365 * clear the negotiation required flag (setting requires the
3366 * execution queue to be modified) and this is only possible
3367 * if we are not already attempting to select out for this
3368 * SCB. For this reason, all callers only call this routine
3369 * if we are changing the negotiation settings for the currently
3370 * active transaction on the bus.
3371 */
3372 pending_scb_count = 0;
3373 LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) {
3374 struct ahd_devinfo devinfo;
3375 struct ahd_initiator_tinfo *tinfo;
3376 struct ahd_tmode_tstate *tstate;
3377
3378 ahd_scb_devinfo(ahd, &devinfo, pending_scb);
3379 tinfo = ahd_fetch_transinfo(ahd, devinfo.channel,
3380 devinfo.our_scsiid,
3381 devinfo.target, &tstate);
3382 if ((tstate->auto_negotiate & devinfo.target_mask) == 0
3383 && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) {
3384 pending_scb->flags &= ~SCB_AUTO_NEGOTIATE;
fb5acdc8 3385 pending_scb->hscb->control &= ~MK_MESSAGE;
3386 }
3387 ahd_sync_scb(ahd, pending_scb,
3388 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3389 pending_scb_count++;
3390 }
3391
3392 if (pending_scb_count == 0)
3393 return;
3394
3395 if (ahd_is_paused(ahd)) {
3396 paused = 1;
3397 } else {
3398 paused = 0;
3399 ahd_pause(ahd);
3400 }
3401
3402 /*
3403 * Force the sequencer to reinitialize the selection for
3404 * the command at the head of the execution queue if it
 3405	 * has already been set up. The negotiation changes may
 3406	 * affect whether we select-out with ATN. It is only
 3407	 * safe to clear ENSELO when the bus is not free and no
 3408	 * selection is in progress or completed.
3409 */
3410 saved_modes = ahd_save_modes(ahd);
3411 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
3412 if ((ahd_inb(ahd, SCSISIGI) & BSYI) != 0
3413 && (ahd_inb(ahd, SSTAT0) & (SELDO|SELINGO)) == 0)
3414 ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO);
3415 saved_scbptr = ahd_get_scbptr(ahd);
3416 /* Ensure that the hscbs down on the card match the new information */
3417 LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) {
3418 u_int scb_tag;
984263bc 3419 u_int control;
984263bc 3420
fb5acdc8 3421 scb_tag = SCB_GET_TAG(pending_scb);
4b753d9e 3422 ahd_set_scbptr(ahd, scb_tag);
3423 control = ahd_inb_scbram(ahd, SCB_CONTROL);
3424 control &= ~MK_MESSAGE;
fb5acdc8 3425 control |= pending_scb->hscb->control & MK_MESSAGE;
3426 ahd_outb(ahd, SCB_CONTROL, control);
3427 }
3428 ahd_set_scbptr(ahd, saved_scbptr);
3429 ahd_restore_modes(ahd, saved_modes);
3430
3431 if (paused == 0)
3432 ahd_unpause(ahd);
3433}
3434
3435/**************************** Pathing Information *****************************/
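/*
 * Reconstruct the device information for the connection the controller
 * is currently servicing from the SAVED_SCSIID and SAVED_LUN registers,
 * selecting our own ID and role according to whether we were selected
 * as a target or are acting as the initiator.
 */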
3436static void
3437ahd_fetch_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
3438{
3439 ahd_mode_state saved_modes;
3440 u_int saved_scsiid;
3441 role_t role;
3442 int our_id;
3443
3444 saved_modes = ahd_save_modes(ahd);
3445 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
3446
3447 if (ahd_inb(ahd, SSTAT0) & TARGET)
3448 role = ROLE_TARGET;
3449 else
3450 role = ROLE_INITIATOR;
3451
3452 if (role == ROLE_TARGET
3453 && (ahd_inb(ahd, SEQ_FLAGS) & CMDPHASE_PENDING) != 0) {
3454 /* We were selected, so pull our id from TARGIDIN */
3455 our_id = ahd_inb(ahd, TARGIDIN) & OID;
3456 } else if (role == ROLE_TARGET)
3457 our_id = ahd_inb(ahd, TOWNID);
3458 else
3459 our_id = ahd_inb(ahd, IOWNID);
3460
3461 saved_scsiid = ahd_inb(ahd, SAVED_SCSIID);
3462 ahd_compile_devinfo(devinfo,
3463 our_id,
3464 SCSIID_TARGET(ahd, saved_scsiid),
3465 ahd_inb(ahd, SAVED_LUN),
3466 SCSIID_CHANNEL(ahd, saved_scsiid),
3467 role);
3468 ahd_restore_modes(ahd, saved_modes);
3469}
3470
3471void
3472ahd_print_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
3473{
e3869ec7 3474 kprintf("%s:%c:%d:%d: ", ahd_name(ahd), 'A',
3475 devinfo->target, devinfo->lun);
3476}
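
/*
 * Example output (assuming the unit is named "ahd0" and the devinfo
 * describes target 3, lun 0): "ahd0:A:3:0: ".  The channel is printed
 * as a literal 'A', presumably because each softc represents a single
 * channel.
 */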
3477
3478struct ahd_phase_table_entry*
3479ahd_lookup_phase_entry(int phase)
3480{
3481 struct ahd_phase_table_entry *entry;
3482 struct ahd_phase_table_entry *last_entry;
3483
3484 /*
3485 * num_phases doesn't include the default entry which
3486 * will be returned if the phase doesn't match.
3487 */
3488 last_entry = &ahd_phase_table[num_phases];
3489 for (entry = ahd_phase_table; entry < last_entry; entry++) {
3490 if (phase == entry->phase)
3491 break;
3492 }
3493 return (entry);
3494}
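
/*
 * Hypothetical usage sketch: a phase value that is not present in
 * ahd_phase_table falls through to the trailing default entry, so callers
 * never need to check for a NULL return.  0xFF below is an invented,
 * invalid phase value and "example_phase_lookup" is not a real helper.
 */
#if 0
static void
example_phase_lookup(void)
{
	struct ahd_phase_table_entry *entry;

	entry = ahd_lookup_phase_entry(0xFF);
	/* entry now points at the table's catch-all final element. */
}
#endif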
3495
3496void
3497ahd_compile_devinfo(struct ahd_devinfo *devinfo, u_int our_id, u_int target,
3498 u_int lun, char channel, role_t role)
3499{
3500 devinfo->our_scsiid = our_id;
3501 devinfo->target = target;
3502 devinfo->lun = lun;
3503 devinfo->target_offset = target;
3504 devinfo->channel = channel;
3505 devinfo->role = role;
3506 if (channel == 'B')
3507 devinfo->target_offset += 8;
3508 devinfo->target_mask = (0x01 << devinfo->target_offset);
3509}
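
/*
 * Worked example with hypothetical values: compiling devinfo for our_id 7,
 * target 3, lun 0 on channel 'B' as an initiator yields
 * target_offset = 3 + 8 = 11 and target_mask = 0x01 << 11 = 0x0800.  On
 * channel 'A' the offset would simply equal the target id.
 * "example_compile_devinfo" is an invented name for illustration only.
 */
#if 0
static void
example_compile_devinfo(void)
{
	struct ahd_devinfo devinfo;

	ahd_compile_devinfo(&devinfo, /*our_id*/7, /*target*/3, /*lun*/0,
			    'B', ROLE_INITIATOR);
	/* devinfo.target_offset == 11, devinfo.target_mask == 0x0800 */
}
#endif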
3510
3511static void
3512ahd_scb_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
3513 struct scb *scb)
3514{
3515 role_t role;
3516 int our_id;
3517
3518 our_id = SCSIID_OUR_ID(scb->hscb->scsiid);
3519 role = ROLE_INITIATOR;
3520 if ((scb->hscb->control & TARGET_SCB) != 0)
3521 role = ROLE_TARGET;
3522 ahd_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahd, scb),
3523 SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahd, scb), role);
3524}
3525
3526
3527/************************ Message Phase Processing ****************************/
3528/*
3529 * When an initiator transaction with the MK_MESSAGE flag either reconnects
3530 * or enters the initial message out phase, we are interrupted. Fill our
 3531 * outgoing message buffer with the appropriate message and begin handling
3532 * the message phase(s) manually.
3533 */
3534static void
3535ahd_setup_initiator_msgout(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
3536 struct scb *scb)
3537{
3538 /*
3539 * To facilitate adding multiple messages together,
3540 * each routine should increment the index and len
3541 * variables instead of setting them explicitly.
3542 */
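	/*
	 * For example (illustrative only), appending a single one-byte
	 * message would follow the idiom used throughout this routine:
	 *
	 *	ahd->msgout_buf[ahd->msgout_index++] = <message byte>;
	 *	ahd->msgout_len++;
	 */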
3543 ahd->msgout_index = 0;
3544 ahd->msgout_len = 0;
3545
3546 if (ahd_currently_packetized(ahd))
3547 ahd->msg_flags |= MSG_FLAG_PACKETIZED;
3548
3549 if (ahd->send_msg_perror
3550 && ahd_inb(ahd, MSG_OUT) == HOST_MSG) {
3551 ahd->msgout_buf[ahd->msgout_index++] = ahd->send_msg_perror;
3552 ahd->msgout_len++;
3553 ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
3554#ifdef AHD_DEBUG
3555 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
e3869ec7 3556 kprintf("Setting up for Parity Error delivery\n");
3557#endif
3558 return;
3559 } else if (scb == NULL) {
e3869ec7 3560 kprintf("%s: WARNING. No pending message for "
3561 "I_T msgin. Issuing NO-OP\n", ahd_name(ahd));
3562 ahd->msgout_buf[ahd->msgout_index++] = MSG_NOOP;
3563 ahd->msgout_len++;
3564 ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
3565 return;
3566 }
3567
3568 if ((scb->flags & SCB_DEVICE_RESET) == 0
3569 && (scb->flags & SCB_PACKETIZED) == 0
3570 && ahd_inb(ahd, MSG_OUT) == MSG_IDENTIFYFLAG) {
3571 u_int identify_msg;
3572
3573 identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb);
3574 if ((scb->hscb->control & DISCENB) != 0)
3575 identify_msg |= MSG_IDENTIFY_DISCFLAG;
3576 ahd->msgout_buf[ahd->msgout_index++] = identify_msg;
3577 ahd->msgout_len++;
3578
3579 if ((scb->hscb->control & TAG_ENB) != 0) {
3580 ahd->msgout_buf[ahd->msgout_index++] =
3581 scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE);
3582 ahd->msgout_buf[ahd->msgout_index++] = SCB_GET_TAG(scb);
3583 ahd->msgout_len += 2;
3584 }
3585 }
3586
3587 if (scb->flags & SCB_DEVICE_RESET) {
3588 ahd->msgout_buf[ahd->msgout_index++] = MSG_BUS_DEV_RESET;
3589 ahd->msgout_len++;
3590 ahd_print_path(ahd, scb);
e3869ec7 3591 kprintf("Bus Device Reset Message Sent\n");
3592 /*
3593 * Clear our selection hardware in advance of
3594 * the busfree. We may have an entry in the waiting
3595 * Q for this target, and we don't want to go about
3596 * selecting while we handle the busfree and blow it
3597 * away.
3598 */
3599 ahd_outb(ahd, SCSISEQ0, 0);
3600 } else if ((scb->flags & SCB_ABORT) != 0) {
3601
3602 if ((scb->hscb->control & TAG_ENB) != 0) {
3603 ahd->msgout_buf[ahd->msgout_index++] = MSG_ABORT_TAG;
3604 } else {
3605 ahd->msgout_buf[ahd->msgout_index++] = MSG_ABORT;
3606 }
3607 ahd->msgout_len++;
3608 ahd_print_path(ahd, scb);
e3869ec7 3609 kprintf("Abort%s Message Sent\n",
3610 (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : "");
3611 /*
3612 * Clear our selection hardware in advance of
3613 * the busfree. We may have an entry in the waiting
3614 * Q for this target, and we don't want to go about
3615 * selecting while we handle the busfree and blow it
3616 * away.
3617 */
3618 ahd_outb(ahd, SCSISEQ0, 0);
3619 } else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) {
3620 ahd_build_transfer_msg(ahd, devinfo);
3621 /*
3622 * Clear our selection hardware in advance of potential
3623 * PPR IU status change busfree. We may have an entry in
3624 * the waiting Q for this target, and we don't want to go
3625 * about selecting while we handle the busfree and blow
3626 * it away.
3627 */
3628 ahd_outb(ahd, SCSISEQ0, 0);
3629 } else {
e3869ec7 3630 kprintf("ahd_intr: AWAITING_MSG for an SCB that "
984263bc 3631 "does not have a waiting message\n");
e3869ec7 3632 kprintf("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid,
3633 devinfo->target_mask);
3634 panic("SCB = %d, SCB Control = %x:%x, MSG_OUT = %x "
3635 "SCB flags = %x", SCB_GET_TAG(scb), scb->hscb->control,
975524e9 3636 ahd_inb_scbram(ahd, SCB_CONTROL), ahd_inb(ahd, MSG_OUT),
3637 scb->flags);
3638 }
3639
3640 /*
3641 * Clear the MK_MESSAGE flag from the SCB so we aren't
3642 * asked to send this message again.
3643 */
3644 ahd_outb(ahd, SCB_CONTROL,
3645 ahd_inb_scbram(ahd, SCB_CONTROL) & ~MK_MESSAGE);
3646 scb->hscb->control &= ~MK_MESSAGE;
3647 ahd->msgout_index = 0;
3648 ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
3649}
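
/*
 * Illustrative example with hypothetical values: for a tagged,
 * disconnect-enabled command on lun 2 with tag 0x31 and no pending
 * reset, abort, or negotiation, the routine above leaves
 *
 *	msgout_buf[0] = MSG_IDENTIFYFLAG | MSG_IDENTIFY_DISCFLAG | 2
 *	msgout_buf[1] = the tag message encoded in the SCB control bits
 *	msgout_buf[2] = 0x31
 *
 * with msgout_len == 3 and msgout_index reset to 0 before transmission.
 */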
3650
3651/*
3652 * Build an appropriate transfer negotiation message for the
3653 * currently active target.
3654 */
3655static void
3656ahd_build_transfer_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
3657{
3658 /*
3659 * We need to initiate transfer negotiations.
3660 * If our current and goal settings are identical,
3661 * we want to renegotiate due to a check condition.
3662 */
3663 struct ahd_initiator_tinfo *tinfo;
3664 struct ahd_tmode_tstate *tstate;
3665 int dowide;
3666 int dosync;
3667 int doppr;
3668 u_int period;
3669 u_int ppr_options;
3670 u_int offset;