aic79xx.c:
1 /*
2  * Core routines and tables shareable across OS platforms.
3  *
4  * Copyright (c) 1994-2002 Justin T. Gibbs.
5  * Copyright (c) 2000-2003 Adaptec Inc.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions, and the following disclaimer,
13  *    without modification.
14  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
15  *    substantially similar to the "NO WARRANTY" disclaimer below
16  *    ("Disclaimer") and any redistribution must be conditioned upon
17  *    including a substantially similar Disclaimer requirement for further
18  *    binary redistribution.
19  * 3. Neither the names of the above-listed copyright holders nor the names
20  *    of any contributors may be used to endorse or promote products derived
21  *    from this software without specific prior written permission.
22  *
23  * Alternatively, this software may be distributed under the terms of the
24  * GNU General Public License ("GPL") version 2 as published by the Free
25  * Software Foundation.
26  *
27  * NO WARRANTY
28  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
31  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32  * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
36  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
37  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38  * POSSIBILITY OF SUCH DAMAGES.
39  *
40  * $Id: //depot/aic7xxx/aic7xxx/aic79xx.c#238 $
41  *
42  * $FreeBSD: src/sys/dev/aic7xxx/aic79xx.c,v 1.28 2004/02/04 16:38:38 gibbs Exp $
43  * $DragonFly: src/sys/dev/disk/aic7xxx/aic79xx.c,v 1.19 2007/07/06 00:56:38 pavalos Exp $
44  */
45
46 #include "aic79xx_osm.h"
47 #include "aic79xx_inline.h"
48 #include "aicasm/aicasm_insformat.h"
49
50 /******************************** Globals *************************************/
51 struct ahd_softc_tailq ahd_tailq = TAILQ_HEAD_INITIALIZER(ahd_tailq);
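/* Controls whether the driver attaches to controllers operating in HostRAID mode (non-zero enables). */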
52 uint32_t ahd_attach_to_HostRAID_controllers = 1;
53
54 /***************************** Lookup Tables **********************************/
55 char *ahd_chip_names[] =
56 {
57         "NONE",
58         "aic7901",
59         "aic7902",
60         "aic7901A"
61 };
62 static const u_int num_chip_names = NUM_ELEMENTS(ahd_chip_names);
63
64 /*
65  * Hardware error codes.
66  */
67 struct ahd_hard_error_entry {
68         uint8_t error;
69         char *errmesg;
70 };
71
72 static struct ahd_hard_error_entry ahd_hard_errors[] = {
73         { DSCTMOUT,     "Discard Timer has timed out" },
74         { ILLOPCODE,    "Illegal Opcode in sequencer program" },
75         { SQPARERR,     "Sequencer Parity Error" },
76         { DPARERR,      "Data-path Parity Error" },
77         { MPARERR,      "Scratch or SCB Memory Parity Error" },
78         { CIOPARERR,    "CIOBUS Parity Error" },
79 };
80 static const u_int num_errors = NUM_ELEMENTS(ahd_hard_errors);
81
82 static struct ahd_phase_table_entry ahd_phase_table[] =
83 {
84         { P_DATAOUT,    MSG_NOOP,               "in Data-out phase"     },
85         { P_DATAIN,     MSG_INITIATOR_DET_ERR,  "in Data-in phase"      },
86         { P_DATAOUT_DT, MSG_NOOP,               "in DT Data-out phase"  },
87         { P_DATAIN_DT,  MSG_INITIATOR_DET_ERR,  "in DT Data-in phase"   },
88         { P_COMMAND,    MSG_NOOP,               "in Command phase"      },
89         { P_MESGOUT,    MSG_NOOP,               "in Message-out phase"  },
90         { P_STATUS,     MSG_INITIATOR_DET_ERR,  "in Status phase"       },
91         { P_MESGIN,     MSG_PARITY_ERROR,       "in Message-in phase"   },
92         { P_BUSFREE,    MSG_NOOP,               "while idle"            },
93         { 0,            MSG_NOOP,               "in unknown phase"      }
94 };
95
96 /*
97  * In most cases we only wish to iterate over real phases, so
98  * exclude the last element from the count.
99  */
100 static const u_int num_phases = NUM_ELEMENTS(ahd_phase_table) - 1;
101
102 /* Our Sequencer Program */
103 #include "aic79xx_seq.h"
104
105 /**************************** Function Declarations ***************************/
106 static void             ahd_handle_transmission_error(struct ahd_softc *ahd);
107 static void             ahd_handle_lqiphase_error(struct ahd_softc *ahd,
108                                                   u_int lqistat1);
109 static int              ahd_handle_pkt_busfree(struct ahd_softc *ahd,
110                                                u_int busfreetime);
111 static int              ahd_handle_nonpkt_busfree(struct ahd_softc *ahd);
112 static void             ahd_handle_proto_violation(struct ahd_softc *ahd);
113 static void             ahd_force_renegotiation(struct ahd_softc *ahd,
114                                                 struct ahd_devinfo *devinfo);
115
116 static struct ahd_tmode_tstate*
117                         ahd_alloc_tstate(struct ahd_softc *ahd,
118                                          u_int scsi_id, char channel);
119 #ifdef AHD_TARGET_MODE
120 static void             ahd_free_tstate(struct ahd_softc *ahd,
121                                         u_int scsi_id, char channel, int force);
122 #endif
123 static void             ahd_devlimited_syncrate(struct ahd_softc *ahd,
124                                                 struct ahd_initiator_tinfo *,
125                                                 u_int *period,
126                                                 u_int *ppr_options,
127                                                 role_t role);
128 static void             ahd_update_neg_table(struct ahd_softc *ahd,
129                                              struct ahd_devinfo *devinfo,
130                                              struct ahd_transinfo *tinfo);
131 static void             ahd_update_pending_scbs(struct ahd_softc *ahd);
132 static void             ahd_fetch_devinfo(struct ahd_softc *ahd,
133                                           struct ahd_devinfo *devinfo);
134 static void             ahd_scb_devinfo(struct ahd_softc *ahd,
135                                         struct ahd_devinfo *devinfo,
136                                         struct scb *scb);
137 static void             ahd_setup_initiator_msgout(struct ahd_softc *ahd,
138                                                    struct ahd_devinfo *devinfo,
139                                                    struct scb *scb);
140 static void             ahd_build_transfer_msg(struct ahd_softc *ahd,
141                                                struct ahd_devinfo *devinfo);
142 static void             ahd_construct_sdtr(struct ahd_softc *ahd,
143                                            struct ahd_devinfo *devinfo,
144                                            u_int period, u_int offset);
145 static void             ahd_construct_wdtr(struct ahd_softc *ahd,
146                                            struct ahd_devinfo *devinfo,
147                                            u_int bus_width);
148 static void             ahd_construct_ppr(struct ahd_softc *ahd,
149                                           struct ahd_devinfo *devinfo,
150                                           u_int period, u_int offset,
151                                           u_int bus_width, u_int ppr_options);
152 static void             ahd_clear_msg_state(struct ahd_softc *ahd);
153 static void             ahd_handle_message_phase(struct ahd_softc *ahd);
154 typedef enum {
155         AHDMSG_1B,
156         AHDMSG_2B,
157         AHDMSG_EXT
158 } ahd_msgtype;
159 static int              ahd_sent_msg(struct ahd_softc *ahd, ahd_msgtype type,
160                                      u_int msgval, int full);
161 static int              ahd_parse_msg(struct ahd_softc *ahd,
162                                       struct ahd_devinfo *devinfo);
163 static int              ahd_handle_msg_reject(struct ahd_softc *ahd,
164                                               struct ahd_devinfo *devinfo);
165 static void             ahd_handle_ign_wide_residue(struct ahd_softc *ahd,
166                                                 struct ahd_devinfo *devinfo);
167 static void             ahd_reinitialize_dataptrs(struct ahd_softc *ahd);
168 static void             ahd_handle_devreset(struct ahd_softc *ahd,
169                                             struct ahd_devinfo *devinfo,
170                                             u_int lun, cam_status status,
171                                             char *message, int verbose_level);
172 #ifdef AHD_TARGET_MODE
173 static void             ahd_setup_target_msgin(struct ahd_softc *ahd,
174                                                struct ahd_devinfo *devinfo,
175                                                struct scb *scb);
176 #endif
177
178 static u_int            ahd_sglist_size(struct ahd_softc *ahd);
179 static u_int            ahd_sglist_allocsize(struct ahd_softc *ahd);
180 static bus_dmamap_callback_t
181                         ahd_dmamap_cb; 
182 static void             ahd_initialize_hscbs(struct ahd_softc *ahd);
183 static int              ahd_init_scbdata(struct ahd_softc *ahd);
184 static void             ahd_fini_scbdata(struct ahd_softc *ahd);
185 static void             ahd_setup_iocell_workaround(struct ahd_softc *ahd);
186 static void             ahd_iocell_first_selection(struct ahd_softc *ahd);
187 static void             ahd_add_col_list(struct ahd_softc *ahd,
188                                          struct scb *scb, u_int col_idx);
189 static void             ahd_rem_col_list(struct ahd_softc *ahd,
190                                          struct scb *scb);
191 static void             ahd_chip_init(struct ahd_softc *ahd);
192 static void             ahd_qinfifo_requeue(struct ahd_softc *ahd,
193                                             struct scb *prev_scb,
194                                             struct scb *scb);
195 static int              ahd_qinfifo_count(struct ahd_softc *ahd);
196 static int              ahd_search_scb_list(struct ahd_softc *ahd, int target,
197                                             char channel, int lun, u_int tag,
198                                             role_t role, uint32_t status,
199                                             ahd_search_action action,
200                                             u_int *list_head, u_int tid);
201 static void             ahd_stitch_tid_list(struct ahd_softc *ahd,
202                                             u_int tid_prev, u_int tid_cur,
203                                             u_int tid_next);
204 static void             ahd_add_scb_to_free_list(struct ahd_softc *ahd,
205                                                  u_int scbid);
206 static u_int            ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid,
207                                      u_int prev, u_int next, u_int tid);
208 static void             ahd_reset_current_bus(struct ahd_softc *ahd);
209 static ahd_callback_t   ahd_reset_poll;
210 static ahd_callback_t   ahd_stat_timer;
211 #ifdef AHD_DUMP_SEQ
212 static void             ahd_dumpseq(struct ahd_softc *ahd);
213 #endif
214 static void             ahd_loadseq(struct ahd_softc *ahd);
215 static int              ahd_check_patch(struct ahd_softc *ahd,
216                                         struct patch **start_patch,
217                                         u_int start_instr, u_int *skip_addr);
218 static u_int            ahd_resolve_seqaddr(struct ahd_softc *ahd,
219                                             u_int address);
220 static void             ahd_download_instr(struct ahd_softc *ahd,
221                                            u_int instrptr, uint8_t *dconsts);
222 static int              ahd_probe_stack_size(struct ahd_softc *ahd);
223 static void             ahd_other_scb_timeout(struct ahd_softc *ahd,
224                                               struct scb *scb,
225                                               struct scb *other_scb);
226 static int              ahd_scb_active_in_fifo(struct ahd_softc *ahd,
227                                                struct scb *scb);
228 static void             ahd_run_data_fifo(struct ahd_softc *ahd,
229                                           struct scb *scb);
230
231 #ifdef AHD_TARGET_MODE
232 static void             ahd_queue_lstate_event(struct ahd_softc *ahd,
233                                                struct ahd_tmode_lstate *lstate,
234                                                u_int initiator_id,
235                                                u_int event_type,
236                                                u_int event_arg);
237 static void             ahd_update_scsiid(struct ahd_softc *ahd,
238                                           u_int targid_mask);
239 static int              ahd_handle_target_cmd(struct ahd_softc *ahd,
240                                               struct target_cmd *cmd);
241 #endif
242
243 /******************************** Private Inlines *****************************/
244 static __inline void    ahd_assert_atn(struct ahd_softc *ahd);
245 static __inline int     ahd_currently_packetized(struct ahd_softc *ahd);
246 static __inline int     ahd_set_active_fifo(struct ahd_softc *ahd);
247
248 static __inline void
249 ahd_assert_atn(struct ahd_softc *ahd)
250 {
251         ahd_outb(ahd, SCSISIGO, ATNO);
252 }
253
254 /*
255  * Determine if the current connection has a packetized
256  * agreement.  This does not necessarily mean that we
257  * are currently in a packetized transfer.  We could
258  * just as easily be sending or receiving a message.
259  */
260 static __inline int
261 ahd_currently_packetized(struct ahd_softc *ahd)
262 {
263         ahd_mode_state   saved_modes;
264         int              packetized;
265
266         saved_modes = ahd_save_modes(ahd);
267         if ((ahd->bugs & AHD_PKTIZED_STATUS_BUG) != 0) {
268                 /*
269                  * The packetized bit refers to the last
270                  * connection, not the current one.  Check
271                  * for non-zero LQISTATE instead.
272                  */
273                 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
274                 packetized = ahd_inb(ahd, LQISTATE) != 0;
275         } else {
276                 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
277                 packetized = ahd_inb(ahd, LQISTAT2) & PACKETIZED;
278         }
279         ahd_restore_modes(ahd, saved_modes);
280         return (packetized);
281 }
282
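/*
 * Point our register window at whichever data FIFO (0 or 1) is
 * currently active according to DFFSTAT.  Returns non-zero if an
 * active FIFO was selected and zero if no FIFO is currently active.
 */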
283 static __inline int
284 ahd_set_active_fifo(struct ahd_softc *ahd)
285 {
286         u_int active_fifo;
287
288         AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
289         active_fifo = ahd_inb(ahd, DFFSTAT) & CURRFIFO;
290         switch (active_fifo) {
291         case 0:
292         case 1:
293                 ahd_set_modes(ahd, active_fifo, active_fifo);
294                 return (1);
295         default:
296                 return (0);
297         }
298 }
299
300 /************************* Sequencer Execution Control ************************/
301 /*
302  * Restart the sequencer program from address zero
303  */
304 void
305 ahd_restart(struct ahd_softc *ahd)
306 {
307
308         ahd_pause(ahd);
309
310         ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
311
312         /* No more pending messages */
313         ahd_clear_msg_state(ahd);
314         ahd_outb(ahd, SCSISIGO, 0);             /* De-assert BSY */
315         ahd_outb(ahd, MSG_OUT, MSG_NOOP);       /* No message to send */
316         ahd_outb(ahd, SXFRCTL1, ahd_inb(ahd, SXFRCTL1) & ~BITBUCKET);
317         ahd_outb(ahd, SEQINTCTL, 0);
318         ahd_outb(ahd, LASTPHASE, P_BUSFREE);
319         ahd_outb(ahd, SEQ_FLAGS, 0);
320         ahd_outb(ahd, SAVED_SCSIID, 0xFF);
321         ahd_outb(ahd, SAVED_LUN, 0xFF);
322
323         /*
324          * Ensure that the sequencer's idea of TQINPOS
325          * matches our own.  The sequencer increments TQINPOS
326          * only after it sees a DMA complete, so a reset could
327          * occur before the increment, leaving the kernel believing
328          * the command arrived while the sequencer does not.
329          */
330         ahd_outb(ahd, TQINPOS, ahd->tqinfifonext);
331
332         /* Always allow reselection */
333         ahd_outb(ahd, SCSISEQ1,
334                  ahd_inb(ahd, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP));
335         ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
336         ahd_outb(ahd, SEQCTL0, FASTMODE|SEQRESET);
337         ahd_unpause(ahd);
338 }
339
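/*
 * Reset the indicated data FIFO, aborting any scatter/gather fetch
 * that is still in progress and invalidating the FIFO's sequencer
 * handler address.
 */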
340 void
341 ahd_clear_fifo(struct ahd_softc *ahd, u_int fifo)
342 {
343         ahd_mode_state   saved_modes;
344
345 #ifdef AHD_DEBUG
346         if ((ahd_debug & AHD_SHOW_FIFOS) != 0)
347                 kprintf("%s: Clearing FIFO %d\n", ahd_name(ahd), fifo);
348 #endif
349         saved_modes = ahd_save_modes(ahd);
350         ahd_set_modes(ahd, fifo, fifo);
351         ahd_outb(ahd, DFFSXFRCTL, RSTCHN|CLRSHCNT);
352         if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0)
353                 ahd_outb(ahd, CCSGCTL, CCSGRESET);
354         ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR);
355         ahd_outb(ahd, SG_STATE, 0);
356         ahd_restore_modes(ahd, saved_modes);
357 }
358
359 /************************* Input/Output Queues ********************************/
360 /*
361  * Flush any completed commands that are sitting in the command
362  * complete queues down on the chip but have yet to be DMA'ed back up.
363  */
364 void
365 ahd_flush_qoutfifo(struct ahd_softc *ahd)
366 {
367         struct          scb *scb;
368         ahd_mode_state  saved_modes;
369         u_int           saved_scbptr;
370         u_int           ccscbctl;
371         u_int           scbid;
372         u_int           next_scbid;
373
374         saved_modes = ahd_save_modes(ahd);
375
376         /*
377          * Flush the good status FIFO for completed packetized commands.
378          */
379         ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
380         saved_scbptr = ahd_get_scbptr(ahd);
381         while ((ahd_inb(ahd, LQISTAT2) & LQIGSAVAIL) != 0) {
382                 u_int fifo_mode;
383                 u_int i;
384                 
385                 scbid = ahd_inw(ahd, GSFIFO);
386                 scb = ahd_lookup_scb(ahd, scbid);
387                 if (scb == NULL) {
388                         kprintf("%s: Warning - GSFIFO SCB %d invalid\n",
389                                ahd_name(ahd), scbid);
390                         continue;
391                 }
392                 /*
393                  * Determine if this transaction is still active in
394                  * any FIFO.  If it is, we must flush that FIFO to
395                  * the host before completing the  command.
396                  */
397                 fifo_mode = 0;
398 rescan_fifos:
399                 for (i = 0; i < 2; i++) {
400                         /* Toggle to the other mode. */
401                         fifo_mode ^= 1;
402                         ahd_set_modes(ahd, fifo_mode, fifo_mode);
403
404                         if (ahd_scb_active_in_fifo(ahd, scb) == 0)
405                                 continue;
406
407                         ahd_run_data_fifo(ahd, scb);
408
409                         /*
410                          * Running this FIFO may cause a CFG4DATA for
411                          * this same transaction to assert in the other
412                          * FIFO or a new snapshot SAVEPTRS interrupt
413                          * in this FIFO.  Even running a FIFO may not
414                          * clear the transaction if we are still waiting
415                          * for data to drain to the host. We must loop
416                          * until the transaction is not active in either
417                          * FIFO just to be sure.  Reset our loop counter
418                          * so we will visit both FIFOs again before
419                          * declaring this transaction finished.  We
420                          * also delay a bit so that status has a chance
421                          * to change before we look at this FIFO again.
422                          */
423                         aic_delay(200);
424                         goto rescan_fifos;
425                 }
426                 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
427                 ahd_set_scbptr(ahd, scbid);
428                 if ((ahd_inb_scbram(ahd, SCB_SGPTR) & SG_LIST_NULL) == 0
429                  && ((ahd_inb_scbram(ahd, SCB_SGPTR) & SG_FULL_RESID) != 0
430                   || (ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR)
431                       & SG_LIST_NULL) != 0)) {
432                         u_int comp_head;
433
434                         /*
435                          * The transfer completed with a residual.
436                          * Place this SCB on the complete DMA list
437                          * so that we update our in-core copy of the
438                          * SCB before completing the command.
439                          */
440                         ahd_outb(ahd, SCB_SCSI_STATUS, 0);
441                         ahd_outb(ahd, SCB_SGPTR,
442                                  ahd_inb_scbram(ahd, SCB_SGPTR)
443                                  | SG_STATUS_VALID);
444                         ahd_outw(ahd, SCB_TAG, scbid);
445                         ahd_outw(ahd, SCB_NEXT_COMPLETE, SCB_LIST_NULL);
446                         comp_head = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD);
447                         if (SCBID_IS_NULL(comp_head)) {
448                                 ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, scbid);
449                                 ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, scbid);
450                         } else {
451                                 u_int tail;
452
453                                 tail = ahd_inw(ahd, COMPLETE_DMA_SCB_TAIL);
454                                 ahd_set_scbptr(ahd, tail);
455                                 ahd_outw(ahd, SCB_NEXT_COMPLETE, scbid);
456                                 ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, scbid);
457                                 ahd_set_scbptr(ahd, scbid);
458                         }
459                 } else
460                         ahd_complete_scb(ahd, scb);
461         }
462         ahd_set_scbptr(ahd, saved_scbptr);
463
464         /*
465          * Setup for command channel portion of flush.
466          */
467         ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
468
469         /*
470          * Wait for any inprogress DMA to complete and clear DMA state
471          * if this if for an SCB in the qinfifo.
472          */
473         while (((ccscbctl = ahd_inb(ahd, CCSCBCTL)) & (CCARREN|CCSCBEN)) != 0) {
474
475                 if ((ccscbctl & (CCSCBDIR|CCARREN)) == (CCSCBDIR|CCARREN)) {
476                         if ((ccscbctl & ARRDONE) != 0)
477                                 break;
478                 } else if ((ccscbctl & CCSCBDONE) != 0)
479                         break;
480                 aic_delay(200);
481         }
482         /*
483          * We leave the sequencer to cleanup in the case of DMA's to
484          * update the qoutfifo.  In all other cases (DMA's to the
485          * chip or a push of an SCB from the COMPLETE_DMA_SCB list),
486          * we disable the DMA engine so that the sequencer will not
487          * attempt to handle the DMA completion.
488          */
489         if ((ccscbctl & CCSCBDIR) != 0 || (ccscbctl & ARRDONE) != 0)
490                 ahd_outb(ahd, CCSCBCTL, ccscbctl & ~(CCARREN|CCSCBEN));
491
492         /*
493          * Complete any SCBs that just finished
494          * being DMA'ed into the qoutfifo.
495          */
496         ahd_run_qoutfifo(ahd);
497
498         saved_scbptr = ahd_get_scbptr(ahd);
499         /*
500          * Manually update/complete any completed SCBs that are waiting to be
501          * DMA'ed back up to the host.
502          */
503         scbid = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD);
504         while (!SCBID_IS_NULL(scbid)) {
505                 uint8_t *hscb_ptr;
506                 u_int    i;
507                 
508                 ahd_set_scbptr(ahd, scbid);
509                 next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
510                 scb = ahd_lookup_scb(ahd, scbid);
511                 if (scb == NULL) {
512                         kprintf("%s: Warning - DMA-up and complete "
513                                "SCB %d invalid\n", ahd_name(ahd), scbid);
514                         continue;
515                 }
516                 hscb_ptr = (uint8_t *)scb->hscb;
517                 for (i = 0; i < sizeof(struct hardware_scb); i++)
518                         *hscb_ptr++ = ahd_inb_scbram(ahd, SCB_BASE + i);
519
520                 ahd_complete_scb(ahd, scb);
521                 scbid = next_scbid;
522         }
523         ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, SCB_LIST_NULL);
524         ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, SCB_LIST_NULL);
525
526         scbid = ahd_inw(ahd, COMPLETE_ON_QFREEZE_HEAD);
527         while (!SCBID_IS_NULL(scbid)) {
528
529                 ahd_set_scbptr(ahd, scbid);
530                 next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
531                 scb = ahd_lookup_scb(ahd, scbid);
532                 if (scb == NULL) {
533                         kprintf("%s: Warning - Complete Qfrz SCB %d invalid\n",
534                                ahd_name(ahd), scbid);
535                         continue;
536                 }
537
538                 ahd_complete_scb(ahd, scb);
539                 scbid = next_scbid;
540         }
541         ahd_outw(ahd, COMPLETE_ON_QFREEZE_HEAD, SCB_LIST_NULL);
542
543         scbid = ahd_inw(ahd, COMPLETE_SCB_HEAD);
544         while (!SCBID_IS_NULL(scbid)) {
545
546                 ahd_set_scbptr(ahd, scbid);
547                 next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
548                 scb = ahd_lookup_scb(ahd, scbid);
549                 if (scb == NULL) {
550                         kprintf("%s: Warning - Complete SCB %d invalid\n",
551                                ahd_name(ahd), scbid);
552                         continue;
553                 }
554
555                 ahd_complete_scb(ahd, scb);
556                 scbid = next_scbid;
557         }
558         ahd_outw(ahd, COMPLETE_SCB_HEAD, SCB_LIST_NULL);
559
560         /*
561          * Restore state.
562          */
563         ahd_set_scbptr(ahd, saved_scbptr);
564         ahd_restore_modes(ahd, saved_modes);
565         ahd->flags |= AHD_UPDATE_PEND_CMDS;
566 }
567
568 /*
569  * Determine if an SCB for a packetized transaction
570  * is active in a FIFO.
571  */
572 static int
573 ahd_scb_active_in_fifo(struct ahd_softc *ahd, struct scb *scb)
574 {
575
576         /*
577          * The FIFO is only active for our transaction if
578          * the SCBPTR matches the SCB's ID and the firmware
579          * has installed a handler for the FIFO or we have
580          * a pending SAVEPTRS or CFG4DATA interrupt.
581          */
582         if (ahd_get_scbptr(ahd) != SCB_GET_TAG(scb)
583          || ((ahd_inb(ahd, LONGJMP_ADDR+1) & INVALID_ADDR) != 0
584           && (ahd_inb(ahd, SEQINTSRC) & (CFG4DATA|SAVEPTRS)) == 0))
585                 return (0);
586
587         return (1);
588 }
589
590 /*
591  * Run a data fifo to completion for a transaction we know
592  * has completed across the SCSI bus (good status has been
593  * received).  We are already set to the correct FIFO mode
594  * on entry to this routine.
595  *
596  * This function attempts to operate exactly as the firmware
597  * would when running this FIFO.  Care must be taken to update
598  * this routine any time the firmware's FIFO algorithm is
599  * changed.
600  */
601 static void
602 ahd_run_data_fifo(struct ahd_softc *ahd, struct scb *scb)
603 {
604         u_int seqintsrc;
605
606         seqintsrc = ahd_inb(ahd, SEQINTSRC);
607         if ((seqintsrc & CFG4DATA) != 0) {
608                 uint32_t datacnt;
609                 uint32_t sgptr;
610
611                 /*
612                  * Clear full residual flag.
613                  */
614                 sgptr = ahd_inl_scbram(ahd, SCB_SGPTR) & ~SG_FULL_RESID;
615                 ahd_outb(ahd, SCB_SGPTR, sgptr);
616
617                 /*
618                  * Load datacnt and address.
619                  */
620                 datacnt = ahd_inl_scbram(ahd, SCB_DATACNT);
621                 if ((datacnt & AHD_DMA_LAST_SEG) != 0) {
622                         sgptr |= LAST_SEG;
623                         ahd_outb(ahd, SG_STATE, 0);
624                 } else
625                         ahd_outb(ahd, SG_STATE, LOADING_NEEDED);
626                 ahd_outq(ahd, HADDR, ahd_inq_scbram(ahd, SCB_DATAPTR));
627                 ahd_outl(ahd, HCNT, datacnt & AHD_SG_LEN_MASK);
628                 ahd_outb(ahd, SG_CACHE_PRE, sgptr);
629                 ahd_outb(ahd, DFCNTRL, PRELOADEN|SCSIEN|HDMAEN);
630
631                 /*
632                  * Initialize Residual Fields.
633                  */
634                 ahd_outb(ahd, SCB_RESIDUAL_DATACNT+3, datacnt >> 24);
635                 ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr & SG_PTR_MASK);
636
637                 /*
638                  * Mark the SCB as having a FIFO in use.
639                  */
640                 ahd_outb(ahd, SCB_FIFO_USE_COUNT,
641                          ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT) + 1);
642
643                 /*
644                  * Install a "fake" handler for this FIFO.
645                  */
646                 ahd_outw(ahd, LONGJMP_ADDR, 0);
647
648                 /*
649                  * Notify the hardware that we have satisfied
650                  * this sequencer interrupt.
651                  */
652                 ahd_outb(ahd, CLRSEQINTSRC, CLRCFG4DATA);
653         } else if ((seqintsrc & SAVEPTRS) != 0) {
654                 uint32_t sgptr;
655                 uint32_t resid;
656
657                 if ((ahd_inb(ahd, LONGJMP_ADDR+1)&INVALID_ADDR) != 0) {
658                         /*
659                          * Snapshot Save Pointers.  All that
660                          * is necessary to clear the snapshot
661                          * is a CLRCHN.
662                          */
663                         goto clrchn;
664                 }
665
666                 /*
667                  * Disable S/G fetch so the DMA engine
668                  * is available to future users.
669                  */
670                 if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0)
671                         ahd_outb(ahd, CCSGCTL, 0);
672                 ahd_outb(ahd, SG_STATE, 0);
673
674                 /*
675                  * Flush the data FIFO.  Strickly only
676                  * necessary for Rev A parts.
677                  */
678                 ahd_outb(ahd, DFCNTRL, ahd_inb(ahd, DFCNTRL) | FIFOFLUSH);
679
680                 /*
681                  * Calculate residual.
682                  */
683                 sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR);
684                 resid = ahd_inl(ahd, SHCNT);
685                 resid |= ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT+3) << 24;
686                 ahd_outl(ahd, SCB_RESIDUAL_DATACNT, resid);
687                 if ((ahd_inb(ahd, SG_CACHE_SHADOW) & LAST_SEG) == 0) {
688                         /*
689                          * Must back up to the correct S/G element.
690                          * Typically this just means resetting our
691                          * low byte to the offset in the SG_CACHE,
692                          * but if we wrapped, we have to correct
693                          * the other bytes of the sgptr too.
694                          */
695                         if ((ahd_inb(ahd, SG_CACHE_SHADOW) & 0x80) != 0
696                          && (sgptr & 0x80) == 0)
697                                 sgptr -= 0x100;
698                         sgptr &= ~0xFF;
699                         sgptr |= ahd_inb(ahd, SG_CACHE_SHADOW)
700                                & SG_ADDR_MASK;
701                         ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr);
702                         ahd_outb(ahd, SCB_RESIDUAL_DATACNT + 3, 0);
703                 } else if ((resid & AHD_SG_LEN_MASK) == 0) {
704                         ahd_outb(ahd, SCB_RESIDUAL_SGPTR,
705                                  sgptr | SG_LIST_NULL);
706                 }
707                 /*
708                  * Save Pointers.
709                  */
710                 ahd_outq(ahd, SCB_DATAPTR, ahd_inq(ahd, SHADDR));
711                 ahd_outl(ahd, SCB_DATACNT, resid);
712                 ahd_outl(ahd, SCB_SGPTR, sgptr);
713                 ahd_outb(ahd, CLRSEQINTSRC, CLRSAVEPTRS);
714                 ahd_outb(ahd, SEQIMODE,
715                          ahd_inb(ahd, SEQIMODE) | ENSAVEPTRS);
716                 /*
717                  * If the data is to the SCSI bus, we are
718                  * done, otherwise wait for FIFOEMP.
719                  */
720                 if ((ahd_inb(ahd, DFCNTRL) & DIRECTION) != 0)
721                         goto clrchn;
722         } else if ((ahd_inb(ahd, SG_STATE) & LOADING_NEEDED) != 0) {
723                 uint32_t sgptr;
724                 uint64_t data_addr;
725                 uint32_t data_len;
726                 u_int    dfcntrl;
727
728                 /*
729                  * Disable S/G fetch so the DMA engine
730                  * is available to future users.  We won't
731                  * be using the DMA engine to load segments.
732                  */
733                 if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0) {
734                         ahd_outb(ahd, CCSGCTL, 0);
735                         ahd_outb(ahd, SG_STATE, LOADING_NEEDED);
736                 }
737
738                 /*
739                  * Wait for the DMA engine to notice that the
740                  * host transfer is enabled and that there is
741                  * space in the S/G FIFO for new segments before
742                  * loading more segments.
743                  */
744                 if ((ahd_inb(ahd, DFSTATUS) & PRELOAD_AVAIL) != 0
745                  && (ahd_inb(ahd, DFCNTRL) & HDMAENACK) != 0) {
746
747                         /*
748                          * Determine the offset of the next S/G
749                          * element to load.
750                          */
751                         sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR);
752                         sgptr &= SG_PTR_MASK;
753                         if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
754                                 struct ahd_dma64_seg *sg;
755
756                                 sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);
757                                 data_addr = sg->addr;
758                                 data_len = sg->len;
759                                 sgptr += sizeof(*sg);
760                         } else {
761                                 struct  ahd_dma_seg *sg;
762
763                                 sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);
764                                 data_addr = sg->len & AHD_SG_HIGH_ADDR_MASK;
765                                 data_addr <<= 8;
766                                 data_addr |= sg->addr;
767                                 data_len = sg->len;
768                                 sgptr += sizeof(*sg);
769                         }
770
771                         /*
772                          * Update residual information.
773                          */
774                         ahd_outb(ahd, SCB_RESIDUAL_DATACNT+3, data_len >> 24);
775                         ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr);
776
777                         /*
778                          * Load the S/G.
779                          */
780                         if (data_len & AHD_DMA_LAST_SEG) {
781                                 sgptr |= LAST_SEG;
782                                 ahd_outb(ahd, SG_STATE, 0);
783                         }
784                         ahd_outq(ahd, HADDR, data_addr);
785                         ahd_outl(ahd, HCNT, data_len & AHD_SG_LEN_MASK);
786                         ahd_outb(ahd, SG_CACHE_PRE, sgptr & 0xFF);
787
788                         /*
789                          * Advertise the segment to the hardware.
790                          */
791                         dfcntrl = ahd_inb(ahd, DFCNTRL)|PRELOADEN|HDMAEN;
792                         if ((ahd->features & AHD_NEW_DFCNTRL_OPTS) != 0) {
793                                 /*
794                                  * Use SCSIENWRDIS so that SCSIEN
795                                  * is never modified by this
796                                  * operation.
797                                  */
798                                 dfcntrl |= SCSIENWRDIS;
799                         }
800                         ahd_outb(ahd, DFCNTRL, dfcntrl);
801                 }
802         } else if ((ahd_inb(ahd, SG_CACHE_SHADOW) & LAST_SEG_DONE) != 0) {
803
804                 /*
805                  * Transfer completed to the end of SG list
806                  * and has flushed to the host.
807                  */
808                 ahd_outb(ahd, SCB_SGPTR,
809                          ahd_inb_scbram(ahd, SCB_SGPTR) | SG_LIST_NULL);
810                 goto clrchn;
811         } else if ((ahd_inb(ahd, DFSTATUS) & FIFOEMP) != 0) {
812 clrchn:
813                 /*
814                  * Clear any handler for this FIFO, decrement
815                  * the FIFO use count for the SCB, and release
816                  * the FIFO.
817                  */
818                 ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR);
819                 ahd_outb(ahd, SCB_FIFO_USE_COUNT,
820                          ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT) - 1);
821                 ahd_outb(ahd, DFFSXFRCTL, CLRCHN);
822         }
823 }
824
825 /*
826  * Look for entries in the QoutFIFO that have completed.
827  * The valid_tag completion field indicates the validity
828  * of the entry - the valid value toggles each time through
829  * the queue. We use the sg_status field in the completion
830  * entry to avoid referencing the hscb if the completion
831  * occurred with no errors and no residual.  sg_status is
832  * a copy of the first byte (little endian) of the sgptr
833  * hscb field.
834  */
835 void
836 ahd_run_qoutfifo(struct ahd_softc *ahd)
837 {
838         struct ahd_completion *completion;
839         struct scb *scb;
840         u_int  scb_index;
841
842         if ((ahd->flags & AHD_RUNNING_QOUTFIFO) != 0)
843                 panic("ahd_run_qoutfifo recursion");
844         ahd->flags |= AHD_RUNNING_QOUTFIFO;
845         ahd_sync_qoutfifo(ahd, BUS_DMASYNC_POSTREAD);
846         for (;;) {
847                 completion = &ahd->qoutfifo[ahd->qoutfifonext];
848
849                 if (completion->valid_tag != ahd->qoutfifonext_valid_tag)
850                         break;
851
852                 scb_index = aic_le16toh(completion->tag);
853                 scb = ahd_lookup_scb(ahd, scb_index);
854                 if (scb == NULL) {
855                         kprintf("%s: WARNING no command for scb %d "
856                                "(cmdcmplt)\nQOUTPOS = %d\n",
857                                ahd_name(ahd), scb_index,
858                                ahd->qoutfifonext);
859                         ahd_dump_card_state(ahd);
860                 } else if ((completion->sg_status & SG_STATUS_VALID) != 0) {
861                         ahd_handle_scb_status(ahd, scb);
862                 } else {
863                         ahd_done(ahd, scb);
864                 }
865
866                 ahd->qoutfifonext = (ahd->qoutfifonext+1) & (AHD_QOUT_SIZE-1);
867                 if (ahd->qoutfifonext == 0)
868                         ahd->qoutfifonext_valid_tag ^= QOUTFIFO_ENTRY_VALID;
869         }
870         ahd->flags &= ~AHD_RUNNING_QOUTFIFO;
871 }
872
873 /************************* Interrupt Handling *********************************/
874 void
875 ahd_handle_hwerrint(struct ahd_softc *ahd)
876 {
877         /*
878          * Some catastrophic hardware error has occurred.
879          * Print it for the user and disable the controller.
880          */
881         int i;
882         int error;
883
884         error = ahd_inb(ahd, ERROR);
885         for (i = 0; i < num_errors; i++) {
886                 if ((error & ahd_hard_errors[i].error) != 0)
887                         kprintf("%s: hwerrint, %s\n",
888                                ahd_name(ahd), ahd_hard_errors[i].errmesg);
889         }
890
891         ahd_dump_card_state(ahd);
892         panic("BRKADRINT");
893
894         /* Tell everyone that this HBA is no longer available */
895         ahd_abort_scbs(ahd, CAM_TARGET_WILDCARD, ALL_CHANNELS,
896                        CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN,
897                        CAM_NO_HBA);
898
899         /* Tell the system that this controller has gone away. */
900         ahd_free(ahd);
901 }
902
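/*
 * Service a sequencer interrupt: decode the SEQINT code posted by the
 * firmware and perform whatever host assistance it requires.
 */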
903 void
904 ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
905 {
906         u_int seqintcode;
907
908         /*
909          * Save the sequencer interrupt code and clear the SEQINT
910          * bit. We will unpause the sequencer, if appropriate,
911          * after servicing the request.
912          */
913         seqintcode = ahd_inb(ahd, SEQINTCODE);
914         ahd_outb(ahd, CLRINT, CLRSEQINT);
915         if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
916                 /*
917                  * Unpause the sequencer and let it clear
918                  * SEQINT by writing NO_SEQINT to it.  This
919                  * will cause the sequencer to be paused again,
920                  * which is the expected state of this routine.
921                  */
922                 ahd_unpause(ahd);
923                 while (!ahd_is_paused(ahd))
924                         ;
925                 ahd_outb(ahd, CLRINT, CLRSEQINT);
926         }
927         ahd_update_modes(ahd);
928 #ifdef AHD_DEBUG
929         if ((ahd_debug & AHD_SHOW_MISC) != 0)
930                 kprintf("%s: Handle Seqint Called for code %d\n",
931                        ahd_name(ahd), seqintcode);
932 #endif
933         switch (seqintcode) {
934         case ENTERING_NONPACK:
935         {
936                 struct  scb *scb;
937                 u_int   scbid;
938
939                 AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
940                                  ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
941                 scbid = ahd_get_scbptr(ahd);
942                 scb = ahd_lookup_scb(ahd, scbid);
943                 if (scb == NULL) {
944                         /*
945                          * Somehow need to know if this
946                          * is from a selection or reselection.
947                          * From that, we can determine target
948                          * ID so we at least have an I_T nexus.
949                          */
950                 } else {
951                         ahd_outb(ahd, SAVED_SCSIID, scb->hscb->scsiid);
952                         ahd_outb(ahd, SAVED_LUN, scb->hscb->lun);
953                         ahd_outb(ahd, SEQ_FLAGS, 0x0);
954                 }
955                 if ((ahd_inb(ahd, LQISTAT2) & LQIPHASE_OUTPKT) != 0
956                  && (ahd_inb(ahd, SCSISIGO) & ATNO) != 0) {
957                         /*
958                          * Phase change after read stream with
959                          * CRC error with P0 asserted on last
960                          * packet.
961                          */
962 #ifdef AHD_DEBUG
963                         if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
964                                 kprintf("%s: Assuming LQIPHASE_NLQ with "
965                                        "P0 assertion\n", ahd_name(ahd));
966 #endif
967                 }
968 #ifdef AHD_DEBUG
969                 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
970                         kprintf("%s: Entering NONPACK\n", ahd_name(ahd));
971 #endif
972                 break;
973         }
974         case INVALID_SEQINT:
975                 kprintf("%s: Invalid Sequencer interrupt occurred.\n",
976                        ahd_name(ahd));
977                 ahd_dump_card_state(ahd);
978                 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
979                 break;
980         case STATUS_OVERRUN:
981         {
982                 struct  scb *scb;
983                 u_int   scbid;
984
985                 scbid = ahd_get_scbptr(ahd);
986                 scb = ahd_lookup_scb(ahd, scbid);
987                 if (scb != NULL)
988                         ahd_print_path(ahd, scb);
989                 else
990                         kprintf("%s: ", ahd_name(ahd));
991                 kprintf("SCB %d Packetized Status Overrun", scbid);
992                 ahd_dump_card_state(ahd);
993                 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
994                 break;
995         }
996         case CFG4ISTAT_INTR:
997         {
998                 struct  scb *scb;
999                 u_int   scbid;
1000
1001                 scbid = ahd_get_scbptr(ahd);
1002                 scb = ahd_lookup_scb(ahd, scbid);
1003                 if (scb == NULL) {
1004                         ahd_dump_card_state(ahd);
1005                         kprintf("CFG4ISTAT: Free SCB %d referenced", scbid);
1006                         panic("For safety");
1007                 }
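                /*
                 * Preload the host DMA engine with this SCB's sense
                 * buffer address and length so the incoming status/sense
                 * data is deposited there.
                 */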
1008                 ahd_outq(ahd, HADDR, scb->sense_busaddr);
1009                 ahd_outw(ahd, HCNT, AHD_SENSE_BUFSIZE);
1010                 ahd_outb(ahd, HCNT + 2, 0);
1011                 ahd_outb(ahd, SG_CACHE_PRE, SG_LAST_SEG);
1012                 ahd_outb(ahd, DFCNTRL, PRELOADEN|SCSIEN|HDMAEN);
1013                 break;
1014         }
1015         case ILLEGAL_PHASE:
1016         {
1017                 u_int bus_phase;
1018
1019                 bus_phase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
1020                 kprintf("%s: ILLEGAL_PHASE 0x%x\n",
1021                        ahd_name(ahd), bus_phase);
1022
1023                 switch (bus_phase) {
1024                 case P_DATAOUT:
1025                 case P_DATAIN:
1026                 case P_DATAOUT_DT:
1027                 case P_DATAIN_DT:
1028                 case P_MESGOUT:
1029                 case P_STATUS:
1030                 case P_MESGIN:
1031                         ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
1032                         kprintf("%s: Issued Bus Reset.\n", ahd_name(ahd));
1033                         break;
1034                 case P_COMMAND:
1035                 {
1036                         struct  ahd_devinfo devinfo;
1037                         struct  scb *scb;
1038                         struct  ahd_initiator_tinfo *targ_info;
1039                         struct  ahd_tmode_tstate *tstate;
1040                         struct  ahd_transinfo *tinfo;
1041                         u_int   scbid;
1042
1043                         /*
1044                          * If a target takes us into the command phase
1045                          * assume that it has been externally reset and
1046                          * has thus lost our previous packetized negotiation
1047                          * agreement.  Since we have not sent an identify
1048                          * message and may not have fully qualified the
1049                          * connection, we change our command to TUR, assert
1050                          * ATN and ABORT the task when we go to message in
1051                          * phase.  The OSM will see the REQUEUE_REQUEST
1052                          * status and retry the command.
1053                          */
1054                         scbid = ahd_get_scbptr(ahd);
1055                         scb = ahd_lookup_scb(ahd, scbid);
1056                         if (scb == NULL) {
1057                                 kprintf("Invalid phase with no valid SCB.  "
1058                                        "Resetting bus.\n");
1059                                 ahd_reset_channel(ahd, 'A',
1060                                                   /*Initiate Reset*/TRUE);
1061                                 break;
1062                         }
1063                         ahd_compile_devinfo(&devinfo, SCB_GET_OUR_ID(scb),
1064                                             SCB_GET_TARGET(ahd, scb),
1065                                             SCB_GET_LUN(scb),
1066                                             SCB_GET_CHANNEL(ahd, scb),
1067                                             ROLE_INITIATOR);
1068                         targ_info = ahd_fetch_transinfo(ahd,
1069                                                         devinfo.channel,
1070                                                         devinfo.our_scsiid,
1071                                                         devinfo.target,
1072                                                         &tstate);
1073                         tinfo = &targ_info->curr;
1074                         ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
1075                                       AHD_TRANS_ACTIVE, /*paused*/TRUE);
1076                         ahd_set_syncrate(ahd, &devinfo, /*period*/0,
1077                                          /*offset*/0, /*ppr_options*/0,
1078                                          AHD_TRANS_ACTIVE, /*paused*/TRUE);
1079                         ahd_outb(ahd, SCB_CDB_STORE, 0);
1080                         ahd_outb(ahd, SCB_CDB_STORE+1, 0);
1081                         ahd_outb(ahd, SCB_CDB_STORE+2, 0);
1082                         ahd_outb(ahd, SCB_CDB_STORE+3, 0);
1083                         ahd_outb(ahd, SCB_CDB_STORE+4, 0);
1084                         ahd_outb(ahd, SCB_CDB_STORE+5, 0);
1085                         ahd_outb(ahd, SCB_CDB_LEN, 6);
1086                         scb->hscb->control &= ~(TAG_ENB|SCB_TAG_TYPE);
1087                         scb->hscb->control |= MK_MESSAGE;
1088                         ahd_outb(ahd, SCB_CONTROL, scb->hscb->control);
1089                         ahd_outb(ahd, MSG_OUT, HOST_MSG);
1090                         ahd_outb(ahd, SAVED_SCSIID, scb->hscb->scsiid);
1091                         /*
1092                          * The lun is 0, regardless of the SCB's lun
1093                          * as we have not sent an identify message.
1094                          */
1095                         ahd_outb(ahd, SAVED_LUN, 0);
1096                         ahd_outb(ahd, SEQ_FLAGS, 0);
1097                         ahd_assert_atn(ahd);
1098                         scb->flags &= ~SCB_PACKETIZED;
1099                         scb->flags |= SCB_ABORT|SCB_CMDPHASE_ABORT;
1100                         ahd_freeze_devq(ahd, scb);
1101                         aic_set_transaction_status(scb, CAM_REQUEUE_REQ);
1102                         aic_freeze_scb(scb);
1103
1104                         /*
1105                          * Allow the sequencer to continue with
1106                          * non-pack processing.
1107                          */
1108                         ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
1109                         ahd_outb(ahd, CLRLQOINT1, CLRLQOPHACHGINPKT);
1110                         if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) {
1111                                 ahd_outb(ahd, CLRLQOINT1, 0);
1112                         }
1113 #ifdef AHD_DEBUG
1114                         if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
1115                                 ahd_print_path(ahd, scb);
1116                                 kprintf("Unexpected command phase from "
1117                                        "packetized target\n");
1118                         }
1119 #endif
1120                         break;
1121                 }
1122                 }
1123                 break;
1124         }
1125         case CFG4OVERRUN:
1126         {
1127                 struct  scb *scb;
1128                 u_int   scb_index;
1129                 
1130 #ifdef AHD_DEBUG
1131                 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
1132                         kprintf("%s: CFG4OVERRUN mode = %x\n", ahd_name(ahd),
1133                                ahd_inb(ahd, MODE_PTR));
1134                 }
1135 #endif
1136                 scb_index = ahd_get_scbptr(ahd);
1137                 scb = ahd_lookup_scb(ahd, scb_index);
1138                 if (scb == NULL) {
1139                         /*
1140                          * Attempt to transfer to an SCB that is
1141                          * not outstanding.
1142                          */
1143                         ahd_assert_atn(ahd);
1144                         ahd_outb(ahd, MSG_OUT, HOST_MSG);
1145                         ahd->msgout_buf[0] = MSG_ABORT_TASK;
1146                         ahd->msgout_len = 1;
1147                         ahd->msgout_index = 0;
1148                         ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
1149                         /*
1150                          * Clear status received flag to prevent any
1151                          * attempt to complete this bogus SCB.
1152                          */
1153                         ahd_outb(ahd, SCB_CONTROL,
1154                                  ahd_inb_scbram(ahd, SCB_CONTROL)
1155                                  & ~STATUS_RCVD);
1156                 }
1157                 break;
1158         }
1159         case DUMP_CARD_STATE:
1160         {
1161                 ahd_dump_card_state(ahd);
1162                 break;
1163         }
1164         case PDATA_REINIT:
1165         {
1166 #ifdef AHD_DEBUG
1167                 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
1168                         kprintf("%s: PDATA_REINIT - DFCNTRL = 0x%x "
1169                                "SG_CACHE_SHADOW = 0x%x\n",
1170                                ahd_name(ahd), ahd_inb(ahd, DFCNTRL),
1171                                ahd_inb(ahd, SG_CACHE_SHADOW));
1172                 }
1173 #endif
1174                 ahd_reinitialize_dataptrs(ahd);
1175                 break;
1176         }
1177         case HOST_MSG_LOOP:
1178         {
1179                 struct ahd_devinfo devinfo;
1180
1181                 /*
1182                  * The sequencer has encountered a message phase
1183                  * that requires host assistance for completion.
1184                  * While handling the message phase(s), we will be
1185                  * notified by the sequencer after each byte is
1186                  * transferred so we can track bus phase changes.
1187                  *
1188                  * If this is the first time we've seen a HOST_MSG_LOOP
1189                  * interrupt, initialize the state of the host message
1190                  * loop.
1191                  */
1192                 ahd_fetch_devinfo(ahd, &devinfo);
1193                 if (ahd->msg_type == MSG_TYPE_NONE) {
1194                         struct scb *scb;
1195                         u_int scb_index;
1196                         u_int bus_phase;
1197
1198                         bus_phase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
1199                         if (bus_phase != P_MESGIN
1200                          && bus_phase != P_MESGOUT) {
1201                                 kprintf("ahd_intr: HOST_MSG_LOOP bad "
1202                                        "phase 0x%x\n", bus_phase);
1203                                 /*
1204                                  * Probably transitioned to bus free before
1205                                  * we got here.  Just punt the message.
1206                                  */
1207                                 ahd_dump_card_state(ahd);
1208                                 ahd_clear_intstat(ahd);
1209                                 ahd_restart(ahd);
1210                                 return;
1211                         }
1212
1213                         scb_index = ahd_get_scbptr(ahd);
1214                         scb = ahd_lookup_scb(ahd, scb_index);
1215                         if (devinfo.role == ROLE_INITIATOR) {
1216                                 if (bus_phase == P_MESGOUT)
1217                                         ahd_setup_initiator_msgout(ahd,
1218                                                                    &devinfo,
1219                                                                    scb);
1220                                 else {
1221                                         ahd->msg_type =
1222                                             MSG_TYPE_INITIATOR_MSGIN;
1223                                         ahd->msgin_index = 0;
1224                                 }
1225                         }
1226 #ifdef AHD_TARGET_MODE
1227                         else {
1228                                 if (bus_phase == P_MESGOUT) {
1229                                         ahd->msg_type =
1230                                             MSG_TYPE_TARGET_MSGOUT;
1231                                         ahd->msgin_index = 0;
1232                                 }
1233                                 else 
1234                                         ahd_setup_target_msgin(ahd,
1235                                                                &devinfo,
1236                                                                scb);
1237                         }
1238 #endif
1239                 }
1240
1241                 ahd_handle_message_phase(ahd);
1242                 break;
1243         }
1244         case NO_MATCH:
1245         {
1246                 /* Ensure we don't leave the selection hardware on */
1247                 AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
1248                 ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO);
1249
1250                 kprintf("%s:%c:%d: no active SCB for reconnecting "
1251                        "target - issuing BUS DEVICE RESET\n",
1252                        ahd_name(ahd), 'A', ahd_inb(ahd, SELID) >> 4);
1253                 kprintf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
1254                        "REG0 == 0x%x ACCUM = 0x%x\n",
1255                        ahd_inb(ahd, SAVED_SCSIID), ahd_inb(ahd, SAVED_LUN),
1256                        ahd_inw(ahd, REG0), ahd_inb(ahd, ACCUM));
1257                 kprintf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
1258                        "SINDEX == 0x%x\n",
1259                        ahd_inb(ahd, SEQ_FLAGS), ahd_get_scbptr(ahd),
1260                        ahd_find_busy_tcl(ahd,
1261                                          BUILD_TCL(ahd_inb(ahd, SAVED_SCSIID),
1262                                                    ahd_inb(ahd, SAVED_LUN))),
1263                        ahd_inw(ahd, SINDEX));
1264                 kprintf("SELID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
1265                        "SCB_CONTROL == 0x%x\n",
1266                        ahd_inb(ahd, SELID), ahd_inb_scbram(ahd, SCB_SCSIID),
1267                        ahd_inb_scbram(ahd, SCB_LUN),
1268                        ahd_inb_scbram(ahd, SCB_CONTROL));
1269                 kprintf("SCSIBUS[0] == 0x%x, SCSISIGI == 0x%x\n",
1270                        ahd_inb(ahd, SCSIBUS), ahd_inb(ahd, SCSISIGI));
1271                 kprintf("SXFRCTL0 == 0x%x\n", ahd_inb(ahd, SXFRCTL0));
1272                 kprintf("SEQCTL0 == 0x%x\n", ahd_inb(ahd, SEQCTL0));
1273                 ahd_dump_card_state(ahd);
1274                 ahd->msgout_buf[0] = MSG_BUS_DEV_RESET;
1275                 ahd->msgout_len = 1;
1276                 ahd->msgout_index = 0;
1277                 ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
1278                 ahd_outb(ahd, MSG_OUT, HOST_MSG);
1279                 ahd_assert_atn(ahd);
1280                 break;
1281         }
1282         case PROTO_VIOLATION:
1283         {
1284                 ahd_handle_proto_violation(ahd);
1285                 break;
1286         }
1287         case IGN_WIDE_RES:
1288         {
1289                 struct ahd_devinfo devinfo;
1290
1291                 ahd_fetch_devinfo(ahd, &devinfo);
1292                 ahd_handle_ign_wide_residue(ahd, &devinfo);
1293                 break;
1294         }
1295         case BAD_PHASE:
1296         {
1297                 u_int lastphase;
1298
1299                 lastphase = ahd_inb(ahd, LASTPHASE);
1300                 kprintf("%s:%c:%d: unknown scsi bus phase %x, "
1301                        "curphase = 0x%x.  Attempting to continue\n",
1302                        ahd_name(ahd), 'A',
1303                        SCSIID_TARGET(ahd, ahd_inb(ahd, SAVED_SCSIID)),
1304                        lastphase, ahd_inb(ahd, SCSISIGI));
1305                 break;
1306         }
1307         case MISSED_BUSFREE:
1308         {
1309                 u_int lastphase;
1310
1311                 lastphase = ahd_inb(ahd, LASTPHASE);
1312                 kprintf("%s:%c:%d: Missed busfree. "
1313                        "Lastphase = 0x%x, Curphase = 0x%x\n",
1314                        ahd_name(ahd), 'A',
1315                        SCSIID_TARGET(ahd, ahd_inb(ahd, SAVED_SCSIID)),
1316                        lastphase, ahd_inb(ahd, SCSISIGI));
1317                 ahd_restart(ahd);
1318                 return;
1319         }
1320         case DATA_OVERRUN:
1321         {
1322                 /*
1323                  * When the sequencer detects an overrun, it
1324                  * places the controller in "BITBUCKET" mode
1325                  * and allows the target to complete its transfer.
1326                  * Unfortunately, none of the counters get updated
1327                  * when the controller is in this mode, so we have
1328                  * no way of knowing how large the overrun was.
1329                  */
1330                 struct  scb *scb;
1331                 u_int   scbindex;
1332 #ifdef AHD_DEBUG
1333                 u_int   lastphase;
1334 #endif
1335
1336                 scbindex = ahd_get_scbptr(ahd);
1337                 scb = ahd_lookup_scb(ahd, scbindex);
1338 #ifdef AHD_DEBUG
1339                 lastphase = ahd_inb(ahd, LASTPHASE);
1340                 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
1341                         ahd_print_path(ahd, scb);
1342                         kprintf("data overrun detected %s.  Tag == 0x%x.\n",
1343                                ahd_lookup_phase_entry(lastphase)->phasemsg,
1344                                SCB_GET_TAG(scb));
1345                         ahd_print_path(ahd, scb);
1346                         kprintf("%s seen Data Phase.  Length = %ld.  "
1347                                "NumSGs = %d.\n",
1348                                ahd_inb(ahd, SEQ_FLAGS) & DPHASE
1349                                ? "Have" : "Haven't",
1350                                aic_get_transfer_length(scb), scb->sg_count);
1351                         ahd_dump_sglist(scb);
1352                 }
1353 #endif
1354
1355                 /*
1356                  * Set this and it will take effect when the
1357                  * target does a command complete.
1358                  */
1359                 ahd_freeze_devq(ahd, scb);
1360                 aic_set_transaction_status(scb, CAM_DATA_RUN_ERR);
1361                 aic_freeze_scb(scb);
1362                 break;
1363         }
1364         case MKMSG_FAILED:
1365         {
1366                 struct ahd_devinfo devinfo;
1367                 struct scb *scb;
1368                 u_int scbid;
1369
1370                 ahd_fetch_devinfo(ahd, &devinfo);
1371                 kprintf("%s:%c:%d:%d: Attempt to issue message failed\n",
1372                        ahd_name(ahd), devinfo.channel, devinfo.target,
1373                        devinfo.lun);
1374                 scbid = ahd_get_scbptr(ahd);
1375                 scb = ahd_lookup_scb(ahd, scbid);
1376                 if (scb != NULL
1377                  && (scb->flags & SCB_RECOVERY_SCB) != 0)
1378                         /*
1379                          * Ensure that we didn't put a second instance of this
1380                          * SCB into the QINFIFO.
1381                          */
1382                         ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb),
1383                                            SCB_GET_CHANNEL(ahd, scb),
1384                                            SCB_GET_LUN(scb), SCB_GET_TAG(scb),
1385                                            ROLE_INITIATOR, /*status*/0,
1386                                            SEARCH_REMOVE);
1387                 ahd_outb(ahd, SCB_CONTROL,
1388                          ahd_inb_scbram(ahd, SCB_CONTROL) & ~MK_MESSAGE);
1389                 break;
1390         }
1391         case TASKMGMT_FUNC_COMPLETE:
1392         {
1393                 u_int   scbid;
1394                 struct  scb *scb;
1395
1396                 scbid = ahd_get_scbptr(ahd);
1397                 scb = ahd_lookup_scb(ahd, scbid);
1398                 if (scb != NULL) {
1399                         u_int      lun;
1400                         u_int      tag;
1401                         cam_status error;
1402
1403                         ahd_print_path(ahd, scb);
1404                         kprintf("Task Management Func 0x%x Complete\n",
1405                                scb->hscb->task_management);
1406                         lun = CAM_LUN_WILDCARD;
1407                         tag = SCB_LIST_NULL;
1408
1409                         switch (scb->hscb->task_management) {
1410                         case SIU_TASKMGMT_ABORT_TASK:
1411                                 tag = SCB_GET_TAG(scb);
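                                     /* FALLTHROUGH */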
1412                         case SIU_TASKMGMT_ABORT_TASK_SET:
1413                         case SIU_TASKMGMT_CLEAR_TASK_SET:
1414                                 lun = scb->hscb->lun;
1415                                 error = CAM_REQ_ABORTED;
1416                                 ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb),
1417                                                'A', lun, tag, ROLE_INITIATOR,
1418                                                error);
1419                                 break;
1420                         case SIU_TASKMGMT_LUN_RESET:
1421                                 lun = scb->hscb->lun;
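                                     /* FALLTHROUGH */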
1422                         case SIU_TASKMGMT_TARGET_RESET:
1423                         {
1424                                 struct ahd_devinfo devinfo;
1425
1426                                 ahd_scb_devinfo(ahd, &devinfo, scb);
1427                                 error = CAM_BDR_SENT;
1428                                 ahd_handle_devreset(ahd, &devinfo, lun,
1429                                                     CAM_BDR_SENT,
1430                                                     lun != CAM_LUN_WILDCARD
1431                                                     ? "Lun Reset"
1432                                                     : "Target Reset",
1433                                                     /*verbose_level*/0);
1434                                 break;
1435                         }
1436                         default:
1437                                 panic("Unexpected TaskMgmt Func\n");
1438                                 break;
1439                         }
1440                 }
1441                 break;
1442         }
1443         case TASKMGMT_CMD_CMPLT_OKAY:
1444         {
1445                 u_int   scbid;
1446                 struct  scb *scb;
1447
1448                 /*
1449                  * An ABORT TASK TMF failed to be delivered before
1450                  * the targeted command completed normally.
1451                  */
1452                 scbid = ahd_get_scbptr(ahd);
1453                 scb = ahd_lookup_scb(ahd, scbid);
1454                 if (scb != NULL) {
1455                         /*
1456                          * Remove the second instance of this SCB from
1457                          * the QINFIFO if it is still there.
1458                          */
1459                         ahd_print_path(ahd, scb);
1460                         kprintf("SCB completes before TMF\n");
1461                         /*
1462                          * Handle losing the race.  Wait until any
1463                          * current selection completes.  We will then
1464                          * set the TMF back to zero in this SCB so that
1465                          * the sequencer doesn't bother to issue another
1466                          * sequencer interrupt for its completion.
1467                          */
1468                         while ((ahd_inb(ahd, SCSISEQ0) & ENSELO) != 0
1469                             && (ahd_inb(ahd, SSTAT0) & SELDO) == 0
1470                             && (ahd_inb(ahd, SSTAT1) & SELTO) == 0)
1471                                 ;
1472                         ahd_outb(ahd, SCB_TASK_MANAGEMENT, 0);
1473                         ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb),
1474                                            SCB_GET_CHANNEL(ahd, scb),  
1475                                            SCB_GET_LUN(scb), SCB_GET_TAG(scb), 
1476                                            ROLE_INITIATOR, /*status*/0,   
1477                                            SEARCH_REMOVE);
1478                 }
1479                 break;
1480         }
1481         case TRACEPOINT0:
1482         case TRACEPOINT1:
1483         case TRACEPOINT2:
1484         case TRACEPOINT3:
1485                 kprintf("%s: Tracepoint %d\n", ahd_name(ahd),
1486                        seqintcode - TRACEPOINT0);
1487                 break;
1488         case NO_SEQINT:
1489                 break;
1490         case SAW_HWERR:
1491                 ahd_handle_hwerrint(ahd);
1492                 break;
1493         default:
1494                 kprintf("%s: Unexpected SEQINTCODE %d\n", ahd_name(ahd),
1495                        seqintcode);
1496                 break;
1497         }
1498         /*
1499          *  The sequencer is paused immediately on
1500          *  a SEQINT, so we should restart it when
1501          *  we're done.
1502          */
1503         ahd_unpause(ahd);
1504 }
1505
1506 void
1507 ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat)
1508 {
1509         struct scb      *scb;
1510         u_int            status0;
1511         u_int            status3;
1512         u_int            status;
1513         u_int            lqistat1;
1514         u_int            lqostat0;
1515         u_int            scbid;
1516         u_int            busfreetime;
1517
1518         ahd_update_modes(ahd);
1519         ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
1520
1521         status3 = ahd_inb(ahd, SSTAT3) & (NTRAMPERR|OSRAMPERR);
1522         status0 = ahd_inb(ahd, SSTAT0) & (IOERR|OVERRUN|SELDI|SELDO);
1523         status = ahd_inb(ahd, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR);
1524         lqistat1 = ahd_inb(ahd, LQISTAT1);
1525         lqostat0 = ahd_inb(ahd, LQOSTAT0);
1526         busfreetime = ahd_inb(ahd, SSTAT2) & BUSFREETIME;
1527         if ((status0 & (SELDI|SELDO)) != 0) {
1528                 u_int simode0;
1529
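                     /* Only consider selection status bits whose interrupts are enabled in SIMODE0. */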
1530                 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
1531                 simode0 = ahd_inb(ahd, SIMODE0);
1532                 status0 &= simode0 & (IOERR|OVERRUN|SELDI|SELDO);
1533                 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
1534         }
1535         scbid = ahd_get_scbptr(ahd);
1536         scb = ahd_lookup_scb(ahd, scbid);
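             /* Ignore the SCB if this connection was never fully identified. */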
1537         if (scb != NULL
1538          && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) != 0)
1539                 scb = NULL;
1540
1541         /* Make sure the sequencer is in a safe location. */
1542         ahd_clear_critical_section(ahd);
1543
1544         if ((status0 & IOERR) != 0) {
1545                 u_int now_lvd;
1546
1547                 now_lvd = ahd_inb(ahd, SBLKCTL) & ENAB40;
1548                 kprintf("%s: Transceiver State Has Changed to %s mode\n",
1549                        ahd_name(ahd), now_lvd ? "LVD" : "SE");
1550                 ahd_outb(ahd, CLRSINT0, CLRIOERR);
1551                 /*
1552                  * A change in I/O mode is equivalent to a bus reset.
1553                  */
1554                 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
1555                 ahd_pause(ahd);
1556                 ahd_setup_iocell_workaround(ahd);
1557                 ahd_unpause(ahd);
1558         } else if ((status0 & OVERRUN) != 0) {
1559                 kprintf("%s: SCSI offset overrun detected.  Resetting bus.\n",
1560                        ahd_name(ahd));
1561                 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
1562         } else if ((status & SCSIRSTI) != 0) {
1563                 kprintf("%s: Someone reset channel A\n", ahd_name(ahd));
1564                 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/FALSE);
1565         } else if ((status & SCSIPERR) != 0) {
1566                 ahd_handle_transmission_error(ahd);
1567         } else if (lqostat0 != 0) {
1568                 kprintf("%s: lqostat0 == 0x%x!\n", ahd_name(ahd), lqostat0);
1569                 ahd_outb(ahd, CLRLQOINT0, lqostat0);
1570                 if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) {
1571                         ahd_outb(ahd, CLRLQOINT1, 0);
1572                 }
1573         } else if ((status & SELTO) != 0) {
1574                 u_int  scbid;
1575
1576                 /* Stop the selection */
1577                 ahd_outb(ahd, SCSISEQ0, 0);
1578
1579                 /* No more pending messages */
1580                 ahd_clear_msg_state(ahd);
1581
1582                 /* Clear interrupt state */
1583                 ahd_outb(ahd, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR);
1584
1585                 /*
1586                  * Although the driver does not care about the
1587                  * 'Selection in Progress' status bit, the busy
1588                  * LED does.  SELINGO is only cleared by a successful
1589                  * selection, so we must manually clear it to ensure
1590                  * the LED turns off just in case no future successful
1591                  * selections occur (e.g. no devices on the bus).
1592                  */
1593                 ahd_outb(ahd, CLRSINT0, CLRSELINGO);
1594
1595                 scbid = ahd_inw(ahd, WAITING_TID_HEAD);
1596                 scb = ahd_lookup_scb(ahd, scbid);
1597                 if (scb == NULL) {
1598                         kprintf("%s: ahd_intr - referenced scb not "
1599                                "valid during SELTO scb(0x%x)\n",
1600                                ahd_name(ahd), scbid);
1601                         ahd_dump_card_state(ahd);
1602                 } else {
1603                         struct ahd_devinfo devinfo;
1604 #ifdef AHD_DEBUG
1605                         if ((ahd_debug & AHD_SHOW_SELTO) != 0) {
1606                                 ahd_print_path(ahd, scb);
1607                                 kprintf("Saw Selection Timeout for SCB 0x%x\n",
1608                                        scbid);
1609                         }
1610 #endif
1611                         /*
1612                          * Force a renegotiation with this target just in
1613                          * case the cable was pulled and will later be
1614                          * re-attached.  The target may forget its negotiation
1615                          * settings with us should it attempt to reselect
1616                          * during the interruption.  The target will not issue
1617                          * a unit attention in this case, so we must always
1618                          * renegotiate.
1619                          */
1620                         ahd_scb_devinfo(ahd, &devinfo, scb);
1621                         ahd_force_renegotiation(ahd, &devinfo);
1622                         aic_set_transaction_status(scb, CAM_SEL_TIMEOUT);
1623                         ahd_freeze_devq(ahd, scb);
1624                 }
1625                 ahd_outb(ahd, CLRINT, CLRSCSIINT);
1626                 ahd_iocell_first_selection(ahd);
1627                 ahd_unpause(ahd);
1628         } else if ((status0 & (SELDI|SELDO)) != 0) {
1629                 ahd_iocell_first_selection(ahd);
1630                 ahd_unpause(ahd);
1631         } else if (status3 != 0) {
1632                 kprintf("%s: SCSI Cell parity error SSTAT3 == 0x%x\n",
1633                        ahd_name(ahd), status3);
1634                 ahd_outb(ahd, CLRSINT3, status3);
1635         } else if ((lqistat1 & (LQIPHASE_LQ|LQIPHASE_NLQ)) != 0) {
1636                 ahd_handle_lqiphase_error(ahd, lqistat1);
1637         } else if ((lqistat1 & LQICRCI_NLQ) != 0) {
1638                 /*
1639                  * This status can be delayed during some
1640                  * streaming operations.  The SCSIPHASE
1641                  * handler has already dealt with this case
1642                  * so just clear the error.
1643                  */
1644                 ahd_outb(ahd, CLRLQIINT1, CLRLQICRCI_NLQ);
1645         } else if ((status & BUSFREE) != 0) {
1646                 u_int lqostat1;
1647                 int   restart;
1648                 int   clear_fifo;
1649                 int   packetized;
1650                 u_int mode;
1651
1652                 /*
1653                  * Clear our selection hardware as soon as possible.
1654                  * We may have an entry in the waiting Q for this target
1655                  * that is affected by this busfree, and we don't want to
1656                  * go about selecting the target while we handle the event.
1657                  */
1658                 ahd_outb(ahd, SCSISEQ0, 0);
1659
1660                 /*
1661                  * Determine what we were up to at the time of
1662                  * the busfree.
1663                  */
1664                 mode = AHD_MODE_SCSI;
1665                 busfreetime = ahd_inb(ahd, SSTAT2) & BUSFREETIME;
1666                 lqostat1 = ahd_inb(ahd, LQOSTAT1);
1667                 switch (busfreetime) {
1668                 case BUSFREE_DFF0:
1669                 case BUSFREE_DFF1:
1670                 {
1671                         u_int   scbid;
1672                         struct  scb *scb;
1673
1674                         mode = busfreetime == BUSFREE_DFF0
1675                              ? AHD_MODE_DFF0 : AHD_MODE_DFF1;
1676                         ahd_set_modes(ahd, mode, mode);
1677                         scbid = ahd_get_scbptr(ahd);
1678                         scb = ahd_lookup_scb(ahd, scbid);
1679                         if (scb == NULL) {
1680                                 kprintf("%s: Invalid SCB %d in DFF%d "
1681                                        "during unexpected busfree\n",
1682                                        ahd_name(ahd), scbid, mode);
1683                                 packetized = 0;
1684                         } else
1685                                 packetized = (scb->flags & SCB_PACKETIZED) != 0;
1686                         clear_fifo = 1;
1687                         break;
1688                 }
1689                 case BUSFREE_LQO:
1690                         clear_fifo = 0;
1691                         packetized = 1;
1692                         break;
1693                 default:
1694                         clear_fifo = 0;
1695                         packetized =  (lqostat1 & LQOBUSFREE) != 0;
1696                         if (!packetized
1697                          && ahd_inb(ahd, LASTPHASE) == P_BUSFREE
1698                          && ((ahd_inb(ahd, SSTAT0) & SELDO) == 0
1699                           || (ahd_inb(ahd, SCSISEQ0) & ENSELO) == 0))
1700                                 /*
1701                                  * Assume packetized if we are not
1702                                  * on the bus in a non-packetized
1703                                  * capacity and any pending selection
1704                                  * was a packetized selection.
1705                                  */
1706                                 packetized = 1;
1707                         break;
1708                 }
1709
1710 #ifdef AHD_DEBUG
1711                 if ((ahd_debug & AHD_SHOW_MISC) != 0)
1712                         kprintf("Saw Busfree.  Busfreetime = 0x%x.\n",
1713                                busfreetime);
1714 #endif
1715                 /*
1716                  * Busfrees that occur in non-packetized phases are
1717                  * handled by the nonpkt_busfree handler.
1718                  */
1719                 if (packetized && ahd_inb(ahd, LASTPHASE) == P_BUSFREE) {
1720                         restart = ahd_handle_pkt_busfree(ahd, busfreetime);
1721                 } else {
1722                         packetized = 0;
1723                         restart = ahd_handle_nonpkt_busfree(ahd);
1724                 }
1725                 /*
1726                  * Clear the busfree interrupt status.  The setting of
1727                  * the interrupt is a pulse, so in a perfect world, we
1728                  * would not need to muck with the ENBUSFREE logic.  This
1729                  * would ensure that if the bus moves on to another
1730                  * connection, busfree protection is still in force.  If
1731                  * BUSFREEREV is broken, however, we must manually clear
1732                  * ENBUSFREE if the busfree occurred during a non-packetized
1733                  * connection so that we don't get false positives during
1734                  * future, packetized, connections.
1735                  */
1736                 ahd_outb(ahd, CLRSINT1, CLRBUSFREE);
1737                 if (packetized == 0
1738                  && (ahd->bugs & AHD_BUSFREEREV_BUG) != 0)
1739                         ahd_outb(ahd, SIMODE1,
1740                                  ahd_inb(ahd, SIMODE1) & ~ENBUSFREE);
1741
1742                 if (clear_fifo)
1743                         ahd_clear_fifo(ahd, mode);
1744
1745                 ahd_clear_msg_state(ahd);
1746                 ahd_outb(ahd, CLRINT, CLRSCSIINT);
1747                 if (restart) {
1748                         ahd_restart(ahd);
1749                 } else {
1750                         ahd_unpause(ahd);
1751                 }
1752         } else {
1753                 kprintf("%s: Missing case in ahd_handle_scsiint. status = %x\n",
1754                        ahd_name(ahd), status);
1755                 ahd_dump_card_state(ahd);
1756                 ahd_clear_intstat(ahd);
1757                 ahd_unpause(ahd);
1758         }
1759 }
1760
1761 static void
1762 ahd_handle_transmission_error(struct ahd_softc *ahd)
1763 {
1764         struct  scb *scb;
1765         u_int   scbid;
1766         u_int   lqistat1;
1767         u_int   lqistat2;
1768         u_int   msg_out;
1769         u_int   curphase;
1770         u_int   lastphase;
1771         u_int   perrdiag;
1772         u_int   cur_col;
1773         int     silent;
1774
1775         scb = NULL;
1776         ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
1777         lqistat1 = ahd_inb(ahd, LQISTAT1) & ~(LQIPHASE_LQ|LQIPHASE_NLQ);
1778         lqistat2 = ahd_inb(ahd, LQISTAT2);
1779         if ((lqistat1 & (LQICRCI_NLQ|LQICRCI_LQ)) == 0
1780          && (ahd->bugs & AHD_NLQICRC_DELAYED_BUG) != 0) {
1781                 u_int lqistate;
1782
1783                 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
1784                 lqistate = ahd_inb(ahd, LQISTATE);
1785                 if ((lqistate >= 0x1E && lqistate <= 0x24)
1786                  || (lqistate == 0x29)) {
1787 #ifdef AHD_DEBUG
1788                         if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
1789                                 kprintf("%s: NLQCRC found via LQISTATE\n",
1790                                        ahd_name(ahd));
1791                         }
1792 #endif
1793                         lqistat1 |= LQICRCI_NLQ;
1794                 }
1795                 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
1796         }
1797
1798         ahd_outb(ahd, CLRLQIINT1, lqistat1);
1799         lastphase = ahd_inb(ahd, LASTPHASE);
1800         curphase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
1801         perrdiag = ahd_inb(ahd, PERRDIAG);
1802         msg_out = MSG_INITIATOR_DET_ERR;
1803         ahd_outb(ahd, CLRSINT1, CLRSCSIPERR);
1804         
1805         /*
1806          * Try to find the SCB associated with this error.
1807          */
1808         silent = FALSE;
1809         if (lqistat1 == 0
1810          || (lqistat1 & LQICRCI_NLQ) != 0) {
1811                 if ((lqistat1 & (LQICRCI_NLQ|LQIOVERI_NLQ)) != 0)
1812                         ahd_set_active_fifo(ahd);
1813                 scbid = ahd_get_scbptr(ahd);
1814                 scb = ahd_lookup_scb(ahd, scbid);
1815                 if (scb != NULL && SCB_IS_SILENT(scb))
1816                         silent = TRUE;
1817         }
1818
1819         cur_col = 0;
1820         if (silent == FALSE) {
1821                 kprintf("%s: Transmission error detected\n", ahd_name(ahd));
1822                 ahd_lqistat1_print(lqistat1, &cur_col, 50);
1823                 ahd_lastphase_print(lastphase, &cur_col, 50);
1824                 ahd_scsisigi_print(curphase, &cur_col, 50);
1825                 ahd_perrdiag_print(perrdiag, &cur_col, 50);
1826                 kprintf("\n");
1827                 ahd_dump_card_state(ahd);
1828         }
1829
1830         if ((lqistat1 & (LQIOVERI_LQ|LQIOVERI_NLQ)) != 0) {
1831                 if (silent == FALSE) {
1832                         kprintf("%s: Gross protocol error during incoming "
1833                                "packet.  lqistat1 == 0x%x.  Resetting bus.\n",
1834                                ahd_name(ahd), lqistat1);
1835                 }
1836                 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
1837                 return;
1838         } else if ((lqistat1 & LQICRCI_LQ) != 0) {
1839                 /*
1840                  * A CRC error has been detected on an incoming LQ.
1841                  * The bus is currently hung on the last ACK.
1842                  * Hit LQIRETRY to release the last ack, and
1843                  * wait for the sequencer to determine that ATNO
1844                  * is asserted while in message out to take us
1845                  * to our host message loop.  No NONPACKREQ or
1846                  * LQIPHASE type errors will occur in this
1847                  * scenario.  After this first LQIRETRY, the LQI
1848                  * manager will be in ISELO where it will
1849                  * happily sit until another packet phase begins.
1850                  * Unexpected bus free detection is enabled
1851                  * through any phases that occur after we release
1852                  * this last ack until the LQI manager sees a
1853                  * packet phase.  This implies we may have to
1854                  * ignore a perfectly valid "unexpected busfree"
1855                  * after our "initiator detected error" message is
1856                  * sent.  A busfree is the expected response after
1857                  * we tell the target that its L_Q was corrupted.
1858                  * (SPI4R09 10.7.3.3.3)
1859                  */
1860                 ahd_outb(ahd, LQCTL2, LQIRETRY);
1861                 kprintf("LQIRetry for LQICRCI_LQ to release ACK\n");
1862         } else if ((lqistat1 & LQICRCI_NLQ) != 0) {
1863                 /*
1864                  * We detected a CRC error in a NON-LQ packet.
1865                  * The hardware has varying behavior in this situation
1866                  * depending on whether this packet was part of a
1867                  * stream or not.
1868                  *
1869                  * PKT by PKT mode:
1870                  * The hardware has already acked the complete packet.
1871                  * If the target honors our outstanding ATN condition,
1872                  * we should be (or soon will be) in MSGOUT phase.
1873                  * This will trigger the LQIPHASE_LQ status bit as the
1874                  * hardware was expecting another LQ.  Unexpected
1875                  * busfree detection is enabled.  Once LQIPHASE_LQ is
1876                  * true (first entry into host message loop is much
1877                  * the same), we must clear LQIPHASE_LQ and hit
1878                  * LQIRETRY so the hardware is ready to handle
1879                  * a future LQ.  NONPACKREQ will not be asserted again
1880                  * once we hit LQIRETRY until another packet is
1881                  * processed.  The target may either go busfree
1882                  * or start another packet in response to our message.
1883                  *
1884                  * Read Streaming P0 asserted:
1885                  * If we raise ATN and the target completes the entire
1886                  * stream (P0 asserted during the last packet), the
1887                  * hardware will ack all data and return to the ISTART
1888                  * state.  When the target responds to our ATN condition,
1889                  * LQIPHASE_LQ will be asserted.  We should respond to
1890                  * this with an LQIRETRY to prepare for any future
1891                  * packets.  NONPACKREQ will not be asserted again
1892                  * once we hit LQIRETRY until another packet is
1893                  * processed.  The target may either go busfree or
1894                  * start another packet in response to our message.
1895                  * Busfree detection is enabled.
1896                  *
1897                  * Read Streaming P0 not asserted:
1898                  * If we raise ATN and the target transitions to
1899                  * MSGOUT in or after a packet where P0 is not
1900                  * asserted, the hardware will assert LQIPHASE_NLQ.
1901                  * We should respond to the LQIPHASE_NLQ with an
1902                  * LQIRETRY.  Should the target stay in a non-pkt
1903                  * phase after we send our message, the hardware
1904                  * will assert LQIPHASE_LQ.  Recovery is then just as
1905                  * listed above for the read streaming with P0 asserted.
1906                  * Busfree detection is enabled.
1907                  */
1908                 if (silent == FALSE)
1909                         kprintf("LQICRCI_NLQ\n");
1910                 if (scb == NULL) {
1911                         kprintf("%s: No SCB valid for LQICRCI_NLQ.  "
1912                                "Resetting bus\n", ahd_name(ahd));
1913                         ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
1914                         return;
1915                 }
1916         } else if ((lqistat1 & LQIBADLQI) != 0) {
1917                 kprintf("Need to handle BADLQI!\n");
1918                 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
1919                 return;
1920         } else if ((perrdiag & (PARITYERR|PREVPHASE)) == PARITYERR) {
1921                 if ((curphase & ~P_DATAIN_DT) != 0) {
1922                         /* Ack the byte.  So we can continue. */
1923                         if (silent == FALSE)
1924                                 kprintf("Acking %s to clear perror\n",
1925                                     ahd_lookup_phase_entry(curphase)->phasemsg);
1926                         ahd_inb(ahd, SCSIDAT);
1927                 }
1928         
1929                 if (curphase == P_MESGIN)
1930                         msg_out = MSG_PARITY_ERROR;
1931         }
1932
1933         /*
1934          * We've set the hardware to assert ATN if we 
1935          * get a parity error on "in" phases, so all we
1936          * need to do is stuff the message buffer with
1937          * the appropriate message.  "In" phases have set
1938          * mesg_out to something other than MSG_NOP.
1939          */
1940         ahd->send_msg_perror = msg_out;
1941         if (scb != NULL && msg_out == MSG_INITIATOR_DET_ERR)
1942                 scb->flags |= SCB_TRANSMISSION_ERROR;
1943         ahd_outb(ahd, MSG_OUT, HOST_MSG);
1944         ahd_outb(ahd, CLRINT, CLRSCSIINT);
1945         ahd_unpause(ahd);
1946 }
1947
1948 static void
1949 ahd_handle_lqiphase_error(struct ahd_softc *ahd, u_int lqistat1)
1950 {
1951         /*
1952          * Clear the sources of the interrupts.
1953          */
1954         ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
1955         ahd_outb(ahd, CLRLQIINT1, lqistat1);
1956
1957         /*
1958          * If the "illegal" phase changes were in response
1959          * to our ATN to flag a CRC error, AND we ended up
1960          * on packet boundaries, clear the error, restart the
1961          * LQI manager as appropriate, and go on our merry
1962          * way toward sending the message.  Otherwise, reset
1963          * the bus to clear the error.
1964          */
1965         ahd_set_active_fifo(ahd);
1966         if ((ahd_inb(ahd, SCSISIGO) & ATNO) != 0
1967          && (ahd_inb(ahd, MDFFSTAT) & DLZERO) != 0) {
1968                 if ((lqistat1 & LQIPHASE_LQ) != 0) {
1969                         kprintf("LQIRETRY for LQIPHASE_LQ\n");
1970                         ahd_outb(ahd, LQCTL2, LQIRETRY);
1971                 } else if ((lqistat1 & LQIPHASE_NLQ) != 0) {
1972                         kprintf("LQIRETRY for LQIPHASE_NLQ\n");
1973                         ahd_outb(ahd, LQCTL2, LQIRETRY);
1974                 } else
1975                         panic("ahd_handle_lqiphase_error: No phase errors\n");
1976                 ahd_dump_card_state(ahd);
1977                 ahd_outb(ahd, CLRINT, CLRSCSIINT);
1978                 ahd_unpause(ahd);
1979         } else {
1980                 kprintf("Resetting Channel for LQI Phase error\n");
1981                 ahd_dump_card_state(ahd);
1982                 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
1983         }
1984 }
1985
1986 /*
1987  * Packetized unexpected or expected busfree.
1988  * Entered in mode based on busfreetime.
1989  */
1990 static int
1991 ahd_handle_pkt_busfree(struct ahd_softc *ahd, u_int busfreetime)
1992 {
1993         u_int lqostat1;
1994
1995         AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
1996                          ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
1997         lqostat1 = ahd_inb(ahd, LQOSTAT1);
1998         if ((lqostat1 & LQOBUSFREE) != 0) {
1999                 struct scb *scb;
2000                 u_int scbid;
2001                 u_int saved_scbptr;
2002                 u_int waiting_h;
2003                 u_int waiting_t;
2004                 u_int next;
2005
2006                 if ((busfreetime & BUSFREE_LQO) == 0)
2007                         kprintf("%s: Warning, BUSFREE time is 0x%x.  "
2008                                "Expected BUSFREE_LQO.\n",
2009                                ahd_name(ahd), busfreetime);
2010                 /*
2011                  * The LQO manager detected an unexpected busfree
2012                  * either:
2013                  *
2014                  * 1) During an outgoing LQ.
2015                  * 2) After an outgoing LQ but before the first
2016                  *    REQ of the command packet.
2017                  * 3) During an outgoing command packet.
2018                  *
2019                  * In all cases, CURRSCB is pointing to the
2020                  * SCB that encountered the failure.  Clean
2021                  * up the queue, clear SELDO and LQOBUSFREE,
2022                  * and allow the sequencer to restart the select
2023                  * out at its leisure.
2024                  */
2025                 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
2026                 scbid = ahd_inw(ahd, CURRSCB);
2027                 scb = ahd_lookup_scb(ahd, scbid);
2028                 if (scb == NULL)
2029                        panic("SCB not valid during LQOBUSFREE");
2030                 /*
2031                  * Clear the status.
2032                  */
2033                 ahd_outb(ahd, CLRLQOINT1, CLRLQOBUSFREE);
2034                 if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0)
2035                         ahd_outb(ahd, CLRLQOINT1, 0);
2036                 ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO);
2037                 ahd_flush_device_writes(ahd);
2038                 ahd_outb(ahd, CLRSINT0, CLRSELDO);
2039
2040                 /*
2041                  * Return the LQO manager to its idle loop.  It will
2042                  * not do this automatically if the busfree occurs
2043                  * after the first REQ of either the LQ or command
2044                  * packet or between the LQ and command packet.
2045                  */
2046                 ahd_outb(ahd, LQCTL2, ahd_inb(ahd, LQCTL2) | LQOTOIDLE);
2047
2048                 /*
2049                  * Update the waiting for selection queue so
2050                  * we restart on the correct SCB.
2051                  */
2052                 waiting_h = ahd_inw(ahd, WAITING_TID_HEAD);
2053                 saved_scbptr = ahd_get_scbptr(ahd);
2054                 if (waiting_h != scbid) {
2055
2056                         ahd_outw(ahd, WAITING_TID_HEAD, scbid);
2057                         waiting_t = ahd_inw(ahd, WAITING_TID_TAIL);
2058                         if (waiting_t == waiting_h) {
2059                                 ahd_outw(ahd, WAITING_TID_TAIL, scbid);
2060                                 next = SCB_LIST_NULL;
2061                         } else {
2062                                 ahd_set_scbptr(ahd, waiting_h);
2063                                 next = ahd_inw_scbram(ahd, SCB_NEXT2);
2064                         }
2065                         ahd_set_scbptr(ahd, scbid);
2066                         ahd_outw(ahd, SCB_NEXT2, next);
2067                 }
2068                 ahd_set_scbptr(ahd, saved_scbptr);
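                     /*
                      * Retry the command a limited number of times before
                      * failing it with an uncorrected parity error.
                      */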
2069                 if (scb->crc_retry_count < AHD_MAX_LQ_CRC_ERRORS) {
2070                         if (SCB_IS_SILENT(scb) == FALSE) {
2071                                 ahd_print_path(ahd, scb);
2072                                 kprintf("Probable outgoing LQ CRC error.  "
2073                                        "Retrying command\n");
2074                         }
2075                         scb->crc_retry_count++;
2076                 } else {
2077                         aic_set_transaction_status(scb, CAM_UNCOR_PARITY);
2078                         aic_freeze_scb(scb);
2079                         ahd_freeze_devq(ahd, scb);
2080                 }
2081                 /* Return unpausing the sequencer. */
2082                 return (0);
2083         } else if ((ahd_inb(ahd, PERRDIAG) & PARITYERR) != 0) {
2084                 /*
2085                  * Ignore what are really parity errors that
2086                  * occur on the last REQ of a free-running
2087                  * clock prior to going busfree.  Some drives
2088                  * do not properly active-negate just before
2089                  * going busfree, resulting in a parity glitch.
2090                  */
2091                 ahd_outb(ahd, CLRSINT1, CLRSCSIPERR|CLRBUSFREE);
2092 #ifdef AHD_DEBUG
2093                 if ((ahd_debug & AHD_SHOW_MASKED_ERRORS) != 0)
2094                         kprintf("%s: Parity on last REQ detected "
2095                                "during busfree phase.\n",
2096                                ahd_name(ahd));
2097 #endif
2098                 /* Return unpausing the sequencer. */
2099                 return (0);
2100         }
2101         if (ahd->src_mode != AHD_MODE_SCSI) {
2102                 u_int   scbid;
2103                 struct  scb *scb;
2104
2105                 scbid = ahd_get_scbptr(ahd);
2106                 scb = ahd_lookup_scb(ahd, scbid);
2107                 ahd_print_path(ahd, scb);
2108                 kprintf("Unexpected PKT busfree condition\n");
2109                 ahd_dump_card_state(ahd);
2110                 ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb), 'A',
2111                                SCB_GET_LUN(scb), SCB_GET_TAG(scb),
2112                                ROLE_INITIATOR, CAM_UNEXP_BUSFREE);
2113
2114                 /* Return restarting the sequencer. */
2115                 return (1);
2116         }
2117         kprintf("%s: Unexpected PKT busfree condition\n", ahd_name(ahd));
2118         ahd_dump_card_state(ahd);
2119         /* Restart the sequencer. */
2120         return (1);
2121 }
2122
2123 /*
2124  * Non-packetized unexpected or expected busfree.
2125  */
2126 static int
2127 ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
2128 {
2129         struct  ahd_devinfo devinfo;
2130         struct  scb *scb;
2131         u_int   lastphase;
2132         u_int   saved_scsiid;
2133         u_int   saved_lun;
2134         u_int   target;
2135         u_int   initiator_role_id;
2136         u_int   scbid;
2137         u_int   ppr_busfree;
2138         int     printerror;
2139
2140         /*
2141          * Look at what phase we were last in.  If it was message out,
2142          * chances are pretty good that the busfree was in response
2143          * to one of our abort requests.
2144          */
2145         lastphase = ahd_inb(ahd, LASTPHASE);
2146         saved_scsiid = ahd_inb(ahd, SAVED_SCSIID);
2147         saved_lun = ahd_inb(ahd, SAVED_LUN);
2148         target = SCSIID_TARGET(ahd, saved_scsiid);
2149         initiator_role_id = SCSIID_OUR_ID(saved_scsiid);
2150         ahd_compile_devinfo(&devinfo, initiator_role_id,
2151                             target, saved_lun, 'A', ROLE_INITIATOR);
2152         printerror = 1;
2153
2154         scbid = ahd_get_scbptr(ahd);
2155         scb = ahd_lookup_scb(ahd, scbid);
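             /* Disregard the SCB if the connection was never identified. */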
2156         if (scb != NULL
2157          && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) != 0)
2158                 scb = NULL;
2159
2160         ppr_busfree = (ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0;
2161         if (lastphase == P_MESGOUT) {
2162                 u_int tag;
2163
2164                 tag = SCB_LIST_NULL;
2165                 if (ahd_sent_msg(ahd, AHDMSG_1B, MSG_ABORT_TAG, TRUE)
2166                  || ahd_sent_msg(ahd, AHDMSG_1B, MSG_ABORT, TRUE)) {
2167                         int found;
2168                         int sent_msg;
2169
2170                         if (scb == NULL) {
2171                                 ahd_print_devinfo(ahd, &devinfo);
2172                                 kprintf("Abort for unidentified "
2173                                        "connection completed.\n");
2174                                 /* restart the sequencer. */
2175                                 return (1);
2176                         }
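                            /* The last byte we sent identifies which abort message this was. */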
2177                         sent_msg = ahd->msgout_buf[ahd->msgout_index - 1];
2178                         ahd_print_path(ahd, scb);
2179                         kprintf("SCB %d - Abort%s Completed.\n",
2180                                SCB_GET_TAG(scb),
2181                                sent_msg == MSG_ABORT_TAG ? " Tag" : "");
2182
2183                         if (sent_msg == MSG_ABORT_TAG)
2184                                 tag = SCB_GET_TAG(scb);
2185
2186                         if ((scb->flags & SCB_CMDPHASE_ABORT) != 0) {
2187                                 /*
2188                                  * This abort is in response to an
2189                                  * unexpected switch to command phase
2190                                  * for a packetized connection.  Since
2191                                  * the identify message was never sent,
2192                                  * "saved lun" is 0.  We really want to
2193                                  * abort only the SCB that encountered
2194                                  * this error, which could have a different
2195                                  * lun.  The SCB will be retried so the OS
2196                                  * will see the UA after renegotiating to
2197                                  * packetized.
2198                                  */
2199                                 tag = SCB_GET_TAG(scb);
2200                                 saved_lun = scb->hscb->lun;
2201                         }
2202                         found = ahd_abort_scbs(ahd, target, 'A', saved_lun,
2203                                                tag, ROLE_INITIATOR,
2204                                                CAM_REQ_ABORTED);
2205                         kprintf("found == 0x%x\n", found);
2206                         printerror = 0;
2207                 } else if (ahd_sent_msg(ahd, AHDMSG_1B,
2208                                         MSG_BUS_DEV_RESET, TRUE)) {
2209 #if defined(__DragonFly__) || defined(__FreeBSD__)
2210                         /*
2211                          * Don't mark the user's request for this BDR
2212                          * as completing with CAM_BDR_SENT.  CAM3
2213                          * specifies CAM_REQ_CMP.
2214                          */
2215                         if (scb != NULL
2216                          && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV
2217                          && ahd_match_scb(ahd, scb, target, 'A',
2218                                           CAM_LUN_WILDCARD, SCB_LIST_NULL,
2219                                           ROLE_INITIATOR))
2220                                 aic_set_transaction_status(scb, CAM_REQ_CMP);
2221 #endif
2222                         ahd_handle_devreset(ahd, &devinfo, CAM_LUN_WILDCARD,
2223                                             CAM_BDR_SENT, "Bus Device Reset",
2224                                             /*verbose_level*/0);
2225                         printerror = 0;
2226                 } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, FALSE)
2227                         && ppr_busfree == 0) {
2228                         struct ahd_initiator_tinfo *tinfo;
2229                         struct ahd_tmode_tstate *tstate;
2230
2231                         /*
2232                          * PPR Rejected.  Try non-ppr negotiation
2233                          * and retry command.
2234                          */
2235 #ifdef AHD_DEBUG
2236                         if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
2237                                 kprintf("PPR negotiation rejected busfree.\n");
2238 #endif
2239                         tinfo = ahd_fetch_transinfo(ahd, devinfo.channel,
2240                                                     devinfo.our_scsiid,
2241                                                     devinfo.target, &tstate);
2242                         tinfo->curr.transport_version = 2;
2243                         tinfo->goal.transport_version = 2;
2244                         tinfo->goal.ppr_options = 0;
2245                         ahd_qinfifo_requeue_tail(ahd, scb);
2246                         printerror = 0;
2247                 } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, FALSE)
2248                         && ppr_busfree == 0) {
2249                         /*
2250                          * Negotiation Rejected.  Go-narrow and
2251                          * retry command.
2252                          */
2253 #ifdef AHD_DEBUG
2254                         if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
2255                                 kprintf("WDTR negotiation rejected busfree.\n");
2256 #endif
2257                         ahd_set_width(ahd, &devinfo,
2258                                       MSG_EXT_WDTR_BUS_8_BIT,
2259                                       AHD_TRANS_CUR|AHD_TRANS_GOAL,
2260                                       /*paused*/TRUE);
2261                         ahd_qinfifo_requeue_tail(ahd, scb);
2262                         printerror = 0;
2263                 } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, FALSE)
2264                         && ppr_busfree == 0) {
2265                         /*
2266                          * Negotiation Rejected.  Go-async and
2267                          * retry command.
2268                          */
2269 #ifdef AHD_DEBUG
2270                         if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
2271                                 kprintf("SDTR negotiation rejected busfree.\n");
2272 #endif
2273                         ahd_set_syncrate(ahd, &devinfo,
2274                                         /*period*/0, /*offset*/0,
2275                                         /*ppr_options*/0,
2276                                         AHD_TRANS_CUR|AHD_TRANS_GOAL,
2277                                         /*paused*/TRUE);
2278                         ahd_qinfifo_requeue_tail(ahd, scb);
2279                         printerror = 0;
2280                 } else if ((ahd->msg_flags & MSG_FLAG_EXPECT_IDE_BUSFREE) != 0
2281                         && ahd_sent_msg(ahd, AHDMSG_1B,
2282                                          MSG_INITIATOR_DET_ERR, TRUE)) {
2283
2284 #ifdef AHD_DEBUG
2285                         if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
2286                                 kprintf("Expected IDE Busfree\n");
2287 #endif
2288                         printerror = 0;
2289                 } else if ((ahd->msg_flags & MSG_FLAG_EXPECT_QASREJ_BUSFREE)
2290                         && ahd_sent_msg(ahd, AHDMSG_1B,
2291                                         MSG_MESSAGE_REJECT, TRUE)) {
2292
2293 #ifdef AHD_DEBUG
2294                         if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
2295                                 kprintf("Expected QAS Reject Busfree\n");
2296 #endif
2297                         printerror = 0;
2298                 }
2299         }
2300
2301         /*
2302          * The busfree required flag is honored at the end of
2303          * the message phases.  We check it last in case we
2304          * had to send some other message that caused a busfree.
2305          */
2306         if (printerror != 0
2307          && (lastphase == P_MESGIN || lastphase == P_MESGOUT)
2308          && ((ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0)) {
2309
2310                 ahd_freeze_devq(ahd, scb);
2311                 aic_set_transaction_status(scb, CAM_REQUEUE_REQ);
2312                 aic_freeze_scb(scb);
2313                 if ((ahd->msg_flags & MSG_FLAG_IU_REQ_CHANGED) != 0) {
2314                         ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb),
2315                                        SCB_GET_CHANNEL(ahd, scb),
2316                                        SCB_GET_LUN(scb), SCB_LIST_NULL,
2317                                        ROLE_INITIATOR, CAM_REQ_ABORTED);
2318                 } else {
2319 #ifdef AHD_DEBUG
2320                         if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
2321                                 kprintf("PPR Negotiation Busfree.\n");
2322 #endif
2323                         ahd_done(ahd, scb);
2324                 }
2325                 printerror = 0;
2326         }
2327         if (printerror != 0) {
2328                 int aborted;
2329
2330                 aborted = 0;
2331                 if (scb != NULL) {
2332                         u_int tag;
2333
2334                         if ((scb->hscb->control & TAG_ENB) != 0)
2335                                 tag = SCB_GET_TAG(scb);
2336                         else
2337                                 tag = SCB_LIST_NULL;
2338                         ahd_print_path(ahd, scb);
2339                         aborted = ahd_abort_scbs(ahd, target, 'A',
2340                                        SCB_GET_LUN(scb), tag,
2341                                        ROLE_INITIATOR,
2342                                        CAM_UNEXP_BUSFREE);
2343                 } else {
2344                         /*
2345                          * We had not fully identified this connection,
2346                          * so we cannot abort anything.
2347                          */
2348                         kprintf("%s: ", ahd_name(ahd));
2349                 }
2350                 if (lastphase != P_BUSFREE)
2351                         ahd_force_renegotiation(ahd, &devinfo);
2352                 kprintf("Unexpected busfree %s, %d SCBs aborted, "
2353                        "PRGMCNT == 0x%x\n",
2354                        ahd_lookup_phase_entry(lastphase)->phasemsg,
2355                        aborted,
2356                        ahd_inw(ahd, PRGMCNT));
2357                 ahd_dump_card_state(ahd);
2358         }
2359         /* Always restart the sequencer. */
2360         return (1);
2361 }
2362
2363 static void
2364 ahd_handle_proto_violation(struct ahd_softc *ahd)
2365 {
2366         struct  ahd_devinfo devinfo;
2367         struct  scb *scb;
2368         u_int   scbid;
2369         u_int   seq_flags;
2370         u_int   curphase;
2371         u_int   lastphase;
2372         int     found;
2373
2374         ahd_fetch_devinfo(ahd, &devinfo);
2375         scbid = ahd_get_scbptr(ahd);
2376         scb = ahd_lookup_scb(ahd, scbid);
2377         seq_flags = ahd_inb(ahd, SEQ_FLAGS);
2378         curphase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
2379         lastphase = ahd_inb(ahd, LASTPHASE);
2380         if ((seq_flags & NOT_IDENTIFIED) != 0) {
2381
2382                 /*
2383                  * The reconnecting target either did not send an
2384                  * identify message, or did, but we didn't find an SCB
2385                  * to match.
2386                  */
2387                 ahd_print_devinfo(ahd, &devinfo);
2388                 kprintf("Target did not send an IDENTIFY message. "
2389                        "LASTPHASE = 0x%x.\n", lastphase);
2390                 scb = NULL;
2391         } else if (scb == NULL) {
2392                 /*
2393                  * We don't seem to have an SCB active for this
2394                  * transaction.  Print an error and reset the bus.
2395                  */
2396                 ahd_print_devinfo(ahd, &devinfo);
2397                 kprintf("No SCB found during protocol violation\n");
2398                 goto proto_violation_reset;
2399         } else {
2400                 aic_set_transaction_status(scb, CAM_SEQUENCE_FAIL);
2401                 if ((seq_flags & NO_CDB_SENT) != 0) {
2402                         ahd_print_path(ahd, scb);
2403                         kprintf("No or incomplete CDB sent to device.\n");
2404                 } else if ((ahd_inb_scbram(ahd, SCB_CONTROL)
2405                           & STATUS_RCVD) == 0) {
2406                         /*
2407                          * The target never bothered to provide status to
2408                          * us prior to completing the command.  Since we don't
2409                          * know the disposition of this command, we must attempt
2410                          * to abort it.  Assert ATN and prepare to send an abort
2411                          * message.
2412                          */
2413                         ahd_print_path(ahd, scb);
2414                         kprintf("Completed command without status.\n");
2415                 } else {
2416                         ahd_print_path(ahd, scb);
2417                         kprintf("Unknown protocol violation.\n");
2418                         ahd_dump_card_state(ahd);
2419                 }
2420         }
2421         if ((lastphase & ~P_DATAIN_DT) == 0
2422          || lastphase == P_COMMAND) {
2423 proto_violation_reset:
2424                 /*
2425                  * Target either went directly to data
2426                  * phase or didn't respond to our ATN.
2427                  * The only safe thing to do is to blow
2428                  * it away with a bus reset.
2429                  */
2430                 found = ahd_reset_channel(ahd, 'A', TRUE);
2431                 kprintf("%s: Issued Channel %c Bus Reset. "
2432                        "%d SCBs aborted\n", ahd_name(ahd), 'A', found);
2433         } else {
2434                 /*
2435                  * Leave the selection hardware off in case
2436                  * this abort attempt will affect commands that
2437                  * have yet to be sent.
2438                  */
2439                 ahd_outb(ahd, SCSISEQ0,
2440                          ahd_inb(ahd, SCSISEQ0) & ~ENSELO);
2441                 ahd_assert_atn(ahd);
2442                 ahd_outb(ahd, MSG_OUT, HOST_MSG);
2443                 if (scb == NULL) {
2444                         ahd_print_devinfo(ahd, &devinfo);
2445                         ahd->msgout_buf[0] = MSG_ABORT_TASK;
2446                         ahd->msgout_len = 1;
2447                         ahd->msgout_index = 0;
2448                         ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
2449                 } else {
2450                         ahd_print_path(ahd, scb);
2451                         scb->flags |= SCB_ABORT;
2452                 }
2453                 kprintf("Protocol violation %s.  Attempting to abort.\n",
2454                        ahd_lookup_phase_entry(curphase)->phasemsg);
2455         }
2456 }
2457
2458 /*
2459  * Force renegotiation to occur the next time we initiate
2460  * a command to the current device.
2461  */
2462 static void
2463 ahd_force_renegotiation(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
2464 {
2465         struct  ahd_initiator_tinfo *targ_info;
2466         struct  ahd_tmode_tstate *tstate;
2467
2468 #ifdef AHD_DEBUG
2469         if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
2470                 ahd_print_devinfo(ahd, devinfo);
2471                 kprintf("Forcing renegotiation\n");
2472         }
2473 #endif
2474         targ_info = ahd_fetch_transinfo(ahd,
2475                                         devinfo->channel,
2476                                         devinfo->our_scsiid,
2477                                         devinfo->target,
2478                                         &tstate);
2479         ahd_update_neg_request(ahd, devinfo, tstate,
2480                                targ_info, AHD_NEG_IF_NON_ASYNC);
2481 }
2482
2483 #define AHD_MAX_STEPS 2000
2484 void
2485 ahd_clear_critical_section(struct ahd_softc *ahd)
2486 {
2487         ahd_mode_state  saved_modes;
2488         int             stepping;
2489         int             steps;
2490         int             first_instr;
2491         u_int           simode0;
2492         u_int           simode1;
2493         u_int           simode3;
2494         u_int           lqimode0;
2495         u_int           lqimode1;
2496         u_int           lqomode0;
2497         u_int           lqomode1;
2498
2499         if (ahd->num_critical_sections == 0)
2500                 return;
2501
2502         stepping = FALSE;
2503         steps = 0;
2504         first_instr = 0;
2505         simode0 = 0;
2506         simode1 = 0;
2507         simode3 = 0;
2508         lqimode0 = 0;
2509         lqimode1 = 0;
2510         lqomode0 = 0;
2511         lqomode1 = 0;
2512         saved_modes = ahd_save_modes(ahd);
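        /*
         * Single-step the sequencer until its program counter falls
         * outside every critical section.  Most SCSI interrupt sources
         * are masked while we step and restored once we are clear.
         */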
2513         for (;;) {
2514                 struct  cs *cs;
2515                 u_int   seqaddr;
2516                 u_int   i;
2517
2518                 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
2519                 seqaddr = ahd_inw(ahd, CURADDR);
2520
2521                 cs = ahd->critical_sections;
2522                 for (i = 0; i < ahd->num_critical_sections; i++, cs++) {
2523                         
2524                         if (cs->begin < seqaddr && cs->end >= seqaddr)
2525                                 break;
2526                 }
2527
2528                 if (i == ahd->num_critical_sections)
2529                         break;
2530
2531                 if (steps > AHD_MAX_STEPS) {
2532                         kprintf("%s: Infinite loop in critical section\n"
2533                                "%s: First Instruction 0x%x now 0x%x\n",
2534                                ahd_name(ahd), ahd_name(ahd), first_instr,
2535                                seqaddr);
2536                         ahd_dump_card_state(ahd);
2537                         panic("critical section loop");
2538                 }
2539
2540                 steps++;
2541 #ifdef AHD_DEBUG
2542                 if ((ahd_debug & AHD_SHOW_MISC) != 0)
2543                         kprintf("%s: Single stepping at 0x%x\n", ahd_name(ahd),
2544                                seqaddr);
2545 #endif
2546                 if (stepping == FALSE) {
2547
2548                         first_instr = seqaddr;
2549                         ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
2550                         simode0 = ahd_inb(ahd, SIMODE0);
2551                         simode3 = ahd_inb(ahd, SIMODE3);
2552                         lqimode0 = ahd_inb(ahd, LQIMODE0);
2553                         lqimode1 = ahd_inb(ahd, LQIMODE1);
2554                         lqomode0 = ahd_inb(ahd, LQOMODE0);
2555                         lqomode1 = ahd_inb(ahd, LQOMODE1);
2556                         ahd_outb(ahd, SIMODE0, 0);
2557                         ahd_outb(ahd, SIMODE3, 0);
2558                         ahd_outb(ahd, LQIMODE0, 0);
2559                         ahd_outb(ahd, LQIMODE1, 0);
2560                         ahd_outb(ahd, LQOMODE0, 0);
2561                         ahd_outb(ahd, LQOMODE1, 0);
2562                         ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
2563                         simode1 = ahd_inb(ahd, SIMODE1);
2564                         /*
2565                          * We don't clear ENBUSFREE.  Unfortunately
2566                          * we cannot re-enable busfree detection within
2567                          * the current connection, so we must leave it
2568                          * on while single stepping.
2569                          */
2570                         ahd_outb(ahd, SIMODE1, simode1 & ENBUSFREE);
2571                         ahd_outb(ahd, SEQCTL0, ahd_inb(ahd, SEQCTL0) | STEP);
2572                         stepping = TRUE;
2573                 }
2574                 ahd_outb(ahd, CLRSINT1, CLRBUSFREE);
2575                 ahd_outb(ahd, CLRINT, CLRSCSIINT);
2576                 ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode);
2577                 ahd_outb(ahd, HCNTRL, ahd->unpause);
2578                 while (!ahd_is_paused(ahd))
2579                         aic_delay(200);
2580                 ahd_update_modes(ahd);
2581         }
2582         if (stepping) {
2583                 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
2584                 ahd_outb(ahd, SIMODE0, simode0);
2585                 ahd_outb(ahd, SIMODE3, simode3);
2586                 ahd_outb(ahd, LQIMODE0, lqimode0);
2587                 ahd_outb(ahd, LQIMODE1, lqimode1);
2588                 ahd_outb(ahd, LQOMODE0, lqomode0);
2589                 ahd_outb(ahd, LQOMODE1, lqomode1);
2590                 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
2591                 ahd_outb(ahd, SEQCTL0, ahd_inb(ahd, SEQCTL0) & ~STEP);
2592                 ahd_outb(ahd, SIMODE1, simode1);
2593                 /*
2594                  * SCSIINT seems to glitch occasionally when
2595                  * the interrupt masks are restored.  Clear SCSIINT
2596                  * one more time so that only persistent errors
2597                  * are seen as a real interrupt.
2598                  */
2599                 ahd_outb(ahd, CLRINT, CLRSCSIINT);
2600         }
2601         ahd_restore_modes(ahd, saved_modes);
2602 }
2603
2604 /*
2605  * Clear any pending interrupt status.
2606  */
2607 void
2608 ahd_clear_intstat(struct ahd_softc *ahd)
2609 {
2610         AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
2611                          ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
2612         /* Clear any interrupt conditions this may have caused */
2613         ahd_outb(ahd, CLRLQIINT0, CLRLQIATNQAS|CLRLQICRCT1|CLRLQICRCT2
2614                                  |CLRLQIBADLQT|CLRLQIATNLQ|CLRLQIATNCMD);
2615         ahd_outb(ahd, CLRLQIINT1, CLRLQIPHASE_LQ|CLRLQIPHASE_NLQ|CLRLIQABORT
2616                                  |CLRLQICRCI_LQ|CLRLQICRCI_NLQ|CLRLQIBADLQI
2617                                  |CLRLQIOVERI_LQ|CLRLQIOVERI_NLQ|CLRNONPACKREQ);
2618         ahd_outb(ahd, CLRLQOINT0, CLRLQOTARGSCBPERR|CLRLQOSTOPT2|CLRLQOATNLQ
2619                                  |CLRLQOATNPKT|CLRLQOTCRC);
2620         ahd_outb(ahd, CLRLQOINT1, CLRLQOINITSCBPERR|CLRLQOSTOPI2|CLRLQOBADQAS
2621                                  |CLRLQOBUSFREE|CLRLQOPHACHGINPKT);
2622         if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) {
2623                 ahd_outb(ahd, CLRLQOINT0, 0);
2624                 ahd_outb(ahd, CLRLQOINT1, 0);
2625         }
2626         ahd_outb(ahd, CLRSINT3, CLRNTRAMPERR|CLROSRAMPERR);
2627         ahd_outb(ahd, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI
2628                                 |CLRBUSFREE|CLRSCSIPERR|CLRREQINIT);
2629         ahd_outb(ahd, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO
2630                                 |CLRIOERR|CLROVERRUN);
2631         ahd_outb(ahd, CLRINT, CLRSCSIINT);
2632 }
2633
2634 /**************************** Debugging Routines ******************************/
2635 #ifdef AHD_DEBUG
2636 uint32_t ahd_debug = AHD_DEBUG_OPTS;
2637 #endif
2638 void
2639 ahd_print_scb(struct scb *scb)
2640 {
2641         struct hardware_scb *hscb;
2642         int i;
2643
2644         hscb = scb->hscb;
2645         kprintf("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n",
2646                (void *)scb,
2647                hscb->control,
2648                hscb->scsiid,
2649                hscb->lun,
2650                hscb->cdb_len);
2651         kprintf("Shared Data: ");
2652         for (i = 0; i < sizeof(hscb->shared_data.idata.cdb); i++)
2653                 kprintf("%#02x", hscb->shared_data.idata.cdb[i]);
2654         kprintf("        dataptr:%#x%x datacnt:%#x sgptr:%#x tag:%#x\n",
2655                (uint32_t)((aic_le64toh(hscb->dataptr) >> 32) & 0xFFFFFFFF),
2656                (uint32_t)(aic_le64toh(hscb->dataptr) & 0xFFFFFFFF),
2657                aic_le32toh(hscb->datacnt),
2658                aic_le32toh(hscb->sgptr),
2659                SCB_GET_TAG(scb));
2660         ahd_dump_sglist(scb);
2661 }
2662
2663 void
2664 ahd_dump_sglist(struct scb *scb)
2665 {
2666         int i;
2667
2668         if (scb->sg_count > 0) {
2669                 if ((scb->ahd_softc->flags & AHD_64BIT_ADDRESSING) != 0) {
2670                         struct ahd_dma64_seg *sg_list;
2671
2672                         sg_list = (struct ahd_dma64_seg*)scb->sg_list;
2673                         for (i = 0; i < scb->sg_count; i++) {
2674                                 uint64_t addr;
2675                                 uint32_t len;
2676
2677                                 addr = aic_le64toh(sg_list[i].addr);
2678                                 len = aic_le32toh(sg_list[i].len);
2679                                 kprintf("sg[%d] - Addr 0x%x%x : Length %d%s\n",
2680                                        i,
2681                                        (uint32_t)((addr >> 32) & 0xFFFFFFFF),
2682                                        (uint32_t)(addr & 0xFFFFFFFF),
2683                                        len & AHD_SG_LEN_MASK,
2684                                        (len & AHD_DMA_LAST_SEG)
2685                                      ? " Last" : "");
2686                         }
2687                 } else {
2688                         struct ahd_dma_seg *sg_list;
2689
2690                         sg_list = (struct ahd_dma_seg*)scb->sg_list;
2691                         for (i = 0; i < scb->sg_count; i++) {
2692                                 uint32_t len;
2693
2694                                 len = aic_le32toh(sg_list[i].len);
2695                                 kprintf("sg[%d] - Addr 0x%x%x : Length %d%s\n",
2696                                        i,
2697                                        (len & AHD_SG_HIGH_ADDR_MASK) >> 24,
2698                                        aic_le32toh(sg_list[i].addr),
2699                                        len & AHD_SG_LEN_MASK,
2700                                        len & AHD_DMA_LAST_SEG ? " Last" : "");
2701                         }
2702                 }
2703         }
2704 }
2705
2706 /************************* Transfer Negotiation *******************************/
2707 /*
2708  * Allocate per target mode instance (ID we respond to as a target)
2709  * transfer negotiation data structures.
2710  */
2711 static struct ahd_tmode_tstate *
2712 ahd_alloc_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel)
2713 {
2714         struct ahd_tmode_tstate *master_tstate;
2715         struct ahd_tmode_tstate *tstate;
2716         int i;
2717
2718         master_tstate = ahd->enabled_targets[ahd->our_id];
2719         if (ahd->enabled_targets[scsi_id] != NULL
2720          && ahd->enabled_targets[scsi_id] != master_tstate)
2721                 panic("%s: ahd_alloc_tstate - Target already allocated",
2722                       ahd_name(ahd));
2723         tstate = kmalloc(sizeof(*tstate), M_DEVBUF, M_INTWAIT);
2724
2725         /*
2726          * If we have allocated a master tstate, copy user settings from
2727          * the master tstate (taken from SRAM or the EEPROM) for this
2728          * channel, but reset our current and goal settings to async/narrow
2729          * until an initiator talks to us.
2730          */
2731         if (master_tstate != NULL) {
2732                 memcpy(tstate, master_tstate, sizeof(*tstate));
2733                 memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns));
2734                 for (i = 0; i < 16; i++) {
2735                         memset(&tstate->transinfo[i].curr, 0,
2736                               sizeof(tstate->transinfo[i].curr));
2737                         memset(&tstate->transinfo[i].goal, 0,
2738                               sizeof(tstate->transinfo[i].goal));
2739                 }
2740         } else
2741                 memset(tstate, 0, sizeof(*tstate));
2742         ahd->enabled_targets[scsi_id] = tstate;
2743         return (tstate);
2744 }
2745
2746 #ifdef AHD_TARGET_MODE
2747 /*
2748  * Free per target mode instance (ID we respond to as a target)
2749  * transfer negotiation data structures.
2750  */
2751 static void
2752 ahd_free_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel, int force)
2753 {
2754         struct ahd_tmode_tstate *tstate;
2755
2756         /*
2757          * Don't clean up our "master" tstate.
2758          * It has our default user settings.
2759          */
2760         if (scsi_id == ahd->our_id
2761          && force == FALSE)
2762                 return;
2763
2764         tstate = ahd->enabled_targets[scsi_id];
2765         if (tstate != NULL)
2766                 kfree(tstate, M_DEVBUF);
2767         ahd->enabled_targets[scsi_id] = NULL;
2768 }
2769 #endif
2770
2771 /*
2772  * Called when we have an active connection to a target on the bus,
2773  * this function finds the nearest period to the input period, limited
2774  * by the capabilities of the bus connection and the sync settings for
2775  * the target.
2776  */
2777 void
2778 ahd_devlimited_syncrate(struct ahd_softc *ahd,
2779                         struct ahd_initiator_tinfo *tinfo,
2780                         u_int *period, u_int *ppr_options, role_t role)
2781 {
2782         struct  ahd_transinfo *transinfo;
2783         u_int   maxsync;
2784
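        /*
         * Paced syncrates are only considered when the LVD transceivers
         * are enabled and no expander is active on this segment;
         * otherwise we are limited to Ultra rates below.
         */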
2785         if ((ahd_inb(ahd, SBLKCTL) & ENAB40) != 0
2786          && (ahd_inb(ahd, SSTAT2) & EXP_ACTIVE) == 0) {
2787                 maxsync = AHD_SYNCRATE_PACED;
2788         } else {
2789                 maxsync = AHD_SYNCRATE_ULTRA;
2790                 /* Can't do DT related options on an SE bus */
2791                 *ppr_options &= MSG_EXT_PPR_QAS_REQ;
2792         }
2793         /*
2794          * Never allow a value higher than our current goal
2795          * period; otherwise we may allow a target-initiated
2796          * negotiation to go above the limit as set by the
2797          * user.  In the case of an initiator initiated
2798          * sync negotiation, we limit based on the user
2799          * setting.  This allows the system to still accept
2800          * incoming negotiations even if target initiated
2801          * negotiation is not performed.
2802          */
2803         if (role == ROLE_TARGET)
2804                 transinfo = &tinfo->user;
2805         else 
2806                 transinfo = &tinfo->goal;
2807         *ppr_options &= (transinfo->ppr_options|MSG_EXT_PPR_PCOMP_EN);
2808         if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) {
2809                 maxsync = MAX(maxsync, AHD_SYNCRATE_ULTRA2);
2810                 *ppr_options &= ~MSG_EXT_PPR_DT_REQ;
2811         }
2812         if (transinfo->period == 0) {
2813                 *period = 0;
2814                 *ppr_options = 0;
2815         } else {
2816                 *period = MAX(*period, transinfo->period);
2817                 ahd_find_syncrate(ahd, period, ppr_options, maxsync);
2818         }
2819 }
2820
2821 /*
2822  * Look up the valid period to SCSIRATE conversion in our table.
2823  * Return the period and offset that should be sent to the target
2824  * if this was the beginning of an SDTR.
2825  */
2826 void
2827 ahd_find_syncrate(struct ahd_softc *ahd, u_int *period,
2828                   u_int *ppr_options, u_int maxsync)
2829 {
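        /*
         * Smaller period factors mean faster rates, so clamp the request
         * to the fastest rate we support (maxsync) and fall back to
         * async (period 0) if it is slower than our minimum syncrate.
         */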
2830         if (*period < maxsync)
2831                 *period = maxsync;
2832
2833         if ((*ppr_options & MSG_EXT_PPR_DT_REQ) != 0
2834          && *period > AHD_SYNCRATE_MIN_DT)
2835                 *ppr_options &= ~MSG_EXT_PPR_DT_REQ;
2836                 
2837         if (*period > AHD_SYNCRATE_MIN)
2838                 *period = 0;
2839
2840         /* Honor PPR option conformance rules. */
2841         if (*period > AHD_SYNCRATE_PACED)
2842                 *ppr_options &= ~MSG_EXT_PPR_RTI;
2843
2844         if ((*ppr_options & MSG_EXT_PPR_IU_REQ) == 0)
2845                 *ppr_options &= (MSG_EXT_PPR_DT_REQ|MSG_EXT_PPR_QAS_REQ);
2846
2847         if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0)
2848                 *ppr_options &= MSG_EXT_PPR_QAS_REQ;
2849
2850         /* Skip all PACED only entries if IU is not available */
2851         if ((*ppr_options & MSG_EXT_PPR_IU_REQ) == 0
2852          && *period < AHD_SYNCRATE_DT)
2853                 *period = AHD_SYNCRATE_DT;
2854
2855         /* Skip all DT only entries if DT is not available */
2856         if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0
2857          && *period < AHD_SYNCRATE_ULTRA2)
2858                 *period = AHD_SYNCRATE_ULTRA2;
2859 }
2860
2861 /*
2862  * Truncate the given synchronous offset to a value the
2863  * current adapter type and syncrate are capable of.
2864  */
2865 void
2866 ahd_validate_offset(struct ahd_softc *ahd,
2867                     struct ahd_initiator_tinfo *tinfo,
2868                     u_int period, u_int *offset, int wide,
2869                     role_t role)
2870 {
2871         u_int maxoffset;
2872
2873         /* Limit offset to what we can do */
2874         if (period == 0)
2875                 maxoffset = 0;
2876         else if (period <= AHD_SYNCRATE_PACED) {
2877                 if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0)
2878                         maxoffset = MAX_OFFSET_PACED_BUG;
2879                 else
2880                         maxoffset = MAX_OFFSET_PACED;
2881         } else
2882                 maxoffset = MAX_OFFSET_NON_PACED;
2883         *offset = MIN(*offset, maxoffset);
2884         if (tinfo != NULL) {
2885                 if (role == ROLE_TARGET)
2886                         *offset = MIN(*offset, tinfo->user.offset);
2887                 else
2888                         *offset = MIN(*offset, tinfo->goal.offset);
2889         }
2890 }
2891
2892 /*
2893  * Truncate the given transfer width parameter to a value the
2894  * current adapter type is capable of.
2895  */
2896 void
2897 ahd_validate_width(struct ahd_softc *ahd, struct ahd_initiator_tinfo *tinfo,
2898                    u_int *bus_width, role_t role)
2899 {
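        /*
         * Truncate any request wider than (or unknown to) this adapter
         * to the widest width we support; 8-bit requests pass through.
         */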
2900         switch (*bus_width) {
2901         default:
2902                 if (ahd->features & AHD_WIDE) {
2903                         /* Respond Wide */
2904                         *bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2905                         break;
2906                 }
2907                 /* FALLTHROUGH */
2908         case MSG_EXT_WDTR_BUS_8_BIT:
2909                 *bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2910                 break;
2911         }
2912         if (tinfo != NULL) {
2913                 if (role == ROLE_TARGET)
2914                         *bus_width = MIN(tinfo->user.width, *bus_width);
2915                 else
2916                         *bus_width = MIN(tinfo->goal.width, *bus_width);
2917         }
2918 }
2919
2920 /*
2921  * Update the bitmask of targets with which the controller should
2922  * negotiate at the next convenient opportunity.  This currently
2923  * means the next time we send the initial identify messages for
2924  * a new transaction.
2925  */
2926 int
2927 ahd_update_neg_request(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
2928                        struct ahd_tmode_tstate *tstate,
2929                        struct ahd_initiator_tinfo *tinfo, ahd_neg_type neg_type)
2930 {
2931         u_int auto_negotiate_orig;
2932
2933         auto_negotiate_orig = tstate->auto_negotiate;
2934         if (neg_type == AHD_NEG_ALWAYS) {
2935                 /*
2936                  * Force our "current" settings to be
2937                  * unknown so that, unless a bus reset
2938                  * occurs, the need to renegotiate is
2939                  * recorded persistently.
2940                  */
2941                 if ((ahd->features & AHD_WIDE) != 0)
2942                         tinfo->curr.width = AHD_WIDTH_UNKNOWN;
2943                 tinfo->curr.period = AHD_PERIOD_UNKNOWN;
2944                 tinfo->curr.offset = AHD_OFFSET_UNKNOWN;
2945         }
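        /*
         * Negotiate whenever the current settings differ from the goal,
         * or, for AHD_NEG_IF_NON_ASYNC, whenever the goal is anything
         * other than narrow/async.
         */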
2946         if (tinfo->curr.period != tinfo->goal.period
2947          || tinfo->curr.width != tinfo->goal.width
2948          || tinfo->curr.offset != tinfo->goal.offset
2949          || tinfo->curr.ppr_options != tinfo->goal.ppr_options
2950          || (neg_type == AHD_NEG_IF_NON_ASYNC
2951           && (tinfo->goal.offset != 0
2952            || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT
2953            || tinfo->goal.ppr_options != 0)))
2954                 tstate->auto_negotiate |= devinfo->target_mask;
2955         else
2956                 tstate->auto_negotiate &= ~devinfo->target_mask;
2957
2958         return (auto_negotiate_orig != tstate->auto_negotiate);
2959 }
2960
2961 /*
2962  * Update the user/goal/curr tables of synchronous negotiation
2963  * parameters as well as, in the case of a current or active update,
2964  * any data structures on the host controller.  In the case of an
2965  * active update, the specified target is currently talking to us on
2966  * the bus, so the transfer parameter update must take effect
2967  * immediately.
2968  */
2969 void
2970 ahd_set_syncrate(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
2971                  u_int period, u_int offset, u_int ppr_options,
2972                  u_int type, int paused)
2973 {
2974         struct  ahd_initiator_tinfo *tinfo;
2975         struct  ahd_tmode_tstate *tstate;
2976         u_int   old_period;
2977         u_int   old_offset;
2978         u_int   old_ppr;
2979         int     active;
2980         int     update_needed;
2981
2982         active = (type & AHD_TRANS_ACTIVE) == AHD_TRANS_ACTIVE;
2983         update_needed = 0;
2984
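        /* A period or offset of zero means async; normalize both. */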
2985         if (period == 0 || offset == 0) {
2986                 period = 0;
2987                 offset = 0;
2988         }
2989
2990         tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
2991                                     devinfo->target, &tstate);
2992
2993         if ((type & AHD_TRANS_USER) != 0) {
2994                 tinfo->user.period = period;
2995                 tinfo->user.offset = offset;
2996                 tinfo->user.ppr_options = ppr_options;
2997         }
2998
2999         if ((type & AHD_TRANS_GOAL) != 0) {
3000                 tinfo->goal.period = period;
3001                 tinfo->goal.offset = offset;
3002                 tinfo->goal.ppr_options = ppr_options;
3003         }
3004
3005         old_period = tinfo->curr.period;
3006         old_offset = tinfo->curr.offset;
3007         old_ppr    = tinfo->curr.ppr_options;
3008
3009         if ((type & AHD_TRANS_CUR) != 0
3010          && (old_period != period
3011           || old_offset != offset
3012           || old_ppr != ppr_options)) {
3013
3014                 update_needed++;
3015
3016                 tinfo->curr.period = period;
3017                 tinfo->curr.offset = offset;
3018                 tinfo->curr.ppr_options = ppr_options;
3019
3020                 ahd_send_async(ahd, devinfo->channel, devinfo->target,
3021                                CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL);
3022                 if (bootverbose) {
3023                         if (offset != 0) {
3024                                 int options;
3025
3026                                 kprintf("%s: target %d synchronous with "
3027                                        "period = 0x%x, offset = 0x%x",
3028                                        ahd_name(ahd), devinfo->target,
3029                                        period, offset);
3030                                 options = 0;
3031                                 if ((ppr_options & MSG_EXT_PPR_RD_STRM) != 0) {
3032                                         kprintf("(RDSTRM");
3033                                         options++;
3034                                 }
3035                                 if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0) {
3036                                         kprintf("%s", options ? "|DT" : "(DT");
3037                                         options++;
3038                                 }
3039                                 if ((ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
3040                                         kprintf("%s", options ? "|IU" : "(IU");
3041                                         options++;
3042                                 }
3043                                 if ((ppr_options & MSG_EXT_PPR_RTI) != 0) {
3044                                         kprintf("%s", options ? "|RTI" : "(RTI");
3045                                         options++;
3046                                 }
3047                                 if ((ppr_options & MSG_EXT_PPR_QAS_REQ) != 0) {
3048                                         kprintf("%s", options ? "|QAS" : "(QAS");
3049                                         options++;
3050                                 }
3051                                 if (options != 0)
3052                                         kprintf(")\n");
3053                                 else
3054                                         kprintf("\n");
3055                         } else {
3056                                 kprintf("%s: target %d using "
3057                                        "asynchronous transfers%s\n",
3058                                        ahd_name(ahd), devinfo->target,
3059                                        (ppr_options & MSG_EXT_PPR_QAS_REQ) != 0
3060                                      ?  "(QAS)" : "");
3061                         }
3062                 }
3063         }
3064         /*
3065          * Always refresh the neg-table to handle the case of the
3066          * sequencer setting the ENATNO bit for a MK_MESSAGE request.
3067          * We will always renegotiate in that case if this is a
3068          * packetized request.  Also manage the busfree expected flag
3069          * from this common routine so that we catch changes due to
3070          * WDTR or SDTR messages.
3071          */
3072         if ((type & AHD_TRANS_CUR) != 0) {
3073                 if (!paused)
3074                         ahd_pause(ahd);
3075                 ahd_update_neg_table(ahd, devinfo, &tinfo->curr);
3076                 if (!paused)
3077                         ahd_unpause(ahd);
3078                 if (ahd->msg_type != MSG_TYPE_NONE) {
3079                         if ((old_ppr & MSG_EXT_PPR_IU_REQ)
3080                          != (ppr_options & MSG_EXT_PPR_IU_REQ)) {
3081 #ifdef AHD_DEBUG
3082                                 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
3083                                         ahd_print_devinfo(ahd, devinfo);
3084                                         kprintf("Expecting IU Change busfree\n");
3085                                 }
3086 #endif
3087                                 ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE
3088                                                |  MSG_FLAG_IU_REQ_CHANGED;
3089                         }
3090                         if ((old_ppr & MSG_EXT_PPR_IU_REQ) != 0) {
3091 #ifdef AHD_DEBUG
3092                                 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
3093                                         kprintf("PPR with IU_REQ outstanding\n");
3094 #endif
3095                                 ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE;
3096                         }
3097                 }
3098         }
3099
3100         update_needed += ahd_update_neg_request(ahd, devinfo, tstate,
3101                                                 tinfo, AHD_NEG_TO_GOAL);
3102
3103         if (update_needed && active)
3104                 ahd_update_pending_scbs(ahd);
3105 }
3106
3107 /*
3108  * Update the user/goal/curr tables of wide negotiation
3109  * parameters as well as, in the case of a current or active update,
3110  * any data structures on the host controller.  In the case of an
3111  * active update, the specified target is currently talking to us on
3112  * the bus, so the transfer parameter update must take effect
3113  * immediately.
3114  */
3115 void
3116 ahd_set_width(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
3117               u_int width, u_int type, int paused)
3118 {
3119         struct  ahd_initiator_tinfo *tinfo;
3120         struct  ahd_tmode_tstate *tstate;
3121         u_int   oldwidth;
3122         int     active;
3123         int     update_needed;
3124
3125         active = (type & AHD_TRANS_ACTIVE) == AHD_TRANS_ACTIVE;
3126         update_needed = 0;
3127         tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
3128                                     devinfo->target, &tstate);
3129
3130         if ((type & AHD_TRANS_USER) != 0)
3131                 tinfo->user.width = width;
3132
3133         if ((type & AHD_TRANS_GOAL) != 0)
3134                 tinfo->goal.width = width;
3135
3136         oldwidth = tinfo->curr.width;
3137         if ((type & AHD_TRANS_CUR) != 0 && oldwidth != width) {
3138
3139                 update_needed++;
3140
3141                 tinfo->curr.width = width;
3142                 ahd_send_async(ahd, devinfo->channel, devinfo->target,
3143                                CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL);
3144                 if (bootverbose) {
3145                         kprintf("%s: target %d using %dbit transfers\n",
3146                                ahd_name(ahd), devinfo->target,
3147                                8 * (0x01 << width));
3148                 }
3149         }
3150
3151         if ((type & AHD_TRANS_CUR) != 0) {
3152                 if (!paused)
3153                         ahd_pause(ahd);
3154                 ahd_update_neg_table(ahd, devinfo, &tinfo->curr);
3155                 if (!paused)
3156                         ahd_unpause(ahd);
3157         }
3158
3159         update_needed += ahd_update_neg_request(ahd, devinfo, tstate,
3160                                                 tinfo, AHD_NEG_TO_GOAL);
3161         if (update_needed && active)
3162                 ahd_update_pending_scbs(ahd);
3163
3164 }
3165
3166 /*
3167  * Update the current state of tagged queuing for a given target.
3168  */
3169 void
3170 ahd_set_tags(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
3171              ahd_queue_alg alg)
3172 {
3173         ahd_platform_set_tags(ahd, devinfo, alg);
3174         ahd_send_async(ahd, devinfo->channel, devinfo->target,
3175                        devinfo->lun, AC_TRANSFER_NEG, &alg);
3176 }
3177
3178 static void
3179 ahd_update_neg_table(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
3180                      struct ahd_transinfo *tinfo)
3181 {
3182         ahd_mode_state  saved_modes;
3183         u_int           period;
3184         u_int           ppr_opts;
3185         u_int           con_opts;
3186         u_int           offset;
3187         u_int           saved_negoaddr;
3188         uint8_t         iocell_opts[sizeof(ahd->iocell_opts)];
3189
3190         saved_modes = ahd_save_modes(ahd);
3191         ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
3192
3193         saved_negoaddr = ahd_inb(ahd, NEGOADDR);
3194         ahd_outb(ahd, NEGOADDR, devinfo->target);
3195         period = tinfo->period;
3196         offset = tinfo->offset;
3197         memcpy(iocell_opts, ahd->iocell_opts, sizeof(ahd->iocell_opts)); 
3198         ppr_opts = tinfo->ppr_options & (MSG_EXT_PPR_QAS_REQ|MSG_EXT_PPR_DT_REQ
3199                                         |MSG_EXT_PPR_IU_REQ|MSG_EXT_PPR_RTI);
3200         con_opts = 0;
3201         if (period == 0)
3202                 period = AHD_SYNCRATE_ASYNC;
3203         if (period == AHD_SYNCRATE_160) {
3204
3205                 if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0) {
3206                         /*
3207                          * When the SPI4 spec was finalized, PACE transfers
3208                          * was not made a configurable option in the PPR
3209                          * were not made a configurable option in the PPR
3210                          * any syncrate faster than 80MHz.  Nevertheless,
3211                          * Harpoon2A4 allows this to be configurable.
3212                          *
3213                          * Harpoon2A4 also assumes at most 2 data bytes per
3214                          * negotiated REQ/ACK offset.  Paced transfers take
3215                          * 4, so we must adjust our offset.
3216                          */
3217                         ppr_opts |= PPROPT_PACE;
3218                         offset *= 2;
3219
3220                         /*
3221                          * Harpoon2A assumed that there would be a
3222                          * fallback rate between 160MHz and 80MHz,
3223                          * so 7 is used as the period factor rather
3224                          * than 8 for 160MHz.
3225                          */
3226                         period = AHD_SYNCRATE_REVA_160;
3227                 }
3228                 if ((tinfo->ppr_options & MSG_EXT_PPR_PCOMP_EN) == 0)
3229                         iocell_opts[AHD_PRECOMP_SLEW_INDEX] &=
3230                             ~AHD_PRECOMP_MASK;
3231         } else {
3232                 /*
3233                  * Precomp should be disabled for non-paced transfers.
3234                  */
3235                 iocell_opts[AHD_PRECOMP_SLEW_INDEX] &= ~AHD_PRECOMP_MASK;
3236
3237                 if ((ahd->features & AHD_NEW_IOCELL_OPTS) != 0
3238                  && (ppr_opts & MSG_EXT_PPR_DT_REQ) != 0
3239                  && (ppr_opts & MSG_EXT_PPR_IU_REQ) == 0) {
3240                         /*
3241                          * Slow down our CRC interval to be
3242                          * compatible with non-packetized
3243                          * U160 devices that can't handle a
3244                          * CRC at full speed.
3245                          */
3246                         con_opts |= ENSLOWCRC;
3247                 }
3248
3249                 if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0) {
3250                         /*
3251                          * On H2A4, revert to a slower slewrate
3252                          * on non-paced transfers.
3253                          */
3254                         iocell_opts[AHD_PRECOMP_SLEW_INDEX] &=
3255                             ~AHD_SLEWRATE_MASK;
3256                 }
3257         }
3258
3259         ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_PRECOMP_SLEW);
3260         ahd_outb(ahd, ANNEXDAT, iocell_opts[AHD_PRECOMP_SLEW_INDEX]);
3261         ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_AMPLITUDE);
3262         ahd_outb(ahd, ANNEXDAT, iocell_opts[AHD_AMPLITUDE_INDEX]);
3263
3264         ahd_outb(ahd, NEGPERIOD, period);
3265         ahd_outb(ahd, NEGPPROPTS, ppr_opts);
3266         ahd_outb(ahd, NEGOFFSET, offset);
3267
3268         if (tinfo->width == MSG_EXT_WDTR_BUS_16_BIT)
3269                 con_opts |= WIDEXFER;
3270
3271         /*
3272          * During packetized transfers, the target will
3273          * give us the opportunity to send command packets
3274          * without us asserting attention.
3275          */
3276         if ((tinfo->ppr_options & MSG_EXT_PPR_IU_REQ) == 0)
3277                 con_opts |= ENAUTOATNO;
3278         ahd_outb(ahd, NEGCONOPTS, con_opts);
3279         ahd_outb(ahd, NEGOADDR, saved_negoaddr);
3280         ahd_restore_modes(ahd, saved_modes);
3281 }
3282
3283 /*
3284  * When the transfer settings for a connection change, setup for
3285  * negotiation in pending SCBs to effect the change as quickly as
3286  * possible.  We also cancel any negotiations that are scheduled
3287  * for inflight SCBs that have not been started yet.
3288  */
3289 static void
3290 ahd_update_pending_scbs(struct ahd_softc *ahd)
3291 {
3292         struct          scb *pending_scb;
3293         int             pending_scb_count;
3294         u_int           scb_tag;
3295         int             paused;
3296         u_int           saved_scbptr;
3297         ahd_mode_state  saved_modes;
3298
3299         /*
3300          * Traverse the pending SCB list and ensure that all of the
3301          * SCBs there have the proper settings.  We can only safely
3302          * clear the negotiation required flag (setting requires the
3303          * execution queue to be modified) and this is only possible
3304          * if we are not already attempting to select out for this
3305          * SCB.  For this reason, all callers only call this routine
3306          * if we are changing the negotiation settings for the currently
3307          * active transaction on the bus.
3308          */
3309         pending_scb_count = 0;
3310         LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) {
3311                 struct ahd_devinfo devinfo;
3312                 struct hardware_scb *pending_hscb;
3313                 struct ahd_initiator_tinfo *tinfo;
3314                 struct ahd_tmode_tstate *tstate;
3315
3316                 ahd_scb_devinfo(ahd, &devinfo, pending_scb);
3317                 tinfo = ahd_fetch_transinfo(ahd, devinfo.channel,
3318                                             devinfo.our_scsiid,
3319                                             devinfo.target, &tstate);
3320                 pending_hscb = pending_scb->hscb;
3321                 if ((tstate->auto_negotiate & devinfo.target_mask) == 0
3322                  && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) {
3323                         pending_scb->flags &= ~SCB_AUTO_NEGOTIATE;
3324                         pending_hscb->control &= ~MK_MESSAGE;
3325                 }
3326                 ahd_sync_scb(ahd, pending_scb,
3327                              BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3328                 pending_scb_count++;
3329         }
3330
3331         if (pending_scb_count == 0)
3332                 return;
3333
3334         if (ahd_is_paused(ahd)) {
3335                 paused = 1;
3336         } else {
3337                 paused = 0;
3338                 ahd_pause(ahd);
3339         }
3340
3341         /*
3342          * Force the sequencer to reinitialize the selection for
3343          * the command at the head of the execution queue if it
3344          * has already been set up.  The negotiation changes may
3345          * affect whether we select-out with ATN.
3346          */
3347         saved_modes = ahd_save_modes(ahd);
3348         ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
3349         ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO);
3350         saved_scbptr = ahd_get_scbptr(ahd);
3351         /* Ensure that the hscbs down on the card match the new information */
3352         for (scb_tag = 0; scb_tag < ahd->scb_data.maxhscbs; scb_tag++) {
3353                 struct  hardware_scb *pending_hscb;
3354                 u_int   control;
3355
3356                 pending_scb = ahd_lookup_scb(ahd, scb_tag);
3357                 if (pending_scb == NULL)
3358                         continue;
3359                 ahd_set_scbptr(ahd, scb_tag);
3360                 pending_hscb = pending_scb->hscb;
3361                 control = ahd_inb_scbram(ahd, SCB_CONTROL);
3362                 control &= ~MK_MESSAGE;
3363                 control |= pending_hscb->control & MK_MESSAGE;
3364                 ahd_outb(ahd, SCB_CONTROL, control);
3365         }
3366         ahd_set_scbptr(ahd, saved_scbptr);
3367         ahd_restore_modes(ahd, saved_modes);
3368
3369         if (paused == 0)
3370                 ahd_unpause(ahd);
3371 }
3372
3373 /**************************** Pathing Information *****************************/
3374 static void
3375 ahd_fetch_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
3376 {
3377         ahd_mode_state  saved_modes;
3378         u_int           saved_scsiid;
3379         role_t          role;
3380         int             our_id;
3381
3382         saved_modes = ahd_save_modes(ahd);
3383         ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
3384
3385         if (ahd_inb(ahd, SSTAT0) & TARGET)
3386                 role = ROLE_TARGET;
3387         else
3388                 role = ROLE_INITIATOR;
3389
3390         if (role == ROLE_TARGET
3391          && (ahd_inb(ahd, SEQ_FLAGS) & CMDPHASE_PENDING) != 0) {
3392                 /* We were selected, so pull our id from TARGIDIN */
3393                 our_id = ahd_inb(ahd, TARGIDIN) & OID;
3394         } else if (role == ROLE_TARGET)
3395                 our_id = ahd_inb(ahd, TOWNID);
3396         else
3397                 our_id = ahd_inb(ahd, IOWNID);
3398
3399         saved_scsiid = ahd_inb(ahd, SAVED_SCSIID);
3400         ahd_compile_devinfo(devinfo,
3401                             our_id,
3402                             SCSIID_TARGET(ahd, saved_scsiid),
3403                             ahd_inb(ahd, SAVED_LUN),
3404                             SCSIID_CHANNEL(ahd, saved_scsiid),
3405                             role);
3406         ahd_restore_modes(ahd, saved_modes);
3407 }
3408
3409 void
3410 ahd_print_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
3411 {
3412         kprintf("%s:%c:%d:%d: ", ahd_name(ahd), 'A',
3413                devinfo->target, devinfo->lun);
3414 }
3415
3416 struct ahd_phase_table_entry*
3417 ahd_lookup_phase_entry(int phase)
3418 {
3419         struct ahd_phase_table_entry *entry;
3420         struct ahd_phase_table_entry *last_entry;
3421
3422         /*
3423          * num_phases doesn't include the default entry which
3424          * will be returned if the phase doesn't match.
3425          */
3426         last_entry = &ahd_phase_table[num_phases];
3427         for (entry = ahd_phase_table; entry < last_entry; entry++) {
3428                 if (phase == entry->phase)
3429                         break;
3430         }
3431         return (entry);
3432 }
3433
3434 void
3435 ahd_compile_devinfo(struct ahd_devinfo *devinfo, u_int our_id, u_int target,
3436                     u_int lun, char channel, role_t role)
3437 {
3438         devinfo->our_scsiid = our_id;
3439         devinfo->target = target;
3440         devinfo->lun = lun;
3441         devinfo->target_offset = target;
3442         devinfo->channel = channel;
3443         devinfo->role = role;
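        /* Channel B targets occupy the upper eight bits of the target mask. */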
3444         if (channel == 'B')
3445                 devinfo->target_offset += 8;
3446         devinfo->target_mask = (0x01 << devinfo->target_offset);
3447 }
3448
3449 static void
3450 ahd_scb_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
3451                 struct scb *scb)
3452 {
3453         role_t  role;
3454         int     our_id;
3455
3456         our_id = SCSIID_OUR_ID(scb->hscb->scsiid);
3457         role = ROLE_INITIATOR;
3458         if ((scb->hscb->control & TARGET_SCB) != 0)
3459                 role = ROLE_TARGET;
3460         ahd_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahd, scb),
3461                             SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahd, scb), role);
3462 }
3463
3464
3465 /************************ Message Phase Processing ****************************/
3466 /*
3467  * When an initiator transaction with the MK_MESSAGE flag either reconnects
3468  * or enters the initial message out phase, we are interrupted.  Fill our
3469  * outgoing message buffer with the appropriate message and begin handling
3470  * the message phase(s) manually.
3471  */
3472 static void
3473 ahd_setup_initiator_msgout(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
3474                            struct scb *scb)
3475 {
3476         /*
3477          * To facilitate adding multiple messages together,
3478          * each routine should increment the index and len
3479          * variables instead of setting them explicitly.
3480          */
3481         ahd->msgout_index = 0;
3482         ahd->msgout_len = 0;
3483
3484         if (ahd_currently_packetized(ahd))
3485                 ahd->msg_flags |= MSG_FLAG_PACKETIZED;
3486
3487         if (ahd->send_msg_perror
3488          && ahd_inb(ahd, MSG_OUT) == HOST_MSG) {
3489                 ahd->msgout_buf[ahd->msgout_index++] = ahd->send_msg_perror;
3490                 ahd->msgout_len++;
3491                 ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
3492 #ifdef AHD_DEBUG
3493                 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
3494                         kprintf("Setting up for Parity Error delivery\n");
3495 #endif
3496                 return;
3497         } else if (scb == NULL) {
3498                 kprintf("%s: WARNING. No pending message for "
3499                        "I_T msgin.  Issuing NO-OP\n", ahd_name(ahd));
3500                 ahd->msgout_buf[ahd->msgout_index++] = MSG_NOOP;
3501                 ahd->msgout_len++;
3502                 ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
3503                 return;
3504         }
3505
3506         if ((scb->flags & SCB_DEVICE_RESET) == 0
3507          && (scb->flags & SCB_PACKETIZED) == 0
3508          && ahd_inb(ahd, MSG_OUT) == MSG_IDENTIFYFLAG) {
3509                 u_int identify_msg;
3510
3511                 identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb);
3512                 if ((scb->hscb->control & DISCENB) != 0)
3513                         identify_msg |= MSG_IDENTIFY_DISCFLAG;
3514                 ahd->msgout_buf[ahd->msgout_index++] = identify_msg;
3515                 ahd->msgout_len++;
3516
3517                 if ((scb->hscb->control & TAG_ENB) != 0) {
3518                         ahd->msgout_buf[ahd->msgout_index++] =
3519                             scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE);
3520                         ahd->msgout_buf[ahd->msgout_index++] = SCB_GET_TAG(scb);
3521                         ahd->msgout_len += 2;
3522                 }
3523         }
3524
3525         if (scb->flags & SCB_DEVICE_RESET) {
3526                 ahd->msgout_buf[ahd->msgout_index++] = MSG_BUS_DEV_RESET;
3527                 ahd->msgout_len++;
3528                 ahd_print_path(ahd, scb);
3529                 kprintf("Bus Device Reset Message Sent\n");
3530                 /*
3531                  * Clear our selection hardware in advance of
3532                  * the busfree.  We may have an entry in the waiting
3533                  * Q for this target, and we don't want to go about
3534                  * selecting while we handle the busfree and blow it
3535                  * away.
3536                  */
3537                 ahd_outb(ahd, SCSISEQ0, 0);
3538         } else if ((scb->flags & SCB_ABORT) != 0) {
3539
3540                 if ((scb->hscb->control & TAG_ENB) != 0) {
3541                         ahd->msgout_buf[ahd->msgout_index++] = MSG_ABORT_TAG;
3542                 } else {
3543                         ahd->msgout_buf[ahd->msgout_index++] = MSG_ABORT;
3544                 }
3545                 ahd->msgout_len++;
3546                 ahd_print_path(ahd, scb);
3547                 kprintf("Abort%s Message Sent\n",
3548                        (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : "");
3549                 /*
3550                  * Clear our selection hardware in advance of
3551                  * the busfree.  We may have an entry in the waiting
3552                  * Q for this target, and we don't want to go about
3553                  * selecting while we handle the busfree and blow it
3554                  * away.
3555                  */
3556                 ahd_outb(ahd, SCSISEQ0, 0);
3557         } else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) {
3558                 ahd_build_transfer_msg(ahd, devinfo);
3559                 /*
3560                  * Clear our selection hardware in advance of potential
3561                  * PPR IU status change busfree.  We may have an entry in
3562                  * the waiting Q for this target, and we don't want to go
3563                  * about selecting while we handle the busfree and blow
3564                  * it away.
3565                  */
3566                 ahd_outb(ahd, SCSISEQ0, 0);
3567         } else {
3568                 kprintf("ahd_intr: AWAITING_MSG for an SCB that "
3569                        "does not have a waiting message\n");
3570                 kprintf("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid,
3571                        devinfo->target_mask);
3572                 panic("SCB = %d, SCB Control = %x:%x, MSG_OUT = %x "
3573                       "SCB flags = %x", SCB_GET_TAG(scb), scb->hscb->control,
3574                       ahd_inb_scbram(ahd, SCB_CONTROL), ahd_inb(ahd, MSG_OUT),
3575                       scb->flags);
3576         }
3577
3578         /*
3579          * Clear the MK_MESSAGE flag from the SCB so we aren't
3580          * asked to send this message again.
3581          */
3582         ahd_outb(ahd, SCB_CONTROL,
3583                  ahd_inb_scbram(ahd, SCB_CONTROL) & ~MK_MESSAGE);
3584         scb->hscb->control &= ~MK_MESSAGE;
3585         ahd->msgout_index = 0;
3586         ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
3587 }
3588
3589 /*
3590  * Build an appropriate transfer negotiation message for the
3591  * currently active target.
3592  */
3593 static void
3594 ahd_build_transfer_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
3595 {
3596         /*
3597          * We need to initiate transfer negotiations.
3598          * If our current and goal settings are identical,
3599          * we want to renegotiate due to a check condition.
3600          */
3601         struct  ahd_initiator_tinfo *tinfo;
3602         struct  ahd_tmode_tstate *tstate;
3603         int     dowide;
3604         int     dosync;
3605         int     doppr;
3606         u_int   period;
3607         u_int   ppr_options;
3608         u_int   offset;
3609
3610         tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
3611                                     devinfo->target, &tstate);
3612         /*
3613          * Filter our period based on the current connection.
3614          * If we can't perform DT transfers on this segment (not in LVD
3615          * mode for instance), then our decision to issue a PPR message
3616          * may change.
3617          */
3618         period = tinfo->goal.period;
3619         offset = tinfo->goal.offset;
3620         ppr_options = tinfo->goal.ppr_options;
3621         /* Target initiated PPR is not allowed in the SCSI spec */
3622         if (devinfo->role == ROLE_TARGET)
3623                 ppr_options = 0;
3624         ahd_devlimited_syncrate(ahd, tinfo, &period,
3625                                 &ppr_options, devinfo->role);
3626         dowide = tinfo->curr.width != tinfo->goal.width;
3627         dosync = tinfo->curr.offset != offset || tinfo->curr.period != period;
3628         /*
3629          * Only use PPR if we have options that need it, even if the device
3630          * claims to support it.  There might be an expander in the way
3631          * that doesn't.
3632          */
3633         doppr = ppr_options != 0;
3634
3635         if (!dowide && !dosync && !doppr) {
3636                 dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT;
3637                 dosync = tinfo->goal.offset != 0;
3638         }
3639
3640         if (!dowide && !dosync && !doppr) {
3641                 /*
3642                  * Force async with a WDTR message if we have a wide bus,
3643                  * or just issue an SDTR with a 0 offset.
3644                  */
3645                 if ((ahd->features & AHD_WIDE) != 0)
3646                         dowide = 1;
3647                 else
3648                         dosync = 1;
3649
3650                 if (bootverbose) {
3651                         ahd_print_devinfo(ahd, devinfo);
3652                         kprintf("Ensuring async\n");
3653                 }
3654         }
3655         /* Target initiated PPR is not allowed in the SCSI spec */
3656         if (devinfo->role == ROLE_TARGET)
3657                 doppr = 0;
3658
3659         /*
3660          * Both the PPR message and SDTR message require the
3661          * goal syncrate to be limited to what the target device
3662          * is capable of handling (based on whether an LVD->SE
3663          * expander is on the bus), so combine these two cases.
3664          * Regardless, guarantee that if we are using WDTR and SDTR
3665          * messages that WDTR comes first.
3666          */
3667         if (doppr || (dosync && !dowide)) {
3668
3669                 offset = tinfo->goal.offset;
3670                 ahd_validate_offset(ahd, tinfo, period, &offset,
3671                                     doppr ? tinfo->goal.width
3672                                           : tinfo->curr.width,
3673                                     devinfo->role);
3674                 if (doppr) {
3675                         ahd_construct_ppr(ahd, devinfo, period, offset,
3676                                           tinfo->goal.width, ppr_options);
3677                 } else {
3678                         ahd_construct_sdtr(ahd, devinfo, period, offset);
3679                 }
3680         } else {
3681                 ahd_construct_wdtr(ahd, devinfo, tinfo->goal.width);
3682         }
3683 }
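/*
 * Informational summary (comment only, not driver code) of the choice
 * made by ahd_build_transfer_msg() above.  Exactly one message is
 * constructed per call:
 *
 *   doppr                 -> PPR  (negotiates width, sync and options)
 *   dosync and !dowide    -> SDTR (width already agrees)
 *   otherwise (dowide)    -> WDTR (the SDTR follows in a later reply;
 *                                  see the MSG_EXT_WDTR case in
 *                                  ahd_parse_msg())
 *
 * When none of the three flags is set, the code above forces an
 * "Ensuring async" renegotiation by setting dowide (wide bus) or dosync.
 */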
3684
3685 /*
3686  * Build a synchronous negotiation message in our message
3687  * buffer based on the input parameters.
3688  */
3689 static void
3690 ahd_construct_sdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
3691                    u_int period, u_int offset)
3692 {
3693         if (offset == 0)
3694                 period = AHD_ASYNC_XFER_PERIOD;
3695         ahd->msgout_buf[ahd->msgout_index++] = MSG_EXTENDED;
3696         ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_SDTR_LEN;
3697         ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_SDTR;
3698         ahd->msgout_buf[ahd->msgout_index++] = period;
3699         ahd->msgout_buf[ahd->msgout_index++] = offset;
3700         ahd->msgout_len += 5;
3701         if (bootverbose) {
3702                 kprintf("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n",
3703                        ahd_name(ahd), devinfo->channel, devinfo->target,
3704                        devinfo->lun, period, offset);
3705         }
3706 }
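/*
 * Illustrative layout (comment only): after ahd_construct_sdtr() the
 * outgoing message buffer holds the five byte extended SDTR message,
 * appended at the current msgout_index n:
 *
 *   msgout_buf[n+0] = MSG_EXTENDED
 *   msgout_buf[n+1] = MSG_EXT_SDTR_LEN
 *   msgout_buf[n+2] = MSG_EXT_SDTR
 *   msgout_buf[n+3] = period   (AHD_ASYNC_XFER_PERIOD when offset == 0)
 *   msgout_buf[n+4] = offset
 *
 * and msgout_len has been increased by 5.
 */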
3707
3708 /*
3709  * Build a wide negotiation message in our message
3710  * buffer based on the input parameters.
3711  */
3712 static void
3713 ahd_construct_wdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
3714                    u_int bus_width)
3715 {
3716         ahd->msgout_buf[ahd->msgout_index++] = MSG_EXTENDED;
3717         ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_WDTR_LEN;
3718         ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_WDTR;
3719         ahd->msgout_buf[ahd->msgout_index++] = bus_width;
3720         ahd->msgout_len += 4;
3721         if (bootverbose) {
3722                 kprintf("(%s:%c:%d:%d): Sending WDTR %x\n",
3723                        ahd_name(ahd), devinfo->channel, devinfo->target,
3724                        devinfo->lun, bus_width);
3725         }
3726 }
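/*
 * Illustrative layout (comment only): the WDTR message appended by
 * ahd_construct_wdtr() is four bytes:
 *
 *   msgout_buf[n+0] = MSG_EXTENDED
 *   msgout_buf[n+1] = MSG_EXT_WDTR_LEN
 *   msgout_buf[n+2] = MSG_EXT_WDTR
 *   msgout_buf[n+3] = bus_width
 *
 * and msgout_len has been increased by 4.
 */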
3727
3728 /*
3729  * Build a parallel protocol request message in our message
3730  * buffer based on the input parameters.
3731  */
3732 static void
3733 ahd_construct_ppr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
3734                   u_int period, u_int offset, u_int bus_width,
3735                   u_int ppr_options)
3736 {
3737         /*
3738          * Always request precompensation from
3739          * the other target if we are running
3740          * at paced syncrates.
3741          */
3742         if (period <= AHD_SYNCRATE_PACED)
3743                 ppr_options |= MSG_EXT_PPR_PCOMP_EN;
3744         if (offset == 0)
3745                 period = AHD_ASYNC_XFER_PERIOD;
3746         ahd->msgout_buf[ahd->msgout_index++] = MSG_EXTENDED;
3747         ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_PPR_LEN;
3748         ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_PPR;
3749         ahd->msgout_buf[ahd->msgout_index++] = period;
3750         ahd->msgout_buf[ahd->msgout_index++] = 0;
3751         ahd->msgout_buf[ahd->msgout_index++] = offset;
3752         ahd->msgout_buf[ahd->msgout_index++] = bus_width;
3753         ahd->msgout_buf[ahd->msgout_index++] = ppr_options;
3754         ahd->msgout_len += 8;
3755         if (bootverbose) {
3756                 kprintf("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, "
3757                        "offset %x, ppr_options %x\n", ahd_name(ahd),
3758                        devinfo->channel, devinfo->target, devinfo->lun,
3759                        bus_width, period, offset, ppr_options);
3760         }
3761 }
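/*
 * Illustrative layout (comment only): the PPR message appended by
 * ahd_construct_ppr() is eight bytes:
 *
 *   msgout_buf[n+0] = MSG_EXTENDED
 *   msgout_buf[n+1] = MSG_EXT_PPR_LEN
 *   msgout_buf[n+2] = MSG_EXT_PPR
 *   msgout_buf[n+3] = period      (AHD_ASYNC_XFER_PERIOD when offset == 0)
 *   msgout_buf[n+4] = 0           (reserved)
 *   msgout_buf[n+5] = offset
 *   msgout_buf[n+6] = bus_width
 *   msgout_buf[n+7] = ppr_options (MSG_EXT_PPR_PCOMP_EN is forced in
 *                                  when period <= AHD_SYNCRATE_PACED)
 *
 * and msgout_len has been increased by 8.
 */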
3762
3763 /*
3764  * Clear any active message state.
3765  */
3766 static void
3767 ahd_clear_msg_state(struct ahd_softc *ahd)
3768 {
3769         ahd_mode_state saved_modes;
3770
3771         saved_modes = ahd_save_modes(ahd);
3772         ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
3773         ahd->send_msg_perror = 0;
3774         ahd->msg_flags = MSG_FLAG_NONE;
3775         ahd->msgout_len = 0;
3776         ahd->msgin_index = 0;
3777         ahd->msg_type = MSG_TYPE_NONE;
3778         if ((ahd_inb(ahd, SCSISIGO) & ATNO) != 0) {
3779                 /*
3780                  * The target didn't care to respond to our
3781                  * message request, so clear ATN.
3782                  */
3783                 ahd_outb(ahd, CLRSINT1, CLRATNO);
3784         }
3785         ahd_outb(ahd, MSG_OUT, MSG_NOOP);
3786         ahd_outb(ahd, SEQ_FLAGS2,
3787                  ahd_inb(ahd, SEQ_FLAGS2) & ~TARGET_MSG_PENDING);
3788         ahd_restore_modes(ahd, saved_modes);
3789 }
3790
3791 /*
3792  * Manual message loop handler.
3793  */
3794 static void
3795 ahd_handle_message_phase(struct ahd_softc *ahd)
3796 {
3797         struct  ahd_devinfo devinfo;
3798         u_int   bus_phase;
3799         int     end_session;
3800
3801         ahd_fetch_devinfo(ahd, &devinfo);
3802         end_session = FALSE;
3803         bus_phase = ahd_inb(ahd, LASTPHASE);
3804
3805         if ((ahd_inb(ahd, LQISTAT2) & LQIPHASE_OUTPKT) != 0) {
3806                 kprintf("LQIRETRY for LQIPHASE_OUTPKT\n");
3807                 ahd_outb(ahd, LQCTL2, LQIRETRY);
3808         }
3809 reswitch:
3810         switch (ahd->msg_type) {
3811         case MSG_TYPE_INITIATOR_MSGOUT:
3812         {
3813                 int lastbyte;
3814                 int phasemis;
3815                 int msgdone;
3816
3817                 if (ahd->msgout_len == 0 && ahd->send_msg_perror == 0)
3818                         panic("HOST_MSG_LOOP interrupt with no active message");
3819
3820 #ifdef AHD_DEBUG
3821                 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
3822                         ahd_print_devinfo(ahd, &devinfo);
3823                         kprintf("INITIATOR_MSG_OUT");
3824                 }
3825 #endif
3826                 phasemis = bus_phase != P_MESGOUT;
3827                 if (phasemis) {
3828 #ifdef AHD_DEBUG
3829                         if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
3830                                 kprintf(" PHASEMIS %s\n",
3831                                        ahd_lookup_phase_entry(bus_phase)
3832                                                              ->phasemsg);
3833                         }
3834 #endif
3835                         if (bus_phase == P_MESGIN) {
3836                                 /*
3837                                  * Change gears and see if
3838                                  * this message is of interest to
3839                                  * us or should be passed back to
3840                                  * the sequencer.
3841                                  */
3842                                 ahd_outb(ahd, CLRSINT1, CLRATNO);
3843                                 ahd->send_msg_perror = 0;
3844                                 ahd->msg_type = MSG_TYPE_INITIATOR_MSGIN;
3845                                 ahd->msgin_index = 0;
3846                                 goto reswitch;
3847                         }
3848                         end_session = TRUE;
3849                         break;
3850                 }
3851
3852                 if (ahd->send_msg_perror) {
3853                         ahd_outb(ahd, CLRSINT1, CLRATNO);
3854                         ahd_outb(ahd, CLRSINT1, CLRREQINIT);
3855 #ifdef AHD_DEBUG
3856                         if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
3857                                 kprintf(" byte 0x%x\n", ahd->send_msg_perror);
3858 #endif
3859                         /*
3860                          * If we are notifying the target of a CRC error
3861                          * during packetized operations, the target is
3862                          * within its rights to acknowledge our message
3863                          * with a busfree.
3864                          */
3865                         if ((ahd->msg_flags & MSG_FLAG_PACKETIZED) != 0
3866                          && ahd->send_msg_perror == MSG_INITIATOR_DET_ERR)
3867                                 ahd->msg_flags |= MSG_FLAG_EXPECT_IDE_BUSFREE;
3868
3869                         ahd_outb(ahd, RETURN_2, ahd->send_msg_perror);
3870                         ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_WRITE);
3871                         break;
3872                 }
3873
3874                 msgdone = ahd->msgout_index == ahd->msgout_len;
3875                 if (msgdone) {
3876                         /*
3877                          * The target has requested a retry.
3878                          * Re-assert ATN, reset our message index to
3879                          * 0, and try again.
3880                          */
3881                         ahd->msgout_index = 0;
3882                         ahd_assert_atn(ahd);
3883                 }
3884
3885                 lastbyte = ahd->msgout_index == (ahd->msgout_len - 1);
3886                 if (lastbyte) {
3887                         /* Last byte is signified by dropping ATN */
3888                         ahd_outb(ahd, CLRSINT1, CLRATNO);
3889                 }
3890
3891                 /*
3892                  * Clear our interrupt status and present
3893                  * the next byte on the bus.
3894                  */
3895                 ahd_outb(ahd, CLRSINT1, CLRREQINIT);
3896 #ifdef AHD_DEBUG
3897                 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
3898                         kprintf(" byte 0x%x\n",
3899                                ahd->msgout_buf[ahd->msgout_index]);
3900 #endif
3901                 ahd_outb(ahd, RETURN_2, ahd->msgout_buf[ahd->msgout_index++]);
3902                 ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_WRITE);
3903                 break;
3904         }
3905         case MSG_TYPE_INITIATOR_MSGIN:
3906         {
3907                 int phasemis;
3908                 int message_done;
3909
3910 #ifdef AHD_DEBUG
3911                 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
3912                         ahd_print_devinfo(ahd, &devinfo);
3913                         kprintf("INITIATOR_MSG_IN");
3914                 }
3915 #endif
3916                 phasemis = bus_phase != P_MESGIN;
3917                 if (phasemis) {
3918 #ifdef AHD_DEBUG
3919                         if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
3920                                 kprintf(" PHASEMIS %s\n",
3921                                        ahd_lookup_phase_entry(bus_phase)
3922                                                              ->phasemsg);
3923                         }
3924 #endif
3925                         ahd->msgin_index = 0;
3926                         if (bus_phase == P_MESGOUT
3927                          && (ahd->send_msg_perror != 0
3928                           || (ahd->msgout_len != 0
3929                            && ahd->msgout_index == 0))) {
3930                                 ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
3931                                 goto reswitch;
3932                         }
3933                         end_session = TRUE;
3934                         break;
3935                 }
3936
3937                 /* Pull the byte in without acking it */
3938                 ahd->msgin_buf[ahd->msgin_index] = ahd_inb(ahd, SCSIBUS);
3939 #ifdef AHD_DEBUG
3940                 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
3941                         kprintf(" byte 0x%x\n",
3942                                ahd->msgin_buf[ahd->msgin_index]);
3943 #endif
3944
3945                 message_done = ahd_parse_msg(ahd, &devinfo);
3946
3947                 if (message_done) {
3948                         /*
3949                          * Clear our incoming message buffer in case there
3950                          * is another message following this one.
3951                          */
3952                         ahd->msgin_index = 0;
3953
3954                         /*
3955                          * If this message elicited a response,
3956                          * assert ATN so the target takes us to the
3957                          * message out phase.
3958                          */
3959                         if (ahd->msgout_len != 0) {
3960 #ifdef AHD_DEBUG
3961                                 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
3962                                         ahd_print_devinfo(ahd, &devinfo);
3963                                         kprintf("Asserting ATN for response\n");
3964                                 }
3965 #endif
3966                                 ahd_assert_atn(ahd);
3967                         }
3968                 } else 
3969                         ahd->msgin_index++;
3970
3971                 if (message_done == MSGLOOP_TERMINATED) {
3972                         end_session = TRUE;
3973                 } else {
3974                         /* Ack the byte */
3975                         ahd_outb(ahd, CLRSINT1, CLRREQINIT);
3976                         ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_READ);
3977                 }
3978                 break;
3979         }
3980         case MSG_TYPE_TARGET_MSGIN:
3981         {
3982                 int msgdone;
3983                 int msgout_request;
3984
3985                 /*
3986                  * By default, the message loop will continue.
3987                  */
3988                 ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG);
3989
3990                 if (ahd->msgout_len == 0)
3991                         panic("Target MSGIN with no active message");
3992
3993                 /*
3994                  * If we interrupted a mesgout session, the initiator
3995                  * will not know this until our first REQ.  So, we
3996                  * only honor mesgout requests after we've sent our
3997                  * first byte.
3998                  */
3999                 if ((ahd_inb(ahd, SCSISIGI) & ATNI) != 0
4000                  && ahd->msgout_index > 0)
4001                         msgout_request = TRUE;
4002                 else
4003                         msgout_request = FALSE;
4004
4005                 if (msgout_request) {
4006
4007                         /*
4008                          * Change gears and see if
4009                          * this message is of interest to
4010                          * us or should be passed back to
4011                          * the sequencer.
4012                          */
4013                         ahd->msg_type = MSG_TYPE_TARGET_MSGOUT;
4014                         ahd_outb(ahd, SCSISIGO, P_MESGOUT | BSYO);
4015                         ahd->msgin_index = 0;
4016                         /* Dummy read to REQ for first byte */
4017                         ahd_inb(ahd, SCSIDAT);
4018                         ahd_outb(ahd, SXFRCTL0,
4019                                  ahd_inb(ahd, SXFRCTL0) | SPIOEN);
4020                         break;
4021                 }
4022
4023                 msgdone = ahd->msgout_index == ahd->msgout_len;
4024                 if (msgdone) {
4025                         ahd_outb(ahd, SXFRCTL0,
4026                                  ahd_inb(ahd, SXFRCTL0) & ~SPIOEN);
4027                         end_session = TRUE;
4028                         break;
4029                 }
4030
4031                 /*
4032                  * Present the next byte on the bus.
4033                  */
4034                 ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) | SPIOEN);
4035                 ahd_outb(ahd, SCSIDAT, ahd->msgout_buf[ahd->msgout_index++]);
4036                 break;
4037         }
4038         case MSG_TYPE_TARGET_MSGOUT:
4039         {
4040                 int lastbyte;
4041                 int msgdone;
4042
4043                 /*
4044                  * By default, the message loop will continue.
4045                  */
4046                 ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG);
4047
4048                 /*
4049                  * The initiator signals that this is
4050                  * the last byte by dropping ATN.
4051                  */
4052                 lastbyte = (ahd_inb(ahd, SCSISIGI) & ATNI) == 0;
4053
4054                 /*
4055                  * Read the latched byte, but turn off SPIOEN first
4056                  * so that we don't inadvertently cause a REQ for the
4057                  * next byte.
4058                  */
4059                 ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) & ~SPIOEN);
4060                 ahd->msgin_buf[ahd->msgin_index] = ahd_inb(ahd, SCSIDAT);
4061                 msgdone = ahd_parse_msg(ahd, &devinfo);
4062                 if (msgdone == MSGLOOP_TERMINATED) {
4063                         /*
4064                          * The message is *really* done in that it caused
4065                          * us to go to bus free.  The sequencer has already
4066                          * been reset at this point, so pull the ejection
4067                          * handle.
4068                          */
4069                         return;
4070                 }
4071                 
4072                 ahd->msgin_index++;
4073
4074                 /*
4075                  * XXX Read spec about initiator dropping ATN too soon
4076                  *     and use msgdone to detect it.
4077                  */
4078                 if (msgdone == MSGLOOP_MSGCOMPLETE) {
4079                         ahd->msgin_index = 0;
4080
4081                         /*
4082                          * If this message elicited a response, transition
4083                          * to the Message in phase and send it.
4084                          */
4085                         if (ahd->msgout_len != 0) {
4086                                 ahd_outb(ahd, SCSISIGO, P_MESGIN | BSYO);
4087                                 ahd_outb(ahd, SXFRCTL0,
4088                                          ahd_inb(ahd, SXFRCTL0) | SPIOEN);
4089                                 ahd->msg_type = MSG_TYPE_TARGET_MSGIN;
4090                                 ahd->msgin_index = 0;
4091                                 break;
4092                         }
4093                 }
4094
4095                 if (lastbyte)
4096                         end_session = TRUE;
4097                 else {
4098                         /* Ask for the next byte. */
4099                         ahd_outb(ahd, SXFRCTL0,
4100                                  ahd_inb(ahd, SXFRCTL0) | SPIOEN);
4101                 }
4102
4103                 break;
4104         }
4105         default:
4106                 panic("Unknown REQINIT message type");
4107         }
4108
4109         if (end_session) {
4110                 if ((ahd->msg_flags & MSG_FLAG_PACKETIZED) != 0) {
4111                         kprintf("%s: Returning to Idle Loop\n",
4112                                ahd_name(ahd));
4113                         ahd_clear_msg_state(ahd);
4114
4115                         /*
4116                          * Perform the equivalent of a clear_target_state.
4117                          */
4118                         ahd_outb(ahd, LASTPHASE, P_BUSFREE);
4119                         ahd_outb(ahd, SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT);
4120                         ahd_outb(ahd, SEQCTL0, FASTMODE|SEQRESET);
4121                 } else {
4122                         ahd_clear_msg_state(ahd);
4123                         ahd_outb(ahd, RETURN_1, EXIT_MSG_LOOP);
4124                 }
4125         }
4126 }
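/*
 * Informational summary (comment only) of the manual message loop above.
 * ahd->msg_type selects one of four states per HOST_MSG_LOOP interrupt:
 *
 *   MSG_TYPE_INITIATOR_MSGOUT  present the next msgout_buf byte (or a
 *                              parity-error byte) via RETURN_2 and
 *                              CONT_MSG_LOOP_WRITE.
 *   MSG_TYPE_INITIATOR_MSGIN   latch a byte from SCSIBUS, feed it to
 *                              ahd_parse_msg(), then ack it with
 *                              CONT_MSG_LOOP_READ.
 *   MSG_TYPE_TARGET_MSGIN      (target mode) present msgout_buf bytes on
 *                              SCSIDAT, switching to msgout if the
 *                              initiator raises ATN.
 *   MSG_TYPE_TARGET_MSGOUT     (target mode) read bytes from SCSIDAT
 *                              until the initiator drops ATN.
 *
 * end_session either resets the sequencer state for packetized
 * connections or signals EXIT_MSG_LOOP through RETURN_1.
 */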
4127
4128 /*
4129  * See if we sent a particular extended message to the target.
4130  * If "full" is true, return true only if the target saw the full
4131  * message.  If "full" is false, return true if the target saw at
4132  * least the first byte of the message.
4133  */
4134 static int
4135 ahd_sent_msg(struct ahd_softc *ahd, ahd_msgtype type, u_int msgval, int full)
4136 {
4137         int found;
4138         u_int index;
4139
4140         found = FALSE;
4141         index = 0;
4142
4143         while (index < ahd->msgout_len) {
4144                 if (ahd->msgout_buf[index] == MSG_EXTENDED) {
4145                         u_int end_index;
4146
4147                         end_index = index + 1 + ahd->msgout_buf[index + 1];
4148                         if (ahd->msgout_buf[index+2] == msgval
4149                          && type == AHDMSG_EXT) {
4150
4151                                 if (full) {
4152                                         if (ahd->msgout_index > end_index)
4153                                                 found = TRUE;
4154                                 } else if (ahd->msgout_index > index)
4155                                         found = TRUE;
4156                         }
4157                         index = end_index;
4158                 } else if (ahd->msgout_buf[index] >= MSG_SIMPLE_TASK
4159                         && ahd->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) {
4160
4161                         /* Skip tag type and tag id or residue param */
4162                         index += 2;
4163                 } else {
4164                         /* Single byte message */
4165                         if (type == AHDMSG_1B
4166                          && ahd->msgout_index > index
4167                          && (ahd->msgout_buf[index] == msgval
4168                           || ((ahd->msgout_buf[index] & MSG_IDENTIFYFLAG) != 0
4169                            && msgval == MSG_IDENTIFYFLAG)))
4170                                 found = TRUE;
4171                         index++;
4172                 }
4173
4174                 if (found)
4175                         break;
4176         }
4177         return (found);
4178 }
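/*
 * Usage sketch (illustrative only; it mirrors the real calls made in
 * ahd_parse_msg() and ahd_handle_msg_reject() below).  The variable
 * names are hypothetical:
 *
 *   int full_wdtr = ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, TRUE);
 *   int any_wdtr  = ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, FALSE);
 *
 * full_wdtr is non-zero only once the complete WDTR we queued has gone
 * out on the bus; any_wdtr is non-zero once at least its first byte has.
 */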
4179
4180 /*
4181  * Wait for a complete incoming message, parse it, and respond accordingly.
4182  */
4183 static int
4184 ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
4185 {
4186         struct  ahd_initiator_tinfo *tinfo;
4187         struct  ahd_tmode_tstate *tstate;
4188         int     reject;
4189         int     done;
4190         int     response;
4191
4192         done = MSGLOOP_IN_PROG;
4193         response = FALSE;
4194         reject = FALSE;
4195         tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
4196                                     devinfo->target, &tstate);
4197
4198         /*
4199          * Parse as much of the message as is available,
4200          * rejecting it if we don't support it.  When
4201          * the entire message is available and has been
4202          * handled, return MSGLOOP_MSGCOMPLETE, indicating
4203          * that we have parsed an entire message.
4204          *
4205          * In the case of extended messages, we accept the length
4206          * byte outright and perform more checking once we know the
4207          * extended message type.
4208          */
4209         switch (ahd->msgin_buf[0]) {
4210         case MSG_DISCONNECT:
4211         case MSG_SAVEDATAPOINTER:
4212         case MSG_CMDCOMPLETE:
4213         case MSG_RESTOREPOINTERS:
4214         case MSG_IGN_WIDE_RESIDUE:
4215                 /*
4216                  * End our message loop as these are messages
4217                  * the sequencer handles on its own.
4218                  */
4219                 done = MSGLOOP_TERMINATED;
4220                 break;
4221         case MSG_MESSAGE_REJECT:
4222                 response = ahd_handle_msg_reject(ahd, devinfo);
4223                 /* FALLTHROUGH */
4224         case MSG_NOOP:
4225                 done = MSGLOOP_MSGCOMPLETE;
4226                 break;
4227         case MSG_EXTENDED:
4228         {
4229                 /* Wait for enough of the message to begin validation */
4230                 if (ahd->msgin_index < 2)
4231                         break;
4232                 switch (ahd->msgin_buf[2]) {
4233                 case MSG_EXT_SDTR:
4234                 {
4235                         u_int    period;
4236                         u_int    ppr_options;
4237                         u_int    offset;
4238                         u_int    saved_offset;
4239                         
4240                         if (ahd->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
4241                                 reject = TRUE;
4242                                 break;
4243                         }
4244
4245                         /*
4246                          * Wait until we have both args before validating
4247                          * and acting on this message.
4248                          *
4249                          * Add one to MSG_EXT_SDTR_LEN to account for
4250                          * the extended message preamble.
4251                          */
4252                         if (ahd->msgin_index < (MSG_EXT_SDTR_LEN + 1))
4253                                 break;
4254
4255                         period = ahd->msgin_buf[3];
4256                         ppr_options = 0;
4257                         saved_offset = offset = ahd->msgin_buf[4];
4258                         ahd_devlimited_syncrate(ahd, tinfo, &period,
4259                                                 &ppr_options, devinfo->role);
4260                         ahd_validate_offset(ahd, tinfo, period, &offset,
4261                                             tinfo->curr.width, devinfo->role);
4262                         if (bootverbose) {
4263                                 kprintf("(%s:%c:%d:%d): Received "
4264                                        "SDTR period %x, offset %x\n\t"
4265                                        "Filtered to period %x, offset %x\n",
4266                                        ahd_name(ahd), devinfo->channel,
4267                                        devinfo->target, devinfo->lun,
4268                                        ahd->msgin_buf[3], saved_offset,
4269                                        period, offset);
4270                         }
4271                         ahd_set_syncrate(ahd, devinfo, period,
4272                                          offset, ppr_options,
4273                                          AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
4274                                          /*paused*/TRUE);
4275
4276                         /*
4277                          * See if we initiated Sync Negotiation
4278                          * and didn't have to fall down to async
4279                          * transfers.
4280                          */
4281                         if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, TRUE)) {
4282                                 /* We started it */
4283                                 if (saved_offset != offset) {
4284                                         /* Went too low - force async */
4285                                         reject = TRUE;
4286                                 }
4287                         } else {
4288                                 /*
4289                                  * Send our own SDTR in reply
4290                                  */
4291                                 if (bootverbose
4292                                  && devinfo->role == ROLE_INITIATOR) {
4293                                         kprintf("(%s:%c:%d:%d): Target "
4294                                                "Initiated SDTR\n",
4295                                                ahd_name(ahd), devinfo->channel,
4296                                                devinfo->target, devinfo->lun);
4297                                 }
4298                                 ahd->msgout_index = 0;
4299                                 ahd->msgout_len = 0;
4300                                 ahd_construct_sdtr(ahd, devinfo,
4301                                                    period, offset);
4302                                 ahd->msgout_index = 0;
4303                                 response = TRUE;
4304                         }
4305                         done = MSGLOOP_MSGCOMPLETE;
4306                         break;
4307                 }
4308                 case MSG_EXT_WDTR:
4309                 {
4310                         u_int bus_width;
4311                         u_int saved_width;
4312                         u_int sending_reply;
4313
4314                         sending_reply = FALSE;
4315                         if (ahd->msgin_buf[1] != MSG_EXT_WDTR_LEN) {
4316                                 reject = TRUE;
4317                                 break;
4318                         }
4319
4320                         /*
4321                          * Wait until we have our arg before validating
4322                          * and acting on this message.
4323                          *
4324                          * Add one to MSG_EXT_WDTR_LEN to account for
4325                          * the extended message preamble.
4326                          */
4327                         if (ahd->msgin_index < (MSG_EXT_WDTR_LEN + 1))
4328                                 break;
4329
4330                         bus_width = ahd->msgin_buf[3];
4331                         saved_width = bus_width;
4332                         ahd_validate_width(ahd, tinfo, &bus_width,
4333                                            devinfo->role);
4334                         if (bootverbose) {
4335                                 kprintf("(%s:%c:%d:%d): Received WDTR "
4336                                        "%x filtered to %x\n",
4337                                        ahd_name(ahd), devinfo->channel,
4338                                        devinfo->target, devinfo->lun,
4339                                        saved_width, bus_width);
4340                         }
4341
4342                         if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, TRUE)) {
4343                                 /*
4344                                  * Don't send a WDTR back to the
4345                                  * target, since we asked first.
4346                                  * If the width went higher than our
4347                                  * request, reject it.
4348                                  */
4349                                 if (saved_width > bus_width) {
4350                                         reject = TRUE;
4351                                         kprintf("(%s:%c:%d:%d): requested %dBit "
4352                                                "transfers.  Rejecting...\n",
4353                                                ahd_name(ahd), devinfo->channel,
4354                                                devinfo->target, devinfo->lun,
4355                                                8 * (0x01 << bus_width));
4356                                         bus_width = 0;
4357                                 }
4358                         } else {
4359                                 /*
4360                                  * Send our own WDTR in reply
4361                                  */
4362                                 if (bootverbose
4363                                  && devinfo->role == ROLE_INITIATOR) {
4364                                         kprintf("(%s:%c:%d:%d): Target "
4365                                                "Initiated WDTR\n",
4366                                                ahd_name(ahd), devinfo->channel,
4367                                                devinfo->target, devinfo->lun);
4368                                 }
4369                                 ahd->msgout_index = 0;
4370                                 ahd->msgout_len = 0;
4371                                 ahd_construct_wdtr(ahd, devinfo, bus_width);
4372                                 ahd->msgout_index = 0;
4373                                 response = TRUE;
4374                                 sending_reply = TRUE;
4375                         }
4376                         /*
4377                          * After a wide message, we are async, but
4378                          * some devices don't seem to honor this portion
4379                          * of the spec.  Force a renegotiation of the
4380                          * sync component of our transfer agreement even
4381                          * if our goal is async.  By updating our width
4382                          * after forcing the negotiation, we avoid
4383                          * renegotiating for width.
4384                          */
4385                         ahd_update_neg_request(ahd, devinfo, tstate,
4386                                                tinfo, AHD_NEG_ALWAYS);
4387                         ahd_set_width(ahd, devinfo, bus_width,
4388                                       AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
4389                                       /*paused*/TRUE);
4390                         if (sending_reply == FALSE && reject == FALSE) {
4391
4392                                 /*
4393                                  * We will always have an SDTR to send.
4394                                  */
4395                                 ahd->msgout_index = 0;
4396                                 ahd->msgout_len = 0;
4397                                 ahd_build_transfer_msg(ahd, devinfo);
4398                                 ahd->msgout_index = 0;
4399                                 response = TRUE;
4400                         }
4401                         done = MSGLOOP_MSGCOMPLETE;
4402                         break;
4403                 }
4404                 case MSG_EXT_PPR:
4405                 {
4406                         u_int   period;
4407                         u_int   offset;
4408                         u_int   bus_width;
4409                         u_int   ppr_options;
4410                         u_int   saved_width;
4411                         u_int   saved_offset;
4412                         u_int   saved_ppr_options;
4413
4414                         if (ahd->msgin_buf[1] != MSG_EXT_PPR_LEN) {
4415                                 reject = TRUE;
4416                                 break;
4417                         }
4418
4419                         /*
4420                          * Wait until we have all args before validating
4421                          * and acting on this message.
4422                          *
4423                          * Add one to MSG_EXT_PPR_LEN to account for
4424                          * the extended message preamble.
4425                          */
4426                         if (ahd->msgin_index < (MSG_EXT_PPR_LEN + 1))
4427                                 break;
4428
4429                         period = ahd->msgin_buf[3];
4430                         offset = ahd->msgin_buf[5];
4431                         bus_width = ahd->msgin_buf[6];
4432                         saved_width = bus_width;
4433                         ppr_options = ahd->msgin_buf[7];
4434                         /*
4435                          * According to the spec, a DT only
4436                          * period factor with no DT option
4437                          * set implies async.
4438                          */
4439                         if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0
4440                          && period <= 9)
4441                                 offset = 0;
4442                         saved_ppr_options = ppr_options;
4443                         saved_offset = offset;
4444
4445                         /*
4446                          * Transfer options are only available if we
4447                          * are negotiating wide.
4448                          */
4449                         if (bus_width == 0)
4450                                 ppr_options &= MSG_EXT_PPR_QAS_REQ;
4451
4452                         ahd_validate_width(ahd, tinfo, &bus_width,
4453                                            devinfo->role);
4454                         ahd_devlimited_syncrate(ahd, tinfo, &period,
4455                                                 &ppr_options, devinfo->role);
4456                         ahd_validate_offset(ahd, tinfo, period, &offset,
4457                                             bus_width, devinfo->role);
4458
4459                         if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, TRUE)) {
4460                                 /*
4461                                  * If we are unable to do any of the
4462                                  * requested options (we went too low),
4463                                  * then we'll have to reject the message.
4464                                  */
4465                                 if (saved_width > bus_width
4466                                  || saved_offset != offset
4467                                  || saved_ppr_options != ppr_options) {
4468                                         reject = TRUE;
4469                                         period = 0;
4470                                         offset = 0;
4471                                         bus_width = 0;
4472                                         ppr_options = 0;
4473                                 }
4474                         } else {
4475                                 if (devinfo->role != ROLE_TARGET)
4476                                         kprintf("(%s:%c:%d:%d): Target "
4477                                                "Initiated PPR\n",
4478                                                ahd_name(ahd), devinfo->channel,
4479                                                devinfo->target, devinfo->lun);
4480                                 else
4481                                         kprintf("(%s:%c:%d:%d): Initiator "
4482                                                "Initiated PPR\n",
4483                                                ahd_name(ahd), devinfo->channel,
4484                                                devinfo->target, devinfo->lun);
4485                                 ahd->msgout_index = 0;
4486                                 ahd->msgout_len = 0;
4487                                 ahd_construct_ppr(ahd, devinfo, period, offset,
4488                                                   bus_width, ppr_options);
4489                                 ahd->msgout_index = 0;
4490                                 response = TRUE;
4491                         }
4492                         if (bootverbose) {
4493                                 kprintf("(%s:%c:%d:%d): Received PPR width %x, "
4494                                        "period %x, offset %x, options %x\n"
4495                                        "\tFiltered to width %x, period %x, "
4496                                        "offset %x, options %x\n",
4497                                        ahd_name(ahd), devinfo->channel,
4498                                        devinfo->target, devinfo->lun,
4499                                        saved_width, ahd->msgin_buf[3],
4500                                        saved_offset, saved_ppr_options,
4501                                        bus_width, period, offset, ppr_options);
4502                         }
4503                         ahd_set_width(ahd, devinfo, bus_width,
4504                                       AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
4505                                       /*paused*/TRUE);
4506                         ahd_set_syncrate(ahd, devinfo, period,
4507                                          offset, ppr_options,
4508                                          AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
4509                                          /*paused*/TRUE);
4510
4511                         done = MSGLOOP_MSGCOMPLETE;
4512                         break;
4513                 }
4514                 default:
4515                         /* Unknown extended message.  Reject it. */
4516                         reject = TRUE;
4517                         break;
4518                 }
4519                 break;
4520         }
4521 #ifdef AHD_TARGET_MODE
4522         case MSG_BUS_DEV_RESET:
4523                 ahd_handle_devreset(ahd, devinfo, CAM_LUN_WILDCARD,
4524                                     CAM_BDR_SENT,
4525                                     "Bus Device Reset Received",
4526                                     /*verbose_level*/0);
4527                 ahd_restart(ahd);
4528                 done = MSGLOOP_TERMINATED;
4529                 break;
4530         case MSG_ABORT_TAG:
4531         case MSG_ABORT:
4532         case MSG_CLEAR_QUEUE:
4533         {
4534                 int tag;
4535
4536                 /* Target mode messages */
4537                 if (devinfo->role != ROLE_TARGET) {
4538                         reject = TRUE;
4539                         break;
4540                 }
4541                 tag = SCB_LIST_NULL;
4542                 if (ahd->msgin_buf[0] == MSG_ABORT_TAG)
4543                         tag = ahd_inb(ahd, INITIATOR_TAG);
4544                 ahd_abort_scbs(ahd, devinfo->target, devinfo->channel,
4545                                devinfo->lun, tag, ROLE_TARGET,
4546                                CAM_REQ_ABORTED);
4547
4548                 tstate = ahd->enabled_targets[devinfo->our_scsiid];
4549                 if (tstate != NULL) {
4550                         struct ahd_tmode_lstate* lstate;
4551
4552                         lstate = tstate->enabled_luns[devinfo->lun];
4553                         if (lstate != NULL) {
4554                                 ahd_queue_lstate_event(ahd, lstate,
4555                                                        devinfo->our_scsiid,
4556                                                        ahd->msgin_buf[0],
4557                                                        /*arg*/tag);
4558                                 ahd_send_lstate_events(ahd, lstate);
4559                         }
4560                 }
4561                 ahd_restart(ahd);
4562                 done = MSGLOOP_TERMINATED;
4563                 break;
4564         }
4565 #endif
4566         case MSG_QAS_REQUEST:
4567 #ifdef AHD_DEBUG
4568                 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
4569                         kprintf("%s: QAS request.  SCSISIGI == 0x%x\n",
4570                                ahd_name(ahd), ahd_inb(ahd, SCSISIGI));
4571 #endif
4572                 ahd->msg_flags |= MSG_FLAG_EXPECT_QASREJ_BUSFREE;
4573                 /* FALLTHROUGH */
4574         case MSG_TERM_IO_PROC:
4575         default:
4576                 reject = TRUE;
4577                 break;
4578         }
4579
4580         if (reject) {
4581                 /*
4582                  * Setup to reject the message.
4583                  */
4584                 ahd->msgout_index = 0;
4585                 ahd->msgout_len = 1;
4586                 ahd->msgout_buf[0] = MSG_MESSAGE_REJECT;
4587                 done = MSGLOOP_MSGCOMPLETE;
4588                 response = TRUE;
4589         }
4590
4591         if (done != MSGLOOP_IN_PROG && !response)
4592                 /* Clear the outgoing message buffer */
4593                 ahd->msgout_len = 0;
4594
4595         return (done);
4596 }
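/*
 * Return-value summary for ahd_parse_msg() (comment only):
 *
 *   MSGLOOP_IN_PROG      more message bytes are still required.
 *   MSGLOOP_MSGCOMPLETE  a full message was parsed; if a reply was built,
 *                        msgout_len is non-zero and the caller asserts ATN.
 *   MSGLOOP_TERMINATED   the message is left to the sequencer (or caused
 *                        a restart), so the manual loop ends.
 */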
4597
4598 /*
4599  * Process a message reject message.
4600  */
4601 static int
4602 ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
4603 {
4604         /*
4605          * What we care about here is if we had an
4606          * outstanding SDTR or WDTR message for this
4607          * target.  If we did, this is a signal that
4608          * the target is refusing negotiation.
4609          */
4610         struct scb *scb;
4611         struct ahd_initiator_tinfo *tinfo;
4612         struct ahd_tmode_tstate *tstate;
4613         u_int scb_index;
4614         u_int last_msg;
4615         int   response = 0;
4616
4617         scb_index = ahd_get_scbptr(ahd);
4618         scb = ahd_lookup_scb(ahd, scb_index);
4619         tinfo = ahd_fetch_transinfo(ahd, devinfo->channel,
4620                                     devinfo->our_scsiid,
4621                                     devinfo->target, &tstate);
4622         /* Might be necessary */
4623         last_msg = ahd_inb(ahd, LAST_MSG);
4624
4625         if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) {
4626                 if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, /*full*/TRUE)
4627                  && tinfo->goal.period <= AHD_SYNCRATE_PACED) {
4628                         /*
4629                          * Target may not like our SPI-4 PPR Options.
4630                          * Attempt to negotiate 80MHz which will turn
4631                          * off these options.
4632                          */
4633                         if (bootverbose) {
4634                                 kprintf("(%s:%c:%d:%d): PPR Rejected. "
4635                                        "Trying simple U160 PPR\n",
4636                                        ahd_name(ahd), devinfo->channel,
4637                                        devinfo->target, devinfo->lun);
4638                         }
4639                         tinfo->goal.period = AHD_SYNCRATE_DT;
4640                         tinfo->goal.ppr_options &= MSG_EXT_PPR_IU_REQ
4641                                                 |  MSG_EXT_PPR_QAS_REQ
4642                                                 |  MSG_EXT_PPR_DT_REQ;
4643                 } else {
4644                         /*
4645                          * Target does not support the PPR message.
4646                          * Attempt to negotiate SPI-2 style.
4647                          */
4648                         if (bootverbose) {
4649                                 kprintf("(%s:%c:%d:%d): PPR Rejected. "
4650                                        "Trying WDTR/SDTR\n",
4651                                        ahd_name(ahd), devinfo->channel,
4652                                        devinfo->target, devinfo->lun);
4653                         }
4654                         tinfo->goal.ppr_options = 0;
4655                         tinfo->curr.transport_version = 2;
4656                         tinfo->goal.transport_version = 2;
4657                 }
4658                 ahd->msgout_index = 0;
4659                 ahd->msgout_len = 0;
4660                 ahd_build_transfer_msg(ahd, devinfo);
4661                 ahd->msgout_index = 0;
4662                 response = 1;
4663         } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) {
4664
4665                 /* note 8bit xfers */
4666                 kprintf("(%s:%c:%d:%d): refuses WIDE negotiation.  Using "
4667                        "8bit transfers\n", ahd_name(ahd),
4668                        devinfo->channel, devinfo->target, devinfo->lun);
4669                 ahd_set_width(ahd, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
4670                               AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
4671                               /*paused*/TRUE);
4672                 /*
4673                  * No need to clear the sync rate.  If the target
4674                  * did not accept the command, our syncrate is
4675                  * unaffected.  If the target started the negotiation,
4676                  * but rejected our response, we already cleared the
4677                  * sync rate before sending our WDTR.
4678                  */
4679                 if (tinfo->goal.offset != tinfo->curr.offset) {
4680
4681                         /* Start the sync negotiation */
4682                         ahd->msgout_index = 0;
4683                         ahd->msgout_len = 0;
4684                         ahd_build_transfer_msg(ahd, devinfo);
4685                         ahd->msgout_index = 0;
4686                         response = 1;
4687                 }
4688         } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) {
4689                 /* note asynch xfers and clear flag */
4690                 ahd_set_syncrate(ahd, devinfo, /*period*/0,
4691                                  /*offset*/0, /*ppr_options*/0,
4692                                  AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
4693                                  /*paused*/TRUE);
4694                 kprintf("(%s:%c:%d:%d): refuses synchronous negotiation. "
4695                        "Using asynchronous transfers\n",
4696                        ahd_name(ahd), devinfo->channel,
4697                        devinfo->target, devinfo->lun);
4698         } else if ((scb->hscb->control & MSG_SIMPLE_TASK) != 0) {
4699                 int tag_type;
4700                 int mask;
4701
4702                 tag_type = (scb->hscb->control & MSG_SIMPLE_TASK);
4703
4704                 if (tag_type == MSG_SIMPLE_TASK) {
4705                         kprintf("(%s:%c:%d:%d): refuses tagged commands.  "
4706                                "Performing non-tagged I/O\n", ahd_name(ahd),
4707                                devinfo->channel, devinfo->target, devinfo->lun);
4708                         ahd_set_tags(ahd, devinfo, AHD_QUEUE_NONE);
4709                         mask = ~0x23;
4710                 } else {
4711                         kprintf("(%s:%c:%d:%d): refuses %s tagged commands.  "
4712                                "Performing simple queue tagged I/O only\n",
4713                                ahd_name(ahd), devinfo->channel, devinfo->target,
4714                                devinfo->lun, tag_type == MSG_ORDERED_TASK
4715                                ? "ordered" : "head of queue");
4716                         ahd_set_tags(ahd, devinfo, AHD_QUEUE_BASIC);
4717                         mask = ~0x03;
4718                 }
4719
4720                 /*
4721                  * Resend the identify for this CCB as the target
4722                  * may believe that the selection is invalid otherwise.
4723                  */
4724                 ahd_outb(ahd, SCB_CONTROL,
4725                          ahd_inb_scbram(ahd, SCB_CONTROL) & mask);
4726                 scb->hscb->control &= mask;
4727                 aic_set_transaction_tag(scb, /*enabled*/FALSE,
4728                                         /*type*/MSG_SIMPLE_TASK);
4729                 ahd_outb(ahd, MSG_OUT, MSG_IDENTIFYFLAG);
4730                 ahd_assert_atn(ahd);
4731                 ahd_busy_tcl(ahd, BUILD_TCL(scb->hscb->scsiid, devinfo->lun),
4732                              SCB_GET_TAG(scb));
4733
4734                 /*
4735                  * Requeue all tagged commands for this target
4736                  * currently in our possession so they can be
4737                  * converted to untagged commands.
4738                  */
4739                 ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb),
4740                                    SCB_GET_CHANNEL(ahd, scb),
4741                                    SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL,
4742                                    ROLE_INITIATOR, CAM_REQUEUE_REQ,
4743                                    SEARCH_COMPLETE);
4744         } else if (ahd_sent_msg(ahd, AHDMSG_1B, MSG_IDENTIFYFLAG, TRUE)) {
4745                 /*
4746                  * Most likely the device believes that we had
4747                  * previously negotiated packetized.
4748                  */
4749                 ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE
4750                                |  MSG_FLAG_IU_REQ_CHANGED;
4751
4752                 ahd_force_renegotiation(ahd, devinfo);
4753                 ahd->msgout_index = 0;
4754                 ahd->msgout_len = 0;
4755                 ahd_build_transfer_msg(ahd, devinfo);
4756                 ahd->msgout_index = 0;
4757                 response = 1;
4758         } else {
4759                 /*
4760                  * Otherwise, we ignore it.
4761                  */
4762                 kprintf("%s:%c:%d: Message reject for %x -- ignored\n",
4763                        ahd_name(ahd), devinfo->channel, devinfo->target,
4764                        last_msg);
4765         }
4766         return (response);
4767 }
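/*
 * Informational summary (comment only) of the reject handling above:
 *
 *   PPR rejected at a paced goal rate  -> retry a simple U160 (DT) PPR.
 *   PPR rejected otherwise             -> fall back to SPI-2 WDTR/SDTR.
 *   WDTR rejected                      -> force 8 bit transfers, then
 *                                         negotiate sync if still needed.
 *   SDTR rejected                      -> force asynchronous transfers.
 *   Tag message rejected               -> drop to untagged or simple-tag
 *                                         I/O and requeue pending SCBs.
 *   Identify rejected (packetized)     -> expect a PPR busfree and force
 *                                         renegotiation.
 *   Anything else                      -> logged and ignored.
 */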
4768
4769 /*
4770  * Process an ignore wide residue message.
4771  */
4772 static void
4773 ahd_handle_ign_wide_residue(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
4774 {
4775         u_int scb_index;
4776         struct scb *scb;
4777
4778         scb_index = ahd_get_scbptr(ahd);
4779         scb = ahd_lookup_scb(ahd, scb_index);
4780         /*
4781          * XXX Actually check data direction in the sequencer?
4782          * Perhaps add datadir to some spare bits in the hscb?
4783          */
4784         if ((ahd_inb(ahd, SEQ_FLAGS) & DPHASE) == 0
4785          || aic_get_transfer_dir(scb) != CAM_DIR_IN) {
4786                 /*
4787                  * Ignore the message if we haven't
4788                  * seen an appropriate data phase yet.
4789                  */
4790         } else {
4791                 /*
4792                  * If the residual occurred on the last
4793                  * transfer and the transfer request was
4794                  * expected to end on an odd count, do
4795                  * nothing.  Otherwise, subtract a byte
4796                  * and update the residual count accordingly.
4797                  */
4798                 uint32_t sgptr;
4799
4800                 sgptr = ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR);
4801                 if ((sgptr & SG_LIST_NULL) != 0
4802                  && (ahd_inb_scbram(ahd, SCB_TASK_ATTRIBUTE)
4803                      & SCB_XFERLEN_ODD) != 0) {
4804                         /*
4805                          * If the residual occurred on the last
4806                          * transfer and the transfer request was
4807                          * expected to end on an odd count, do
4808                          * nothing.
4809                          */
4810                 } else {
4811                         uint32_t data_cnt;
4812                         uint64_t data_addr;
4813                         uint32_t sglen;
4814
4815                         /* Pull in the rest of the sgptr */
4816                         sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR);
4817                         data_cnt = ahd_inl_scbram(ahd, SCB_RESIDUAL_DATACNT);
4818                         if ((sgptr & SG_LIST_NULL) != 0) {
4819                                 /*
4820                                  * The residual data count is not updated
4821                                  * for the command run to completion case.
4822                                  * Explicitly zero the count.
4823                                  */
4824                                 data_cnt &= ~AHD_SG_LEN_MASK;
4825                         }
4826                         data_addr = ahd_inq(ahd, SHADDR);
4827                         data_cnt += 1;
4828                         data_addr -= 1;
4829                         sgptr &= SG_PTR_MASK;
4830                         if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
4831                                 struct ahd_dma64_seg *sg;
4832
4833                                 sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);
4834
4835                                 /*
4836                                  * The residual sg ptr points to the next S/G
4837                                  * to load so we must go back one.
4838                                  */
4839                                 sg--;
4840                                 sglen = aic_le32toh(sg->len) & AHD_SG_LEN_MASK;
4841                                 if (sg != scb->sg_list
4842                                  && sglen < (data_cnt & AHD_SG_LEN_MASK)) {
4843
4844                                         sg--;
4845                                         sglen = aic_le32toh(sg->len);
4846                                         /*
4847                                          * Preserve High Address and SG_LIST
4848                                          * bits while setting the count to 1.
4849                                          */
4850                                         data_cnt = 1|(sglen&(~AHD_SG_LEN_MASK));
4851                                         data_addr = aic_le64toh(sg->addr)
4852                                                   + (sglen & AHD_SG_LEN_MASK)
4853                                                   - 1;
4854
4855                                         /*
4856                                          * Increment sg so it points to the
4857                                          * "next" sg.
4858                                          */
4859                                         sg++;
4860                                         sgptr = ahd_sg_virt_to_bus(ahd, scb,
4861                                                                    sg);
4862                                 }
4863                         } else {
4864                                 struct ahd_dma_seg *sg;
4865
4866                                 sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);
4867
4868                                 /*
4869                                  * The residual sg ptr points to the next S/G
4870                                  * to load so we must go back one.
4871                                  */
4872                                 sg--;
4873                                 sglen = aic_le32toh(sg->len) & AHD_SG_LEN_MASK;
4874                                 if (sg != scb->sg_list
4875                                  && sglen < (data_cnt & AHD_SG_LEN_MASK)) {
4876
4877                                         sg--;
4878                                         sglen = aic_le32toh(sg->len);
4879                                         /*
4880                                          * Preserve High Address and SG_LIST
4881                                          * bits while setting the count to 1.
4882                                          */
4883                                         data_cnt = 1|(sglen&(~AHD_SG_LEN_MASK));
4884                                         data_addr = aic_le32toh(sg->addr)
4885                                                   + (sglen & AHD_SG_LEN_MASK)
4886                                                   - 1;
4887
4888                                         /*
4889                                          * Increment sg so it points to the
4890                                          * "next" sg.
4891                                          */
4892                                         sg++;
4893                                         sgptr = ahd_sg_virt_to_bus(ahd, scb,
4894                                                                   sg);
4895                                 }
4896                         }
4897                         /*
4898                          * Toggle the "oddness" of the transfer length
4899                          * to handle this mid-transfer ignore wide
4900                          * residue.  This ensures that the oddness is
4901                          * correct for subsequent data transfers.
4902                          */
4903                         ahd_outb(ahd, SCB_TASK_ATTRIBUTE,
4904                             ahd_inb_scbram(ahd, SCB_TASK_ATTRIBUTE)
4905                             ^ SCB_XFERLEN_ODD);
4906
4907                         ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr);
4908                         ahd_outl(ahd, SCB_RESIDUAL_DATACNT, data_cnt);
4909                         /*
4910                          * The FIFO's pointers will be updated if/when the
4911                          * sequencer re-enters a data phase.
4912                          */
4913                 }
4914         }
4915 }
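
/*
 * Illustrative sketch only, not driver code: the "back up one byte"
 * bookkeeping performed above on the hardware residual fields,
 * expressed against a plain S/G array so the segment-boundary case is
 * easy to follow.  The types and names below are hypothetical.
 */
#if 0
struct example_sg {
        uint64_t addr;
        uint32_t len;
};

static int
example_back_up_one_byte(const struct example_sg *sg, int cur_seg,
                         uint32_t *residual, uint64_t *next_addr)
{
        /* Reclaim one byte: the residual grows, the next address shrinks. */
        *residual += 1;
        *next_addr -= 1;

        /*
         * If the reclaimed byte was the final byte of the previous
         * segment, the transfer position moves back into that segment.
         */
        if (cur_seg > 0 && *residual > sg[cur_seg].len) {
                cur_seg--;
                *residual = 1;
                *next_addr = sg[cur_seg].addr + sg[cur_seg].len - 1;
        }
        return (cur_seg);
}
#endif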
4916
4917
4918 /*
4919  * Reinitialize the data pointers for the active transfer
4920  * based on its current residual.
4921  */
4922 static void
4923 ahd_reinitialize_dataptrs(struct ahd_softc *ahd)
4924 {
4925         struct           scb *scb;
4926         ahd_mode_state   saved_modes;
4927         u_int            scb_index;
4928         u_int            wait;
4929         uint32_t         sgptr;
4930         uint32_t         resid;
4931         uint64_t         dataptr;
4932
4933         AHD_ASSERT_MODES(ahd, AHD_MODE_DFF0_MSK|AHD_MODE_DFF1_MSK,
4934                          AHD_MODE_DFF0_MSK|AHD_MODE_DFF1_MSK);
4935                          
4936         scb_index = ahd_get_scbptr(ahd);
4937         scb = ahd_lookup_scb(ahd, scb_index);
4938
4939         /*
4940          * Release and reacquire the FIFO so we
4941          * have a clean slate.
4942          */
4943         ahd_outb(ahd, DFFSXFRCTL, CLRCHN);
4944         wait = 1000;
4945         while (--wait && !(ahd_inb(ahd, MDFFSTAT) & FIFOFREE))
4946                 aic_delay(100);
4947         if (wait == 0) {
4948                 ahd_print_path(ahd, scb);
4949                 kprintf("ahd_reinitialize_dataptrs: Forcing FIFO free.\n");
4950                 ahd_outb(ahd, DFFSXFRCTL, RSTCHN|CLRSHCNT);
4951         }
4952         saved_modes = ahd_save_modes(ahd);
4953         ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
4954         ahd_outb(ahd, DFFSTAT,
4955                  ahd_inb(ahd, DFFSTAT)
4956                 | (saved_modes == 0x11 ? CURRFIFO_1 : CURRFIFO_0));
4957
4958         /*
4959          * Determine initial values for data_addr and data_cnt
4960          * for resuming the data phase.
4961          */
4962         sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR);
4963         sgptr &= SG_PTR_MASK;
4964
4965         resid = (ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT + 2) << 16)
4966               | (ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT + 1) << 8)
4967               | ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT);
4968
4969         if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
4970                 struct ahd_dma64_seg *sg;
4971
4972                 sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);
4973
4974                 /* The residual sg_ptr always points to the next sg */
4975                 sg--;
4976
4977                 dataptr = aic_le64toh(sg->addr)
4978                         + (aic_le32toh(sg->len) & AHD_SG_LEN_MASK)
4979                         - resid;
4980                 ahd_outl(ahd, HADDR + 4, dataptr >> 32);
4981         } else {
4982                 struct   ahd_dma_seg *sg;
4983
4984                 sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);
4985
4986                 /* The residual sg_ptr always points to the next sg */
4987                 sg--;
4988
4989                 dataptr = aic_le32toh(sg->addr)
4990                         + (aic_le32toh(sg->len) & AHD_SG_LEN_MASK)
4991                         - resid;
4992                 ahd_outb(ahd, HADDR + 4,
4993                          (aic_le32toh(sg->len) & ~AHD_SG_LEN_MASK) >> 24);
4994         }
4995         ahd_outl(ahd, HADDR, dataptr);
4996         ahd_outb(ahd, HCNT + 2, resid >> 16);
4997         ahd_outb(ahd, HCNT + 1, resid >> 8);
4998         ahd_outb(ahd, HCNT, resid);
4999 }
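
/*
 * Illustrative note: the residual handled above is a 24-bit quantity
 * kept in byte-wide registers, so it is reassembled from three reads
 * and split back into three writes.  Hypothetical helpers, not driver
 * API.
 */
#if 0
static uint32_t
example_merge24(uint8_t lo, uint8_t mid, uint8_t hi)
{
        return (((uint32_t)hi << 16) | ((uint32_t)mid << 8) | lo);
}

static void
example_split24(uint32_t value, uint8_t *lo, uint8_t *mid, uint8_t *hi)
{
        *lo = value & 0xFF;
        *mid = (value >> 8) & 0xFF;
        *hi = (value >> 16) & 0xFF;
}
#endif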
5000
5001 /*
5002  * Handle the effects of issuing a bus device reset message.
5003  */
5004 static void
5005 ahd_handle_devreset(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
5006                     u_int lun, cam_status status, char *message,
5007                     int verbose_level)
5008 {
5009 #ifdef AHD_TARGET_MODE
5010         struct ahd_tmode_tstate* tstate;
5011 #endif
5012         int found;
5013
5014         found = ahd_abort_scbs(ahd, devinfo->target, devinfo->channel,
5015                                lun, SCB_LIST_NULL, devinfo->role,
5016                                status);
5017
5018 #ifdef AHD_TARGET_MODE
5019         /*
5020          * Send an immediate notify ccb to all target mode peripheral
5021          * drivers affected by this action.
5022          */
5023         tstate = ahd->enabled_targets[devinfo->our_scsiid];
5024         if (tstate != NULL) {
5025                 u_int cur_lun;
5026                 u_int max_lun;
5027
5028                 if (lun != CAM_LUN_WILDCARD) {
5029                         cur_lun = lun;
5030                         max_lun = lun;
5031                 } else {
5032                         cur_lun = 0;
5033                         max_lun = AHD_NUM_LUNS - 1;
5034                 }
5035                 for (; cur_lun <= max_lun; cur_lun++) {
5036                         struct ahd_tmode_lstate* lstate;
5037
5038                         lstate = tstate->enabled_luns[cur_lun];
5039                         if (lstate == NULL)
5040                                 continue;
5041
5042                         ahd_queue_lstate_event(ahd, lstate, devinfo->our_scsiid,
5043                                                MSG_BUS_DEV_RESET, /*arg*/0);
5044                         ahd_send_lstate_events(ahd, lstate);
5045                 }
5046         }
5047 #endif
5048
5049         /*
5050          * Go back to async/narrow transfers and renegotiate.
5051          */
5052         ahd_set_width(ahd, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
5053                       AHD_TRANS_CUR, /*paused*/TRUE);
5054         ahd_set_syncrate(ahd, devinfo, /*period*/0, /*offset*/0,
5055                          /*ppr_options*/0, AHD_TRANS_CUR, /*paused*/TRUE);
5056         
5057         ahd_send_async(ahd, devinfo->channel, devinfo->target,
5058                        lun, AC_SENT_BDR, NULL);
5059
5060         if (message != NULL
5061          && (verbose_level <= bootverbose))
5062                 kprintf("%s: %s on %c:%d. %d SCBs aborted\n", ahd_name(ahd),
5063                        message, devinfo->channel, devinfo->target, found);
5064 }
5065
5066 #ifdef AHD_TARGET_MODE
5067 static void
5068 ahd_setup_target_msgin(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
5069                        struct scb *scb)
5070 {
5071
5072         /*              
5073          * To facilitate adding multiple messages together,
5074          * each routine should increment the index and len
5075          * variables instead of setting them explicitly.
5076          */             
5077         ahd->msgout_index = 0;
5078         ahd->msgout_len = 0;
5079
5080         if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0)
5081                 ahd_build_transfer_msg(ahd, devinfo);
5082         else
5083                 panic("ahd_intr: AWAITING target message with no message");
5084
5085         ahd->msgout_index = 0;
5086         ahd->msg_type = MSG_TYPE_TARGET_MSGIN;
5087 }
5088 #endif
5089 /**************************** Initialization **********************************/
5090 static u_int
5091 ahd_sglist_size(struct ahd_softc *ahd)
5092 {
5093         bus_size_t list_size;
5094
5095         list_size = sizeof(struct ahd_dma_seg) * AHD_NSEG;
5096         if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0)
5097                 list_size = sizeof(struct ahd_dma64_seg) * AHD_NSEG;
5098         return (list_size);
5099 }
5100
5101 /*
5102  * Calculate the optimum S/G List allocation size.  S/G elements used
5103  * for a given transaction must be physically contiguous.  Assume the
5104  * OS will allocate full pages to us, so it doesn't make sense to request
5105  * less than a page.
5106  */
5107 static u_int
5108 ahd_sglist_allocsize(struct ahd_softc *ahd)
5109 {
5110         bus_size_t sg_list_increment;
5111         bus_size_t sg_list_size;
5112         bus_size_t max_list_size;
5113         bus_size_t best_list_size;
5114
5115         /* Start out with the minimum required for AHD_NSEG. */
5116         sg_list_increment = ahd_sglist_size(ahd);
5117         sg_list_size = sg_list_increment;
5118
5119         /* Get us as close as possible to a page in size. */
5120         while ((sg_list_size + sg_list_increment) <= PAGE_SIZE)
5121                 sg_list_size += sg_list_increment;
5122
5123         /*
5124          * Try to reduce the amount of wastage by allocating
5125          * multiple pages.
5126          */
5127         best_list_size = sg_list_size;
5128         max_list_size = roundup(sg_list_increment, PAGE_SIZE);
5129         if (max_list_size < 4 * PAGE_SIZE)
5130                 max_list_size = 4 * PAGE_SIZE;
5131         if (max_list_size > (AHD_SCB_MAX_ALLOC * sg_list_increment))
5132                 max_list_size = (AHD_SCB_MAX_ALLOC * sg_list_increment);
5133         while ((sg_list_size + sg_list_increment) <= max_list_size
5134            &&  (sg_list_size % PAGE_SIZE) != 0) {
5135                 bus_size_t new_mod;
5136                 bus_size_t best_mod;
5137
5138                 sg_list_size += sg_list_increment;
5139                 new_mod = sg_list_size % PAGE_SIZE;
5140                 best_mod = best_list_size % PAGE_SIZE;
5141                 if (new_mod > best_mod || new_mod == 0) {
5142                         best_list_size = sg_list_size;
5143                 }
5144         }
5145         return (best_list_size);
5146 }
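
/*
 * Illustrative sketch only: the same "grow the chunk until page
 * wastage is minimized" heuristic as above, written against plain
 * parameters.  For example, with a hypothetical 1536-byte list and
 * 4096-byte pages the search settles on 12288 bytes: three pages
 * holding exactly eight lists with no wastage.  Hypothetical helper,
 * not driver API.
 */
#if 0
static size_t
example_best_chunk(size_t increment, size_t page, size_t max_chunk)
{
        size_t size;
        size_t best;

        /* Fill a single page as far as possible. */
        size = increment;
        while (size + increment <= page)
                size += increment;
        best = size;

        /* Grow in whole increments, keeping the size that wastes least. */
        while (size + increment <= max_chunk && (size % page) != 0) {
                size += increment;
                if ((size % page) > (best % page) || (size % page) == 0)
                        best = size;
        }
        return (best);
}
#endif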
5147
5148 /*
5149  * Allocate a controller structure for a new device
5150  * and perform its initial setup.
5151  */
5152 struct ahd_softc *
5153 ahd_alloc(void *platform_arg, char *name)
5154 {
5155         struct  ahd_softc *ahd;
5156
5157 #if !defined(__DragonFly__) && !defined(__FreeBSD__)
5158         ahd = kmalloc(sizeof(*ahd), M_DEVBUF, M_INTWAIT);
5159 #else
5160         ahd = device_get_softc((device_t)platform_arg);
5161 #endif
5162         memset(ahd, 0, sizeof(*ahd));
5163         ahd->seep_config = kmalloc(sizeof(*ahd->seep_config),M_DEVBUF,M_INTWAIT);
5164         LIST_INIT(&ahd->pending_scbs);
5165         /* We don't know our unit number until the OSM sets it */
5166         ahd->name = name;
5167         ahd->unit = -1;
5168         ahd->description = NULL;
5169         ahd->bus_description = NULL;
5170         ahd->channel = 'A';
5171         ahd->chip = AHD_NONE;
5172         ahd->features = AHD_FENONE;
5173         ahd->bugs = AHD_BUGNONE;
5174         ahd->flags = AHD_SPCHK_ENB_A|AHD_RESET_BUS_A|AHD_TERM_ENB_A
5175                    | AHD_EXTENDED_TRANS_A|AHD_STPWLEVEL_A;
5176         aic_timer_init(&ahd->reset_timer);
5177         aic_timer_init(&ahd->stat_timer);
5178         ahd->int_coalescing_timer = AHD_INT_COALESCING_TIMER_DEFAULT;
5179         ahd->int_coalescing_maxcmds = AHD_INT_COALESCING_MAXCMDS_DEFAULT;
5180         ahd->int_coalescing_mincmds = AHD_INT_COALESCING_MINCMDS_DEFAULT;
5181         ahd->int_coalescing_threshold = AHD_INT_COALESCING_THRESHOLD_DEFAULT;
5182         ahd->int_coalescing_stop_threshold =
5183             AHD_INT_COALESCING_STOP_THRESHOLD_DEFAULT;
5184
5185         if (ahd_platform_alloc(ahd, platform_arg) != 0) {
5186                 ahd_free(ahd);
5187                 ahd = NULL;
5188         }
5189 #ifdef AHD_DEBUG
5190         if ((ahd_debug & AHD_SHOW_MEMORY) != 0) {
5191                 kprintf("%s: scb size = 0x%x, hscb size = 0x%x\n",
5192                        ahd_name(ahd), (u_int)sizeof(struct scb),
5193                        (u_int)sizeof(struct hardware_scb));
5194         }
5195 #endif
5196         return (ahd);
5197 }
5198
5199 int
5200 ahd_softc_init(struct ahd_softc *ahd)
5201 {
5202
5203         ahd->unpause = 0;
5204         ahd->pause = PAUSE; 
5205         return (0);
5206 }
5207
5208 void
5209 ahd_softc_insert(struct ahd_softc *ahd)
5210 {
5211         struct ahd_softc *list_ahd;
5212
5213 #if AIC_PCI_CONFIG > 0
5214         /*
5215          * Second Function PCI devices need to inherit some
5216          * settings from function 0.
5217          */
5218         if ((ahd->features & AHD_MULTI_FUNC) != 0) {
5219                 TAILQ_FOREACH(list_ahd, &ahd_tailq, links) {
5220                         aic_dev_softc_t list_pci;
5221                         aic_dev_softc_t pci;
5222
5223                         list_pci = list_ahd->dev_softc;
5224                         pci = ahd->dev_softc;
5225                         if (aic_get_pci_slot(list_pci) == aic_get_pci_slot(pci)
5226                          && aic_get_pci_bus(list_pci) == aic_get_pci_bus(pci)) {
5227                                 struct ahd_softc *master;
5228                                 struct ahd_softc *slave;
5229
5230                                 if (aic_get_pci_function(list_pci) == 0) {
5231                                         master = list_ahd;
5232                                         slave = ahd;
5233                                 } else {
5234                                         master = ahd;
5235                                         slave = list_ahd;
5236                                 }
5237                                 slave->flags &= ~AHD_BIOS_ENABLED; 
5238                                 slave->flags |=
5239                                     master->flags & AHD_BIOS_ENABLED;
5240                                 break;
5241                         }
5242                 }
5243         }
5244 #endif
5245
5246         /*
5247          * Insertion sort into our list of softcs.
5248          */
5249         list_ahd = TAILQ_FIRST(&ahd_tailq);
5250         while (list_ahd != NULL
5251             && ahd_softc_comp(ahd, list_ahd) <= 0)
5252                 list_ahd = TAILQ_NEXT(list_ahd, links);
5253         if (list_ahd != NULL)
5254                 TAILQ_INSERT_BEFORE(list_ahd, ahd, links);
5255         else
5256                 TAILQ_INSERT_TAIL(&ahd_tailq, ahd, links);
5257         ahd->init_level++;
5258 }
5259
5260 /*
5261  * Verify that the passed in softc pointer is for a
5262  * controller that is still configured.
5263  */
5264 struct ahd_softc *
5265 ahd_find_softc(struct ahd_softc *ahd)
5266 {
5267         struct ahd_softc *list_ahd;
5268
5269         TAILQ_FOREACH(list_ahd, &ahd_tailq, links) {
5270                 if (list_ahd == ahd)
5271                         return (ahd);
5272         }
5273         return (NULL);
5274 }
5275
5276 void
5277 ahd_set_unit(struct ahd_softc *ahd, int unit)
5278 {
5279         ahd->unit = unit;
5280 }
5281
5282 void
5283 ahd_set_name(struct ahd_softc *ahd, char *name)
5284 {
5285         if (ahd->name != NULL)
5286                 kfree(ahd->name, M_DEVBUF);
5287         ahd->name = name;
5288 }
5289
5290 void
5291 ahd_free(struct ahd_softc *ahd)
5292 {
5293         int i;
5294
5295         ahd_terminate_recovery_thread(ahd);
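        /*
         * Tear down resources in the reverse order of their creation.
         * init_level records how far setup progressed, and each case
         * falls through so a partially initialized controller is
         * unwound cleanly.
         */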
5296         switch (ahd->init_level) {
5297         default:
5298         case 5:
5299                 ahd_shutdown(ahd);
5300                 /* FALLTHROUGH */
5301         case 4:
5302                 aic_dmamap_unload(ahd, ahd->shared_data_dmat,
5303                                   ahd->shared_data_map.dmamap);
5304                 /* FALLTHROUGH */
5305         case 3:
5306                 aic_dmamem_free(ahd, ahd->shared_data_dmat, ahd->qoutfifo,
5307                                 ahd->shared_data_map.dmamap);
5308                 aic_dmamap_destroy(ahd, ahd->shared_data_dmat,
5309                                    ahd->shared_data_map.dmamap);
5310                 /* FALLTHROUGH */
5311         case 2:
5312                 aic_dma_tag_destroy(ahd, ahd->shared_data_dmat);
5313         case 1:
5314 #ifndef __linux__
5315                 aic_dma_tag_destroy(ahd, ahd->buffer_dmat);
5316 #endif
5317                 break;
5318         case 0:
5319                 break;
5320         }
5321
5322 #ifndef __linux__
5323         aic_dma_tag_destroy(ahd, ahd->parent_dmat);
5324 #endif
5325         ahd_platform_free(ahd);
5326         ahd_fini_scbdata(ahd);
5327         for (i = 0; i < AHD_NUM_TARGETS; i++) {
5328                 struct ahd_tmode_tstate *tstate;
5329
5330                 tstate = ahd->enabled_targets[i];
5331                 if (tstate != NULL) {
5332 #ifdef AHD_TARGET_MODE
5333                         int j;
5334
5335                         for (j = 0; j < AHD_NUM_LUNS; j++) {
5336                                 struct ahd_tmode_lstate *lstate;
5337
5338                                 lstate = tstate->enabled_luns[j];
5339                                 if (lstate != NULL) {
5340                                         xpt_free_path(lstate->path);
5341                                         kfree(lstate, M_DEVBUF);
5342                                 }
5343                         }
5344 #endif
5345                         kfree(tstate, M_DEVBUF);
5346                 }
5347         }
5348 #ifdef AHD_TARGET_MODE
5349         if (ahd->black_hole != NULL) {
5350                 xpt_free_path(ahd->black_hole->path);
5351                 kfree(ahd->black_hole, M_DEVBUF);
5352         }
5353 #endif
5354         if (ahd->name != NULL)
5355                 kfree(ahd->name, M_DEVBUF);
5356         if (ahd->seep_config != NULL)
5357                 kfree(ahd->seep_config, M_DEVBUF);
5358         if (ahd->saved_stack != NULL)
5359                 kfree(ahd->saved_stack, M_DEVBUF);
5360 #if !defined(__DragonFly__) && !defined(__FreeBSD__)
5361         kfree(ahd, M_DEVBUF);
5362 #endif
5363         return;
5364 }
5365
5366 void
5367 ahd_shutdown(void *arg)
5368 {
5369         struct  ahd_softc *ahd;
5370
5371         ahd = (struct ahd_softc *)arg;
5372
5373         /*
5374          * Stop periodic timer callbacks.
5375          */
5376         aic_timer_stop(&ahd->reset_timer);
5377         aic_timer_stop(&ahd->stat_timer);
5378
5379         /* This will reset most registers to 0, but not all */
5380         ahd_reset(ahd, /*reinit*/FALSE);
5381 }
5382
5383 /*
5384  * Reset the controller and record some information about it
5385  * that is only available just after a reset.  If "reinit" is
5386  * non-zero, this reset occurred after initial configuration
5387  * and the caller requests that the chip be fully reinitialized
5388  * to a runnable state.  Chip interrupts are *not* enabled after
5389  * a reinitialization.  The caller must enable interrupts via
5390  * ahd_intr_enable().
5391  */
5392 int
5393 ahd_reset(struct ahd_softc *ahd, int reinit)
5394 {
5395         u_int    sxfrctl1;
5396         int      wait;
5397         uint32_t cmd;
5398         
5399         /*
5400          * Preserve the value of the SXFRCTL1 register for all channels.
5401          * It contains settings that affect termination and we don't want
5402          * to disturb the integrity of the bus.
5403          */
5404         ahd_pause(ahd);
5405         ahd_update_modes(ahd);
5406         ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
5407         sxfrctl1 = ahd_inb(ahd, SXFRCTL1);
5408
5409         cmd = aic_pci_read_config(ahd->dev_softc, PCIR_COMMAND, /*bytes*/2);
5410         if ((ahd->bugs & AHD_PCIX_CHIPRST_BUG) != 0) {
5411                 uint32_t mod_cmd;
5412
5413                 /*
5414                  * A4 Razor #632
5415                  * During the assertion of CHIPRST, the chip
5416                  * does not disable its parity logic prior to
5417                  * the start of the reset.  This may cause a
5418                  * parity error to be detected and thus a
5419                  * spurious SERR or PERR assertion.  Disable
5420                  * PERR and SERR responses during the CHIPRST.
5421                  */
5422                 mod_cmd = cmd & ~(PCIM_CMD_PERRESPEN|PCIM_CMD_SERRESPEN);
5423                 aic_pci_write_config(ahd->dev_softc, PCIR_COMMAND,
5424                                      mod_cmd, /*bytes*/2);
5425         }
5426         ahd_outb(ahd, HCNTRL, CHIPRST | ahd->pause);
5427
5428         /*
5429          * Ensure that the reset has finished.  We delay 1000us
5430          * prior to reading the register to make sure the chip
5431          * has sufficiently completed its reset to handle register
5432          * accesses.
5433          */
5434         wait = 1000;
5435         do {
5436                 aic_delay(1000);
5437         } while (--wait && !(ahd_inb(ahd, HCNTRL) & CHIPRSTACK));
5438
5439         if (wait == 0) {
5440                 kprintf("%s: WARNING - Failed chip reset!  "
5441                        "Trying to initialize anyway.\n", ahd_name(ahd));
5442         }
5443         ahd_outb(ahd, HCNTRL, ahd->pause);
5444
5445         if ((ahd->bugs & AHD_PCIX_CHIPRST_BUG) != 0) {
5446                 /*
5447                  * Clear any latched PCI error status and restore
5448                  * previous SERR and PERR response enables.
5449                  */
5450                 aic_pci_write_config(ahd->dev_softc, PCIR_STATUS + 1,
5451                                      0xFF, /*bytes*/1);
5452                 aic_pci_write_config(ahd->dev_softc, PCIR_COMMAND,
5453                                      cmd, /*bytes*/2);
5454         }
5455
5456         /*
5457          * Mode should be SCSI after a chip reset, but let's
5458          * set it just to be safe.  We touch the MODE_PTR
5459          * register directly so as to bypass the lazy update
5460          * code in ahd_set_modes().
5461          */
5462         ahd_known_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
5463         ahd_outb(ahd, MODE_PTR,
5464                  ahd_build_mode_state(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI));
5465
5466         /*
5467          * Restore SXFRCTL1.
5468          *
5469          * We must always initialize STPWEN to 1 before we
5470          * restore the saved values.  STPWEN is initialized
5471          * to a tri-state condition which can only be cleared
5472          * by turning it on.
5473          */
5474         ahd_outb(ahd, SXFRCTL1, sxfrctl1|STPWEN);
5475         ahd_outb(ahd, SXFRCTL1, sxfrctl1);
5476
5477         /* Determine chip configuration */
5478         ahd->features &= ~AHD_WIDE;
5479         if ((ahd_inb(ahd, SBLKCTL) & SELWIDE) != 0)
5480                 ahd->features |= AHD_WIDE;
5481
5482         /*
5483          * If a recovery action has forced a chip reset,
5484          * re-initialize the chip to our liking.
5485          */
5486         if (reinit != 0)
5487                 ahd_chip_init(ahd);
5488
5489         return (0);
5490 }
5491
5492 /*
5493  * Determine the number of SCBs available on the controller
5494  */
5495 int
5496 ahd_probe_scbs(struct ahd_softc *ahd) {
5497         int i;
5498
5499         AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
5500                          ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
5501         for (i = 0; i < AHD_SCB_MAX; i++) {
5502                 int j;
5503
5504                 ahd_set_scbptr(ahd, i);
5505                 ahd_outw(ahd, SCB_BASE, i);
5506                 for (j = 2; j < 64; j++)
5507                         ahd_outb(ahd, SCB_BASE+j, 0);
5508                 /* Start out life as unallocated (needing an abort) */
5509                 ahd_outb(ahd, SCB_CONTROL, MK_MESSAGE);
5510                 if (ahd_inw_scbram(ahd, SCB_BASE) != i)
5511                         break;
5512                 ahd_set_scbptr(ahd, 0);
5513                 if (ahd_inw_scbram(ahd, SCB_BASE) != 0)
5514                         break;
5515         }
5516         return (i);
5517 }
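
/*
 * Illustrative sketch only: why the probe above re-reads SCB 0 after
 * every write.  If the SCB RAM wraps, index i aliases onto i modulo
 * the real SCB count, so the first aliased write clobbers SCB 0 and
 * the probe stops at the true count.  Hypothetical model, not driver
 * code.
 */
#if 0
static int
example_probe_wrapping_ram(uint16_t *ram, int real_count, int max_probe)
{
        int i;

        for (i = 0; i < max_probe; i++) {
                ram[i % real_count] = i;        /* model the address wrap */
                if (ram[i % real_count] != i)
                        break;                  /* write did not stick */
                if (ram[0] != 0)
                        break;                  /* write aliased onto SCB 0 */
        }
        return (i);
}
#endif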
5518
5519 static void
5520 ahd_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 
5521 {
5522         bus_addr_t *baddr;
5523
5524         baddr = (bus_addr_t *)arg;
5525         *baddr = segs->ds_addr;
5526 }
5527
5528 static void
5529 ahd_initialize_hscbs(struct ahd_softc *ahd)
5530 {
5531         int i;
5532
5533         for (i = 0; i < ahd->scb_data.maxhscbs; i++) {
5534                 ahd_set_scbptr(ahd, i);
5535
5536                 /* Clear the control byte. */
5537                 ahd_outb(ahd, SCB_CONTROL, 0);
5538
5539                 /* Set the next pointer */
5540                 ahd_outw(ahd, SCB_NEXT, SCB_LIST_NULL);
5541         }
5542 }
5543
5544 static int
5545 ahd_init_scbdata(struct ahd_softc *ahd)
5546 {
5547         struct  scb_data *scb_data;
5548         int     i;
5549
5550         scb_data = &ahd->scb_data;
5551         TAILQ_INIT(&scb_data->free_scbs);
5552         for (i = 0; i < AHD_NUM_TARGETS * AHD_NUM_LUNS_NONPKT; i++)
5553                 LIST_INIT(&scb_data->free_scb_lists[i]);
5554         LIST_INIT(&scb_data->any_dev_free_scb_list);
5555         SLIST_INIT(&scb_data->hscb_maps);
5556         SLIST_INIT(&scb_data->sg_maps);
5557         SLIST_INIT(&scb_data->sense_maps);
5558
5559         /* Determine the number of hardware SCBs and initialize them */
5560         scb_data->maxhscbs = ahd_probe_scbs(ahd);
5561         if (scb_data->maxhscbs == 0) {
5562                 kprintf("%s: No SCB space found\n", ahd_name(ahd));
5563                 return (ENXIO);
5564         }
5565
5566         ahd_initialize_hscbs(ahd);
5567
5568         /*
5569          * Create our DMA tags.  These tags define the kinds of device
5570          * accessible memory allocations and memory mappings we will
5571          * need to perform during normal operation.
5572          *
5573          * Unless we need to further restrict the allocation, we rely
5574          * on the restrictions of the parent dmat, hence the common
5575          * use of MAXADDR and MAXSIZE.
5576          */
5577
5578         /* DMA tag for our hardware scb structures */
5579         if (aic_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1,
5580                                /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
5581                                /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
5582                                /*highaddr*/BUS_SPACE_MAXADDR,
5583                                /*filter*/NULL, /*filterarg*/NULL,
5584                                PAGE_SIZE, /*nsegments*/1,
5585                                /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
5586                                /*flags*/0, &scb_data->hscb_dmat) != 0) {
5587                 goto error_exit;
5588         }
5589
5590         scb_data->init_level++;
5591
5592         /* DMA tag for our S/G structures. */
5593         if (aic_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/8,
5594                                /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
5595                                /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
5596                                /*highaddr*/BUS_SPACE_MAXADDR,
5597                                /*filter*/NULL, /*filterarg*/NULL,
5598                                ahd_sglist_allocsize(ahd), /*nsegments*/1,
5599                                /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
5600                                /*flags*/0, &scb_data->sg_dmat) != 0) {
5601                 goto error_exit;
5602         }
5603 #ifdef AHD_DEBUG
5604         if ((ahd_debug & AHD_SHOW_MEMORY) != 0)
5605                 kprintf("%s: ahd_sglist_allocsize = 0x%x\n", ahd_name(ahd),
5606                        ahd_sglist_allocsize(ahd));
5607 #endif
5608
5609         scb_data->init_level++;
5610
5611         /* DMA tag for our sense buffers.  We allocate in page sized chunks */
5612         if (aic_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1,
5613                                /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
5614                                /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
5615                                /*highaddr*/BUS_SPACE_MAXADDR,
5616                                /*filter*/NULL, /*filterarg*/NULL,
5617                                PAGE_SIZE, /*nsegments*/1,
5618                                /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
5619                                /*flags*/0, &scb_data->sense_dmat) != 0) {
5620                 goto error_exit;
5621         }
5622
5623         scb_data->init_level++;
5624
5625         /* Perform initial CCB allocation */
5626         ahd_alloc_scbs(ahd);
5627
5628         if (scb_data->numscbs == 0) {
5629                 kprintf("%s: ahd_init_scbdata - "
5630                        "Unable to allocate initial scbs\n",
5631                        ahd_name(ahd));
5632                 goto error_exit;
5633         }
5634
5635         /*
5636          * Note that we were successful
5637          */
5638         return (0); 
5639
5640 error_exit:
5641
5642         return (ENOMEM);
5643 }
5644
5645 static struct scb *
5646 ahd_find_scb_by_tag(struct ahd_softc *ahd, u_int tag)
5647 {
5648         struct scb *scb;
5649
5650         /*
5651          * Look on the pending list.
5652          */
5653         LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) {
5654                 if (SCB_GET_TAG(scb) == tag)
5655                         return (scb);
5656         }
5657
5658         /*
5659          * Then on all of the collision free lists.
5660          */
5661         TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) {
5662                 struct scb *list_scb;
5663
5664                 list_scb = scb;
5665                 do {
5666                         if (SCB_GET_TAG(list_scb) == tag)
5667                                 return (list_scb);
5668                         list_scb = LIST_NEXT(list_scb, collision_links);
5669                 } while (list_scb);
5670         }
5671
5672         /*
5673          * And finally on the generic free list.
5674          */
5675         LIST_FOREACH(scb, &ahd->scb_data.any_dev_free_scb_list, links.le) {
5676                 if (SCB_GET_TAG(scb) == tag)
5677                         return (scb);
5678         }
5679
5680         return (NULL);
5681 }
5682
5683 static void
5684 ahd_fini_scbdata(struct ahd_softc *ahd)
5685 {
5686         struct scb_data *scb_data;
5687
5688         scb_data = &ahd->scb_data;
5689         if (scb_data == NULL)
5690                 return;
5691
5692         switch (scb_data->init_level) {
5693         default:
5694         case 7:
5695         {
5696                 struct map_node *sns_map;
5697
5698                 while ((sns_map = SLIST_FIRST(&scb_data->sense_maps)) != NULL) {
5699                         SLIST_REMOVE_HEAD(&scb_data->sense_maps, links);
5700                         aic_dmamap_unload(ahd, scb_data->sense_dmat,
5701                                           sns_map->dmamap);
5702                         aic_dmamem_free(ahd, scb_data->sense_dmat,
5703                                         sns_map->vaddr, sns_map->dmamap);
5704                         kfree(sns_map, M_DEVBUF);
5705                 }
5706                 aic_dma_tag_destroy(ahd, scb_data->sense_dmat);
5707                 /* FALLTHROUGH */
5708         }
5709         case 6:
5710         {
5711                 struct map_node *sg_map;
5712
5713                 while ((sg_map = SLIST_FIRST(&scb_data->sg_maps)) != NULL) {
5714                         SLIST_REMOVE_HEAD(&scb_data->sg_maps, links);
5715                         aic_dmamap_unload(ahd, scb_data->sg_dmat,
5716                                           sg_map->dmamap);
5717                         aic_dmamem_free(ahd, scb_data->sg_dmat,
5718                                         sg_map->vaddr, sg_map->dmamap);
5719                         kfree(sg_map, M_DEVBUF);
5720                 }
5721                 aic_dma_tag_destroy(ahd, scb_data->sg_dmat);
5722                 /* FALLTHROUGH */
5723         }
5724         case 5:
5725         {
5726                 struct map_node *hscb_map;
5727
5728                 while ((hscb_map = SLIST_FIRST(&scb_data->hscb_maps)) != NULL) {
5729                         SLIST_REMOVE_HEAD(&scb_data->hscb_maps, links);
5730                         aic_dmamap_unload(ahd, scb_data->hscb_dmat,
5731                                           hscb_map->dmamap);
5732                         aic_dmamem_free(ahd, scb_data->hscb_dmat,
5733                                         hscb_map->vaddr, hscb_map->dmamap);
5734                         kfree(hscb_map, M_DEVBUF);
5735                 }
5736                 aic_dma_tag_destroy(ahd, scb_data->hscb_dmat);
5737                 /* FALLTHROUGH */
5738         }
5739         case 4:
5740         case 3:
5741         case 2:
5742         case 1:
5743         case 0:
5744                 break;
5745         }
5746 }
5747
5748 /*
5749  * DSP filter Bypass must be enabled until the first selection
5750  * after a change in bus mode (Razor #491 and #493).
5751  */
5752 static void
5753 ahd_setup_iocell_workaround(struct ahd_softc *ahd)
5754 {
5755         ahd_mode_state saved_modes;
5756
5757         saved_modes = ahd_save_modes(ahd);
5758         ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
5759         ahd_outb(ahd, DSPDATACTL, ahd_inb(ahd, DSPDATACTL)
5760                | BYPASSENAB | RCVROFFSTDIS | XMITOFFSTDIS);
5761         ahd_outb(ahd, SIMODE0, ahd_inb(ahd, SIMODE0) | (ENSELDO|ENSELDI));
5762 #ifdef AHD_DEBUG
5763         if ((ahd_debug & AHD_SHOW_MISC) != 0)
5764                 kprintf("%s: Setting up iocell workaround\n", ahd_name(ahd));
5765 #endif
5766         ahd_restore_modes(ahd, saved_modes);
5767         ahd->flags &= ~AHD_HAD_FIRST_SEL;
5768 }
5769
5770 static void
5771 ahd_iocell_first_selection(struct ahd_softc *ahd)
5772 {
5773         ahd_mode_state  saved_modes;
5774         u_int           sblkctl;
5775
5776         if ((ahd->flags & AHD_HAD_FIRST_SEL) != 0)
5777                 return;
5778         saved_modes = ahd_save_modes(ahd);
5779         ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
5780         sblkctl = ahd_inb(ahd, SBLKCTL);
5781         ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
5782 #ifdef AHD_DEBUG
5783         if ((ahd_debug & AHD_SHOW_MISC) != 0)
5784                 kprintf("%s: iocell first selection\n", ahd_name(ahd));
5785 #endif
5786         if ((sblkctl & ENAB40) != 0) {
5787                 ahd_outb(ahd, DSPDATACTL,
5788                          ahd_inb(ahd, DSPDATACTL) & ~BYPASSENAB);
5789 #ifdef AHD_DEBUG
5790                 if ((ahd_debug & AHD_SHOW_MISC) != 0)
5791                         kprintf("%s: BYPASS now disabled\n", ahd_name(ahd));
5792 #endif
5793         }
5794         ahd_outb(ahd, SIMODE0, ahd_inb(ahd, SIMODE0) & ~(ENSELDO|ENSELDI));
5795         ahd_outb(ahd, CLRINT, CLRSCSIINT);
5796         ahd_restore_modes(ahd, saved_modes);
5797         ahd->flags |= AHD_HAD_FIRST_SEL;
5798 }
5799
5800 /*************************** SCB Management ***********************************/
5801 static void
5802 ahd_add_col_list(struct ahd_softc *ahd, struct scb *scb, u_int col_idx)
5803 {
5804         struct  scb_list *free_list;
5805         struct  scb_tailq *free_tailq;
5806         struct  scb *first_scb;
5807
5808         scb->flags |= SCB_ON_COL_LIST;
5809         AHD_SET_SCB_COL_IDX(scb, col_idx);
5810         free_list = &ahd->scb_data.free_scb_lists[col_idx];
5811         free_tailq = &ahd->scb_data.free_scbs;
5812         first_scb = LIST_FIRST(free_list);
5813         if (first_scb != NULL) {
5814                 LIST_INSERT_AFTER(first_scb, scb, collision_links);
5815         } else {
5816                 LIST_INSERT_HEAD(free_list, scb, collision_links);
5817                 TAILQ_INSERT_TAIL(free_tailq, scb, links.tqe);
5818         }
5819 }
5820
5821 static void
5822 ahd_rem_col_list(struct ahd_softc *ahd, struct scb *scb)
5823 {
5824         struct  scb_list *free_list;
5825         struct  scb_tailq *free_tailq;
5826         struct  scb *first_scb;
5827         u_int   col_idx;
5828
5829         scb->flags &= ~SCB_ON_COL_LIST;
5830         col_idx = AHD_GET_SCB_COL_IDX(ahd, scb);
5831         free_list = &ahd->scb_data.free_scb_lists[col_idx];
5832         free_tailq = &ahd->scb_data.free_scbs;
5833         first_scb = LIST_FIRST(free_list);
5834         if (first_scb == scb) {
5835                 struct scb *next_scb;
5836
5837                 /*
5838                  * Maintain order in the collision free
5839                  * lists for fairness if this device has
5840                  * other colliding tags active.
5841                  */
5842                 next_scb = LIST_NEXT(scb, collision_links);
5843                 if (next_scb != NULL) {
5844                         TAILQ_INSERT_AFTER(free_tailq, scb,
5845                                            next_scb, links.tqe);
5846                 }
5847                 TAILQ_REMOVE(free_tailq, scb, links.tqe);
5848         }
5849         LIST_REMOVE(scb, collision_links);
5850 }
5851
5852 /*
5853  * Get a free scb. If there are none, see if we can allocate a new SCB.
5854  */
5855 struct scb *
5856 ahd_get_scb(struct ahd_softc *ahd, u_int col_idx)
5857 {
5858         struct scb *scb;
5859         int tries;
5860
5861         tries = 0;
5862 look_again:
5863         TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) {
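                /*
                 * Skip collision-list heads whose column matches the
                 * requested device; their partner SCB is still active,
                 * non-packetized, on that device, so the colliding tag
                 * is not handed back to it.
                 */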
5864                 if (AHD_GET_SCB_COL_IDX(ahd, scb) != col_idx) {
5865                         ahd_rem_col_list(ahd, scb);
5866                         goto found;
5867                 }
5868         }
5869         if ((scb = LIST_FIRST(&ahd->scb_data.any_dev_free_scb_list)) == NULL) {
5870
5871                 if (tries++ != 0)
5872                         return (NULL);
5873                 ahd_alloc_scbs(ahd);
5874                 goto look_again;
5875         }
5876         LIST_REMOVE(scb, links.le);
5877         if (col_idx != AHD_NEVER_COL_IDX
5878          && (scb->col_scb != NULL)
5879          && (scb->col_scb->flags & SCB_ACTIVE) == 0) {
5880                 LIST_REMOVE(scb->col_scb, links.le);
5881                 ahd_add_col_list(ahd, scb->col_scb, col_idx);
5882         }
5883 found:
5884         scb->flags |= SCB_ACTIVE;
5885         return (scb);
5886 }
5887
5888 /*
5889  * Return an SCB resource to the free list.
5890  */
5891 void
5892 ahd_free_scb(struct ahd_softc *ahd, struct scb *scb)
5893 {       
5894
5895         /* Clean up for the next user */
5896         scb->flags = SCB_FLAG_NONE;
5897         scb->hscb->control = 0;
5898         ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = NULL;
5899
5900         if (scb->col_scb == NULL) {
5901
5902                 /*
5903                  * No collision possible.  Just free normally.
5904                  */
5905                 LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list,
5906                                  scb, links.le);
5907         } else if ((scb->col_scb->flags & SCB_ON_COL_LIST) != 0) {
5908
5909                 /*
5910                  * The SCB we might have collided with is on
5911                  * a free collision list.  Put both SCBs on
5912                  * the generic list.
5913                  */
5914                 ahd_rem_col_list(ahd, scb->col_scb);
5915                 LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list,
5916                                  scb, links.le);
5917                 LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list,
5918                                  scb->col_scb, links.le);
5919         } else if ((scb->col_scb->flags
5920                   & (SCB_PACKETIZED|SCB_ACTIVE)) == SCB_ACTIVE
5921                 && (scb->col_scb->hscb->control & TAG_ENB) != 0) {
5922
5923                 /*
5924                  * The SCB we might collide with on the next allocation
5925                  * is still active in a non-packetized, tagged, context.
5926                  * Put us on the SCB collision list.
5927                  */
5928                 ahd_add_col_list(ahd, scb,
5929                                  AHD_GET_SCB_COL_IDX(ahd, scb->col_scb));
5930         } else {
5931                 /*
5932                  * The SCB we might collide with on the next allocation
5933                  * is either active in a packetized context, or free.
5934                  * Since we can't collide, put this SCB on the generic
5935                  * free list.
5936                  */
5937                 LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list,
5938                                  scb, links.le);
5939         }
5940
5941         aic_platform_scb_free(ahd, scb);
5942 }
5943
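/*
 * Note on the collision pairing used above: SCB tags are paired by
 * flipping bit 8 (tag ^ 0x100), e.g. tag 0x0042 pairs with tag 0x0142;
 * the pairing is established in ahd_alloc_scbs() below.  The free/get
 * routines above keep a pair from being handed to a device while its
 * partner is still active there in non-packetized, tagged form.
 */
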
5944 void
5945 ahd_alloc_scbs(struct ahd_softc *ahd)
5946 {
5947         struct scb_data *scb_data;
5948         struct scb      *next_scb;
5949         struct hardware_scb *hscb;
5950         struct map_node *hscb_map;
5951         struct map_node *sg_map;
5952         struct map_node *sense_map;
5953         uint8_t         *segs;
5954         uint8_t         *sense_data;
5955         bus_addr_t       hscb_busaddr;
5956         bus_addr_t       sg_busaddr;
5957         bus_addr_t       sense_busaddr;
5958         int              newcount;
5959         int              i;
5960
5961         scb_data = &ahd->scb_data;
5962         if (scb_data->numscbs >= AHD_SCB_MAX_ALLOC)
5963                 /* Can't allocate any more */
5964                 return;
5965
5966         if (scb_data->scbs_left != 0) {
5967                 int offset;
5968
5969                 offset = (PAGE_SIZE / sizeof(*hscb)) - scb_data->scbs_left;
5970                 hscb_map = SLIST_FIRST(&scb_data->hscb_maps);
5971                 hscb = &((struct hardware_scb *)hscb_map->vaddr)[offset];
5972                 hscb_busaddr = hscb_map->busaddr + (offset * sizeof(*hscb));
5973         } else {
5974                 hscb_map = kmalloc(sizeof(*hscb_map), M_DEVBUF, M_INTWAIT);
5975
5976                 /* Allocate the next batch of hardware SCBs */
5977                 if (aic_dmamem_alloc(ahd, scb_data->hscb_dmat,
5978                                      (void **)&hscb_map->vaddr,
5979                                      BUS_DMA_NOWAIT, &hscb_map->dmamap) != 0) {
5980                         kfree(hscb_map, M_DEVBUF);
5981                         return;
5982                 }
5983
5984                 SLIST_INSERT_HEAD(&scb_data->hscb_maps, hscb_map, links);
5985
5986                 aic_dmamap_load(ahd, scb_data->hscb_dmat, hscb_map->dmamap,
5987                                 hscb_map->vaddr, PAGE_SIZE, ahd_dmamap_cb,
5988                                 &hscb_map->busaddr, /*flags*/0);
5989
5990                 hscb = (struct hardware_scb *)hscb_map->vaddr;
5991                 hscb_busaddr = hscb_map->busaddr;
5992                 scb_data->scbs_left = PAGE_SIZE / sizeof(*hscb);
5993                 if (ahd->next_queued_hscb == NULL) {
5994                         /*
5995                          * We need one HSCB to serve as the "next HSCB".  Since
5996                          * the tag identifier in this HSCB will never be used,
5997                          * there is no point in using a valid SCB from the
5998                          * free pool for it.  So, we allocate this "sentinel"
5999                          * specially.
6000                          */
6001                         ahd->next_queued_hscb = hscb;
6002                         ahd->next_queued_hscb_map = hscb_map;
6003                         memset(hscb, 0, sizeof(*hscb));
6004                         hscb->hscb_busaddr = aic_htole32(hscb_busaddr);
6005                         hscb++;
6006                         hscb_busaddr += sizeof(*hscb);
6007                         scb_data->scbs_left--;
6008                 }
6009         }
6010
6011         if (scb_data->sgs_left != 0) {
6012                 int offset;
6013
6014                 offset = ((ahd_sglist_allocsize(ahd) / ahd_sglist_size(ahd))
6015                        - scb_data->sgs_left) * ahd_sglist_size(ahd);
6016                 sg_map = SLIST_FIRST(&scb_data->sg_maps);
6017                 segs = sg_map->vaddr + offset;
6018                 sg_busaddr = sg_map->busaddr + offset;
6019         } else {
6020                 sg_map = kmalloc(sizeof(*sg_map), M_DEVBUF, M_INTWAIT);
6021
6022                 /* Allocate the next batch of S/G lists */
6023                 if (aic_dmamem_alloc(ahd, scb_data->sg_dmat,
6024                                      (void **)&sg_map->vaddr,
6025                                      BUS_DMA_NOWAIT, &sg_map->dmamap) != 0) {
6026                         kfree(sg_map, M_DEVBUF);
6027                         return;
6028                 }
6029
6030                 SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links);
6031
6032                 aic_dmamap_load(ahd, scb_data->sg_dmat, sg_map->dmamap,
6033                                 sg_map->vaddr, ahd_sglist_allocsize(ahd),
6034                                 ahd_dmamap_cb, &sg_map->busaddr, /*flags*/0);
6035
6036                 segs = sg_map->vaddr;
6037                 sg_busaddr = sg_map->busaddr;
6038                 scb_data->sgs_left =
6039                     ahd_sglist_allocsize(ahd) / ahd_sglist_size(ahd);
6040 #ifdef AHD_DEBUG
6041                 if (ahd_debug & AHD_SHOW_MEMORY)
6042                         kprintf("Mapped SG data\n");
6043 #endif
6044         }
6045
6046         if (scb_data->sense_left != 0) {
6047                 int offset;
6048
6049                 offset = PAGE_SIZE - (AHD_SENSE_BUFSIZE * scb_data->sense_left);
6050                 sense_map = SLIST_FIRST(&scb_data->sense_maps);
6051                 sense_data = sense_map->vaddr + offset;
6052                 sense_busaddr = sense_map->busaddr + offset;
6053         } else {
6054                 sense_map = kmalloc(sizeof(*sense_map), M_DEVBUF, M_INTWAIT);
6055
6056                 /* Allocate the next batch of sense buffers */
6057                 if (aic_dmamem_alloc(ahd, scb_data->sense_dmat,
6058                                      (void **)&sense_map->vaddr,
6059                                      BUS_DMA_NOWAIT, &sense_map->dmamap) != 0) {
6060                         kfree(sense_map, M_DEVBUF);
6061                         return;
6062                 }
6063
6064                 SLIST_INSERT_HEAD(&scb_data->sense_maps, sense_map, links);
6065
6066                 aic_dmamap_load(ahd, scb_data->sense_dmat, sense_map->dmamap,
6067                                 sense_map->vaddr, PAGE_SIZE, ahd_dmamap_cb,
6068                                 &sense_map->busaddr, /*flags*/0);
6069
6070                 sense_data = sense_map->vaddr;
6071                 sense_busaddr = sense_map->busaddr;
6072                 scb_data->sense_left = PAGE_SIZE / AHD_SENSE_BUFSIZE;
6073 #ifdef AHD_DEBUG
6074                 if (ahd_debug & AHD_SHOW_MEMORY)
6075                         kprintf("Mapped sense data\n");
6076 #endif
6077         }
6078
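        /*
         * Create only as many new SCBs as the most depleted of the
         * three pools (sense buffers, hardware SCBs, S/G lists) can
         * support, without exceeding the overall allocation limit.
         */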
6079         newcount = MIN(scb_data->sense_left, scb_data->scbs_left);
6080         newcount = MIN(newcount, scb_data->sgs_left);
6081         newcount = MIN(newcount, (AHD_SCB_MAX_ALLOC - scb_data->numscbs));
6082         scb_data->sense_left -= newcount;
6083         scb_data->scbs_left -= newcount;
6084         scb_data->sgs_left -= newcount;
6085         for (i = 0; i < newcount; i++) {
6086                 struct scb_platform_data *pdata;
6087                 u_int col_tag;
6088 #ifndef __linux__
6089                 int error;
6090 #endif
6091
6092                 next_scb = kmalloc(sizeof(*next_scb), M_DEVBUF, M_INTWAIT);
6093                 pdata = kmalloc(sizeof(*pdata), M_DEVBUF, M_INTWAIT);
6094                 next_scb->platform_data = pdata;
6095                 next_scb->hscb_map = hscb_map;
6096                 next_scb->sg_map = sg_map;
6097                 next_scb->sense_map = sense_map;
6098                 next_scb->sg_list = segs;
6099                 next_scb->sense_data = sense_data;
6100                 next_scb->sense_busaddr = sense_busaddr;
6101                 memset(hscb, 0, sizeof(*hscb));
6102                 next_scb->hscb = hscb;
6103                 hscb->hscb_busaddr = aic_htole32(hscb_busaddr);
6104
6105                 /*
6106                  * The sequencer always starts with the second entry.
6107                  * The first entry is embedded in the scb.
6108                  */
6109                 next_scb->sg_list_busaddr = sg_busaddr;
6110                 if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0)
6111                         next_scb->sg_list_busaddr
6112                             += sizeof(struct ahd_dma64_seg);
6113                 else
6114                         next_scb->sg_list_busaddr += sizeof(struct ahd_dma_seg);
6115                 next_scb->ahd_softc = ahd;
6116                 next_scb->flags = SCB_FLAG_NONE;
6117 #ifndef __linux__
6118                 error = aic_dmamap_create(ahd, ahd->buffer_dmat, /*flags*/0,
6119                                           &next_scb->dmamap);
6120                 if (error != 0) {
6121                         kfree(next_scb, M_DEVBUF);
6122                         kfree(pdata, M_DEVBUF);
6123                         break;
6124                 }
6125 #endif
6126                 next_scb->hscb->tag = aic_htole16(scb_data->numscbs);
6127                 col_tag = scb_data->numscbs ^ 0x100;
6128                 next_scb->col_scb = ahd_find_scb_by_tag(ahd, col_tag);
6129                 if (next_scb->col_scb != NULL)
6130                         next_scb->col_scb->col_scb = next_scb;
6131                 ahd_free_scb(ahd, next_scb);
6132                 hscb++;
6133                 hscb_busaddr += sizeof(*hscb);
6134                 segs += ahd_sglist_size(ahd);
6135                 sg_busaddr += ahd_sglist_size(ahd);
6136                 sense_data += AHD_SENSE_BUFSIZE;
6137                 sense_busaddr += AHD_SENSE_BUFSIZE;
6138                 scb_data->numscbs++;
6139         }
6140 }
6141
6142 void
6143 ahd_controller_info(struct ahd_softc *ahd, char *buf)
6144 {
6145         const char *speed;
6146         const char *type;
6147         int len;
6148
6149         len = ksprintf(buf, "%s: ",
6150                        ahd_chip_names[ahd->chip & AHD_CHIPID_MASK]);
6151         buf += len;
6152
6153         speed = "Ultra320 ";
6154         if ((ahd->features & AHD_WIDE) != 0) {
6155                 type = "Wide ";
6156         } else {
6157                 type = "Single ";
6158         }
6159         len = ksprintf(buf, "%s%sChannel %c, SCSI Id=%d, ",
6160                        speed, type, ahd->channel, ahd->our_id);
6161         buf += len;
6162
6163         ksprintf(buf, "%s, %d SCBs", ahd->bus_description,
6164                  ahd->scb_data.maxhscbs);
6165 }
6166
6167 static const char *channel_strings[] = {
6168         "Primary Low",
6169         "Primary High",
6170         "Secondary Low", 
6171         "Secondary High"
6172 };
6173
6174 static const char *termstat_strings[] = {
6175         "Terminated Correctly",
6176         "Over Terminated",
6177         "Under Terminated",
6178         "Not Configured"
6179 };
6180
6181 /*
6182  * Start the board, ready for normal operation
6183  */
6184 int
6185 ahd_init(struct ahd_softc *ahd)
6186 {
6187         uint8_t         *next_vaddr;
6188         bus_addr_t       next_baddr;
6189         size_t           driver_data_size;
6190         int              i;
6191         int              error;
6192         u_int            warn_user;
6193         uint8_t          current_sensing;
6194         uint8_t          fstat;
6195
6196         AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
6197
6198         ahd->stack_size = ahd_probe_stack_size(ahd);
6199         ahd->saved_stack = kmalloc(ahd->stack_size * sizeof(uint16_t),
6200                                   M_DEVBUF, M_WAITOK);
6201
6202         /*
6203          * Verify that the compiler hasn't over-aggressively
6204          * padded important structures.
6205          */
6206         if (sizeof(struct hardware_scb) != 64)
6207                 panic("Hardware SCB size is incorrect");
6208
6209 #ifdef AHD_DEBUG
6210         if ((ahd_debug & AHD_DEBUG_SEQUENCER) != 0)
6211                 ahd->flags |= AHD_SEQUENCER_DEBUG;
6212 #endif
6213
6214         /*
6215          * Default to allowing initiator operations.
6216          */
6217         ahd->flags |= AHD_INITIATORROLE;
6218
6219         /*
6220          * Only allow target mode features if this unit has them enabled.
6221          */
6222         if ((AHD_TMODE_ENABLE & (0x1 << ahd->unit)) == 0)
6223                 ahd->features &= ~AHD_TARGETMODE;
6224
6225 #ifndef __linux__
6226         /* DMA tag for mapping buffers into device visible space. */
6227         if (aic_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1,
6228                                /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
6229                                /*lowaddr*/ahd->flags & AHD_39BIT_ADDRESSING
6230                                         ? (bus_addr_t)0x7FFFFFFFFFULL
6231                                         : BUS_SPACE_MAXADDR_32BIT,
6232                                /*highaddr*/BUS_SPACE_MAXADDR,
6233                                /*filter*/NULL, /*filterarg*/NULL,
6234                                /*maxsize*/(AHD_NSEG - 1) * PAGE_SIZE,
6235                                /*nsegments*/AHD_NSEG,
6236                                /*maxsegsz*/AHD_MAXTRANSFER_SIZE,
6237                                /*flags*/BUS_DMA_ALLOCNOW,
6238                                &ahd->buffer_dmat) != 0) {
6239                 return (ENOMEM);
6240         }
6241 #endif
6242
6243         ahd->init_level++;
6244
6245         /*
6246          * DMA tag for our command fifos and other data in system memory
6247          * the card's sequencer must be able to access.  For initiator
6248          * roles, we need to allocate space for the qoutfifo.  When providing
6249          * for the target mode role, we must additionally provide space for
6250          * the incoming target command fifo.
6251          */
6252         driver_data_size = AHD_SCB_MAX * sizeof(*ahd->qoutfifo);
6253         if ((ahd->features & AHD_TARGETMODE) != 0)
6254                 driver_data_size += AHD_TMODE_CMDS * sizeof(struct target_cmd);
6255         if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0)
6256                 driver_data_size += PKT_OVERRUN_BUFSIZE;
6257         if (aic_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1,
6258                                /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
6259                                /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
6260                                /*highaddr*/BUS_SPACE_MAXADDR,
6261                                /*filter*/NULL, /*filterarg*/NULL,
6262                                driver_data_size,
6263                                /*nsegments*/1,
6264                                /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
6265                                /*flags*/0, &ahd->shared_data_dmat) != 0) {
6266                 return (ENOMEM);
6267         }
6268
6269         ahd->init_level++;
6270
6271         /* Allocation of driver data */
6272         if (aic_dmamem_alloc(ahd, ahd->shared_data_dmat,
6273                              (void **)&ahd->shared_data_map.vaddr,
6274                              BUS_DMA_NOWAIT,
6275                              &ahd->shared_data_map.dmamap) != 0) {
6276                 return (ENOMEM);
6277         }
6278
6279         ahd->init_level++;
6280
6281         /* And permanently map it in */
6282         aic_dmamap_load(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap,
6283                         ahd->shared_data_map.vaddr, driver_data_size,
6284                         ahd_dmamap_cb, &ahd->shared_data_map.busaddr,
6285                         /*flags*/0);
6286         ahd->qoutfifo = (struct ahd_completion *)ahd->shared_data_map.vaddr;
6287         next_vaddr = (uint8_t *)&ahd->qoutfifo[AHD_QOUT_SIZE];
6288         next_baddr = ahd->shared_data_map.busaddr
6289                    + AHD_QOUT_SIZE*sizeof(struct ahd_completion);
6290         if ((ahd->features & AHD_TARGETMODE) != 0) {
6291                 ahd->targetcmds = (struct target_cmd *)next_vaddr;
6292                 next_vaddr += AHD_TMODE_CMDS * sizeof(struct target_cmd);
6293                 next_baddr += AHD_TMODE_CMDS * sizeof(struct target_cmd);
6294         }
6295
6296         if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0) {
6297                 ahd->overrun_buf = next_vaddr;
6298                 next_vaddr += PKT_OVERRUN_BUFSIZE;
6299                 next_baddr += PKT_OVERRUN_BUFSIZE;
6300         }
6301
6302         ahd->init_level++;
6303
6304         /* Allocate SCB data now that buffer_dmat is initialized */
6305         if (ahd_init_scbdata(ahd) != 0)
6306                 return (ENOMEM);
6307
6308         if ((ahd->flags & AHD_INITIATORROLE) == 0)
6309                 ahd->flags &= ~AHD_RESET_BUS_A;
6310
6311         /*
6312          * Before committing these settings to the chip, give
6313          * the OSM one last chance to modify our configuration.
6314          */
6315         ahd_platform_init(ahd);
6316
6317         /* Bring up the chip. */
6318         ahd_chip_init(ahd);
6319
6320         AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
6321
6322         if ((ahd->flags & AHD_CURRENT_SENSING) == 0)
6323                 goto init_done;
6324
6325         /*
6326          * Verify termination based on current draw and
6327          * warn user if the bus is over/under terminated.
6328          */
6329         error = ahd_write_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL,
6330                                    CURSENSE_ENB);
6331         if (error != 0) {
6332                 kprintf("%s: current sensing timeout 1\n", ahd_name(ahd));
6333                 goto init_done;
6334         }
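        /*
         * Poll the flexport status until the current-sense measurement
         * completes (FLX_FSTAT_BUSY clears), giving up after 20 reads.
         */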
6335         for (i = 20, fstat = FLX_FSTAT_BUSY;
6336              (fstat & FLX_FSTAT_BUSY) != 0 && i; i--) {
6337                 error = ahd_read_flexport(ahd, FLXADDR_FLEXSTAT, &fstat);
6338                 if (error != 0) {
6339                         kprintf("%s: current sensing timeout 2\n",
6340                                ahd_name(ahd));
6341                         goto init_done;
6342                 }
6343         }
6344         if (i == 0) {
6345                 kprintf("%s: Timed out during current-sensing test\n",
6346                        ahd_name(ahd));
6347                 goto init_done;
6348         }
6349
6350         /* Latch Current Sensing status. */
6351         error = ahd_read_flexport(ahd, FLXADDR_CURRENT_STAT, &current_sensing);
6352         if (error != 0) {
6353                 kprintf("%s: current sensing timeout 3\n", ahd_name(ahd));
6354                 goto init_done;
6355         }
6356
6357         /* Disable current sensing. */
6358         ahd_write_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, 0);
6359
6360 #ifdef AHD_DEBUG
6361         if ((ahd_debug & AHD_SHOW_TERMCTL) != 0) {
6362                 kprintf("%s: current_sensing == 0x%x\n",
6363                        ahd_name(ahd), current_sensing);
6364         }
6365 #endif
6366         warn_user = 0;
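        /*
         * The latched value packs a FLX_CSTAT_MASK-wide termination status
         * field for each of the four channel segments; shift one field out
         * per iteration, counting over/under termination and reporting the
         * status when a problem is found or bootverbose is set.
         */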
6367         for (i = 0; i < 4; i++, current_sensing >>= FLX_CSTAT_SHIFT) {
6368                 u_int term_stat;
6369
6370                 term_stat = (current_sensing & FLX_CSTAT_MASK);
6371                 switch (term_stat) {
6372                 case FLX_CSTAT_OVER:
6373                 case FLX_CSTAT_UNDER:
6374                         warn_user++;
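                        /* FALLTHROUGH */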
6375                 case FLX_CSTAT_INVALID:
6376                 case FLX_CSTAT_OKAY:
6377                         if (warn_user == 0 && bootverbose == 0)
6378                                 break;
6379                         kprintf("%s: %s Channel %s\n", ahd_name(ahd),
6380                                channel_strings[i], termstat_strings[term_stat]);
6381                         break;
6382                 }
6383         }
6384         if (warn_user) {
6385                 kprintf("%s: WARNING. Termination is not configured correctly.\n"
6386                        "%s: WARNING. SCSI bus operations may FAIL.\n",
6387                        ahd_name(ahd), ahd_name(ahd));
6388         }
6389 init_done:
6390         ahd_restart(ahd);
6391         aic_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US,
6392                         ahd_stat_timer, ahd);
6393         return (0);
6394 }
6395
6396 /*
6397  * (Re)initialize chip state after a chip reset.
6398  */
6399 static void
6400 ahd_chip_init(struct ahd_softc *ahd)
6401 {
6402         uint32_t busaddr;
6403         u_int    sxfrctl1;
6404         u_int    scsiseq_template;
6405         u_int    wait;
6406         u_int    i;
6407         u_int    target;
6408
6409         ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
6410         /*
6411          * Take the LED out of diagnostic mode
6412          */
6413         ahd_outb(ahd, SBLKCTL, ahd_inb(ahd, SBLKCTL) & ~(DIAGLEDEN|DIAGLEDON));
6414
6415         /*
6416          * Return HS_MAILBOX to its default value.
6417          */
6418         ahd->hs_mailbox = 0;
6419         ahd_outb(ahd, HS_MAILBOX, 0);
6420
6421         /* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1. */
6422         ahd_outb(ahd, IOWNID, ahd->our_id);
6423         ahd_outb(ahd, TOWNID, ahd->our_id);
6424         sxfrctl1 = (ahd->flags & AHD_TERM_ENB_A) != 0 ? STPWEN : 0;
6425         sxfrctl1 |= (ahd->flags & AHD_SPCHK_ENB_A) != 0 ? ENSPCHK : 0;
6426         if ((ahd->bugs & AHD_LONG_SETIMO_BUG)
6427          && (ahd->seltime != STIMESEL_MIN)) {
6428                 /*
6429                  * The selection timer duration is twice as long
6430                  * as it should be.  Halve it by adding "1" to
6431                  * the user specified setting.
6432                  */
6433                 sxfrctl1 |= ahd->seltime + STIMESEL_BUG_ADJ;
6434         } else {
6435                 sxfrctl1 |= ahd->seltime;
6436         }
6437                 
6438         ahd_outb(ahd, SXFRCTL0, DFON);
6439         ahd_outb(ahd, SXFRCTL1, sxfrctl1|ahd->seltime|ENSTIMER|ACTNEGEN);
6440         ahd_outb(ahd, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
6441
6442         /*
6443          * Now that termination is set, wait for up
6444          * to 500ms for our transceivers to settle.  If
6445          * the adapter does not have a cable attached,
6446          * the transceivers may never settle, so don't
6447          * complain if we fail here.
6448          */
6449         for (wait = 10000;
6450              (ahd_inb(ahd, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait;
6451              wait--)
6452                 aic_delay(100);
6453
6454         /* Clear any false bus resets due to the transceivers settling */
6455         ahd_outb(ahd, CLRSINT1, CLRSCSIRSTI);
6456         ahd_outb(ahd, CLRINT, CLRSCSIINT);
6457
6458         /* Initialize mode specific S/G state. */
6459         for (i = 0; i < 2; i++) {
6460                 ahd_set_modes(ahd, AHD_MODE_DFF0 + i, AHD_MODE_DFF0 + i);
6461                 ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR);
6462                 ahd_outb(ahd, SG_STATE, 0);
6463                 ahd_outb(ahd, CLRSEQINTSRC, 0xFF);
6464                 ahd_outb(ahd, SEQIMODE,
6465                          ENSAVEPTRS|ENCFG4DATA|ENCFG4ISTAT
6466                         |ENCFG4TSTAT|ENCFG4ICMD|ENCFG4TCMD);
6467         }
6468
6469         ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
6470         ahd_outb(ahd, DSCOMMAND0, ahd_inb(ahd, DSCOMMAND0)|MPARCKEN|CACHETHEN);
6471         ahd_outb(ahd, DFF_THRSH, RD_DFTHRSH_75|WR_DFTHRSH_75);
6472         ahd_outb(ahd, SIMODE0, ENIOERR|ENOVERRUN);
6473         ahd_outb(ahd, SIMODE3, ENNTRAMPERR|ENOSRAMPERR);
6474         if ((ahd->bugs & AHD_BUSFREEREV_BUG) != 0) {
6475                 ahd_outb(ahd, OPTIONMODE, AUTOACKEN|AUTO_MSGOUT_DE);
6476         } else {
6477                 ahd_outb(ahd, OPTIONMODE, AUTOACKEN|BUSFREEREV|AUTO_MSGOUT_DE);
6478         }
6479         ahd_outb(ahd, SCSCHKN, CURRFIFODEF|WIDERESEN|SHVALIDSTDIS);
6480         if ((ahd->chip & AHD_BUS_MASK) == AHD_PCIX)
6481                 /*
6482                  * Do not issue a target abort when a split completion
6483                  * error occurs.  Let our PCIX interrupt handler deal
6484                  * with it instead. H2A4 Razor #625
6485                  */
6486                 ahd_outb(ahd, PCIXCTL, ahd_inb(ahd, PCIXCTL) | SPLTSTADIS);
6487
6488         if ((ahd->bugs & AHD_LQOOVERRUN_BUG) != 0)
6489                 ahd_outb(ahd, LQOSCSCTL, LQONOCHKOVER);
6490
6491         /*
6492          * Tweak IOCELL settings.
6493          */
6494         if ((ahd->flags & AHD_HP_BOARD) != 0) {
6495                 for (i = 0; i < NUMDSPS; i++) {
6496                         ahd_outb(ahd, DSPSELECT, i);
6497                         ahd_outb(ahd, WRTBIASCTL, WRTBIASCTL_HP_DEFAULT);
6498                 }
6499 #ifdef AHD_DEBUG
6500                 if ((ahd_debug & AHD_SHOW_MISC) != 0)
6501                         kprintf("%s: WRTBIASCTL now 0x%x\n", ahd_name(ahd),
6502                                WRTBIASCTL_HP_DEFAULT);
6503 #endif
6504         }
6505         ahd_setup_iocell_workaround(ahd);
6506
6507         /*
6508          * Enable LQI Manager interrupts.
6509          */
6510         ahd_outb(ahd, LQIMODE1, ENLQIPHASE_LQ|ENLQIPHASE_NLQ|ENLIQABORT
6511                               | ENLQICRCI_LQ|ENLQICRCI_NLQ|ENLQIBADLQI
6512                               | ENLQIOVERI_LQ|ENLQIOVERI_NLQ);
6513         ahd_outb(ahd, LQOMODE0, ENLQOATNLQ|ENLQOATNPKT|ENLQOTCRC);
6514         /*
6515          * An interrupt from LQOBUSFREE is made redundant by the
6516          * BUSFREE interrupt.  We choose to have the sequencer catch
6517          * LQOPHCHGINPKT errors manually for the command phase at the
6518          * start of a packetized selection case.
6519         ahd_outb(ahd, LQOMODE1, ENLQOBUSFREE|ENLQOPHACHGINPKT);
6520          */
6521         ahd_outb(ahd, LQOMODE1, 0);
6522
6523         /*
6524          * Setup sequencer interrupt handlers.
6525          */
6526         ahd_outw(ahd, INTVEC1_ADDR, ahd_resolve_seqaddr(ahd, LABEL_seq_isr));
6527         ahd_outw(ahd, INTVEC2_ADDR, ahd_resolve_seqaddr(ahd, LABEL_timer_isr));
6528
6529         /*
6530          * Setup SCB Offset registers.
6531          */
6532         if ((ahd->bugs & AHD_PKT_LUN_BUG) != 0) {
6533                 ahd_outb(ahd, LUNPTR, offsetof(struct hardware_scb,
6534                          pkt_long_lun));
6535         } else {
6536                 ahd_outb(ahd, LUNPTR, offsetof(struct hardware_scb, lun));
6537         }
6538         ahd_outb(ahd, CMDLENPTR, offsetof(struct hardware_scb, cdb_len));
6539         ahd_outb(ahd, ATTRPTR, offsetof(struct hardware_scb, task_attribute));
6540         ahd_outb(ahd, FLAGPTR, offsetof(struct hardware_scb, task_management));
6541         ahd_outb(ahd, CMDPTR, offsetof(struct hardware_scb,
6542                                        shared_data.idata.cdb));
6543         ahd_outb(ahd, QNEXTPTR,
6544                  offsetof(struct hardware_scb, next_hscb_busaddr));
6545         ahd_outb(ahd, ABRTBITPTR, MK_MESSAGE_BIT_OFFSET);
6546         ahd_outb(ahd, ABRTBYTEPTR, offsetof(struct hardware_scb, control));
6547         if ((ahd->bugs & AHD_PKT_LUN_BUG) != 0) {
6548                 ahd_outb(ahd, LUNLEN,
6549                          sizeof(ahd->next_queued_hscb->pkt_long_lun) - 1);
6550         } else {
6551                 ahd_outb(ahd, LUNLEN, LUNLEN_SINGLE_LEVEL_LUN);
6552         }
6553         ahd_outb(ahd, CDBLIMIT, SCB_CDB_LEN_PTR - 1);
6554         ahd_outb(ahd, MAXCMD, 0xFF);
6555         ahd_outb(ahd, SCBAUTOPTR,
6556                  AUSCBPTR_EN | offsetof(struct hardware_scb, tag));
6557
6558         /* We haven't been enabled for target mode yet. */
6559         ahd_outb(ahd, MULTARGID, 0);
6560         ahd_outb(ahd, MULTARGID + 1, 0);
6561
6562         ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
6563         /* Initialize the negotiation table. */
6564         if ((ahd->features & AHD_NEW_IOCELL_OPTS) == 0) {
6565                 /*
6566                  * Clear the spare bytes in the neg table to avoid
6567                  * spurious parity errors.
6568                  */
6569                 for (target = 0; target < AHD_NUM_TARGETS; target++) {
6570                         ahd_outb(ahd, NEGOADDR, target);
6571                         ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_PER_DEV0);
6572                         for (i = 0; i < AHD_NUM_PER_DEV_ANNEXCOLS; i++)
6573                                 ahd_outb(ahd, ANNEXDAT, 0);
6574                 }
6575         }
6576         for (target = 0; target < AHD_NUM_TARGETS; target++) {
6577                 struct   ahd_devinfo devinfo;
6578                 struct   ahd_initiator_tinfo *tinfo;
6579                 struct   ahd_tmode_tstate *tstate;
6580
6581                 tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
6582                                             target, &tstate);
6583                 ahd_compile_devinfo(&devinfo, ahd->our_id,
6584                                     target, CAM_LUN_WILDCARD,
6585                                     'A', ROLE_INITIATOR);
6586                 ahd_update_neg_table(ahd, &devinfo, &tinfo->curr);
6587         }
6588
6589         ahd_outb(ahd, CLRSINT3, NTRAMPERR|OSRAMPERR);
6590         ahd_outb(ahd, CLRINT, CLRSCSIINT);
6591
6592 #if NEEDS_MORE_TESTING
6593         /*
6594          * Always enable abort on incoming L_Qs if this feature is
6595          * supported.  We use this to catch invalid SCB references.
6596          */
6597         if ((ahd->bugs & AHD_ABORT_LQI_BUG) == 0)
6598                 ahd_outb(ahd, LQCTL1, ABORTPENDING);
6599         else
6600 #endif
6601                 ahd_outb(ahd, LQCTL1, 0);
6602
6603         /* All of our queues are empty */
6604         ahd->qoutfifonext = 0;
6605         ahd->qoutfifonext_valid_tag = QOUTFIFO_ENTRY_VALID;
6606         ahd_outb(ahd, QOUTFIFO_ENTRY_VALID_TAG, QOUTFIFO_ENTRY_VALID);
6607         for (i = 0; i < AHD_QOUT_SIZE; i++)
6608                 ahd->qoutfifo[i].valid_tag = 0;
6609         ahd_sync_qoutfifo(ahd, BUS_DMASYNC_PREREAD);
6610
6611         ahd->qinfifonext = 0;
6612         for (i = 0; i < AHD_QIN_SIZE; i++)
6613                 ahd->qinfifo[i] = SCB_LIST_NULL;
6614
6615         if ((ahd->features & AHD_TARGETMODE) != 0) {
6616                 /* All target command blocks start out invalid. */
6617                 for (i = 0; i < AHD_TMODE_CMDS; i++)
6618                         ahd->targetcmds[i].cmd_valid = 0;
6619                 ahd_sync_tqinfifo(ahd, BUS_DMASYNC_PREREAD);
6620                 ahd->tqinfifonext = 1;
6621                 ahd_outb(ahd, KERNEL_TQINPOS, ahd->tqinfifonext - 1);
6622                 ahd_outb(ahd, TQINPOS, ahd->tqinfifonext);
6623         }
6624
6625         /* Initialize Scratch Ram. */
6626         ahd_outb(ahd, SEQ_FLAGS, 0);
6627         ahd_outb(ahd, SEQ_FLAGS2, 0);
6628
6629         /* We don't have any waiting selections */
6630         ahd_outw(ahd, WAITING_TID_HEAD, SCB_LIST_NULL);
6631         ahd_outw(ahd, WAITING_TID_TAIL, SCB_LIST_NULL);
6632         for (i = 0; i < AHD_NUM_TARGETS; i++)
6633                 ahd_outw(ahd, WAITING_SCB_TAILS + (2 * i), SCB_LIST_NULL);
6634
6635         /*
6636          * Nobody is waiting to be DMAed into the QOUTFIFO.
6637          */
6638         ahd_outw(ahd, COMPLETE_SCB_HEAD, SCB_LIST_NULL);
6639         ahd_outw(ahd, COMPLETE_SCB_DMAINPROG_HEAD, SCB_LIST_NULL);
6640         ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, SCB_LIST_NULL);
6641         ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, SCB_LIST_NULL);
6642         ahd_outw(ahd, COMPLETE_ON_QFREEZE_HEAD, SCB_LIST_NULL);
6643
6644         /*
6645          * The Freeze Count is 0.
6646          */
6647         ahd->qfreeze_cnt = 0;
6648         ahd_outw(ahd, QFREEZE_COUNT, 0);
6649         ahd_outw(ahd, KERNEL_QFREEZE_COUNT, 0);
6650
6651         /*
6652          * Tell the sequencer where it can find our arrays in memory.
6653          */
6654         busaddr = ahd->shared_data_map.busaddr;
6655         ahd_outl(ahd, SHARED_DATA_ADDR, busaddr);
6656         ahd_outl(ahd, QOUTFIFO_NEXT_ADDR, busaddr);
6657
6658         /*
6659          * Setup the allowed SCSI Sequences based on operational mode.
6660          * If we are a target, we'll enable select in operations once
6661          * we've had a lun enabled.
6662          */
6663         scsiseq_template = ENAUTOATNP;
6664         if ((ahd->flags & AHD_INITIATORROLE) != 0)
6665                 scsiseq_template |= ENRSELI;
6666         ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq_template);
6667
6668         /* There are no busy SCBs yet. */
6669         for (target = 0; target < AHD_NUM_TARGETS; target++) {
6670                 int lun;
6671
6672                 for (lun = 0; lun < AHD_NUM_LUNS_NONPKT; lun++)
6673                         ahd_unbusy_tcl(ahd, BUILD_TCL_RAW(target, 'A', lun));
6674         }
6675
6676         /*
6677          * Initialize the group code to command length table.
6678          * Vendor Unique codes are set to 0 so we only capture
6679          * the first byte of the cdb.  These can be overridden
6680          * when target mode is enabled.
6681          */
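        /*
         * The value stored for each group code is the number of CDB bytes
         * that follow the opcode: 5 for 6-byte group 0, 9 for the 10-byte
         * groups 1 and 2, 15 for 16-byte group 4, and 11 for 12-byte
         * group 5.
         */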
6682         ahd_outb(ahd, CMDSIZE_TABLE, 5);
6683         ahd_outb(ahd, CMDSIZE_TABLE + 1, 9);
6684         ahd_outb(ahd, CMDSIZE_TABLE + 2, 9);
6685         ahd_outb(ahd, CMDSIZE_TABLE + 3, 0);
6686         ahd_outb(ahd, CMDSIZE_TABLE + 4, 15);
6687         ahd_outb(ahd, CMDSIZE_TABLE + 5, 11);
6688         ahd_outb(ahd, CMDSIZE_TABLE + 6, 0);
6689         ahd_outb(ahd, CMDSIZE_TABLE + 7, 0);
6690                 
6691         /* Tell the sequencer of our initial queue positions */
6692         ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
6693         ahd_outb(ahd, QOFF_CTLSTA, SCB_QSIZE_512);
6694         ahd->qinfifonext = 0;
6695         ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);
6696         ahd_set_hescb_qoff(ahd, 0);
6697         ahd_set_snscb_qoff(ahd, 0);
6698         ahd_set_sescb_qoff(ahd, 0);
6699         ahd_set_sdscb_qoff(ahd, 0);
6700
6701         /*
6702          * Tell the sequencer which SCB will be the next one it receives.
6703          */
6704         busaddr = aic_le32toh(ahd->next_queued_hscb->hscb_busaddr);
6705         ahd_outl(ahd, NEXT_QUEUED_SCB_ADDR, busaddr);
6706
6707         /*
6708          * Default to coalescing disabled.
6709          */
6710         ahd_outw(ahd, INT_COALESCING_CMDCOUNT, 0);
6711         ahd_outw(ahd, CMDS_PENDING, 0);
6712         ahd_update_coalescing_values(ahd, ahd->int_coalescing_timer,
6713                                      ahd->int_coalescing_maxcmds,
6714                                      ahd->int_coalescing_mincmds);
6715         ahd_enable_coalescing(ahd, FALSE);
6716
6717         ahd_loadseq(ahd);
6718         ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
6719 }
6720
6721 /*
6722  * Setup default device and controller settings.
6723  * This should only be called if our probe has
6724  * determined that no configuration data is available.
6725  */
6726 int
6727 ahd_default_config(struct ahd_softc *ahd)
6728 {
6729         int     targ;
6730
6731         ahd->our_id = 7;
6732
6733         /*
6734          * Allocate a tstate to house information for our
6735          * initiator presence on the bus as well as the user
6736          * data for any target mode initiator.
6737          */
6738         if (ahd_alloc_tstate(ahd, ahd->our_id, 'A') == NULL) {
6739                 kprintf("%s: unable to allocate ahd_tmode_tstate.  "
6740                        "Failing attach\n", ahd_name(ahd));
6741                 return (ENOMEM);
6742         }
6743
6744         for (targ = 0; targ < AHD_NUM_TARGETS; targ++) {
6745                 struct   ahd_devinfo devinfo;
6746                 struct   ahd_initiator_tinfo *tinfo;
6747                 struct   ahd_tmode_tstate *tstate;
6748                 uint16_t target_mask;
6749
6750                 tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
6751                                             targ, &tstate);
6752                 /*
6753                  * We support SPC2 and SPI4.
6754                  */
6755                 tinfo->user.protocol_version = 4;
6756                 tinfo->user.transport_version = 4;
6757
6758                 target_mask = 0x01 << targ;
6759                 ahd->user_discenable |= target_mask;
6760                 tstate->discenable |= target_mask;
6761                 ahd->user_tagenable |= target_mask;
6762 #ifdef AHD_FORCE_160
6763                 tinfo->user.period = AHD_SYNCRATE_DT;
6764 #else
6765                 tinfo->user.period = AHD_SYNCRATE_160;
6766 #endif
6767                 tinfo->user.offset = MAX_OFFSET;
6768                 tinfo->user.ppr_options = MSG_EXT_PPR_RD_STRM
6769                                         | MSG_EXT_PPR_WR_FLOW
6770                                         | MSG_EXT_PPR_HOLD_MCS
6771                                         | MSG_EXT_PPR_IU_REQ
6772                                         | MSG_EXT_PPR_QAS_REQ
6773                                         | MSG_EXT_PPR_DT_REQ;
6774                 if ((ahd->features & AHD_RTI) != 0)
6775                         tinfo->user.ppr_options |= MSG_EXT_PPR_RTI;
6776
6777                 tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;
6778
6779                 /*
6780                  * Start out Async/Narrow/Untagged and with
6781                  * conservative protocol support.
6782                  */
6783                 tinfo->goal.protocol_version = 2;
6784                 tinfo->goal.transport_version = 2;
6785                 tinfo->curr.protocol_version = 2;
6786                 tinfo->curr.transport_version = 2;
6787                 ahd_compile_devinfo(&devinfo, ahd->our_id,
6788                                     targ, CAM_LUN_WILDCARD,
6789                                     'A', ROLE_INITIATOR);
6790                 tstate->tagenable &= ~target_mask;
6791                 ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
6792                               AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE);
6793                 ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0,
6794                                  /*ppr_options*/0, AHD_TRANS_CUR|AHD_TRANS_GOAL,
6795                                  /*paused*/TRUE);
6796         }
6797         return (0);
6798 }
6799
6800 /*
6801  * Parse device configuration information.
6802  */
6803 int
6804 ahd_parse_cfgdata(struct ahd_softc *ahd, struct seeprom_config *sc)
6805 {
6806         int targ;
6807         int max_targ;
6808
6809         max_targ = sc->max_targets & CFMAXTARG;
6810         ahd->our_id = sc->brtime_id & CFSCSIID;
6811
6812         /*
6813          * Allocate a tstate to house information for our
6814          * initiator presence on the bus as well as the user
6815          * data for any target mode initiator.
6816          */
6817         if (ahd_alloc_tstate(ahd, ahd->our_id, 'A') == NULL) {
6818                 kprintf("%s: unable to allocate ahd_tmode_tstate.  "
6819                        "Failing attach\n", ahd_name(ahd));
6820                 return (ENOMEM);
6821         }
6822
6823         for (targ = 0; targ < max_targ; targ++) {
6824                 struct   ahd_devinfo devinfo;
6825                 struct   ahd_initiator_tinfo *tinfo;
6826                 struct   ahd_transinfo *user_tinfo;
6827                 struct   ahd_tmode_tstate *tstate;
6828                 uint16_t target_mask;
6829
6830                 tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
6831                                             targ, &tstate);
6832                 user_tinfo = &tinfo->user;
6833
6834                 /*
6835                  * We support SPC2 and SPI4.
6836                  */
6837                 tinfo->user.protocol_version = 4;
6838                 tinfo->user.transport_version = 4;
6839
6840                 target_mask = 0x01 << targ;
6841                 ahd->user_discenable &= ~target_mask;
6842                 tstate->discenable &= ~target_mask;
6843                 ahd->user_tagenable &= ~target_mask;
6844                 if (sc->device_flags[targ] & CFDISC) {
6845                         tstate->discenable |= target_mask;
6846                         ahd->user_discenable |= target_mask;
6847                         ahd->user_tagenable |= target_mask;
6848                 } else {
6849                         /*
6850                          * Cannot be packetized without disconnection.
6851                          */
6852                         sc->device_flags[targ] &= ~CFPACKETIZED;
6853                 }
6854
6855                 user_tinfo->ppr_options = 0;
6856                 user_tinfo->period = (sc->device_flags[targ] & CFXFER);
6857                 if (user_tinfo->period < CFXFER_ASYNC) {
6858                         if (user_tinfo->period <= AHD_PERIOD_10MHz)
6859                                 user_tinfo->ppr_options |= MSG_EXT_PPR_DT_REQ;
6860                         user_tinfo->offset = MAX_OFFSET;
6861                 } else  {
6862                         user_tinfo->offset = 0;
6863                         user_tinfo->period = AHD_ASYNC_XFER_PERIOD;
6864                 }
6865 #ifdef AHD_FORCE_160
6866                 if (user_tinfo->period <= AHD_SYNCRATE_160)
6867                         user_tinfo->period = AHD_SYNCRATE_DT;
6868 #endif
6869
6870                 if ((sc->device_flags[targ] & CFPACKETIZED) != 0) {
6871                         user_tinfo->ppr_options |= MSG_EXT_PPR_RD_STRM
6872                                                 |  MSG_EXT_PPR_WR_FLOW
6873                                                 |  MSG_EXT_PPR_HOLD_MCS
6874                                                 |  MSG_EXT_PPR_IU_REQ;
6875                         if ((ahd->features & AHD_RTI) != 0)
6876                                 user_tinfo->ppr_options |= MSG_EXT_PPR_RTI;
6877                 }
6878
6879                 if ((sc->device_flags[targ] & CFQAS) != 0)
6880                         user_tinfo->ppr_options |= MSG_EXT_PPR_QAS_REQ;
6881
6882                 if ((sc->device_flags[targ] & CFWIDEB) != 0)
6883                         user_tinfo->width = MSG_EXT_WDTR_BUS_16_BIT;
6884                 else
6885                         user_tinfo->width = MSG_EXT_WDTR_BUS_8_BIT;
6886 #ifdef AHD_DEBUG
6887                 if ((ahd_debug & AHD_SHOW_MISC) != 0)
6888                         kprintf("(%d): %x:%x:%x:%x\n", targ, user_tinfo->width,
6889                                user_tinfo->period, user_tinfo->offset,
6890                                user_tinfo->ppr_options);
6891 #endif
6892                 /*
6893                  * Start out Async/Narrow/Untagged and with
6894                  * conservative protocol support.
6895                  */
6896                 tstate->tagenable &= ~target_mask;
6897                 tinfo->goal.protocol_version = 2;
6898                 tinfo->goal.transport_version = 2;
6899                 tinfo->curr.protocol_version = 2;
6900                 tinfo->curr.transport_version = 2;
6901                 ahd_compile_devinfo(&devinfo, ahd->our_id,
6902                                     targ, CAM_LUN_WILDCARD,
6903                                     'A', ROLE_INITIATOR);
6904                 ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
6905                               AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE);
6906                 ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0,
6907                                  /*ppr_options*/0, AHD_TRANS_CUR|AHD_TRANS_GOAL,
6908                                  /*paused*/TRUE);
6909         }
6910
6911         ahd->flags &= ~AHD_SPCHK_ENB_A;
6912         if (sc->bios_control & CFSPARITY)
6913                 ahd->flags |= AHD_SPCHK_ENB_A;
6914
6915         ahd->flags &= ~AHD_RESET_BUS_A;
6916         if (sc->bios_control & CFRESETB)
6917                 ahd->flags |= AHD_RESET_BUS_A;
6918
6919         ahd->flags &= ~AHD_EXTENDED_TRANS_A;
6920         if (sc->bios_control & CFEXTEND)
6921                 ahd->flags |= AHD_EXTENDED_TRANS_A;
6922
6923         ahd->flags &= ~AHD_BIOS_ENABLED;
6924         if ((sc->bios_control & CFBIOSSTATE) == CFBS_ENABLED)
6925                 ahd->flags |= AHD_BIOS_ENABLED;
6926
6927         ahd->flags &= ~AHD_STPWLEVEL_A;
6928         if ((sc->adapter_control & CFSTPWLEVEL) != 0)
6929                 ahd->flags |= AHD_STPWLEVEL_A;
6930
6931         return (0);
6932 }
6933
6934 /*
6935  * Parse vital product data (VPD) information.
6936  */
6937 int
6938 ahd_parse_vpddata(struct ahd_softc *ahd, struct vpd_config *vpd)
6939 {
6940         int error;
6941
6942         error = ahd_verify_vpd_cksum(vpd);
6943         if (error == 0)
6944                 return (EINVAL);
6945         if ((vpd->bios_flags & VPDBOOTHOST) != 0)
6946                 ahd->flags |= AHD_BOOT_CHANNEL;
6947         return (0);
6948 }
6949
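/*
 * Enable or disable our interrupt output.  The cached pause and unpause
 * values of HCNTRL are updated as well so that subsequent pause/unpause
 * writes preserve the interrupt enable state.
 */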
6950 void
6951 ahd_intr_enable(struct ahd_softc *ahd, int enable)
6952 {
6953         u_int hcntrl;
6954
6955         hcntrl = ahd_inb(ahd, HCNTRL);
6956         hcntrl &= ~INTEN;
6957         ahd->pause &= ~INTEN;
6958         ahd->unpause &= ~INTEN;
6959         if (enable) {
6960                 hcntrl |= INTEN;
6961                 ahd->pause |= INTEN;
6962                 ahd->unpause |= INTEN;
6963         }
6964         ahd_outb(ahd, HCNTRL, hcntrl);
6965 }
6966
6967 void
6968 ahd_update_coalescing_values(struct ahd_softc *ahd, u_int timer, u_int maxcmds,
6969                              u_int mincmds)
6970 {
6971         if (timer > AHD_TIMER_MAX_US)
6972                 timer = AHD_TIMER_MAX_US;
6973         ahd->int_coalescing_timer = timer;
6974
6975         if (maxcmds > AHD_INT_COALESCING_MAXCMDS_MAX)
6976                 maxcmds = AHD_INT_COALESCING_MAXCMDS_MAX;
6977         if (mincmds > AHD_INT_COALESCING_MINCMDS_MAX)
6978                 mincmds = AHD_INT_COALESCING_MINCMDS_MAX;
6979         ahd->int_coalescing_maxcmds = maxcmds;
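        /*
         * The timer is programmed in hardware ticks.  The command
         * thresholds are written negated (two's complement), presumably so
         * the sequencer can simply count up and test for zero; e.g. a
         * maxcmds of 8 is stored as 0xF8.
         */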
6980         ahd_outw(ahd, INT_COALESCING_TIMER, timer / AHD_TIMER_US_PER_TICK);
6981         ahd_outb(ahd, INT_COALESCING_MAXCMDS, -maxcmds);
6982         ahd_outb(ahd, INT_COALESCING_MINCMDS, -mincmds);
6983 }
6984
6985 void
6986 ahd_enable_coalescing(struct ahd_softc *ahd, int enable)
6987 {
6988
6989         ahd->hs_mailbox &= ~ENINT_COALESCE;
6990         if (enable)
6991                 ahd->hs_mailbox |= ENINT_COALESCE;
6992         ahd_outb(ahd, HS_MAILBOX, ahd->hs_mailbox);
6993         ahd_flush_device_writes(ahd);
6994         ahd_run_qoutfifo(ahd);
6995 }
6996
6997 /*
6998  * Ensure that the card is paused in a location
6999  * outside of all critical sections and that all
7000  * pending work is completed prior to returning.
7001  * This routine should only be called from outside
7002  * an interrupt context.
7003  */
7004 void
7005 ahd_pause_and_flushwork(struct ahd_softc *ahd)
7006 {
7007         u_int intstat;
7008         u_int maxloops;
7009
7010         maxloops = 1000;
7011         ahd->flags |= AHD_ALL_INTERRUPTS;
7012         ahd_pause(ahd);
7013         /*
7014          * Freeze the outgoing selections.  We do this only
7015          * until we are safely paused without further selections
7016          * pending.
7017          */
7018         ahd->qfreeze_cnt--;
7019         ahd_outw(ahd, KERNEL_QFREEZE_COUNT, ahd->qfreeze_cnt);
7020         ahd_outb(ahd, SEQ_FLAGS2, ahd_inb(ahd, SEQ_FLAGS2) | SELECTOUT_QFROZEN);
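        /*
         * Loop, servicing interrupts, until the controller is paused with
         * no interrupt pending and no selection enabled or in progress,
         * or until maxloops iterations have elapsed.
         */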
7021         do {
7022                 struct scb *waiting_scb;
7023
7024                 ahd_unpause(ahd);
7025                 /*
7026                  * Give the sequencer some time to service
7027                  * any active selections.
7028                  */
7029                 aic_delay(200);
7030
7031                 ahd_intr(ahd);
7032                 ahd_pause(ahd);
7033                 ahd_clear_critical_section(ahd);
7034                 intstat = ahd_inb(ahd, INTSTAT);
7035                 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
7036                 if ((ahd_inb(ahd, SSTAT0) & (SELDO|SELINGO)) == 0)
7037                         ahd_outb(ahd, SCSISEQ0,
7038                                  ahd_inb(ahd, SCSISEQ0) & ~ENSELO);
7039                 /*
7040                  * In the non-packetized case, the sequencer (for Rev A)
7041                  * relies on ENSELO remaining set after SELDO.  The hardware
7042                  * auto-clears ENSELO in the packetized case.
7043                  */
7044                 waiting_scb = ahd_lookup_scb(ahd,
7045                                              ahd_inw(ahd, WAITING_TID_HEAD));
7046                 if (waiting_scb != NULL
7047                  && (waiting_scb->flags & SCB_PACKETIZED) == 0
7048                  && (ahd_inb(ahd, SSTAT0) & (SELDO|SELINGO)) != 0)
7049                         ahd_outb(ahd, SCSISEQ0,
7050                                  ahd_inb(ahd, SCSISEQ0) | ENSELO);
7051         } while (--maxloops
7052               && (intstat != 0xFF || (ahd->features & AHD_REMOVABLE) == 0)
7053               && ((intstat & INT_PEND) != 0
7054                || (ahd_inb(ahd, SCSISEQ0) & ENSELO) != 0
7055                || (ahd_inb(ahd, SSTAT0) & (SELDO|SELINGO)) != 0));
7056
7057         if (maxloops == 0) {
7058                 kprintf("Infinite interrupt loop, INTSTAT = %x\n",
7059                       ahd_inb(ahd, INTSTAT));
7060         }
7061         ahd->qfreeze_cnt++;
7062         ahd_outw(ahd, KERNEL_QFREEZE_COUNT, ahd->qfreeze_cnt);
7063
7064         ahd_flush_qoutfifo(ahd);
7065
7066         ahd_platform_flushwork(ahd);
7067         ahd->flags &= ~AHD_ALL_INTERRUPTS;
7068 }
7069
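/*
 * Flush all pending work and shut the controller down in preparation for
 * a suspend.  Returns EBUSY if transactions are still outstanding.
 */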
7070 int
7071 ahd_suspend(struct ahd_softc *ahd)
7072 {
7073
7074         ahd_pause_and_flushwork(ahd);
7075
7076         if (LIST_FIRST(&ahd->pending_scbs) != NULL) {
7077                 ahd_unpause(ahd);
7078                 return (EBUSY);
7079         }
7080         ahd_shutdown(ahd);
7081         return (0);
7082 }
7083
7084 int
7085 ahd_resume(struct ahd_softc *ahd)
7086 {
7087
7088         ahd_reset(ahd, /*reinit*/TRUE);
7089         ahd_intr_enable(ahd, TRUE); 
7090         ahd_restart(ahd);
7091         return (0);
7092 }
7093
7094 /************************** Busy Target Table *********************************/
7095 /*
7096  * Set SCBPTR to the SCB that contains the busy
7097  * table entry for TCL.  Return the offset into
7098  * the SCB that contains the entry for TCL.
7099  * saved_scbid is dereferenced and set to the
7100  * scbid that should be restored once manipulation
7101  * of the TCL entry is complete.
7102  */
7103 static __inline u_int
7104 ahd_index_busy_tcl(struct ahd_softc *ahd, u_int *saved_scbid, u_int tcl)
7105 {
7106         /*
7107          * Index to the SCB that contains the busy entry.
7108          */
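        /*
         * The busy table lives in SCB scratch space: the LUN and the upper
         * two bits of the target offset select the SCB, while the low two
         * target bits select one of four two-byte entries within it.
         */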
7109         AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
7110         *saved_scbid = ahd_get_scbptr(ahd);
7111         ahd_set_scbptr(ahd, TCL_LUN(tcl)
7112                      | ((TCL_TARGET_OFFSET(tcl) & 0xC) << 4));
7113
7114         /*
7115          * And now calculate the SCB offset to the entry.
7116          * Each entry is 2 bytes wide, hence the
7117          * multiplication by 2.
7118          */
7119         return (((TCL_TARGET_OFFSET(tcl) & 0x3) << 1) + SCB_DISCONNECTED_LISTS);
7120 }
7121
7122 /*
7123  * Return the untagged transaction id for a given target/channel lun.
7124  */
7125 u_int
7126 ahd_find_busy_tcl(struct ahd_softc *ahd, u_int tcl)
7127 {
7128         u_int scbid;
7129         u_int scb_offset;
7130         u_int saved_scbptr;
7131                 
7132         scb_offset = ahd_index_busy_tcl(ahd, &saved_scbptr, tcl);
7133         scbid = ahd_inw_scbram(ahd, scb_offset);
7134         ahd_set_scbptr(ahd, saved_scbptr);
7135         return (scbid);
7136 }
7137
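/*
 * Record scbid as the active untagged transaction for the
 * target/channel/lun addressed by tcl.
 */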
7138 void
7139 ahd_busy_tcl(struct ahd_softc *ahd, u_int tcl, u_int scbid)
7140 {
7141         u_int scb_offset;
7142         u_int saved_scbptr;
7143                 
7144         scb_offset = ahd_index_busy_tcl(ahd, &saved_scbptr, tcl);
7145         ahd_outw(ahd, scb_offset, scbid);
7146         ahd_set_scbptr(ahd, saved_scbptr);
7147 }
7148
7149 /************************** SCB and SCB queue management **********************/
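/*
 * Determine whether an SCB matches the given target/channel/lun/tag
 * criteria for the specified role.  Wildcard values (ALL_CHANNELS,
 * CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD, and SCB_LIST_NULL) match
 * anything.
 */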
7150 int
7151 ahd_match_scb(struct ahd_softc *ahd, struct scb *scb, int target,
7152               char channel, int lun, u_int tag, role_t role)
7153 {
7154         int targ = SCB_GET_TARGET(ahd, scb);
7155         char chan = SCB_GET_CHANNEL(ahd, scb);
7156         int slun = SCB_GET_LUN(scb);
7157         int match;
7158
7159         match = ((chan == channel) || (channel == ALL_CHANNELS));
7160         if (match != 0)
7161                 match = ((targ == target) || (target == CAM_TARGET_WILDCARD));
7162         if (match != 0)
7163                 match = ((lun == slun) || (lun == CAM_LUN_WILDCARD));
7164         if (match != 0) {
7165 #ifdef AHD_TARGET_MODE
7166                 int group;
7167
7168                 group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code);
7169                 if (role == ROLE_INITIATOR) {
7170                         match = (group != XPT_FC_GROUP_TMODE)
7171                               && ((tag == SCB_GET_TAG(scb))
7172                                || (tag == SCB_LIST_NULL));
7173                 } else if (role == ROLE_TARGET) {
7174                         match = (group == XPT_FC_GROUP_TMODE)
7175                               && ((tag == scb->io_ctx->csio.tag_id)
7176                                || (tag == SCB_LIST_NULL));
7177                 }
7178 #else /* !AHD_TARGET_MODE */
7179                 match = ((tag == SCB_GET_TAG(scb)) || (tag == SCB_LIST_NULL));
7180 #endif /* AHD_TARGET_MODE */
7181         }
7182
7183         return match;
7184 }
7185
7186 void
7187 ahd_freeze_devq(struct ahd_softc *ahd, struct scb *scb)
7188 {
7189         int     target;
7190         char    channel;
7191         int     lun;
7192
7193         target = SCB_GET_TARGET(ahd, scb);
7194         lun = SCB_GET_LUN(scb);
7195         channel = SCB_GET_CHANNEL(ahd, scb);
7196         
7197         ahd_search_qinfifo(ahd, target, channel, lun,
7198                            /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN,
7199                            CAM_REQUEUE_REQ, SEARCH_COMPLETE);
7200
7201         ahd_platform_freeze_devq(ahd, scb);
7202 }
7203
7204 void
7205 ahd_qinfifo_requeue_tail(struct ahd_softc *ahd, struct scb *scb)
7206 {
7207         struct scb      *prev_scb;
7208         ahd_mode_state   saved_modes;
7209
7210         saved_modes = ahd_save_modes(ahd);
7211         ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
7212         prev_scb = NULL;
7213         if (ahd_qinfifo_count(ahd) != 0) {
7214                 u_int prev_tag;
7215                 u_int prev_pos;
7216
7217                 prev_pos = AHD_QIN_WRAP(ahd->qinfifonext - 1);
7218                 prev_tag = ahd->qinfifo[prev_pos];
7219                 prev_scb = ahd_lookup_scb(ahd, prev_tag);
7220         }
7221         ahd_qinfifo_requeue(ahd, prev_scb, scb);
7222         ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);
7223         ahd_restore_modes(ahd, saved_modes);
7224 }
7225
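/*
 * Link an SCB into the qinfifo behind prev_scb.  With no previous entry,
 * the sequencer's NEXT_QUEUED_SCB_ADDR is pointed at the SCB directly;
 * otherwise the previous hardware SCB's next_hscb_busaddr is chained to
 * it.  The new tail is then linked to the pre-allocated next_queued_hscb.
 */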
7226 static void
7227 ahd_qinfifo_requeue(struct ahd_softc *ahd, struct scb *prev_scb,
7228                     struct scb *scb)
7229 {
7230         if (prev_scb == NULL) {
7231                 uint32_t busaddr;
7232
7233                 busaddr = aic_le32toh(scb->hscb->hscb_busaddr);
7234                 ahd_outl(ahd, NEXT_QUEUED_SCB_ADDR, busaddr);
7235         } else {
7236                 prev_scb->hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr;
7237                 ahd_sync_scb(ahd, prev_scb, 
7238                              BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
7239         }
7240         ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb);
7241         ahd->qinfifonext++;
7242         scb->hscb->next_hscb_busaddr = ahd->next_queued_hscb->hscb_busaddr;
7243         ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
7244 }
7245
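/*
 * Return the number of qinfifo entries the sequencer has yet to consume:
 * the distance from its consumer index (the snscb queue offset) to our
 * producer index (qinfifonext), modulo the fifo size.
 */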
7246 static int
7247 ahd_qinfifo_count(struct ahd_softc *ahd)
7248 {
7249         u_int qinpos;
7250         u_int wrap_qinpos;
7251         u_int wrap_qinfifonext;
7252
7253         AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
7254         qinpos = ahd_get_snscb_qoff(ahd);
7255         wrap_qinpos = AHD_QIN_WRAP(qinpos);
7256         wrap_qinfifonext = AHD_QIN_WRAP(ahd->qinfifonext);
7257         if (wrap_qinfifonext >= wrap_qinpos)
7258                 return (wrap_qinfifonext - wrap_qinpos);
7259         else
7260                 return (wrap_qinfifonext
7261                       + NUM_ELEMENTS(ahd->qinfifo) - wrap_qinpos);
7262 }
7263
7264 void
7265 ahd_reset_cmds_pending(struct ahd_softc *ahd)
7266 {
7267         struct          scb *scb;
7268         ahd_mode_state  saved_modes;
7269         u_int           pending_cmds;
7270
7271         saved_modes = ahd_save_modes(ahd);
7272         ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
7273
7274         /*
7275          * Don't count any commands as outstanding that the
7276          * sequencer has already marked for completion.
7277          */
7278         ahd_flush_qoutfifo(ahd);
7279
7280         pending_cmds = 0;
7281         LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) {
7282                 pending_cmds++;
7283         }
7284         ahd_outw(ahd, CMDS_PENDING, pending_cmds - ahd_qinfifo_count(ahd));
7285         ahd_restore_modes(ahd, saved_modes);
7286         ahd->flags &= ~AHD_UPDATE_PEND_CMDS;
7287 }
7288
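/*
 * Scan the qinfifo and the waiting-for-selection lists for SCBs matching
 * the given target/channel/lun/tag/role and apply the requested action
 * (complete, remove, print, or count).  Returns the number of matches.
 */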
7289 int
7290 ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel,
7291                    int lun, u_int tag, role_t role, uint32_t status,
7292                    ahd_search_action action)
7293 {
7294         struct scb      *scb;
7295         struct scb      *prev_scb;
7296         ahd_mode_state   saved_modes;
7297         u_int            qinstart;
7298         u_int            qinpos;
7299         u_int            qintail;
7300         u_int            tid_next;
7301         u_int            tid_prev;
7302         u_int            scbid;
7303         u_int            savedscbptr;
7304         uint32_t         busaddr;
7305         int              found;
7306         int              targets;
7307
7308         /* Must be in CCHAN mode */
7309         saved_modes = ahd_save_modes(ahd);
7310         ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
7311
7312         /*
7313          * Halt any pending SCB DMA.  The sequencer will reinitiate
7314          * this dma if the qinfifo is not empty once we unpause.
7315          */
7316         if ((ahd_inb(ahd, CCSCBCTL) & (CCARREN|CCSCBEN|CCSCBDIR))
7317          == (CCARREN|CCSCBEN|CCSCBDIR)) {
7318                 ahd_outb(ahd, CCSCBCTL,
7319                          ahd_inb(ahd, CCSCBCTL) & ~(CCARREN|CCSCBEN));
7320                 while ((ahd_inb(ahd, CCSCBCTL) & (CCARREN|CCSCBEN)) != 0)
7321                         ;
7322         }
7323         /* Determine sequencer's position in the qinfifo. */
7324         qintail = AHD_QIN_WRAP(ahd->qinfifonext);
7325         qinstart = ahd_get_snscb_qoff(ahd);
7326         qinpos = AHD_QIN_WRAP(qinstart);
7327         found = 0;
7328         prev_scb = NULL;
7329
7330         if (action == SEARCH_PRINT) {
7331                 kprintf("qinstart = %d qinfifonext = %d\nQINFIFO:",
7332                        qinstart, ahd->qinfifonext);
7333         }
7334
7335         /*
7336          * Start with an empty queue.  Entries that are not chosen
7337          * for removal will be re-added to the queue as we go.
7338          */
7339         ahd->qinfifonext = qinstart;
7340         busaddr = aic_le32toh(ahd->next_queued_hscb->hscb_busaddr);
7341         ahd_outl(ahd, NEXT_QUEUED_SCB_ADDR, busaddr);
7342
7343         while (qinpos != qintail) {
7344                 scb = ahd_lookup_scb(ahd, ahd->qinfifo[qinpos]);
7345                 if (scb == NULL) {
7346                         kprintf("qinpos = %d, SCB index = %d\n",
7347                                 qinpos, ahd->qinfifo[qinpos]);
7348                         panic("Loop 1\n");
7349                 }
7350
7351                 if (ahd_match_scb(ahd, scb, target, channel, lun, tag, role)) {
7352                         /*
7353                          * We found an scb that needs to be acted on.
7354                          */
7355                         found++;
7356                         switch (action) {
7357                         case SEARCH_COMPLETE:
7358                         {
7359                                 cam_status ostat;
7360                                 cam_status cstat;
7361
7362                                 ostat = aic_get_transaction_status(scb);
7363                                 if (ostat == CAM_REQ_INPROG)
7364                                         aic_set_transaction_status(scb,
7365                                                                    status);
7366                                 cstat = aic_get_transaction_status(scb);
7367                                 if (cstat != CAM_REQ_CMP)
7368                                         aic_freeze_scb(scb);
7369                                 if ((scb->flags & SCB_ACTIVE) == 0)
7370                                         kprintf("Inactive SCB in qinfifo\n");
7371                                 ahd_done(ahd, scb);
7372
7373                                 /* FALLTHROUGH */
7374                         }
7375                         case SEARCH_REMOVE:
7376                                 break;
7377                         case SEARCH_PRINT:
7378                                 kprintf(" 0x%x", ahd->qinfifo[qinpos]);
7379                                 /* FALLTHROUGH */
7380                         case SEARCH_COUNT:
7381                                 ahd_qinfifo_requeue(ahd, prev_scb, scb);
7382                                 prev_scb = scb;
7383                                 break;
7384                         }
7385                 } else {
7386                         ahd_qinfifo_requeue(ahd, prev_scb, scb);
7387                         prev_scb = scb;
7388                 }
7389                 qinpos = AHD_QIN_WRAP(qinpos+1);
7390         }
7391
7392         ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);
7393
7394         if (action == SEARCH_PRINT)
7395                 kprintf("\nWAITING_TID_QUEUES:\n");
7396
7397         /*
7398          * Search waiting for selection lists.  We traverse the
7399          * list of "their ids" waiting for selection and, if
7400          * appropriate, traverse the SCBs of each "their id"
7401          * looking for matches.
7402          */
7403         ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
7404         savedscbptr = ahd_get_scbptr(ahd);
7405         tid_next = ahd_inw(ahd, WAITING_TID_HEAD);
7406         tid_prev = SCB_LIST_NULL;
7407         targets = 0;
7408         for (scbid = tid_next; !SCBID_IS_NULL(scbid); scbid = tid_next) {
7409                 u_int tid_head;
7410
7411                 /*
7412                  * We limit based on the number of SCBs since
7413                  * MK_MESSAGE SCBs are not in the per-tid lists.
7414                  */
7415                 targets++;
7416                 if (targets > AHD_SCB_MAX) {
7417                         panic("TID LIST LOOP");
7418                 }
7419                 if (scbid >= ahd->scb_data.numscbs) {
7420                         kprintf("%s: Waiting TID List inconsistency. "
7421                                "SCB index == 0x%x, yet numscbs == 0x%x.",
7422                                ahd_name(ahd), scbid, ahd->scb_data.numscbs);
7423                         ahd_dump_card_state(ahd);
7424                         panic("for safety");
7425                 }
7426                 scb = ahd_lookup_scb(ahd, scbid);
7427                 if (scb == NULL) {
7428                         kprintf("%s: SCB = 0x%x Not Active!\n",
7429                                ahd_name(ahd), scbid);
7430                         panic("Waiting TID List traversal\n");
7431                 }
7432                 ahd_set_scbptr(ahd, scbid);
7433                 tid_next = ahd_inw_scbram(ahd, SCB_NEXT2);
7434                 if (ahd_match_scb(ahd, scb, target, channel, CAM_LUN_WILDCARD,
7435                                   SCB_LIST_NULL, ROLE_UNKNOWN) == 0) {
7436                         tid_prev = scbid;
7437                         continue;
7438                 }
7439
7440                 /*
7441                  * We found a list of scbs that needs to be searched.
7442                  */
7443                 if (action == SEARCH_PRINT)
7444                         kprintf("       %d ( ", SCB_GET_TARGET(ahd, scb));
7445                 tid_head = scbid;
7446                 found += ahd_search_scb_list(ahd, target, channel,
7447                                              lun, tag, role, status,
7448                                              action, &tid_head,
7449                                              SCB_GET_TARGET(ahd, scb));
7450                 if (tid_head != scbid)
7451                         ahd_stitch_tid_list(ahd, tid_prev, tid_head, tid_next);
7452                 if (!SCBID_IS_NULL(tid_head))
7453                         tid_prev = tid_head;
7454                 if (action == SEARCH_PRINT)
7455                         kprintf(")\n");
7456         }
7457         ahd_set_scbptr(ahd, savedscbptr);
7458         ahd_restore_modes(ahd, saved_modes);
7459         return (found);
7460 }
7461
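/*
 * Walk a single per-target waiting-for-selection list, applying the
 * requested action to each matching SCB and unlinking entries that are
 * completed or removed.  Returns the number of matches found.
 */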
7462 static int
7463 ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel,
7464                     int lun, u_int tag, role_t role, uint32_t status,
7465                     ahd_search_action action, u_int *list_head, u_int tid)
7466 {
7467         struct  scb *scb;
7468         u_int   scbid;
7469         u_int   next;
7470         u_int   prev;
7471         int     found;
7472
7473         AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
7474         found = 0;
7475         prev = SCB_LIST_NULL;
7476         next = *list_head;
7477         for (scbid = next; !SCBID_IS_NULL(scbid); scbid = next) {
7478                 if (scbid >= ahd->scb_data.numscbs) {
7479                         kprintf("%s:SCB List inconsistency. "
7480                                "SCB == 0x%x, yet numscbs == 0x%x.",
7481                                ahd_name(ahd), scbid, ahd->scb_data.numscbs);
7482                         ahd_dump_card_state(ahd);
7483                         panic("for safety");
7484                 }
7485                 scb = ahd_lookup_scb(ahd, scbid);
7486                 if (scb == NULL) {
7487                         kprintf("%s: SCB = %d Not Active!\n",
7488                                ahd_name(ahd), scbid);
7489                         panic("Waiting List traversal\n");
7490                 }
7491                 ahd_set_scbptr(ahd, scbid);
7492                 next = ahd_inw_scbram(ahd, SCB_NEXT);
7493                 if (ahd_match_scb(ahd, scb, target, channel,
7494                                   lun, SCB_LIST_NULL, role) == 0) {
7495                         prev = scbid;
7496                         continue;
7497                 }
7498                 found++;
7499                 switch (action) {
7500                 case SEARCH_COMPLETE:
7501                 {
7502                         cam_status ostat;
7503                         cam_status cstat;
7504
7505                         ostat = aic_get_transaction_status(scb);
7506                         if (ostat == CAM_REQ_INPROG)
7507                                 aic_set_transaction_status(scb, status);
7508                         cstat = aic_get_transaction_status(scb);
7509                         if (cstat != CAM_REQ_CMP)
7510                                 aic_freeze_scb(scb);
7511                         if ((scb->flags & SCB_ACTIVE) == 0)
7512                                 kprintf("Inactive SCB in Waiting List\n");
7513                         ahd_done(ahd, scb);
7514                         /* FALLTHROUGH */
7515                 }
7516                 case SEARCH_REMOVE:
7517                         ahd_rem_wscb(ahd, scbid, prev, next, tid);
7518                         if (prev == SCB_LIST_NULL)
7519                                 *list_head = next;
7520                         break;
7521                 case SEARCH_PRINT:
7522                         kprintf("0x%x ", scbid);
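                        /* FALLTHROUGH */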
7523                 case SEARCH_COUNT:
7524                         prev = scbid;
7525                         break;
7526                 }
7527                 if (found > AHD_SCB_MAX)
7528                         panic("SCB LIST LOOP");
7529         }
7530         if (action == SEARCH_COMPLETE
7531          || action == SEARCH_REMOVE)
7532                 ahd_outw(ahd, CMDS_PENDING, ahd_inw(ahd, CMDS_PENDING) - found);
7533         return (found);
7534 }
7535
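/*
 * Repair the waiting-for-selection TID list after a search.  If tid_cur
 * is NULL, the list is spliced from tid_prev directly to tid_next;
 * otherwise tid_cur is linked between them.  The WAITING_TID_HEAD and
 * WAITING_TID_TAIL scratch pointers are updated as needed.
 */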
7536 static void
7537 ahd_stitch_tid_list(struct ahd_softc *ahd, u_int tid_prev,
7538                     u_int tid_cur, u_int tid_next)
7539 {
7540         AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
7541
7542         if (SCBID_IS_NULL(tid_cur)) {
7543
7544                 /* Bypass current TID list */
7545                 if (SCBID_IS_NULL(tid_prev)) {
7546                         ahd_outw(ahd, WAITING_TID_HEAD, tid_next);
7547                 } else {
7548                         ahd_set_scbptr(ahd, tid_prev);
7549                         ahd_outw(ahd, SCB_NEXT2, tid_next);
7550                 }
7551                 if (SCBID_IS_NULL(tid_next))
7552                         ahd_outw(ahd, WAITING_TID_TAIL, tid_prev);
7553         } else {
7554
7555                 /* Stitch through tid_cur */
7556                 if (SCBID_IS_NULL(tid_prev)) {
7557                         ahd_outw(ahd, WAITING_TID_HEAD, tid_cur);
7558                 } else {
7559                         ahd_set_scbptr(ahd, tid_prev);
7560                         ahd_outw(ahd, SCB_NEXT2, tid_cur);
7561                 }
7562                 ahd_set_scbptr(ahd, tid_cur);
7563                 ahd_outw(ahd, SCB_NEXT2, tid_next);
7564
7565                 if (SCBID_IS_NULL(tid_next))
7566                         ahd_outw(ahd, WAITING_TID_TAIL, tid_cur);
7567         }
7568 }
7569
7570 /*
7571  * Manipulate the waiting for selection list and return the
7572  * scb that follows the one that we remove.
7573  */
7574 static u_int
7575 ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid,
7576              u_int prev, u_int next, u_int tid)
7577 {
7578         u_int tail_offset;
7579
7580         AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
7581         if (!SCBID_IS_NULL(prev)) {
7582                 ahd_set_scbptr(ahd, prev);
7583                 ahd_outw(ahd, SCB_NEXT, next);
7584         }
7585
7586         /*
7587          * SCBs that had MK_MESSAGE set in them will not
7588          * be queued to the per-target lists, so don't
7589          * blindly clear the tail pointer.
7590          */
7591         tail_offset = WAITING_SCB_TAILS + (2 * tid);
7592         if (SCBID_IS_NULL(next)
7593          && ahd_inw(ahd, tail_offset) == scbid)
7594                 ahd_outw(ahd, tail_offset, prev);
7595         ahd_add_scb_to_free_list(ahd, scbid);
7596         return (next);
7597 }
7598
7599 /*
7600  * Add the SCB as selected by SCBPTR onto the on chip list of
7601  * free hardware SCBs.  This list is empty/unused if we are not
7602  * performing SCB paging.
7603  */
7604 static void
7605 ahd_add_scb_to_free_list(struct ahd_softc *ahd, u_int scbid)
7606 {
7607 /* XXX Need some other mechanism to designate "free". */
7608         /*
7609          * Invalidate the tag so that our abort
7610          * routines don't think it's active.
7611         ahd_outb(ahd, SCB_TAG, SCB_LIST_NULL);
7612          */
7613 }
7614
7615 /******************************** Error Handling ******************************/
7616 /*
7617  * Abort all SCBs that match the given description (target/channel/lun/tag),
7618  * setting their status to the passed in status if the status has not already
7619  * been modified from CAM_REQ_INPROG.  This routine assumes that the sequencer
7620  * is paused before it is called.
7621  */
7622 int
7623 ahd_abort_scbs(struct ahd_softc *ahd, int target, char channel,
7624                int lun, u_int tag, role_t role, uint32_t status)
7625 {
7626         struct          scb *scbp;
7627         struct          scb *scbp_next;
7628         u_int           i, j;
7629         u_int           maxtarget;
7630         u_int           minlun;
7631         u_int           maxlun;
7632         int             found;
7633         ahd_mode_state  saved_modes;
7634
7635         /* restore this when we're done */
7636         saved_modes = ahd_save_modes(ahd);
7637         ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
7638
7639         found = ahd_search_qinfifo(ahd, target, channel, lun, SCB_LIST_NULL,
7640                                    role, CAM_REQUEUE_REQ, SEARCH_COMPLETE);
7641
7642         /*
7643          * Clean out the busy target table for any untagged commands.
7644          */
7645         i = 0;
7646         maxtarget = 16;
7647         if (target != CAM_TARGET_WILDCARD) {
7648                 i = target;
7649                 if (channel == 'B')
7650                         i += 8;
7651                 maxtarget = i + 1;
7652         }
7653
7654         if (lun == CAM_LUN_WILDCARD) {
7655                 minlun = 0;
7656                 maxlun = AHD_NUM_LUNS_NONPKT;
7657         } else if (lun >= AHD_NUM_LUNS_NONPKT) {
7658                 minlun = maxlun = 0;
7659         } else {
7660                 minlun = lun;
7661                 maxlun = lun + 1;
7662         }
7663
7664         if (role != ROLE_TARGET) {
7665                 for (; i < maxtarget; i++) {
7666                         for (j = minlun; j < maxlun; j++) {
7667                                 u_int scbid;
7668                                 u_int tcl;
7669
7670                                 tcl = BUILD_TCL_RAW(i, 'A', j);
7671                                 scbid = ahd_find_busy_tcl(ahd, tcl);
7672                                 scbp = ahd_lookup_scb(ahd, scbid);
7673                                 if (scbp == NULL
7674                                  || ahd_match_scb(ahd, scbp, target, channel,
7675                                                   lun, tag, role) == 0)
7676                                         continue;
7677                                 ahd_unbusy_tcl(ahd, BUILD_TCL_RAW(i, 'A', j));
7678                         }
7679                 }
7680         }
7681
7682         /*
7683          * Don't abort commands that have already completed,
7684          * but haven't quite made it up to the host yet.
7685          */
7686         ahd_flush_qoutfifo(ahd);
7687
7688         /*
7689          * Go through the pending CCB list and look for
7690          * commands for this target that are still active.
7691          * These are other tagged commands that were
7692          * disconnected when the reset occurred.
7693          */
7694         scbp_next = LIST_FIRST(&ahd->pending_scbs);
7695         while (scbp_next != NULL) {
7696                 scbp = scbp_next;
7697                 scbp_next = LIST_NEXT(scbp, pending_links);
7698                 if (ahd_match_scb(ahd, scbp, target, channel, lun, tag, role)) {
7699                         cam_status ostat;
7700
7701                         ostat = aic_get_transaction_status(scbp);
7702                         if (ostat == CAM_REQ_INPROG)
7703                                 aic_set_transaction_status(scbp, status);
7704                         if (aic_get_transaction_status(scbp) != CAM_REQ_CMP)
7705                                 aic_freeze_scb(scbp);
7706                         if ((scbp->flags & SCB_ACTIVE) == 0)
7707                                 kprintf("Inactive SCB on pending list\n");
7708                         ahd_done(ahd, scbp);
7709                         found++;
7710                 }
7711         }
7712         ahd_restore_modes(ahd, saved_modes);
7713         ahd_platform_abort_scbs(ahd, target, channel, lun, tag, role, status);
7714         ahd->flags |= AHD_UPDATE_PEND_CMDS;
7715         return found;
7716 }
7717
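     /*
      * Assert SCSIRSTO to force a reset of the currently selected bus.
      * Our own reset interrupt is masked for the duration, and chips with
      * the AHD_SCSIRST_BUG are fully re-initialized afterwards.
      */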
7718 static void
7719 ahd_reset_current_bus(struct ahd_softc *ahd)
7720 {
7721         uint8_t scsiseq;
7722
7723         AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
7724         ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) & ~ENSCSIRST);
7725         scsiseq = ahd_inb(ahd, SCSISEQ0) & ~(ENSELO|ENARBO|SCSIRSTO);
7726         ahd_outb(ahd, SCSISEQ0, scsiseq | SCSIRSTO);
7727         ahd_flush_device_writes(ahd);
7728         aic_delay(AHD_BUSRESET_DELAY);
7729         /* Turn off the bus reset */
7730         ahd_outb(ahd, SCSISEQ0, scsiseq);
7731         ahd_flush_device_writes(ahd);
7732         aic_delay(AHD_BUSRESET_DELAY);
7733         if ((ahd->bugs & AHD_SCSIRST_BUG) != 0) {
7734                 /*
7735                  * 2A Razor #474
7736                  * Certain chip state is not cleared for
7737                  * SCSI bus resets that we initiate, so
7738                  * we must reset the chip.
7739                  */
7740                 ahd_reset(ahd, /*reinit*/TRUE);
7741                 ahd_intr_enable(ahd, /*enable*/TRUE);
7742                 AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
7743         }
7744
7745         ahd_clear_intstat(ahd);
7746 }
7747
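     /*
      * Reset the given channel.  The sequencer and DMA FIFOs are quiesced,
      * the bus is optionally reset, all matching transactions are aborted
      * with CAM_SCSI_BUS_RESET, transfer negotiations revert to
      * async/narrow, and the SIMQ is frozen until the reset poller sees
      * the bus settle.  Returns the number of SCBs aborted.
      */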
7748 int
7749 ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset)
7750 {
7751         struct  ahd_devinfo devinfo;
7752         u_int   initiator;
7753         u_int   target;
7754         u_int   max_scsiid;
7755         int     found;
7756         u_int   fifo;
7757         u_int   next_fifo;
7758
7759         ahd->pending_device = NULL;
7760
7761         ahd_compile_devinfo(&devinfo,
7762                             CAM_TARGET_WILDCARD,
7763                             CAM_TARGET_WILDCARD,
7764                             CAM_LUN_WILDCARD,
7765                             channel, ROLE_UNKNOWN);
7766         ahd_pause(ahd);
7767
7768         /* Make sure the sequencer is in a safe location. */
7769         ahd_clear_critical_section(ahd);
7770
7771 #ifdef AHD_TARGET_MODE
7772         if ((ahd->flags & AHD_TARGETROLE) != 0) {
7773                 ahd_run_tqinfifo(ahd, /*paused*/TRUE);
7774         }
7775 #endif
7776         ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
7777
7778         /*
7779          * Disable selections so no automatic hardware
7780          * functions will modify chip state.
7781          */
7782         ahd_outb(ahd, SCSISEQ0, 0);
7783         ahd_outb(ahd, SCSISEQ1, 0);
7784
7785         /*
7786          * Safely shut down our DMA engines.  Always start with
7787          * the FIFO that is not currently active (if any are
7788          * actively connected).
7789          */
7790         next_fifo = fifo = ahd_inb(ahd, DFFSTAT) & CURRFIFO;
7791         if (next_fifo > CURRFIFO_1)
7792                 /* If disconnected, arbitrarily start with FIFO1. */
7793                 next_fifo = fifo = 0;
7794         do {
7795                 next_fifo ^= CURRFIFO_1;
7796                 ahd_set_modes(ahd, next_fifo, next_fifo);
7797                 ahd_outb(ahd, DFCNTRL,
7798                          ahd_inb(ahd, DFCNTRL) & ~(SCSIEN|HDMAEN));
7799                 while ((ahd_inb(ahd, DFCNTRL) & HDMAENACK) != 0)
7800                         aic_delay(10);
7801                 /*
7802                  * Set CURRFIFO to the now inactive channel.
7803                  */
7804                 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
7805                 ahd_outb(ahd, DFFSTAT, next_fifo);
7806         } while (next_fifo != fifo);
7807
7808         /*
7809          * Reset the bus if we are initiating this reset
7810          */
7811         ahd_clear_msg_state(ahd);
7812         ahd_outb(ahd, SIMODE1,
7813                  ahd_inb(ahd, SIMODE1) & ~(ENBUSFREE|ENSCSIRST));
7814
7815         if (initiate_reset)
7816                 ahd_reset_current_bus(ahd);
7817
7818         ahd_clear_intstat(ahd);
7819
7820         /*
7821          * Clean up all the state information for the
7822          * pending transactions on this bus.
7823          */
7824         found = ahd_abort_scbs(ahd, CAM_TARGET_WILDCARD, channel,
7825                                CAM_LUN_WILDCARD, SCB_LIST_NULL,
7826                                ROLE_UNKNOWN, CAM_SCSI_BUS_RESET);
7827
7828         /*
7829          * Cleanup anything left in the FIFOs.
7830          */
7831         ahd_clear_fifo(ahd, 0);
7832         ahd_clear_fifo(ahd, 1);
7833
7834         /*
7835          * Revert to async/narrow transfers until we renegotiate.
7836          */
7837         max_scsiid = (ahd->features & AHD_WIDE) ? 15 : 7;
7838         for (target = 0; target <= max_scsiid; target++) {
7839
7840                 if (ahd->enabled_targets[target] == NULL)
7841                         continue;
7842                 for (initiator = 0; initiator <= max_scsiid; initiator++) {
7843                         struct ahd_devinfo devinfo;
7844
7845                         ahd_compile_devinfo(&devinfo, target, initiator,
7846                                             CAM_LUN_WILDCARD,
7847                                             'A', ROLE_UNKNOWN);
7848                         ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
7849                                       AHD_TRANS_CUR, /*paused*/TRUE);
7850                         ahd_set_syncrate(ahd, &devinfo, /*period*/0,
7851                                          /*offset*/0, /*ppr_options*/0,
7852                                          AHD_TRANS_CUR, /*paused*/TRUE);
7853                 }
7854         }
7855
7856 #ifdef AHD_TARGET_MODE
7857         max_scsiid = (ahd->features & AHD_WIDE) ? 15 : 7;
7858
7859         /*
7860  * Send an immediate notify ccb to all target mode peripheral
7861          * drivers affected by this action.
7862          */
7863         for (target = 0; target <= max_scsiid; target++) {
7864                 struct ahd_tmode_tstate* tstate;
7865                 u_int lun;
7866
7867                 tstate = ahd->enabled_targets[target];
7868                 if (tstate == NULL)
7869                         continue;
7870                 for (lun = 0; lun < AHD_NUM_LUNS; lun++) {
7871                         struct ahd_tmode_lstate* lstate;
7872
7873                         lstate = tstate->enabled_luns[lun];
7874                         if (lstate == NULL)
7875                                 continue;
7876
7877                         ahd_queue_lstate_event(ahd, lstate, CAM_TARGET_WILDCARD,
7878                                                EVENT_TYPE_BUS_RESET, /*arg*/0);
7879                         ahd_send_lstate_events(ahd, lstate);
7880                 }
7881         }
7882 #endif
7883         /* Notify the XPT that a bus reset occurred */
7884         ahd_send_async(ahd, devinfo.channel, CAM_TARGET_WILDCARD,
7885                        CAM_LUN_WILDCARD, AC_BUS_RESET, NULL);
7886         ahd_restart(ahd);
7887         /*
7888          * Freeze the SIMQ until our poller can determine that
7889          * the bus reset has really gone away.  We set the initial
7890          * timer to 0 to have the check performed as soon as possible
7891          * from the timer context.
7892          */
7893         if ((ahd->flags & AHD_RESET_POLL_ACTIVE) == 0) {
7894                 ahd->flags |= AHD_RESET_POLL_ACTIVE;
7895                 aic_freeze_simq(ahd);
7896                 aic_timer_reset(&ahd->reset_timer, 0, ahd_reset_poll, ahd);
7897         }
7898         return (found);
7899 }
7900
7901
7902 #define AHD_RESET_POLL_US 1000
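     /*
      * Timer callback that polls until the SCSI reset line has deasserted
      * (SCSIRSTI no longer set), then re-enables reset interrupts, restores
      * the selection enables, and releases the SIMQ frozen by
      * ahd_reset_channel().
      */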
7903 static void
7904 ahd_reset_poll(void *arg)
7905 {
7906         struct  ahd_softc *ahd;
7907         u_int   scsiseq1;
7908         
7909         ahd = ahd_find_softc((struct ahd_softc *)arg);
7910         if (ahd == NULL) {
7911                 kprintf("ahd_reset_poll: Instance %p no longer exists\n", arg);
7912                 return;
7913         }
7914         ahd_lock();
7915         ahd_pause(ahd);
7916         ahd_update_modes(ahd);
7917         ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
7918         ahd_outb(ahd, CLRSINT1, CLRSCSIRSTI);
7919         if ((ahd_inb(ahd, SSTAT1) & SCSIRSTI) != 0) {
7920                 aic_timer_reset(&ahd->reset_timer, AHD_RESET_POLL_US,
7921                                 ahd_reset_poll, ahd);
7922                 ahd_unpause(ahd);
7923                 ahd_unlock();
7924                 return;
7925         }
7926
7927         /* Reset is now low.  Complete chip reinitialization. */
7928         ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) | ENSCSIRST);
7929         scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE);
7930         ahd_outb(ahd, SCSISEQ1, scsiseq1 & (ENSELI|ENRSELI|ENAUTOATNP));
7931         ahd_unpause(ahd);
7932         ahd->flags &= ~AHD_RESET_POLL_ACTIVE;
7933         ahd_unlock();
7934         aic_release_simq(ahd);
7935 }
7936
7937 /**************************** Statistics Processing ***************************/
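     /*
      * Periodic statistics timer.  Compares the recent command completion
      * rate against the coalescing thresholds to enable or disable
      * interrupt coalescing, then retires the oldest bucket of completion
      * counts and re-arms the timer.
      */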
7938 static void
7939 ahd_stat_timer(void *arg)
7940 {
7941         struct  ahd_softc *ahd;
7942         int     enint_coal;
7943         
7944         ahd = ahd_find_softc((struct ahd_softc *)arg);
7945         if (ahd == NULL) {
7946                 kprintf("ahd_stat_timer: Instance %p no longer exists\n", arg);
7947                 return;
7948         }
7949         ahd_lock();
7950
7951         enint_coal = ahd->hs_mailbox & ENINT_COALESCE;
7952         if (ahd->cmdcmplt_total > ahd->int_coalescing_threshold)
7953                 enint_coal |= ENINT_COALESCE;
7954         else if (ahd->cmdcmplt_total < ahd->int_coalescing_stop_threshold)
7955                 enint_coal &= ~ENINT_COALESCE;
7956
7957         if (enint_coal != (ahd->hs_mailbox & ENINT_COALESCE)) {
7958                 ahd_enable_coalescing(ahd, enint_coal);
7959 #ifdef AHD_DEBUG
7960                 if ((ahd_debug & AHD_SHOW_INT_COALESCING) != 0)
7961                         kprintf("%s: Interrupt coalescing "
7962                                "now %sabled. Cmds %d\n",
7963                                ahd_name(ahd),
7964                                (enint_coal & ENINT_COALESCE) ? "en" : "dis",
7965                                ahd->cmdcmplt_total);
7966 #endif
7967         }
7968
7969         ahd->cmdcmplt_bucket = (ahd->cmdcmplt_bucket+1) & (AHD_STAT_BUCKETS-1);
7970         ahd->cmdcmplt_total -= ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket];
7971         ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket] = 0;
7972         aic_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US,
7973                         ahd_stat_timer, ahd);
7974         ahd_unlock();
7975 }
7976
7977 /****************************** Status Processing *****************************/
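     /*
      * Dispatch a completed SCB: a non-zero SCSI status is routed through
      * ahd_handle_scsi_status(), otherwise the residual is calculated and
      * the command is completed normally.
      */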
7978 void
7979 ahd_handle_scb_status(struct ahd_softc *ahd, struct scb *scb)
7980 {
7981         if (scb->hscb->shared_data.istatus.scsi_status != 0) {
7982                 ahd_handle_scsi_status(ahd, scb);
7983         } else {
7984                 ahd_calc_residual(ahd, scb);
7985                 ahd_done(ahd, scb);
7986         }
7987 }
7988
7989 void
7990 ahd_handle_scsi_status(struct ahd_softc *ahd, struct scb *scb)
7991 {
7992         struct  hardware_scb *hscb;
7993         int     paused;
7994
7995         /*
7996          * The sequencer freezes its select-out queue
7997          * anytime a SCSI status error occurs.  We must
7998          * handle the error and increment our qfreeze count
7999          * to allow the sequencer to continue.  We don't
8000          * bother clearing critical sections here since all
8001          * operations are on data structures that the sequencer
8002          * is not touching once the queue is frozen.
8003          */
8004         hscb = scb->hscb; 
8005
8006         if (ahd_is_paused(ahd)) {
8007                 paused = 1;
8008         } else {
8009                 paused = 0;
8010                 ahd_pause(ahd);
8011         }
8012
8013         /* Freeze the queue until the client sees the error. */
8014         ahd_freeze_devq(ahd, scb);
8015         aic_freeze_scb(scb);
8016         ahd->qfreeze_cnt++;
8017         ahd_outw(ahd, KERNEL_QFREEZE_COUNT, ahd->qfreeze_cnt);
8018
8019         if (paused == 0)
8020                 ahd_unpause(ahd);
8021
8022         /* Don't want to clobber the original sense code */
8023         if ((scb->flags & SCB_SENSE) != 0) {
8024                 /*
8025                  * Clear the SCB_SENSE Flag and perform
8026                  * a normal command completion.
8027                  */
8028                 scb->flags &= ~SCB_SENSE;
8029                 aic_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
8030                 ahd_done(ahd, scb);
8031                 return;
8032         }
8033         aic_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR);
8034         aic_set_scsi_status(scb, hscb->shared_data.istatus.scsi_status);
8035         switch (hscb->shared_data.istatus.scsi_status) {
8036         case STATUS_PKT_SENSE:
8037         {
8038                 struct scsi_status_iu_header *siu;
8039
8040                 ahd_sync_sense(ahd, scb, BUS_DMASYNC_POSTREAD);
8041                 siu = (struct scsi_status_iu_header *)scb->sense_data;
8042                 aic_set_scsi_status(scb, siu->status);
8043 #ifdef AHD_DEBUG
8044                 if ((ahd_debug & AHD_SHOW_SENSE) != 0) {
8045                         ahd_print_path(ahd, scb);
8046                         kprintf("SCB 0x%x Received PKT Status of 0x%x\n",
8047                                SCB_GET_TAG(scb), siu->status);
8048                         kprintf("\tflags = 0x%x, sense len = 0x%x, "
8049                                "pktfail = 0x%x\n",
8050                                siu->flags, scsi_4btoul(siu->sense_length),
8051                                scsi_4btoul(siu->pkt_failures_length));
8052                 }
8053 #endif
8054                 if ((siu->flags & SIU_RSPVALID) != 0) {
8055                         ahd_print_path(ahd, scb);
8056                         if (scsi_4btoul(siu->pkt_failures_length) < 4) {
8057                                 kprintf("Unable to parse pkt_failures\n");
8058                         } else {
8059
8060                                 switch (SIU_PKTFAIL_CODE(siu)) {
8061                                 case SIU_PFC_NONE:
8062                                         kprintf("No packet failure found\n");
8063                                         break;
8064                                 case SIU_PFC_CIU_FIELDS_INVALID:
8065                                         kprintf("Invalid Command IU Field\n");
8066                                         break;
8067                                 case SIU_PFC_TMF_NOT_SUPPORTED:
8068                                         kprintf("TMF not supported\n");
8069                                         break;
8070                                 case SIU_PFC_TMF_FAILED:
8071                                         kprintf("TMF failed\n");
8072                                         break;
8073                                 case SIU_PFC_INVALID_TYPE_CODE:
8074                                         kprintf("Invalid L_Q Type code\n");
8075                                         break;
8076                                 case SIU_PFC_ILLEGAL_REQUEST:
8077                                         kprintf("Illegal request\n");
                                              break;
8078                                 default:
8079                                         break;
8080                                 }
8081                         }
8082                         if (siu->status == SCSI_STATUS_OK)
8083                                 aic_set_transaction_status(scb,
8084                                                            CAM_REQ_CMP_ERR);
8085                 }
8086                 if ((siu->flags & SIU_SNSVALID) != 0) {
8087                         scb->flags |= SCB_PKT_SENSE;
8088 #ifdef AHD_DEBUG
8089                         if ((ahd_debug & AHD_SHOW_SENSE) != 0)
8090                                 kprintf("Sense data available\n");
8091 #endif
8092                 }
8093                 ahd_done(ahd, scb);
8094                 break;
8095         }
8096         case SCSI_STATUS_CMD_TERMINATED:
8097         case SCSI_STATUS_CHECK_COND:
8098         {
8099                 struct ahd_devinfo devinfo;
8100                 struct ahd_dma_seg *sg;
8101                 struct scsi_sense *sc;
8102                 struct ahd_initiator_tinfo *targ_info;
8103                 struct ahd_tmode_tstate *tstate;
8104                 struct ahd_transinfo *tinfo;
8105 #ifdef AHD_DEBUG
8106                 if (ahd_debug & AHD_SHOW_SENSE) {
8107                         ahd_print_path(ahd, scb);
8108                         kprintf("SCB %d: requests Check Status\n",
8109                                SCB_GET_TAG(scb));
8110                 }
8111 #endif
8112
8113                 if (aic_perform_autosense(scb) == 0)
8114                         break;
8115
8116                 ahd_compile_devinfo(&devinfo, SCB_GET_OUR_ID(scb),
8117                                     SCB_GET_TARGET(ahd, scb),
8118                                     SCB_GET_LUN(scb),
8119                                     SCB_GET_CHANNEL(ahd, scb),
8120                                     ROLE_INITIATOR);
8121                 targ_info = ahd_fetch_transinfo(ahd,
8122                                                 devinfo.channel,
8123                                                 devinfo.our_scsiid,
8124                                                 devinfo.target,
8125                                                 &tstate);
8126                 tinfo = &targ_info->curr;
8127                 sg = scb->sg_list;
8128                 sc = (struct scsi_sense *)hscb->shared_data.idata.cdb;
8129                 /*
8130                  * Save off the residual if there is one.
8131                  */
8132                 ahd_update_residual(ahd, scb);
8133 #ifdef AHD_DEBUG
8134                 if (ahd_debug & AHD_SHOW_SENSE) {
8135                         ahd_print_path(ahd, scb);
8136                         kprintf("Sending Sense\n");
8137                 }
8138 #endif
8139                 scb->sg_count = 0;
8140                 sg = ahd_sg_setup(ahd, scb, sg, ahd_get_sense_bufaddr(ahd, scb),
8141                                   aic_get_sense_bufsize(ahd, scb),
8142                                   /*last*/TRUE);
8143                 sc->opcode = REQUEST_SENSE;
8144                 sc->byte2 = 0;
8145                 if (tinfo->protocol_version <= SCSI_REV_2
8146                  && SCB_GET_LUN(scb) < 8)
8147                         sc->byte2 = SCB_GET_LUN(scb) << 5;
8148                 sc->unused[0] = 0;
8149                 sc->unused[1] = 0;
8150                 sc->length = aic_get_sense_bufsize(ahd, scb);
8151                 sc->control = 0;
8152
8153                 /*
8154                  * We can't allow the target to disconnect.
8155                  * This will be an untagged transaction and
8156                  * having the target disconnect will make this
8157          * transaction indistinguishable from outstanding
8158                  * tagged transactions.
8159                  */
8160                 hscb->control = 0;
8161
8162                 /*
8163                  * This request sense could be because the
8164                  * device lost power or in some other
8165                  * way has lost our transfer negotiations.
8166                  * Renegotiate if appropriate.  Unit attention
8167                  * errors will be reported before any data
8168                  * phases occur.
8169                  */
8170                 if (aic_get_residual(scb) == aic_get_transfer_length(scb)) {
8171                         ahd_update_neg_request(ahd, &devinfo,
8172                                                tstate, targ_info,
8173                                                AHD_NEG_IF_NON_ASYNC);
8174                 }
8175                 if (tstate->auto_negotiate & devinfo.target_mask) {
8176                         hscb->control |= MK_MESSAGE;
8177                         scb->flags &=
8178                             ~(SCB_NEGOTIATE|SCB_ABORT|SCB_DEVICE_RESET);
8179                         scb->flags |= SCB_AUTO_NEGOTIATE;
8180                 }
8181                 hscb->cdb_len = sizeof(*sc);
8182                 ahd_setup_data_scb(ahd, scb);
8183                 scb->flags |= SCB_SENSE;
8184                 ahd_queue_scb(ahd, scb);
8185                 /*
8186                  * Ensure we have enough time to actually
8187                  * retrieve the sense.
8188                  */
8189                 aic_scb_timer_reset(scb, 5 * 1000000);
8190                 break;
8191         }
8192         case SCSI_STATUS_OK:
8193                 kprintf("%s: Interrupted for status of 0???\n",
8194                        ahd_name(ahd));
8195                 /* FALLTHROUGH */
8196         default:
8197                 ahd_done(ahd, scb);
8198                 break;
8199         }
8200 }
8201
8202 /*
8203  * Calculate the residual for a just completed SCB.
8204  */
8205 void
8206 ahd_calc_residual(struct ahd_softc *ahd, struct scb *scb)
8207 {
8208         struct hardware_scb *hscb;
8209         struct initiator_status *spkt;
8210         uint32_t sgptr;
8211         uint32_t resid_sgptr;
8212         uint32_t resid;
8213
8214         /*
8215          * 5 cases.
8216          * 1) No residual.
8217          *    SG_STATUS_VALID clear in sgptr.
8218          * 2) Transferless command
8219          * 3) Never performed any transfers.
8220          *    sgptr has SG_FULL_RESID set.
8221          * 4) No residual but target did not
8222          *    save data pointers after the
8223          *    last transfer, so sgptr was
8224          *    never updated.
8225          * 5) We have a partial residual.
8226          *    Use residual_sgptr to determine
8227          *    where we are.
8228          */
8229
8230         hscb = scb->hscb;
8231         sgptr = aic_le32toh(hscb->sgptr);
8232         if ((sgptr & SG_STATUS_VALID) == 0)
8233                 /* Case 1 */
8234                 return;
8235         sgptr &= ~SG_STATUS_VALID;
8236
8237         if ((sgptr & SG_LIST_NULL) != 0)
8238                 /* Case 2 */
8239                 return;
8240
8241         /*
8242          * Residual fields are the same in both
8243          * target and initiator status packets,
8244          * so we can always use the initiator fields
8245          * regardless of the role for this SCB.
8246          */
8247         spkt = &hscb->shared_data.istatus;
8248         resid_sgptr = aic_le32toh(spkt->residual_sgptr);
8249         if ((sgptr & SG_FULL_RESID) != 0) {
8250                 /* Case 3 */
8251                 resid = aic_get_transfer_length(scb);
8252         } else if ((resid_sgptr & SG_LIST_NULL) != 0) {
8253                 /* Case 4 */
8254                 return;
8255         } else if ((resid_sgptr & SG_OVERRUN_RESID) != 0) {
8256                 ahd_print_path(ahd, scb);
8257                 kprintf("data overrun detected Tag == 0x%x.\n",
8258                        SCB_GET_TAG(scb));
8259                 ahd_freeze_devq(ahd, scb);
8260                 aic_set_transaction_status(scb, CAM_DATA_RUN_ERR);
8261                 aic_freeze_scb(scb);
8262                 return;
8263         } else if ((resid_sgptr & ~SG_PTR_MASK) != 0) {
8264                 panic("Bogus resid sgptr value 0x%x\n", resid_sgptr);
8265                 /* NOTREACHED */
8266         } else {
8267                 struct ahd_dma_seg *sg;
8268
8269                 /*
8270                  * Remainder of the SG where the transfer
8271                  * stopped.  
8272                  */
8273                 resid = aic_le32toh(spkt->residual_datacnt) & AHD_SG_LEN_MASK;
8274                 sg = ahd_sg_bus_to_virt(ahd, scb, resid_sgptr & SG_PTR_MASK);
8275
8276                 /* The residual sg_ptr always points to the next sg */
8277                 sg--;
8278
8279                 /*
8280                  * Add up the contents of all residual
8281                  * SG segments that are after the SG where
8282                  * the transfer stopped.
8283                  */
8284                 while ((aic_le32toh(sg->len) & AHD_DMA_LAST_SEG) == 0) {
8285                         sg++;
8286                         resid += aic_le32toh(sg->len) & AHD_SG_LEN_MASK;
8287                 }
8288         }
8289         if ((scb->flags & SCB_SENSE) == 0)
8290                 aic_set_residual(scb, resid);
8291         else
8292                 aic_set_sense_residual(scb, resid);
8293
8294 #ifdef AHD_DEBUG
8295         if ((ahd_debug & AHD_SHOW_MISC) != 0) {
8296                 ahd_print_path(ahd, scb);
8297                 kprintf("Handled %sResidual of %d bytes\n",
8298                        (scb->flags & SCB_SENSE) ? "Sense " : "", resid);
8299         }
8300 #endif
8301 }
8302
8303 /******************************* Target Mode **********************************/
8304 #ifdef AHD_TARGET_MODE
8305 /*
8306  * Add a target mode event to this lun's queue
8307  */
8308 static void
8309 ahd_queue_lstate_event(struct ahd_softc *ahd, struct ahd_tmode_lstate *lstate,
8310                        u_int initiator_id, u_int event_type, u_int event_arg)
8311 {
8312         struct ahd_tmode_event *event;
8313         int pending;
8314
8315         xpt_freeze_devq(lstate->path, /*count*/1);
8316         if (lstate->event_w_idx >= lstate->event_r_idx)
8317                 pending = lstate->event_w_idx - lstate->event_r_idx;
8318         else
8319                 pending = AHD_TMODE_EVENT_BUFFER_SIZE + 1
8320                         - (lstate->event_r_idx - lstate->event_w_idx);
8321
8322         if (event_type == EVENT_TYPE_BUS_RESET
8323          || event_type == MSG_BUS_DEV_RESET) {
8324                 /*
8325                  * Any earlier events are irrelevant, so reset our buffer.
8326                  * This has the effect of allowing us to deal with reset
8327                  * floods (an external device holding down the reset line)
8328                  * without losing the event that is really interesting.
8329                  */
8330                 lstate->event_r_idx = 0;
8331                 lstate->event_w_idx = 0;
8332                 xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE);
8333         }
8334
8335         if (pending == AHD_TMODE_EVENT_BUFFER_SIZE) {
8336                 xpt_print_path(lstate->path);
8337                 kprintf("immediate event %x:%x lost\n",
8338                        lstate->event_buffer[lstate->event_r_idx].event_type,
8339                        lstate->event_buffer[lstate->event_r_idx].event_arg);
8340                 lstate->event_r_idx++;
8341                 if (lstate->event_r_idx == AHD_TMODE_EVENT_BUFFER_SIZE)
8342                         lstate->event_r_idx = 0;
8343                 xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE);
8344         }
8345
8346         event = &lstate->event_buffer[lstate->event_w_idx];
8347         event->initiator_id = initiator_id;
8348         event->event_type = event_type;
8349         event->event_arg = event_arg;
8350         lstate->event_w_idx++;
8351         if (lstate->event_w_idx == AHD_TMODE_EVENT_BUFFER_SIZE)
8352                 lstate->event_w_idx = 0;
8353 }
8354
8355 /*
8356  * Send any target mode events queued up waiting
8357  * for immediate notify resources.
8358  */
8359 void
8360 ahd_send_lstate_events(struct ahd_softc *ahd, struct ahd_tmode_lstate *lstate)
8361 {
8362         struct ccb_hdr *ccbh;
8363         struct ccb_immed_notify *inot;
8364
8365         while (lstate->event_r_idx != lstate->event_w_idx
8366             && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) {
8367                 struct ahd_tmode_event *event;
8368
8369                 event = &lstate->event_buffer[lstate->event_r_idx];
8370                 SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle);
8371                 inot = (struct ccb_immed_notify *)ccbh;
8372                 switch (event->event_type) {
8373                 case EVENT_TYPE_BUS_RESET:
8374                         ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN;
8375                         break;
8376                 default:
8377                         ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
8378                         inot->message_args[0] = event->event_type;
8379                         inot->message_args[1] = event->event_arg;
8380                         break;
8381                 }
8382                 inot->initiator_id = event->initiator_id;
8383                 inot->sense_len = 0;
8384                 xpt_done((union ccb *)inot);
8385                 lstate->event_r_idx++;
8386                 if (lstate->event_r_idx == AHD_TMODE_EVENT_BUFFER_SIZE)
8387                         lstate->event_r_idx = 0;
8388         }
8389 }
8390 #endif
8391
8392 /******************** Sequencer Program Patching/Download *********************/
8393
8394 #ifdef AHD_DUMP_SEQ
8395 void
8396 ahd_dumpseq(struct ahd_softc* ahd)
8397 {
8398         int i;
8399         int max_prog;
8400
8401         max_prog = 2048;
8402
8403         ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
8404         ahd_outw(ahd, PRGMCNT, 0);
8405         for (i = 0; i < max_prog; i++) {
8406                 uint8_t ins_bytes[4];
8407
8408                 ahd_insb(ahd, SEQRAM, ins_bytes, 4);
8409                 kprintf("0x%08x\n", ins_bytes[0] << 24
8410                                  | ins_bytes[1] << 16
8411                                  | ins_bytes[2] << 8
8412                                  | ins_bytes[3]);
8413         }
8414 }
8415 #endif
8416
8417 static void
8418 ahd_loadseq(struct ahd_softc *ahd)
8419 {
8420         struct  cs cs_table[num_critical_sections];
8421         u_int   begin_set[num_critical_sections];
8422         u_int   end_set[num_critical_sections];
8423         struct  patch *cur_patch;
8424         u_int   cs_count;
8425         u_int   cur_cs;
8426         u_int   i;
8427         int     downloaded;
8428         u_int   skip_addr;
8429         u_int   sg_prefetch_cnt;
8430         u_int   sg_prefetch_cnt_limit;
8431         u_int   sg_prefetch_align;
8432         u_int   sg_size;
8433         uint8_t download_consts[DOWNLOAD_CONST_COUNT];
8434
8435         if (bootverbose)
8436                 kprintf("%s: Downloading Sequencer Program...",
8437                        ahd_name(ahd));
8438
8439 #if DOWNLOAD_CONST_COUNT != 7
8440 #error "Download Const Mismatch"
8441 #endif
8442         /*
8443          * Start out with 0 critical sections
8444          * that apply to this firmware load.
8445          */
8446         cs_count = 0;
8447         cur_cs = 0;
8448         memset(begin_set, 0, sizeof(begin_set));
8449         memset(end_set, 0, sizeof(end_set));
8450
8451         /*
8452          * Setup downloadable constant table.
8453          * 
8454          * The computation for the S/G prefetch variables is
8455          * a bit complicated.  We would like to always fetch
8456          * in terms of cacheline sized increments.  However,
8457          * if the cacheline is not an even multiple of the
8458          * SG element size or is larger than our SG RAM, using
8459          * just the cache size might leave us with only a portion
8460          * of an SG element at the tail of a prefetch.  If the
8461          * cacheline is larger than our S/G prefetch buffer less
8462          * the size of an SG element, we may round down to a cacheline
8463          * that doesn't contain any or all of the S/G of interest
8464          * within the bounds of our S/G ram.  Provide variables to
8465          * the sequencer that will allow it to handle these edge
8466          * cases.
8467          */
8468         /* Start by aligning to the nearest cacheline. */
8469         sg_prefetch_align = ahd->pci_cachesize;
8470         if (sg_prefetch_align == 0)
8471                 sg_prefetch_align = 8;
8472         /* Round down to the nearest power of 2. */
8473         while (powerof2(sg_prefetch_align) == 0)
8474                 sg_prefetch_align--;
8475         /*
8476          * If the cacheline boundary is greater than half our prefetch RAM
8477          * we risk not being able to fetch even a single complete S/G
8478          * segment if we align to that boundary.
8479          */
8480         if (sg_prefetch_align > CCSGADDR_MAX/2)
8481                 sg_prefetch_align = CCSGADDR_MAX/2;
8482         /* Start by fetching a single cacheline. */
8483         sg_prefetch_cnt = sg_prefetch_align;
8484         /*
8485          * Increment the prefetch count by cachelines until
8486          * at least one S/G element will fit.
8487          */
8488         sg_size = sizeof(struct ahd_dma_seg);
8489         if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0)
8490                 sg_size = sizeof(struct ahd_dma64_seg);
8491         while (sg_prefetch_cnt < sg_size)
8492                 sg_prefetch_cnt += sg_prefetch_align;
8493         /*
8494          * If the cacheline is not an even multiple of
8495          * the S/G size, we may only get a partial S/G when
8496          * we align. Add a cacheline if this is the case.
8497          */
8498         if ((sg_prefetch_align % sg_size) != 0
8499          && (sg_prefetch_cnt < CCSGADDR_MAX))
8500                 sg_prefetch_cnt += sg_prefetch_align;
8501         /*
8502          * Lastly, compute a value that the sequencer can use
8503          * to determine if the remainder of the CCSGRAM buffer
8504          * has a full S/G element in it.
8505          */
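              /*
               * For example (hypothetical but typical values): with a 64 byte
               * cacheline and 8 byte S/G elements, and assuming the cacheline
               * does not exceed CCSGADDR_MAX/2, sg_prefetch_cnt ends up as 64
               * and the limit below is -(64 - 8 + 1) = -57.
               */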
8506         sg_prefetch_cnt_limit = -(sg_prefetch_cnt - sg_size + 1);
8507         download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt;
8508         download_consts[SG_PREFETCH_CNT_LIMIT] = sg_prefetch_cnt_limit;
8509         download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_align - 1);
8510         download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_align - 1);
8511         download_consts[SG_SIZEOF] = sg_size;
8512         download_consts[PKT_OVERRUN_BUFOFFSET] =
8513                 (ahd->overrun_buf - (uint8_t *)ahd->qoutfifo) / 256;
8514         download_consts[SCB_TRANSFER_SIZE] = SCB_TRANSFER_SIZE_1BYTE_LUN;
8515         cur_patch = patches;
8516         downloaded = 0;
8517         skip_addr = 0;
8518         ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
8519         ahd_outw(ahd, PRGMCNT, 0);
8520
8521         for (i = 0; i < sizeof(seqprog)/4; i++) {
8522                 if (ahd_check_patch(ahd, &cur_patch, i, &skip_addr) == 0) {
8523                         /*
8524                          * Don't download this instruction as it
8525                          * is in a patch that was removed.
8526                          */
8527                         continue;
8528                 }
8529                 /*
8530                  * Move through the CS table until we find a CS
8531                  * that might apply to this instruction.
8532                  */
8533                 for (; cur_cs < num_critical_sections; cur_cs++) {
8534                         if (critical_sections[cur_cs].end <= i) {
8535                                 if (begin_set[cs_count] == TRUE
8536                                  && end_set[cs_count] == FALSE) {
8537                                         cs_table[cs_count].end = downloaded;
8538                                         end_set[cs_count] = TRUE;
8539                                         cs_count++;
8540                                 }
8541                                 continue;
8542                         }
8543                         if (critical_sections[cur_cs].begin <= i
8544                          && begin_set[cs_count] == FALSE) {
8545                                 cs_table[cs_count].begin = downloaded;
8546                                 begin_set[cs_count] = TRUE;
8547                         }
8548                         break;
8549                 }
8550                 ahd_download_instr(ahd, i, download_consts);
8551                 downloaded++;
8552         }
8553
8554         ahd->num_critical_sections = cs_count;
8555         if (cs_count != 0) {
8556                 cs_count *= sizeof(struct cs);
8557                 ahd->critical_sections = kmalloc(cs_count, M_DEVBUF, M_INTWAIT);
8558                 memcpy(ahd->critical_sections, cs_table, cs_count);
8559         }
8560         ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE);
8561
8562         if (bootverbose) {
8563                 kprintf(" %d instructions downloaded\n", downloaded);
8564                 kprintf("%s: Features 0x%x, Bugs 0x%x, Flags 0x%x\n",
8565                        ahd_name(ahd), ahd->features, ahd->bugs, ahd->flags);
8566         }
8567 }
8568
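     /*
      * Determine whether the instruction at start_instr should be
      * downloaded.  Patches whose predicate fails advance *skip_addr past
      * the region they cover; the function returns 0 while start_instr is
      * still inside a skipped region and 1 otherwise.
      */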
8569 static int
8570 ahd_check_patch(struct ahd_softc *ahd, struct patch **start_patch,
8571                 u_int start_instr, u_int *skip_addr)
8572 {
8573         struct  patch *cur_patch;
8574         struct  patch *last_patch;
8575         u_int   num_patches;
8576
8577         num_patches = sizeof(patches)/sizeof(struct patch);
8578         last_patch = &patches[num_patches];
8579         cur_patch = *start_patch;
8580
8581         while (cur_patch < last_patch && start_instr == cur_patch->begin) {
8582
8583                 if (cur_patch->patch_func(ahd) == 0) {
8584
8585                         /* Start rejecting code */
8586                         *skip_addr = start_instr + cur_patch->skip_instr;
8587                         cur_patch += cur_patch->skip_patch;
8588                 } else {
8589                         /* Accepted this patch.  Advance to the next
8590                          * one and wait for our instruction pointer to
8591                          * hit this point.
8592                          */
8593                         cur_patch++;
8594                 }
8595         }
8596
8597         *start_patch = cur_patch;
8598         if (start_instr < *skip_addr)
8599                 /* Still skipping */
8600                 return (0);
8601
8602         return (1);
8603 }
8604
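     /*
      * Convert an address in the unpatched sequencer program into its
      * address in the downloaded image by subtracting the instructions
      * that were skipped by rejected patches before that point.
      */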
8605 static u_int
8606 ahd_resolve_seqaddr(struct ahd_softc *ahd, u_int address)
8607 {
8608         struct patch *cur_patch;
8609         int address_offset;
8610         u_int skip_addr;
8611         u_int i;
8612
8613         address_offset = 0;
8614         cur_patch = patches;
8615         skip_addr = 0;
8616
8617         for (i = 0; i < address;) {
8618
8619                 ahd_check_patch(ahd, &cur_patch, i, &skip_addr);
8620
8621                 if (skip_addr > i) {
8622                         int end_addr;
8623
8624                         end_addr = MIN(address, skip_addr);
8625                         address_offset += end_addr - i;
8626                         i = skip_addr;
8627                 } else {
8628                         i++;
8629                 }
8630         }
8631         return (address - address_offset);
8632 }
8633
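     /*
      * Fix up and download a single sequencer instruction: resolve jump
      * addresses, substitute download constants for immediates flagged by
      * the assembler, compute the odd parity bit, and write the result to
      * SEQRAM in little endian order.
      */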
8634 static void
8635 ahd_download_instr(struct ahd_softc *ahd, u_int instrptr, uint8_t *dconsts)
8636 {
8637         union   ins_formats instr;
8638         struct  ins_format1 *fmt1_ins;
8639         struct  ins_format3 *fmt3_ins;
8640         u_int   opcode;
8641
8642         /*
8643          * The firmware is always compiled into a little endian format.
8644          */
8645         instr.integer = aic_le32toh(*(uint32_t*)&seqprog[instrptr * 4]);
8646
8647         fmt1_ins = &instr.format1;
8648         fmt3_ins = NULL;
8649
8650         /* Pull the opcode */
8651         opcode = instr.format1.opcode;
8652         switch (opcode) {
8653         case AIC_OP_JMP:
8654         case AIC_OP_JC:
8655         case AIC_OP_JNC:
8656         case AIC_OP_CALL:
8657         case AIC_OP_JNE:
8658         case AIC_OP_JNZ:
8659         case AIC_OP_JE:
8660         case AIC_OP_JZ:
8661         {
8662                 fmt3_ins = &instr.format3;
8663                 fmt3_ins->address = ahd_resolve_seqaddr(ahd, fmt3_ins->address);
8664                 /* FALLTHROUGH */
8665         }
8666         case AIC_OP_OR:
8667         case AIC_OP_AND:
8668         case AIC_OP_XOR:
8669         case AIC_OP_ADD:
8670         case AIC_OP_ADC:
8671         case AIC_OP_BMOV:
8672                 if (fmt1_ins->parity != 0) {
8673                         fmt1_ins->immediate = dconsts[fmt1_ins->immediate];
8674                 }
8675                 fmt1_ins->parity = 0;
8676                 /* FALLTHROUGH */
8677         case AIC_OP_ROL:
8678         {
8679                 int i, count;
8680
8681                 /* Calculate odd parity for the instruction */
8682                 for (i = 0, count = 0; i < 31; i++) {
8683                         uint32_t mask;
8684
8685                         mask = 0x01 << i;
8686                         if ((instr.integer & mask) != 0)
8687                                 count++;
8688                 }
8689                 if ((count & 0x01) == 0)
8690                         instr.format1.parity = 1;
8691
8692                 /* The sequencer is a little endian cpu */
8693                 instr.integer = aic_htole32(instr.integer);
8694                 ahd_outsb(ahd, SEQRAM, instr.bytes, 4);
8695                 break;
8696         }
8697         default:
8698                 panic("Unknown opcode encountered in seq program");
8699                 break;
8700         }
8701 }
8702
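     /*
      * Empirically determine the depth of the sequencer's stack by pushing
      * progressively longer patterns and reading them back until an entry
      * fails to verify.
      */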
8703 static int
8704 ahd_probe_stack_size(struct ahd_softc *ahd)
8705 {
8706         int last_probe;
8707
8708         last_probe = 0;
8709         while (1) {
8710                 int i;
8711
8712                 /*
8713                  * We avoid using 0 as a pattern to avoid
8714                  * confusion if the stack implementation
8715                  * "back-fills" with zeros when "popping"
8716                  * entries.
8717                  */
8718                 for (i = 1; i <= last_probe+1; i++) {
8719                        ahd_outb(ahd, STACK, i & 0xFF);
8720                        ahd_outb(ahd, STACK, (i >> 8) & 0xFF);
8721                 }
8722
8723                 /* Verify */
8724                 for (i = last_probe+1; i > 0; i--) {
8725                         u_int stack_entry;
8726
8727                         stack_entry = ahd_inb(ahd, STACK)
8728                                     |(ahd_inb(ahd, STACK) << 8);
8729                         if (stack_entry != i)
8730                                 goto sized;
8731                 }
8732                 last_probe++;
8733         }
8734 sized:
8735         return (last_probe);
8736 }
8737
8738 void
8739 ahd_dump_all_cards_state(void)
8740 {
8741         struct ahd_softc *list_ahd;
8742
8743         TAILQ_FOREACH(list_ahd, &ahd_tailq, links) {
8744                 ahd_dump_card_state(list_ahd);
8745         }
8746 }
8747
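     /*
      * Format one register dump entry.  The value is printed along with the
      * names of any bit fields from 'table' that are set, and *cur_column
      * is maintained so callers can wrap output at wrap_point.  Returns the
      * number of characters printed.
      */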
8748 int
8749 ahd_print_register(ahd_reg_parse_entry_t *table, u_int num_entries,
8750                    const char *name, u_int address, u_int value,
8751                    u_int *cur_column, u_int wrap_point)
8752 {
8753         int     printed;
8754         u_int   printed_mask;
8755
8756         if (cur_column != NULL && *cur_column >= wrap_point) {
8757                 kprintf("\n");
8758                 *cur_column = 0;
8759         }
8760         printed = kprintf("%s[0x%x]", name, value);
8761         if (table == NULL) {
8762                 printed += kprintf(" ");
8763                 *cur_column += printed;
8764                 return (printed);
8765         }
8766         printed_mask = 0;
8767         while (printed_mask != 0xFF) {
8768                 int entry;
8769
8770                 for (entry = 0; entry < num_entries; entry++) {
8771                         if (((value & table[entry].mask)
8772                           != table[entry].value)
8773                          || ((printed_mask & table[entry].mask)
8774                           == table[entry].mask))
8775                                 continue;
8776
8777                         printed += kprintf("%s%s",
8778                                           printed_mask == 0 ? ":(" : "|",
8779                                           table[entry].name);
8780                         printed_mask |= table[entry].mask;
8781                         
8782                         break;
8783                 }
8784                 if (entry >= num_entries)
8785                         break;
8786         }
8787         if (printed_mask != 0)
8788                 printed += kprintf(") ");
8789         else
8790                 printed += kprintf(" ");
8791         if (cur_column != NULL)
8792                 *cur_column += printed;
8793         return (printed);
8794 }
8795
8796 void
8797 ahd_dump_card_state(struct ahd_softc *ahd)
8798 {
8799         struct scb      *scb;
8800         ahd_mode_state   saved_modes;
8801         u_int            dffstat;
8802         int              paused;
8803         u_int            scb_index;
8804         u_int            saved_scb_index;
8805         u_int            cur_col;
8806         int              i;
8807
8808         if (ahd_is_paused(ahd)) {
8809                 paused = 1;
8810         } else {
8811                 paused = 0;
8812                 ahd_pause(ahd);
8813         }
8814         saved_modes = ahd_save_modes(ahd);
8815         ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
8816         kprintf(">>>>>>>>>>>>>>>>>> Dump Card State Begins <<<<<<<<<<<<<<<<<\n"
8817                "%s: Dumping Card State at program address 0x%x Mode 0x%x\n",
8818                ahd_name(ahd), 
8819                ahd_inw(ahd, CURADDR),
8820                ahd_build_mode_state(ahd, ahd->saved_src_mode,
8821                                     ahd->saved_dst_mode));
8822         if (paused)
8823                 kprintf("Card was paused\n");
8824
8825         if (ahd_check_cmdcmpltqueues(ahd))
8826                 kprintf("Completions are pending\n");
8827
8828         /*
8829          * Mode independent registers.
8830          */
8831         cur_col = 0;
8832         ahd_hs_mailbox_print(ahd_inb(ahd, LOCAL_HS_MAILBOX), &cur_col, 50);
8833         ahd_intctl_print(ahd_inb(ahd, INTCTL), &cur_col, 50);
8834         ahd_seqintstat_print(ahd_inb(ahd, SEQINTSTAT), &cur_col, 50);
8835         ahd_saved_mode_print(ahd_inb(ahd, SAVED_MODE), &cur_col, 50);
8836         ahd_dffstat_print(ahd_inb(ahd, DFFSTAT), &cur_col, 50);
8837         ahd_scsisigi_print(ahd_inb(ahd, SCSISIGI), &cur_col, 50);
8838         ahd_scsiphase_print(ahd_inb(ahd, SCSIPHASE), &cur_col, 50);
8839         ahd_scsibus_print(ahd_inb(ahd, SCSIBUS), &cur_col, 50);
8840         ahd_lastphase_print(ahd_inb(ahd, LASTPHASE), &cur_col, 50);
8841         ahd_scsiseq0_print(ahd_inb(ahd, SCSISEQ0), &cur_col, 50);
8842         ahd_scsiseq1_print(ahd_inb(ahd, SCSISEQ1), &cur_col, 50);
8843         ahd_seqctl0_print(ahd_inb(ahd, SEQCTL0), &cur_col, 50);
8844         ahd_seqintctl_print(ahd_inb(ahd, SEQINTCTL), &cur_col, 50);
8845         ahd_seq_flags_print(ahd_inb(ahd, SEQ_FLAGS), &cur_col, 50);
8846         ahd_seq_flags2_print(ahd_inb(ahd, SEQ_FLAGS2), &cur_col, 50);
8847         ahd_sstat0_print(ahd_inb(ahd, SSTAT0), &cur_col, 50);
8848         ahd_sstat1_print(ahd_inb(ahd, SSTAT1), &cur_col, 50);
8849         ahd_sstat2_print(ahd_inb(ahd, SSTAT2), &cur_col, 50);
8850         ahd_sstat3_print(ahd_inb(ahd, SSTAT3), &cur_col, 50);
8851         ahd_perrdiag_print(ahd_inb(ahd, PERRDIAG), &cur_col, 50);
8852         ahd_simode1_print(ahd_inb(ahd, SIMODE1), &cur_col, 50);
8853         ahd_lqistat0_print(ahd_inb(ahd, LQISTAT0), &cur_col, 50);
8854         ahd_lqistat1_print(ahd_inb(ahd, LQISTAT1), &cur_col, 50);
8855         ahd_lqistat2_print(ahd_inb(ahd, LQISTAT2), &cur_col, 50);
8856         ahd_lqostat0_print(ahd_inb(ahd, LQOSTAT0), &cur_col, 50);
8857         ahd_lqostat1_print(ahd_inb(ahd, LQOSTAT1), &cur_col, 50);
8858         ahd_lqostat2_print(ahd_inb(ahd, LQOSTAT2), &cur_col, 50);
8859         kprintf("\n");
8860         kprintf("\nSCB Count = %d CMDS_PENDING = %d LASTSCB 0x%x "
8861                "CURRSCB 0x%x NEXTSCB 0x%x\n",
8862                ahd->scb_data.numscbs, ahd_inw(ahd, CMDS_PENDING),
8863                ahd_inw(ahd, LASTSCB), ahd_inw(ahd, CURRSCB),
8864                ahd_inw(ahd, NEXTSCB));
8865         cur_col = 0;
8866         /* QINFIFO */
8867         ahd_search_qinfifo(ahd, CAM_TARGET_WILDCARD, ALL_CHANNELS,
8868                            CAM_LUN_WILDCARD, SCB_LIST_NULL,
8869                            ROLE_UNKNOWN, /*status*/0, SEARCH_PRINT);
8870         saved_scb_index = ahd_get_scbptr(ahd);
8871         kprintf("Pending list:");
8872         i = 0;
8873         LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) {
8874                 if (i++ > AHD_SCB_MAX)
8875                         break;
8876                 cur_col = kprintf("\n%3d FIFO_USE[0x%x] ", SCB_GET_TAG(scb),
8877                                  ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT));
8878                 ahd_set_scbptr(ahd, SCB_GET_TAG(scb));
8879                 ahd_scb_control_print(ahd_inb_scbram(ahd, SCB_CONTROL),
8880                                       &cur_col, 60);
8881                 ahd_scb_scsiid_print(ahd_inb_scbram(ahd, SCB_SCSIID),
8882                                      &cur_col, 60);
8883         }
8884         kprintf("\nTotal %d\n", i);
8885
8886         kprintf("Kernel Free SCB list: ");
8887         i = 0;
8888         TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) {
8889                 struct scb *list_scb;
8890
8891                 list_scb = scb;
8892                 do {
8893                         kprintf("%d ", SCB_GET_TAG(list_scb));
8894                         list_scb = LIST_NEXT(list_scb, collision_links);
8895                 } while (list_scb && i++ < AHD_SCB_MAX);
8896         }
8897
8898         LIST_FOREACH(scb, &ahd->scb_data.any_dev_free_scb_list, links.le) {
8899                 if (i++ > AHD_SCB_MAX)
8900                         break;
8901                 kprintf("%d ", SCB_GET_TAG(scb));
8902         }
8903         kprintf("\n");
8904
8905         kprintf("Sequencer Complete DMA-inprog list: ");
8906         scb_index = ahd_inw(ahd, COMPLETE_SCB_DMAINPROG_HEAD);
8907         i = 0;
8908         while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) {
8909                 ahd_set_scbptr(ahd, scb_index);
8910                 kprintf("%d ", scb_index);
8911                 scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
8912         }
8913         kprintf("\n");
8914
8915         kprintf("Sequencer Complete list: ");
8916         scb_index = ahd_inw(ahd, COMPLETE_SCB_HEAD);
8917         i = 0;
8918         while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) {
8919                 ahd_set_scbptr(ahd, scb_index);
8920                 kprintf("%d ", scb_index);
8921                 scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
8922         }
8923         kprintf("\n");
8924
8925         
8926         kprintf("Sequencer DMA-Up and Complete list: ");
8927         scb_index = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD);
8928         i = 0;
8929         while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) {
8930                 ahd_set_scbptr(ahd, scb_index);
8931                 kprintf("%d ", scb_index);
8932                 scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
8933         }
8934         kprintf("\n");
8935         kprintf("Sequencer On QFreeze and Complete list: ");
8936         scb_index = ahd_inw(ahd, COMPLETE_ON_QFREEZE_HEAD);
8937         i = 0;
8938         while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) {
8939                 ahd_set_scbptr(ahd, scb_index);
8940                 kprintf("%d ", scb_index);
8941                 scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
8942         }
8943         kprintf("\n");
8944         ahd_set_scbptr(ahd, saved_scb_index);
8945         dffstat = ahd_inb(ahd, DFFSTAT);
8946         for (i = 0; i < 2; i++) {
8947 #ifdef AHD_DEBUG
8948                 struct scb *fifo_scb;
8949 #endif
8950                 u_int       fifo_scbptr;
8951
8952                 ahd_set_modes(ahd, AHD_MODE_DFF0 + i, AHD_MODE_DFF0 + i);
8953                 fifo_scbptr = ahd_get_scbptr(ahd);
8954                 kprintf("\n%s: FIFO%d %s, LONGJMP == 0x%x, SCB 0x%x\n",
8955                        ahd_name(ahd), i,
8956                        (dffstat & (FIFO0FREE << i)) ? "Free" : "Active",
8957                        ahd_inw(ahd, LONGJMP_ADDR), fifo_scbptr);
8958                 cur_col = 0;
8959                 ahd_seqimode_print(ahd_inb(ahd, SEQIMODE), &cur_col, 50);
8960                 ahd_seqintsrc_print(ahd_inb(ahd, SEQINTSRC), &cur_col, 50);
8961                 ahd_dfcntrl_print(ahd_inb(ahd, DFCNTRL), &cur_col, 50);
8962                 ahd_dfstatus_print(ahd_inb(ahd, DFSTATUS), &cur_col, 50);
8963                 ahd_sg_cache_shadow_print(ahd_inb(ahd, SG_CACHE_SHADOW),
8964                                           &cur_col, 50);
8965                 ahd_sg_state_print(ahd_inb(ahd, SG_STATE), &cur_col, 50);
8966                 ahd_dffsxfrctl_print(ahd_inb(ahd, DFFSXFRCTL), &cur_col, 50);
8967                 ahd_soffcnt_print(ahd_inb(ahd, SOFFCNT), &cur_col, 50);
8968                 ahd_mdffstat_print(ahd_inb(ahd, MDFFSTAT), &cur_col, 50);
8969                 if (cur_col > 50) {
8970                         kprintf("\n");
8971                         cur_col = 0;
8972                 }
8973                 cur_col += kprintf("SHADDR = 0x%x%x, SHCNT = 0x%x ",
8974                                   ahd_inl(ahd, SHADDR+4),
8975                                   ahd_inl(ahd, SHADDR),
8976                                   (ahd_inb(ahd, SHCNT)
8977                                 | (ahd_inb(ahd, SHCNT + 1) << 8)
8978                                 | (ahd_inb(ahd, SHCNT + 2) << 16)));
8979                 if (cur_col > 50) {
8980                         kprintf("\n");
8981                         cur_col = 0;
8982                 }
8983                 cur_col += kprintf("HADDR = 0x%x%x, HCNT = 0x%x ",
8984                                   ahd_inl(ahd, HADDR+4),
8985                                   ahd_inl(ahd, HADDR),
8986                                   (ahd_inb(ahd, HCNT)
8987                                 | (ahd_inb(ahd, HCNT + 1) << 8)
8988                                 | (ahd_inb(ahd, HCNT + 2) << 16)));
8989                 ahd_ccsgctl_print(ahd_inb(ahd, CCSGCTL), &cur_col, 50);
8990 #ifdef AHD_DEBUG
8991                 if ((ahd_debug & AHD_SHOW_SG) != 0) {
8992                         fifo_scb = ahd_lookup_scb(ahd, fifo_scbptr);
8993                         if (fifo_scb != NULL)
8994                                 ahd_dump_sglist(fifo_scb);
8995                 }
8996 #endif
8997         }
8998         kprintf("\nLQIN: ");
8999         for (i = 0; i < 20; i++)
9000                 kprintf("0x%x ", ahd_inb(ahd, LQIN + i));
9001         kprintf("\n");
9002         ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
9003         kprintf("%s: LQISTATE = 0x%x, LQOSTATE = 0x%x, OPTIONMODE = 0x%x\n",
9004                ahd_name(ahd), ahd_inb(ahd, LQISTATE), ahd_inb(ahd, LQOSTATE),
9005                ahd_inb(ahd, OPTIONMODE));
9006         kprintf("%s: OS_SPACE_CNT = 0x%x MAXCMDCNT = 0x%x\n",
9007                ahd_name(ahd), ahd_inb(ahd, OS_SPACE_CNT),
9008                ahd_inb(ahd, MAXCMDCNT));
9009         ahd_simode0_print(ahd_inb(ahd, SIMODE0), &cur_col, 50);
9010         kprintf("\n");
9011         ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
9012         cur_col = 0;
9013         ahd_ccscbctl_print(ahd_inb(ahd, CCSCBCTL), &cur_col, 50);
9014         kprintf("\n");
9015         ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode);
9016         kprintf("%s: REG0 == 0x%x, SINDEX = 0x%x, DINDEX = 0x%x\n",
9017                ahd_name(ahd), ahd_inw(ahd, REG0), ahd_inw(ahd, SINDEX),
9018                ahd_inw(ahd, DINDEX));
9019         kprintf("%s: SCBPTR == 0x%x, SCB_NEXT == 0x%x, SCB_NEXT2 == 0x%x\n",
9020                ahd_name(ahd), ahd_get_scbptr(ahd),
9021                ahd_inw_scbram(ahd, SCB_NEXT),
9022                ahd_inw_scbram(ahd, SCB_NEXT2));
9023         kprintf("CDB %x %x %x %x %x %x\n",
9024                ahd_inb_scbram(ahd, SCB_CDB_STORE),
9025                ahd_inb_scbram(ahd, SCB_CDB_STORE+1),
9026                ahd_inb_scbram(ahd, SCB_CDB_STORE+2),
9027                ahd_inb_scbram(ahd, SCB_CDB_STORE+3),
9028                ahd_inb_scbram(ahd, SCB_CDB_STORE+4),
9029                ahd_inb_scbram(ahd, SCB_CDB_STORE+5));
9030         kprintf("STACK:");
9031         for (i = 0; i < ahd->stack_size; i++) {
9032                 ahd->saved_stack[i] =
9033                     ahd_inb(ahd, STACK)|(ahd_inb(ahd, STACK) << 8);
9034                 kprintf(" 0x%x", ahd->saved_stack[i]);
9035         }
9036         for (i = ahd->stack_size-1; i >= 0; i--) {
9037                 ahd_outb(ahd, STACK, ahd->saved_stack[i] & 0xFF);
9038                 ahd_outb(ahd, STACK, (ahd->saved_stack[i] >> 8) & 0xFF);
9039         }
9040         kprintf("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n");
9041         ahd_platform_dump_card_state(ahd);
9042         ahd_restore_modes(ahd, saved_modes);
9043         if (paused == 0)
9044                 ahd_unpause(ahd);
9045 }
9046
9047 void
9048 ahd_dump_scbs(struct ahd_softc *ahd)
9049 {
9050         ahd_mode_state saved_modes;
9051         u_int          saved_scb_index;
9052         int            i;
9053
9054         saved_modes = ahd_save_modes(ahd);
9055         ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
9056         saved_scb_index = ahd_get_scbptr(ahd);
9057         for (i = 0; i < AHD_SCB_MAX; i++) {
9058                 ahd_set_scbptr(ahd, i);
9059                 kprintf("%3d", i);
9060                 kprintf("(CTRL 0x%x ID 0x%x N 0x%x N2 0x%x SG 0x%x, RSG 0x%x)\n",
9061                        ahd_inb_scbram(ahd, SCB_CONTROL),
9062                        ahd_inb_scbram(ahd, SCB_SCSIID),
9063                        ahd_inw_scbram(ahd, SCB_NEXT),
9064                        ahd_inw_scbram(ahd, SCB_NEXT2),
9065                        ahd_inl_scbram(ahd, SCB_SGPTR),
9066                        ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR));
9067         }
9068         kprintf("\n");
9069         ahd_set_scbptr(ahd, saved_scb_index);
9070         ahd_restore_modes(ahd, saved_modes);
9071 }
9072
9073
9074 /*************************** Timeout Handling *********************************/
9075 void
9076 ahd_timeout(struct scb *scb)
9077 {
9078         struct ahd_softc *ahd;
9079
9080         ahd = scb->ahd_softc;
9081         if ((scb->flags & SCB_ACTIVE) != 0) {
9082                 if ((scb->flags & SCB_TIMEDOUT) == 0) {
9083                         LIST_INSERT_HEAD(&ahd->timedout_scbs, scb,
9084                                          timedout_links);
9085                         scb->flags |= SCB_TIMEDOUT;
9086                 }
9087                 ahd_wakeup_recovery_thread(ahd);
9088         }
9089 }
9090
9091 /*
9092  * ahd_recover_commands determines if any of the commands that have currently
9093  * timed out are the root cause of this timeout.  Innocent commands are given
9094  * a new timeout while we wait for the command executing on the bus to time out.
9095  * This routine is invoked from a thread context so we are allowed to sleep.
9096  * Our lock is not held on entry.
9097  */
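/*
 * In outline, the loop below handles each timed-out SCB as follows:
 *   - An SCB already carrying SCB_DEVICE_RESET or SCB_ABORT has been
 *     down this road before, so the whole channel is reset.
 *   - If a different SCB is active on the bus, that SCB is assumed to
 *     have the longer timeout and the timed-out SCB merely has its
 *     timer extended.
 *   - If the active SCB is the timed-out one, a BDR message is queued
 *     and ATN is asserted.
 *   - A connection stuck awaiting busfree (no pending REQ) forces a
 *     bus reset.
 *   - An SCB still sitting in the QINFIFO never made it onto the bus,
 *     so only its timer is reset.
 *   - Otherwise the SCB belongs to a disconnected transaction and is
 *     requeued at the head of the QINFIFO, marked so that the device
 *     is reset when we next select or are reselected by it.
 */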
9098 void
9099 ahd_recover_commands(struct ahd_softc *ahd)
9100 {
9101         struct  scb *scb;
9102         struct  scb *active_scb;
9103         int     found;
9104         int     was_paused;
9105         u_int   active_scbptr;
9106         u_int   last_phase;
9107
9108         ahd_lock();
9109
9110         /*
9111          * Pause the controller and manually flush any
9112          * commands that have just completed but that our
9113          * interrupt handler has yet to see.
9114          */
9115         was_paused = ahd_is_paused(ahd);
9116         ahd_pause_and_flushwork(ahd);
9117
9118         if (LIST_EMPTY(&ahd->timedout_scbs) != 0) {
9119                 /*
9120                  * The timedout commands have already
9121                  * completed.  This typically means
9122                  * that either the timeout value was on
9123                  * the hairy edge of what the device
9124                  * requires or - more likely - interrupts
9125                  * are not happening.
9126                  */
9127                 kprintf("%s: Timedout SCBs already complete. "
9128                        "Interrupts may not be functioning.\n", ahd_name(ahd));
9129                 ahd_unpause(ahd);
9130                 ahd_unlock();
9131                 return;
9132         }
9133
9134         kprintf("%s: Recovery Initiated - Card was %spaused\n", ahd_name(ahd),
9135                was_paused ? "" : "not ");
9136         ahd_dump_card_state(ahd);
9137
9138         /*
9139          * Determine identity of SCB acting on the bus.
9140          * This test only catches non-packetized transactions.
9141          * Due to the fleeting nature of packetized operations,
9142          * we can't easily determine that a packetized operation
9143          * is on the bus.
9144          */
9145         ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
9146         last_phase = ahd_inb(ahd, LASTPHASE);
9147         active_scbptr = ahd_get_scbptr(ahd);
9148         active_scb = NULL;
9149         if (last_phase != P_BUSFREE
9150          || (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) == 0)
9151                 active_scb = ahd_lookup_scb(ahd, active_scbptr);
9152
9153         while ((scb = LIST_FIRST(&ahd->timedout_scbs)) != NULL) {
9154                 int     target;
9155                 int     lun;
9156                 char    channel;
9157
9158                 target = SCB_GET_TARGET(ahd, scb);
9159                 channel = SCB_GET_CHANNEL(ahd, scb);
9160                 lun = SCB_GET_LUN(scb);
9161
9162                 ahd_print_path(ahd, scb);
9163                 kprintf("SCB 0x%x - timed out\n", scb->hscb->tag);
9164
9165                 if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) {
9166                         /*
9167                          * Been down this road before.
9168                          * Do a full bus reset.
9169                          */
9170                         aic_set_transaction_status(scb, CAM_CMD_TIMEOUT);
9171 bus_reset:
9172                         found = ahd_reset_channel(ahd, channel,
9173                                                   /*Initiate Reset*/TRUE);
9174                         kprintf("%s: Issued Channel %c Bus Reset. "
9175                                "%d SCBs aborted\n", ahd_name(ahd), channel,
9176                                found);
9177                         continue;
9178                 }
9179
9180                 /*
9181                  * Remove the command from the timedout list in
9182                  * preparation for requeuing it.
9183                  */
9184                 LIST_REMOVE(scb, timedout_links);
9185                 scb->flags &= ~SCB_TIMEDOUT;
9186
9187                 if (active_scb != NULL) {
9188
9189                         if (active_scb != scb) {
9190                                 /*
9191                                  * If the active SCB is not us, assume that
9192                                  * the active SCB has a longer timeout than
9193                                  * the timedout SCB, and wait for the active
9194                                  * SCB to timeout.
9195                                  */ 
9196                                 ahd_other_scb_timeout(ahd, scb, active_scb);
9197                                 continue;
9198                         } 
9199
9200                         /*
9201                          * We're active on the bus, so assert ATN
9202                          * and hope that the target responds.
9203                          */
9204                         ahd_set_recoveryscb(ahd, active_scb);
9205                         active_scb->flags |= SCB_RECOVERY_SCB|SCB_DEVICE_RESET;
9206                         ahd_outb(ahd, MSG_OUT, HOST_MSG);
9207                         ahd_outb(ahd, SCSISIGO, last_phase|ATNO);
9208                         ahd_print_path(ahd, active_scb);
9209                         kprintf("BDR message in message buffer\n");
9210                         aic_scb_timer_reset(scb, 2 * 1000000);
9211                         break;
9212                 } else if (last_phase != P_BUSFREE
9213                         && ahd_inb(ahd, SCSIPHASE) == 0) {
9214                         /*
9215                          * SCB is not identified, there
9216                          * is no pending REQ, and the sequencer
9217                          * has not seen a busfree.  Looks like
9218                          * a stuck connection waiting to
9219                          * go busfree.  Reset the bus.
9220                          */
9221                         kprintf("%s: Connection stuck awaiting busfree or "
9222                                "Identify Msg.\n", ahd_name(ahd));
9223                         goto bus_reset;
9224                 } else if (ahd_search_qinfifo(ahd, target, channel, lun,
9225                                               scb->hscb->tag, ROLE_INITIATOR,
9226                                               /*status*/0, SEARCH_COUNT) > 0) {
9227
9228                         /*
9229                          * We haven't even gone out on the bus
9230                          * yet, so the timeout must be due to
9231                          * some other command.  Reset the timer
9232                          * and go on.
9233                          */
9234                         ahd_other_scb_timeout(ahd, scb, scb);
9235                 } else {
9236                         /*
9237                          * This SCB is for a disconnected transaction
9238                          * and we haven't found a better candidate on
9239                          * the bus to explain this timeout.
9240                          */
9241                         ahd_set_recoveryscb(ahd, scb);
9242
9243                         /*
9244                          * Actually re-queue this SCB in an attempt
9245                          * to select the device before it reconnects.
9246                          * In either case (selection or reselection),
9247                          * we will now issue a target reset to the
9248                          * timed-out device.
9249                          *
9250                          * Set the MK_MESSAGE control bit indicating
9251                          * that we desire to send a message.  We
9252                          * also set the disconnected flag since
9253                          * in the paging case there is no guarantee
9254                          * that our SCB control byte matches the
9255                          * version on the card.  We don't want the
9256                          * sequencer to abort the command thinking
9257                          * an unsolicited reselection occurred.
9258                          */
9259                         scb->flags |= SCB_DEVICE_RESET;
9260                         scb->hscb->cdb_len = 0;
9261                         scb->hscb->task_attribute = 0;
9262                         scb->hscb->task_management = SIU_TASKMGMT_ABORT_TASK;
9263
9264                         ahd_set_scbptr(ahd, SCB_GET_TAG(scb));
9265                         if ((scb->flags & SCB_PACKETIZED) != 0) {
9266                                 /*
9267                                  * Mark the SCB as having an outstanding
9268                                  * task management function.  Should the command
9269                                  * complete normally before the task management
9270                                  * function can be sent, the host will be
9271                                  * notified to abort our requeued SCB.
9272                                  */
9273                                 ahd_outb(ahd, SCB_TASK_MANAGEMENT,
9274                                          scb->hscb->task_management);
9275                         } else {
9276                                 /*
9277                                  * If non-packetized, set the MK_MESSAGE control
9278                                  * bit indicating that we desire to send a
9279                                  * message.  We also set the disconnected flag
9280                                  * since there is no guarantee that our SCB
9281                                  * control byte matches the version on the
9282                                  * card.  We don't want the sequencer to abort
9283                                  * the command thinking an unsolicited
9284                                  * reselection occurred.
9285                                  */
9286                                 scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
9287
9288                                 /*
9289                                  * The sequencer will never re-reference the
9290                                  * in-core SCB.  To make sure we are notified
9291                                  * during reselection, set the MK_MESSAGE flag in
9292                                  * the card's copy of the SCB.
9293                                  */
9294                                 ahd_outb(ahd, SCB_CONTROL,
9295                                          ahd_inb(ahd, SCB_CONTROL)|MK_MESSAGE);
9296                         }
9297
9298                         /*
9299                          * Clear out any entries in the QINFIFO first
9300                          * so we are the next SCB for this target
9301                          * to run.
9302                          */
9303                         ahd_search_qinfifo(ahd, target, channel, lun,
9304                                            SCB_LIST_NULL, ROLE_INITIATOR,
9305                                            CAM_REQUEUE_REQ, SEARCH_COMPLETE);
9306                         ahd_qinfifo_requeue_tail(ahd, scb);
9307                         ahd_set_scbptr(ahd, active_scbptr);
9308                         ahd_print_path(ahd, scb);
9309                         kprintf("Queuing a BDR SCB\n");
9310                         aic_scb_timer_reset(scb, 2 * 1000000);
9311                         break;
9312                 }
9313         }
9314         
9315         /*
9316          * Any remaining SCBs were not the "culprit", so remove
9317          * them from the timeout list.  The timer for these commands
9318          * will be reset once the recovery SCB completes.
9319          */
9320         while ((scb = LIST_FIRST(&ahd->timedout_scbs)) != NULL) {
9321
9322                 LIST_REMOVE(scb, timedout_links);
9323                 scb->flags &= ~SCB_TIMEDOUT;
9324         }
9325
9326         ahd_unpause(ahd);
9327         ahd_unlock();
9328 }
9329
9330 static void
9331 ahd_other_scb_timeout(struct ahd_softc *ahd, struct scb *scb,
9332                       struct scb *other_scb)
9333 {
9334         u_int   newtimeout;
9335
9336         ahd_print_path(ahd, scb);
9337         kprintf("Other SCB Timeout%s",
9338                (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0
9339                ? " again\n" : "\n");
9340         scb->flags |= SCB_OTHERTCL_TIMEOUT;
9341         newtimeout = MAX(aic_get_timeout(other_scb),
9342                          aic_get_timeout(scb));
9343         aic_scb_timer_reset(scb, newtimeout);
9344 }
9345
9346 /**************************** Flexport Logic **********************************/
9347 /*
9348  * Read count 16bit words from 16bit word address start_addr from the
9349  * SEEPROM attached to the controller, into buf, using the controller's
9350  * SEEPROM reading state machine.  Optionally treat the data as a byte
9351  * stream in terms of byte order.
9352  */
9353 int
9354 ahd_read_seeprom(struct ahd_softc *ahd, uint16_t *buf,
9355                  u_int start_addr, u_int count, int bytestream)
9356 {
9357         u_int cur_addr;
9358         u_int end_addr;
9359         int   error;
9360
9361         /*
9362          * If we never make it through the loop even once,
9363          * we were passed invalid arguments.
9364          */
9365         error = EINVAL;
9366         AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
9367         end_addr = start_addr + count;
9368         for (cur_addr = start_addr; cur_addr < end_addr; cur_addr++) {
9369
9370                 ahd_outb(ahd, SEEADR, cur_addr);
9371                 ahd_outb(ahd, SEECTL, SEEOP_READ | SEESTART);
9372                 
9373                 error = ahd_wait_seeprom(ahd);
9374                 if (error)
9375                         break;
9376                 if (bytestream != 0) {
9377                         uint8_t *bytestream_ptr;
9378
9379                         bytestream_ptr = (uint8_t *)buf;
9380                         *bytestream_ptr++ = ahd_inb(ahd, SEEDAT);
9381                         *bytestream_ptr = ahd_inb(ahd, SEEDAT+1);
9382                 } else {
9383                         /*
9384                          * ahd_inw() already handles machine byte order.
9385                          */
9386                         *buf = ahd_inw(ahd, SEEDAT);
9387                 }
9388                 buf++;
9389         }
9390         return (error);
9391 }
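
/*
 * A minimal usage sketch (the start address, caller, and error handling
 * here are illustrative assumptions, not taken from this driver): the
 * controller must be in AHD_MODE_SCSI and the SEEPROM acquired first.
 *
 *	struct seeprom_config sc;
 *	int error;
 *
 *	if (ahd_acquire_seeprom(ahd) != 0) {
 *		error = ahd_read_seeprom(ahd, (uint16_t *)&sc,
 *					 0, sizeof(sc)/2, FALSE);
 *		ahd_release_seeprom(ahd);
 *	}
 */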
9392
9393 /*
9394  * Write count 16bit words from buf, into the SEEPROM attached to the
9395  * controller starting at 16bit word address start_addr, using the
9396  * controller's SEEPROM writing state machine.
9397  */
9398 int
9399 ahd_write_seeprom(struct ahd_softc *ahd, uint16_t *buf,
9400                   u_int start_addr, u_int count)
9401 {
9402         u_int cur_addr;
9403         u_int end_addr;
9404         int   error;
9405         int   retval;
9406
9407         AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
9408         error = ENOENT;
9409
9410         /* Place the chip into write-enable mode */
9411         ahd_outb(ahd, SEEADR, SEEOP_EWEN_ADDR);
9412         ahd_outb(ahd, SEECTL, SEEOP_EWEN | SEESTART);
9413         error = ahd_wait_seeprom(ahd);
9414         if (error)
9415                 return (error);
9416
9417         /*
9418  * Write the data.  If we don't get through the loop at
9419          * least once, the arguments were invalid.
9420          */
9421         retval = EINVAL;
9422         end_addr = start_addr + count;
9423         for (cur_addr = start_addr; cur_addr < end_addr; cur_addr++) {
9424                 ahd_outw(ahd, SEEDAT, *buf++);
9425                 ahd_outb(ahd, SEEADR, cur_addr);
9426                 ahd_outb(ahd, SEECTL, SEEOP_WRITE | SEESTART);
9427                 
9428                 retval = ahd_wait_seeprom(ahd);
9429                 if (retval)
9430                         break;
9431         }
9432
9433         /*
9434          * Disable writes.
9435          */
9436         ahd_outb(ahd, SEEADR, SEEOP_EWDS_ADDR);
9437         ahd_outb(ahd, SEECTL, SEEOP_EWDS | SEESTART);
9438         error = ahd_wait_seeprom(ahd);
9439         if (error)
9440                 return (error);
9441         return (retval);
9442 }
9443
9444 /*
9445  * Wait up to ~25ms for the serial eeprom to satisfy our request.
9446  */
9447 int
9448 ahd_wait_seeprom(struct ahd_softc *ahd)
9449 {
9450         int cnt;
9451
9452         cnt = 5000;
9453         while ((ahd_inb(ahd, SEESTAT) & (SEEARBACK|SEEBUSY)) != 0 && --cnt)
9454                 aic_delay(5);
9455
9456         if (cnt == 0)
9457                 return (ETIMEDOUT);
9458         return (0);
9459 }
9460
9461 /*
9462  * Validate the two checksums in the per_channel
9463  * vital product data struct.
9464  */
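/*
 * Each region is summed byte-wise and the stored checksum byte is
 * expected to be the two's complement of that sum modulo 256; a
 * region whose byte sum is zero is also rejected.
 */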
9465 int
9466 ahd_verify_vpd_cksum(struct vpd_config *vpd)
9467 {
9468         int i;
9469         int maxaddr;
9470         uint32_t checksum;
9471         uint8_t *vpdarray;
9472
9473         vpdarray = (uint8_t *)vpd;
9474         maxaddr = offsetof(struct vpd_config, vpd_checksum);
9475         checksum = 0;
9476         for (i = offsetof(struct vpd_config, resource_type); i < maxaddr; i++)
9477                 checksum = checksum + vpdarray[i];
9478         if (checksum == 0
9479          || (-checksum & 0xFF) != vpd->vpd_checksum)
9480                 return (0);
9481
9482         checksum = 0;
9483         maxaddr = offsetof(struct vpd_config, checksum);
9484         for (i = offsetof(struct vpd_config, default_target_flags);
9485              i < maxaddr; i++)
9486                 checksum = checksum + vpdarray[i];
9487         if (checksum == 0
9488          || (-checksum & 0xFF) != vpd->checksum)
9489                 return (0);
9490         return (1);
9491 }
9492
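/*
 * The SEEPROM configuration checksum is the 16 bit sum of every word
 * preceding the final (checksum) word, stored directly rather than
 * negated; an all-zero image is rejected.
 */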
9493 int
9494 ahd_verify_cksum(struct seeprom_config *sc)
9495 {
9496         int i;
9497         int maxaddr;
9498         uint32_t checksum;
9499         uint16_t *scarray;
9500
9501         maxaddr = (sizeof(*sc)/2) - 1;
9502         checksum = 0;
9503         scarray = (uint16_t *)sc;
9504
9505         for (i = 0; i < maxaddr; i++)
9506                 checksum = checksum + scarray[i];
9507         if (checksum == 0
9508          || (checksum & 0xFFFF) != sc->checksum) {
9509                 return (0);
9510         } else {
9511                 return (1);
9512         }
9513 }
9514
9515 int
9516 ahd_acquire_seeprom(struct ahd_softc *ahd)
9517 {
9518         /*
9519          * We should be able to determine the SEEPROM type
9520          * from the flexport logic, but unfortunately not
9521          * all implementations have this logic and there is
9522  * no programmatic method for determining if the logic
9523          * is present.
9524          */
9525         return (1);
9526 #if 0
9527         uint8_t seetype;
9528         int     error;
9529
9530         error = ahd_read_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, &seetype);
9531         if (error != 0
9532          || ((seetype & FLX_ROMSTAT_SEECFG) == FLX_ROMSTAT_SEE_NONE))
9533                 return (0);
9534         return (1);
9535 #endif
9536 }
9537
9538 void
9539 ahd_release_seeprom(struct ahd_softc *ahd)
9540 {
9541         /* Currently a no-op */
9542 }
9543
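/*
 * Flexport registers are reached indirectly through BRDCTL/BRDDAT.
 * Both accessors below use the same handshake: assert BRDEN (plus
 * BRDRW for reads) with the 3 bit address shifted into place, wait
 * for flexport arbitration (FLXARBACK), then either present the data
 * on BRDDAT and pulse BRDSTB (writes) or latch BRDDAT (reads) before
 * releasing BRDCTL.
 */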
9544 int
9545 ahd_write_flexport(struct ahd_softc *ahd, u_int addr, u_int value)
9546 {
9547         int error;
9548
9549         AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
9550         if (addr > 7)
9551                 panic("ahd_write_flexport: address out of range");
9552         ahd_outb(ahd, BRDCTL, BRDEN|(addr << 3));
9553         error = ahd_wait_flexport(ahd);
9554         if (error != 0)
9555                 return (error);
9556         ahd_outb(ahd, BRDDAT, value);
9557         ahd_flush_device_writes(ahd);
9558         ahd_outb(ahd, BRDCTL, BRDSTB|BRDEN|(addr << 3));
9559         ahd_flush_device_writes(ahd);
9560         ahd_outb(ahd, BRDCTL, BRDEN|(addr << 3));
9561         ahd_flush_device_writes(ahd);
9562         ahd_outb(ahd, BRDCTL, 0);
9563         ahd_flush_device_writes(ahd);
9564         return (0);
9565 }
9566
9567 int
9568 ahd_read_flexport(struct ahd_softc *ahd, u_int addr, uint8_t *value)
9569 {
9570         int     error;
9571
9572         AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
9573         if (addr > 7)
9574                 panic("ahd_read_flexport: address out of range");
9575         ahd_outb(ahd, BRDCTL, BRDRW|BRDEN|(addr << 3));
9576         error = ahd_wait_flexport(ahd);
9577         if (error != 0)
9578                 return (error);
9579         *value = ahd_inb(ahd, BRDDAT);
9580         ahd_outb(ahd, BRDCTL, 0);
9581         ahd_flush_device_writes(ahd);
9582         return (0);
9583 }
9584
9585 /*
9586  * Wait at most 2 seconds for flexport arbitration to succeed.
9587  */
9588 int
9589 ahd_wait_flexport(struct ahd_softc *ahd)
9590 {
9591         int cnt;
9592
9593         AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
9594         cnt = 1000000 * 2 / 5;
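        /* 400000 polls at 5us per aic_delay(5) gives the 2 second bound. */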
9595         while ((ahd_inb(ahd, BRDCTL) & FLXARBACK) == 0 && --cnt)
9596                 aic_delay(5);
9597
9598         if (cnt == 0)
9599                 return (ETIMEDOUT);
9600         return (0);
9601 }
9602
9603 /************************* Target Mode ****************************************/
9604 #ifdef AHD_TARGET_MODE
9605 cam_status
9606 ahd_find_tmode_devs(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb,
9607                     struct ahd_tmode_tstate **tstate,
9608                     struct ahd_tmode_lstate **lstate,
9609                     int notfound_failure)
9610 {
9611
9612         if ((ahd->features & AHD_TARGETMODE) == 0)
9613                 return (CAM_REQ_INVALID);
9614
9615         /*
9616          * Handle the 'black hole' device that sucks up
9617          * requests to unattached luns on enabled targets.
9618          */
9619         if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD
9620          && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
9621                 *tstate = NULL;
9622                 *lstate = ahd->black_hole;
9623         } else {
9624                 u_int max_id;
9625
9626                 max_id = (ahd->features & AHD_WIDE) ? 15 : 7;
9627                 if (ccb->ccb_h.target_id > max_id)
9628                         return (CAM_TID_INVALID);
9629
9630                 if (ccb->ccb_h.target_lun >= AHD_NUM_LUNS)
9631                         return (CAM_LUN_INVALID);
9632
9633                 *tstate = ahd->enabled_targets[ccb->ccb_h.target_id];
9634                 *lstate = NULL;
9635                 if (*tstate != NULL)
9636                         *lstate =
9637                             (*tstate)->enabled_luns[ccb->ccb_h.target_lun];
9638         }
9639
9640         if (notfound_failure != 0 && *lstate == NULL)
9641                 return (CAM_PATH_INVALID);
9642
9643         return (CAM_REQ_CMP);
9644 }
9645
9646 void
9647 ahd_handle_en_lun(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb)
9648 {
9649 #if NOT_YET
9650         struct     ahd_tmode_tstate *tstate;
9651         struct     ahd_tmode_lstate *lstate;
9652         struct     ccb_en_lun *cel;
9653         cam_status status;
9654         u_int      target;
9655         u_int      lun;
9656         u_int      target_mask;
9657         u_long     s;
9658         char       channel;
9659
9660         status = ahd_find_tmode_devs(ahd, sim, ccb, &tstate, &lstate,
9661                                      /*notfound_failure*/FALSE);
9662
9663         if (status != CAM_REQ_CMP) {
9664                 ccb->ccb_h.status = status;
9665                 return;
9666         }
9667
9668         if ((ahd->features & AHD_MULTIROLE) != 0) {
9669                 u_int      our_id;
9670
9671                 our_id = ahd->our_id;
9672                 if (ccb->ccb_h.target_id != our_id) {
9673                         if ((ahd->features & AHD_MULTI_TID) != 0
9674                          && (ahd->flags & AHD_INITIATORROLE) != 0) {
9675                                 /*
9676                                  * Only allow additional targets if
9677                                  * the initiator role is disabled.
9678                                  * The hardware cannot handle a re-select-in
9679                                  * on the initiator id during a re-select-out
9680                                  * on a different target id.
9681                                  */
9682                                 status = CAM_TID_INVALID;
9683                         } else if ((ahd->flags & AHD_INITIATORROLE) != 0
9684                                 || ahd->enabled_luns > 0) {
9685                                 /*
9686                                  * Only allow our target id to change
9687                                  * if the initiator role is not configured
9688                                  * and there are no enabled luns which
9689                                  * are attached to the currently registered
9690                                  * scsi id.
9691                                  */
9692                                 status = CAM_TID_INVALID;
9693                         }
9694                 }
9695         }
9696
9697         if (status != CAM_REQ_CMP) {
9698                 ccb->ccb_h.status = status;
9699                 return;
9700         }
9701
9702         /*
9703          * We now have an id that is valid.
9704          * If we aren't in target mode, switch modes.
9705          */
9706         if ((ahd->flags & AHD_TARGETROLE) == 0
9707          && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
9708                 u_long  s;
9709
9710                 kprintf("Configuring Target Mode\n");
9711                 ahd_lock();
9712                 if (LIST_FIRST(&ahd->pending_scbs) != NULL) {
9713                         ccb->ccb_h.status = CAM_BUSY;
9714                         ahd_unlock();
9715                         return;
9716                 }
9717                 ahd->flags |= AHD_TARGETROLE;
9718                 if ((ahd->features & AHD_MULTIROLE) == 0)
9719                         ahd->flags &= ~AHD_INITIATORROLE;
9720                 ahd_pause(ahd);
9721                 ahd_loadseq(ahd);
9722                 ahd_restart(ahd);
9723                 ahd_unlock();
9724         }
9725         cel = &ccb->cel;
9726         target = ccb->ccb_h.target_id;
9727         lun = ccb->ccb_h.target_lun;
9728         channel = SIM_CHANNEL(ahd, sim);
9729         target_mask = 0x01 << target;
9730         if (channel == 'B')
9731                 target_mask <<= 8;
9732
9733         if (cel->enable != 0) {
9734                 u_int scsiseq1;
9735
9736                 /* Are we already enabled?? */
9737                 if (lstate != NULL) {
9738                         xpt_print_path(ccb->ccb_h.path);
9739                         kprintf("Lun already enabled\n");
9740                         ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
9741                         return;
9742                 }
9743
9744                 if (cel->grp6_len != 0
9745                  || cel->grp7_len != 0) {
9746                         /*
9747                          * Don't (yet?) support vendor
9748                          * specific commands.
9749                          */
9750                         ccb->ccb_h.status = CAM_REQ_INVALID;
9751                         kprintf("Non-zero Group Codes\n");
9752                         return;
9753                 }
9754
9755                 /*
9756                  * Seems to be okay.
9757                  * Setup our data structures.
9758                  */
9759                 if (target != CAM_TARGET_WILDCARD && tstate == NULL) {
9760                         tstate = ahd_alloc_tstate(ahd, target, channel);
9761                         if (tstate == NULL) {
9762                                 xpt_print_path(ccb->ccb_h.path);
9763                                 kprintf("Couldn't allocate tstate\n");
9764                                 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
9765                                 return;
9766                         }
9767                 }
9768                 lstate = kmalloc(sizeof(*lstate), M_DEVBUF, M_INTWAIT | M_ZERO);
9769                 status = xpt_create_path(&lstate->path, /*periph*/NULL,
9770                                          xpt_path_path_id(ccb->ccb_h.path),
9771                                          xpt_path_target_id(ccb->ccb_h.path),
9772                                          xpt_path_lun_id(ccb->ccb_h.path));
9773                 if (status != CAM_REQ_CMP) {
9774                         kfree(lstate, M_DEVBUF);
9775                         xpt_print_path(ccb->ccb_h.path);
9776                         kprintf("Couldn't allocate path\n");
9777                         ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
9778                         return;
9779                 }
9780                 SLIST_INIT(&lstate->accept_tios);
9781                 SLIST_INIT(&lstate->immed_notifies);
9782                 ahd_lock();
9783                 ahd_pause(ahd);
9784                 if (target != CAM_TARGET_WILDCARD) {
9785                         tstate->enabled_luns[lun] = lstate;
9786                         ahd->enabled_luns++;
9787
9788                         if ((ahd->features & AHD_MULTI_TID) != 0) {
9789                                 u_int targid_mask;
9790
9791                                 targid_mask = ahd_inw(ahd, TARGID);
9792                                 targid_mask |= target_mask;
9793                                 ahd_outw(ahd, TARGID, targid_mask);
9794                                 ahd_update_scsiid(ahd, targid_mask);
9795                         } else {
9796                                 u_int our_id;
9797                                 char  channel;
9798
9799                                 channel = SIM_CHANNEL(ahd, sim);
9800                                 our_id = SIM_SCSI_ID(ahd, sim);
9801
9802                                 /*
9803                                  * This can only happen if selections
9804                                  * are not enabled
9805                                  */
9806                                 if (target != our_id) {
9807                                         u_int sblkctl;
9808                                         char  cur_channel;
9809                                         int   swap;
9810
9811                                         sblkctl = ahd_inb(ahd, SBLKCTL);
9812                                         cur_channel = (sblkctl & SELBUSB)
9813                                                     ? 'B' : 'A';
9814                                         if ((ahd->features & AHD_TWIN) == 0)
9815                                                 cur_channel = 'A';
9816                                         swap = cur_channel != channel;
9817                                         ahd->our_id = target;
9818
9819                                         if (swap)
9820                                                 ahd_outb(ahd, SBLKCTL,
9821                                                          sblkctl ^ SELBUSB);
9822
9823                                         ahd_outb(ahd, SCSIID, target);
9824
9825                                         if (swap)
9826                                                 ahd_outb(ahd, SBLKCTL, sblkctl);
9827                                 }
9828                         }
9829                 } else
9830                         ahd->black_hole = lstate;
9831                 /* Allow select-in operations */
9832                 if (ahd->black_hole != NULL && ahd->enabled_luns > 0) {
9833                         scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE);
9834                         scsiseq1 |= ENSELI;
9835                         ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq1);
9836                         scsiseq1 = ahd_inb(ahd, SCSISEQ1);
9837                         scsiseq1 |= ENSELI;
9838                         ahd_outb(ahd, SCSISEQ1, scsiseq1);
9839                 }
9840                 ahd_unpause(ahd);
9841                 ahd_unlock();
9842                 ccb->ccb_h.status = CAM_REQ_CMP;
9843                 xpt_print_path(ccb->ccb_h.path);
9844                 kprintf("Lun now enabled for target mode\n");
9845         } else {
9846                 struct scb *scb;
9847                 int i, empty;
9848
9849                 if (lstate == NULL) {
9850                         ccb->ccb_h.status = CAM_LUN_INVALID;
9851                         return;
9852                 }
9853
9854                 ahd_lock();
9855                 
9856                 ccb->ccb_h.status = CAM_REQ_CMP;
9857                 LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) {
9858                         struct ccb_hdr *ccbh;
9859
9860                         ccbh = &scb->io_ctx->ccb_h;
9861                         if (ccbh->func_code == XPT_CONT_TARGET_IO
9862                          && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){
9863                                 kprintf("CTIO pending\n");
9864                                 ccb->ccb_h.status = CAM_REQ_INVALID;
9865                                 ahd_unlock();
9866                                 return;
9867                         }
9868                 }
9869
9870                 if (SLIST_FIRST(&lstate->accept_tios) != NULL) {
9871                         kprintf("ATIOs pending\n");
9872                         ccb->ccb_h.status = CAM_REQ_INVALID;
9873                 }
9874
9875                 if (SLIST_FIRST(&lstate->immed_notifies) != NULL) {
9876                         kprintf("INOTs pending\n");
9877                         ccb->ccb_h.status = CAM_REQ_INVALID;
9878                 }
9879
9880                 if (ccb->ccb_h.status != CAM_REQ_CMP) {
9881                         ahd_unlock();
9882                         return;
9883                 }
9884
9885                 xpt_print_path(ccb->ccb_h.path);
9886                 kprintf("Target mode disabled\n");
9887                 xpt_free_path(lstate->path);
9888                 kfree(lstate, M_DEVBUF);
9889
9890                 ahd_pause(ahd);
9891                 /* Can we clean up the target too? */
9892                 if (target != CAM_TARGET_WILDCARD) {
9893                         tstate->enabled_luns[lun] = NULL;
9894                         ahd->enabled_luns--;
9895                         for (empty = 1, i = 0; i < 8; i++)
9896                                 if (tstate->enabled_luns[i] != NULL) {
9897                                         empty = 0;
9898                                         break;
9899                                 }
9900
9901                         if (empty) {
9902                                 ahd_free_tstate(ahd, target, channel,
9903                                                 /*force*/FALSE);
9904                                 if (ahd->features & AHD_MULTI_TID) {
9905                                         u_int targid_mask;
9906
9907                                         targid_mask = ahd_inw(ahd, TARGID);
9908                                         targid_mask &= ~target_mask;
9909                                         ahd_outw(ahd, TARGID, targid_mask);
9910                                         ahd_update_scsiid(ahd, targid_mask);
9911                                 }
9912                         }
9913                 } else {
9914
9915                         ahd->black_hole = NULL;
9916
9917                         /*
9918                          * We can't allow selections without
9919                          * our black hole device.
9920                          */
9921                         empty = TRUE;
9922                 }
9923                 if (ahd->enabled_luns == 0) {
9924                         /* Disallow select-in */
9925                         u_int scsiseq1;
9926
9927                         scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE);
9928                         scsiseq1 &= ~ENSELI;
9929                         ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq1);
9930                         scsiseq1 = ahd_inb(ahd, SCSISEQ1);
9931                         scsiseq1 &= ~ENSELI;
9932                         ahd_outb(ahd, SCSISEQ1, scsiseq1);
9933
9934                         if ((ahd->features & AHD_MULTIROLE) == 0) {
9935                                 kprintf("Configuring Initiator Mode\n");
9936                                 ahd->flags &= ~AHD_TARGETROLE;
9937                                 ahd->flags |= AHD_INITIATORROLE;
9938                                 ahd_pause(ahd);
9939                                 ahd_loadseq(ahd);
9940                                 ahd_restart(ahd);
9941                                 /*
9942                                  * Unpaused.  The extra unpause
9943                                  * that follows is harmless.
9944                                  */
9945                         }
9946                 }
9947                 ahd_unpause(ahd);
9948                 ahd_unlock();
9949         }
9950 #endif
9951 }
9952
9953 static void
9954 ahd_update_scsiid(struct ahd_softc *ahd, u_int targid_mask)
9955 {
9956 #if NOT_YET
9957         u_int scsiid_mask;
9958         u_int scsiid;
9959
9960         if ((ahd->features & AHD_MULTI_TID) == 0)
9961                 panic("ahd_update_scsiid called on non-multitid unit\n");
9962
9963         /*
9964          * Since we will rely on the TARGID mask
9965          * for selection enables, ensure that OID
9966          * in SCSIID is not set to some other ID
9967          * that we don't want to allow selections on.
9968          */
9969         if ((ahd->features & AHD_ULTRA2) != 0)
9970                 scsiid = ahd_inb(ahd, SCSIID_ULTRA2);
9971         else
9972                 scsiid = ahd_inb(ahd, SCSIID);
9973         scsiid_mask = 0x1 << (scsiid & OID);
9974         if ((targid_mask & scsiid_mask) == 0) {
9975                 u_int our_id;
9976
9977                 /* ffs counts from 1 */
9978                 our_id = ffs(targid_mask);
9979                 if (our_id == 0)
9980                         our_id = ahd->our_id;
9981                 else
9982                         our_id--;
9983                 scsiid &= TID;
9984                 scsiid |= our_id;
9985         }
9986         if ((ahd->features & AHD_ULTRA2) != 0)
9987                 ahd_outb(ahd, SCSIID_ULTRA2, scsiid);
9988         else
9989                 ahd_outb(ahd, SCSIID, scsiid);
9990 #endif
9991 }
9992
9993 void
9994 ahd_run_tqinfifo(struct ahd_softc *ahd, int paused)
9995 {
9996         struct target_cmd *cmd;
9997
9998         ahd_sync_tqinfifo(ahd, BUS_DMASYNC_POSTREAD);
9999         while ((cmd = &ahd->targetcmds[ahd->tqinfifonext])->cmd_valid != 0) {
10000
10001                 /*
10002                  * Only advance through the queue if we
10003                  * have the resources to process the command.
10004                  */
10005                 if (ahd_handle_target_cmd(ahd, cmd) != 0)
10006                         break;
10007
10008                 cmd->cmd_valid = 0;
10009                 ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
10010                                 ahd->shared_data_dmamap,
10011                                 ahd_targetcmd_offset(ahd, ahd->tqinfifonext),
10012                                 sizeof(struct target_cmd),
10013                                 BUS_DMASYNC_PREREAD);
10014                 ahd->tqinfifonext++;
10015
10016                 /*
10017                  * Lazily update our position in the target mode incoming
10018                  * command queue as seen by the sequencer.
10019                  */
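                /*
                 * This fires once per HOST_TQINPOS worth of commands
                 * (assuming, as the mask arithmetic implies, that
                 * HOST_TQINPOS is a power of two), so the extra chip
                 * access is amortized across many incoming commands.
                 */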
10020                 if ((ahd->tqinfifonext & (HOST_TQINPOS - 1)) == 1) {
10021                         u_int hs_mailbox;
10022
10023                         hs_mailbox = ahd_inb(ahd, HS_MAILBOX);
10024                         hs_mailbox &= ~HOST_TQINPOS;
10025                         hs_mailbox |= ahd->tqinfifonext & HOST_TQINPOS;
10026                         ahd_outb(ahd, HS_MAILBOX, hs_mailbox);
10027                 }
10028         }
10029 }
10030
10031 static int
10032 ahd_handle_target_cmd(struct ahd_softc *ahd, struct target_cmd *cmd)
10033 {
10034         struct    ahd_tmode_tstate *tstate;
10035         struct    ahd_tmode_lstate *lstate;
10036         struct    ccb_accept_tio *atio;
10037         uint8_t *byte;
10038         int       initiator;
10039         int       target;
10040         int       lun;
10041
10042         initiator = SCSIID_TARGET(ahd, cmd->scsiid);
10043         target = SCSIID_OUR_ID(cmd->scsiid);
10044         lun    = (cmd->identify & MSG_IDENTIFY_LUNMASK);
10045
10046         byte = cmd->bytes;
10047         tstate = ahd->enabled_targets[target];
10048         lstate = NULL;
10049         if (tstate != NULL)
10050                 lstate = tstate->enabled_luns[lun];
10051
10052         /*
10053          * Commands for disabled luns go to the black hole driver.
10054          */
10055         if (lstate == NULL)
10056                 lstate = ahd->black_hole;
10057
10058         atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios);
10059         if (atio == NULL) {
10060                 ahd->flags |= AHD_TQINFIFO_BLOCKED;
10061                 /*
10062                  * Wait for more ATIOs from the peripheral driver for this lun.
10063                  */
10064                 return (1);
10065         } else
10066                 ahd->flags &= ~AHD_TQINFIFO_BLOCKED;
10067 #ifdef AHD_DEBUG
10068         if ((ahd_debug & AHD_SHOW_TQIN) != 0)
10069                 kprintf("Incoming command from %d for %d:%d%s\n",
10070                        initiator, target, lun,
10071                        lstate == ahd->black_hole ? "(Black Holed)" : "");
10072 #endif
10073         SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle);
10074
10075         if (lstate == ahd->black_hole) {
10076                 /* Fill in the wildcards */
10077                 atio->ccb_h.target_id = target;
10078                 atio->ccb_h.target_lun = lun;
10079         }
10080
10081         /*
10082          * Package it up and send it off to
10083          * whomever has this lun enabled.
10084          */
10085         atio->sense_len = 0;
10086         atio->init_id = initiator;
10087         if (byte[0] != 0xFF) {
10088                 /* Tag was included */
10089                 atio->tag_action = *byte++;
10090                 atio->tag_id = *byte++;
10091                 atio->ccb_h.flags = CAM_TAG_ACTION_VALID;
10092         } else {
10093                 atio->ccb_h.flags = 0;
10094         }
10095         byte++;
10096
10097         /* Okay.  Now determine the cdb size based on the command code */
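        /*
         * Group codes live in the top three bits of the opcode.  Per
         * the SCSI specification, group 0 commands use a 6 byte CDB,
         * groups 1 and 2 use 10 bytes, group 4 uses 16 bytes, group 5
         * uses 12 bytes, and groups 3, 6, and 7 are reserved or vendor
         * unique.
         */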
10098         switch (*byte >> CMD_GROUP_CODE_SHIFT) {
10099         case 0:
10100                 atio->cdb_len = 6;
10101                 break;
10102         case 1:
10103         case 2:
10104                 atio->cdb_len = 10;
10105                 break;
10106         case 4:
10107                 atio->cdb_len = 16;
10108                 break;
10109         case 5:
10110                 atio->cdb_len = 12;
10111                 break;
10112         case 3:
10113         default:
10114                 /* Only copy the opcode. */
10115                 atio->cdb_len = 1;
10116                 kprintf("Reserved or VU command code type encountered\n");
10117                 break;
10118         }
10119         
10120         memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len);
10121
10122         atio->ccb_h.status |= CAM_CDB_RECVD;
10123
10124         if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) {
10125                 /*
10126                  * We weren't allowed to disconnect.
10127                  * We're hanging on the bus until a
10128                  * continue target I/O comes in response
10129                  * to this accept tio.
10130                  */
10131 #ifdef AHD_DEBUG
10132                 if ((ahd_debug & AHD_SHOW_TQIN) != 0)
10133                         kprintf("Received Immediate Command %d:%d:%d - %p\n",
10134                                initiator, target, lun, ahd->pending_device);
10135 #endif
10136                 ahd->pending_device = lstate;
10137                 ahd_freeze_ccb((union ccb *)atio);
10138                 atio->ccb_h.flags |= CAM_DIS_DISCONNECT;
10139         }
10140         xpt_done((union ccb*)atio);
10141         return (0);
10142 }
10143
10144 #endif