MPSAFE locking for the ahc/ahd drivers using lockmgr locks.
1/*
2 * Bus independent FreeBSD shim for the aic7xxx based Adaptec SCSI controllers
3 *
4 * Copyright (c) 1994-2001 Justin T. Gibbs.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification.
13 * 2. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 * Alternatively, this software may be distributed under the terms of the
17 * GNU Public License ("GPL").
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
23 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * $Id: //depot/aic7xxx/freebsd/dev/aic7xxx/aic7xxx_osm.c#20 $
32 *
33 * $FreeBSD: src/sys/dev/aic7xxx/aic7xxx_osm.c,v 1.45 2006/09/05 20:28:28 mjacob Exp $
34 * $DragonFly: src/sys/dev/disk/aic7xxx/aic7xxx_osm.c,v 1.21 2008/02/09 18:13:13 pavalos Exp $
35 */
36
37#include "aic7xxx_osm.h"
38#include "aic7xxx_inline.h"
39
40#include <sys/kthread.h>
41
42#ifndef AHC_TMODE_ENABLE
43#define AHC_TMODE_ENABLE 0
44#endif
45
46#include "aic_osm_lib.c"
47
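/*
 * Locking note: the CAM action routine, the interrupt handler and the
 * recovery thread all bracket their work with ahc_lock()/ahc_unlock().
 * Per the commit this file belongs to, those macros are expected to wrap
 * a per-softc lockmgr lock; a rough sketch of the assumed pattern (the
 * "lock" member name is illustrative only; see aic7xxx_osm.h for the
 * real definition):
 *
 *	ahc_lock(ahc);   // e.g. lockmgr(&ahc->platform_data->lock, LK_EXCLUSIVE)
 *	ahc_intr(ahc);
 *	ahc_unlock(ahc); // e.g. lockmgr(&ahc->platform_data->lock, LK_RELEASE)
 */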
48#define ccb_scb_ptr spriv_ptr0
49
50devclass_t ahc_devclass;
51
52#if 0
53static void ahc_dump_targcmd(struct target_cmd *cmd);
54#endif
55static int ahc_modevent(module_t mod, int type, void *data);
56static void ahc_action(struct cam_sim *sim, union ccb *ccb);
57static void ahc_get_tran_settings(struct ahc_softc *ahc,
58 int our_id, char channel,
59 struct ccb_trans_settings *cts);
60static void ahc_async(void *callback_arg, uint32_t code,
61 struct cam_path *path, void *arg);
62static void ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
63 int nsegments, int error);
64static void ahc_poll(struct cam_sim *sim);
65static void ahc_setup_data(struct ahc_softc *ahc, struct cam_sim *sim,
66 struct ccb_scsiio *csio, struct scb *scb);
67static void ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim,
68 union ccb *ccb);
69static int ahc_create_path(struct ahc_softc *ahc,
70 char channel, u_int target, u_int lun,
71 struct cam_path **path);
72
73
74static int
75ahc_create_path(struct ahc_softc *ahc, char channel, u_int target,
76 u_int lun, struct cam_path **path)
77{
78 path_id_t path_id;
79
80 if (channel == 'B')
81 path_id = cam_sim_path(ahc->platform_data->sim_b);
82 else
83 path_id = cam_sim_path(ahc->platform_data->sim);
84
85 return (xpt_create_path(path, /*periph*/NULL,
86 path_id, target, lun));
87}
88
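/*
 * Allocate the controller's IRQ resource and hook up the interrupt
 * handler.  The handler is registered INTR_MPSAFE, so it serializes
 * against the rest of the driver with ahc_lock()/ahc_unlock() rather
 * than relying on the MP lock.
 */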
89int
90ahc_map_int(struct ahc_softc *ahc)
91{
92 int error;
93 int zero;
94 int shareable;
95
96 zero = 0;
97 shareable = (ahc->flags & AHC_EDGE_INTERRUPT) ? 0: RF_SHAREABLE;
98 ahc->platform_data->irq =
99 bus_alloc_resource_any(ahc->dev_softc, SYS_RES_IRQ, &zero,
100 RF_ACTIVE | shareable);
101 if (ahc->platform_data->irq == NULL) {
102 device_printf(ahc->dev_softc,
103 "bus_alloc_resource() failed to allocate IRQ\n");
104 return (ENOMEM);
105 }
106 ahc->platform_data->irq_res_type = SYS_RES_IRQ;
107
108 /* Hook up our interrupt handler */
109 error = bus_setup_intr(ahc->dev_softc, ahc->platform_data->irq,
110 INTR_MPSAFE, ahc_platform_intr, ahc,
111 &ahc->platform_data->ih, NULL);
112
113 if (error != 0)
114 device_printf(ahc->dev_softc, "bus_setup_intr() failed: %d\n",
115 error);
116 return (error);
117}
118
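/*
 * Map the register window for aic7770 (EISA/VLB) adapters, which are
 * accessed through an I/O-port range rather than memory-mapped I/O.
 */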
119int
120aic7770_map_registers(struct ahc_softc *ahc, u_int unused_ioport_arg)
121{
122 struct resource *regs;
123 int rid;
124
125 rid = 0;
126 regs = bus_alloc_resource_any(ahc->dev_softc, SYS_RES_IOPORT, &rid,
127 RF_ACTIVE);
128 if (regs == NULL) {
129 device_printf(ahc->dev_softc, "Unable to map I/O space?!\n");
130 return ENOMEM;
131 }
132 ahc->platform_data->regs_res_type = SYS_RES_IOPORT;
132 ahc->platform_data->regs_res_id = rid;
134 ahc->platform_data->regs = regs;
135 ahc->tag = rman_get_bustag(regs);
136 ahc->bsh = rman_get_bushandle(regs);
137 return (0);
138}
139
140/*
141 * Attach all the sub-devices we can find
142 */
143int
144ahc_attach(struct ahc_softc *ahc)
145{
146 char ahc_info[256];
147 struct ccb_setasync csa;
148 int bus_id;
149 int bus_id2;
150 struct cam_sim *sim;
151 struct cam_sim *sim2;
152 struct cam_path *path;
153 struct cam_path *path2;
984263bc
MD
154 int count;
155
156 count = 0;
157 sim = NULL;
158 sim2 = NULL;
159 path = NULL;
160 path2 = NULL;
161
162 /*
163 * Create a thread to perform all recovery.
164 */
165 if (ahc_spawn_recovery_thread(ahc) != 0)
166 goto fail;
167
168 ahc_controller_info(ahc, ahc_info);
169 kprintf("%s\n", ahc_info);
170 ahc_lock(ahc);
171
172 /*
173 * Attach secondary channel first if the user has
174 * declared it the primary channel.
175 */
176 if ((ahc->features & AHC_TWIN) != 0
177 && (ahc->flags & AHC_PRIMARY_CHANNEL) != 0) {
178 bus_id = 1;
179 bus_id2 = 0;
180 } else {
181 bus_id = 0;
182 bus_id2 = 1;
183 }
184
185 /*
186 * Construct our first channel SIM entry
187 */
188 sim = cam_sim_alloc(ahc_action, ahc_poll, "ahc", ahc,
189 device_get_unit(ahc->dev_softc),
190 1, AHC_MAX_QUEUE, NULL);
191 if (sim == NULL)
192 goto fail;
193
194 if (xpt_bus_register(sim, bus_id) != CAM_SUCCESS) {
195 cam_sim_free(sim);
196 sim = NULL;
197 goto fail;
198 }
199
200 if (xpt_create_path(&path, /*periph*/NULL,
201 cam_sim_path(sim), CAM_TARGET_WILDCARD,
202 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
203 xpt_bus_deregister(cam_sim_path(sim));
204 cam_sim_free(sim);
205 sim = NULL;
206 goto fail;
207 }
208
209 xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
210 csa.ccb_h.func_code = XPT_SASYNC_CB;
211 csa.event_enable = AC_LOST_DEVICE;
212 csa.callback = ahc_async;
213 csa.callback_arg = sim;
214 xpt_action((union ccb *)&csa);
215 count++;
216
217 if (ahc->features & AHC_TWIN) {
218 sim2 = cam_sim_alloc(ahc_action, ahc_poll, "ahc",
219 ahc, device_get_unit(ahc->dev_softc), 1,
220 AHC_MAX_QUEUE, NULL);
221
222 if (sim2 == NULL) {
223 kprintf("ahc_attach: Unable to attach second "
224 "bus due to resource shortage");
225 goto fail;
226 }
227
228 if (xpt_bus_register(sim2, bus_id2) != CAM_SUCCESS) {
229 kprintf("ahc_attach: Unable to attach second "
230 "bus due to resource shortage");
231 /*
232 * We do not want to destroy the device queue
233 * because the first bus is using it.
234 */
235 cam_sim_free(sim2);
236 goto fail;
237 }
238
239 if (xpt_create_path(&path2, /*periph*/NULL,
240 cam_sim_path(sim2),
241 CAM_TARGET_WILDCARD,
242 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
243 xpt_bus_deregister(cam_sim_path(sim2));
244 cam_sim_free(sim2);
245 sim2 = NULL;
246 goto fail;
247 }
248 xpt_setup_ccb(&csa.ccb_h, path2, /*priority*/5);
249 csa.ccb_h.func_code = XPT_SASYNC_CB;
250 csa.event_enable = AC_LOST_DEVICE;
251 csa.callback = ahc_async;
252 csa.callback_arg = sim2;
253 xpt_action((union ccb *)&csa);
254 count++;
255 }
256
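/*
 * The SIMs were constructed primary-channel-first above, so store the
 * handles according to which physical channel the user made primary.
 */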
257fail:
258 if ((ahc->features & AHC_TWIN) != 0
259 && (ahc->flags & AHC_PRIMARY_CHANNEL) != 0) {
260 ahc->platform_data->sim_b = sim;
261 ahc->platform_data->path_b = path;
262 ahc->platform_data->sim = sim2;
263 ahc->platform_data->path = path2;
264 } else {
265 ahc->platform_data->sim = sim;
266 ahc->platform_data->path = path;
267 ahc->platform_data->sim_b = sim2;
268 ahc->platform_data->path_b = path2;
269 }
270 ahc_unlock(ahc);
271
272 if (count != 0) {
273 /* We have to wait until after any system dumps... */
274 ahc->platform_data->eh =
275 EVENTHANDLER_REGISTER(shutdown_post_sync, ahc_shutdown,
276 ahc, SHUTDOWN_PRI_DRIVER);
277 ahc_intr_enable(ahc, TRUE);
278 }
279
280 return (count);
281}
282
283/*
284 * Catch an interrupt from the adapter
285 */
286void
287ahc_platform_intr(void *arg)
288{
289 struct ahc_softc *ahc;
290
291 ahc = (struct ahc_softc *)arg;
292 ahc_lock(ahc);
293 ahc_intr(ahc);
294 ahc_unlock(ahc);
295}
296
297/*
298 * We have an SCB which has been processed by the
299 * adapter; now we look to see how the operation
300 * went.
301 */
302void
303ahc_done(struct ahc_softc *ahc, struct scb *scb)
304{
305 union ccb *ccb;
306
307 CAM_DEBUG(scb->io_ctx->ccb_h.path, CAM_DEBUG_TRACE,
308 ("ahc_done - scb %d\n", scb->hscb->tag));
309
310 ccb = scb->io_ctx;
311 LIST_REMOVE(scb, pending_links);
312 if ((scb->flags & SCB_TIMEDOUT) != 0)
313 LIST_REMOVE(scb, timedout_links);
314 if ((scb->flags & SCB_UNTAGGEDQ) != 0) {
315 struct scb_tailq *untagged_q;
316 int target_offset;
317
318 target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
319 untagged_q = &ahc->untagged_queues[target_offset];
320 TAILQ_REMOVE(untagged_q, scb, links.tqe);
321 scb->flags &= ~SCB_UNTAGGEDQ;
322 ahc_run_untagged_queue(ahc, untagged_q);
323 }
324
325 callout_stop(&scb->io_timer);
326
327 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
328 bus_dmasync_op_t op;
329
330 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
331 op = BUS_DMASYNC_POSTREAD;
332 else
333 op = BUS_DMASYNC_POSTWRITE;
334 bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op);
335 bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
336 }
337
338 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
339 struct cam_path *ccb_path;
340
341 /*
342 * If we have finally disconnected, clean up our
343 * pending device state.
344 * XXX - There may be error states in which we
345 * will remain connected.
346 */
347 ccb_path = ccb->ccb_h.path;
348 if (ahc->pending_device != NULL
349 && xpt_path_comp(ahc->pending_device->path, ccb_path) == 0) {
350
351 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
352 ahc->pending_device = NULL;
353 } else {
354 if (bootverbose) {
355 xpt_print_path(ccb->ccb_h.path);
356 kprintf("Still connected\n");
357 }
358 aic_freeze_ccb(ccb);
359 }
360 }
361
362 if (aic_get_transaction_status(scb) == CAM_REQ_INPROG)
363 ccb->ccb_h.status |= CAM_REQ_CMP;
364 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
365 ahc_free_scb(ahc, scb);
366 xpt_done(ccb);
367 return;
368 }
369
370 /*
371 * If the recovery SCB completes, we can no longer
372 * be in a timeout state.
373 */
374 if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
375 struct scb *list_scb;
376
377 ahc->scb_data->recovery_scbs--;
378
379 if (aic_get_transaction_status(scb) == CAM_BDR_SENT
380 || aic_get_transaction_status(scb) == CAM_REQ_ABORTED)
381 aic_set_transaction_status(scb, CAM_CMD_TIMEOUT);
382
383 if (ahc->scb_data->recovery_scbs == 0) {
384 /*
385 * All recovery actions have completed successfully,
386 * so reinstate the timeouts for all other pending
387 * commands.
388 */
389 LIST_FOREACH(list_scb, &ahc->pending_scbs,
390 pending_links) {
391
392 aic_scb_timer_reset(list_scb,
393 aic_get_timeout(scb));
394 }
395
396 ahc_print_path(ahc, scb);
397 kprintf("no longer in timeout, status = %x\n",
398 ccb->ccb_h.status);
399 }
400 }
401
402 /* Don't clobber any existing error state */
403 if (aic_get_transaction_status(scb) == CAM_REQ_INPROG) {
404 ccb->ccb_h.status |= CAM_REQ_CMP;
405 } else if ((scb->flags & SCB_SENSE) != 0) {
406 /*
407 * We performed autosense retrieval.
408 *
409 * Zero any sense not transferred by the
410 * device. The SCSI spec mandates that any
411 * untransferred data should be assumed to be
412 * zero. Complete the 'bounce' of sense information
413 * through buffers accessible via bus-space by
414 * copying it into the client's csio.
415 */
416 memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data));
417 memcpy(&ccb->csio.sense_data,
418 ahc_get_sense_buf(ahc, scb),
419 (aic_le32toh(scb->sg_list->len) & AHC_SG_LEN_MASK)
420 - ccb->csio.sense_resid);
421 scb->io_ctx->ccb_h.status |= CAM_AUTOSNS_VALID;
422 }
423 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
424 ahc_free_scb(ahc, scb);
425 xpt_done(ccb);
426}
427
428static void
429ahc_action(struct cam_sim *sim, union ccb *ccb)
430{
431 struct ahc_softc *ahc;
432 struct ahc_tmode_lstate *lstate;
433 u_int target_id;
434 u_int our_id;
435
436 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahc_action\n"));
437
438 ahc = (struct ahc_softc *)cam_sim_softc(sim);
439
440 target_id = ccb->ccb_h.target_id;
441 our_id = SIM_SCSI_ID(ahc, sim);
442
443 switch (ccb->ccb_h.func_code) {
444 /* Common cases first */
445 case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */
446 case XPT_CONT_TARGET_IO:/* Continue Host Target I/O Connection*/
447 {
448 struct ahc_tmode_tstate *tstate;
449 cam_status status;
450
451 status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate,
452 &lstate, TRUE);
453
454 if (status != CAM_REQ_CMP) {
455 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
456 /* Response from the black hole device */
457 tstate = NULL;
458 lstate = ahc->black_hole;
459 } else {
460 ccb->ccb_h.status = status;
461 xpt_done(ccb);
462 break;
463 }
464 }
465 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
466
467 SLIST_INSERT_HEAD(&lstate->accept_tios, &ccb->ccb_h,
468 sim_links.sle);
469 ccb->ccb_h.status = CAM_REQ_INPROG;
470 if ((ahc->flags & AHC_TQINFIFO_BLOCKED) != 0)
471 ahc_run_tqinfifo(ahc, /*paused*/FALSE);
472 break;
473 }
474
475 /*
476 * The target_id represents the target we attempt to
477 * select. In target mode, this is the initiator of
478 * the original command.
479 */
480 our_id = target_id;
481 target_id = ccb->csio.init_id;
482 /* FALLTHROUGH */
483 }
484 case XPT_SCSI_IO: /* Execute the requested I/O operation */
485 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */
486 {
487 struct scb *scb;
488 struct hardware_scb *hscb;
489
490 if ((ahc->flags & AHC_INITIATORROLE) == 0
491 && (ccb->ccb_h.func_code == XPT_SCSI_IO
492 || ccb->ccb_h.func_code == XPT_RESET_DEV)) {
493 ccb->ccb_h.status = CAM_PROVIDE_FAIL;
494 xpt_done(ccb);
495 return;
496 }
497
498 /*
499 * get an scb to use.
500 */
501 if ((scb = ahc_get_scb(ahc)) == NULL) {
502
503 xpt_freeze_simq(sim, /*count*/1);
504 ahc->flags |= AHC_RESOURCE_SHORTAGE;
505 ccb->ccb_h.status = CAM_REQUEUE_REQ;
506 xpt_done(ccb);
507 return;
508 }
509
510 hscb = scb->hscb;
511
512 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_SUBTRACE,
513 ("start scb(%p)\n", scb));
514 scb->io_ctx = ccb;
515 /*
516 * So we can find the SCB when an abort is requested
517 */
518 ccb->ccb_h.ccb_scb_ptr = scb;
519
520 /*
521 * Put all the arguments for the xfer in the scb
522 */
523 hscb->control = 0;
524 hscb->scsiid = BUILD_SCSIID(ahc, sim, target_id, our_id);
525 hscb->lun = ccb->ccb_h.target_lun;
526 if (ccb->ccb_h.func_code == XPT_RESET_DEV) {
527 hscb->cdb_len = 0;
528 scb->flags |= SCB_DEVICE_RESET;
529 hscb->control |= MK_MESSAGE;
530 ahc_execute_scb(scb, NULL, 0, 0);
531 } else {
532 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
533 struct target_data *tdata;
534
535 tdata = &hscb->shared_data.tdata;
536 if (ahc->pending_device == lstate)
537 scb->flags |= SCB_TARGET_IMMEDIATE;
538 hscb->control |= TARGET_SCB;
539 scb->flags |= SCB_TARGET_SCB;
540 tdata->target_phases = 0;
541 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
542 tdata->target_phases |= SPHASE_PENDING;
543 tdata->scsi_status =
544 ccb->csio.scsi_status;
545 }
546 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT)
547 tdata->target_phases |= NO_DISCONNECT;
548
549 tdata->initiator_tag = ccb->csio.tag_id;
550 }
551 if (ccb->ccb_h.flags & CAM_TAG_ACTION_VALID)
552 hscb->control |= ccb->csio.tag_action;
553
554 ahc_setup_data(ahc, sim, &ccb->csio, scb);
555 }
556 break;
557 }
558 case XPT_NOTIFY_ACK:
559 case XPT_IMMED_NOTIFY:
560 {
561 struct ahc_tmode_tstate *tstate;
562 struct ahc_tmode_lstate *lstate;
563 cam_status status;
564
565 status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate,
566 &lstate, TRUE);
567
568 if (status != CAM_REQ_CMP) {
569 ccb->ccb_h.status = status;
570 xpt_done(ccb);
571 break;
572 }
573 SLIST_INSERT_HEAD(&lstate->immed_notifies, &ccb->ccb_h,
574 sim_links.sle);
575 ccb->ccb_h.status = CAM_REQ_INPROG;
576 ahc_send_lstate_events(ahc, lstate);
577 break;
578 }
579 case XPT_EN_LUN: /* Enable LUN as a target */
580 ahc_handle_en_lun(ahc, sim, ccb);
581 xpt_done(ccb);
582 break;
583 case XPT_ABORT: /* Abort the specified CCB */
584 {
585 ahc_abort_ccb(ahc, sim, ccb);
586 break;
587 }
588 case XPT_SET_TRAN_SETTINGS:
589 {
590#ifdef AHC_NEW_TRAN_SETTINGS
591 struct ahc_devinfo devinfo;
592 struct ccb_trans_settings *cts;
593 struct ccb_trans_settings_scsi *scsi;
594 struct ccb_trans_settings_spi *spi;
595 struct ahc_initiator_tinfo *tinfo;
596 struct ahc_tmode_tstate *tstate;
597 uint16_t *discenable;
598 uint16_t *tagenable;
599 u_int update_type;
600
601 cts = &ccb->cts;
602 scsi = &cts->proto_specific.scsi;
603 spi = &cts->xport_specific.spi;
604 ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
605 cts->ccb_h.target_id,
606 cts->ccb_h.target_lun,
607 SIM_CHANNEL(ahc, sim),
608 ROLE_UNKNOWN);
609 tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
610 devinfo.our_scsiid,
611 devinfo.target, &tstate);
612 update_type = 0;
613 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
614 update_type |= AHC_TRANS_GOAL;
615 discenable = &tstate->discenable;
616 tagenable = &tstate->tagenable;
617 tinfo->curr.protocol_version =
618 cts->protocol_version;
619 tinfo->curr.transport_version =
620 cts->transport_version;
621 tinfo->goal.protocol_version =
622 cts->protocol_version;
623 tinfo->goal.transport_version =
624 cts->transport_version;
625 } else if (cts->type == CTS_TYPE_USER_SETTINGS) {
626 update_type |= AHC_TRANS_USER;
627 discenable = &ahc->user_discenable;
628 tagenable = &ahc->user_tagenable;
629 tinfo->user.protocol_version =
630 cts->protocol_version;
631 tinfo->user.transport_version =
632 cts->transport_version;
633 } else {
634 ccb->ccb_h.status = CAM_REQ_INVALID;
635 xpt_done(ccb);
636 break;
637 }
638
639 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
640 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
641 *discenable |= devinfo.target_mask;
642 else
643 *discenable &= ~devinfo.target_mask;
644 }
645
646 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
647 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
648 *tagenable |= devinfo.target_mask;
649 else
650 *tagenable &= ~devinfo.target_mask;
651 }
652
653 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
654 ahc_validate_width(ahc, /*tinfo limit*/NULL,
655 &spi->bus_width, ROLE_UNKNOWN);
656 ahc_set_width(ahc, &devinfo, spi->bus_width,
657 update_type, /*paused*/FALSE);
658 }
659
660 if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0) {
661 if (update_type == AHC_TRANS_USER)
662 spi->ppr_options = tinfo->user.ppr_options;
663 else
664 spi->ppr_options = tinfo->goal.ppr_options;
665 }
666
667 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) {
668 if (update_type == AHC_TRANS_USER)
669 spi->sync_offset = tinfo->user.offset;
670 else
671 spi->sync_offset = tinfo->goal.offset;
672 }
673
674 if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
675 if (update_type == AHC_TRANS_USER)
676 spi->sync_period = tinfo->user.period;
677 else
678 spi->sync_period = tinfo->goal.period;
679 }
680
681 if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0)
682 || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) {
683 struct ahc_syncrate *syncrate;
684 u_int maxsync;
685
686 if ((ahc->features & AHC_ULTRA2) != 0)
687 maxsync = AHC_SYNCRATE_DT;
688 else if ((ahc->features & AHC_ULTRA) != 0)
689 maxsync = AHC_SYNCRATE_ULTRA;
690 else
691 maxsync = AHC_SYNCRATE_FAST;
692
693 if (spi->bus_width != MSG_EXT_WDTR_BUS_16_BIT)
694 spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;
695
696 syncrate = ahc_find_syncrate(ahc, &spi->sync_period,
697 &spi->ppr_options,
698 maxsync);
699 ahc_validate_offset(ahc, /*tinfo limit*/NULL,
700 syncrate, &spi->sync_offset,
701 spi->bus_width, ROLE_UNKNOWN);
702
703 /* We use a period of 0 to represent async */
704 if (spi->sync_offset == 0) {
705 spi->sync_period = 0;
706 spi->ppr_options = 0;
707 }
708
709 ahc_set_syncrate(ahc, &devinfo, syncrate,
710 spi->sync_period, spi->sync_offset,
711 spi->ppr_options, update_type,
712 /*paused*/FALSE);
713 }
714 ccb->ccb_h.status = CAM_REQ_CMP;
715 xpt_done(ccb);
716#else
717 struct ahc_devinfo devinfo;
718 struct ccb_trans_settings *cts;
719 struct ahc_initiator_tinfo *tinfo;
720 struct ahc_tmode_tstate *tstate;
721 uint16_t *discenable;
722 uint16_t *tagenable;
723 u_int update_type;
724
725 cts = &ccb->cts;
726 ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
727 cts->ccb_h.target_id,
728 cts->ccb_h.target_lun,
729 SIM_CHANNEL(ahc, sim),
730 ROLE_UNKNOWN);
731 tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
732 devinfo.our_scsiid,
733 devinfo.target, &tstate);
734 update_type = 0;
735 if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
736 update_type |= AHC_TRANS_GOAL;
737 discenable = &tstate->discenable;
738 tagenable = &tstate->tagenable;
739 } else if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
740 update_type |= AHC_TRANS_USER;
741 discenable = &ahc->user_discenable;
742 tagenable = &ahc->user_tagenable;
743 } else {
744 ccb->ccb_h.status = CAM_REQ_INVALID;
745 xpt_done(ccb);
746 break;
747 }
748
749 if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
750 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
751 *discenable |= devinfo.target_mask;
752 else
753 *discenable &= ~devinfo.target_mask;
754 }
755
756 if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
757 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
758 *tagenable |= devinfo.target_mask;
759 else
760 *tagenable &= ~devinfo.target_mask;
761 }
762
763 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
764 ahc_validate_width(ahc, /*tinfo limit*/NULL,
765 &cts->bus_width, ROLE_UNKNOWN);
766 ahc_set_width(ahc, &devinfo, cts->bus_width,
767 update_type, /*paused*/FALSE);
768 }
769
770 if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0) {
771 if (update_type == AHC_TRANS_USER)
772 cts->sync_offset = tinfo->user.offset;
773 else
774 cts->sync_offset = tinfo->goal.offset;
775 }
776
777 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) {
778 if (update_type == AHC_TRANS_USER)
779 cts->sync_period = tinfo->user.period;
780 else
781 cts->sync_period = tinfo->goal.period;
782 }
783
784 if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0)
785 || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)) {
786 struct ahc_syncrate *syncrate;
787 u_int ppr_options;
788 u_int maxsync;
789
790 if ((ahc->features & AHC_ULTRA2) != 0)
791 maxsync = AHC_SYNCRATE_DT;
792 else if ((ahc->features & AHC_ULTRA) != 0)
793 maxsync = AHC_SYNCRATE_ULTRA;
794 else
795 maxsync = AHC_SYNCRATE_FAST;
796
797 ppr_options = 0;
798 if (cts->sync_period <= 9
799 && cts->bus_width == MSG_EXT_WDTR_BUS_16_BIT)
800 ppr_options = MSG_EXT_PPR_DT_REQ;
801
802 syncrate = ahc_find_syncrate(ahc, &cts->sync_period,
803 &ppr_options,
804 maxsync);
805 ahc_validate_offset(ahc, /*tinfo limit*/NULL,
806 syncrate, &cts->sync_offset,
807 MSG_EXT_WDTR_BUS_8_BIT,
808 ROLE_UNKNOWN);
809
810 /* We use a period of 0 to represent async */
811 if (cts->sync_offset == 0) {
812 cts->sync_period = 0;
813 ppr_options = 0;
814 }
815
816 if (ppr_options == MSG_EXT_PPR_DT_REQ
817 && tinfo->user.transport_version >= 3) {
818 tinfo->goal.transport_version =
819 tinfo->user.transport_version;
820 tinfo->curr.transport_version =
821 tinfo->user.transport_version;
822 }
823
824 ahc_set_syncrate(ahc, &devinfo, syncrate,
825 cts->sync_period, cts->sync_offset,
826 ppr_options, update_type,
827 /*paused*/FALSE);
828 }
829 ccb->ccb_h.status = CAM_REQ_CMP;
830 xpt_done(ccb);
831#endif
832 break;
833 }
834 case XPT_GET_TRAN_SETTINGS:
835 /* Get default/user set transfer settings for the target */
836 {
837
838 ahc_get_tran_settings(ahc, SIM_SCSI_ID(ahc, sim),
839 SIM_CHANNEL(ahc, sim), &ccb->cts);
840 xpt_done(ccb);
841 break;
842 }
843 case XPT_CALC_GEOMETRY:
844 {
845 int extended;
846
847 extended = SIM_IS_SCSIBUS_B(ahc, sim)
848 ? ahc->flags & AHC_EXTENDED_TRANS_B
849 : ahc->flags & AHC_EXTENDED_TRANS_A;
850 cam_calc_geometry(&ccb->ccg, extended);
851 xpt_done(ccb);
852 break;
853 }
854 case XPT_RESET_BUS: /* Reset the specified SCSI bus */
855 {
856 int found;
857
858 found = ahc_reset_channel(ahc, SIM_CHANNEL(ahc, sim),
859 /*initiate reset*/TRUE);
860 if (bootverbose) {
861 xpt_print_path(SIM_PATH(ahc, sim));
862 kprintf("SCSI bus reset delivered. "
863 "%d SCBs aborted.\n", found);
864 }
865 ccb->ccb_h.status = CAM_REQ_CMP;
866 xpt_done(ccb);
867 break;
868 }
869 case XPT_TERM_IO: /* Terminate the I/O process */
870 /* XXX Implement */
871 ccb->ccb_h.status = CAM_REQ_INVALID;
872 xpt_done(ccb);
873 break;
874 case XPT_PATH_INQ: /* Path routing inquiry */
875 {
876 struct ccb_pathinq *cpi = &ccb->cpi;
877
878 cpi->version_num = 1; /* XXX??? */
879 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
880 if ((ahc->features & AHC_WIDE) != 0)
881 cpi->hba_inquiry |= PI_WIDE_16;
882 if ((ahc->features & AHC_TARGETMODE) != 0) {
883 cpi->target_sprt = PIT_PROCESSOR
884 | PIT_DISCONNECT
885 | PIT_TERM_IO;
886 } else {
887 cpi->target_sprt = 0;
888 }
889 cpi->hba_misc = 0;
890 cpi->hba_eng_cnt = 0;
891 cpi->max_target = (ahc->features & AHC_WIDE) ? 15 : 7;
892 cpi->max_lun = AHC_NUM_LUNS - 1;
893 if (SIM_IS_SCSIBUS_B(ahc, sim)) {
894 cpi->initiator_id = ahc->our_id_b;
895 if ((ahc->flags & AHC_RESET_BUS_B) == 0)
896 cpi->hba_misc |= PIM_NOBUSRESET;
897 } else {
898 cpi->initiator_id = ahc->our_id;
899 if ((ahc->flags & AHC_RESET_BUS_A) == 0)
900 cpi->hba_misc |= PIM_NOBUSRESET;
901 }
902 cpi->bus_id = cam_sim_bus(sim);
903 cpi->base_transfer_speed = 3300;
904 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
905 strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
906 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
907 cpi->unit_number = cam_sim_unit(sim);
908#ifdef AHC_NEW_TRAN_SETTINGS
909 cpi->protocol = PROTO_SCSI;
910 cpi->protocol_version = SCSI_REV_2;
911 cpi->transport = XPORT_SPI;
912 cpi->transport_version = 2;
913 cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_ST;
914 if ((ahc->features & AHC_DT) != 0) {
915 cpi->transport_version = 3;
916 cpi->xport_specific.spi.ppr_options =
917 SID_SPI_CLOCK_DT_ST;
918 }
919#endif
920 cpi->ccb_h.status = CAM_REQ_CMP;
921 xpt_done(ccb);
922 break;
923 }
924 default:
925 ccb->ccb_h.status = CAM_PROVIDE_FAIL;
926 xpt_done(ccb);
927 break;
928 }
929}
930
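/*
 * Report the current or user-default negotiation parameters (sync rate,
 * offset, bus width, PPR options, disconnection and tagged queuing) for
 * a target back to CAM.
 */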
931static void
932ahc_get_tran_settings(struct ahc_softc *ahc, int our_id, char channel,
933 struct ccb_trans_settings *cts)
934{
935#ifdef AHC_NEW_TRAN_SETTINGS
936 struct ahc_devinfo devinfo;
937 struct ccb_trans_settings_scsi *scsi;
938 struct ccb_trans_settings_spi *spi;
939 struct ahc_initiator_tinfo *targ_info;
940 struct ahc_tmode_tstate *tstate;
941 struct ahc_transinfo *tinfo;
942
943 scsi = &cts->proto_specific.scsi;
944 spi = &cts->xport_specific.spi;
945 ahc_compile_devinfo(&devinfo, our_id,
946 cts->ccb_h.target_id,
947 cts->ccb_h.target_lun,
948 channel, ROLE_UNKNOWN);
949 targ_info = ahc_fetch_transinfo(ahc, devinfo.channel,
950 devinfo.our_scsiid,
951 devinfo.target, &tstate);
952
953 if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
954 tinfo = &targ_info->curr;
955 else
956 tinfo = &targ_info->user;
957
958 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
959 spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
960 if (cts->type == CTS_TYPE_USER_SETTINGS) {
961 if ((ahc->user_discenable & devinfo.target_mask) != 0)
962 spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
963
964 if ((ahc->user_tagenable & devinfo.target_mask) != 0)
965 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
966 } else {
967 if ((tstate->discenable & devinfo.target_mask) != 0)
968 spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
969
970 if ((tstate->tagenable & devinfo.target_mask) != 0)
971 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
972 }
973 cts->protocol_version = tinfo->protocol_version;
974 cts->transport_version = tinfo->transport_version;
975
976 spi->sync_period = tinfo->period;
977 spi->sync_offset = tinfo->offset;
978 spi->bus_width = tinfo->width;
979 spi->ppr_options = tinfo->ppr_options;
980
981 cts->protocol = PROTO_SCSI;
982 cts->transport = XPORT_SPI;
983 spi->valid = CTS_SPI_VALID_SYNC_RATE
984 | CTS_SPI_VALID_SYNC_OFFSET
985 | CTS_SPI_VALID_BUS_WIDTH
986 | CTS_SPI_VALID_PPR_OPTIONS;
987
988 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
989 scsi->valid = CTS_SCSI_VALID_TQ;
990 spi->valid |= CTS_SPI_VALID_DISC;
991 } else {
992 scsi->valid = 0;
993 }
994
995 cts->ccb_h.status = CAM_REQ_CMP;
996#else
997 struct ahc_devinfo devinfo;
998 struct ahc_initiator_tinfo *targ_info;
999 struct ahc_tmode_tstate *tstate;
1000 struct ahc_transinfo *tinfo;
1001
1002 ahc_compile_devinfo(&devinfo, our_id,
1003 cts->ccb_h.target_id,
1004 cts->ccb_h.target_lun,
1005 channel, ROLE_UNKNOWN);
1006 targ_info = ahc_fetch_transinfo(ahc, devinfo.channel,
1007 devinfo.our_scsiid,
1008 devinfo.target, &tstate);
1009
1010 if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0)
1011 tinfo = &targ_info->curr;
1012 else
1013 tinfo = &targ_info->user;
1014
1015 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
1016 if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) == 0) {
1017 if ((ahc->user_discenable & devinfo.target_mask) != 0)
1018 cts->flags |= CCB_TRANS_DISC_ENB;
1019
1020 if ((ahc->user_tagenable & devinfo.target_mask) != 0)
1021 cts->flags |= CCB_TRANS_TAG_ENB;
1022 } else {
1023 if ((tstate->discenable & devinfo.target_mask) != 0)
1024 cts->flags |= CCB_TRANS_DISC_ENB;
1025
1026 if ((tstate->tagenable & devinfo.target_mask) != 0)
1027 cts->flags |= CCB_TRANS_TAG_ENB;
1028 }
1029 cts->sync_period = tinfo->period;
1030 cts->sync_offset = tinfo->offset;
1031 cts->bus_width = tinfo->width;
1032
1033 cts->valid = CCB_TRANS_SYNC_RATE_VALID
1034 | CCB_TRANS_SYNC_OFFSET_VALID
1035 | CCB_TRANS_BUS_WIDTH_VALID;
1036
1037 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD)
1038 cts->valid |= CCB_TRANS_DISC_VALID|CCB_TRANS_TQ_VALID;
1039
1040 cts->ccb_h.status = CAM_REQ_CMP;
1041#endif
1042}
1043
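/*
 * CAM async event callback.  On AC_LOST_DEVICE, revert the target to
 * asynchronous, narrow transfers so a replacement device starts from a
 * clean negotiation state.
 */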
1044static void
1045ahc_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
1046{
1047 struct ahc_softc *ahc;
1048 struct cam_sim *sim;
1049
1050 sim = (struct cam_sim *)callback_arg;
1051 ahc = (struct ahc_softc *)cam_sim_softc(sim);
1052 switch (code) {
1053 case AC_LOST_DEVICE:
1054 {
1055 struct ahc_devinfo devinfo;
1056
1057 ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
1058 xpt_path_target_id(path),
1059 xpt_path_lun_id(path),
1060 SIM_CHANNEL(ahc, sim),
1061 ROLE_UNKNOWN);
1062
1063 /*
1064 * Revert to async/narrow transfers
1065 * for the next device.
1066 */
1067 ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
1068 AHC_TRANS_GOAL|AHC_TRANS_CUR, /*paused*/FALSE);
1069 ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL,
1070 /*period*/0, /*offset*/0, /*ppr_options*/0,
1071 AHC_TRANS_GOAL|AHC_TRANS_CUR,
1072 /*paused*/FALSE);
1073 break;
1074 }
1075 default:
1076 break;
1077 }
1078}
1079
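/*
 * bus_dmamap_load() callback: copy the DMA segments into the SCB's S/G
 * list, finish filling in the hardware SCB, and queue the command to
 * the controller (or complete it immediately if it was aborted while
 * the mapping was in flight).
 */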
1080static void
1081ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments,
1082 int error)
1083{
1084 struct scb *scb;
1085 union ccb *ccb;
1086 struct ahc_softc *ahc;
1087 struct ahc_initiator_tinfo *tinfo;
1088 struct ahc_tmode_tstate *tstate;
1089 u_int mask;
1090
1091 scb = (struct scb *)arg;
1092 ccb = scb->io_ctx;
1093 ahc = scb->ahc_softc;
1094
1095 if (error != 0) {
1096 if (error == EFBIG)
1097 aic_set_transaction_status(scb, CAM_REQ_TOO_BIG);
1098 else
1099 aic_set_transaction_status(scb, CAM_REQ_CMP_ERR);
1100 if (nsegments != 0)
1101 bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
1102 ahc_free_scb(ahc, scb);
1103 xpt_done(ccb);
1104 return;
1105 }
1106 if (nsegments != 0) {
1107 struct ahc_dma_seg *sg;
1108 bus_dma_segment_t *end_seg;
1109 bus_dmasync_op_t op;
1110
1111 end_seg = dm_segs + nsegments;
1112
1113 /* Copy the segments into our SG list */
1114 sg = scb->sg_list;
1115 while (dm_segs < end_seg) {
1116 uint32_t len;
1117
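/*
 * The low 32 address bits go into sg->addr; the code below appears
 * to fold the upper bits of ds_addr into the top byte of the length
 * word for the chips' extended (39-bit) addressing.
 */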
1118 sg->addr = aic_htole32(dm_segs->ds_addr);
1119 len = dm_segs->ds_len
1120 | ((dm_segs->ds_addr >> 8) & 0x7F000000);
1121 sg->len = aic_htole32(len);
1122 sg++;
1123 dm_segs++;
1124 }
1125
1126 /*
1127 * Note where to find the SG entries in bus space.
1128 * We also set the full residual flag which the
1129 * sequencer will clear as soon as a data transfer
1130 * occurs.
1131 */
1132 scb->hscb->sgptr = aic_htole32(scb->sg_list_phys|SG_FULL_RESID);
1133
1134 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1135 op = BUS_DMASYNC_PREREAD;
1136 else
1137 op = BUS_DMASYNC_PREWRITE;
1138
1139 bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op);
1140
1141 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
1142 struct target_data *tdata;
1143
1144 tdata = &scb->hscb->shared_data.tdata;
1145 tdata->target_phases |= DPHASE_PENDING;
1146 /*
1147 * CAM data direction is relative to the initiator.
1148 */
1149 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
1150 tdata->data_phase = P_DATAOUT;
1151 else
1152 tdata->data_phase = P_DATAIN;
1153
1154 /*
1155 * If the transfer is of an odd length and in the
1156 * "in" direction (scsi->HostBus), then it may
1157 * trigger a bug in the 'WideODD' feature of
1158 * non-Ultra2 chips. Force the total data-length
1159 * to be even by adding an extra, 1 byte, SG,
1160 * element. We do this even if we are not currently
1161 * negotiated wide as negotiation could occur before
1162 * this command is executed.
1163 */
1164 if ((ahc->bugs & AHC_TMODE_WIDEODD_BUG) != 0
1165 && (ccb->csio.dxfer_len & 0x1) != 0
1166 && (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1167
1168 nsegments++;
1169 if (nsegments > AHC_NSEG) {
1170
1171 aic_set_transaction_status(scb,
1172 CAM_REQ_TOO_BIG);
1173 bus_dmamap_unload(ahc->buffer_dmat,
1174 scb->dmamap);
1175 ahc_free_scb(ahc, scb);
1176 xpt_done(ccb);
1177 return;
1178 }
1179 sg->addr = aic_htole32(ahc->dma_bug_buf);
1180 sg->len = aic_htole32(1);
1181 sg++;
1182 }
1183 }
1184 sg--;
1185 sg->len |= aic_htole32(AHC_DMA_LAST_SEG);
1186
1187 /* Copy the first SG into the "current" data pointer area */
1188 scb->hscb->dataptr = scb->sg_list->addr;
1189 scb->hscb->datacnt = scb->sg_list->len;
1190 } else {
1191 scb->hscb->sgptr = aic_htole32(SG_LIST_NULL);
1192 scb->hscb->dataptr = 0;
1193 scb->hscb->datacnt = 0;
1194 }
1195
1196 scb->sg_count = nsegments;
1197
1198 /*
1199 * Last time we need to check if this SCB needs to
1200 * be aborted.
1201 */
1202 if (aic_get_transaction_status(scb) != CAM_REQ_INPROG) {
1203 if (nsegments != 0)
1204 bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
1205 ahc_free_scb(ahc, scb);
1206 xpt_done(ccb);
1207 return;
1208 }
1209
1210 tinfo = ahc_fetch_transinfo(ahc, SCSIID_CHANNEL(ahc, scb->hscb->scsiid),
1211 SCSIID_OUR_ID(scb->hscb->scsiid),
1212 SCSIID_TARGET(ahc, scb->hscb->scsiid),
1213 &tstate);
1214
1215 mask = SCB_GET_TARGET_MASK(ahc, scb);
1216 scb->hscb->scsirate = tinfo->scsirate;
1217 scb->hscb->scsioffset = tinfo->curr.offset;
1218 if ((tstate->ultraenb & mask) != 0)
1219 scb->hscb->control |= ULTRAENB;
1220
1221 if ((tstate->discenable & mask) != 0
1222 && (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
1223 scb->hscb->control |= DISCENB;
1224
1225 if ((ccb->ccb_h.flags & CAM_NEGOTIATE) != 0
1226 && (tinfo->goal.width != 0
1227 || tinfo->goal.offset != 0
1228 || tinfo->goal.ppr_options != 0)) {
1229 scb->flags |= SCB_NEGOTIATE;
1230 scb->hscb->control |= MK_MESSAGE;
1231 } else if ((tstate->auto_negotiate & mask) != 0) {
1232 scb->flags |= SCB_AUTO_NEGOTIATE;
1233 scb->hscb->control |= MK_MESSAGE;
1234 }
1235
1236 LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);
1237
1238 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1239
1240 /*
1241 * We only allow one untagged transaction
1242 * per target in the initiator role unless
1243 * we are storing a full busy target *lun*
1244 * table in SCB space.
1245 */
1246 if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0
1247 && (ahc->flags & AHC_SCB_BTT) == 0) {
1248 struct scb_tailq *untagged_q;
1249 int target_offset;
1250
1251 target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
1252 untagged_q = &(ahc->untagged_queues[target_offset]);
1253 TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
1254 scb->flags |= SCB_UNTAGGEDQ;
1255 if (TAILQ_FIRST(untagged_q) != scb) {
1256 return;
1257 }
1258 }
1259 scb->flags |= SCB_ACTIVE;
1260
1261 /*
1262 * Timers are disabled while recovery is in progress.
1263 */
1264 aic_scb_timer_start(scb);
1265
1266 if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
1267 /* Define a mapping from our tag to the SCB. */
1268 ahc->scb_data->scbindex[scb->hscb->tag] = scb;
1269 ahc_pause(ahc);
1270 if ((ahc->flags & AHC_PAGESCBS) == 0)
1271 ahc_outb(ahc, SCBPTR, scb->hscb->tag);
1272 ahc_outb(ahc, TARG_IMMEDIATE_SCB, scb->hscb->tag);
1273 ahc_unpause(ahc);
1274 } else {
1275 ahc_queue_scb(ahc, scb);
1276 }
1277}
1278
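/*
 * Polled completion entry point used by CAM when interrupts are not
 * available (e.g. while dumping).
 */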
1279static void
1280ahc_poll(struct cam_sim *sim)
1281{
1282 struct ahc_softc *ahc;
1283
1284 ahc = (struct ahc_softc *)cam_sim_softc(sim);
1285 ahc_intr(ahc);
1286}
1287
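/*
 * Copy the CDB into the hardware SCB and map the data buffer for DMA;
 * ahc_execute_scb() runs (possibly asynchronously from the busdma
 * callback) once the S/G list is ready.
 */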
1288static void
1289ahc_setup_data(struct ahc_softc *ahc, struct cam_sim *sim,
1290 struct ccb_scsiio *csio, struct scb *scb)
1291{
1292 struct hardware_scb *hscb;
1293 struct ccb_hdr *ccb_h;
1294
1295 hscb = scb->hscb;
1296 ccb_h = &csio->ccb_h;
1297
1298 csio->resid = 0;
1299 csio->sense_resid = 0;
1300 if (ccb_h->func_code == XPT_SCSI_IO) {
1301 hscb->cdb_len = csio->cdb_len;
1302 if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {
1303
1304 if (hscb->cdb_len > sizeof(hscb->cdb32)
1305 || (ccb_h->flags & CAM_CDB_PHYS) != 0) {
1306 aic_set_transaction_status(scb,
1307 CAM_REQ_INVALID);
1308 ahc_free_scb(ahc, scb);
1309 xpt_done((union ccb *)csio);
1310 return;
1311 }
1312 if (hscb->cdb_len > 12) {
1313 memcpy(hscb->cdb32,
1314 csio->cdb_io.cdb_ptr,
1315 hscb->cdb_len);
1316 scb->flags |= SCB_CDB32_PTR;
1317 } else {
1318 memcpy(hscb->shared_data.cdb,
1319 csio->cdb_io.cdb_ptr,
1320 hscb->cdb_len);
1321 }
1322 } else {
1323 if (hscb->cdb_len > 12) {
1324 memcpy(hscb->cdb32, csio->cdb_io.cdb_bytes,
1325 hscb->cdb_len);
1326 scb->flags |= SCB_CDB32_PTR;
1327 } else {
1328 memcpy(hscb->shared_data.cdb,
1329 csio->cdb_io.cdb_bytes,
1330 hscb->cdb_len);
1331 }
1332 }
1333 }
1334
1335 /* Only use S/G if there is a transfer */
1336 if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1337 if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
1338 /* We've been given a pointer to a single buffer */
1339 if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
1340 int error;
1341
1342 crit_enter();
1343 error = bus_dmamap_load(ahc->buffer_dmat,
1344 scb->dmamap,
1345 csio->data_ptr,
1346 csio->dxfer_len,
1347 ahc_execute_scb,
1348 scb, /*flags*/0);
1349 if (error == EINPROGRESS) {
1350 /*
1351 * So as to maintain ordering,
1352 * freeze the controller queue
1353 * until our mapping is
1354 * returned.
1355 */
1356 xpt_freeze_simq(sim,
1357 /*count*/1);
1358 scb->io_ctx->ccb_h.status |=
1359 CAM_RELEASE_SIMQ;
1360 }
1361 crit_exit();
1362 } else {
1363 struct bus_dma_segment seg;
1364
1365 /* Pointer to physical buffer */
1366 if (csio->dxfer_len > AHC_MAXTRANSFER_SIZE)
1367 panic("ahc_setup_data - Transfer size "
1368 "larger than can device max");
1369
1370 seg.ds_addr =
1371 (bus_addr_t)(vm_offset_t)csio->data_ptr;
1372 seg.ds_len = csio->dxfer_len;
1373 ahc_execute_scb(scb, &seg, 1, 0);
1374 }
1375 } else {
1376 struct bus_dma_segment *segs;
1377
1378 if ((ccb_h->flags & CAM_DATA_PHYS) != 0)
1379 panic("ahc_setup_data - Physical segment "
1380 "pointers unsupported");
1381
1382 if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0)
1383 panic("ahc_setup_data - Virtual segment "
1384 "addresses unsupported");
1385
1386 /* Just use the segments provided */
1387 segs = (struct bus_dma_segment *)csio->data_ptr;
1388 ahc_execute_scb(scb, segs, csio->sglist_cnt, 0);
1389 }
1390 } else {
1391 ahc_execute_scb(scb, NULL, 0, 0);
1392 }
1393}
1394
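/*
 * Handle XPT_ABORT.  Target-mode CCBs still sitting on our accept_tios
 * or immed_notifies lists can simply be unlinked and completed;
 * aborting an active initiator I/O is not fully implemented.
 */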
1395static void
1396ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
1397{
1398 union ccb *abort_ccb;
1399
1400 abort_ccb = ccb->cab.abort_ccb;
1401 switch (abort_ccb->ccb_h.func_code) {
1402 case XPT_ACCEPT_TARGET_IO:
1403 case XPT_IMMED_NOTIFY:
1404 case XPT_CONT_TARGET_IO:
1405 {
1406 struct ahc_tmode_tstate *tstate;
1407 struct ahc_tmode_lstate *lstate;
1408 struct ccb_hdr_slist *list;
1409 cam_status status;
1410
1411 status = ahc_find_tmode_devs(ahc, sim, abort_ccb, &tstate,
1412 &lstate, TRUE);
1413
1414 if (status != CAM_REQ_CMP) {
1415 ccb->ccb_h.status = status;
1416 break;
1417 }
1418
1419 if (abort_ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO)
1420 list = &lstate->accept_tios;
1421 else if (abort_ccb->ccb_h.func_code == XPT_IMMED_NOTIFY)
1422 list = &lstate->immed_notifies;
1423 else
1424 list = NULL;
1425
1426 if (list != NULL) {
1427 struct ccb_hdr *curelm;
1428 int found;
1429
1430 curelm = SLIST_FIRST(list);
1431 found = 0;
1432 if (curelm == &abort_ccb->ccb_h) {
1433 found = 1;
1434 SLIST_REMOVE_HEAD(list, sim_links.sle);
1435 } else {
1436 while(curelm != NULL) {
1437 struct ccb_hdr *nextelm;
1438
1439 nextelm =
1440 SLIST_NEXT(curelm, sim_links.sle);
1441
1442 if (nextelm == &abort_ccb->ccb_h) {
1443 found = 1;
1444 SLIST_NEXT(curelm,
1445 sim_links.sle) =
1446 SLIST_NEXT(nextelm,
1447 sim_links.sle);
1448 break;
1449 }
1450 curelm = nextelm;
1451 }
1452 }
1453
1454 if (found) {
1455 abort_ccb->ccb_h.status = CAM_REQ_ABORTED;
1456 xpt_done(abort_ccb);
1457 ccb->ccb_h.status = CAM_REQ_CMP;
1458 } else {
1459 xpt_print_path(abort_ccb->ccb_h.path);
1460 kprintf("Not found\n");
1461 ccb->ccb_h.status = CAM_PATH_INVALID;
1462 }
1463 break;
1464 }
1465 /* FALLTHROUGH */
1466 }
1467 case XPT_SCSI_IO:
1468 /* XXX Fully implement the hard ones */
1469 ccb->ccb_h.status = CAM_UA_ABORT;
1470 break;
1471 default:
1472 ccb->ccb_h.status = CAM_REQ_INVALID;
1473 break;
1474 }
1475 xpt_done(ccb);
1476}
1477
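/*
 * Deliver an async event (transfer negotiation change, BDR or bus
 * reset) to CAM on a freshly constructed path for the affected device.
 */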
1478void
1479ahc_send_async(struct ahc_softc *ahc, char channel, u_int target,
1480 u_int lun, ac_code code, void *opt_arg)
1481{
1482 struct ccb_trans_settings cts;
1483 struct cam_path *path;
1484 void *arg;
1485 int error;
1486
1487 arg = NULL;
1488 error = ahc_create_path(ahc, channel, target, lun, &path);
1489
1490 if (error != CAM_REQ_CMP)
1491 return;
1492
1493 switch (code) {
1494 case AC_TRANSFER_NEG:
1495 {
1496#ifdef AHC_NEW_TRAN_SETTINGS
1497 struct ccb_trans_settings_scsi *scsi;
1498
1499 cts.type = CTS_TYPE_CURRENT_SETTINGS;
1500 scsi = &cts.proto_specific.scsi;
1501#else
1502 cts.flags = CCB_TRANS_CURRENT_SETTINGS;
1503#endif
1504 cts.ccb_h.path = path;
1505 cts.ccb_h.target_id = target;
1506 cts.ccb_h.target_lun = lun;
1507 ahc_get_tran_settings(ahc, channel == 'A' ? ahc->our_id
1508 : ahc->our_id_b,
1509 channel, &cts);
1510 arg = &cts;
1511#ifdef AHC_NEW_TRAN_SETTINGS
1512 scsi->valid &= ~CTS_SCSI_VALID_TQ;
1513 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
1514#else
1515 cts.valid &= ~CCB_TRANS_TQ_VALID;
1516 cts.flags &= ~CCB_TRANS_TAG_ENB;
1517#endif
1518 if (opt_arg == NULL)
1519 break;
1520 if (*((ahc_queue_alg *)opt_arg) == AHC_QUEUE_TAGGED)
1521#ifdef AHC_NEW_TRAN_SETTINGS
1522 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
1523 scsi->valid |= CTS_SCSI_VALID_TQ;
1524#else
1525 cts.flags |= CCB_TRANS_TAG_ENB;
1526 cts.valid |= CCB_TRANS_TQ_VALID;
1527#endif
1528 break;
1529 }
1530 case AC_SENT_BDR:
1531 case AC_BUS_RESET:
1532 break;
1533 default:
1534 panic("ahc_send_async: Unexpected async event");
1535 }
1536 xpt_async(code, path, arg);
1537 xpt_free_path(path);
1538}
1539
1540void
1541ahc_platform_set_tags(struct ahc_softc *ahc,
1542 struct ahc_devinfo *devinfo, int enable)
1543{
1544}
1545
1546int
1547ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
1548{
1549 ahc->platform_data = kmalloc(sizeof(struct ahc_platform_data), M_DEVBUF,
1550 M_INTWAIT | M_ZERO);
1551 return (0);
1552}
1553
1554void
1555ahc_platform_free(struct ahc_softc *ahc)
1556{
1557 struct ahc_platform_data *pdata;
1558
1559 pdata = ahc->platform_data;
1560 if (pdata != NULL) {
1561 if (pdata->regs != NULL)
1562 bus_release_resource(ahc->dev_softc,
1563 pdata->regs_res_type,
1564 pdata->regs_res_id,
1565 pdata->regs);
1566
1567 if (pdata->irq != NULL)
1568 bus_release_resource(ahc->dev_softc,
1569 pdata->irq_res_type,
1570 0, pdata->irq);
1571
1572 if (pdata->sim_b != NULL) {
1573 xpt_async(AC_LOST_DEVICE, pdata->path_b, NULL);
1574 xpt_free_path(pdata->path_b);
1575 xpt_bus_deregister(cam_sim_path(pdata->sim_b));
1576 cam_sim_free(pdata->sim_b);
1577 }
1578 if (pdata->sim != NULL) {
1579 xpt_async(AC_LOST_DEVICE, pdata->path, NULL);
1580 xpt_free_path(pdata->path);
1581 xpt_bus_deregister(cam_sim_path(pdata->sim));
1582 cam_sim_free(pdata->sim);
1583 }
1584 if (pdata->eh != NULL)
1585 EVENTHANDLER_DEREGISTER(shutdown_post_sync, pdata->eh);
1586 kfree(ahc->platform_data, M_DEVBUF);
1587 }
1588}
1589
1590int
1591ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc)
1592{
1593 /* We don't sort softcs under FreeBSD, so always report equal. */
1594 return (0);
1595}
1596
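/*
 * Device detach: remove the softc from the global list, quiesce and
 * tear down the interrupt, then release all resources via ahc_free().
 */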
1597int
1598ahc_detach(device_t dev)
1599{
1600 struct ahc_softc *ahc;
1601
1602 device_printf(dev, "detaching device\n");
1603 ahc = device_get_softc(dev);
1604 ahc_lock(ahc);
1605 TAILQ_REMOVE(&ahc_tailq, ahc, links);
1606 ahc_intr_enable(ahc, FALSE);
1607 bus_teardown_intr(dev, ahc->platform_data->irq, ahc->platform_data->ih);
1608 ahc_unlock(ahc);
1609 ahc_free(ahc);
1610 return (0);
1611}
1612
1613#if 0
1614static void
1615ahc_dump_targcmd(struct target_cmd *cmd)
1616{
1617 uint8_t *byte;
1618 uint8_t *last_byte;
1619 int i;
1620
1621 byte = &cmd->initiator_channel;
1622 /* Debugging info for received commands */
1623 last_byte = &cmd[1].initiator_channel;
1624
1625 i = 0;
1626 while (byte < last_byte) {
1627 if (i == 0)
1628 kprintf("\t");
1629 kprintf("%#x", *byte++);
1630 i++;
1631 if (i == 8) {
1632 kprintf("\n");
1633 i = 0;
1634 } else {
1635 kprintf(", ");
1636 }
1637 }
1638}
1639#endif
1640
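/*
 * Module glue: ahc_modevent currently ignores all module events; the
 * driver is registered at SI_SUB_DRIVERS and depends on the CAM module.
 */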
1641static int
1642ahc_modevent(module_t mod, int type, void *data)
1643{
1644 /* XXX Deal with busy status on unload. */
1645 /* XXX Deal with unknown events */
1646 return 0;
1647}
1648
1649static moduledata_t ahc_mod = {
1650 "ahc",
1651 ahc_modevent,
1652 NULL
1653};
1654
1655DECLARE_MODULE(ahc, ahc_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
1656MODULE_DEPEND(ahc, cam, 1, 1, 1);
1657MODULE_VERSION(ahc, 1);