/*
 * Generic driver for the Advanced Systems Inc. SCSI controllers
 * Product specific probe and attach routines can be found in:
 * i386/isa/adv_isa.c	ABP5140, ABP542, ABP5150, ABP842, ABP852
 * pci/adv_pci.c	ABP920, ABP930, ABP930U, ABP930UA, ABP940, ABP940U,
 *			ABP940UA, ABP950, ABP960, ABP960U, ABP960UA,
 * Copyright (c) 1996-2000 Justin Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/advansys/advansys.c,v 1.14.2.4 2002/01/06 21:21:42 dwmalone Exp $
 */
/*
 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
 *
 * Copyright (c) 1995-1997 Advanced System Products, Inc.
 * All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that redistributions of source
 * code retain the above copyright notice and this comment without
 * modification.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/thread2.h>

#include <bus/cam/cam.h>
#include <bus/cam/cam_ccb.h>
#include <bus/cam/cam_sim.h>
#include <bus/cam/cam_xpt_sim.h>
#include <bus/cam/cam_xpt_periph.h>
#include <bus/cam/cam_debug.h>

#include <bus/cam/scsi/scsi_all.h>
#include <bus/cam/scsi/scsi_message.h>

#include <vm/vm_param.h>

#include "advansys.h"
static void	adv_action(struct cam_sim *sim, union ccb *ccb);
static void	adv_execute_ccb(void *arg, bus_dma_segment_t *dm_segs,
				int nsegments, int error);
static void	adv_poll(struct cam_sim *sim);
static void	adv_run_doneq(struct adv_softc *adv);
static struct adv_ccb_info *
		adv_alloc_ccb_info(struct adv_softc *adv);
static void	adv_destroy_ccb_info(struct adv_softc *adv,
				     struct adv_ccb_info *cinfo);
static __inline struct adv_ccb_info *
		adv_get_ccb_info(struct adv_softc *adv);
static __inline void adv_free_ccb_info(struct adv_softc *adv,
				       struct adv_ccb_info *cinfo);
static __inline void adv_set_state(struct adv_softc *adv, adv_state state);
static __inline void adv_clear_state(struct adv_softc *adv, union ccb* ccb);
static void	adv_clear_state_really(struct adv_softc *adv, union ccb* ccb);
static __inline struct adv_ccb_info *
adv_get_ccb_info(struct adv_softc *adv)
{
	struct adv_ccb_info *cinfo;

	/* Prefer a cached CCB info structure from the free list */
	if ((cinfo = SLIST_FIRST(&adv->free_ccb_infos)) != NULL) {
		SLIST_REMOVE_HEAD(&adv->free_ccb_infos, links);
	} else {
		cinfo = adv_alloc_ccb_info(adv);
	}

	return (cinfo);
}
static __inline void
adv_free_ccb_info(struct adv_softc *adv, struct adv_ccb_info *cinfo)
{
	cinfo->state = ACCB_FREE;
	SLIST_INSERT_HEAD(&adv->free_ccb_infos, cinfo, links);
}
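
/*
 * Illustrative sketch, not part of the original driver: the pair of
 * helpers above implements a simple allocation cache.  A caller pairs
 * every adv_get_ccb_info() with an adv_free_ccb_info() once the
 * transaction completes, roughly as in this hypothetical fragment:
 */
#if 0
	struct adv_ccb_info *cinfo;

	cinfo = adv_get_ccb_info(adv);		/* reuse or allocate */
	if (cinfo == NULL)
		return;				/* cache and backing store empty */
	/* ... track an outstanding transaction through cinfo ... */
	adv_free_ccb_info(adv, cinfo);		/* back onto the SLIST cache */
#endif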
static __inline void
adv_set_state(struct adv_softc *adv, adv_state state)
{
	if (adv->state == 0)
		xpt_freeze_simq(adv->sim, /*count*/1);

	adv->state |= state;
}

static __inline void
adv_clear_state(struct adv_softc *adv, union ccb* ccb)
{
	if (adv->state != 0)
		adv_clear_state_really(adv, ccb);
}
static void
adv_clear_state_really(struct adv_softc *adv, union ccb* ccb)
{
	if ((adv->state & ADV_BUSDMA_BLOCK_CLEARED) != 0)
		adv->state &= ~(ADV_BUSDMA_BLOCK_CLEARED|ADV_BUSDMA_BLOCK);
	if ((adv->state & ADV_RESOURCE_SHORTAGE) != 0) {
		int openings;

		openings = adv->max_openings - adv->cur_active - ADV_MIN_FREE_Q;
		if (openings >= adv->openings_needed) {
			adv->state &= ~ADV_RESOURCE_SHORTAGE;
			adv->openings_needed = 0;
		}
	}
	if ((adv->state & ADV_IN_TIMEOUT) != 0) {
		struct adv_ccb_info *cinfo;

		cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr;
		if ((cinfo->state & ACCB_RECOVERY_CCB) != 0) {
			struct ccb_hdr *ccb_h;

			/*
			 * We now traverse our list of pending CCBs
			 * and reinstate their timeouts.
			 */
			ccb_h = LIST_FIRST(&adv->pending_ccbs);
			while (ccb_h != NULL) {
				callout_reset(&ccb_h->timeout_ch,
					      (ccb_h->timeout * hz) / 1000,
					      adv_timeout, ccb_h);
				ccb_h = LIST_NEXT(ccb_h, sim_links.le);
			}
			adv->state &= ~ADV_IN_TIMEOUT;
			kprintf("%s: No longer in timeout\n", adv_name(adv));
		}
	}
	if (adv->state == 0)
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
}
static void
adv_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t* physaddr;

	physaddr = (bus_addr_t*)arg;
	*physaddr = segs->ds_addr;
}
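
/*
 * Illustrative sketch, an assumption rather than original driver code:
 * adv_map() is the usual single-segment bus_dmamap_load() callback.  The
 * caller hands in the address of a bus_addr_t and reads the bus address
 * of the new mapping back out of it:
 */
#if 0
	bus_addr_t physbase;

	/* The callback runs synchronously for wired kernel memory */
	bus_dmamap_load(dmat, dmamap, vaddr, size,
			adv_map, &physbase, /*flags*/0);
	/* physbase now holds segs[0].ds_addr for the mapping */
#endif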
char *
adv_name(struct adv_softc *adv)
{
	static char name[10];

	ksnprintf(name, sizeof(name), "adv%d", adv->unit);
	return (name);
}
static void
adv_action(struct cam_sim *sim, union ccb *ccb)
{
	struct adv_softc *adv;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adv_action\n"));

	adv = (struct adv_softc *)cam_sim_softc(sim);

	switch (ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	{
		struct ccb_hdr *ccb_h;
		struct ccb_scsiio *csio;
		struct adv_ccb_info *cinfo;

		ccb_h = &ccb->ccb_h;
		csio = &ccb->csio;
		cinfo = adv_get_ccb_info(adv);
		if (cinfo == NULL)
			panic("XXX Handle CCB info error!!!");

		ccb_h->ccb_cinfo_ptr = cinfo;
		cinfo->ccb = ccb;
		/* Only use S/G if there is a transfer */
		if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
				/*
				 * We've been given a pointer
				 * to a single buffer.
				 */
				if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
					int error;

					error =
					    bus_dmamap_load(adv->buffer_dmat,
							    cinfo->dmamap,
							    csio->data_ptr,
							    csio->dxfer_len,
							    adv_execute_ccb,
							    csio, /*flags*/0);
					if (error == EINPROGRESS) {
						/*
						 * So as to maintain ordering,
						 * freeze the controller queue
						 * until our mapping is
						 * returned.
						 */
						adv_set_state(adv,
							      ADV_BUSDMA_BLOCK);
					}
				} else {
					struct bus_dma_segment seg;

					/* Pointer to physical buffer */
					seg.ds_addr =
					    (bus_addr_t)csio->data_ptr;
					seg.ds_len = csio->dxfer_len;
					adv_execute_ccb(csio, &seg, 1, 0);
				}
			} else {
				struct bus_dma_segment *segs;

				if ((ccb_h->flags & CAM_DATA_PHYS) != 0)
					panic("adv_setup_data - Physical "
					      "segment pointers unsupported");

				if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0)
					panic("adv_setup_data - Virtual "
					      "segment addresses unsupported");

				/* Just use the segments provided */
				segs = (struct bus_dma_segment *)csio->data_ptr;
				adv_execute_ccb(ccb, segs, csio->sglist_cnt, 0);
			}
		} else {
			adv_execute_ccb(ccb, NULL, 0, 0);
		}
		break;
	}
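	/*
	 * Illustrative sketch of the deferral protocol above (hypothetical
	 * fragment, not original driver code): bus_dmamap_load() may either
	 * invoke its callback immediately or return EINPROGRESS and invoke
	 * it later, so the SIM queue is frozen in the deferred case to
	 * preserve ordering until adv_execute_ccb() finally runs:
	 */
#if 0
	error = bus_dmamap_load(dmat, map, buf, len, adv_execute_ccb,
				csio, /*flags*/0);
	if (error == EINPROGRESS) {
		/* Callback fires later; block new submissions until then */
		adv_set_state(adv, ADV_BUSDMA_BLOCK);
	}
	/* adv_execute_ccb() later calls adv_clear_state() to thaw the simq */
#endif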
	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
	case XPT_TARGET_IO:	/* Execute target I/O request */
	case XPT_ACCEPT_TARGET_IO:	/* Accept Host Target Mode CDB */
	case XPT_CONT_TARGET_IO:	/* Continue Host Target I/O Connection*/
	case XPT_EN_LUN:		/* Enable LUN as a target */
	case XPT_ABORT:			/* Abort the specified CCB */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
#define	IS_CURRENT_SETTINGS(c)	(c->type == CTS_TYPE_CURRENT_SETTINGS)
#define	IS_USER_SETTINGS(c)	(c->type == CTS_TYPE_USER_SETTINGS)
	case XPT_SET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings_scsi *scsi;
		struct ccb_trans_settings_spi *spi;
		struct ccb_trans_settings *cts;
		target_bit_vector targ_mask;
		struct adv_transinfo *tconf;
		u_int update_type;

		cts = &ccb->cts;
		targ_mask = ADV_TID_TO_TARGET_MASK(cts->ccb_h.target_id);
		update_type = 0;

		/*
		 * The user must specify which type of settings he wishes
		 * to change.
		 */
		if (IS_CURRENT_SETTINGS(cts) && !IS_USER_SETTINGS(cts)) {
			tconf = &adv->tinfo[cts->ccb_h.target_id].current;
			update_type |= ADV_TRANS_GOAL;
		} else if (IS_USER_SETTINGS(cts) && !IS_CURRENT_SETTINGS(cts)) {
			tconf = &adv->tinfo[cts->ccb_h.target_id].user;
			update_type |= ADV_TRANS_USER;
		} else {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			break;
		}

		scsi = &cts->proto_specific.scsi;
		spi = &cts->xport_specific.spi;
		if ((update_type & ADV_TRANS_GOAL) != 0) {
			if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
					adv->disc_enable |= targ_mask;
				else
					adv->disc_enable &= ~targ_mask;
				adv_write_lram_8(adv, ADVV_DISC_ENABLE_B,
						 adv->disc_enable);
			}

			if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
					adv->cmd_qng_enabled |= targ_mask;
				else
					adv->cmd_qng_enabled &= ~targ_mask;
			}
		}

		if ((update_type & ADV_TRANS_USER) != 0) {
			if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
					adv->user_disc_enable |= targ_mask;
				else
					adv->user_disc_enable &= ~targ_mask;
			}

			if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
					adv->user_cmd_qng_enabled |= targ_mask;
				else
					adv->user_cmd_qng_enabled &= ~targ_mask;
			}
		}

		/*
		 * If the user specifies either the sync rate, or offset,
		 * but not both, the unspecified parameter defaults to its
		 * current value in transfer negotiations.
		 */
		if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0)
		 || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) {
			/*
			 * If the user provided a sync rate but no offset,
			 * use the current offset.
			 */
			if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
				spi->sync_offset = tconf->offset;

			/*
			 * If the user provided an offset but no sync rate,
			 * use the current sync rate.
			 */
			if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
				spi->sync_period = tconf->period;

			adv_period_offset_to_sdtr(adv, &spi->sync_period,
						  &spi->sync_offset,
						  cts->ccb_h.target_id);

			adv_set_syncrate(adv, /*struct cam_path */NULL,
					 cts->ccb_h.target_id, spi->sync_period,
					 spi->sync_offset, update_type);
		}

		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
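	/*
	 * Illustrative sketch (hypothetical values, not original driver
	 * code): because of the defaulting above, a caller that marks only
	 * the sync rate valid still negotiates a complete rate/offset pair:
	 */
#if 0
	/* Assume tconf->period == 25 and tconf->offset == 15 going in. */
	spi->valid = CTS_SPI_VALID_SYNC_RATE;
	spi->sync_period = 50;
	/* The code above then supplies spi->sync_offset = 15 before the
	 * pair is rounded via adv_period_offset_to_sdtr() and programmed
	 * with adv_set_syncrate(). */
#endif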
	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{
		struct ccb_trans_settings_scsi *scsi;
		struct ccb_trans_settings_spi *spi;
		struct ccb_trans_settings *cts;
		struct adv_transinfo *tconf;
		target_bit_vector target_mask;

		cts = &ccb->cts;
		target_mask = ADV_TID_TO_TARGET_MASK(cts->ccb_h.target_id);

		scsi = &cts->proto_specific.scsi;
		spi = &cts->xport_specific.spi;

		cts->protocol = PROTO_SCSI;
		cts->protocol_version = SCSI_REV_2;
		cts->transport = XPORT_SPI;
		cts->transport_version = 2;

		scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
		spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;

		if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
			tconf = &adv->tinfo[cts->ccb_h.target_id].current;
			if ((adv->disc_enable & target_mask) != 0)
				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
			if ((adv->cmd_qng_enabled & target_mask) != 0)
				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
		} else {
			tconf = &adv->tinfo[cts->ccb_h.target_id].user;
			if ((adv->user_disc_enable & target_mask) != 0)
				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
			if ((adv->user_cmd_qng_enabled & target_mask) != 0)
				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
		}

		spi->sync_period = tconf->period;
		spi->sync_offset = tconf->offset;

		spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
		spi->valid = CTS_SPI_VALID_SYNC_RATE
			   | CTS_SPI_VALID_SYNC_OFFSET
			   | CTS_SPI_VALID_BUS_WIDTH
			   | CTS_SPI_VALID_DISC;
		scsi->valid = CTS_SCSI_VALID_TQ;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		struct ccb_calc_geometry *ccg;
		u_int32_t size_mb;
		u_int32_t secs_per_cylinder;
		int extended;

		ccg = &ccb->ccg;
		size_mb = ccg->volume_size
			/ ((1024L * 1024L) / ccg->block_size);
		extended = (adv->control & ADV_CNTL_BIOS_GT_1GB) != 0;

		if (size_mb > 1024 && extended) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
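	/*
	 * Illustrative arithmetic for the translation above (not driver
	 * code): with 512-byte blocks the divisor is (1024 * 1024) / 512 =
	 * 2048 blocks per megabyte, so a 4GB (8388608 block) volume with
	 * the BIOS ">1GB" option enabled gets extended 255/63 translation:
	 */
#if 0
	size_mb = 8388608 / 2048;		/* 4096MB > 1024MB */
	secs_per_cylinder = 255 * 63;		/* 16065 */
	cylinders = 8388608 / 16065;		/* 522 cylinders */
#endif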
	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
	{

		adv_stop_execution(adv);
		adv_reset_bus(adv, /*initiate_reset*/TRUE);
		adv_start_execution(adv);

		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 7;
		cpi->max_lun = 7;
		cpi->initiator_id = adv->scsi_id;
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Advansys", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->ccb_h.status = CAM_REQ_CMP;
		cpi->transport = XPORT_SPI;
		cpi->transport_version = 2;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}
/*
 * Currently, the output of bus_dmamap_load suits our needs just
 * fine, but should it change, we'd need to do something here.
 */
#define adv_fixup_dmasegs(adv, dm_segs) (struct adv_sg_entry *)(dm_segs)
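
/*
 * Illustrative note, an assumption about layout rather than original
 * driver code: the identity cast above is only valid while the
 * controller's S/G entry mirrors the leading fields of a
 * bus_dma_segment_t, i.e. something shaped like:
 */
#if 0
struct adv_sg_entry {
	u_int32_t addr;		/* must overlay ds_addr */
	u_int32_t bytes;	/* must overlay ds_len */
};
#endif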
static void
adv_execute_ccb(void *arg, bus_dma_segment_t *dm_segs,
		int nsegments, int error)
{
	struct ccb_scsiio *csio;
	struct ccb_hdr *ccb_h;
	struct cam_sim *sim;
	struct adv_softc *adv;
	struct adv_ccb_info *cinfo;
	struct adv_scsi_q scsiq;
	struct adv_sg_head sghead;

	csio = (struct ccb_scsiio *)arg;
	ccb_h = &csio->ccb_h;
	sim = xpt_path_sim(ccb_h->path);
	adv = (struct adv_softc *)cam_sim_softc(sim);
	cinfo = (struct adv_ccb_info *)csio->ccb_h.ccb_cinfo_ptr;

	/*
	 * Setup our done routine to release the simq on
	 * the next ccb that completes.
	 */
	if ((adv->state & ADV_BUSDMA_BLOCK) != 0)
		adv->state |= ADV_BUSDMA_BLOCK_CLEARED;

	if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {
		if ((ccb_h->flags & CAM_CDB_PHYS) == 0) {
			/* XXX Need phystovirt!!!! */
			/* How about pmap_kenter??? */
			scsiq.cdbptr = csio->cdb_io.cdb_ptr;
		} else {
			scsiq.cdbptr = csio->cdb_io.cdb_ptr;
		}
	} else {
		scsiq.cdbptr = csio->cdb_io.cdb_bytes;
	}

	/*
	 * Build up the request
	 */
	scsiq.q1.status = 0;
	scsiq.q1.q_no = 0;
	scsiq.q1.cntl = 0;
	scsiq.q1.sg_queue_cnt = 0;
	scsiq.q1.target_id = ADV_TID_TO_TARGET_MASK(ccb_h->target_id);
	scsiq.q1.target_lun = ccb_h->target_lun;
	scsiq.q1.sense_len = csio->sense_len;
	scsiq.q1.extra_bytes = 0;
	scsiq.q2.ccb_index = cinfo - adv->ccb_infos;
	scsiq.q2.target_ix = ADV_TIDLUN_TO_IX(ccb_h->target_id,
					      ccb_h->target_lun);
	scsiq.q2.flag = 0;
	scsiq.q2.cdb_len = csio->cdb_len;
	if ((ccb_h->flags & CAM_TAG_ACTION_VALID) != 0)
		scsiq.q2.tag_code = csio->tag_action;
	else
		scsiq.q2.tag_code = 0;
	scsiq.q2.vm_id = 0;

	if (nsegments != 0) {
		bus_dmasync_op_t op;

		scsiq.q1.data_addr = dm_segs->ds_addr;
		scsiq.q1.data_cnt = dm_segs->ds_len;
		if (nsegments > 1) {
			scsiq.q1.cntl |= QC_SG_HEAD;
			sghead.entry_cnt
			    = sghead.entry_to_copy
			    = nsegments;
			sghead.res = 0;
			sghead.sg_list = adv_fixup_dmasegs(adv, dm_segs);
			scsiq.sg_head = &sghead;
		} else {
			scsiq.sg_head = NULL;
		}
		if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;
		bus_dmamap_sync(adv->buffer_dmat, cinfo->dmamap, op);
	} else {
		scsiq.q1.data_addr = 0;
		scsiq.q1.data_cnt = 0;
		scsiq.sg_head = NULL;
	}

	/*
	 * Last time we need to check if this SCB needs to
	 * be aborted.
	 */
	if (ccb_h->status != CAM_REQ_INPROG) {
		if (nsegments != 0)
			bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
		adv_clear_state(adv, (union ccb *)csio);
		adv_free_ccb_info(adv, cinfo);
		xpt_done((union ccb *)csio);
		return;
	}

	if (adv_execute_scsi_queue(adv, &scsiq, csio->dxfer_len) != 0) {
		/* Temporary resource shortage */
		adv_set_state(adv, ADV_RESOURCE_SHORTAGE);
		if (nsegments != 0)
			bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
		csio->ccb_h.status = CAM_REQUEUE_REQ;
		adv_clear_state(adv, (union ccb *)csio);
		adv_free_ccb_info(adv, cinfo);
		xpt_done((union ccb *)csio);
		return;
	}
	cinfo->state |= ACCB_ACTIVE;
	ccb_h->status |= CAM_SIM_QUEUED;
	LIST_INSERT_HEAD(&adv->pending_ccbs, ccb_h, sim_links.le);
	/* Schedule our timeout */
	callout_reset(&ccb_h->timeout_ch, (ccb_h->timeout * hz)/1000,
		      adv_timeout, csio);
}
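
/*
 * Illustrative arithmetic (not driver code): CAM expresses timeouts in
 * milliseconds while callout_reset() takes ticks, hence the
 * (timeout * hz) / 1000 conversions used above and elsewhere:
 */
#if 0
	/* With hz == 100 and a 5000ms CAM timeout: */
	int ticks = (5000 * 100) / 1000;	/* 500 ticks, i.e. 5 seconds */
#endif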
static struct adv_ccb_info *
adv_alloc_ccb_info(struct adv_softc *adv)
{
	int error;
	struct adv_ccb_info *cinfo;

	cinfo = &adv->ccb_infos[adv->ccb_infos_allocated];
	cinfo->state = ACCB_FREE;
	error = bus_dmamap_create(adv->buffer_dmat, /*flags*/0,
				  &cinfo->dmamap);
	if (error != 0) {
		kprintf("%s: Unable to allocate CCB info "
		       "dmamap - error %d\n", adv_name(adv), error);
		return (NULL);
	}
	adv->ccb_infos_allocated++;
	return (cinfo);
}

static void
adv_destroy_ccb_info(struct adv_softc *adv, struct adv_ccb_info *cinfo)
{
	bus_dmamap_destroy(adv->buffer_dmat, cinfo->dmamap);
}
void
adv_timeout(void *arg)
{
	union ccb *ccb;
	struct adv_softc *adv;
	struct adv_ccb_info *cinfo;

	ccb = (union ccb *)arg;
	adv = (struct adv_softc *)xpt_path_sim(ccb->ccb_h.path)->softc;
	cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr;

	xpt_print_path(ccb->ccb_h.path);
	kprintf("Timed out\n");

	/* Have we been taken care of already?? */
	if (cinfo == NULL || cinfo->state == ACCB_FREE) {
		return;
	}

	adv_stop_execution(adv);

	if ((cinfo->state & ACCB_ABORT_QUEUED) == 0) {
		struct ccb_hdr *ccb_h;

		/*
		 * In order to simplify the recovery process, we ask the XPT
		 * layer to halt the queue of new transactions and we traverse
		 * the list of pending CCBs and remove their timeouts. This
		 * means that the driver attempts to clear only one error
		 * condition at a time. In general, timeouts that occur
		 * close together are related anyway, so there is no benefit
		 * in attempting to handle errors in parallel. Timeouts will
		 * be reinstated when the recovery process ends.
		 */
		adv_set_state(adv, ADV_IN_TIMEOUT);

		/* This CCB is the CCB representing our recovery actions */
		cinfo->state |= ACCB_RECOVERY_CCB|ACCB_ABORT_QUEUED;

		ccb_h = LIST_FIRST(&adv->pending_ccbs);
		while (ccb_h != NULL) {
			callout_stop(&ccb_h->timeout_ch);
			ccb_h = LIST_NEXT(ccb_h, sim_links.le);
		}

		/* XXX Should send a BDR */
		/* Attempt an abort as our first tack */
		xpt_print_path(ccb->ccb_h.path);
		kprintf("Attempting abort\n");
		adv_abort_ccb(adv, ccb->ccb_h.target_id,
			      ccb->ccb_h.target_lun, ccb,
			      CAM_CMD_TIMEOUT, /*queued_only*/FALSE);
		callout_reset(&ccb->ccb_h.timeout_ch, 2 * hz, adv_timeout, ccb);
	} else {
		/* Our attempt to perform an abort failed, go for a reset */
		xpt_print_path(ccb->ccb_h.path);
		kprintf("Resetting bus\n");
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
		adv_reset_bus(adv, /*initiate_reset*/TRUE);
	}
	adv_start_execution(adv);
}
struct adv_softc *
adv_alloc(device_t dev, bus_space_tag_t tag, bus_space_handle_t bsh)
{
	struct adv_softc *adv = device_get_softc(dev);

	/*
	 * Allocate a storage area for us
	 */
	LIST_INIT(&adv->pending_ccbs);
	SLIST_INIT(&adv->free_ccb_infos);
	adv->dev = dev;
	adv->unit = device_get_unit(dev);
	adv->tag = tag;
	adv->bsh = bsh;

	return (adv);
}
void
adv_free(struct adv_softc *adv)
{
	switch (adv->init_level) {
	case 6:
	{
		struct adv_ccb_info *cinfo;

		while ((cinfo = SLIST_FIRST(&adv->free_ccb_infos)) != NULL) {
			SLIST_REMOVE_HEAD(&adv->free_ccb_infos, links);
			adv_destroy_ccb_info(adv, cinfo);
		}

		bus_dmamap_unload(adv->sense_dmat, adv->sense_dmamap);
	}
	case 5:
		bus_dmamem_free(adv->sense_dmat, adv->sense_buffers,
				adv->sense_dmamap);
	case 4:
		bus_dma_tag_destroy(adv->sense_dmat);
	case 3:
		bus_dma_tag_destroy(adv->buffer_dmat);
	case 2:
		bus_dma_tag_destroy(adv->parent_dmat);
	case 1:
		if (adv->ccb_infos != NULL)
			kfree(adv->ccb_infos, M_DEVBUF);
	case 0:
		break;
	}
}
int
adv_init(struct adv_softc *adv)
{
	struct adv_eeprom_config eeprom_config;
	int checksum, i;
	int max_sync;
	u_int16_t config_lsw;
	u_int16_t config_msw;

	/*
	 * Stop script execution.
	 */
	adv_write_lram_16(adv, ADV_HALTCODE_W, 0x00FE);
	adv_stop_execution(adv);
	if (adv_stop_chip(adv) == 0 || adv_is_chip_halted(adv) == 0) {
		kprintf("adv%d: Unable to halt adapter. Initialization "
		       "failed\n", adv->unit);
		return (1);
	}
	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
	if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
		kprintf("adv%d: Unable to set program counter. Initialization "
		       "failed\n", adv->unit);
		return (1);
	}
	config_msw = ADV_INW(adv, ADV_CONFIG_MSW);
	config_lsw = ADV_INW(adv, ADV_CONFIG_LSW);

	if ((config_msw & ADV_CFG_MSW_CLR_MASK) != 0) {
		config_msw &= ~ADV_CFG_MSW_CLR_MASK;
		/*
		 * XXX The Linux code flags this as an error,
		 * but what should we report to the user???
		 * It seems that clearing the config register
		 * makes this error recoverable.
		 */
		ADV_OUTW(adv, ADV_CONFIG_MSW, config_msw);
	}
	/* Suck in the configuration from the EEProm */
	checksum = adv_get_eeprom_config(adv, &eeprom_config);

	if (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_AUTO_CONFIG) {
		/*
		 * XXX The Linux code sets a warning level for this
		 * condition, yet nothing of meaning is printed to
		 * the user. What does this mean???
		 */
		if (adv->chip_version == 3) {
			if (eeprom_config.cfg_lsw != config_lsw)
				eeprom_config.cfg_lsw = config_lsw;
			if (eeprom_config.cfg_msw != config_msw) {
				eeprom_config.cfg_msw = config_msw;
			}
		}
	}
	if (checksum == eeprom_config.chksum) {
		/* Range/Sanity checking */
		if (eeprom_config.max_total_qng < ADV_MIN_TOTAL_QNG) {
			eeprom_config.max_total_qng = ADV_MIN_TOTAL_QNG;
		}
		if (eeprom_config.max_total_qng > ADV_MAX_TOTAL_QNG) {
			eeprom_config.max_total_qng = ADV_MAX_TOTAL_QNG;
		}
		if (eeprom_config.max_tag_qng > eeprom_config.max_total_qng) {
			eeprom_config.max_tag_qng = eeprom_config.max_total_qng;
		}
		if (eeprom_config.max_tag_qng < ADV_MIN_TAG_Q_PER_DVC) {
			eeprom_config.max_tag_qng = ADV_MIN_TAG_Q_PER_DVC;
		}
		adv->max_openings = eeprom_config.max_total_qng;
		adv->user_disc_enable = eeprom_config.disc_enable;
		adv->user_cmd_qng_enabled = eeprom_config.use_cmd_qng;
		adv->isa_dma_speed = EEPROM_DMA_SPEED(eeprom_config);
		adv->scsi_id = EEPROM_SCSIID(eeprom_config) & ADV_MAX_TID;
		EEPROM_SET_SCSIID(eeprom_config, adv->scsi_id);
		adv->control = eeprom_config.cntl;
		for (i = 0; i <= ADV_MAX_TID; i++) {
			u_int8_t sync_data;

			if ((eeprom_config.init_sdtr & (0x1 << i)) == 0)
				sync_data = 0;
			else
				sync_data = eeprom_config.sdtr_data[i];
			adv_sdtr_to_period_offset(adv,
						  sync_data,
						  &adv->tinfo[i].user.period,
						  &adv->tinfo[i].user.offset,
						  i);
		}
		config_lsw = eeprom_config.cfg_lsw;
		eeprom_config.cfg_msw = config_msw;
	} else {
		u_int8_t sync_data;

		kprintf("adv%d: Warning EEPROM Checksum mismatch. "
		       "Using default device parameters\n", adv->unit);
		/* Set reasonable defaults since we can't read the EEPROM */
		adv->isa_dma_speed = /*ADV_DEF_ISA_DMA_SPEED*/1;
		adv->max_openings = ADV_DEF_MAX_TOTAL_QNG;
		adv->disc_enable = TARGET_BIT_VECTOR_SET;
		adv->user_disc_enable = TARGET_BIT_VECTOR_SET;
		adv->cmd_qng_enabled = TARGET_BIT_VECTOR_SET;
		adv->user_cmd_qng_enabled = TARGET_BIT_VECTOR_SET;
		adv->scsi_id = 7;
		adv->control = 0xFFFF;

		if (adv->chip_version == ADV_CHIP_VER_PCI_ULTRA_3050)
			/* Default to no Ultra to support the 3030 */
			adv->control &= ~ADV_CNTL_SDTR_ENABLE_ULTRA;
		sync_data = ADV_DEF_SDTR_OFFSET | (ADV_DEF_SDTR_INDEX << 4);
		for (i = 0; i <= ADV_MAX_TID; i++) {
			adv_sdtr_to_period_offset(adv, sync_data,
						  &adv->tinfo[i].user.period,
						  &adv->tinfo[i].user.offset,
						  i);
		}
		config_lsw |= ADV_CFG_LSW_SCSI_PARITY_ON;
	}
	config_msw &= ~ADV_CFG_MSW_CLR_MASK;
	config_lsw |= ADV_CFG_LSW_HOST_INT_ON;
	if ((adv->type & (ADV_PCI|ADV_ULTRA)) == (ADV_PCI|ADV_ULTRA)
	 && (adv->control & ADV_CNTL_SDTR_ENABLE_ULTRA) == 0)
		max_sync = 25;
	else
		max_sync = 0;

	for (i = 0; i <= ADV_MAX_TID; i++) {
		if (adv->tinfo[i].user.period < max_sync)
			adv->tinfo[i].user.period = max_sync;
	}
	if (adv_test_external_lram(adv) == 0) {
		if ((adv->type & (ADV_PCI|ADV_ULTRA)) == (ADV_PCI|ADV_ULTRA)) {
			eeprom_config.max_total_qng =
			    ADV_MAX_PCI_ULTRA_INRAM_TOTAL_QNG;
			eeprom_config.max_tag_qng =
			    ADV_MAX_PCI_ULTRA_INRAM_TAG_QNG;
		} else {
			eeprom_config.cfg_msw |= 0x0800;
			config_msw |= 0x0800;
			eeprom_config.max_total_qng =
			    ADV_MAX_PCI_INRAM_TOTAL_QNG;
			eeprom_config.max_tag_qng = ADV_MAX_INRAM_TAG_QNG;
		}
		adv->max_openings = eeprom_config.max_total_qng;
	}
	ADV_OUTW(adv, ADV_CONFIG_MSW, config_msw);
	ADV_OUTW(adv, ADV_CONFIG_LSW, config_lsw);
#if 0
	/*
	 * Don't write the eeprom data back for now.
	 * I'd rather not mess up the user's card. We also don't
	 * fully sanitize the eeprom settings above for the write-back
	 * to be 100% correct.
	 */
	if (adv_set_eeprom_config(adv, &eeprom_config) != 0)
		kprintf("%s: WARNING! Failure writing to EEPROM.\n",
		       adv_name(adv));
#endif

	adv_set_chip_scsiid(adv, adv->scsi_id);
	if (adv_init_lram_and_mcode(adv))
		return (1);

	adv->disc_enable = adv->user_disc_enable;

	adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
	for (i = 0; i <= ADV_MAX_TID; i++) {
		/*
		 * Start off in async mode.
		 */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 i, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		/*
		 * Enable the use of tagged commands on all targets.
		 * This allows the kernel driver to make up its own mind
		 * as it sees fit to tag queue instead of having the
		 * firmware try and second guess the tag_code settings.
		 */
		adv_write_lram_8(adv, ADVV_MAX_DVC_QNG_BEG + i,
				 adv->max_openings);
	}
	adv_write_lram_8(adv, ADVV_USE_TAGGED_QNG_B, TARGET_BIT_VECTOR_SET);
	adv_write_lram_8(adv, ADVV_CAN_TAGGED_QNG_B, TARGET_BIT_VECTOR_SET);
	kprintf("adv%d: AdvanSys %s Host Adapter, SCSI ID %d, queue depth %d\n",
	       adv->unit, (adv->type & ADV_ULTRA) && (max_sync == 0)
	       ? "Ultra SCSI" : "SCSI",
	       adv->scsi_id, adv->max_openings);

	return (0);
}
void
adv_intr(void *arg)
{
	struct adv_softc *adv;
	u_int16_t chipstat;
	u_int16_t saved_ram_addr;
	u_int8_t  ctrl_reg;
	u_int8_t  saved_ctrl_reg;
	u_int8_t  host_flag;

	adv = (struct adv_softc *)arg;

	chipstat = ADV_INW(adv, ADV_CHIP_STATUS);

	/* Is it for us? */
	if ((chipstat & (ADV_CSW_INT_PENDING|ADV_CSW_SCSI_RESET_LATCH)) == 0)
		return;

	ctrl_reg = ADV_INB(adv, ADV_CHIP_CTRL);
	saved_ctrl_reg = ctrl_reg & (~(ADV_CC_SCSI_RESET | ADV_CC_CHIP_RESET |
				       ADV_CC_SINGLE_STEP | ADV_CC_DIAG |
				       ADV_CC_TEST));

	if ((chipstat & (ADV_CSW_SCSI_RESET_LATCH|ADV_CSW_SCSI_RESET_ACTIVE))) {
		kprintf("Detected Bus Reset\n");
		adv_reset_bus(adv, /*initiate_reset*/FALSE);
		return;
	}

	if ((chipstat & ADV_CSW_INT_PENDING) != 0) {

		saved_ram_addr = ADV_INW(adv, ADV_LRAM_ADDR);
		host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);
		adv_write_lram_8(adv, ADVV_HOST_FLAG_B,
				 host_flag | ADV_HOST_FLAG_IN_ISR);

		adv_ack_interrupt(adv);

		if ((chipstat & ADV_CSW_HALTED) != 0
		 && (ctrl_reg & ADV_CC_SINGLE_STEP) != 0) {
			adv_isr_chip_halted(adv);
			saved_ctrl_reg &= ~ADV_CC_HALT;
		} else {
			adv_run_doneq(adv);
		}
		ADV_OUTW(adv, ADV_LRAM_ADDR, saved_ram_addr);
#ifdef DIAGNOSTIC
		if (ADV_INW(adv, ADV_LRAM_ADDR) != saved_ram_addr)
			panic("adv_intr: Unable to set LRAM addr");
#endif
		adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag);
	}

	ADV_OUTB(adv, ADV_CHIP_CTRL, saved_ctrl_reg);
}
static void
adv_run_doneq(struct adv_softc *adv)
{
	struct adv_q_done_info scsiq;
	u_int doneq_head;
	u_int done_qno;

	doneq_head = adv_read_lram_16(adv, ADVV_DONE_Q_TAIL_W) & 0xFF;
	done_qno = adv_read_lram_8(adv, ADV_QNO_TO_QADDR(doneq_head)
				   + ADV_SCSIQ_B_FWD);
	while (done_qno != ADV_QLINK_END) {
		union ccb* ccb;
		struct adv_ccb_info *cinfo;
		u_int done_qaddr;
		u_int sg_queue_cnt;
		int   aborted;

		done_qaddr = ADV_QNO_TO_QADDR(done_qno);

		/* Pull status from this request */
		sg_queue_cnt = adv_copy_lram_doneq(adv, done_qaddr, &scsiq,
						   adv->max_dma_count);

		/* Mark it as free */
		adv_write_lram_8(adv, done_qaddr + ADV_SCSIQ_B_STATUS,
				 scsiq.q_status & ~(QS_READY|QS_ABORTED));

		/* Process request based on retrieved info */
		if ((scsiq.cntl & QC_SG_HEAD) != 0) {
			u_int i;

			/*
			 * S/G based request. Free all of the queue
			 * structures that contained S/G information.
			 */
			for (i = 0; i < sg_queue_cnt; i++) {
				done_qno = adv_read_lram_8(adv, done_qaddr
							   + ADV_SCSIQ_B_FWD);

#ifdef DIAGNOSTIC
				if (done_qno == ADV_QLINK_END) {
					panic("adv_qdone: Corrupted SG "
					      "list encountered");
				}
#endif
				done_qaddr = ADV_QNO_TO_QADDR(done_qno);

				/* Mark SG queue as free */
				adv_write_lram_8(adv, done_qaddr
						 + ADV_SCSIQ_B_STATUS, QS_FREE);
			}
		} else
			sg_queue_cnt = 0;
#ifdef DIAGNOSTIC
		if (adv->cur_active < (sg_queue_cnt + 1))
			panic("adv_qdone: Attempting to free more "
			      "queues than are active");
#endif
		adv->cur_active -= sg_queue_cnt + 1;

		aborted = (scsiq.q_status & QS_ABORTED) != 0;

		if ((scsiq.q_status != QS_DONE)
		 && (scsiq.q_status & QS_ABORTED) == 0)
			panic("adv_qdone: completed scsiq with unknown status");

		scsiq.remain_bytes += scsiq.extra_bytes;

		if ((scsiq.d3.done_stat == QD_WITH_ERROR) &&
		    (scsiq.d3.host_stat == QHSTA_M_DATA_OVER_RUN)) {
			if ((scsiq.cntl & (QC_DATA_IN|QC_DATA_OUT)) == 0) {
				scsiq.d3.done_stat = QD_NO_ERROR;
				scsiq.d3.host_stat = QHSTA_NO_ERROR;
			}
		}

		cinfo = &adv->ccb_infos[scsiq.d2.ccb_index];
		ccb = cinfo->ccb;
		ccb->csio.resid = scsiq.remain_bytes;
		adv_done(adv, ccb,
			 scsiq.d3.done_stat, scsiq.d3.host_stat,
			 scsiq.d3.scsi_stat, scsiq.q_no);

		doneq_head = done_qno;
		done_qno = adv_read_lram_8(adv, done_qaddr + ADV_SCSIQ_B_FWD);
	}
	adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, doneq_head);
}
void
adv_done(struct adv_softc *adv, union ccb *ccb, u_int done_stat,
	 u_int host_stat, u_int scsi_status, u_int q_no)
{
	struct adv_ccb_info *cinfo;

	cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr;
	LIST_REMOVE(&ccb->ccb_h, sim_links.le);
	callout_stop(&ccb->ccb_h.timeout_ch);
	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(adv->buffer_dmat, cinfo->dmamap, op);
		bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
	}

	switch (done_stat) {
	case QD_NO_ERROR:
		if (host_stat == QHSTA_NO_ERROR) {
			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		}
		xpt_print_path(ccb->ccb_h.path);
		kprintf("adv_done - queue done without error, "
		       "but host status non-zero(%x)\n", host_stat);
		/*FALLTHROUGH*/
	case QD_WITH_ERROR:
		switch (host_stat) {
		case QHSTA_M_TARGET_STATUS_BUSY:
		case QHSTA_M_BAD_QUEUE_FULL_OR_BUSY:
			/*
			 * Assume that if we were a tagged transaction
			 * the target reported queue full. Otherwise,
			 * report busy. The firmware really should just
			 * pass the original status back up to us even
			 * if it thinks the target was in error for
			 * returning this status as no other transactions
			 * from this initiator are in effect, but this
			 * ignores multi-initiator setups and there is
			 * evidence that the firmware gets its per-device
			 * transaction counts screwed up occasionally.
			 */
			ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
			if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0
			 && host_stat != QHSTA_M_TARGET_STATUS_BUSY)
				scsi_status = SCSI_STATUS_QUEUE_FULL;
			else
				scsi_status = SCSI_STATUS_BUSY;
			adv_abort_ccb(adv, ccb->ccb_h.target_id,
				      ccb->ccb_h.target_lun,
				      /*ccb*/NULL, CAM_REQUEUE_REQ,
				      /*queued_only*/TRUE);
			/*FALLTHROUGH*/
		case QHSTA_M_NO_AUTO_REQ_SENSE:
		case QHSTA_NO_ERROR:
			ccb->csio.scsi_status = scsi_status;
			switch (scsi_status) {
			case SCSI_STATUS_CHECK_COND:
			case SCSI_STATUS_CMD_TERMINATED:
				ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
				/* Structure copy */
				ccb->csio.sense_data =
				    adv->sense_buffers[q_no - 1];
				/*FALLTHROUGH*/
			case SCSI_STATUS_BUSY:
			case SCSI_STATUS_RESERV_CONFLICT:
			case SCSI_STATUS_QUEUE_FULL:
			case SCSI_STATUS_COND_MET:
			case SCSI_STATUS_INTERMED:
			case SCSI_STATUS_INTERMED_COND_MET:
				ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
				break;
			case SCSI_STATUS_OK:
				ccb->ccb_h.status |= CAM_REQ_CMP;
				break;
			}
			break;
		case QHSTA_M_SEL_TIMEOUT:
			ccb->ccb_h.status = CAM_SEL_TIMEOUT;
			break;
		case QHSTA_M_DATA_OVER_RUN:
			ccb->ccb_h.status = CAM_DATA_RUN_ERR;
			break;
		case QHSTA_M_UNEXPECTED_BUS_FREE:
			ccb->ccb_h.status = CAM_UNEXP_BUSFREE;
			break;
		case QHSTA_M_BAD_BUS_PHASE_SEQ:
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case QHSTA_M_BAD_CMPL_STATUS_IN:
			/* No command complete after a status message */
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case QHSTA_D_EXE_SCSI_Q_BUSY_TIMEOUT:
		case QHSTA_M_WTM_TIMEOUT:
		case QHSTA_M_HUNG_REQ_SCSI_BUS_RESET:
			/* The SCSI bus hung in a phase */
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			adv_reset_bus(adv, /*initiate_reset*/TRUE);
			break;
		case QHSTA_M_AUTO_REQ_SENSE_FAIL:
			ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
			break;
		case QHSTA_D_QDONE_SG_LIST_CORRUPTED:
		case QHSTA_D_ASC_DVC_ERROR_CODE_SET:
		case QHSTA_D_HOST_ABORT_FAILED:
		case QHSTA_D_EXE_SCSI_Q_FAILED:
		case QHSTA_D_ASPI_NO_BUF_POOL:
		case QHSTA_M_BAD_TAG_CODE:
		case QHSTA_D_LRAM_CMP_ERROR:
		case QHSTA_M_MICRO_CODE_ERROR_HALT:
		default:
			panic("%s: Unhandled Host status error %x",
			      adv_name(adv), host_stat);
			/* NOTREACHED */
		}
		break;
	case QD_ABORTED_BY_HOST:
		/* Don't clobber any, more explicit, error codes we've set */
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
			ccb->ccb_h.status = CAM_REQ_ABORTED;
		break;
	default:
		xpt_print_path(ccb->ccb_h.path);
		kprintf("adv_done - queue done with unknown status %x:%x\n",
		       done_stat, host_stat);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		break;
	}
	adv_clear_state(adv, ccb);
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP
	 && (ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
	}
	adv_free_ccb_info(adv, cinfo);
	/*
	 * Null this out so that we catch driver bugs that cause a
	 * ccb to be completed twice.
	 */
	ccb->ccb_h.ccb_cinfo_ptr = NULL;
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	xpt_done(ccb);
}
/*
 * Function to poll for command completion when
 * interrupts are disabled (crash dumps)
 */
static void
adv_poll(struct cam_sim *sim)
{
	adv_intr(cam_sim_softc(sim));
}
/*
 * Attach all the sub-devices we can find
 */
int
adv_attach(struct adv_softc *adv)
{
	struct ccb_setasync csa;
	int max_sg;

	/*
	 * Allocate an array of ccb mapping structures. We put the
	 * index of the ccb_info structure into the queue representing
	 * a transaction and use it for mapping the queue to the
	 * upper level SCSI transaction it represents.
	 */
	adv->ccb_infos = kmalloc(sizeof(*adv->ccb_infos) * adv->max_openings,
				 M_DEVBUF, M_WAITOK);

	adv->init_level++;
	/*
	 * Create our DMA tags. These tags define the kinds of device
	 * accessible memory allocations and memory mappings we will
	 * need to perform during normal operation.
	 *
	 * Unless we need to further restrict the allocation, we rely
	 * on the restrictions of the parent dmat, hence the common
	 * use of MAXADDR and MAXSIZE.
	 *
	 * The ASC boards use chains of "queues" (the transactional
	 * resources on the board) to represent long S/G lists.
	 * The first queue represents the command and holds a
	 * single address and data pair. The queues that follow
	 * can each hold ADV_SG_LIST_PER_Q entries. Given the
	 * total number of queues, we can express the largest
	 * transaction we can map. We reserve a few queues for
	 * error recovery. Take those into account as well.
	 *
	 * There is a way to take an interrupt to download the
	 * next batch of S/G entries if there are more than 255
	 * of them (the counter in the queue structure is a u_int8_t).
	 * We don't use this feature, so limit the S/G list size
	 * accordingly.
	 */
	max_sg = (adv->max_openings - ADV_MIN_FREE_Q - 1) * ADV_SG_LIST_PER_Q;
	if (max_sg > 255)
		max_sg = 255;
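
	/*
	 * Illustrative arithmetic (hypothetical constants, not driver
	 * code): with, say, 16 openings, 2 reserved recovery queues and
	 * 7 S/G entries per follow-on queue, the board could chain:
	 */
#if 0
	max_sg = (16 - 2 - 1) * 7;	/* = 91 entries, under the 255 cap */
#endif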
	/* DMA tag for mapping buffers into device visible space. */
	if (bus_dma_tag_create(adv->parent_dmat, /*alignment*/1, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       /*maxsize*/MAXBSIZE,
			       /*nsegments*/max_sg,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/BUS_DMA_ALLOCNOW,
			       &adv->buffer_dmat) != 0) {
		return (ENXIO);
	}

	adv->init_level++;

	/* DMA tag for our sense buffers */
	if (bus_dma_tag_create(adv->parent_dmat, /*alignment*/1, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       sizeof(struct scsi_sense_data)*adv->max_openings,
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &adv->sense_dmat) != 0) {
		return (ENXIO);
	}

	adv->init_level++;

	/* Allocation for our sense buffers */
	if (bus_dmamem_alloc(adv->sense_dmat, (void *)&adv->sense_buffers,
			     BUS_DMA_NOWAIT, &adv->sense_dmamap) != 0) {
		return (ENOMEM);
	}

	adv->init_level++;

	/* And permanently map them */
	bus_dmamap_load(adv->sense_dmat, adv->sense_dmamap,
			adv->sense_buffers,
			sizeof(struct scsi_sense_data)*adv->max_openings,
			adv_map, &adv->sense_physbase, /*flags*/0);

	adv->init_level++;
	if (adv_start_chip(adv) != 1) {
		kprintf("adv%d: Unable to start on board processor. Aborting.\n",
		       adv->unit);
		return (ENXIO);
	}

	/*
	 * Construct our SIM entry.
	 */
	adv->sim = cam_sim_alloc(adv_action, adv_poll, "adv", adv, adv->unit,
				 &sim_mplock, 1, adv->max_openings, NULL);
	if (adv->sim == NULL)
		return (ENOMEM);

	/*
	 * Register the bus.
	 *
	 * XXX Twin Channel EISA Cards???
	 */
	if (xpt_bus_register(adv->sim, 0) != CAM_SUCCESS) {
		cam_sim_free(adv->sim);
		return (ENXIO);
	}

	if (xpt_create_path(&adv->path, /*periph*/NULL, cam_sim_path(adv->sim),
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
	    != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(adv->sim));
		cam_sim_free(adv->sim);
		return (ENXIO);
	}

	xpt_setup_ccb(&csa.ccb_h, adv->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_FOUND_DEVICE|AC_LOST_DEVICE;
	csa.callback = advasync;
	csa.callback_arg = adv;
	xpt_action((union ccb *)&csa);