1 /* $FreeBSD: src/sys/dev/isp/isp_pci.c,v 1.78.2.4 2002/10/11 18:50:53 mjacob Exp $ */
2 /* $DragonFly: src/sys/dev/disk/isp/isp_pci.c,v 1.4 2004/03/15 01:10:43 dillon Exp $ */
4 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
7 * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice immediately at the beginning of the file, without modification,
14 * this list of conditions, and the following disclaimer.
15 * 2. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
22 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/kernel.h>
34 #include <sys/module.h>
37 #include <bus/pci/pcireg.h>
38 #include <bus/pci/pcivar.h>
40 #include <machine/bus_memio.h>
41 #include <machine/bus_pio.h>
42 #include <machine/bus.h>
43 #include <machine/resource.h>
45 #include <sys/malloc.h>
47 #include "isp_freebsd.h"
49 static u_int16_t isp_pci_rd_reg(struct ispsoftc *, int);
50 static void isp_pci_wr_reg(struct ispsoftc *, int, u_int16_t);
51 static u_int16_t isp_pci_rd_reg_1080(struct ispsoftc *, int);
52 static void isp_pci_wr_reg_1080(struct ispsoftc *, int, u_int16_t);
54 isp_pci_rd_isr(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
56 isp_pci_rd_isr_2300(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
57 static int isp_pci_mbxdma(struct ispsoftc *);
59 isp_pci_dmasetup(struct ispsoftc *, XS_T *, ispreq_t *, u_int16_t *, u_int16_t);
61 isp_pci_dmateardown(struct ispsoftc *, XS_T *, u_int16_t);
63 static void isp_pci_reset1(struct ispsoftc *);
64 static void isp_pci_dumpregs(struct ispsoftc *, const char *);
66 static struct ispmdvec mdvec = {
77 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
80 static struct ispmdvec mdvec_1080 = {
91 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
94 static struct ispmdvec mdvec_12160 = {
105 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
108 static struct ispmdvec mdvec_2100 = {
120 static struct ispmdvec mdvec_2200 = {
132 static struct ispmdvec mdvec_2300 = {
144 #ifndef PCIM_CMD_INVEN
145 #define PCIM_CMD_INVEN 0x10
147 #ifndef PCIM_CMD_BUSMASTEREN
148 #define PCIM_CMD_BUSMASTEREN 0x0004
150 #ifndef PCIM_CMD_PERRESPEN
151 #define PCIM_CMD_PERRESPEN 0x0040
153 #ifndef PCIM_CMD_SEREN
154 #define PCIM_CMD_SEREN 0x0100
158 #define PCIR_COMMAND 0x04
161 #ifndef PCIR_CACHELNSZ
162 #define PCIR_CACHELNSZ 0x0c
165 #ifndef PCIR_LATTIMER
166 #define PCIR_LATTIMER 0x0d
170 #define PCIR_ROMADDR 0x30
173 #ifndef PCI_VENDOR_QLOGIC
174 #define PCI_VENDOR_QLOGIC 0x1077
177 #ifndef PCI_PRODUCT_QLOGIC_ISP1020
178 #define PCI_PRODUCT_QLOGIC_ISP1020 0x1020
181 #ifndef PCI_PRODUCT_QLOGIC_ISP1080
182 #define PCI_PRODUCT_QLOGIC_ISP1080 0x1080
185 #ifndef PCI_PRODUCT_QLOGIC_ISP10160
186 #define PCI_PRODUCT_QLOGIC_ISP10160 0x1016
189 #ifndef PCI_PRODUCT_QLOGIC_ISP12160
190 #define PCI_PRODUCT_QLOGIC_ISP12160 0x1216
193 #ifndef PCI_PRODUCT_QLOGIC_ISP1240
194 #define PCI_PRODUCT_QLOGIC_ISP1240 0x1240
197 #ifndef PCI_PRODUCT_QLOGIC_ISP1280
198 #define PCI_PRODUCT_QLOGIC_ISP1280 0x1280
201 #ifndef PCI_PRODUCT_QLOGIC_ISP2100
202 #define PCI_PRODUCT_QLOGIC_ISP2100 0x2100
205 #ifndef PCI_PRODUCT_QLOGIC_ISP2200
206 #define PCI_PRODUCT_QLOGIC_ISP2200 0x2200
209 #ifndef PCI_PRODUCT_QLOGIC_ISP2300
210 #define PCI_PRODUCT_QLOGIC_ISP2300 0x2300
213 #ifndef PCI_PRODUCT_QLOGIC_ISP2312
214 #define PCI_PRODUCT_QLOGIC_ISP2312 0x2312
217 #define PCI_QLOGIC_ISP1020 \
218 ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)
220 #define PCI_QLOGIC_ISP1080 \
221 ((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)
223 #define PCI_QLOGIC_ISP10160 \
224 ((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)
226 #define PCI_QLOGIC_ISP12160 \
227 ((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)
229 #define PCI_QLOGIC_ISP1240 \
230 ((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)
232 #define PCI_QLOGIC_ISP1280 \
233 ((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)
235 #define PCI_QLOGIC_ISP2100 \
236 ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)
238 #define PCI_QLOGIC_ISP2200 \
239 ((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)
241 #define PCI_QLOGIC_ISP2300 \
242 ((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)
244 #define PCI_QLOGIC_ISP2312 \
245 ((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)
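/*
 * These composite IDs are laid out as (device << 16) | vendor, matching both
 * the value pci_get_devid() returns (compared against in attach below) and
 * the value the probe routine assembles from pci_get_device() and
 * pci_get_vendor().  For example, PCI_QLOGIC_ISP2300 works out to 0x23001077.
 */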
248 * Odd case for some AMI raid cards... We need to *not* attach to this.
250 #define AMI_RAID_SUBVENDOR_ID 0x101e
252 #define IO_MAP_REG 0x10
253 #define MEM_MAP_REG 0x14
255 #define PCI_DFLT_LTNCY 0x40
256 #define PCI_DFLT_LNSZ 0x10
258 static int isp_pci_probe (device_t);
259 static int isp_pci_attach (device_t);
262 struct isp_pcisoftc {
263 struct ispsoftc pci_isp;
265 struct resource * pci_reg;
266 bus_space_tag_t pci_st;
267 bus_space_handle_t pci_sh;
269 int16_t pci_poff[_NREG_BLKS];
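/*
 * Note that struct ispsoftc is the first member of the PCI softc, so the
 * core code's (struct ispsoftc *) and this file's (struct isp_pcisoftc *)
 * views of the same pointer (cast back and forth throughout below, e.g. in
 * IspVirt2Off) refer to the same object.
 */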
273 ispfwfunc *isp_get_firmware_p = NULL;
275 static device_method_t isp_pci_methods[] = {
276 /* Device interface */
277 DEVMETHOD(device_probe, isp_pci_probe),
278 DEVMETHOD(device_attach, isp_pci_attach),
281 static void isp_pci_intr(void *);
283 static driver_t isp_pci_driver = {
284 "isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
286 static devclass_t isp_devclass;
287 DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);
288 MODULE_VERSION(isp, 1);
291 isp_pci_probe(device_t dev)
293 switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
294 case PCI_QLOGIC_ISP1020:
295 device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
297 case PCI_QLOGIC_ISP1080:
298 device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
300 case PCI_QLOGIC_ISP1240:
301 device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
303 case PCI_QLOGIC_ISP1280:
304 device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
306 case PCI_QLOGIC_ISP10160:
307 device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter");
309 case PCI_QLOGIC_ISP12160:
310 if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
313 device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
315 case PCI_QLOGIC_ISP2100:
316 device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
318 case PCI_QLOGIC_ISP2200:
319 device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
321 case PCI_QLOGIC_ISP2300:
322 device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
324 case PCI_QLOGIC_ISP2312:
325 device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
330 if (device_get_unit(dev) == 0 && bootverbose) {
331 printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
332 "Core Version %d.%d\n",
333 ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
334 ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
337 * XXXX: Here is where we might load the f/w module
338 * XXXX: (or increase a reference count to it).
344 isp_pci_attach(device_t dev)
346 struct resource *regs, *irq;
347 int unit, bitmap, rtp, rgd, iqd, m1, m2, isp_debug;
348 u_int32_t data, cmd, linesz, psize, basetype;
349 struct isp_pcisoftc *pcs;
350 struct ispsoftc *isp = NULL;
351 struct ispmdvec *mdvp;
356 * Figure out if we're supposed to skip this one.
358 unit = device_get_unit(dev);
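/*
 * isp_disable is a per-unit bitmask read from the kernel environment: bit N
 * set means "do not configure unit N".  As an illustration only, a setting of
 * isp_disable=0x5 would skip units 0 and 2.
 */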
359 if (getenv_int("isp_disable", &bitmap)) {
360 if (bitmap & (1 << unit)) {
361 device_printf(dev, "not configuring\n");
363 * But return '0' to preserve HBA numbering.
369 pcs = malloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_WAITOK | M_ZERO);
372 * Figure out which we should try first - memory mapping or i/o mapping?
376 m2 = PCIM_CMD_PORTEN;
378 m1 = PCIM_CMD_PORTEN;
382 if (getenv_int("isp_mem_map", &bitmap)) {
383 if (bitmap & (1 << unit)) {
385 m2 = PCIM_CMD_PORTEN;
389 if (getenv_int("isp_io_map", &bitmap)) {
390 if (bitmap & (1 << unit)) {
391 m1 = PCIM_CMD_PORTEN;
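/*
 * At this point m1 holds the register mapping we will try first and m2 the
 * fallback; the isp_mem_map and isp_io_map tunables above are per-unit
 * bitmasks that force memory-space or I/O-space mapping for a given unit.
 */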
396 linesz = PCI_DFLT_LNSZ;
400 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
402 rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
403 rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
404 regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE);
406 if (regs == NULL && (cmd & m2)) {
407 rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
408 rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
409 regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE);
412 device_printf(dev, "unable to map any ports\n");
416 device_printf(dev, "using %s space register mapping\n",
417 (rgd == IO_MAP_REG)? "I/O" : "Memory");
420 pcs->pci_st = rman_get_bustag(regs);
421 pcs->pci_sh = rman_get_bushandle(regs);
423 pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
424 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
425 pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
426 pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
427 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
429 basetype = ISP_HA_SCSI_UNKNOWN;
430 psize = sizeof (sdparam);
431 lim = BUS_SPACE_MAXSIZE_32BIT;
432 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) {
434 basetype = ISP_HA_SCSI_UNKNOWN;
435 psize = sizeof (sdparam);
436 lim = BUS_SPACE_MAXSIZE_24BIT;
438 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) {
440 basetype = ISP_HA_SCSI_1080;
441 psize = sizeof (sdparam);
442 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
443 ISP1080_DMA_REGS_OFF;
445 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) {
447 basetype = ISP_HA_SCSI_1240;
448 psize = 2 * sizeof (sdparam);
449 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
450 ISP1080_DMA_REGS_OFF;
452 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) {
454 basetype = ISP_HA_SCSI_1280;
455 psize = 2 * sizeof (sdparam);
456 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
457 ISP1080_DMA_REGS_OFF;
459 if (pci_get_devid(dev) == PCI_QLOGIC_ISP10160) {
461 basetype = ISP_HA_SCSI_10160;
462 psize = sizeof (sdparam);
463 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
464 ISP1080_DMA_REGS_OFF;
466 if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) {
468 basetype = ISP_HA_SCSI_12160;
469 psize = 2 * sizeof (sdparam);
470 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
471 ISP1080_DMA_REGS_OFF;
473 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) {
475 basetype = ISP_HA_FC_2100;
476 psize = sizeof (fcparam);
477 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
478 PCI_MBOX_REGS2100_OFF;
479 if (pci_get_revid(dev) < 3) {
481 * XXX: Need to get the actual revision
482 * XXX: number of the 2100 FB. At any rate,
483 * XXX: lower cache line size for early revision
489 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) {
491 basetype = ISP_HA_FC_2200;
492 psize = sizeof (fcparam);
493 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
494 PCI_MBOX_REGS2100_OFF;
496 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) {
498 basetype = ISP_HA_FC_2300;
499 psize = sizeof (fcparam);
500 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
501 PCI_MBOX_REGS2300_OFF;
503 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312) {
505 basetype = ISP_HA_FC_2312;
506 psize = sizeof (fcparam);
507 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
508 PCI_MBOX_REGS2300_OFF;
511 isp->isp_param = malloc(psize, M_DEVBUF, M_WAITOK | M_ZERO);
512 isp->isp_mdvec = mdvp;
513 isp->isp_type = basetype;
514 isp->isp_revision = pci_get_revid(dev);
515 #ifdef ISP_TARGET_MODE
516 isp->isp_role = ISP_ROLE_BOTH;
518 isp->isp_role = ISP_DEFAULT_ROLES;
524 * Try and find firmware for this device.
527 if (isp_get_firmware_p) {
528 int device = (int) pci_get_device(dev);
529 #ifdef ISP_TARGET_MODE
530 (*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw);
532 (*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw);
537 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
540 cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN |
541 PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
542 if (IS_2300(isp)) { /* per QLogic errata */
543 cmd &= ~PCIM_CMD_INVEN;
547 * Can't tell if ROM will hang on 'ABOUT FIRMWARE' command.
549 isp->isp_touched = 1;
552 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
555 * Make sure the Cache Line Size register is set sensibly.
557 data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
558 if (data != linesz) {
559 data = PCI_DFLT_LNSZ;
560 isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data);
561 pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
565 * Make sure the Latency Timer is sane.
567 data = pci_read_config(dev, PCIR_LATTIMER, 1);
568 if (data < PCI_DFLT_LTNCY) {
569 data = PCI_DFLT_LTNCY;
570 isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data);
571 pci_write_config(dev, PCIR_LATTIMER, data, 1);
575 * Make sure we've disabled the ROM.
577 data = pci_read_config(dev, PCIR_ROMADDR, 4);
578 data &= ~1;	/* clear the expansion ROM enable bit */
579 pci_write_config(dev, PCIR_ROMADDR, data, 4);
582 irq = bus_alloc_resource(dev, SYS_RES_IRQ, &iqd, 0, ~0,
583 1, RF_ACTIVE | RF_SHAREABLE);
585 device_printf(dev, "could not allocate interrupt\n");
589 if (getenv_int("isp_no_fwload", &bitmap)) {
590 if (bitmap & (1 << unit))
591 isp->isp_confopts |= ISP_CFG_NORELOAD;
593 if (getenv_int("isp_fwload", &bitmap)) {
594 if (bitmap & (1 << unit))
595 isp->isp_confopts &= ~ISP_CFG_NORELOAD;
597 if (getenv_int("isp_no_nvram", &bitmap)) {
598 if (bitmap & (1 << unit))
599 isp->isp_confopts |= ISP_CFG_NONVRAM;
601 if (getenv_int("isp_nvram", &bitmap)) {
602 if (bitmap & (1 << unit))
603 isp->isp_confopts &= ~ISP_CFG_NONVRAM;
605 if (getenv_int("isp_fcduplex", &bitmap)) {
606 if (bitmap & (1 << unit))
607 isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
609 if (getenv_int("isp_no_fcduplex", &bitmap)) {
610 if (bitmap & (1 << unit))
611 isp->isp_confopts &= ~ISP_CFG_FULL_DUPLEX;
613 if (getenv_int("isp_nport", &bitmap)) {
614 if (bitmap & (1 << unit))
615 isp->isp_confopts |= ISP_CFG_NPORT;
619 * Because the resource_*_value functions can neither return
620 * 64 bit integer values, nor can they be directly coerced
621 * to interpret the right hand side of the assignment as
622 * you want them to interpret it, we have to force WWN
623 * hint replacement to specify WWN strings with a leading
624 * 'w' (e.g. w50000000aaaa0001). Sigh.
626 if (getenv_quad("isp_portwwn", &wwn)) {
627 isp->isp_osinfo.default_port_wwn = wwn;
628 isp->isp_confopts |= ISP_CFG_OWNWWPN;
630 if (isp->isp_osinfo.default_port_wwn == 0) {
631 isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull;
634 if (getenv_quad("isp_nodewwn", &wwn)) {
635 isp->isp_osinfo.default_node_wwn = wwn;
636 isp->isp_confopts |= ISP_CFG_OWNWWNN;
638 if (isp->isp_osinfo.default_node_wwn == 0) {
639 isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull;
643 (void) getenv_int("isp_debug", &isp_debug);
644 if (bus_setup_intr(dev, irq, INTR_TYPE_CAM, isp_pci_intr,
646 device_printf(dev, "could not setup interrupt\n");
650 #ifdef ISP_FW_CRASH_DUMP
652 if (getenv_int("isp_fw_dump_enable", &bitmap)) {
653 if (bitmap & (1 << unit)) {
656 amt = QLA2200_RISC_IMAGE_DUMP_SIZE;
657 } else if (IS_23XX(isp)) {
658 amt = QLA2300_RISC_IMAGE_DUMP_SIZE;
661 FCPARAM(isp)->isp_dump_data =
662 malloc(amt, M_DEVBUF, M_WAITOK);
663 bzero(FCPARAM(isp)->isp_dump_data, amt);
666 "f/w crash dumps not supported for card\n");
673 isp->isp_port = pci_get_function(dev);
677 * Set up logging levels.
680 isp->isp_dblev = isp_debug;
682 isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
685 isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
688 * Make sure we're in reset state.
693 if (isp->isp_state != ISP_RESETSTATE) {
698 if (isp->isp_state != ISP_INITSTATE) {
699 /* If we're a Fibre Channel Card, we allow deferred attach */
707 if (isp->isp_state != ISP_RUNSTATE) {
708 /* If we're a Fibre Channel Card, we allow deferred attach */
716 * XXXX: Here is where we might unload the f/w module
717 * XXXX: (or decrease the reference count to it).
724 if (pcs && pcs->ih) {
725 (void) bus_teardown_intr(dev, irq, pcs->ih);
729 (void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq);
734 (void) bus_release_resource(dev, rtp, rgd, regs);
738 if (pcs->pci_isp.isp_param)
739 free(pcs->pci_isp.isp_param, M_DEVBUF);
744 * XXXX: Here is where we might unload the f/w module
745 * XXXX: (or decrease the reference count to it).
751 isp_pci_intr(void *arg)
753 struct ispsoftc *isp = arg;
754 u_int16_t isr, sema, mbox;
758 if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
761 int iok = isp->isp_osinfo.intsok;
762 isp->isp_osinfo.intsok = 0;
763 isp_intr(isp, isr, sema, mbox);
764 isp->isp_osinfo.intsok = iok;
770 #define IspVirt2Off(a, x) \
771 (((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
772 _BLK_REG_SHFT] + ((x) & 0xff))
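/*
 * IspVirt2Off translates a chip-independent register offset into an offset in
 * the PCI register window: the block bits index the pci_poff[] table filled in
 * by isp_pci_attach (BIU, mailbox, SXP, RISC and DMA blocks), and the low byte
 * is the register's offset within that block.  This is how the same core-code
 * register names reach different PCI offsets on 1020/1080/2x00 class parts.
 */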
774 #define BXR2(pcs, off) \
775 bus_space_read_2(pcs->pci_st, pcs->pci_sh, off)
776 #define BXW2(pcs, off, v) \
777 bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v)
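/*
 * Read a 16-bit register repeatedly (bounded at 1000 tries) until two
 * back-to-back reads return the same value; used below for ISR, semaphore and
 * mailbox reads, where a register sampled while the chip is updating it can
 * return a transient value.
 */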
781 isp_pci_rd_debounced(struct ispsoftc *isp, int off, u_int16_t *rp)
783 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
784 u_int16_t val0, val1;
788 val0 = BXR2(pcs, IspVirt2Off(isp, off));
789 val1 = BXR2(pcs, IspVirt2Off(isp, off));
790 } while (val0 != val1 && ++i < 1000);
799 isp_pci_rd_isr(struct ispsoftc *isp, u_int16_t *isrp,
800 u_int16_t *semap, u_int16_t *mbp)
802 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
806 if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
809 if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
813 isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR));
814 sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA));
816 isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
817 isr &= INT_PENDING_MASK(isp);
818 sema &= BIU_SEMA_LOCK;
819 if (isr == 0 && sema == 0) {
823 if ((*semap = sema) != 0) {
825 if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) {
829 *mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0));
836 isp_pci_rd_isr_2300(struct ispsoftc *isp, u_int16_t *isrp,
837 u_int16_t *semap, u_int16_t *mbox0p)
839 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
842 if ((BXR2(pcs, IspVirt2Off(isp, BIU_ISR)) & BIU2100_ISR_RISC_INT) == 0) {
846 r2hisr = bus_space_read_4(pcs->pci_st, pcs->pci_sh,
847 IspVirt2Off(pcs, BIU_R2HSTSLO));
848 isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
849 if ((r2hisr & BIU_R2HST_INTR) == 0) {
853 switch (r2hisr & BIU_R2HST_ISTAT_MASK) {
854 case ISPR2HST_ROM_MBX_OK:
855 case ISPR2HST_ROM_MBX_FAIL:
856 case ISPR2HST_MBX_OK:
857 case ISPR2HST_MBX_FAIL:
858 case ISPR2HST_ASYNC_EVENT:
859 *isrp = r2hisr & 0xffff;
860 *mbox0p = (r2hisr >> 16);
863 case ISPR2HST_RIO_16:
864 *isrp = r2hisr & 0xffff;
865 *mbox0p = ASYNC_RIO1;
869 *isrp = r2hisr & 0xffff;
870 *mbox0p = ASYNC_CMD_CMPLT;
873 case ISPR2HST_FPOST_CTIO:
874 *isrp = r2hisr & 0xffff;
875 *mbox0p = ASYNC_CTIO_DONE;
878 case ISPR2HST_RSPQ_UPDATE:
879 *isrp = r2hisr & 0xffff;
889 isp_pci_rd_reg(struct ispsoftc *isp, int regoff)
892 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
895 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
897 * We will assume that someone has paused the RISC processor.
899 oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
900 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
901 oldconf | BIU_PCI_CONF1_SXP);
903 rv = BXR2(pcs, IspVirt2Off(isp, regoff));
904 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
905 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
911 isp_pci_wr_reg(struct ispsoftc *isp, int regoff, u_int16_t val)
913 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
916 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
918 * We will assume that someone has paused the RISC processor.
920 oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
921 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
922 oldconf | BIU_PCI_CONF1_SXP);
924 BXW2(pcs, IspVirt2Off(isp, regoff), val);
925 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
926 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
931 isp_pci_rd_reg_1080(struct ispsoftc *isp, int regoff)
933 u_int16_t rv, oc = 0;
934 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
936 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
937 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
940 * We will assume that someone has paused the RISC processor.
942 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
943 tc = oc & ~BIU_PCI1080_CONF1_DMA;
944 if (regoff & SXP_BANK1_SELECT)
945 tc |= BIU_PCI1080_CONF1_SXP1;
947 tc |= BIU_PCI1080_CONF1_SXP0;
948 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
949 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
950 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
951 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
952 oc | BIU_PCI1080_CONF1_DMA);
954 rv = BXR2(pcs, IspVirt2Off(isp, regoff));
956 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
962 isp_pci_wr_reg_1080(struct ispsoftc *isp, int regoff, u_int16_t val)
964 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
967 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
968 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
971 * We will assume that someone has paused the RISC processor.
973 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
974 tc = oc & ~BIU_PCI1080_CONF1_DMA;
975 if (regoff & SXP_BANK1_SELECT)
976 tc |= BIU_PCI1080_CONF1_SXP1;
978 tc |= BIU_PCI1080_CONF1_SXP0;
979 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
980 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
981 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
982 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
983 oc | BIU_PCI1080_CONF1_DMA);
985 BXW2(pcs, IspVirt2Off(isp, regoff), val);
987 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
993 struct ispsoftc *isp;
997 static void imc(void *, bus_dma_segment_t *, int, int);
1000 imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1002 struct imush *imushp = (struct imush *) arg;
1004 imushp->error = error;
1006 struct ispsoftc *isp = imushp->isp;
1007 bus_addr_t addr = segs->ds_addr;
1009 isp->isp_rquest_dma = addr;
1010 addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
1011 isp->isp_result_dma = addr;
1013 addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
1014 FCPARAM(isp)->isp_scdma = addr;
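/*
 * imc() carves the single contiguous control-space allocation that
 * isp_pci_mbxdma loads below into three regions, in this order: the request
 * queue, the result queue and, for Fibre Channel cards, the scratch area,
 * recording the bus address of each.  The matching virtual addresses are
 * assigned from 'base' in the same order once bus_dmamap_load() completes.
 */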
1020 * Should be BUS_SPACE_MAXSIZE, but MAXPHYS is larger than BUS_SPACE_MAXSIZE
1022 #define ISP_NSEGS ((MAXPHYS / PAGE_SIZE) + 1)
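/*
 * Illustrative arithmetic (using typical values, not guaranteed on every
 * platform): with MAXPHYS = 128KB and PAGE_SIZE = 4KB this comes to 33
 * segments -- 32 full pages plus one extra because a transfer that does not
 * start on a page boundary spans one additional page.
 */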
1025 isp_pci_mbxdma(struct ispsoftc *isp)
1027 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
1031 bus_size_t alim, slim;
1035 * Already been here? If so, leave...
1037 if (isp->isp_rquest) {
1041 #ifdef ISP_DAC_SUPPORTED
1042 alim = BUS_SPACE_UNRESTRICTED;
1044 alim = BUS_SPACE_MAXADDR_32BIT;
1046 if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
1047 slim = BUS_SPACE_MAXADDR_32BIT;
1049 slim = BUS_SPACE_MAXADDR_24BIT;
1053 if (bus_dma_tag_create(NULL, 1, slim+1, alim, alim,
1054 NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0, &pcs->dmat)) {
1055 isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
1061 len = sizeof (XS_T **) * isp->isp_maxcmds;
1062 isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
1063 if (isp->isp_xflist == NULL) {
1064 isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
1068 len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
1069 pcs->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
1070 if (pcs->dmaps == NULL) {
1071 isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage");
1072 free(isp->isp_xflist, M_DEVBUF);
1078 * Allocate and map the request, result queues, plus FC scratch area.
1080 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
1081 len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
1083 len += ISP2100_SCRLEN;
1086 ns = (len / PAGE_SIZE) + 1;
1087 if (bus_dma_tag_create(pcs->dmat, QENTRY_LEN, slim+1, alim, alim,
1088 NULL, NULL, len, ns, slim, 0, &isp->isp_cdmat)) {
1089 isp_prt(isp, ISP_LOGERR,
1090 "cannot create a dma tag for control spaces");
1091 free(pcs->dmaps, M_DEVBUF);
1092 free(isp->isp_xflist, M_DEVBUF);
1097 if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT,
1098 &isp->isp_cdmap) != 0) {
1099 isp_prt(isp, ISP_LOGERR,
1100 "cannot allocate %d bytes of CCB memory", len);
1101 bus_dma_tag_destroy(isp->isp_cdmat);
1102 free(isp->isp_xflist, M_DEVBUF);
1103 free(pcs->dmaps, M_DEVBUF);
1108 for (i = 0; i < isp->isp_maxcmds; i++) {
1109 error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]);
1111 isp_prt(isp, ISP_LOGERR,
1112 "error %d creating per-cmd DMA maps", error);
1114 bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]);
1122 bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0);
1124 isp_prt(isp, ISP_LOGERR,
1125 "error %d loading dma map for control areas", im.error);
1129 isp->isp_rquest = base;
1130 base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
1131 isp->isp_result = base;
1133 base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
1134 FCPARAM(isp)->isp_scratch = base;
1140 bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap);
1141 bus_dma_tag_destroy(isp->isp_cdmat);
1142 free(isp->isp_xflist, M_DEVBUF);
1143 free(pcs->dmaps, M_DEVBUF);
1145 isp->isp_rquest = NULL;
1150 struct ispsoftc *isp;
1158 #define MUSHERR_NOQENTRIES -2
1160 #ifdef ISP_TARGET_MODE
1162 * We need to handle DMA for target mode differently from initiator mode.
1164 * DMA mapping and construction and submission of CTIO Request Entries
1165 * and rendezvous for completion are very tightly coupled because we start
1166 * out by knowing (per platform) how much data we have to move, but we
1167 * don't know, up front, how many DMA mapping segments will have to be used
1168 * to cover that data, so we don't know how many CTIO Request Entries we
1169 * will end up using. Further, for performance reasons we may want to
1170 * (on the last CTIO for Fibre Channel), send status too (if all went well).
1172 * The standard vector still goes through isp_pci_dmasetup, but the callback
1173 * for the DMA mapping routines comes here instead with the whole transfer
1174 * mapped and a pointer to a partially filled in already allocated request
1175 * queue entry. We finish the job.
1177 static void tdma_mk(void *, bus_dma_segment_t *, int, int);
1178 static void tdma_mkfc(void *, bus_dma_segment_t *, int, int);
1180 #define STATUS_WITH_DATA 1
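/*
 * A sketch of how tdma_mk below splits a mapped transfer: with nseg DMA
 * segments and at most ISP_RQDSEG data segments per CTIO it emits
 * nseg / ISP_RQDSEG CTIOs, plus one more if there is a remainder, and if
 * STATUS_WITH_DATA is not defined one further CTIO is synthesized to carry
 * status on its own.
 */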
1183 tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1186 struct ccb_scsiio *csio;
1187 struct ispsoftc *isp;
1188 struct isp_pcisoftc *pcs;
1190 ct_entry_t *cto, *qe;
1191 u_int8_t scsi_status;
1192 u_int16_t curi, nxti, handle;
1195 int nth_ctio, nctios, send_status;
1197 mp = (mush_t *) arg;
1204 csio = mp->cmd_token;
1206 curi = isp->isp_reqidx;
1207 qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);
1210 cto->ct_seg_count = 0;
1211 cto->ct_header.rqs_entry_count = 1;
1212 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
1215 cto->ct_header.rqs_seqno = 1;
1216 isp_prt(isp, ISP_LOGTDEBUG1,
1217 "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d",
1218 cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid,
1219 cto->ct_tag_val, cto->ct_flags, cto->ct_status,
1220 cto->ct_scsi_status, cto->ct_resid);
1221 ISP_TDQE(isp, "tdma_mk[no data]", curi, cto);
1222 isp_put_ctio(isp, cto, qe);
1226 nctios = nseg / ISP_RQDSEG;
1227 if (nseg % ISP_RQDSEG) {
1232 * Save syshandle, and potentially any SCSI status, which we'll
1233 * reinsert on the last CTIO we're going to send.
1236 handle = cto->ct_syshandle;
1237 cto->ct_syshandle = 0;
1238 cto->ct_header.rqs_seqno = 0;
1239 send_status = (cto->ct_flags & CT_SENDSTATUS) != 0;
1242 sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR);
1243 cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR);
1245 * Preserve residual.
1247 resid = cto->ct_resid;
1250 * Save actual SCSI status.
1252 scsi_status = cto->ct_scsi_status;
1254 #ifndef STATUS_WITH_DATA
1255 sflags |= CT_NO_DATA;
1257 * We can't do a status at the same time as a data CTIO, so
1258 * we need to synthesize an extra CTIO at this level.
1263 sflags = scsi_status = resid = 0;
1267 cto->ct_scsi_status = 0;
1269 pcs = (struct isp_pcisoftc *)isp;
1270 dp = &pcs->dmaps[isp_handle_index(handle)];
1271 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1272 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
1274 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
1279 for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) {
1286 if (seglim > ISP_RQDSEG)
1287 seglim = ISP_RQDSEG;
1289 for (seg = 0; seg < seglim; seg++, nseg--) {
1291 * Unlike normal initiator commands, we don't
1292 * do any swizzling here.
1294 cto->ct_dataseg[seg].ds_count = dm_segs->ds_len;
1295 cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr;
1296 cto->ct_xfrlen += dm_segs->ds_len;
1299 cto->ct_seg_count = seg;
1302 * This case should only happen when we're sending an
1303 * extra CTIO with final status.
1305 if (send_status == 0) {
1306 isp_prt(isp, ISP_LOGWARN,
1307 "tdma_mk ran out of segments");
1314 * At this point, the fields ct_lun, ct_iid, ct_tagval,
1315 * ct_tagtype, and ct_timeout have been carried over
1316 * unchanged from what our caller had set.
1318 * The dataseg fields and the seg_count fields we just got
1319 * through setting. The data direction we've preserved all
1320 * along and only clear it if we're now sending status.
1323 if (nth_ctio == nctios - 1) {
1325 * We're the last in a sequence of CTIOs, so mark
1326 * this CTIO and save the handle to the CCB such that
1327 * when this CTIO completes we can free dma resources
1328 * and do whatever else we need to do to finish the
1329 * rest of the command. We *don't* give this to the
1330 * firmware to work on- the caller will do that.
1333 cto->ct_syshandle = handle;
1334 cto->ct_header.rqs_seqno = 1;
1337 cto->ct_scsi_status = scsi_status;
1338 cto->ct_flags |= sflags;
1339 cto->ct_resid = resid;
1342 isp_prt(isp, ISP_LOGTDEBUG1,
1343 "CTIO[%x] lun%d iid %d tag %x ct_flags %x "
1344 "scsi status %x resid %d",
1345 cto->ct_fwhandle, csio->ccb_h.target_lun,
1346 cto->ct_iid, cto->ct_tag_val, cto->ct_flags,
1347 cto->ct_scsi_status, cto->ct_resid);
1349 isp_prt(isp, ISP_LOGTDEBUG1,
1350 "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x",
1351 cto->ct_fwhandle, csio->ccb_h.target_lun,
1352 cto->ct_iid, cto->ct_tag_val,
1355 isp_put_ctio(isp, cto, qe);
1356 ISP_TDQE(isp, "last tdma_mk", curi, cto);
1358 MEMORYBARRIER(isp, SYNC_REQUEST,
1362 ct_entry_t *oqe = qe;
1365 * Make sure syshandle fields are clean
1367 cto->ct_syshandle = 0;
1368 cto->ct_header.rqs_seqno = 0;
1370 isp_prt(isp, ISP_LOGTDEBUG1,
1371 "CTIO[%x] lun%d for ID%d ct_flags 0x%x",
1372 cto->ct_fwhandle, csio->ccb_h.target_lun,
1373 cto->ct_iid, cto->ct_flags);
1379 ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
1380 nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp));
1381 if (nxti == mp->optr) {
1382 isp_prt(isp, ISP_LOGTDEBUG0,
1383 "Queue Overflow in tdma_mk");
1384 mp->error = MUSHERR_NOQENTRIES;
1389 * Now that we're done with the old CTIO,
1390 * flush it out to the request queue.
1392 ISP_TDQE(isp, "dma_tgt_fc", curi, cto);
1393 isp_put_ctio(isp, cto, oqe);
1394 if (nth_ctio != 0) {
1395 MEMORYBARRIER(isp, SYNC_REQUEST, curi,
1398 curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp));
1401 * Reset some fields in the CTIO so we can reuse
1402 * for the next one we'll flush to the request
1405 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
1406 cto->ct_header.rqs_entry_count = 1;
1407 cto->ct_header.rqs_flags = 0;
1409 cto->ct_scsi_status = 0;
1412 cto->ct_seg_count = 0;
1413 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
1420 * We don't have to do multiple CTIOs here. Instead, we can just do
1421 * continuation segments as needed. This greatly simplifies the code and
1422 * improves performance.
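 * Concretely (a sketch of the code below): the first ISP_RQDSEG_T2 segments
 * go into the CTIO2 entry itself; any remaining segments are spilled into
 * RQSTYPE_DATASEG continuation entries, ISP_CDSEG segments apiece, with
 * rqs_entry_count bumped for each continuation queued.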
1426 tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1429 struct ccb_scsiio *csio;
1430 struct ispsoftc *isp;
1431 ct2_entry_t *cto, *qe;
1432 u_int16_t curi, nxti;
1435 mp = (mush_t *) arg;
1442 csio = mp->cmd_token;
1445 curi = isp->isp_reqidx;
1446 qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);
1449 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) {
1450 isp_prt(isp, ISP_LOGWARN,
1451 "dma2_tgt_fc, a status CTIO2 without MODE1 "
1452 "set (0x%x)", cto->ct_flags);
1457 * We preserve ct_lun, ct_iid, ct_rxid. We set the data
1458 * flags to NO DATA and clear relative offset flags.
1459 * We preserve the ct_resid and the response area.
1461 cto->ct_header.rqs_seqno = 1;
1462 cto->ct_seg_count = 0;
1464 isp_prt(isp, ISP_LOGTDEBUG1,
1465 "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts "
1466 "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun,
1467 cto->ct_iid, cto->ct_flags, cto->ct_status,
1468 cto->rsp.m1.ct_scsi_status, cto->ct_resid);
1469 isp_put_ctio2(isp, cto, qe);
1470 ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe);
1474 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) {
1475 isp_prt(isp, ISP_LOGERR,
1476 "dma2_tgt_fc, a data CTIO2 without MODE0 set "
1477 "(0x%x)", cto->ct_flags);
1486 * Set up the CTIO2 data segments.
1488 for (segcnt = 0; cto->ct_seg_count < ISP_RQDSEG_T2 && segcnt < nseg;
1489 cto->ct_seg_count++, segcnt++) {
1490 cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_base =
1491 dm_segs[segcnt].ds_addr;
1492 cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_count =
1493 dm_segs[segcnt].ds_len;
1494 cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
1495 isp_prt(isp, ISP_LOGTDEBUG1, "isp_send_ctio2: ent0[%d]0x%x:%d",
1496 cto->ct_seg_count, dm_segs[segcnt].ds_addr,
1497 dm_segs[segcnt].ds_len);
1500 while (segcnt < nseg) {
1503 ispcontreq_t local, *crq = &local, *qep;
1505 qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
1507 nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp));
1508 if (nxti == mp->optr) {
1510 isp_prt(isp, ISP_LOGTDEBUG0,
1511 "tdma_mkfc: request queue overflow");
1512 mp->error = MUSHERR_NOQENTRIES;
1515 cto->ct_header.rqs_entry_count++;
1516 MEMZERO((void *)crq, sizeof (*crq));
1517 crq->req_header.rqs_entry_count = 1;
1518 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
1519 for (seg = 0; segcnt < nseg && seg < ISP_CDSEG;
1521 crq->req_dataseg[seg].ds_base = dm_segs[segcnt].ds_addr;
1522 crq->req_dataseg[seg].ds_count = dm_segs[segcnt].ds_len;
1523 isp_prt(isp, ISP_LOGTDEBUG1,
1524 "isp_send_ctio2: ent%d[%d]%x:%u",
1525 cto->ct_header.rqs_entry_count-1, seg,
1526 dm_segs[segcnt].ds_addr, dm_segs[segcnt].ds_len);
1527 cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
1528 cto->ct_seg_count++;
1530 MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN);
1531 isp_put_cont_req(isp, crq, qep);
1532 ISP_TDQE(isp, "cont entry", curi, qep);
1536 * Now do final twiddling for the CTIO itself.
1538 cto->ct_header.rqs_seqno = 1;
1539 isp_prt(isp, ISP_LOGTDEBUG1,
1540 "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d",
1541 cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid,
1542 cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status,
1544 isp_put_ctio2(isp, cto, qe);
1545 ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe);
1550 static void dma2(void *, bus_dma_segment_t *, int, int);
1553 dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1556 struct ispsoftc *isp;
1557 struct ccb_scsiio *csio;
1558 struct isp_pcisoftc *pcs;
1560 bus_dma_segment_t *eseg;
1562 int seglim, datalen;
1565 mp = (mush_t *) arg;
1572 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
1576 csio = mp->cmd_token;
1579 pcs = (struct isp_pcisoftc *)mp->isp;
1580 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
1583 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1584 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
1586 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
1589 datalen = XS_XFRLEN(csio);
1592 * We're passed an initial partially filled in entry that
1593 * has most fields filled in except for data transfer
1596 * Our job is to fill in the initial request queue entry and
1597 * then to start allocating and filling in continuation entries
1598 * until we've covered the entire transfer.
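 *
 * In outline: Fibre Channel commands use the type-2 request layout
 * (ISP_RQDSEG_T2 segments in the request entry plus req_totalcnt), parallel
 * SCSI commands use the plain layout (ISP_RQDSEG segments); whatever does not
 * fit in the request entry goes into RQSTYPE_DATASEG continuation entries of
 * ISP_CDSEG segments each, with rqs_entry_count bumped as they are queued.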
1602 seglim = ISP_RQDSEG_T2;
1603 ((ispreqt2_t *)rq)->req_totalcnt = datalen;
1604 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1605 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN;
1607 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
1610 if (csio->cdb_len > 12) {
1613 seglim = ISP_RQDSEG;
1615 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1616 rq->req_flags |= REQFLAG_DATA_IN;
1618 rq->req_flags |= REQFLAG_DATA_OUT;
1622 eseg = dm_segs + nseg;
1624 while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
1626 ispreqt2_t *rq2 = (ispreqt2_t *)rq;
1627 rq2->req_dataseg[rq2->req_seg_count].ds_base =
1629 rq2->req_dataseg[rq2->req_seg_count].ds_count =
1632 rq->req_dataseg[rq->req_seg_count].ds_base =
1634 rq->req_dataseg[rq->req_seg_count].ds_count =
1637 datalen -= dm_segs->ds_len;
1638 rq->req_seg_count++;
1642 while (datalen > 0 && dm_segs != eseg) {
1644 ispcontreq_t local, *crq = &local, *cqe;
1646 cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
1648 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
1649 if (nxti == mp->optr) {
1650 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
1651 mp->error = MUSHERR_NOQENTRIES;
1654 rq->req_header.rqs_entry_count++;
1655 MEMZERO((void *)crq, sizeof (*crq));
1656 crq->req_header.rqs_entry_count = 1;
1657 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
1660 while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
1661 crq->req_dataseg[seglim].ds_base =
1663 crq->req_dataseg[seglim].ds_count =
1665 rq->req_seg_count++;
1668 datalen -= dm_segs->ds_len;
1670 isp_put_cont_req(isp, crq, cqe);
1671 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
1677 isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq,
1678 u_int16_t *nxtip, u_int16_t optr)
1680 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
1682 bus_dmamap_t *dp = NULL;
1684 void (*eptr)(void *, bus_dma_segment_t *, int, int);
1686 qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx);
1687 #ifdef ISP_TARGET_MODE
1688 if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
1694 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
1695 (csio->dxfer_len == 0)) {
1698 mp->cmd_token = csio;
1699 mp->rq = rq; /* really a ct_entry_t or ct2_entry_t */
1703 (*eptr)(mp, NULL, 0, 0);
1711 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
1712 (csio->dxfer_len == 0)) {
1713 rq->req_seg_count = 1;
1718 * Do a virtual grapevine step to collect info for
1719 * the callback dma allocation that we have to use...
1723 mp->cmd_token = csio;
1729 if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
1730 if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
1732 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
1734 error = bus_dmamap_load(pcs->dmat, *dp,
1735 csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
1736 if (error == EINPROGRESS) {
1737 bus_dmamap_unload(pcs->dmat, *dp);
1739 isp_prt(isp, ISP_LOGERR,
1740 "deferred dma allocation not supported");
1741 } else if (error && mp->error == 0) {
1743 isp_prt(isp, ISP_LOGERR,
1744 "error %d in dma mapping code", error);
1750 /* Pointer to physical buffer */
1751 struct bus_dma_segment seg;
1752 seg.ds_addr = (bus_addr_t)csio->data_ptr;
1753 seg.ds_len = csio->dxfer_len;
1754 (*eptr)(mp, &seg, 1, 0);
1757 struct bus_dma_segment *segs;
1759 if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
1760 isp_prt(isp, ISP_LOGERR,
1761 "Physical segment pointers unsupported");
1763 } else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
1764 isp_prt(isp, ISP_LOGERR,
1765 "Virtual segment addresses unsupported");
1768 /* Just use the segments provided */
1769 segs = (struct bus_dma_segment *) csio->data_ptr;
1770 (*eptr)(mp, segs, csio->sglist_cnt, 0);
1774 int retval = CMD_COMPLETE;
1775 if (mp->error == MUSHERR_NOQENTRIES) {
1776 retval = CMD_EAGAIN;
1777 } else if (mp->error == EFBIG) {
1778 XS_SETERR(csio, CAM_REQ_TOO_BIG);
1779 } else if (mp->error == EINVAL) {
1780 XS_SETERR(csio, CAM_REQ_INVALID);
1782 XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
1787 switch (rq->req_header.rqs_entry_type) {
1788 case RQSTYPE_REQUEST:
1789 isp_put_request(isp, rq, qep);
1791 case RQSTYPE_CMDONLY:
1792 isp_put_extended_request(isp, (ispextreq_t *)rq,
1793 (ispextreq_t *)qep);
1796 isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep);
1799 return (CMD_QUEUED);
1803 isp_pci_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int16_t handle)
1805 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
1806 bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)];
1807 if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1808 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD);
1810 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE);
1812 bus_dmamap_unload(pcs->dmat, *dp);
1817 isp_pci_reset1(struct ispsoftc *isp)
1819 /* Make sure the BIOS is disabled */
1820 isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
1821 /* and enable interrupts */
1826 isp_pci_dumpregs(struct ispsoftc *isp, const char *msg)
1828 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
1830 printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
1832 printf("%s:\n", device_get_nameunit(isp->isp_dev));
1834 printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
1836 printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
1837 printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
1838 ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
1839 printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));
1843 ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
1844 printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
1845 ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
1846 ISP_READ(isp, CDMA_FIFO_STS));
1847 printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
1848 ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
1849 ISP_READ(isp, DDMA_FIFO_STS));
1850 printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
1851 ISP_READ(isp, SXP_INTERRUPT),
1852 ISP_READ(isp, SXP_GROSS_ERR),
1853 ISP_READ(isp, SXP_PINS_CTRL));
1854 ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
1856 printf(" mbox regs: %x %x %x %x %x\n",
1857 ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
1858 ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
1859 ISP_READ(isp, OUTMAILBOX4));
1860 printf(" PCI Status Command/Status=%x\n",
1861 pci_read_config(pcs->pci_dev, PCIR_COMMAND, 4));