/* $FreeBSD: src/sys/dev/isp/isp_pci.c,v 1.78.2.4 2002/10/11 18:50:53 mjacob Exp $ */
/* $DragonFly: src/sys/dev/disk/isp/isp_pci.c,v 1.10 2006/09/05 00:55:37 dillon Exp $ */
/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 *
 * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include <machine/bus_memio.h>
#include <machine/bus_pio.h>
#include <machine/bus.h>
#include <machine/resource.h>

#include <sys/malloc.h>

#include "isp_freebsd.h"
static u_int16_t isp_pci_rd_reg(struct ispsoftc *, int);
static void isp_pci_wr_reg(struct ispsoftc *, int, u_int16_t);
static u_int16_t isp_pci_rd_reg_1080(struct ispsoftc *, int);
static void isp_pci_wr_reg_1080(struct ispsoftc *, int, u_int16_t);
static int
isp_pci_rd_isr(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
static int
isp_pci_rd_isr_2300(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
static int isp_pci_mbxdma(struct ispsoftc *);
static int
isp_pci_dmasetup(struct ispsoftc *, XS_T *, ispreq_t *, u_int16_t *, u_int16_t);
static void
isp_pci_dmateardown(struct ispsoftc *, XS_T *, u_int16_t);
static void isp_pci_reset1(struct ispsoftc *);
static void isp_pci_dumpregs(struct ispsoftc *, const char *);
static struct ispmdvec mdvec = {
	isp_pci_rd_isr, isp_pci_rd_reg, isp_pci_wr_reg,
	isp_pci_mbxdma, isp_pci_dmasetup, isp_pci_dmateardown,
	NULL, isp_pci_reset1, isp_pci_dumpregs, NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_isr, isp_pci_rd_reg_1080, isp_pci_wr_reg_1080,
	isp_pci_mbxdma, isp_pci_dmasetup, isp_pci_dmateardown,
	NULL, isp_pci_reset1, isp_pci_dumpregs, NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_12160 = {
	isp_pci_rd_isr, isp_pci_rd_reg_1080, isp_pci_wr_reg_1080,
	isp_pci_mbxdma, isp_pci_dmasetup, isp_pci_dmateardown,
	NULL, isp_pci_reset1, isp_pci_dumpregs, NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_isr, isp_pci_rd_reg, isp_pci_wr_reg,
	isp_pci_mbxdma, isp_pci_dmasetup, isp_pci_dmateardown,
	NULL, isp_pci_reset1, isp_pci_dumpregs
};

static struct ispmdvec mdvec_2200 = {
	isp_pci_rd_isr, isp_pci_rd_reg, isp_pci_wr_reg,
	isp_pci_mbxdma, isp_pci_dmasetup, isp_pci_dmateardown,
	NULL, isp_pci_reset1, isp_pci_dumpregs
};

static struct ispmdvec mdvec_2300 = {
	isp_pci_rd_isr_2300, isp_pci_rd_reg, isp_pci_wr_reg,
	isp_pci_mbxdma, isp_pci_dmasetup, isp_pci_dmateardown,
	NULL, isp_pci_reset1, isp_pci_dumpregs
};
#ifndef	PCIM_CMD_INVEN
#define	PCIM_CMD_INVEN			0x10
#endif
#ifndef	PCIM_CMD_BUSMASTEREN
#define	PCIM_CMD_BUSMASTEREN		0x0004
#endif
#ifndef	PCIM_CMD_PERRESPEN
#define	PCIM_CMD_PERRESPEN		0x0040
#endif
#ifndef	PCIM_CMD_SEREN
#define	PCIM_CMD_SEREN			0x0100
#endif

#ifndef	PCIR_COMMAND
#define	PCIR_COMMAND			0x04
#endif

#ifndef	PCIR_CACHELNSZ
#define	PCIR_CACHELNSZ			0x0c
#endif

#ifndef	PCIR_LATTIMER
#define	PCIR_LATTIMER			0x0d
#endif

#ifndef	PCIR_ROMADDR
#define	PCIR_ROMADDR			0x30
#endif
#ifndef	PCI_VENDOR_QLOGIC
#define	PCI_VENDOR_QLOGIC		0x1077
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1020
#define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1080
#define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP10160
#define	PCI_PRODUCT_QLOGIC_ISP10160	0x1016
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP12160
#define	PCI_PRODUCT_QLOGIC_ISP12160	0x1216
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1240
#define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1280
#define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2100
#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2200
#define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2300
#define	PCI_PRODUCT_QLOGIC_ISP2300	0x2300
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2312
#define	PCI_PRODUCT_QLOGIC_ISP2312	0x2312
#endif
#define	PCI_QLOGIC_ISP1020	\
	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1080	\
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP10160	\
	((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP12160	\
	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1240	\
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1280	\
	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2200	\
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2300	\
	((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2312	\
	((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)
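/*
 * Illustrative note: each composite ID above packs the product ID into
 * the high 16 bits and the vendor ID into the low 16 bits, e.g. for the
 * ISP1020: (0x1020 << 16) | 0x1077 == 0x10201077. isp_pci_probe() below
 * rebuilds the same value from pci_get_device() and pci_get_vendor()
 * before switching on it.
 */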
/*
 * Odd case for some AMI raid cards... We need to *not* attach to this.
 */
#define	AMI_RAID_SUBVENDOR_ID	0x101e

#define	IO_MAP_REG	0x10
#define	MEM_MAP_REG	0x14

#define	PCI_DFLT_LTNCY	0x40
#define	PCI_DFLT_LNSZ	0x10
static int isp_pci_probe (device_t);
static int isp_pci_attach (device_t);
struct isp_pcisoftc {
	struct ispsoftc			pci_isp;
	device_t			pci_dev;
	struct resource *		pci_reg;
	bus_space_tag_t			pci_st;
	bus_space_handle_t		pci_sh;
	void *				ih;
	int16_t				pci_poff[_NREG_BLKS];
	bus_dma_tag_t			dmat;
	bus_dmamap_t			*dmaps;
};

static ispfwfunc *isp_get_firmware_p = NULL;
static device_method_t isp_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,	 isp_pci_probe),
	DEVMETHOD(device_attach, isp_pci_attach),
	{ 0, 0 }
};
static void isp_pci_intr(void *);

static driver_t isp_pci_driver = {
	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
};
static devclass_t isp_devclass;
DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);
MODULE_VERSION(isp, 1);
static int
isp_pci_probe(device_t dev)
{
	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
	case PCI_QLOGIC_ISP1020:
		device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1080:
		device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1240:
		device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1280:
		device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP10160:
		device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP12160:
		if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
			return (ENXIO);
		}
		device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP2100:
		device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2200:
		device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2300:
		device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2312:
		device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
		break;
	default:
		return (ENXIO);
	}
	if (device_get_unit(dev) == 0 && bootverbose) {
		printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
		    "Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
	}
	/*
	 * XXXX: Here is where we might load the f/w module
	 * XXXX: (or increase a reference count to it).
	 */
	return (0);
}
static int
isp_pci_attach(device_t dev)
{
	struct resource *regs, *irq;
	int unit, bitmap, rtp, rgd, iqd, m1, m2, isp_debug;
	u_int32_t data, cmd, linesz, psize, basetype;
	struct isp_pcisoftc *pcs;
	struct ispsoftc *isp = NULL;
	struct ispmdvec *mdvp;
	quad_t wwn;
	bus_size_t lim;

	/*
	 * Figure out if we're supposed to skip this one.
	 */
	unit = device_get_unit(dev);
	if (kgetenv_int("isp_disable", &bitmap)) {
		if (bitmap & (1 << unit)) {
			device_printf(dev, "not configuring\n");
			/*
			 * But return '0' to preserve HBA numbering.
			 */
			return (0);
		}
	}

	pcs = kmalloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_WAITOK | M_ZERO);
	/*
	 * Figure out which we should try first - memory mapping or i/o mapping?
	 */
	m1 = PCIM_CMD_PORTEN;
	m2 = PCIM_CMD_MEMEN;
	bitmap = 0;
	if (kgetenv_int("isp_mem_map", &bitmap)) {
		if (bitmap & (1 << unit)) {
			m1 = PCIM_CMD_MEMEN;
			m2 = PCIM_CMD_PORTEN;
		}
	}
	bitmap = 0;
	if (kgetenv_int("isp_io_map", &bitmap)) {
		if (bitmap & (1 << unit)) {
			m1 = PCIM_CMD_PORTEN;
			m2 = PCIM_CMD_MEMEN;
		}
	}

	linesz = PCI_DFLT_LNSZ;
	irq = regs = NULL;
	rgd = rtp = iqd = 0;

	cmd = pci_read_config(dev, PCIR_COMMAND, 1);
	if (cmd & m1) {
		rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE);
	}
	if (regs == NULL && (cmd & m2)) {
		rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE);
	}
	if (regs == NULL) {
		device_printf(dev, "unable to map any ports\n");
		goto bad;
	}
	if (bootverbose) {
		device_printf(dev, "using %s space register mapping\n",
		    (rgd == IO_MAP_REG)? "I/O" : "Memory");
	}
	pcs->pci_dev = dev;
	pcs->pci_reg = regs;
	pcs->pci_st = rman_get_bustag(regs);
	pcs->pci_sh = rman_get_bushandle(regs);

	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
	mdvp = &mdvec;
	basetype = ISP_HA_SCSI_UNKNOWN;
	psize = sizeof (sdparam);
	lim = BUS_SPACE_MAXSIZE_32BIT;
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) {
		mdvp = &mdvec;
		basetype = ISP_HA_SCSI_UNKNOWN;
		psize = sizeof (sdparam);
		lim = BUS_SPACE_MAXSIZE_24BIT;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1080;
		psize = sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1240;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1280;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP10160) {
		mdvp = &mdvec_12160;
		basetype = ISP_HA_SCSI_10160;
		psize = sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) {
		mdvp = &mdvec_12160;
		basetype = ISP_HA_SCSI_12160;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) {
		mdvp = &mdvec_2100;
		basetype = ISP_HA_FC_2100;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
		if (pci_get_revid(dev) < 3) {
			/*
			 * XXX: Need to get the actual revision
			 * XXX: number of the 2100 FB. At any rate,
			 * XXX: lower cache line size for early revision
			 * XXX: boards.
			 */
			linesz = 1;
		}
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) {
		mdvp = &mdvec_2200;
		basetype = ISP_HA_FC_2200;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2300;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2312;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	isp = &pcs->pci_isp;
	isp->isp_param = kmalloc(psize, M_DEVBUF, M_WAITOK | M_ZERO);
	isp->isp_mdvec = mdvp;
	isp->isp_type = basetype;
	isp->isp_revision = pci_get_revid(dev);
#ifdef ISP_TARGET_MODE
	isp->isp_role = ISP_ROLE_BOTH;
#else
	isp->isp_role = ISP_DEFAULT_ROLES;
#endif
	isp->isp_dev = dev;
	/*
	 * Try and find firmware for this device.
	 */
	if (isp_get_firmware_p) {
		int device = (int) pci_get_device(dev);
#ifdef ISP_TARGET_MODE
		(*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw);
#else
		(*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw);
#endif
	}
	/*
	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
	 * are set.
	 */
	cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN |
	    PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
	if (IS_2300(isp)) {	/* per QLogic errata */
		cmd &= ~PCIM_CMD_INVEN;
	}

	/*
	 * Can't tell if ROM will hang on 'ABOUT FIRMWARE' command.
	 */
	isp->isp_touched = 1;

	pci_write_config(dev, PCIR_COMMAND, cmd, 1);
	/*
	 * Make sure the Cache Line Size register is set sensibly.
	 */
	data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	if (data != linesz) {
		data = PCI_DFLT_LNSZ;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data);
		pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
	}

	/*
	 * Make sure the Latency Timer is sane.
	 */
	data = pci_read_config(dev, PCIR_LATTIMER, 1);
	if (data < PCI_DFLT_LTNCY) {
		data = PCI_DFLT_LTNCY;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data);
		pci_write_config(dev, PCIR_LATTIMER, data, 1);
	}

	/*
	 * Make sure we've disabled the ROM.
	 */
	data = pci_read_config(dev, PCIR_ROMADDR, 4);
	data &= ~1;
	pci_write_config(dev, PCIR_ROMADDR, data, 4);
	irq = bus_alloc_resource(dev, SYS_RES_IRQ, &iqd, 0, ~0,
	    1, RF_ACTIVE | RF_SHAREABLE);
	if (irq == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		goto bad;
	}
	if (kgetenv_int("isp_no_fwload", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NORELOAD;
	}
	if (kgetenv_int("isp_fwload", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_NORELOAD;
	}
	if (kgetenv_int("isp_no_nvram", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NONVRAM;
	}
	if (kgetenv_int("isp_nvram", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_NONVRAM;
	}
	if (kgetenv_int("isp_fcduplex", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
	}
	if (kgetenv_int("isp_no_fcduplex", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_FULL_DUPLEX;
	}
	if (kgetenv_int("isp_nport", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NPORT;
	}
	/*
	 * Because the resource_*_value functions can neither return
	 * 64 bit integer values, nor can they be directly coerced
	 * to interpret the right hand side of the assignment as
	 * you want them to interpret it, we have to force WWN
	 * hint replacement to specify WWN strings with a leading
	 * 'w' (e.g., w50000000aaaa0001). Sigh.
	 */
	if (kgetenv_quad("isp_portwwn", &wwn)) {
		isp->isp_osinfo.default_port_wwn = wwn;
		isp->isp_confopts |= ISP_CFG_OWNWWPN;
	}
	if (isp->isp_osinfo.default_port_wwn == 0) {
		isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull;
	}

	if (kgetenv_quad("isp_nodewwn", &wwn)) {
		isp->isp_osinfo.default_node_wwn = wwn;
		isp->isp_confopts |= ISP_CFG_OWNWWNN;
	}
	if (isp->isp_osinfo.default_node_wwn == 0) {
		isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull;
	}
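	/*
	 * Illustrative (hypothetical) loader hint, following the 'w'
	 * convention described in the comment above:
	 *
	 *	isp_portwwn="w50000000aaaa0001"
	 *
	 * which kgetenv_quad() would then hand back as the 64-bit
	 * WWN 0x50000000aaaa0001.
	 */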
	isp_debug = 0;
	(void) kgetenv_int("isp_debug", &isp_debug);

	if (bus_setup_intr(dev, irq, 0, isp_pci_intr,
	    isp, &pcs->ih, NULL)) {
		device_printf(dev, "could not setup interrupt\n");
		goto bad;
	}
#ifdef ISP_FW_CRASH_DUMP
	bitmap = 0;
	if (kgetenv_int("isp_fw_dump_enable", &bitmap)) {
		if (bitmap & (1 << unit)) {
			size_t amt = 0;
			if (IS_2200(isp)) {
				amt = QLA2200_RISC_IMAGE_DUMP_SIZE;
			} else if (IS_23XX(isp)) {
				amt = QLA2300_RISC_IMAGE_DUMP_SIZE;
			}
			if (amt) {
				FCPARAM(isp)->isp_dump_data =
				    kmalloc(amt, M_DEVBUF, M_WAITOK);
				bzero(FCPARAM(isp)->isp_dump_data, amt);
			} else {
				device_printf(dev,
				    "f/w crash dumps not supported for card\n");
			}
		}
	}
#endif
	isp->isp_port = pci_get_function(dev);

	/*
	 * Set up logging levels.
	 */
	if (isp_debug) {
		isp->isp_dblev = isp_debug;
	} else {
		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	}
	if (bootverbose)
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
	/*
	 * Make sure we're in reset state.
	 */
	ISP_LOCK(isp);
	isp_reset(isp);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_init(isp);
	if (isp->isp_state != ISP_INITSTATE) {
		/* If we're a Fibre Channel Card, we allow deferred attach */
		if (IS_SCSI(isp)) {
			isp_uninit(isp);
			ISP_UNLOCK(isp);
			goto bad;
		}
	}
	isp_attach(isp);
	if (isp->isp_state != ISP_RUNSTATE) {
		/* If we're a Fibre Channel Card, we allow deferred attach */
		if (IS_SCSI(isp)) {
			isp_uninit(isp);
			ISP_UNLOCK(isp);
			goto bad;
		}
	}
	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	ISP_UNLOCK(isp);
	return (0);
bad:
	if (pcs && pcs->ih) {
		(void) bus_teardown_intr(dev, irq, pcs->ih);
	}
	if (irq) {
		(void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq);
	}
	if (regs) {
		(void) bus_release_resource(dev, rtp, rgd, regs);
	}
	if (pcs) {
		if (pcs->pci_isp.isp_param)
			kfree(pcs->pci_isp.isp_param, M_DEVBUF);
		kfree(pcs, M_DEVBUF);
	}
	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	return (ENXIO);
}
static void
isp_pci_intr(void *arg)
{
	struct ispsoftc *isp = arg;
	u_int16_t isr, sema, mbox;

	ISP_LOCK(isp);
	isp->isp_intcnt++;
	if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
		isp->isp_intbogus++;
	} else {
		int iok = isp->isp_osinfo.intsok;
		isp->isp_osinfo.intsok = 0;
		isp_intr(isp, isr, sema, mbox);
		isp->isp_osinfo.intsok = iok;
	}
	ISP_UNLOCK(isp);
}
#define	IspVirt2Off(a, x)	\
	(((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
	_BLK_REG_SHFT] + ((x) & 0xff))

#define	BXR2(pcs, off)		\
	bus_space_read_2(pcs->pci_st, pcs->pci_sh, off)
#define	BXW2(pcs, off, v)	\
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v)
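/*
 * Illustrative sketch of how these macros compose (not compiled): a read
 * of a register in the BIU block, say BIU_ISR, resolves roughly as
 *
 *	BXR2(pcs, IspVirt2Off(isp, BIU_ISR))
 *	  -> bus_space_read_2(pci_st, pci_sh,
 *		pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] + (BIU_ISR & 0xff))
 *
 * i.e. the per-chip offset table set up in isp_pci_attach() relocates
 * each virtual register block within the mapped PCI register window.
 */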
static int
isp_pci_rd_debounced(struct ispsoftc *isp, int off, u_int16_t *rp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int16_t val0, val1;
	int i = 0;

	do {
		val0 = BXR2(pcs, IspVirt2Off(isp, off));
		val1 = BXR2(pcs, IspVirt2Off(isp, off));
	} while (val0 != val1 && ++i < 1000);
	if (val0 != val1) {
		return (-1);
	}
	*rp = val0;
	return (0);
}
static int
isp_pci_rd_isr(struct ispsoftc *isp, u_int16_t *isrp,
    u_int16_t *semap, u_int16_t *mbp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int16_t isr, sema;

	if (IS_2100(isp)) {
		if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
			return (0);
		}
		if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
			return (0);
		}
	} else {
		isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR));
		sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA));
	}
	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
	isr &= INT_PENDING_MASK(isp);
	sema &= BIU_SEMA_LOCK;
	if (isr == 0 && sema == 0) {
		return (0);
	}
	*isrp = isr;
	if ((*semap = sema) != 0) {
		if (IS_2100(isp)) {
			if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) {
				return (0);
			}
		} else {
			*mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0));
		}
	}
	return (1);
}
static int
isp_pci_rd_isr_2300(struct ispsoftc *isp, u_int16_t *isrp,
    u_int16_t *semap, u_int16_t *mbox0p)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int32_t r2hisr;

	if (!(BXR2(pcs, IspVirt2Off(isp, BIU_ISR)) & BIU2100_ISR_RISC_INT)) {
		*isrp = 0;
		return (0);
	}
	r2hisr = bus_space_read_4(pcs->pci_st, pcs->pci_sh,
	    IspVirt2Off(pcs, BIU_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU_R2HST_INTR) == 0) {
		*isrp = 0;
		return (0);
	}
	switch (r2hisr & BIU_R2HST_ISTAT_MASK) {
	case ISPR2HST_ROM_MBX_OK:
	case ISPR2HST_ROM_MBX_FAIL:
	case ISPR2HST_MBX_OK:
	case ISPR2HST_MBX_FAIL:
	case ISPR2HST_ASYNC_EVENT:
		*isrp = r2hisr & 0xffff;
		*mbox0p = (r2hisr >> 16);
		*semap = 1;
		return (1);
	case ISPR2HST_RIO_16:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_RIO1;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CMD_CMPLT;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST_CTIO:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CTIO_DONE;
		*semap = 1;
		return (1);
	case ISPR2HST_RSPQ_UPDATE:
		*isrp = r2hisr & 0xffff;
		*mbox0p = 0;
		*semap = 0;
		return (1);
	default:
		return (0);
	}
}
static u_int16_t
isp_pci_rd_reg(struct ispsoftc *isp, int regoff)
{
	u_int16_t rv;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
	return (rv);
}
static void
isp_pci_wr_reg(struct ispsoftc *isp, int regoff, u_int16_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
}
static u_int16_t
isp_pci_rd_reg_1080(struct ispsoftc *isp, int regoff)
{
	u_int16_t rv, oc = 0;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
	return (rv);
}
static void
isp_pci_wr_reg_1080(struct ispsoftc *isp, int regoff, u_int16_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int16_t oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
}
struct imush {
	struct ispsoftc *isp;
	int error;
};

static void imc(void *, bus_dma_segment_t *, int, int);

static void
imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	if (error) {
		imushp->error = error;
	} else {
		struct ispsoftc *isp = imushp->isp;
		bus_addr_t addr = segs->ds_addr;

		isp->isp_rquest_dma = addr;
		addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
		isp->isp_result_dma = addr;
		if (IS_FC(isp)) {
			addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
			FCPARAM(isp)->isp_scdma = addr;
		}
	}
}
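/*
 * A sketch of the control-space layout that imc() records, based on the
 * address arithmetic above:
 *
 *	isp_rquest_dma -> [ request queue ] ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN)
 *	isp_result_dma -> [ result queue  ] ISP_QUEUE_SIZE(RESULT_QUEUE_LEN)
 *	isp_scdma      -> [ FC scratch    ] (fibre channel cards only)
 */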
/*
 * Should be BUS_SPACE_MAXSIZE, but MAXPHYS is larger than BUS_SPACE_MAXSIZE
 */
#define ISP_NSEGS ((MAXPHYS / PAGE_SIZE) + 1)
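/*
 * For example (illustrative values): with 4KB pages and a 128KB MAXPHYS
 * this allows 33 segments, enough for a maximally misaligned MAXPHYS-sized
 * transfer that touches one extra page.
 */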
static int
isp_pci_mbxdma(struct ispsoftc *isp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	caddr_t base;
	u_int32_t len;
	int i, error, ns;
	bus_size_t alim, slim;
	struct imush im;

	/*
	 * Already been here? If so, leave...
	 */
	if (isp->isp_rquest) {
		return (0);
	}

#ifdef ISP_DAC_SUPPORTED
	alim = BUS_SPACE_UNRESTRICTED;
#else
	alim = BUS_SPACE_MAXADDR_32BIT;
#endif
	if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
		slim = BUS_SPACE_MAXADDR_32BIT;
	} else {
		slim = BUS_SPACE_MAXADDR_24BIT;
	}

	if (bus_dma_tag_create(NULL, 1, slim+1, alim, alim,
	    NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0, &pcs->dmat)) {
		isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
		return (1);
	}
	len = sizeof (XS_T **) * isp->isp_maxcmds;
	isp->isp_xflist = (XS_T **) kmalloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_xflist == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
		return (1);
	}
	len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
	pcs->dmaps = (bus_dmamap_t *) kmalloc(len, M_DEVBUF, M_WAITOK);
	if (pcs->dmaps == NULL) {
		isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage");
		kfree(isp->isp_xflist, M_DEVBUF);
		return (1);
	}
1075 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
1076 len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
1078 len += ISP2100_SCRLEN;
1081 ns = (len / PAGE_SIZE) + 1;
1082 if (bus_dma_tag_create(pcs->dmat, QENTRY_LEN, slim+1, alim, alim,
1083 NULL, NULL, len, ns, slim, 0, &isp->isp_cdmat)) {
1084 isp_prt(isp, ISP_LOGERR,
1085 "cannot create a dma tag for control spaces");
1086 kfree(pcs->dmaps, M_DEVBUF);
1087 kfree(isp->isp_xflist, M_DEVBUF);
1092 if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT,
1093 &isp->isp_cdmap) != 0) {
1094 isp_prt(isp, ISP_LOGERR,
1095 "cannot allocate %d bytes of CCB memory", len);
1096 bus_dma_tag_destroy(isp->isp_cdmat);
1097 kfree(isp->isp_xflist, M_DEVBUF);
1098 kfree(pcs->dmaps, M_DEVBUF);
	for (i = 0; i < isp->isp_maxcmds; i++) {
		error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]);
		if (error) {
			isp_prt(isp, ISP_LOGERR,
			    "error %d creating per-cmd DMA maps", error);
			while (--i >= 0) {
				bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]);
			}
			goto bad;
		}
	}

	im.isp = isp;
	im.error = 0;
	bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0);
	if (im.error) {
		isp_prt(isp, ISP_LOGERR,
		    "error %d loading dma map for control areas", im.error);
		goto bad;
	}
	isp->isp_rquest = base;
	base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	isp->isp_result = base;
	if (IS_FC(isp)) {
		base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
		FCPARAM(isp)->isp_scratch = base;
	}
	return (0);

bad:
	bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap);
	bus_dma_tag_destroy(isp->isp_cdmat);
	kfree(isp->isp_xflist, M_DEVBUF);
	kfree(pcs->dmaps, M_DEVBUF);
	isp->isp_rquest = NULL;
	return (1);
}
typedef struct {
	struct ispsoftc *isp;
	void *cmd_token;
	void *rq;	/* original request */
	u_int16_t *nxtip;
	u_int16_t optr;
	int error;
} mush_t;

#define	MUSHERR_NOQENTRIES	-2
#ifdef ISP_TARGET_MODE
/*
 * We need to handle DMA for target mode differently from initiator mode.
 *
 * DMA mapping and construction and submission of CTIO Request Entries
 * and rendezvous for completion are very tightly coupled because we start
 * out by knowing (per platform) how much data we have to move, but we
 * don't know, up front, how many DMA mapping segments will have to be used
 * to cover that data, so we don't know how many CTIO Request Entries we
 * will end up using. Further, for performance reasons we may want to
 * (on the last CTIO for Fibre Channel), send status too (if all went well).
 *
 * The standard vector still goes through isp_pci_dmasetup, but the callback
 * for the DMA mapping routines comes here instead with the whole transfer
 * mapped and a pointer to a partially filled in already allocated request
 * queue entry. We finish the job.
 */
static void tdma_mk(void *, bus_dma_segment_t *, int, int);
static void tdma_mkfc(void *, bus_dma_segment_t *, int, int);

#define	STATUS_WITH_DATA	1
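/*
 * A worked example of the CTIO chaining done in tdma_mk() below (segment
 * counts illustrative, not the real ISP_RQDSEG value): a transfer that
 * maps to 10 DMA segments with 4 segments per CTIO needs
 * nctios = 10/4 rounded up = 3 CTIOs; the saved syshandle and any
 * CT_SENDSTATUS/CT_CCINCR flags are reattached only to the last one.
 */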
static void
tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	struct ispsoftc *isp;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	ct_entry_t *cto, *qe;
	u_int8_t scsi_status;
	u_int16_t curi, nxti, handle;
	u_int32_t sflags;
	int32_t resid;
	int nth_ctio, nctios, send_status;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	isp = mp->isp;
	csio = mp->cmd_token;
	cto = (ct_entry_t *) mp->rq;
	nxti = *mp->nxtip;

	curi = isp->isp_reqidx;
	qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

	cto->ct_xfrlen = 0;
	cto->ct_seg_count = 0;
	cto->ct_header.rqs_entry_count = 1;
	MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));

	if (nseg == 0) {
		cto->ct_header.rqs_seqno = 1;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d",
		    cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid,
		    cto->ct_tag_val, cto->ct_flags, cto->ct_status,
		    cto->ct_scsi_status, cto->ct_resid);
		ISP_TDQE(isp, "tdma_mk[no data]", curi, cto);
		isp_put_ctio(isp, cto, qe);
		return;
	}
	nctios = nseg / ISP_RQDSEG;
	if (nseg % ISP_RQDSEG) {
		nctios++;
	}

	/*
	 * Save syshandle, and potentially any SCSI status, which we'll
	 * reinsert on the last CTIO we're going to send.
	 */
	handle = cto->ct_syshandle;
	cto->ct_syshandle = 0;
	cto->ct_header.rqs_seqno = 0;
	send_status = (cto->ct_flags & CT_SENDSTATUS) != 0;

	if (send_status) {
		sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR);
		cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR);
		/*
		 * Preserve residual.
		 */
		resid = cto->ct_resid;

		/*
		 * Save actual SCSI status.
		 */
		scsi_status = cto->ct_scsi_status;

#ifndef	STATUS_WITH_DATA
		sflags |= CT_NO_DATA;
		/*
		 * We can't do a status at the same time as a data CTIO, so
		 * we need to synthesize an extra CTIO at this level.
		 */
		nctios++;
#endif
	} else {
		sflags = scsi_status = resid = 0;
	}

	cto->ct_resid = 0;
	cto->ct_scsi_status = 0;
	pcs = (struct isp_pcisoftc *)isp;
	dp = &pcs->dmaps[isp_handle_index(handle)];
	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}
	for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) {
		int seglim;

		seglim = nseg;
		if (seglim) {
			int seg;

			if (seglim > ISP_RQDSEG)
				seglim = ISP_RQDSEG;

			for (seg = 0; seg < seglim; seg++, nseg--) {
				/*
				 * Unlike normal initiator commands, we don't
				 * do any swizzling here.
				 */
				cto->ct_dataseg[seg].ds_count = dm_segs->ds_len;
				cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr;
				cto->ct_xfrlen += dm_segs->ds_len;
				dm_segs++;
			}
			cto->ct_seg_count = seg;
		} else {
			/*
			 * This case should only happen when we're sending an
			 * extra CTIO with final status.
			 */
			if (send_status == 0) {
				isp_prt(isp, ISP_LOGWARN,
				    "tdma_mk ran out of segments");
				mp->error = EINVAL;
				return;
			}
		}
		/*
		 * At this point, the fields ct_lun, ct_iid, ct_tagval,
		 * ct_tagtype, and ct_timeout have been carried over
		 * unchanged from what our caller had set.
		 *
		 * The dataseg fields and the seg_count fields we just got
		 * through setting. The data direction we've preserved all
		 * along and only clear it if we're now sending status.
		 */

		if (nth_ctio == nctios - 1) {
			/*
			 * We're the last in a sequence of CTIOs, so mark
			 * this CTIO and save the handle to the CCB such that
			 * when this CTIO completes we can free dma resources
			 * and do whatever else we need to do to finish the
			 * rest of the command. We *don't* give this to the
			 * firmware to work on- the caller will do that.
			 */
			cto->ct_syshandle = handle;
			cto->ct_header.rqs_seqno = 1;

			if (send_status) {
				cto->ct_scsi_status = scsi_status;
				cto->ct_flags |= sflags;
				cto->ct_resid = resid;
			}
			if (send_status) {
				isp_prt(isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid %d tag %x ct_flags %x "
				    "scsi status %x resid %d",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val, cto->ct_flags,
				    cto->ct_scsi_status, cto->ct_resid);
			} else {
				isp_prt(isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val,
				    cto->ct_flags);
			}
			isp_put_ctio(isp, cto, qe);
			ISP_TDQE(isp, "last tdma_mk", curi, cto);
			if (nctios > 1) {
				MEMORYBARRIER(isp, SYNC_REQUEST,
				    curi, QENTRY_LEN);
			}
		} else {
			ct_entry_t *oqe = qe;

			/*
			 * Make sure syshandle fields are clean
			 */
			cto->ct_syshandle = 0;
			cto->ct_header.rqs_seqno = 0;

			isp_prt(isp, ISP_LOGTDEBUG1,
			    "CTIO[%x] lun%d for ID%d ct_flags 0x%x",
			    cto->ct_fwhandle, csio->ccb_h.target_lun,
			    cto->ct_iid, cto->ct_flags);

			/*
			 * Get a new CTIO entry.
			 */
			qe = (ct_entry_t *)
			    ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
			nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp));
			if (nxti == mp->optr) {
				isp_prt(isp, ISP_LOGTDEBUG0,
				    "Queue Overflow in tdma_mk");
				mp->error = MUSHERR_NOQENTRIES;
				return;
			}

			/*
			 * Now that we're done with the old CTIO,
			 * flush it out to the request queue.
			 */
			ISP_TDQE(isp, "dma_tgt_fc", curi, cto);
			isp_put_ctio(isp, cto, oqe);
			if (nth_ctio != 0) {
				MEMORYBARRIER(isp, SYNC_REQUEST, curi,
				    QENTRY_LEN);
			}
			curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp));

			/*
			 * Reset some fields in the CTIO so we can reuse
			 * for the next one we'll flush to the request
			 * queue.
			 */
			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
			cto->ct_header.rqs_entry_count = 1;
			cto->ct_header.rqs_flags = 0;
			cto->ct_status = 0;
			cto->ct_scsi_status = 0;
			cto->ct_xfrlen = 0;
			cto->ct_resid = 0;
			cto->ct_seg_count = 0;
			MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
		}
	}
	*mp->nxtip = nxti;
}
/*
 * We don't have to do multiple CTIOs here. Instead, we can just do
 * continuation segments as needed. This greatly simplifies the code
 * and improves performance.
 */
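/*
 * Sketch (illustrative counts): a 20-segment transfer puts the first
 * ISP_RQDSEG_T2 segments into the CTIO2 itself and spills the rest into
 * RQSTYPE_DATASEG continuation entries of up to ISP_CDSEG segments each,
 * bumping ct_header.rqs_entry_count for every continuation queued.
 */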
static void
tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	struct ispsoftc *isp;
	ct2_entry_t *cto, *qe;
	u_int16_t curi, nxti;
	int segcnt;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	isp = mp->isp;
	csio = mp->cmd_token;
	cto = (ct2_entry_t *) mp->rq;
	nxti = *mp->nxtip;

	curi = isp->isp_reqidx;
	qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

	if (nseg == 0) {
		if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) {
			isp_prt(isp, ISP_LOGWARN,
			    "dma2_tgt_fc, a status CTIO2 without MODE1 "
			    "set (0x%x)", cto->ct_flags);
			mp->error = EINVAL;
			return;
		}
		/*
		 * We preserve ct_lun, ct_iid, ct_rxid. We set the data
		 * flags to NO DATA and clear relative offset flags.
		 * We preserve the ct_resid and the response area.
		 */
		cto->ct_flags |= CT2_NO_DATA;
		cto->ct_header.rqs_seqno = 1;
		cto->ct_seg_count = 0;
		cto->ct_reloff = 0;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts "
		    "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun,
		    cto->ct_iid, cto->ct_flags, cto->ct_status,
		    cto->rsp.m1.ct_scsi_status, cto->ct_resid);
		isp_put_ctio2(isp, cto, qe);
		ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe);
		return;
	}
	if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) {
		isp_prt(isp, ISP_LOGERR,
		    "dma2_tgt_fc, a data CTIO2 without MODE0 set "
		    "(0x%x)", cto->ct_flags);
		mp->error = EINVAL;
		return;
	}

	/*
	 * Set up the CTIO2 data segments.
	 */
	for (segcnt = 0; cto->ct_seg_count < ISP_RQDSEG_T2 && segcnt < nseg;
	    cto->ct_seg_count++, segcnt++) {
		cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_base =
		    dm_segs[segcnt].ds_addr;
		cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_count =
		    dm_segs[segcnt].ds_len;
		cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
		isp_prt(isp, ISP_LOGTDEBUG1, "isp_send_ctio2: ent0[%d]0x%x:%d",
		    cto->ct_seg_count, dm_segs[segcnt].ds_addr,
		    dm_segs[segcnt].ds_len);
	}
	while (segcnt < nseg) {
		u_int16_t curip;
		int seg;
		ispcontreq_t local, *crq = &local, *qep;

		qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		curip = nxti;
		nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGTDEBUG0,
			    "tdma_mkfc: request queue overflow");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		cto->ct_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
		for (seg = 0; segcnt < nseg && seg < ISP_CDSEG;
		    segcnt++, seg++) {
			crq->req_dataseg[seg].ds_base = dm_segs[segcnt].ds_addr;
			crq->req_dataseg[seg].ds_count = dm_segs[segcnt].ds_len;
			isp_prt(isp, ISP_LOGTDEBUG1,
			    "isp_send_ctio2: ent%d[%d]%x:%u",
			    cto->ct_header.rqs_entry_count-1, seg,
			    dm_segs[segcnt].ds_addr, dm_segs[segcnt].ds_len);
			cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
			cto->ct_seg_count++;
		}
		MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN);
		isp_put_cont_req(isp, crq, qep);
		ISP_TDQE(isp, "cont entry", curi, qep);
	}
	/*
	 * Now do final twiddling for the CTIO itself.
	 */
	cto->ct_header.rqs_seqno = 1;
	isp_prt(isp, ISP_LOGTDEBUG1,
	    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d",
	    cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid,
	    cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status,
	    cto->ct_resid);
	isp_put_ctio2(isp, cto, qe);
	ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe);
	*mp->nxtip = nxti;
}
#endif
static void dma2(void *, bus_dma_segment_t *, int, int);

static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ispsoftc *isp;
	struct ccb_scsiio *csio;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	bus_dma_segment_t *eseg;
	ispreq_t *rq;
	int seglim, datalen;
	u_int16_t nxti;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	if (nseg < 1) {
		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
		mp->error = EFAULT;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	pcs = (struct isp_pcisoftc *)mp->isp;
	dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
	nxti = *mp->nxtip;

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}
	datalen = XS_XFRLEN(csio);

	/*
	 * We're passed an initial partially filled in entry that
	 * has most fields filled in except for data transfer
	 * directions, data lengths, and data segment counts.
	 *
	 * Our job is to fill in the initial request queue entry and
	 * then to start allocating and filling in continuation entries
	 * until we've covered the entire transfer.
	 */
	if (IS_FC(isp)) {
		seglim = ISP_RQDSEG_T2;
		((ispreqt2_t *)rq)->req_totalcnt = datalen;
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN;
		} else {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
		}
	} else {
		if (csio->cdb_len > 12) {
			seglim = 0;
		} else {
			seglim = ISP_RQDSEG;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			rq->req_flags |= REQFLAG_DATA_IN;
		} else {
			rq->req_flags |= REQFLAG_DATA_OUT;
		}
	}
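	/*
	 * Worked example (illustrative counts): if seglim is 4 and the
	 * transfer mapped to 10 segments, the first loop below places
	 * segments 0-3 in the request entry itself; the remaining 6 then
	 * go into RQSTYPE_DATASEG continuation entries, up to ISP_CDSEG
	 * per entry, in the second loop.
	 */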
	eseg = dm_segs + nseg;

	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
		if (IS_FC(isp)) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			rq2->req_dataseg[rq2->req_seg_count].ds_base =
			    dm_segs->ds_addr;
			rq2->req_dataseg[rq2->req_seg_count].ds_count =
			    dm_segs->ds_len;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_base =
			    dm_segs->ds_addr;
			rq->req_dataseg[rq->req_seg_count].ds_count =
			    dm_segs->ds_len;
		}
		datalen -= dm_segs->ds_len;
		rq->req_seg_count++;
		dm_segs++;
	}
	while (datalen > 0 && dm_segs != eseg) {
		u_int16_t onxti;
		ispcontreq_t local, *crq = &local, *cqe;

		cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		onxti = nxti;
		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		rq->req_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

		seglim = 0;
		while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
			crq->req_dataseg[seglim].ds_base =
			    dm_segs->ds_addr;
			crq->req_dataseg[seglim].ds_count =
			    dm_segs->ds_len;
			rq->req_seg_count++;
			seglim++;
			datalen -= dm_segs->ds_len;
			dm_segs++;
		}
		isp_put_cont_req(isp, crq, cqe);
		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
	}
	*mp->nxtip = nxti;
}
static int
isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq,
    u_int16_t *nxtip, u_int16_t optr)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	ispreq_t *qep;
	bus_dmamap_t *dp = NULL;
	mush_t mush, *mp;
	void (*eptr)(void *, bus_dma_segment_t *, int, int);

	qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx);
#ifdef ISP_TARGET_MODE
	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		if (IS_FC(isp)) {
			eptr = tdma_mkfc;
		} else {
			eptr = tdma_mk;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
		    (csio->dxfer_len == 0)) {
			mp = &mush;
			mp->isp = isp;
			mp->cmd_token = csio;
			mp->rq = rq;	/* really a ct_entry_t or ct2_entry_t */
			mp->nxtip = nxtip;
			mp->optr = optr;
			mp->error = 0;
			(*eptr)(mp, NULL, 0, 0);
			goto mbxsync;
		}
	} else
#endif
	eptr = dma2;
	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
	    (csio->dxfer_len == 0)) {
		rq->req_seg_count = 1;
		goto mbxsync;
	}

	/*
	 * Do a virtual grapevine step to collect info for
	 * the callback dma allocation that we have to use...
	 */
	mp = &mush;
	mp->isp = isp;
	mp->cmd_token = csio;
	mp->rq = rq;
	mp->nxtip = nxtip;
	mp->optr = optr;
	mp->error = 0;
	if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
		if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
			int error;
			dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
			error = bus_dmamap_load(pcs->dmat, *dp,
			    csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
			if (error == EINPROGRESS) {
				bus_dmamap_unload(pcs->dmat, *dp);
				mp->error = EINVAL;
				isp_prt(isp, ISP_LOGERR,
				    "deferred dma allocation not supported");
			} else if (error && mp->error == 0) {
				isp_prt(isp, ISP_LOGERR,
				    "error %d in dma mapping code", error);
				mp->error = error;
			}
		} else {
			/* Pointer to physical buffer */
			struct bus_dma_segment seg;
			seg.ds_addr = (bus_addr_t)csio->data_ptr;
			seg.ds_len = csio->dxfer_len;
			(*eptr)(mp, &seg, 1, 0);
		}
	} else {
		struct bus_dma_segment *segs;

		if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Physical segment pointers unsupported");
			mp->error = EINVAL;
		} else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Virtual segment addresses unsupported");
			mp->error = EINVAL;
		} else {
			/* Just use the segments provided */
			segs = (struct bus_dma_segment *) csio->data_ptr;
			(*eptr)(mp, segs, csio->sglist_cnt, 0);
		}
	}
	if (mp->error) {
		int retval = CMD_COMPLETE;
		if (mp->error == MUSHERR_NOQENTRIES) {
			retval = CMD_EAGAIN;
		} else if (mp->error == EFBIG) {
			XS_SETERR(csio, CAM_REQ_TOO_BIG);
		} else if (mp->error == EINVAL) {
			XS_SETERR(csio, CAM_REQ_INVALID);
		} else {
			XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
		}
		return (retval);
	}
mbxsync:
	switch (rq->req_header.rqs_entry_type) {
	case RQSTYPE_REQUEST:
		isp_put_request(isp, rq, qep);
		break;
	case RQSTYPE_CMDONLY:
		isp_put_extended_request(isp, (ispextreq_t *)rq,
		    (ispextreq_t *)qep);
		break;
	case RQSTYPE_T2RQS:
		isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep);
		break;
	}
	return (CMD_QUEUED);
}
static void
isp_pci_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int16_t handle)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)];

	if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE);
	}
	bus_dmamap_unload(pcs->dmat, *dp);
}
static void
isp_pci_reset1(struct ispsoftc *isp)
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
	/* and enable interrupts */
	ENABLE_INTS(isp);
}
static void
isp_pci_dumpregs(struct ispsoftc *isp, const char *msg)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;

	if (msg)
		printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
	else
		printf("%s:\n", device_get_nameunit(isp->isp_dev));
	if (IS_SCSI(isp))
		printf("    biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
	else
		printf("    biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
	printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
	    ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
	printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));

	if (IS_SCSI(isp)) {
		ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
		printf("    cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
		    ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
		    ISP_READ(isp, CDMA_FIFO_STS));
		printf("    ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
		    ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
		    ISP_READ(isp, DDMA_FIFO_STS));
		printf("    sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
		    ISP_READ(isp, SXP_INTERRUPT),
		    ISP_READ(isp, SXP_GROSS_ERR),
		    ISP_READ(isp, SXP_PINS_CTRL));
		ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
	}
	printf("    mbox regs: %x %x %x %x %x\n",
	    ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
	    ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
	    ISP_READ(isp, OUTMAILBOX4));
	printf("    PCI Status Command/Status=%x\n",
	    pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1));
}