1 /* $FreeBSD: src/sys/dev/isp/isp_pci.c,v 1.78.2.4 2002/10/11 18:50:53 mjacob Exp $ */
3 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
6 * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice immediately at the beginning of the file, without modification,
13 * this list of conditions, and the following disclaimer.
14 * 2. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/module.h>
36 #include <sys/malloc.h>
38 #include <bus/pci/pcireg.h>
39 #include <bus/pci/pcivar.h>
41 #include "isp_freebsd.h"
43 static u_int16_t isp_pci_rd_reg(struct ispsoftc *, int);
44 static void isp_pci_wr_reg(struct ispsoftc *, int, u_int16_t);
45 static u_int16_t isp_pci_rd_reg_1080(struct ispsoftc *, int);
46 static void isp_pci_wr_reg_1080(struct ispsoftc *, int, u_int16_t);
48 isp_pci_rd_isr(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
50 isp_pci_rd_isr_2300(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
51 static int isp_pci_mbxdma(struct ispsoftc *);
53 isp_pci_dmasetup(struct ispsoftc *, XS_T *, ispreq_t *, u_int16_t *, u_int16_t);
55 isp_pci_dmateardown(struct ispsoftc *, XS_T *, u_int16_t);
57 static void isp_pci_reset1(struct ispsoftc *);
58 static void isp_pci_dumpregs(struct ispsoftc *, const char *);
60 static struct ispmdvec mdvec = {
71 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
74 static struct ispmdvec mdvec_1080 = {
85 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
88 static struct ispmdvec mdvec_12160 = {
99 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
102 static struct ispmdvec mdvec_2100 = {
114 static struct ispmdvec mdvec_2200 = {
126 static struct ispmdvec mdvec_2300 = {
138 #ifndef PCIM_CMD_INVEN
139 #define PCIM_CMD_INVEN 0x10
141 #ifndef PCIM_CMD_BUSMASTEREN
142 #define PCIM_CMD_BUSMASTEREN 0x0004
144 #ifndef PCIM_CMD_PERRESPEN
145 #define PCIM_CMD_PERRESPEN 0x0040
147 #ifndef PCIM_CMD_SEREN
148 #define PCIM_CMD_SEREN 0x0100
152 #define PCIR_COMMAND 0x04
155 #ifndef PCIR_CACHELNSZ
156 #define PCIR_CACHELNSZ 0x0c
159 #ifndef PCIR_LATTIMER
160 #define PCIR_LATTIMER 0x0d
164 #define PCIR_ROMADDR 0x30
167 #ifndef PCI_VENDOR_QLOGIC
168 #define PCI_VENDOR_QLOGIC 0x1077
171 #ifndef PCI_PRODUCT_QLOGIC_ISP1020
172 #define PCI_PRODUCT_QLOGIC_ISP1020 0x1020
175 #ifndef PCI_PRODUCT_QLOGIC_ISP1080
176 #define PCI_PRODUCT_QLOGIC_ISP1080 0x1080
179 #ifndef PCI_PRODUCT_QLOGIC_ISP10160
180 #define PCI_PRODUCT_QLOGIC_ISP10160 0x1016
183 #ifndef PCI_PRODUCT_QLOGIC_ISP12160
184 #define PCI_PRODUCT_QLOGIC_ISP12160 0x1216
187 #ifndef PCI_PRODUCT_QLOGIC_ISP1240
188 #define PCI_PRODUCT_QLOGIC_ISP1240 0x1240
191 #ifndef PCI_PRODUCT_QLOGIC_ISP1280
192 #define PCI_PRODUCT_QLOGIC_ISP1280 0x1280
195 #ifndef PCI_PRODUCT_QLOGIC_ISP2100
196 #define PCI_PRODUCT_QLOGIC_ISP2100 0x2100
199 #ifndef PCI_PRODUCT_QLOGIC_ISP2200
200 #define PCI_PRODUCT_QLOGIC_ISP2200 0x2200
203 #ifndef PCI_PRODUCT_QLOGIC_ISP2300
204 #define PCI_PRODUCT_QLOGIC_ISP2300 0x2300
207 #ifndef PCI_PRODUCT_QLOGIC_ISP2312
208 #define PCI_PRODUCT_QLOGIC_ISP2312 0x2312
211 #define PCI_QLOGIC_ISP1020 \
212 ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)
214 #define PCI_QLOGIC_ISP1080 \
215 ((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)
217 #define PCI_QLOGIC_ISP10160 \
218 ((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)
220 #define PCI_QLOGIC_ISP12160 \
221 ((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)
223 #define PCI_QLOGIC_ISP1240 \
224 ((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)
226 #define PCI_QLOGIC_ISP1280 \
227 ((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)
229 #define PCI_QLOGIC_ISP2100 \
230 ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)
232 #define PCI_QLOGIC_ISP2200 \
233 ((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)
235 #define PCI_QLOGIC_ISP2300 \
236 ((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)
238 #define PCI_QLOGIC_ISP2312 \
239 ((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)
242 * Odd case for some AMI raid cards... We need to *not* attach to this.
244 #define AMI_RAID_SUBVENDOR_ID 0x101e
246 #define IO_MAP_REG 0x10
247 #define MEM_MAP_REG 0x14
249 #define PCI_DFLT_LTNCY 0x40
250 #define PCI_DFLT_LNSZ 0x10
252 static int isp_pci_probe (device_t);
253 static int isp_pci_attach (device_t);
256 struct isp_pcisoftc {
257 struct ispsoftc pci_isp;
259 struct resource * pci_reg;
260 bus_space_tag_t pci_st;
261 bus_space_handle_t pci_sh;
263 int16_t pci_poff[_NREG_BLKS];
267 ispfwfunc *isp_get_firmware_p = NULL;
269 static device_method_t isp_pci_methods[] = {
270 /* Device interface */
271 DEVMETHOD(device_probe, isp_pci_probe),
272 DEVMETHOD(device_attach, isp_pci_attach),
275 static void isp_pci_intr(void *);
277 static driver_t isp_pci_driver = {
278 "isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
280 static devclass_t isp_devclass;
281 DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, NULL, NULL);
282 MODULE_VERSION(isp, 1);
285 isp_pci_probe(device_t dev)
287 switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
288 case PCI_QLOGIC_ISP1020:
289 device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
291 case PCI_QLOGIC_ISP1080:
292 device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
294 case PCI_QLOGIC_ISP1240:
295 device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
297 case PCI_QLOGIC_ISP1280:
298 device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
300 case PCI_QLOGIC_ISP10160:
301 device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter");
303 case PCI_QLOGIC_ISP12160:
304 if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
307 device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
309 case PCI_QLOGIC_ISP2100:
310 device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
312 case PCI_QLOGIC_ISP2200:
313 device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
315 case PCI_QLOGIC_ISP2300:
316 device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
318 case PCI_QLOGIC_ISP2312:
319 device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
324 if (device_get_unit(dev) == 0 && bootverbose) {
325 kprintf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
326 "Core Version %d.%d\n",
327 ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
328 ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
331 * XXXX: Here is where we might load the f/w module
332 * XXXX: (or increase a reference count to it).
338 isp_pci_attach(device_t dev)
340 struct resource *regs, *irq;
341 int unit, bitmap, rtp, rgd, iqd, m1, m2, isp_debug;
342 u_int32_t data, cmd, linesz, psize, basetype;
343 struct isp_pcisoftc *pcs;
344 struct ispsoftc *isp = NULL;
345 struct ispmdvec *mdvp;
350 * Figure out if we're supposed to skip this one.
352 unit = device_get_unit(dev);
353 if (kgetenv_int("isp_disable", &bitmap)) {
354 if (bitmap & (1 << unit)) {
355 device_printf(dev, "not configuring\n");
357 * But return '0' to preserve HBA numbering.
363 pcs = kmalloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_WAITOK | M_ZERO);
366 * Figure out which we should try first - memory mapping or i/o mapping?
368 m1 = PCIM_CMD_PORTEN;
371 if (kgetenv_int("isp_mem_map", &bitmap)) {
372 if (bitmap & (1 << unit)) {
374 m2 = PCIM_CMD_PORTEN;
378 if (kgetenv_int("isp_io_map", &bitmap)) {
379 if (bitmap & (1 << unit)) {
380 m1 = PCIM_CMD_PORTEN;
385 linesz = PCI_DFLT_LNSZ;
389 cmd = pci_read_config(dev, PCIR_COMMAND, 1);
391 rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
392 rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
393 regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE);
395 if (regs == NULL && (cmd & m2)) {
396 rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
397 rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
398 regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE);
401 device_printf(dev, "unable to map any ports\n");
405 device_printf(dev, "using %s space register mapping\n",
406 (rgd == IO_MAP_REG)? "I/O" : "Memory");
409 pcs->pci_st = rman_get_bustag(regs);
410 pcs->pci_sh = rman_get_bushandle(regs);
412 pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
413 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
414 pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
415 pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
416 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
418 basetype = ISP_HA_SCSI_UNKNOWN;
419 psize = sizeof (sdparam);
420 lim = BUS_SPACE_MAXSIZE_32BIT;
421 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) {
423 basetype = ISP_HA_SCSI_UNKNOWN;
424 psize = sizeof (sdparam);
425 lim = BUS_SPACE_MAXSIZE_24BIT;
427 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) {
429 basetype = ISP_HA_SCSI_1080;
430 psize = sizeof (sdparam);
431 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
432 ISP1080_DMA_REGS_OFF;
434 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) {
436 basetype = ISP_HA_SCSI_1240;
437 psize = 2 * sizeof (sdparam);
438 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
439 ISP1080_DMA_REGS_OFF;
441 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) {
443 basetype = ISP_HA_SCSI_1280;
444 psize = 2 * sizeof (sdparam);
445 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
446 ISP1080_DMA_REGS_OFF;
448 if (pci_get_devid(dev) == PCI_QLOGIC_ISP10160) {
450 basetype = ISP_HA_SCSI_10160;
451 psize = sizeof (sdparam);
452 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
453 ISP1080_DMA_REGS_OFF;
455 if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) {
457 basetype = ISP_HA_SCSI_12160;
458 psize = 2 * sizeof (sdparam);
459 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
460 ISP1080_DMA_REGS_OFF;
462 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) {
464 basetype = ISP_HA_FC_2100;
465 psize = sizeof (fcparam);
466 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
467 PCI_MBOX_REGS2100_OFF;
468 if (pci_get_revid(dev) < 3) {
470 * XXX: Need to get the actual revision
471 * XXX: number of the 2100 FB. At any rate,
472 * XXX: lower cache line size for early revision
478 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) {
480 basetype = ISP_HA_FC_2200;
481 psize = sizeof (fcparam);
482 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
483 PCI_MBOX_REGS2100_OFF;
485 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) {
487 basetype = ISP_HA_FC_2300;
488 psize = sizeof (fcparam);
489 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
490 PCI_MBOX_REGS2300_OFF;
492 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312) {
494 basetype = ISP_HA_FC_2312;
495 psize = sizeof (fcparam);
496 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
497 PCI_MBOX_REGS2300_OFF;
500 isp->isp_param = kmalloc(psize, M_DEVBUF, M_WAITOK | M_ZERO);
501 isp->isp_mdvec = mdvp;
502 isp->isp_type = basetype;
503 isp->isp_revision = pci_get_revid(dev);
504 #ifdef ISP_TARGET_MODE
505 isp->isp_role = ISP_ROLE_BOTH;
507 isp->isp_role = ISP_DEFAULT_ROLES;
513 * Try and find firmware for this device.
516 if (isp_get_firmware_p) {
517 int device = (int) pci_get_device(dev);
518 #ifdef ISP_TARGET_MODE
519 (*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw);
521 (*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw);
526 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER are set.
529 cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN |
530 PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
531 if (IS_2300(isp)) { /* per QLogic errata */
532 cmd &= ~PCIM_CMD_INVEN;
536 * Can't tell if ROM will hang on 'ABOUT FIRMWARE' command.
538 isp->isp_touched = 1;
541 pci_write_config(dev, PCIR_COMMAND, cmd, 1);
544 * Make sure the Cache Line Size register is set sensibly.
546 data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
547 if (data != linesz) {
548 data = PCI_DFLT_LNSZ;
549 isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data);
550 pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
554 * Make sure the Latency Timer is sane.
556 data = pci_read_config(dev, PCIR_LATTIMER, 1);
557 if (data < PCI_DFLT_LTNCY) {
558 data = PCI_DFLT_LTNCY;
559 isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data);
560 pci_write_config(dev, PCIR_LATTIMER, data, 1);
564 * Make sure we've disabled the ROM.
566 data = pci_read_config(dev, PCIR_ROMADDR, 4);
568 pci_write_config(dev, PCIR_ROMADDR, data, 4);
571 irq = bus_alloc_resource(dev, SYS_RES_IRQ, &iqd, 0, ~0,
572 1, RF_ACTIVE | RF_SHAREABLE);
574 device_printf(dev, "could not allocate interrupt\n");
578 if (kgetenv_int("isp_no_fwload", &bitmap)) {
579 if (bitmap & (1 << unit))
580 isp->isp_confopts |= ISP_CFG_NORELOAD;
582 if (kgetenv_int("isp_fwload", &bitmap)) {
583 if (bitmap & (1 << unit))
584 isp->isp_confopts &= ~ISP_CFG_NORELOAD;
586 if (kgetenv_int("isp_no_nvram", &bitmap)) {
587 if (bitmap & (1 << unit))
588 isp->isp_confopts |= ISP_CFG_NONVRAM;
590 if (kgetenv_int("isp_nvram", &bitmap)) {
591 if (bitmap & (1 << unit))
592 isp->isp_confopts &= ~ISP_CFG_NONVRAM;
594 if (kgetenv_int("isp_fcduplex", &bitmap)) {
595 if (bitmap & (1 << unit))
596 isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
598 if (kgetenv_int("isp_no_fcduplex", &bitmap)) {
599 if (bitmap & (1 << unit))
600 isp->isp_confopts &= ~ISP_CFG_FULL_DUPLEX;
602 if (kgetenv_int("isp_nport", &bitmap)) {
603 if (bitmap & (1 << unit))
604 isp->isp_confopts |= ISP_CFG_NPORT;
608 * Because the resource_*_value functions can neither return
609 * 64 bit integer values, nor can they be directly coerced
610 * to interpret the right hand side of the assignment as
611 * you want them to interpret it, we have to force WWN
612 * hint replacement to specify WWN strings with a leading
613 * 'w' (e.g. w50000000aaaa0001). Sigh.
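*
* For example (illustrative only - exactly how the environment parsing
* handles the leading 'w' is platform dependent), setting the isp_portwwn
* or isp_nodewwn tunable to a string like the one above is what the
* kgetenv_quad() calls below are expected to turn into the 64-bit wwn
* that overrides default_port_wwn / default_node_wwn.
*/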
615 if (kgetenv_quad("isp_portwwn", &wwn)) {
616 isp->isp_osinfo.default_port_wwn = wwn;
617 isp->isp_confopts |= ISP_CFG_OWNWWPN;
619 if (isp->isp_osinfo.default_port_wwn == 0) {
620 isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull;
623 if (kgetenv_quad("isp_nodewwn", &wwn)) {
624 isp->isp_osinfo.default_node_wwn = wwn;
625 isp->isp_confopts |= ISP_CFG_OWNWWNN;
627 if (isp->isp_osinfo.default_node_wwn == 0) {
628 isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull;
632 (void) kgetenv_int("isp_debug", &isp_debug);
633 if (bus_setup_intr(dev, irq, 0, isp_pci_intr,
634 isp, &pcs->ih, NULL)) {
635 device_printf(dev, "could not setup interrupt\n");
639 #ifdef ISP_FW_CRASH_DUMP
641 if (kgetenv_int("isp_fw_dump_enable", &bitmap)) {
642 if (bitmap & (1 << unit)) {
645 amt = QLA2200_RISC_IMAGE_DUMP_SIZE;
646 } else if (IS_23XX(isp)) {
647 amt = QLA2300_RISC_IMAGE_DUMP_SIZE;
650 FCPARAM(isp)->isp_dump_data =
651 kmalloc(amt, M_DEVBUF, M_WAITOK);
652 bzero(FCPARAM(isp)->isp_dump_data, amt);
655 "f/w crash dumps not supported for card\n");
662 isp->isp_port = pci_get_function(dev);
666 * Set up logging levels.
669 isp->isp_dblev = isp_debug;
671 isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
674 isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
677 * Make sure we're in reset state.
682 if (isp->isp_state != ISP_RESETSTATE) {
687 if (isp->isp_state != ISP_INITSTATE) {
688 /* If we're a Fibre Channel Card, we allow deferred attach */
696 if (isp->isp_state != ISP_RUNSTATE) {
697 /* If we're a Fibre Channel Card, we allow deferred attach */
705 * XXXX: Here is where we might unload the f/w module
706 * XXXX: (or decrease the reference count to it).
713 if (pcs && pcs->ih) {
714 (void) bus_teardown_intr(dev, irq, pcs->ih);
718 (void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq);
723 (void) bus_release_resource(dev, rtp, rgd, regs);
727 if (pcs->pci_isp.isp_param)
728 kfree(pcs->pci_isp.isp_param, M_DEVBUF);
729 kfree(pcs, M_DEVBUF);
733 * XXXX: Here is where we might unload the f/w module
734 * XXXX: (or decrease the reference count to it).
740 isp_pci_intr(void *arg)
742 struct ispsoftc *isp = arg;
743 u_int16_t isr, sema, mbox;
747 if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
750 int iok = isp->isp_osinfo.intsok;
751 isp->isp_osinfo.intsok = 0;
752 isp_intr(isp, isr, sema, mbox);
753 isp->isp_osinfo.intsok = iok;
759 #define IspVirt2Off(a, x) \
760 (((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
761 _BLK_REG_SHFT] + ((x) & 0xff))
763 #define BXR2(pcs, off) \
764 bus_space_read_2(pcs->pci_st, pcs->pci_sh, off)
765 #define BXW2(pcs, off, v) \
766 bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v)
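/*
* Illustrative sketch (not additional driver code): a virtual register
* offset such as OUTMAILBOX0 encodes its register block in the upper bits
* and the offset within that block in the low byte, so
* IspVirt2Off(isp, OUTMAILBOX0) selects
* pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] - which attach set to
* PCI_MBOX_REGS_OFF, PCI_MBOX_REGS2100_OFF or PCI_MBOX_REGS2300_OFF
* depending on the chip - and adds the low byte of the virtual offset.
* BXR2/BXW2 then perform 16-bit bus_space accesses at that offset through
* the tag and handle saved at attach time.
*/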
770 isp_pci_rd_debounced(struct ispsoftc *isp, int off, u_int16_t *rp)
772 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
773 u_int16_t val0, val1;
777 val0 = BXR2(pcs, IspVirt2Off(isp, off));
778 val1 = BXR2(pcs, IspVirt2Off(isp, off));
779 } while (val0 != val1 && ++i < 1000);
788 isp_pci_rd_isr(struct ispsoftc *isp, u_int16_t *isrp,
789 u_int16_t *semap, u_int16_t *mbp)
791 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
795 if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
798 if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
802 isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR));
803 sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA));
805 isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
806 isr &= INT_PENDING_MASK(isp);
807 sema &= BIU_SEMA_LOCK;
808 if (isr == 0 && sema == 0) {
812 if ((*semap = sema) != 0) {
814 if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) {
818 *mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0));
825 isp_pci_rd_isr_2300(struct ispsoftc *isp, u_int16_t *isrp,
826 u_int16_t *semap, u_int16_t *mbox0p)
828 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
831 if (!(BXR2(pcs, IspVirt2Off(isp, BIU_ISR)) & BIU2100_ISR_RISC_INT)) {
835 r2hisr = bus_space_read_4(pcs->pci_st, pcs->pci_sh,
836 IspVirt2Off(pcs, BIU_R2HSTSLO));
837 isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
838 if ((r2hisr & BIU_R2HST_INTR) == 0) {
842 switch (r2hisr & BIU_R2HST_ISTAT_MASK) {
843 case ISPR2HST_ROM_MBX_OK:
844 case ISPR2HST_ROM_MBX_FAIL:
845 case ISPR2HST_MBX_OK:
846 case ISPR2HST_MBX_FAIL:
847 case ISPR2HST_ASYNC_EVENT:
848 *isrp = r2hisr & 0xffff;
849 *mbox0p = (r2hisr >> 16);
852 case ISPR2HST_RIO_16:
853 *isrp = r2hisr & 0xffff;
854 *mbox0p = ASYNC_RIO1;
858 *isrp = r2hisr & 0xffff;
859 *mbox0p = ASYNC_CMD_CMPLT;
862 case ISPR2HST_FPOST_CTIO:
863 *isrp = r2hisr & 0xffff;
864 *mbox0p = ASYNC_CTIO_DONE;
867 case ISPR2HST_RSPQ_UPDATE:
868 *isrp = r2hisr & 0xffff;
878 isp_pci_rd_reg(struct ispsoftc *isp, int regoff)
881 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
884 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
886 * We will assume that someone has paused the RISC processor.
888 oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
889 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
890 oldconf | BIU_PCI_CONF1_SXP);
892 rv = BXR2(pcs, IspVirt2Off(isp, regoff));
893 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
894 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
900 isp_pci_wr_reg(struct ispsoftc *isp, int regoff, u_int16_t val)
902 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
905 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
907 * We will assume that someone has paused the RISC processor.
909 oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
910 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
911 oldconf | BIU_PCI_CONF1_SXP);
913 BXW2(pcs, IspVirt2Off(isp, regoff), val);
914 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
915 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
920 isp_pci_rd_reg_1080(struct ispsoftc *isp, int regoff)
922 u_int16_t rv, oc = 0;
923 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
925 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
926 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
929 * We will assume that someone has paused the RISC processor.
931 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
932 tc = oc & ~BIU_PCI1080_CONF1_DMA;
933 if (regoff & SXP_BANK1_SELECT)
934 tc |= BIU_PCI1080_CONF1_SXP1;
936 tc |= BIU_PCI1080_CONF1_SXP0;
937 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
938 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
939 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
940 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
941 oc | BIU_PCI1080_CONF1_DMA);
943 rv = BXR2(pcs, IspVirt2Off(isp, regoff));
945 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
951 isp_pci_wr_reg_1080(struct ispsoftc *isp, int regoff, u_int16_t val)
953 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
956 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
957 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
960 * We will assume that someone has paused the RISC processor.
962 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
963 tc = oc & ~BIU_PCI1080_CONF1_DMA;
964 if (regoff & SXP_BANK1_SELECT)
965 tc |= BIU_PCI1080_CONF1_SXP1;
967 tc |= BIU_PCI1080_CONF1_SXP0;
968 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
969 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
970 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
971 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
972 oc | BIU_PCI1080_CONF1_DMA);
974 BXW2(pcs, IspVirt2Off(isp, regoff), val);
976 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
982 struct ispsoftc *isp;
986 static void imc(void *, bus_dma_segment_t *, int, int);
989 imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
991 struct imush *imushp = (struct imush *) arg;
993 imushp->error = error;
995 struct ispsoftc *isp = imushp->isp;
996 bus_addr_t addr = segs->ds_addr;
998 isp->isp_rquest_dma = addr;
999 addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
1000 isp->isp_result_dma = addr;
1002 addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
1003 FCPARAM(isp)->isp_scdma = addr;
1009 * Should be BUS_SPACE_MAXSIZE, but MAXPHYS is larger than BUS_SPACE_MAXSIZE
1011 #define ISP_NSEGS ((MAXPHYS / PAGE_SIZE) + 1)
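/*
* Worked example with illustrative values (not asserting this platform's
* actual configuration): for MAXPHYS = 128KB and PAGE_SIZE = 4KB this is
* (131072 / 4096) + 1 = 33 S/G segments; the "+ 1" lets a maximal transfer
* whose buffer is not page aligned spill onto one extra page.
*/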
1014 isp_pci_mbxdma(struct ispsoftc *isp)
1016 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
1020 bus_size_t alim, slim;
1024 * Already been here? If so, leave...
1026 if (isp->isp_rquest) {
1030 #ifdef ISP_DAC_SUPPORTED
1031 alim = BUS_SPACE_UNRESTRICTED;
1033 alim = BUS_SPACE_MAXADDR_32BIT;
1035 if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
1036 slim = BUS_SPACE_MAXADDR_32BIT;
1038 slim = BUS_SPACE_MAXADDR_24BIT;
1042 if (bus_dma_tag_create(NULL, 1, slim+1, alim, alim,
1043 NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0, &pcs->dmat)) {
1044 isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
1050 len = sizeof (XS_T **) * isp->isp_maxcmds;
1051 isp->isp_xflist = (XS_T **) kmalloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
1052 len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
1053 pcs->dmaps = (bus_dmamap_t *) kmalloc(len, M_DEVBUF, M_WAITOK);
1056 * Allocate and map the request, result queues, plus FC scratch area.
1058 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
1059 len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
1061 len += ISP2100_SCRLEN;
1064 ns = (len / PAGE_SIZE) + 1;
1065 if (bus_dma_tag_create(pcs->dmat, QENTRY_LEN, slim+1, alim, alim,
1066 NULL, NULL, len, ns, slim, 0, &isp->isp_cdmat)) {
1067 isp_prt(isp, ISP_LOGERR,
1068 "cannot create a dma tag for control spaces");
1069 kfree(pcs->dmaps, M_DEVBUF);
1070 kfree(isp->isp_xflist, M_DEVBUF);
1075 if (bus_dmamem_alloc(isp->isp_cdmat, (void *)&base, BUS_DMA_NOWAIT,
1076 &isp->isp_cdmap) != 0) {
1077 isp_prt(isp, ISP_LOGERR,
1078 "cannot allocate %d bytes of CCB memory", len);
1079 bus_dma_tag_destroy(isp->isp_cdmat);
1080 kfree(isp->isp_xflist, M_DEVBUF);
1081 kfree(pcs->dmaps, M_DEVBUF);
1086 for (i = 0; i < isp->isp_maxcmds; i++) {
1087 error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]);
1089 isp_prt(isp, ISP_LOGERR,
1090 "error %d creating per-cmd DMA maps", error);
1092 bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]);
1100 bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0);
1102 isp_prt(isp, ISP_LOGERR,
1103 "error %d loading dma map for control areas", im.error);
1107 isp->isp_rquest = base;
1108 base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
1109 isp->isp_result = base;
1111 base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
1112 FCPARAM(isp)->isp_scratch = base;
1118 bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap);
1119 bus_dma_tag_destroy(isp->isp_cdmat);
1120 kfree(isp->isp_xflist, M_DEVBUF);
1121 kfree(pcs->dmaps, M_DEVBUF);
1123 isp->isp_rquest = NULL;
1128 struct ispsoftc *isp;
1136 #define MUSHERR_NOQENTRIES -2
1138 #ifdef ISP_TARGET_MODE
1140 * We need to handle DMA for target mode differently from initiator mode.
1142 * DMA mapping and construction and submission of CTIO Request Entries
1143 * and rendezvous for completion are very tightly coupled because we start
1144 * out by knowing (per platform) how much data we have to move, but we
1145 * don't know, up front, how many DMA mapping segments will have to be used
1146 * to cover that data, so we don't know how many CTIO Request Entries we
1147 * will end up using. Further, for performance reasons we may want to
1148 * (on the last CTIO for Fibre Channel), send status too (if all went well).
1150 * The standard vector still goes through isp_pci_dmasetup, but the callback
1151 * for the DMA mapping routines comes here instead with the whole transfer
1152 * mapped and a pointer to a partially filled-in, already allocated request
1153 * queue entry. We finish the job.
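*
* As a worked illustration (hypothetical numbers, not values taken from
* the headers): if the bus_dma callback handed tdma_mk() 10 segments and
* ISP_RQDSEG allowed 4 data segments per CTIO, the code below would build
* 10 / 4 = 2 full CTIOs plus one partial CTIO, i.e. nctios = 3, and only
* the last CTIO carries the saved syshandle and (if requested) the SCSI
* status - or a synthesized status-only CTIO when STATUS_WITH_DATA is not
* defined.
*/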
1155 static void tdma_mk(void *, bus_dma_segment_t *, int, int);
1156 static void tdma_mkfc(void *, bus_dma_segment_t *, int, int);
1158 #define STATUS_WITH_DATA 1
1161 tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1164 struct ccb_scsiio *csio;
1165 struct ispsoftc *isp;
1166 struct isp_pcisoftc *pcs;
1168 ct_entry_t *cto, *qe;
1169 u_int8_t scsi_status;
1170 u_int16_t curi, nxti, handle;
1173 int nth_ctio, nctios, send_status;
1175 mp = (mush_t *) arg;
1182 csio = mp->cmd_token;
1184 curi = isp->isp_reqidx;
1185 qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);
1188 cto->ct_seg_count = 0;
1189 cto->ct_header.rqs_entry_count = 1;
1190 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
1193 cto->ct_header.rqs_seqno = 1;
1194 isp_prt(isp, ISP_LOGTDEBUG1,
1195 "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d",
1196 cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid,
1197 cto->ct_tag_val, cto->ct_flags, cto->ct_status,
1198 cto->ct_scsi_status, cto->ct_resid);
1199 ISP_TDQE(isp, "tdma_mk[no data]", curi, cto);
1200 isp_put_ctio(isp, cto, qe);
1204 nctios = nseg / ISP_RQDSEG;
1205 if (nseg % ISP_RQDSEG) {
1210 * Save syshandle, and potentially any SCSI status, which we'll
1211 * reinsert on the last CTIO we're going to send.
1214 handle = cto->ct_syshandle;
1215 cto->ct_syshandle = 0;
1216 cto->ct_header.rqs_seqno = 0;
1217 send_status = (cto->ct_flags & CT_SENDSTATUS) != 0;
1220 sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR);
1221 cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR);
1223 * Preserve residual.
1225 resid = cto->ct_resid;
1228 * Save actual SCSI status.
1230 scsi_status = cto->ct_scsi_status;
1232 #ifndef STATUS_WITH_DATA
1233 sflags |= CT_NO_DATA;
1235 * We can't do a status at the same time as a data CTIO, so
1236 * we need to synthesize an extra CTIO at this level.
1241 sflags = scsi_status = resid = 0;
1245 cto->ct_scsi_status = 0;
1247 pcs = (struct isp_pcisoftc *)isp;
1248 dp = &pcs->dmaps[isp_handle_index(handle)];
1249 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1250 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
1252 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
1257 for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) {
1264 if (seglim > ISP_RQDSEG)
1265 seglim = ISP_RQDSEG;
1267 for (seg = 0; seg < seglim; seg++, nseg--) {
1269 * Unlike normal initiator commands, we don't
1270 * do any swizzling here.
1272 cto->ct_dataseg[seg].ds_count = dm_segs->ds_len;
1273 cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr;
1274 cto->ct_xfrlen += dm_segs->ds_len;
1277 cto->ct_seg_count = seg;
1280 * This case should only happen when we're sending an
1281 * extra CTIO with final status.
1283 if (send_status == 0) {
1284 isp_prt(isp, ISP_LOGWARN,
1285 "tdma_mk ran out of segments");
1292 * At this point, the fields ct_lun, ct_iid, ct_tagval,
1293 * ct_tagtype, and ct_timeout have been carried over
1294 * unchanged from what our caller had set.
1296 * The dataseg fields and the seg_count fields we just got
1297 * through setting. The data direction we've preserved all
1298 * along and only clear it if we're now sending status.
1301 if (nth_ctio == nctios - 1) {
1303 * We're the last in a sequence of CTIOs, so mark
1304 * this CTIO and save the handle to the CCB such that
1305 * when this CTIO completes we can free dma resources
1306 * and do whatever else we need to do to finish the
1307 * rest of the command. We *don't* give this to the
1308 * firmware to work on- the caller will do that.
1311 cto->ct_syshandle = handle;
1312 cto->ct_header.rqs_seqno = 1;
1315 cto->ct_scsi_status = scsi_status;
1316 cto->ct_flags |= sflags;
1317 cto->ct_resid = resid;
1320 isp_prt(isp, ISP_LOGTDEBUG1,
1321 "CTIO[%x] lun%d iid %d tag %x ct_flags %x "
1322 "scsi status %x resid %d",
1323 cto->ct_fwhandle, csio->ccb_h.target_lun,
1324 cto->ct_iid, cto->ct_tag_val, cto->ct_flags,
1325 cto->ct_scsi_status, cto->ct_resid);
1327 isp_prt(isp, ISP_LOGTDEBUG1,
1328 "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x",
1329 cto->ct_fwhandle, csio->ccb_h.target_lun,
1330 cto->ct_iid, cto->ct_tag_val,
1333 isp_put_ctio(isp, cto, qe);
1334 ISP_TDQE(isp, "last tdma_mk", curi, cto);
1336 MEMORYBARRIER(isp, SYNC_REQUEST,
1340 ct_entry_t *oqe = qe;
1343 * Make sure syshandle fields are clean
1345 cto->ct_syshandle = 0;
1346 cto->ct_header.rqs_seqno = 0;
1348 isp_prt(isp, ISP_LOGTDEBUG1,
1349 "CTIO[%x] lun%d for ID%d ct_flags 0x%x",
1350 cto->ct_fwhandle, csio->ccb_h.target_lun,
1351 cto->ct_iid, cto->ct_flags);
1357 ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
1358 nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp));
1359 if (nxti == mp->optr) {
1360 isp_prt(isp, ISP_LOGTDEBUG0,
1361 "Queue Overflow in tdma_mk");
1362 mp->error = MUSHERR_NOQENTRIES;
1367 * Now that we're done with the old CTIO,
1368 * flush it out to the request queue.
1370 ISP_TDQE(isp, "dma_tgt_fc", curi, cto);
1371 isp_put_ctio(isp, cto, oqe);
1372 if (nth_ctio != 0) {
1373 MEMORYBARRIER(isp, SYNC_REQUEST, curi,
1376 curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp));
1379 * Reset some fields in the CTIO so we can reuse
1380 * for the next one we'll flush to the request
1383 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
1384 cto->ct_header.rqs_entry_count = 1;
1385 cto->ct_header.rqs_flags = 0;
1387 cto->ct_scsi_status = 0;
1390 cto->ct_seg_count = 0;
1391 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
1398 * We don't have to do multiple CTIOs here. Instead, we can just do
1399 * continuation segments as needed. This greatly simplifies the code
1400 * and improves performance.
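*
* Sketching the arithmetic symbolically (without asserting the header
* values): the CTIO2 itself holds up to ISP_RQDSEG_T2 data segments and
* each RQSTYPE_DATASEG continuation entry appended below holds up to
* ISP_CDSEG more, so an N-segment mapping needs the CTIO2 plus roughly
* ceil((N - ISP_RQDSEG_T2) / ISP_CDSEG) continuation entries instead of
* N / ISP_RQDSEG separate CTIOs as in tdma_mk() above.
*/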
1404 tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1407 struct ccb_scsiio *csio;
1408 struct ispsoftc *isp;
1409 ct2_entry_t *cto, *qe;
1410 u_int16_t curi, nxti;
1413 mp = (mush_t *) arg;
1420 csio = mp->cmd_token;
1423 curi = isp->isp_reqidx;
1424 qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);
1427 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) {
1428 isp_prt(isp, ISP_LOGWARN,
1429 "dma2_tgt_fc, a status CTIO2 without MODE1 "
1430 "set (0x%x)", cto->ct_flags);
1435 * We preserve ct_lun, ct_iid, ct_rxid. We set the data
1436 * flags to NO DATA and clear relative offset flags.
1437 * We preserve the ct_resid and the response area.
1439 cto->ct_header.rqs_seqno = 1;
1440 cto->ct_seg_count = 0;
1442 isp_prt(isp, ISP_LOGTDEBUG1,
1443 "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts "
1444 "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun,
1445 cto->ct_iid, cto->ct_flags, cto->ct_status,
1446 cto->rsp.m1.ct_scsi_status, cto->ct_resid);
1447 isp_put_ctio2(isp, cto, qe);
1448 ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe);
1452 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) {
1453 isp_prt(isp, ISP_LOGERR,
1454 "dma2_tgt_fc, a data CTIO2 without MODE0 set "
1455 "(0x%x)", cto->ct_flags);
1464 * Set up the CTIO2 data segments.
1466 for (segcnt = 0; cto->ct_seg_count < ISP_RQDSEG_T2 && segcnt < nseg;
1467 cto->ct_seg_count++, segcnt++) {
1468 cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_base =
1469 dm_segs[segcnt].ds_addr;
1470 cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_count =
1471 dm_segs[segcnt].ds_len;
1472 cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
1473 isp_prt(isp, ISP_LOGTDEBUG1, "isp_send_ctio2: ent0[%d]0x%x:%d",
1474 cto->ct_seg_count, dm_segs[segcnt].ds_addr,
1475 dm_segs[segcnt].ds_len);
1478 while (segcnt < nseg) {
1481 ispcontreq_t local, *crq = &local, *qep;
1483 qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
1485 nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp));
1486 if (nxti == mp->optr) {
1488 isp_prt(isp, ISP_LOGTDEBUG0,
1489 "tdma_mkfc: request queue overflow");
1490 mp->error = MUSHERR_NOQENTRIES;
1493 cto->ct_header.rqs_entry_count++;
1494 MEMZERO((void *)crq, sizeof (*crq));
1495 crq->req_header.rqs_entry_count = 1;
1496 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
1497 for (seg = 0; segcnt < nseg && seg < ISP_CDSEG;
1499 crq->req_dataseg[seg].ds_base = dm_segs[segcnt].ds_addr;
1500 crq->req_dataseg[seg].ds_count = dm_segs[segcnt].ds_len;
1501 isp_prt(isp, ISP_LOGTDEBUG1,
1502 "isp_send_ctio2: ent%d[%d]%x:%u",
1503 cto->ct_header.rqs_entry_count-1, seg,
1504 dm_segs[segcnt].ds_addr, dm_segs[segcnt].ds_len);
1505 cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
1506 cto->ct_seg_count++;
1508 MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN);
1509 isp_put_cont_req(isp, crq, qep);
1510 ISP_TDQE(isp, "cont entry", curi, qep);
1514 * Now do final twiddling for the CTIO itself.
1516 cto->ct_header.rqs_seqno = 1;
1517 isp_prt(isp, ISP_LOGTDEBUG1,
1518 "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d",
1519 cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid,
1520 cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status,
1522 isp_put_ctio2(isp, cto, qe);
1523 ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe);
1528 static void dma2(void *, bus_dma_segment_t *, int, int);
1531 dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1534 struct ispsoftc *isp;
1535 struct ccb_scsiio *csio;
1536 struct isp_pcisoftc *pcs;
1538 bus_dma_segment_t *eseg;
1540 int seglim, datalen;
1543 mp = (mush_t *) arg;
1550 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
1554 csio = mp->cmd_token;
1557 pcs = (struct isp_pcisoftc *)mp->isp;
1558 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
1561 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1562 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
1564 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
1567 datalen = XS_XFRLEN(csio);
1570 * We're passed an initial partially filled in entry that
1571 * has most fields filled in except for data transfer
1574 * Our job is to fill in the initial request queue entry and
1575 * then to start allocating and filling in continuation entries
1576 * until we've covered the entire transfer.
1580 seglim = ISP_RQDSEG_T2;
1581 ((ispreqt2_t *)rq)->req_totalcnt = datalen;
1582 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1583 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN;
1585 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
1588 if (csio->cdb_len > 12) {
1591 seglim = ISP_RQDSEG;
1593 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1594 rq->req_flags |= REQFLAG_DATA_IN;
1596 rq->req_flags |= REQFLAG_DATA_OUT;
1600 eseg = dm_segs + nseg;
1602 while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
1604 ispreqt2_t *rq2 = (ispreqt2_t *)rq;
1605 rq2->req_dataseg[rq2->req_seg_count].ds_base =
1607 rq2->req_dataseg[rq2->req_seg_count].ds_count =
1610 rq->req_dataseg[rq->req_seg_count].ds_base =
1612 rq->req_dataseg[rq->req_seg_count].ds_count =
1615 datalen -= dm_segs->ds_len;
1616 rq->req_seg_count++;
1620 while (datalen > 0 && dm_segs != eseg) {
1622 ispcontreq_t local, *crq = &local, *cqe;
1624 cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
1626 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
1627 if (nxti == mp->optr) {
1628 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
1629 mp->error = MUSHERR_NOQENTRIES;
1632 rq->req_header.rqs_entry_count++;
1633 MEMZERO((void *)crq, sizeof (*crq));
1634 crq->req_header.rqs_entry_count = 1;
1635 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
1638 while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
1639 crq->req_dataseg[seglim].ds_base =
1641 crq->req_dataseg[seglim].ds_count =
1643 rq->req_seg_count++;
1646 datalen -= dm_segs->ds_len;
1648 isp_put_cont_req(isp, crq, cqe);
1649 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
1655 isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq,
1656 u_int16_t *nxtip, u_int16_t optr)
1658 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
1660 bus_dmamap_t *dp = NULL;
1662 void (*eptr)(void *, bus_dma_segment_t *, int, int);
1664 qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx);
1665 #ifdef ISP_TARGET_MODE
1666 if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
1672 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
1673 (csio->dxfer_len == 0)) {
1676 mp->cmd_token = csio;
1677 mp->rq = rq; /* really a ct_entry_t or ct2_entry_t */
1681 (*eptr)(mp, NULL, 0, 0);
1689 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
1690 (csio->dxfer_len == 0)) {
1691 rq->req_seg_count = 1;
1696 * Do a virtual grapevine step to collect info for
1697 * the callback dma allocation that we have to use...
1701 mp->cmd_token = csio;
1707 if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
1708 if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
1710 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
1712 error = bus_dmamap_load(pcs->dmat, *dp,
1713 csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
1714 if (error == EINPROGRESS) {
1715 bus_dmamap_unload(pcs->dmat, *dp);
1717 isp_prt(isp, ISP_LOGERR,
1718 "deferred dma allocation not supported");
1719 } else if (error && mp->error == 0) {
1721 isp_prt(isp, ISP_LOGERR,
1722 "error %d in dma mapping code", error);
1728 /* Pointer to physical buffer */
1729 struct bus_dma_segment seg;
1730 seg.ds_addr = (bus_addr_t)csio->data_ptr;
1731 seg.ds_len = csio->dxfer_len;
1732 (*eptr)(mp, &seg, 1, 0);
1735 struct bus_dma_segment *segs;
1737 if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
1738 isp_prt(isp, ISP_LOGERR,
1739 "Physical segment pointers unsupported");
1741 } else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
1742 isp_prt(isp, ISP_LOGERR,
1743 "Virtual segment addresses unsupported");
1746 /* Just use the segments provided */
1747 segs = (struct bus_dma_segment *) csio->data_ptr;
1748 (*eptr)(mp, segs, csio->sglist_cnt, 0);
1752 int retval = CMD_COMPLETE;
1753 if (mp->error == MUSHERR_NOQENTRIES) {
1754 retval = CMD_EAGAIN;
1755 } else if (mp->error == EFBIG) {
1756 XS_SETERR(csio, CAM_REQ_TOO_BIG);
1757 } else if (mp->error == EINVAL) {
1758 XS_SETERR(csio, CAM_REQ_INVALID);
1760 XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
1765 switch (rq->req_header.rqs_entry_type) {
1766 case RQSTYPE_REQUEST:
1767 isp_put_request(isp, rq, qep);
1769 case RQSTYPE_CMDONLY:
1770 isp_put_extended_request(isp, (ispextreq_t *)rq,
1771 (ispextreq_t *)qep);
1774 isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep);
1777 return (CMD_QUEUED);
1781 isp_pci_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int16_t handle)
1783 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
1784 bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)];
1785 if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1786 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD);
1788 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE);
1790 bus_dmamap_unload(pcs->dmat, *dp);
1795 isp_pci_reset1(struct ispsoftc *isp)
1797 /* Make sure the BIOS is disabled */
1798 isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
1799 /* and enable interrupts */
1804 isp_pci_dumpregs(struct ispsoftc *isp, const char *msg)
1806 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
1808 kprintf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
1810 kprintf("%s:\n", device_get_nameunit(isp->isp_dev));
1812 kprintf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
1814 kprintf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
1815 kprintf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
1816 ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
1817 kprintf("risc_hccr=%x\n", ISP_READ(isp, HCCR));
1821 ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
1822 kprintf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
1823 ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
1824 ISP_READ(isp, CDMA_FIFO_STS));
1825 kprintf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
1826 ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
1827 ISP_READ(isp, DDMA_FIFO_STS));
1828 kprintf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
1829 ISP_READ(isp, SXP_INTERRUPT),
1830 ISP_READ(isp, SXP_GROSS_ERR),
1831 ISP_READ(isp, SXP_PINS_CTRL));
1832 ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
1834 kprintf(" mbox regs: %x %x %x %x %x\n",
1835 ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
1836 ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
1837 ISP_READ(isp, OUTMAILBOX4));
1838 kprintf(" PCI Status Command/Status=%x\n",
1839 pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1));