2 * Copyright (c) 1997-2008 by Matthew Jacob
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice immediately at the beginning of the file, without modification,
10 * this list of conditions, and the following disclaimer.
11 * 2. The name of the author may not be used to endorse or promote products
12 * derived from this software without specific prior written permission.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
18 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * $FreeBSD: src/sys/dev/isp/isp_pci.c,v 1.159 2011/11/16 02:52:24 mjacob Exp $
29 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/kernel.h>
36 #include <sys/module.h>
37 #include <sys/linker.h>
38 #include <sys/firmware.h>
40 #include <sys/stdint.h>
41 #include <bus/pci/pcireg.h>
42 #include <bus/pci/pcivar.h>
44 #include <sys/malloc.h>
47 #include <dev/disk/isp/isp_freebsd.h>
49 static uint32_t isp_pci_rd_reg(ispsoftc_t *, int);
50 static void isp_pci_wr_reg(ispsoftc_t *, int, uint32_t);
51 static uint32_t isp_pci_rd_reg_1080(ispsoftc_t *, int);
52 static void isp_pci_wr_reg_1080(ispsoftc_t *, int, uint32_t);
53 static uint32_t isp_pci_rd_reg_2400(ispsoftc_t *, int);
54 static void isp_pci_wr_reg_2400(ispsoftc_t *, int, uint32_t);
55 static int isp_pci_rd_isr(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *);
56 static int isp_pci_rd_isr_2300(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *);
57 static int isp_pci_rd_isr_2400(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *);
58 static int isp_pci_mbxdma(ispsoftc_t *);
59 static int isp_pci_dmasetup(ispsoftc_t *, XS_T *, void *);
62 static void isp_pci_reset0(ispsoftc_t *);
63 static void isp_pci_reset1(ispsoftc_t *);
64 static void isp_pci_dumpregs(ispsoftc_t *, const char *);
66 static struct ispmdvec mdvec = {
72 isp_common_dmateardown,
77 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
80 static struct ispmdvec mdvec_1080 = {
86 isp_common_dmateardown,
91 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
94 static struct ispmdvec mdvec_12160 = {
100 isp_common_dmateardown,
105 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
108 static struct ispmdvec mdvec_2100 = {
114 isp_common_dmateardown,
120 static struct ispmdvec mdvec_2200 = {
126 isp_common_dmateardown,
132 static struct ispmdvec mdvec_2300 = {
138 isp_common_dmateardown,
144 static struct ispmdvec mdvec_2400 = {
150 isp_common_dmateardown,
156 static struct ispmdvec mdvec_2500 = {
162 isp_common_dmateardown,
168 #ifndef PCIM_CMD_INVEN
169 #define PCIM_CMD_INVEN 0x10
171 #ifndef PCIM_CMD_BUSMASTEREN
172 #define PCIM_CMD_BUSMASTEREN 0x0004
174 #ifndef PCIM_CMD_PERRESPEN
175 #define PCIM_CMD_PERRESPEN 0x0040
177 #ifndef PCIM_CMD_SEREN
178 #define PCIM_CMD_SEREN 0x0100
180 #ifndef PCIM_CMD_INTX_DISABLE
181 #define PCIM_CMD_INTX_DISABLE 0x0400
185 #define PCIR_COMMAND 0x04
188 #ifndef PCIR_CACHELNSZ
189 #define PCIR_CACHELNSZ 0x0c
192 #ifndef PCIR_LATTIMER
193 #define PCIR_LATTIMER 0x0d
197 #define PCIR_ROMADDR 0x30
200 #ifndef PCI_VENDOR_QLOGIC
201 #define PCI_VENDOR_QLOGIC 0x1077
204 #ifndef PCI_PRODUCT_QLOGIC_ISP1020
205 #define PCI_PRODUCT_QLOGIC_ISP1020 0x1020
208 #ifndef PCI_PRODUCT_QLOGIC_ISP1080
209 #define PCI_PRODUCT_QLOGIC_ISP1080 0x1080
212 #ifndef PCI_PRODUCT_QLOGIC_ISP10160
213 #define PCI_PRODUCT_QLOGIC_ISP10160 0x1016
216 #ifndef PCI_PRODUCT_QLOGIC_ISP12160
217 #define PCI_PRODUCT_QLOGIC_ISP12160 0x1216
220 #ifndef PCI_PRODUCT_QLOGIC_ISP1240
221 #define PCI_PRODUCT_QLOGIC_ISP1240 0x1240
224 #ifndef PCI_PRODUCT_QLOGIC_ISP1280
225 #define PCI_PRODUCT_QLOGIC_ISP1280 0x1280
228 #ifndef PCI_PRODUCT_QLOGIC_ISP2100
229 #define PCI_PRODUCT_QLOGIC_ISP2100 0x2100
232 #ifndef PCI_PRODUCT_QLOGIC_ISP2200
233 #define PCI_PRODUCT_QLOGIC_ISP2200 0x2200
236 #ifndef PCI_PRODUCT_QLOGIC_ISP2300
237 #define PCI_PRODUCT_QLOGIC_ISP2300 0x2300
240 #ifndef PCI_PRODUCT_QLOGIC_ISP2312
241 #define PCI_PRODUCT_QLOGIC_ISP2312 0x2312
244 #ifndef PCI_PRODUCT_QLOGIC_ISP2322
245 #define PCI_PRODUCT_QLOGIC_ISP2322 0x2322
248 #ifndef PCI_PRODUCT_QLOGIC_ISP2422
249 #define PCI_PRODUCT_QLOGIC_ISP2422 0x2422
252 #ifndef PCI_PRODUCT_QLOGIC_ISP2432
253 #define PCI_PRODUCT_QLOGIC_ISP2432 0x2432
256 #ifndef PCI_PRODUCT_QLOGIC_ISP2532
257 #define PCI_PRODUCT_QLOGIC_ISP2532 0x2532
260 #ifndef PCI_PRODUCT_QLOGIC_ISP6312
261 #define PCI_PRODUCT_QLOGIC_ISP6312 0x6312
264 #ifndef PCI_PRODUCT_QLOGIC_ISP6322
265 #define PCI_PRODUCT_QLOGIC_ISP6322 0x6322
268 #ifndef PCI_PRODUCT_QLOGIC_ISP5432
269 #define PCI_PRODUCT_QLOGIC_ISP5432 0x5432
272 #define PCI_QLOGIC_ISP5432 \
273 ((PCI_PRODUCT_QLOGIC_ISP5432 << 16) | PCI_VENDOR_QLOGIC)
275 #define PCI_QLOGIC_ISP1020 \
276 ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)
278 #define PCI_QLOGIC_ISP1080 \
279 ((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)
281 #define PCI_QLOGIC_ISP10160 \
282 ((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)
284 #define PCI_QLOGIC_ISP12160 \
285 ((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)
287 #define PCI_QLOGIC_ISP1240 \
288 ((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)
290 #define PCI_QLOGIC_ISP1280 \
291 ((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)
293 #define PCI_QLOGIC_ISP2100 \
294 ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)
296 #define PCI_QLOGIC_ISP2200 \
297 ((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)
299 #define PCI_QLOGIC_ISP2300 \
300 ((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)
302 #define PCI_QLOGIC_ISP2312 \
303 ((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)
305 #define PCI_QLOGIC_ISP2322 \
306 ((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC)
308 #define PCI_QLOGIC_ISP2422 \
309 ((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC)
311 #define PCI_QLOGIC_ISP2432 \
312 ((PCI_PRODUCT_QLOGIC_ISP2432 << 16) | PCI_VENDOR_QLOGIC)
314 #define PCI_QLOGIC_ISP2532 \
315 ((PCI_PRODUCT_QLOGIC_ISP2532 << 16) | PCI_VENDOR_QLOGIC)
317 #define PCI_QLOGIC_ISP6312 \
318 ((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC)
320 #define PCI_QLOGIC_ISP6322 \
321 ((PCI_PRODUCT_QLOGIC_ISP6322 << 16) | PCI_VENDOR_QLOGIC)
324 * Odd case for some AMI raid cards... We need to *not* attach to this.
326 #define AMI_RAID_SUBVENDOR_ID 0x101e
328 #define IO_MAP_REG 0x10
329 #define MEM_MAP_REG 0x14
331 #define PCI_DFLT_LTNCY 0x40
332 #define PCI_DFLT_LNSZ 0x10
334 static int isp_pci_probe (device_t);
335 static int isp_pci_attach (device_t);
336 static int isp_pci_detach (device_t);
339 #define ISP_PCD(isp) ((struct isp_pcisoftc *)isp)->pci_dev
340 struct isp_pcisoftc {
343 struct resource * regs;
349 int16_t pci_poff[_NREG_BLKS];
355 static device_method_t isp_pci_methods[] = {
356 /* Device interface */
357 DEVMETHOD(device_probe, isp_pci_probe),
358 DEVMETHOD(device_attach, isp_pci_attach),
359 DEVMETHOD(device_detach, isp_pci_detach),
363 static driver_t isp_pci_driver = {
364 "isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
366 static devclass_t isp_devclass;
367 DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);
370 isp_pci_probe(device_t dev)
372 switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
373 case PCI_QLOGIC_ISP1020:
374 device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
376 case PCI_QLOGIC_ISP1080:
377 device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
379 case PCI_QLOGIC_ISP1240:
380 device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
382 case PCI_QLOGIC_ISP1280:
383 device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
385 case PCI_QLOGIC_ISP10160:
386 device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter");
388 case PCI_QLOGIC_ISP12160:
389 if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
392 device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
394 case PCI_QLOGIC_ISP2100:
395 device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
397 case PCI_QLOGIC_ISP2200:
398 device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
400 case PCI_QLOGIC_ISP2300:
401 device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
403 case PCI_QLOGIC_ISP2312:
404 device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
406 case PCI_QLOGIC_ISP2322:
407 device_set_desc(dev, "Qlogic ISP 2322 PCI FC-AL Adapter");
409 case PCI_QLOGIC_ISP2422:
410 device_set_desc(dev, "Qlogic ISP 2422 PCI FC-AL Adapter");
412 case PCI_QLOGIC_ISP2432:
413 device_set_desc(dev, "Qlogic ISP 2432 PCI FC-AL Adapter");
415 case PCI_QLOGIC_ISP2532:
416 device_set_desc(dev, "Qlogic ISP 2532 PCI FC-AL Adapter");
418 case PCI_QLOGIC_ISP5432:
419 device_set_desc(dev, "Qlogic ISP 5432 PCI FC-AL Adapter");
421 case PCI_QLOGIC_ISP6312:
422 device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter");
424 case PCI_QLOGIC_ISP6322:
425 device_set_desc(dev, "Qlogic ISP 6322 PCI FC-AL Adapter");
430 if (isp_announced == 0 && bootverbose) {
431 kprintf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
432 "Core Version %d.%d\n",
433 ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
434 ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
438 * XXXX: Here is where we might load the f/w module
439 * XXXX: (or increase a reference count to it).
441 return (BUS_PROBE_DEFAULT);
445 isp_get_generic_options(device_t dev, ispsoftc_t *isp, int *nvp)
450 * Figure out if we're supposed to skip this one.
453 if (resource_int_value(device_get_name(dev), device_get_unit(dev), "disable", &tval) == 0 && tval) {
454 device_printf(dev, "disabled at user request\n");
455 isp->isp_osinfo.disabled = 1;
460 if (resource_int_value(device_get_name(dev), device_get_unit(dev), "fwload_disable", &tval) == 0 && tval != 0) {
461 isp->isp_confopts |= ISP_CFG_NORELOAD;
464 if (resource_int_value(device_get_name(dev), device_get_unit(dev), "ignore_nvram", &tval) == 0 && tval != 0) {
465 isp->isp_confopts |= ISP_CFG_NONVRAM;
468 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "debug", &tval);
470 isp->isp_dblev = tval;
472 isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
475 isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
477 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "vports", &tval);
478 if (tval > 0 && tval < 127) {
484 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "autoconfig", &tval);
485 isp_autoconfig = tval;
487 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "quickboot_time", &tval);
488 isp_quickboot_time = tval;
491 if (resource_int_value(device_get_name(dev), device_get_unit(dev), "forcemulti", &tval) == 0 && tval != 0) {
492 isp->isp_osinfo.forcemulti = 1;
497 isp_get_pci_options(device_t dev, int *m1, int *m2)
501 * Which we should try first - memory mapping or i/o mapping?
503 * We used to try memory first followed by i/o on alpha, otherwise
504 * the reverse, but we should just try memory first all the time now.
506 *m1 = PCIM_CMD_MEMEN;
507 *m2 = PCIM_CMD_PORTEN;
510 if (resource_int_value(device_get_name(dev), device_get_unit(dev), "prefer_iomap", &tval) == 0 && tval != 0) {
511 *m1 = PCIM_CMD_PORTEN;
512 *m2 = PCIM_CMD_MEMEN;
515 if (resource_int_value(device_get_name(dev), device_get_unit(dev), "prefer_memmap", &tval) == 0 && tval != 0) {
516 *m1 = PCIM_CMD_MEMEN;
517 *m2 = PCIM_CMD_PORTEN;
522 isp_get_specific_options(device_t dev, int chan, ispsoftc_t *isp)
527 if (resource_int_value(device_get_name(dev), device_get_unit(dev), "iid", &tval)) {
529 ISP_FC_PC(isp, chan)->default_id = 109 - chan;
532 ISP_SPI_PC(isp, chan)->iid = OF_getscsinitid(dev);
534 ISP_SPI_PC(isp, chan)->iid = 7;
539 ISP_FC_PC(isp, chan)->default_id = tval - chan;
541 ISP_SPI_PC(isp, chan)->iid = tval;
543 isp->isp_confopts |= ISP_CFG_OWNLOOPID;
547 if (resource_int_value(device_get_name(dev), device_get_unit(dev), "role", &tval) == 0) {
550 case ISP_ROLE_INITIATOR:
551 case ISP_ROLE_TARGET:
552 case ISP_ROLE_INITIATOR|ISP_ROLE_TARGET:
553 device_printf(dev, "setting role to 0x%x\n", tval);
561 tval = ISP_DEFAULT_ROLES;
565 ISP_SPI_PC(isp, chan)->def_role = tval;
568 ISP_FC_PC(isp, chan)->def_role = tval;
571 if (resource_int_value(device_get_name(dev), device_get_unit(dev), "fullduplex", &tval) == 0 && tval != 0) {
572 isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
575 if (resource_string_value(device_get_name(dev), device_get_unit(dev), "topology", &sptr) == 0 && sptr != NULL) {
576 if (strcmp(sptr, "lport") == 0) {
577 isp->isp_confopts |= ISP_CFG_LPORT;
578 } else if (strcmp(sptr, "nport") == 0) {
579 isp->isp_confopts |= ISP_CFG_NPORT;
580 } else if (strcmp(sptr, "lport-only") == 0) {
581 isp->isp_confopts |= ISP_CFG_LPORT_ONLY;
582 } else if (strcmp(sptr, "nport-only") == 0) {
583 isp->isp_confopts |= ISP_CFG_NPORT_ONLY;
588 * Because the resource_*_value functions can neither return
589 * 64 bit integer values, nor can they be directly coerced
590 * to interpret the right hand side of the assignment as
591 * you want them to interpret it, we have to force WWN
592 * hint replacement to specify WWN strings with a leading
593 * 'w' (e..g w50000000aaaa0001). Sigh.
596 tval = resource_string_value(device_get_name(dev), device_get_unit(dev), "portwwn", &sptr);
597 if (tval == 0 && sptr != NULL && *sptr++ == 'w') {
599 ISP_FC_PC(isp, chan)->def_wwpn = strtouq(sptr, &eptr, 16);
600 if (eptr < sptr + 16 || ISP_FC_PC(isp, chan)->def_wwpn == -1) {
601 device_printf(dev, "mangled portwwn hint '%s'\n", sptr);
602 ISP_FC_PC(isp, chan)->def_wwpn = 0;
607 tval = resource_string_value(device_get_name(dev), device_get_unit(dev), "nodewwn", &sptr);
608 if (tval == 0 && sptr != NULL && *sptr++ == 'w') {
610 ISP_FC_PC(isp, chan)->def_wwnn = strtouq(sptr, &eptr, 16);
611 if (eptr < sptr + 16 || ISP_FC_PC(isp, chan)->def_wwnn == 0) {
612 device_printf(dev, "mangled nodewwn hint '%s'\n", sptr);
613 ISP_FC_PC(isp, chan)->def_wwnn = 0;
618 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "hysteresis", &tval);
619 if (tval >= 0 && tval < 256) {
620 ISP_FC_PC(isp, chan)->hysteresis = tval;
622 ISP_FC_PC(isp, chan)->hysteresis = isp_fabric_hysteresis;
626 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "loop_down_limit", &tval);
627 if (tval >= 0 && tval < 0xffff) {
628 ISP_FC_PC(isp, chan)->loop_down_limit = tval;
630 ISP_FC_PC(isp, chan)->loop_down_limit = isp_loop_down_limit;
634 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "gone_device_time", &tval);
635 if (tval >= 0 && tval < 0xffff) {
636 ISP_FC_PC(isp, chan)->gone_device_time = tval;
638 ISP_FC_PC(isp, chan)->gone_device_time = isp_gone_device_time;
643 isp_pci_attach(device_t dev)
645 int i, m1, m2, locksetup = 0;
647 uint32_t data, cmd, linesz, did;
648 struct isp_pcisoftc *pcs;
653 pcs = device_get_softc(dev);
655 device_printf(dev, "cannot get softc\n");
658 memset(pcs, 0, sizeof (*pcs));
666 * Get Generic Options
668 isp_get_generic_options(dev, isp, &isp_nvports);
671 * Check to see if options have us disabled
673 if (isp->isp_osinfo.disabled) {
675 * But return zero to preserve unit numbering
681 * Get PCI options- which in this case are just mapping preferences.
683 isp_get_pci_options(dev, &m1, &m2);
685 linesz = PCI_DFLT_LNSZ;
686 pcs->irq = pcs->regs = NULL;
687 pcs->rgd = pcs->rtp = pcs->iqd = 0;
689 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
691 pcs->rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
692 pcs->rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
693 pcs->regs = bus_alloc_resource_any(dev, pcs->rtp, &pcs->rgd, RF_ACTIVE);
695 if (pcs->regs == NULL && (cmd & m2)) {
696 pcs->rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
697 pcs->rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
698 pcs->regs = bus_alloc_resource_any(dev, pcs->rtp, &pcs->rgd, RF_ACTIVE);
700 if (pcs->regs == NULL) {
701 device_printf(dev, "unable to map any ports\n");
705 device_printf(dev, "using %s space register mapping\n", (pcs->rgd == IO_MAP_REG)? "I/O" : "Memory");
707 isp->isp_bus_tag = rman_get_bustag(pcs->regs);
708 isp->isp_bus_handle = rman_get_bushandle(pcs->regs);
711 pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
712 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
713 pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
714 pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
715 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
717 switch (pci_get_devid(dev)) {
718 case PCI_QLOGIC_ISP1020:
720 isp->isp_mdvec = &mdvec;
721 isp->isp_type = ISP_HA_SCSI_UNKNOWN;
723 case PCI_QLOGIC_ISP1080:
725 isp->isp_mdvec = &mdvec_1080;
726 isp->isp_type = ISP_HA_SCSI_1080;
727 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
729 case PCI_QLOGIC_ISP1240:
731 isp->isp_mdvec = &mdvec_1080;
732 isp->isp_type = ISP_HA_SCSI_1240;
734 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
736 case PCI_QLOGIC_ISP1280:
738 isp->isp_mdvec = &mdvec_1080;
739 isp->isp_type = ISP_HA_SCSI_1280;
740 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
742 case PCI_QLOGIC_ISP10160:
744 isp->isp_mdvec = &mdvec_12160;
745 isp->isp_type = ISP_HA_SCSI_10160;
746 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
748 case PCI_QLOGIC_ISP12160:
751 isp->isp_mdvec = &mdvec_12160;
752 isp->isp_type = ISP_HA_SCSI_12160;
753 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
755 case PCI_QLOGIC_ISP2100:
757 isp->isp_mdvec = &mdvec_2100;
758 isp->isp_type = ISP_HA_FC_2100;
759 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2100_OFF;
760 if (pci_get_revid(dev) < 3) {
762 * XXX: Need to get the actual revision
763 * XXX: number of the 2100 FB. At any rate,
764 * XXX: lower cache line size for early revision
770 case PCI_QLOGIC_ISP2200:
772 isp->isp_mdvec = &mdvec_2200;
773 isp->isp_type = ISP_HA_FC_2200;
774 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2100_OFF;
776 case PCI_QLOGIC_ISP2300:
778 isp->isp_mdvec = &mdvec_2300;
779 isp->isp_type = ISP_HA_FC_2300;
780 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
782 case PCI_QLOGIC_ISP2312:
783 case PCI_QLOGIC_ISP6312:
785 isp->isp_mdvec = &mdvec_2300;
786 isp->isp_type = ISP_HA_FC_2312;
787 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
789 case PCI_QLOGIC_ISP2322:
790 case PCI_QLOGIC_ISP6322:
792 isp->isp_mdvec = &mdvec_2300;
793 isp->isp_type = ISP_HA_FC_2322;
794 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
796 case PCI_QLOGIC_ISP2422:
797 case PCI_QLOGIC_ISP2432:
799 isp->isp_nchan += isp_nvports;
800 isp->isp_mdvec = &mdvec_2400;
801 isp->isp_type = ISP_HA_FC_2400;
802 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
804 case PCI_QLOGIC_ISP2532:
806 isp->isp_nchan += isp_nvports;
807 isp->isp_mdvec = &mdvec_2500;
808 isp->isp_type = ISP_HA_FC_2500;
809 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
811 case PCI_QLOGIC_ISP5432:
813 isp->isp_mdvec = &mdvec_2500;
814 isp->isp_type = ISP_HA_FC_2500;
815 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
818 device_printf(dev, "unknown device type\n");
822 isp->isp_revision = pci_get_revid(dev);
825 psize = sizeof (fcparam);
826 xsize = sizeof (struct isp_fc);
828 psize = sizeof (sdparam);
829 xsize = sizeof (struct isp_spi);
831 psize *= isp->isp_nchan;
832 xsize *= isp->isp_nchan;
833 isp->isp_param = kmalloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO);
834 if (isp->isp_param == NULL) {
835 device_printf(dev, "cannot allocate parameter data\n");
838 isp->isp_osinfo.pc.ptr = kmalloc(xsize, M_DEVBUF, M_NOWAIT | M_ZERO);
839 if (isp->isp_osinfo.pc.ptr == NULL) {
840 device_printf(dev, "cannot allocate parameter data\n");
845 * Now that we know who we are (roughly) get/set specific options
847 for (i = 0; i < isp->isp_nchan; i++) {
848 isp_get_specific_options(dev, i, isp);
852 * The 'it' suffix really only matters for SCSI cards in target mode.
854 isp->isp_osinfo.fw = NULL;
855 if (IS_SCSI(isp) && (ISP_SPI_PC(isp, 0)->def_role & ISP_ROLE_TARGET)) {
856 ksnprintf(fwname, sizeof (fwname), "isp_%04x_it", did);
857 isp->isp_osinfo.fw = firmware_get(fwname);
858 } else if (IS_24XX(isp) && (isp->isp_nchan > 1 || isp->isp_osinfo.forcemulti)) {
859 ksnprintf(fwname, sizeof (fwname), "isp_%04x_multi", did);
860 isp->isp_osinfo.fw = firmware_get(fwname);
862 if (isp->isp_osinfo.fw == NULL) {
863 ksnprintf(fwname, sizeof (fwname), "isp_%04x", did);
864 isp->isp_osinfo.fw = firmware_get(fwname);
866 if (isp->isp_osinfo.fw != NULL) {
867 isp->isp_mdvec->dv_ispfw = isp->isp_osinfo.fw->data;
871 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
874 cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN |
875 PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
877 if (IS_2300(isp)) { /* per QLogic errata */
878 cmd &= ~PCIM_CMD_INVEN;
881 if (IS_2322(isp) || pci_get_devid(dev) == PCI_QLOGIC_ISP6312) {
882 cmd &= ~PCIM_CMD_INTX_DISABLE;
886 cmd &= ~PCIM_CMD_INTX_DISABLE;
889 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
892 * Make sure the Cache Line Size register is set sensibly.
894 data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
895 if (data == 0 || (linesz != PCI_DFLT_LNSZ && data != linesz)) {
896 isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d from %d", linesz, data);
898 pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
902 * Make sure the Latency Timer is sane.
904 data = pci_read_config(dev, PCIR_LATTIMER, 1);
905 if (data < PCI_DFLT_LTNCY) {
906 data = PCI_DFLT_LTNCY;
907 isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data);
908 pci_write_config(dev, PCIR_LATTIMER, data, 1);
912 * Make sure we've disabled the ROM.
914 data = pci_read_config(dev, PCIR_ROMADDR, 4);
916 pci_write_config(dev, PCIR_ROMADDR, data, 4);
918 #if 0 /* XXX swildner */
922 * NB: MSI-X needs to be disabled for the 2432 (PCI-Express)
924 if (IS_24XX(isp) || IS_2322(isp)) {
925 pcs->msicount = pci_msi_count(dev);
926 if (pcs->msicount > 1) {
929 if (pci_alloc_msi(dev, &pcs->msicount) == 0) {
936 pcs->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &pcs->iqd, RF_ACTIVE | RF_SHAREABLE);
937 if (pcs->irq == NULL) {
938 device_printf(dev, "could not allocate interrupt\n");
942 /* Make sure the lock is set up. */
943 lockinit(&isp->isp_osinfo.lock, "isp", 0, LK_CANRECURSE);
946 if (isp_setup_intr(dev, pcs->irq, ISP_IFLAGS, isp_platform_intr, isp, &pcs->ih, NULL)) {
947 device_printf(dev, "could not setup interrupt\n");
952 * Last minute checks...
954 if (IS_23XX(isp) || IS_24XX(isp)) {
955 isp->isp_port = pci_get_function(dev);
959 * Make sure we're in reset state.
963 if (isp->isp_state != ISP_RESETSTATE) {
968 if (isp->isp_state == ISP_INITSTATE) {
969 isp->isp_state = ISP_RUNSTATE;
972 if (isp_attach(isp)) {
982 (void) bus_teardown_intr(dev, pcs->irq, pcs->ih);
985 lockuninit(&isp->isp_osinfo.lock);
988 (void) bus_release_resource(dev, SYS_RES_IRQ, pcs->iqd, pcs->irq);
991 pci_release_msi(dev);
994 (void) bus_release_resource(dev, pcs->rtp, pcs->rgd, pcs->regs);
996 if (pcs->pci_isp.isp_param) {
997 kfree(pcs->pci_isp.isp_param, M_DEVBUF);
998 pcs->pci_isp.isp_param = NULL;
1000 if (pcs->pci_isp.isp_osinfo.pc.ptr) {
1001 kfree(pcs->pci_isp.isp_osinfo.pc.ptr, M_DEVBUF);
1002 pcs->pci_isp.isp_osinfo.pc.ptr = NULL;
1008 isp_pci_detach(device_t dev)
1010 struct isp_pcisoftc *pcs;
1014 pcs = device_get_softc(dev);
1018 isp = (ispsoftc_t *) pcs;
1019 status = isp_detach(isp);
1025 (void) bus_teardown_intr(dev, pcs->irq, pcs->ih);
1028 lockuninit(&isp->isp_osinfo.lock);
1029 (void) bus_release_resource(dev, SYS_RES_IRQ, pcs->iqd, pcs->irq);
1030 if (pcs->msicount) {
1031 pci_release_msi(dev);
1033 (void) bus_release_resource(dev, pcs->rtp, pcs->rgd, pcs->regs);
1034 if (pcs->pci_isp.isp_param) {
1035 kfree(pcs->pci_isp.isp_param, M_DEVBUF);
1036 pcs->pci_isp.isp_param = NULL;
1038 if (pcs->pci_isp.isp_osinfo.pc.ptr) {
1039 kfree(pcs->pci_isp.isp_osinfo.pc.ptr, M_DEVBUF);
1040 pcs->pci_isp.isp_osinfo.pc.ptr = NULL;
1045 #define IspVirt2Off(a, x) \
1046 (((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
1047 _BLK_REG_SHFT] + ((x) & 0xfff))
1049 #define BXR2(isp, off) \
1050 bus_space_read_2(isp->isp_bus_tag, isp->isp_bus_handle, off)
1051 #define BXW2(isp, off, v) \
1052 bus_space_write_2(isp->isp_bus_tag, isp->isp_bus_handle, off, v)
1053 #define BXR4(isp, off) \
1054 bus_space_read_4(isp->isp_bus_tag, isp->isp_bus_handle, off)
1055 #define BXW4(isp, off, v) \
1056 bus_space_write_4(isp->isp_bus_tag, isp->isp_bus_handle, off, v)
1059 static ISP_INLINE int
1060 isp_pci_rd_debounced(ispsoftc_t *isp, int off, uint16_t *rp)
1062 uint32_t val0, val1;
1066 val0 = BXR2(isp, IspVirt2Off(isp, off));
1067 val1 = BXR2(isp, IspVirt2Off(isp, off));
1068 } while (val0 != val1 && ++i < 1000);
1077 isp_pci_rd_isr(ispsoftc_t *isp, uint32_t *isrp, uint16_t *semap, uint16_t *mbp)
1082 if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
1085 if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
1089 isr = BXR2(isp, IspVirt2Off(isp, BIU_ISR));
1090 sema = BXR2(isp, IspVirt2Off(isp, BIU_SEMA));
1092 isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
1093 isr &= INT_PENDING_MASK(isp);
1094 sema &= BIU_SEMA_LOCK;
1095 if (isr == 0 && sema == 0) {
1099 if ((*semap = sema) != 0) {
1101 if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) {
1105 *mbp = BXR2(isp, IspVirt2Off(isp, OUTMAILBOX0));
1112 isp_pci_rd_isr_2300(ispsoftc_t *isp, uint32_t *isrp, uint16_t *semap, uint16_t *mbox0p)
1117 if (!(BXR2(isp, IspVirt2Off(isp, BIU_ISR) & BIU2100_ISR_RISC_INT))) {
1121 r2hisr = BXR4(isp, IspVirt2Off(isp, BIU_R2HSTSLO));
1122 isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
1123 if ((r2hisr & BIU_R2HST_INTR) == 0) {
1127 switch (r2hisr & BIU_R2HST_ISTAT_MASK) {
1128 case ISPR2HST_ROM_MBX_OK:
1129 case ISPR2HST_ROM_MBX_FAIL:
1130 case ISPR2HST_MBX_OK:
1131 case ISPR2HST_MBX_FAIL:
1132 case ISPR2HST_ASYNC_EVENT:
1133 *isrp = r2hisr & 0xffff;
1134 *mbox0p = (r2hisr >> 16);
1137 case ISPR2HST_RIO_16:
1138 *isrp = r2hisr & 0xffff;
1139 *mbox0p = ASYNC_RIO16_1;
1142 case ISPR2HST_FPOST:
1143 *isrp = r2hisr & 0xffff;
1144 *mbox0p = ASYNC_CMD_CMPLT;
1147 case ISPR2HST_FPOST_CTIO:
1148 *isrp = r2hisr & 0xffff;
1149 *mbox0p = ASYNC_CTIO_DONE;
1152 case ISPR2HST_RSPQ_UPDATE:
1153 *isrp = r2hisr & 0xffff;
1158 hccr = ISP_READ(isp, HCCR);
1159 if (hccr & HCCR_PAUSE) {
1160 ISP_WRITE(isp, HCCR, HCCR_RESET);
1161 isp_prt(isp, ISP_LOGERR, "RISC paused at interrupt (%x->%x)", hccr, ISP_READ(isp, HCCR));
1162 ISP_WRITE(isp, BIU_ICR, 0);
1164 isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr);
1171 isp_pci_rd_isr_2400(ispsoftc_t *isp, uint32_t *isrp, uint16_t *semap, uint16_t *mbox0p)
1175 r2hisr = BXR4(isp, IspVirt2Off(isp, BIU2400_R2HSTSLO));
1176 isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
1177 if ((r2hisr & BIU2400_R2HST_INTR) == 0) {
1181 switch (r2hisr & BIU2400_R2HST_ISTAT_MASK) {
1182 case ISP2400R2HST_ROM_MBX_OK:
1183 case ISP2400R2HST_ROM_MBX_FAIL:
1184 case ISP2400R2HST_MBX_OK:
1185 case ISP2400R2HST_MBX_FAIL:
1186 case ISP2400R2HST_ASYNC_EVENT:
1187 *isrp = r2hisr & 0xffff;
1188 *mbox0p = (r2hisr >> 16);
1191 case ISP2400R2HST_RSPQ_UPDATE:
1192 case ISP2400R2HST_ATIO_RSPQ_UPDATE:
1193 case ISP2400R2HST_ATIO_RQST_UPDATE:
1194 *isrp = r2hisr & 0xffff;
1199 ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT);
1200 isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr);
1206 isp_pci_rd_reg(ispsoftc_t *isp, int regoff)
1211 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1213 * We will assume that someone has paused the RISC processor.
1215 oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
1216 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf | BIU_PCI_CONF1_SXP);
1217 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1219 rv = BXR2(isp, IspVirt2Off(isp, regoff));
1220 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1221 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf);
1222 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1228 isp_pci_wr_reg(ispsoftc_t *isp, int regoff, uint32_t val)
1232 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1234 * We will assume that someone has paused the RISC processor.
1236 oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
1237 BXW2(isp, IspVirt2Off(isp, BIU_CONF1),
1238 oldconf | BIU_PCI_CONF1_SXP);
1239 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1241 BXW2(isp, IspVirt2Off(isp, regoff), val);
1242 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1);
1243 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1244 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf);
1245 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1251 isp_pci_rd_reg_1080(ispsoftc_t *isp, int regoff)
1253 uint32_t rv, oc = 0;
1255 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
1256 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
1259 * We will assume that someone has paused the RISC processor.
1261 oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
1262 tc = oc & ~BIU_PCI1080_CONF1_DMA;
1263 if (regoff & SXP_BANK1_SELECT)
1264 tc |= BIU_PCI1080_CONF1_SXP1;
1266 tc |= BIU_PCI1080_CONF1_SXP0;
1267 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc);
1268 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1269 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
1270 oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
1271 BXW2(isp, IspVirt2Off(isp, BIU_CONF1),
1272 oc | BIU_PCI1080_CONF1_DMA);
1273 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1275 rv = BXR2(isp, IspVirt2Off(isp, regoff));
1277 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc);
1278 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1284 isp_pci_wr_reg_1080(ispsoftc_t *isp, int regoff, uint32_t val)
1288 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
1289 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
1292 * We will assume that someone has paused the RISC processor.
1294 oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
1295 tc = oc & ~BIU_PCI1080_CONF1_DMA;
1296 if (regoff & SXP_BANK1_SELECT)
1297 tc |= BIU_PCI1080_CONF1_SXP1;
1299 tc |= BIU_PCI1080_CONF1_SXP0;
1300 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc);
1301 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1302 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
1303 oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
1304 BXW2(isp, IspVirt2Off(isp, BIU_CONF1),
1305 oc | BIU_PCI1080_CONF1_DMA);
1306 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1308 BXW2(isp, IspVirt2Off(isp, regoff), val);
1309 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1);
1311 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc);
1312 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
/*
 * Register read for ISP24XX chips.
 *
 * The 24XX has no SXP/RISC/DMA banked register blocks: reads that target
 * those legacy blocks are rejected with a warning and 0xffffffff.  Reads
 * in the BIU block are dispatched by offset: mailbox-style registers are
 * 16-bit (BXR2), most BIU registers are 32-bit (BXR4), and R2HSTSHI is
 * the upper half of a 32-bit read.
 *
 * NOTE(review): the switch heads, case labels around the 16-bit path, and
 * the final return are elided in this chunk; comments describe only the
 * visible code.
 */
1317 isp_pci_rd_reg_2400(ispsoftc_t *isp, int regoff)
1320 int block = regoff & _BLK_REG_MASK;
/* Presumably the mailbox-register path: 16-bit read.  TODO confirm. */
1326 return (BXR2(isp, IspVirt2Off(isp, regoff)));
/* Legacy register blocks do not exist on the 24XX: warn and fail. */
1328 isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK read at 0x%x", regoff);
1329 return (0xffffffff);
1331 isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK read at 0x%x", regoff);
1332 return (0xffffffff);
1334 isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK read at 0x%x", regoff);
1335 return (0xffffffff);
1337 isp_prt(isp, ISP_LOGWARN, "unknown block read at 0x%x", regoff);
1338 return (0xffffffff);
/* 32-bit BIU registers: flash access and queue in/out pointers. */
1343 case BIU2400_FLASH_ADDR:
1344 case BIU2400_FLASH_DATA:
1348 case BIU2400_REQINP:
1349 case BIU2400_REQOUTP:
1350 case BIU2400_RSPINP:
1351 case BIU2400_RSPOUTP:
1352 case BIU2400_PRI_REQINP:
1353 case BIU2400_PRI_REQOUTP:
1354 case BIU2400_ATIO_RSPINP:
1355 case BIU2400_ATIO_RSPOUTP:
1360 rv = BXR4(isp, IspVirt2Off(isp, regoff));
1362 case BIU2400_R2HSTSLO:
1363 rv = BXR4(isp, IspVirt2Off(isp, regoff));
/* R2HSTSHI shares a 32-bit register with R2HSTSLO; take the top half. */
1365 case BIU2400_R2HSTSHI:
1366 rv = BXR4(isp, IspVirt2Off(isp, regoff)) >> 16;
1369 isp_prt(isp, ISP_LOGERR,
1370 "isp_pci_rd_reg_2400: unknown offset %x", regoff);
/*
 * Register write for ISP24XX chips; mirror of isp_pci_rd_reg_2400.
 *
 * Writes to legacy SXP/RISC/DMA blocks are rejected with a warning.
 * Within the BIU block, mailbox-style registers get a 16-bit write
 * (BXW2) and the flash/queue-pointer registers get a 32-bit write
 * (BXW4), each followed by a memory barrier of matching width.
 *
 * NOTE(review): switch heads, some case labels, and break/return lines
 * are elided in this chunk; comments describe only the visible code.
 */
1378 isp_pci_wr_reg_2400(ispsoftc_t *isp, int regoff, uint32_t val)
1380 int block = regoff & _BLK_REG_MASK;
/* Presumably the mailbox-register path: 16-bit write.  TODO confirm. */
1386 BXW2(isp, IspVirt2Off(isp, regoff), val);
1387 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1);
/* Legacy register blocks do not exist on the 24XX: warn and drop. */
1390 isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK write at 0x%x", regoff);
1393 isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK write at 0x%x", regoff);
1396 isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK write at 0x%x", regoff);
1399 isp_prt(isp, ISP_LOGWARN, "unknown block write at 0x%x",
/* 32-bit BIU registers: flash access and queue in/out pointers. */
1405 case BIU2400_FLASH_ADDR:
1406 case BIU2400_FLASH_DATA:
1410 case BIU2400_REQINP:
1411 case BIU2400_REQOUTP:
1412 case BIU2400_RSPINP:
1413 case BIU2400_RSPOUTP:
1414 case BIU2400_PRI_REQINP:
1415 case BIU2400_PRI_REQOUTP:
1416 case BIU2400_ATIO_RSPINP:
1417 case BIU2400_ATIO_RSPOUTP:
1422 BXW4(isp, IspVirt2Off(isp, regoff), val);
1423 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 4, -1);
1426 isp_prt(isp, ISP_LOGERR,
1427 "isp_pci_wr_reg_2400: bad offset 0x%x", regoff);
1440 static void imc(void *, bus_dma_segment_t *, int, int);
1441 static void imc1(void *, bus_dma_segment_t *, int, int);
/*
 * busdma load callback for the contiguous control-space allocation.
 *
 * Records the bus address and KVA of the request queue, then advances
 * past it to record the result queue, and — for 24XX chips in target
 * mode — advances again to record the ATIO queue.  Errors (a busdma
 * error, or presumably an unexpected segment count; the check is elided
 * here) are latched in imushp->error for the caller of bus_dmamap_load.
 */
1444 imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1446 struct imush *imushp = (struct imush *) arg;
/* Propagate any busdma error to the waiting caller. */
1449 imushp->error = error;
/* NOTE(review): condition for this EINVAL is elided — likely nseg != 1. */
1453 imushp->error = EINVAL;
1456 isp_prt(imushp->isp, ISP_LOGDEBUG0, "request/result area @ 0x%jx/0x%jx", (uintmax_t) segs->ds_addr, (uintmax_t) segs->ds_len);
/* Request queue occupies the front of the single segment. */
1457 imushp->isp->isp_rquest = imushp->vbase;
1458 imushp->isp->isp_rquest_dma = segs->ds_addr;
/* Step over the request queue to locate the result queue. */
1459 segs->ds_addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(imushp->isp));
1460 imushp->vbase += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(imushp->isp));
1461 imushp->isp->isp_result_dma = segs->ds_addr;
1462 imushp->isp->isp_result = imushp->vbase;
1464 #ifdef ISP_TARGET_MODE
/* 24XX target mode: the ATIO queue follows the result queue. */
1465 if (IS_24XX(imushp->isp)) {
1466 segs->ds_addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(imushp->isp));
1467 imushp->vbase += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(imushp->isp));
1468 imushp->isp->isp_atioq_dma = segs->ds_addr;
1469 imushp->isp->isp_atioq = imushp->vbase;
/*
 * busdma load callback for a per-channel FC scratch area.
 *
 * Stores the mapped bus address and KVA into the channel's fcparam
 * (isp_scdma / isp_scratch); errors are latched in imushp->error.
 */
1475 imc1(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1477 struct imush *imushp = (struct imush *) arg;
/* Propagate any busdma error to the waiting caller. */
1479 imushp->error = error;
/* NOTE(review): condition for this EINVAL is elided — likely nseg != 1. */
1483 imushp->error = EINVAL;
1486 isp_prt(imushp->isp, ISP_LOGDEBUG0, "scdma @ 0x%jx/0x%jx", (uintmax_t) segs->ds_addr, (uintmax_t) segs->ds_len);
1487 FCPARAM(imushp->isp, imushp->chan)->isp_scdma = segs->ds_addr;
1488 FCPARAM(imushp->isp, imushp->chan)->isp_scratch = imushp->vbase;
/*
 * One-time DMA resource setup for the HBA.
 *
 * Allocates, in order:
 *   1. the per-command (pcmd) pool,
 *   2. the master DMA tag (with address/segment limits chosen by chip
 *      generation: 64-bit capable parts get a 4GB segment limit),
 *   3. the handle arrays (isp_xflist, and isp_tgtlist in target mode),
 *      each threaded into a free list,
 *   4. the contiguous control space (request + result queues, plus the
 *      ATIO queue for 24XX target mode), mapped via the imc() callback,
 *   5. per-channel FC scratch areas, mapped via imc1(),
 *   6. a DMA map and watchdog callout per command slot.
 * Each failure path unwinds everything allocated before it.  Returns
 * nonzero on failure (return statements are elided in this chunk).
 *
 * NOTE(review): many lines (braces, labels, returns, some conditions)
 * are elided; comments describe only the visible code.
 */
1492 isp_pci_mbxdma(ispsoftc_t *isp)
1496 int i, error, ns, cmap = 0;
1497 bus_size_t slim; /* segment size */
1498 bus_addr_t llim; /* low limit of unavailable dma */
1499 bus_addr_t hlim; /* high limit of unavailable dma */
1503 * Already been here? If so, leave...
1505 if (isp->isp_rquest) {
1510 if (isp->isp_maxcmds == 0) {
1511 isp_prt(isp, ISP_LOGERR, "maxcmds not set");
/*
 * Pick DMA limits: ULTRA2/FC/1240 parts can address beyond 32 bits
 * (4GB segments when bus_size_t is wide enough); older parts are
 * restricted to 32-bit addresses.
 */
1516 hlim = BUS_SPACE_MAXADDR;
1517 if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
1518 if (sizeof (bus_size_t) > 4) {
1519 slim = (bus_size_t) (1ULL << 32);
1521 slim = (bus_size_t) (1UL << 31);
1523 llim = BUS_SPACE_MAXADDR;
1525 llim = BUS_SPACE_MAXADDR_32BIT;
/* Per-command bookkeeping pool, one isp_pcmd per outstanding command. */
1529 len = isp->isp_maxcmds * sizeof (struct isp_pcmd);
1530 isp->isp_osinfo.pcmd_pool = (struct isp_pcmd *) kmalloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
1531 if (isp->isp_osinfo.pcmd_pool == NULL) {
1532 isp_prt(isp, ISP_LOGERR, "cannot allocate pcmds");
1538 * XXX: We don't really support 64 bit target mode for parallel scsi yet
1540 #ifdef ISP_TARGET_MODE
1541 if (IS_SCSI(isp) && sizeof (bus_addr_t) > 4) {
1542 kfree(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
1543 isp_prt(isp, ISP_LOGERR, "we cannot do DAC for SPI cards yet");
/* Master DMA tag from which all other tags/maps are derived. */
1549 if (isp_dma_tag_create(NULL, 1, slim, llim, hlim, NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0, &isp->isp_osinfo.dmat)) {
1550 kfree(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
1552 isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
/* Command-handle array; chained into a singly-linked free list. */
1556 len = sizeof (isp_hdl_t) * isp->isp_maxcmds;
1557 isp->isp_xflist = (isp_hdl_t *) kmalloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
1558 if (isp->isp_xflist == NULL) {
1559 kfree(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
1561 isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
1564 for (len = 0; len < isp->isp_maxcmds - 1; len++) {
1565 isp->isp_xflist[len].cmd = &isp->isp_xflist[len+1];
1567 isp->isp_xffree = isp->isp_xflist;
1568 #ifdef ISP_TARGET_MODE
/* Same handle-array setup for target-mode commands. */
1569 len = sizeof (isp_hdl_t) * isp->isp_maxcmds;
1570 isp->isp_tgtlist = (isp_hdl_t *) kmalloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
1571 if (isp->isp_tgtlist == NULL) {
1572 kfree(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
1573 kfree(isp->isp_xflist, M_DEVBUF);
1575 isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array");
1578 for (len = 0; len < isp->isp_maxcmds - 1; len++) {
1579 isp->isp_tgtlist[len].cmd = &isp->isp_tgtlist[len+1];
1581 isp->isp_tgtfree = isp->isp_tgtlist;
1585 * Allocate and map the request and result queues (and ATIO queue
1586 * if we're a 2400 supporting target mode).
1588 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
1589 len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
1590 #ifdef ISP_TARGET_MODE
1592 len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
1596 ns = (len / PAGE_SIZE) + 1;
1599 * Create a tag for the control spaces. We don't always need this
1600 * to be 32 bits, but we do this for simplicity and speed's sake.
1602 if (isp_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, len, ns, slim, 0, &isp->isp_osinfo.cdmat)) {
1603 isp_prt(isp, ISP_LOGERR, "cannot create a dma tag for control spaces");
1604 kfree(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
1605 kfree(isp->isp_xflist, M_DEVBUF);
1606 #ifdef ISP_TARGET_MODE
1607 kfree(isp->isp_tgtlist, M_DEVBUF);
1613 if (bus_dmamem_alloc(isp->isp_osinfo.cdmat, (void **)&base, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &isp->isp_osinfo.cdmap) != 0) {
1614 isp_prt(isp, ISP_LOGERR, "cannot allocate %d bytes of CCB memory", len);
1615 bus_dma_tag_destroy(isp->isp_osinfo.cdmat);
1616 kfree(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
1617 kfree(isp->isp_xflist, M_DEVBUF);
1618 #ifdef ISP_TARGET_MODE
1619 kfree(isp->isp_tgtlist, M_DEVBUF);
/* Map the control space; imc() records the queue addresses. */
1630 bus_dmamap_load(isp->isp_osinfo.cdmat, isp->isp_osinfo.cdmap, base, len, imc, &im, 0);
1632 isp_prt(isp, ISP_LOGERR, "error %d loading dma map for control areas", im.error);
/* Per-channel FC scratch areas, one tag/map/buffer per channel. */
1637 for (cmap = 0; cmap < isp->isp_nchan; cmap++) {
1638 struct isp_fc *fc = ISP_FC_PC(isp, cmap);
1639 if (isp_dma_tag_create(isp->isp_osinfo.dmat, 64, slim, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, ISP_FC_SCRLEN, 1, slim, 0, &fc->tdmat)) {
1642 if (bus_dmamem_alloc(fc->tdmat, (void **)&base, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &fc->tdmap) != 0) {
1643 bus_dma_tag_destroy(fc->tdmat);
1650 bus_dmamap_load(fc->tdmat, fc->tdmap, base, ISP_FC_SCRLEN, imc1, &im, 0);
1652 bus_dmamem_free(fc->tdmat, base, fc->tdmap);
1653 bus_dma_tag_destroy(fc->tdmat);
/* A DMA map and watchdog callout for every command slot. */
1659 for (i = 0; i < isp->isp_maxcmds; i++) {
1660 struct isp_pcmd *pcmd = &isp->isp_osinfo.pcmd_pool[i];
1661 error = bus_dmamap_create(isp->isp_osinfo.dmat, 0, &pcmd->dmap);
1663 isp_prt(isp, ISP_LOGERR, "error %d creating per-cmd DMA maps", error);
/* NOTE(review): loop bound for this unwind is elided — presumably < i. */
1665 bus_dmamap_destroy(isp->isp_osinfo.dmat, isp->isp_osinfo.pcmd_pool[i].dmap);
1669 callout_init(&pcmd->wdog);
/* Thread the pool into a free list; the last entry terminates it. */
1670 if (i == isp->isp_maxcmds-1) {
1673 pcmd->next = &isp->isp_osinfo.pcmd_pool[i+1];
1676 isp->isp_osinfo.pcmd_free = &isp->isp_osinfo.pcmd_pool[0];
/* Failure unwind: release scratch areas, control space, and arrays. */
1681 while (--cmap >= 0) {
1682 struct isp_fc *fc = ISP_FC_PC(isp, cmap);
1683 bus_dmamem_free(fc->tdmat, base, fc->tdmap);
1684 bus_dma_tag_destroy(fc->tdmat);
1686 bus_dmamem_free(isp->isp_osinfo.cdmat, base, isp->isp_osinfo.cdmap);
1687 bus_dma_tag_destroy(isp->isp_osinfo.cdmat);
1688 kfree(isp->isp_xflist, M_DEVBUF);
1689 #ifdef ISP_TARGET_MODE
1690 kfree(isp->isp_tgtlist, M_DEVBUF);
1692 kfree(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
1693 isp->isp_rquest = NULL;
1701 void *rq; /* original request */
1706 #define MUSHERR_NOQENTRIES -2
1708 #ifdef ISP_TARGET_MODE
1709 static void tdma2_2(void *, bus_dma_segment_t *, int, bus_size_t, int);
1710 static void tdma2(void *, bus_dma_segment_t *, int, int);
/*
 * bus_dmamap_load_uio-style callback for target mode: captures the
 * mapped size, then defers to the common tdma2() segment handler.
 */
1713 tdma2_2(void *arg, bus_dma_segment_t *dm_segs, int nseg, bus_size_t mapsize, int error)
1717 mp->mapsize = mapsize;
1718 tdma2(arg, dm_segs, nseg, error);
/*
 * Target-mode DMA completion callback.
 *
 * Validates the segment count against the chip's limit (the larger
 * 64-bit limit when bus addresses are wider than 32 bits, in which case
 * a CTIO2 request is promoted to the 64-bit CTIO3 form), syncs the data
 * map for the transfer direction, and hands the segments to
 * isp_send_tgt_cmd(); a full request queue is reported through
 * mp->error = MUSHERR_NOQENTRIES.
 *
 * NOTE(review): early-error checks, braces, and the non-data-direction
 * branch are elided in this chunk; comments describe visible code only.
 */
1722 tdma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1726 struct ccb_scsiio *csio;
1730 mp = (mush_t *) arg;
1735 csio = mp->cmd_token;
/* 64-bit capable: higher segment ceiling, 64-bit CTIO type if needed. */
1739 if (sizeof (bus_addr_t) > 4) {
1740 if (nseg >= ISP_NSEG64_MAX) {
1741 isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG64_MAX);
1745 if (rq->req_header.rqs_entry_type == RQSTYPE_CTIO2) {
1746 rq->req_header.rqs_entry_type = RQSTYPE_CTIO3;
1749 if (nseg >= ISP_NSEG_MAX) {
1750 isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG_MAX);
/*
 * Direction sync.  Note this is target mode: CAM_DIR_IN (data to the
 * initiator) means we are sending, hence PREWRITE / ISP_TO_DEVICE.
 */
1755 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1756 bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE);
1757 ddir = ISP_TO_DEVICE;
1758 } else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1759 bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD);
1760 ddir = ISP_FROM_DEVICE;
/* Queue the CTIO; a full queue is signaled via MUSHERR_NOQENTRIES. */
1772 if (isp_send_tgt_cmd(isp, rq, dm_segs, nseg, XS_XFRLEN(csio), ddir, &csio->sense_data, csio->sense_len) != CMD_QUEUED) {
1773 mp->error = MUSHERR_NOQENTRIES;
1778 static void dma2_2(void *, bus_dma_segment_t *, int, bus_size_t, int);
1779 static void dma2(void *, bus_dma_segment_t *, int, int);
/*
 * bus_dmamap_load_uio-style callback for initiator mode: captures the
 * mapped size, then defers to the common dma2() segment handler.
 */
1782 dma2_2(void *arg, bus_dma_segment_t *dm_segs, int nseg, bus_size_t mapsize, int error)
1786 mp->mapsize = mapsize;
1787 dma2(arg, dm_segs, nseg, error);
/*
 * Initiator-mode DMA completion callback.
 *
 * Validates the segment count (with the larger 64-bit ceiling when bus
 * addresses exceed 32 bits, promoting T2RQS->T3RQS and REQUEST->A64
 * request types for 64-bit addressing), syncs the data map for the
 * transfer direction, and hands the segments to isp_send_cmd(); a full
 * request queue is reported through mp->error = MUSHERR_NOQENTRIES.
 *
 * NOTE(review): early-error checks, braces, and the non-data-direction
 * branch are elided in this chunk; comments describe visible code only.
 */
1791 dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1795 struct ccb_scsiio *csio;
1799 mp = (mush_t *) arg;
1804 csio = mp->cmd_token;
/* 64-bit capable: higher segment ceiling, 64-bit request types. */
1808 if (sizeof (bus_addr_t) > 4) {
1809 if (nseg >= ISP_NSEG64_MAX) {
1810 isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG64_MAX);
1814 if (rq->req_header.rqs_entry_type == RQSTYPE_T2RQS) {
1815 rq->req_header.rqs_entry_type = RQSTYPE_T3RQS;
1816 } else if (rq->req_header.rqs_entry_type == RQSTYPE_REQUEST) {
1817 rq->req_header.rqs_entry_type = RQSTYPE_A64;
1820 if (nseg >= ISP_NSEG_MAX) {
1821 isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG_MAX);
/* Initiator mode: CAM_DIR_IN is a read from the device (PREREAD). */
1826 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1827 bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD);
1828 ddir = ISP_FROM_DEVICE;
1829 } else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1830 bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE);
1831 ddir = ISP_TO_DEVICE;
/* Queue the request; a full queue is signaled via MUSHERR_NOQENTRIES. */
1841 if (isp_send_cmd(isp, rq, dm_segs, nseg, XS_XFRLEN(csio), ddir) != CMD_QUEUED) {
1842 mp->error = MUSHERR_NOQENTRIES;
/*
 * Map a CCB's data buffer for DMA and queue the request.
 *
 * Selects the callback pair (tdma2/tdma2_2 for continued target I/O,
 * presumably dma2/dma2_2 otherwise — the assignment lines are elided),
 * then dispatches on the CCB's data layout:
 *   - no data phase: invoke the callback with zero segments;
 *   - single virtual buffer: bus_dmamap_load();
 *   - single physical buffer: synthesize one bus_dma_segment;
 *   - virtual scatter list: wrap it in a uio and bus_dmamap_load_uio()
 *     (relies on iovec/bus_dma_segment layout compatibility, see the
 *     KASSERT);
 *   - physical scatter list: pass the segments straight through
 *     (physical segment *pointers* are rejected as unsupported).
 * Errors collected in mp->error are translated to CAM status codes;
 * returns CMD_QUEUED on success, CMD_EAGAIN/CMD_COMPLETE on failure.
 *
 * NOTE(review): numerous lines (declarations, braces, else arms) are
 * elided in this chunk; comments describe visible code only.
 */
1847 isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, void *ff)
1850 void (*eptr)(void *, bus_dma_segment_t *, int, int);
1851 void (*eptr2)(void *, bus_dma_segment_t *, int, bus_size_t, int);
1855 mp->cmd_token = csio;
1860 #ifdef ISP_TARGET_MODE
/* Continued target I/O uses the target-mode callbacks. */
1861 if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
/* No data to move: call the handler directly with zero segments. */
1872 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || (csio->dxfer_len == 0)) {
1873 (*eptr)(mp, NULL, 0, 0);
1874 } else if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
/* Single contiguous buffer, virtual address: normal busdma load. */
1875 if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
1877 error = bus_dmamap_load(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
1879 xpt_print(csio->ccb_h.path, "%s: bus_dmamap_load " "ptr %p len %d returned %d\n", __func__, csio->data_ptr, csio->dxfer_len, error);
/* Deferred (EINPROGRESS) loads are not supported by this driver. */
1882 if (error == EINPROGRESS) {
1883 bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap);
1885 isp_prt(isp, ISP_LOGERR, "deferred dma allocation not supported");
1886 } else if (error && mp->error == 0) {
1888 isp_prt(isp, ISP_LOGERR, "error %d in dma mapping code", error);
1893 /* Pointer to physical buffer */
1894 struct bus_dma_segment seg;
1895 seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr;
1896 seg.ds_len = csio->dxfer_len;
1897 (*eptr)(mp, &seg, 1, 0);
1900 struct bus_dma_segment *segs;
1902 if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
1903 isp_prt(isp, ISP_LOGERR, "Physical segment pointers unsupported");
1905 } else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
1910 * We're taking advantage of the fact that
1911 * the pointer/length sizes and layout of the iovec
1912 * structure are the same as the bus_dma_segment
1913 * structure. This might be a little dangerous,
1914 * but only if they change the structures, which
1917 KASSERT((sizeof (sguio.uio_iov) == sizeof (csio->data_ptr) &&
1918 sizeof (sguio.uio_iovcnt) >= sizeof (csio->sglist_cnt) &&
1919 sizeof (sguio.uio_resid) >= sizeof (csio->dxfer_len)), ("Ken's assumption failed"));
1920 sguio.uio_iov = (struct iovec *)csio->data_ptr;
1921 sguio.uio_iovcnt = csio->sglist_cnt;
1922 sguio.uio_resid = csio->dxfer_len;
1923 sguio.uio_segflg = UIO_SYSSPACE;
1925 error = bus_dmamap_load_uio(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, &sguio, eptr2, mp, 0);
1927 if (error != 0 && mp->error == 0) {
1928 isp_prt(isp, ISP_LOGERR, "error %d in dma mapping code", error);
1932 /* Just use the segments provided */
1933 segs = (struct bus_dma_segment *) csio->data_ptr;
1934 (*eptr)(mp, segs, csio->sglist_cnt, 0);
/* Translate accumulated mapping errors into CAM completion status. */
1938 int retval = CMD_COMPLETE;
1939 if (mp->error == MUSHERR_NOQENTRIES) {
1940 retval = CMD_EAGAIN;
1941 } else if (mp->error == EFBIG) {
1942 XS_SETERR(csio, CAM_REQ_TOO_BIG);
1943 } else if (mp->error == EINVAL) {
1944 XS_SETERR(csio, CAM_REQ_INVALID);
1946 XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
1950 return (CMD_QUEUED);
/* Pre-reset hook: mask chip interrupts before the reset sequence runs. */
1954 isp_pci_reset0(ispsoftc_t *isp)
1956 ISP_DISABLE_INTS(isp);
/*
 * Post-reset hook: on pre-24XX chips, disable the on-board BIOS via an
 * HCCR command, then re-enable chip interrupts for all chip types.
 */
1960 isp_pci_reset1(ispsoftc_t *isp)
1962 if (!IS_24XX(isp)) {
1963 /* Make sure the BIOS is disabled */
1964 isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
1966 /* and enable interrupts */
1967 ISP_ENABLE_INTS(isp);
1971 isp_pci_dumpregs(ispsoftc_t *isp, const char *msg)
1973 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
1975 kprintf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
1977 kprintf("%s:\n", device_get_nameunit(isp->isp_dev));
1979 kprintf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
1981 kprintf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
1982 kprintf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
1983 ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
1984 kprintf("risc_hccr=%x\n", ISP_READ(isp, HCCR));
1988 ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
1989 kprintf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
1990 ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
1991 ISP_READ(isp, CDMA_FIFO_STS));
1992 kprintf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
1993 ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
1994 ISP_READ(isp, DDMA_FIFO_STS));
1995 kprintf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
1996 ISP_READ(isp, SXP_INTERRUPT),
1997 ISP_READ(isp, SXP_GROSS_ERR),
1998 ISP_READ(isp, SXP_PINS_CTRL));
1999 ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
2001 kprintf(" mbox regs: %x %x %x %x %x\n",
2002 ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
2003 ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
2004 ISP_READ(isp, OUTMAILBOX4));
2005 kprintf(" PCI Status Command/Status=%x\n",
2006 pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1));