2 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
3 * Copyright (c) 2000, Michael Smith <msmith@freebsd.org>
4 * Copyright (c) 2000, BSDi
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/limits.h>
39 #include <sys/linker.h>
40 #include <sys/fcntl.h>
42 #include <sys/kernel.h>
43 #include <sys/queue.h>
44 #include <sys/sysctl.h>
45 #include <sys/endian.h>
49 #include <vm/vm_extern.h>
52 #include <machine/bus.h>
54 #include <machine/resource.h>
55 #include <machine/stdarg.h>
57 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
58 #include <machine/intr_machdep.h>
61 #include <sys/pciio.h>
62 #include <dev/pci/pcireg.h>
63 #include <dev/pci/pcivar.h>
64 #include <dev/pci/pci_private.h>
66 #include <dev/usb/controller/xhcireg.h>
67 #include <dev/usb/controller/ehcireg.h>
68 #include <dev/usb/controller/ohcireg.h>
69 #include <dev/usb/controller/uhcireg.h>
/*
 * True when `reg' is the config-space offset of the expansion-ROM (BIOS)
 * BAR for this header type: PCIR_BIOS for type-0 (normal) headers,
 * PCIR_BIOS_1 for type-1 (PCI-PCI bridge) headers.
 */
74 #define PCIR_IS_BIOS(cfg, reg) \
75 (((cfg)->hdrtype == PCIM_HDRTYPE_NORMAL && reg == PCIR_BIOS) || \
76 ((cfg)->hdrtype == PCIM_HDRTYPE_BRIDGE && reg == PCIR_BIOS_1))
78 static int pci_has_quirk(uint32_t devid, int quirk);
79 static pci_addr_t pci_mapbase(uint64_t mapreg);
80 static const char *pci_maptype(uint64_t mapreg);
81 static int pci_maprange(uint64_t mapreg);
82 static pci_addr_t pci_rombase(uint64_t mapreg);
83 static int pci_romsize(uint64_t testval);
84 static void pci_fixancient(pcicfgregs *cfg);
85 static int pci_printf(pcicfgregs *cfg, const char *fmt, ...);
87 static int pci_porten(device_t dev);
88 static int pci_memen(device_t dev);
89 static void pci_assign_interrupt(device_t bus, device_t dev,
91 static int pci_add_map(device_t bus, device_t dev, int reg,
92 struct resource_list *rl, int force, int prefetch);
93 static int pci_probe(device_t dev);
94 static int pci_attach(device_t dev);
96 static int pci_detach(device_t dev);
98 static void pci_load_vendor_data(void);
99 static int pci_describe_parse_line(char **ptr, int *vendor,
100 int *device, char **desc);
101 static char *pci_describe_device(device_t dev);
102 static int pci_modevent(module_t mod, int what, void *arg);
103 static void pci_hdrtypedata(device_t pcib, int b, int s, int f,
105 static void pci_read_cap(device_t pcib, pcicfgregs *cfg);
106 static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg,
107 int reg, uint32_t *data);
109 static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg,
110 int reg, uint32_t data);
112 static void pci_read_vpd(device_t pcib, pcicfgregs *cfg);
113 static void pci_mask_msix(device_t dev, u_int index);
114 static void pci_unmask_msix(device_t dev, u_int index);
115 static int pci_msi_blacklisted(void);
116 static int pci_msix_blacklisted(void);
117 static void pci_resume_msi(device_t dev);
118 static void pci_resume_msix(device_t dev);
119 static int pci_remap_intr_method(device_t bus, device_t dev,
122 static uint16_t pci_get_rid_method(device_t dev, device_t child);
124 static struct pci_devinfo * pci_fill_devinfo(device_t pcib, int d, int b, int s,
125 int f, uint16_t vid, uint16_t did, size_t size);
/*
 * newbus method table for the PCI bus driver: device interface, bus
 * interface, and the pci-specific kobj interface.
 *
 * NOTE(review): this listing embeds original line numbers at the start of
 * each line, and gaps in those numbers show lines were dropped by the
 * extraction.  In particular, the two device_detach entries below almost
 * certainly sat on opposite sides of a dropped #ifdef PCI_RES_BUS / #else /
 * #endif (embedded lines 131/133/135 are missing) -- confirm against the
 * repository copy rather than "fixing" the apparent duplicate.
 */
127 static device_method_t pci_methods[] = {
128 /* Device interface */
129 DEVMETHOD(device_probe, pci_probe),
130 DEVMETHOD(device_attach, pci_attach),
132 DEVMETHOD(device_detach, pci_detach),
134 DEVMETHOD(device_detach, bus_generic_detach),
136 DEVMETHOD(device_shutdown, bus_generic_shutdown),
137 DEVMETHOD(device_suspend, bus_generic_suspend),
138 DEVMETHOD(device_resume, pci_resume),
/* Bus interface */
141 DEVMETHOD(bus_print_child, pci_print_child),
142 DEVMETHOD(bus_probe_nomatch, pci_probe_nomatch),
143 DEVMETHOD(bus_read_ivar, pci_read_ivar),
144 DEVMETHOD(bus_write_ivar, pci_write_ivar),
145 DEVMETHOD(bus_driver_added, pci_driver_added),
146 DEVMETHOD(bus_setup_intr, pci_setup_intr),
147 DEVMETHOD(bus_teardown_intr, pci_teardown_intr),
149 DEVMETHOD(bus_get_dma_tag, pci_get_dma_tag),
150 DEVMETHOD(bus_get_resource_list,pci_get_resource_list),
151 DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
152 DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
153 DEVMETHOD(bus_delete_resource, pci_delete_resource),
154 DEVMETHOD(bus_alloc_resource, pci_alloc_resource),
155 DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource),
156 DEVMETHOD(bus_release_resource, pci_release_resource),
157 DEVMETHOD(bus_activate_resource, pci_activate_resource),
158 DEVMETHOD(bus_deactivate_resource, pci_deactivate_resource),
159 DEVMETHOD(bus_child_detached, pci_child_detached),
160 DEVMETHOD(bus_child_pnpinfo_str, pci_child_pnpinfo_str_method),
161 DEVMETHOD(bus_child_location_str, pci_child_location_str_method),
162 DEVMETHOD(bus_remap_intr, pci_remap_intr_method),
163 DEVMETHOD(bus_suspend_child, pci_suspend_child),
164 DEVMETHOD(bus_resume_child, pci_resume_child),
/* PCI interface */
167 DEVMETHOD(pci_read_config, pci_read_config_method),
168 DEVMETHOD(pci_write_config, pci_write_config_method),
169 DEVMETHOD(pci_enable_busmaster, pci_enable_busmaster_method),
170 DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method),
171 DEVMETHOD(pci_enable_io, pci_enable_io_method),
172 DEVMETHOD(pci_disable_io, pci_disable_io_method),
173 DEVMETHOD(pci_get_vpd_ident, pci_get_vpd_ident_method),
174 DEVMETHOD(pci_get_vpd_readonly, pci_get_vpd_readonly_method),
175 DEVMETHOD(pci_get_powerstate, pci_get_powerstate_method),
176 DEVMETHOD(pci_set_powerstate, pci_set_powerstate_method),
177 DEVMETHOD(pci_assign_interrupt, pci_assign_interrupt_method),
178 DEVMETHOD(pci_find_cap, pci_find_cap_method),
179 DEVMETHOD(pci_find_extcap, pci_find_extcap_method),
180 DEVMETHOD(pci_find_htcap, pci_find_htcap_method),
181 DEVMETHOD(pci_alloc_msi, pci_alloc_msi_method),
182 DEVMETHOD(pci_alloc_msix, pci_alloc_msix_method),
183 DEVMETHOD(pci_enable_msi, pci_enable_msi_method),
184 DEVMETHOD(pci_enable_msix, pci_enable_msix_method),
185 DEVMETHOD(pci_disable_msi, pci_disable_msi_method),
186 DEVMETHOD(pci_remap_msix, pci_remap_msix_method),
187 DEVMETHOD(pci_release_msi, pci_release_msi_method),
188 DEVMETHOD(pci_msi_count, pci_msi_count_method),
189 DEVMETHOD(pci_msix_count, pci_msix_count_method),
190 DEVMETHOD(pci_get_rid, pci_get_rid_method),
191 DEVMETHOD(pci_child_added, pci_child_added_method),
/* DEVMETHOD_END terminator presumably on a dropped line -- verify. */
196 DEFINE_CLASS_0(pci, pci_driver, pci_methods, sizeof(struct pci_softc));
198 static devclass_t pci_devclass;
199 DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, NULL);
200 MODULE_VERSION(pci, 1);
/* Cached copy of the pci_vendors(5) description database, loaded on attach. */
202 static char *pci_vendordata;
203 static size_t pci_vendordata_size;
/*
 * Per-device quirk table.  NOTE(review): the `struct pci_quirk {' opener
 * and some member declarations sit on lines dropped from this listing;
 * only the devid member comment survives below.
 */
206 uint32_t devid; /* Vendor/device of the card */
208 #define PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */
209 #define PCI_QUIRK_DISABLE_MSI 2 /* Neither MSI nor MSI-X work */
210 #define PCI_QUIRK_ENABLE_MSI_VM 3 /* Older chipset in VM where MSI works */
211 #define PCI_QUIRK_UNMAP_REG 4 /* Ignore PCI map register */
212 #define PCI_QUIRK_DISABLE_MSIX 5 /* MSI-X doesn't work */
213 #define PCI_QUIRK_MSI_INTX_BUG 6 /* PCIM_CMD_INTxDIS disables MSI */
/*
 * Quirk entries are { devid, type, arg1, arg2 }.  The table is scanned
 * linearly by pci_has_quirk(); the zero-devid sentinel that terminates it
 * is on a dropped line -- verify it is present in the repository copy.
 */
218 static const struct pci_quirk pci_quirks[] = {
219 /* The Intel 82371AB and 82443MX have a map register at offset 0x90. */
220 { 0x71138086, PCI_QUIRK_MAP_REG, 0x90, 0 },
221 { 0x719b8086, PCI_QUIRK_MAP_REG, 0x90, 0 },
222 /* As does the Serverworks OSB4 (the SMBus mapping register) */
223 { 0x02001166, PCI_QUIRK_MAP_REG, 0x90, 0 },
226 * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge
227 * or the CMIC-SL (AKA ServerWorks GC_LE).
229 { 0x00141166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
230 { 0x00171166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
233 * MSI doesn't work on earlier Intel chipsets including
234 * E7500, E7501, E7505, 845, 865, 875/E7210, and 855.
236 { 0x25408086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
237 { 0x254c8086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
238 { 0x25508086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
239 { 0x25608086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
240 { 0x25708086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
241 { 0x25788086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
242 { 0x35808086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
245 * MSI doesn't work with devices behind the AMD 8131 HT-PCIX
248 { 0x74501022, PCI_QUIRK_DISABLE_MSI, 0, 0 },
251 * MSI-X allocation doesn't work properly for devices passed through
252 * by VMware up to at least ESXi 5.1.
254 { 0x079015ad, PCI_QUIRK_DISABLE_MSIX, 0, 0 }, /* PCI/PCI-X */
255 { 0x07a015ad, PCI_QUIRK_DISABLE_MSIX, 0, 0 }, /* PCIe */
258 * Some virtualization environments emulate an older chipset
259 * but support MSI just fine. QEMU uses the Intel 82440.
261 { 0x12378086, PCI_QUIRK_ENABLE_MSI_VM, 0, 0 },
264 * HPET MMIO base address may appear in Bar1 for AMD SB600 SMBus
265 * controller depending on SoftPciRst register (PM_IO 0x55 [7]).
266 * It prevents us from attaching hpet(4) when the bit is unset.
267 * Note this quirk only affects SB600 revision A13 and earlier.
268 * For SB600 A21 and later, firmware must set the bit to hide it.
269 * For SB700 and later, it is unused and hardcoded to zero.
271 { 0x43851002, PCI_QUIRK_UNMAP_REG, 0x14, 0 },
274 * Atheros AR8161/AR8162/E2200 Ethernet controllers have a bug that
275 * MSI interrupt does not assert if PCIM_CMD_INTxDIS bit of the
276 * command register is set.
278 { 0x10911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
279 { 0xE0911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
280 { 0x10901969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
283 * Broadcom BCM5714(S)/BCM5715(S)/BCM5780(S) Ethernet MACs don't
284 * issue MSI interrupts with PCIM_CMD_INTxDIS set either.
286 { 0x166814e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5714 */
287 { 0x166914e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5714S */
288 { 0x166a14e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5780 */
289 { 0x166b14e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5780S */
290 { 0x167814e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5715 */
291 { 0x167914e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5715S */
296 /* map register information */
297 #define PCI_MAPMEM 0x01 /* memory map */
298 #define PCI_MAPMEMP 0x02 /* prefetchable memory map */
299 #define PCI_MAPPORT 0x04 /* port map */
/* Global list of every discovered PCI function, plus bookkeeping counters. */
301 struct devlist pci_devq;
302 uint32_t pci_generation;
303 uint32_t pci_numdevs = 0;
/* Set once any PCIe / PCI-X capability is seen; used by MSI blacklisting. */
304 static int pcie_chipset, pcix_chipset;
/* hw.pci.* tunables and sysctls. */
307 SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD, 0, "PCI bus tuning parameters");
309 static int pci_enable_io_modes = 1;
310 SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RWTUN,
311 &pci_enable_io_modes, 1,
312 "Enable I/O and memory bits in the config register. Some BIOSes do not\n\
313 enable these bits correctly. We'd like to do this all the time, but there\n\
314 are some peripherals that this causes problems with.");
316 static int pci_do_realloc_bars = 0;
317 SYSCTL_INT(_hw_pci, OID_AUTO, realloc_bars, CTLFLAG_RWTUN,
318 &pci_do_realloc_bars, 0,
319 "Attempt to allocate a new range for any BARs whose original "
320 "firmware-assigned ranges fail to allocate during the initial device scan.");
322 static int pci_do_power_nodriver = 0;
323 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RWTUN,
324 &pci_do_power_nodriver, 0,
325 "Place a function into D3 state when no driver attaches to it. 0 means\n\
326 disable. 1 means conservatively place devices into D3 state. 2 means\n\
327 agressively place devices into D3 state. 3 means put absolutely everything\n\
330 int pci_do_power_resume = 1;
331 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RWTUN,
332 &pci_do_power_resume, 1,
333 "Transition from D3 -> D0 on resume.");
335 int pci_do_power_suspend = 1;
336 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_suspend, CTLFLAG_RWTUN,
337 &pci_do_power_suspend, 1,
338 "Transition from D0 -> D3 on suspend.");
340 static int pci_do_msi = 1;
341 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RWTUN, &pci_do_msi, 1,
342 "Enable support for MSI interrupts");
344 static int pci_do_msix = 1;
345 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RWTUN, &pci_do_msix, 1,
346 "Enable support for MSI-X interrupts");
348 static int pci_honor_msi_blacklist = 1;
349 SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RDTUN,
350 &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI/MSI-X");
/* USB takeover defaults on for x86 only; the #else arm's line was dropped. */
352 #if defined(__i386__) || defined(__amd64__)
353 static int pci_usb_takeover = 1;
355 static int pci_usb_takeover = 0;
357 SYSCTL_INT(_hw_pci, OID_AUTO, usb_early_takeover, CTLFLAG_RDTUN,
358 &pci_usb_takeover, 1, "Enable early takeover of USB controllers.\n\
359 Disable this if you depend on BIOS emulation of USB devices, that is\n\
360 you use USB devices (like keyboard or mouse) but do not load USB drivers");
362 static int pci_clear_bars;
363 SYSCTL_INT(_hw_pci, OID_AUTO, clear_bars, CTLFLAG_RDTUN, &pci_clear_bars, 0,
364 "Ignore firmware-assigned resources for BARs.");
366 #if defined(NEW_PCIB) && defined(PCI_RES_BUS)
367 static int pci_clear_buses;
368 SYSCTL_INT(_hw_pci, OID_AUTO, clear_buses, CTLFLAG_RDTUN, &pci_clear_buses, 0,
369 "Ignore firmware-assigned bus numbers.");
372 static int pci_enable_ari = 1;
373 SYSCTL_INT(_hw_pci, OID_AUTO, enable_ari, CTLFLAG_RDTUN, &pci_enable_ari,
374 0, "Enable support for PCIe Alternative RID Interpretation");
377 pci_has_quirk(uint32_t devid, int quirk)
379 const struct pci_quirk *q;
381 for (q = &pci_quirks[0]; q->devid; q++) {
382 if (q->devid == devid && q->type == quirk)
388 /* Find a device_t by bus/slot/function in domain 0 */
391 pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func)
394 return (pci_find_dbsf(0, bus, slot, func));
397 /* Find a device_t by domain/bus/slot/function */
400 pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func)
402 struct pci_devinfo *dinfo;
404 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
405 if ((dinfo->cfg.domain == domain) &&
406 (dinfo->cfg.bus == bus) &&
407 (dinfo->cfg.slot == slot) &&
408 (dinfo->cfg.func == func)) {
409 return (dinfo->cfg.dev);
416 /* Find a device_t by vendor/device ID */
419 pci_find_device(uint16_t vendor, uint16_t device)
421 struct pci_devinfo *dinfo;
423 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
424 if ((dinfo->cfg.vendor == vendor) &&
425 (dinfo->cfg.device == device)) {
426 return (dinfo->cfg.dev);
434 pci_find_class(uint8_t class, uint8_t subclass)
436 struct pci_devinfo *dinfo;
438 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
439 if (dinfo->cfg.baseclass == class &&
440 dinfo->cfg.subclass == subclass) {
441 return (dinfo->cfg.dev);
449 pci_printf(pcicfgregs *cfg, const char *fmt, ...)
454 retval = printf("pci%d:%d:%d:%d: ", cfg->domain, cfg->bus, cfg->slot,
457 retval += vprintf(fmt, ap);
462 /* return base address of memory or port map */
465 pci_mapbase(uint64_t mapreg)
468 if (PCI_BAR_MEM(mapreg))
469 return (mapreg & PCIM_BAR_MEM_BASE);
471 return (mapreg & PCIM_BAR_IO_BASE);
474 /* return map type of memory or port map */
477 pci_maptype(uint64_t mapreg)
480 if (PCI_BAR_IO(mapreg))
482 if (mapreg & PCIM_BAR_MEM_PREFETCH)
483 return ("Prefetchable Memory");
/* return log2 of map size decoded for memory or port map */
static int
pci_mapsize(uint64_t testval)
{
	int ln2size;

	/* Count trailing zero bits of the decoded base written back by probe. */
	testval = pci_mapbase(testval);
	ln2size = 0;
	if (testval != 0) {
		while ((testval & 1) == 0)
		{
			ln2size++;
			testval >>= 1;
		}
	}
	return (ln2size);
}
506 /* return base address of device ROM */
509 pci_rombase(uint64_t mapreg)
512 return (mapreg & PCIM_BIOS_ADDR_MASK);
/* return log2 of map size decoded for device ROM */
static int
pci_romsize(uint64_t testval)
{
	int ln2size;

	testval = pci_rombase(testval);
	ln2size = 0;
	if (testval != 0) {
		while ((testval & 1) == 0)
		{
			ln2size++;
			testval >>= 1;
		}
	}
	return (ln2size);
}
534 /* return log2 of address range supported by map register */
537 pci_maprange(uint64_t mapreg)
541 if (PCI_BAR_IO(mapreg))
544 switch (mapreg & PCIM_BAR_MEM_TYPE) {
545 case PCIM_BAR_MEM_32:
548 case PCIM_BAR_MEM_1MB:
551 case PCIM_BAR_MEM_64:
558 /* adjust some values from PCI 1.0 devices to match 2.0 standards ... */
561 pci_fixancient(pcicfgregs *cfg)
563 if ((cfg->hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)
566 /* PCI to PCI bridges use header type 1 */
567 if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI)
568 cfg->hdrtype = PCIM_HDRTYPE_BRIDGE;
571 /* extract header type specific config data */
574 pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg)
576 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
577 switch (cfg->hdrtype & PCIM_HDRTYPE) {
578 case PCIM_HDRTYPE_NORMAL:
579 cfg->subvendor = REG(PCIR_SUBVEND_0, 2);
580 cfg->subdevice = REG(PCIR_SUBDEV_0, 2);
581 cfg->nummaps = PCI_MAXMAPS_0;
583 case PCIM_HDRTYPE_BRIDGE:
584 cfg->nummaps = PCI_MAXMAPS_1;
586 case PCIM_HDRTYPE_CARDBUS:
587 cfg->subvendor = REG(PCIR_SUBVEND_2, 2);
588 cfg->subdevice = REG(PCIR_SUBDEV_2, 2);
589 cfg->nummaps = PCI_MAXMAPS_2;
595 /* read configuration header into pcicfgregs structure */
597 pci_read_device(device_t pcib, int d, int b, int s, int f, size_t size)
599 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
602 vid = REG(PCIR_VENDOR, 2);
603 did = REG(PCIR_DEVICE, 2);
605 return (pci_fill_devinfo(pcib, d, b, s, f, vid, did, size));
610 static struct pci_devinfo *
611 pci_fill_devinfo(device_t pcib, int d, int b, int s, int f, uint16_t vid,
612 uint16_t did, size_t size)
614 struct pci_devinfo *devlist_entry;
617 devlist_entry = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
619 cfg = &devlist_entry->cfg;
627 cfg->cmdreg = REG(PCIR_COMMAND, 2);
628 cfg->statreg = REG(PCIR_STATUS, 2);
629 cfg->baseclass = REG(PCIR_CLASS, 1);
630 cfg->subclass = REG(PCIR_SUBCLASS, 1);
631 cfg->progif = REG(PCIR_PROGIF, 1);
632 cfg->revid = REG(PCIR_REVID, 1);
633 cfg->hdrtype = REG(PCIR_HDRTYPE, 1);
634 cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1);
635 cfg->lattimer = REG(PCIR_LATTIMER, 1);
636 cfg->intpin = REG(PCIR_INTPIN, 1);
637 cfg->intline = REG(PCIR_INTLINE, 1);
639 cfg->mingnt = REG(PCIR_MINGNT, 1);
640 cfg->maxlat = REG(PCIR_MAXLAT, 1);
642 cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0;
643 cfg->hdrtype &= ~PCIM_MFDEV;
644 STAILQ_INIT(&cfg->maps);
647 pci_hdrtypedata(pcib, b, s, f, cfg);
649 if (REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT)
650 pci_read_cap(pcib, cfg);
652 STAILQ_INSERT_TAIL(&pci_devq, devlist_entry, pci_links);
654 devlist_entry->conf.pc_sel.pc_domain = cfg->domain;
655 devlist_entry->conf.pc_sel.pc_bus = cfg->bus;
656 devlist_entry->conf.pc_sel.pc_dev = cfg->slot;
657 devlist_entry->conf.pc_sel.pc_func = cfg->func;
658 devlist_entry->conf.pc_hdr = cfg->hdrtype;
660 devlist_entry->conf.pc_subvendor = cfg->subvendor;
661 devlist_entry->conf.pc_subdevice = cfg->subdevice;
662 devlist_entry->conf.pc_vendor = cfg->vendor;
663 devlist_entry->conf.pc_device = cfg->device;
665 devlist_entry->conf.pc_class = cfg->baseclass;
666 devlist_entry->conf.pc_subclass = cfg->subclass;
667 devlist_entry->conf.pc_progif = cfg->progif;
668 devlist_entry->conf.pc_revid = cfg->revid;
673 return (devlist_entry);
/*
 * Walk the standard PCI capability list for this function and record the
 * location/contents of the capabilities the bus driver cares about (power
 * management, HyperTransport, MSI, MSI-X, VPD, subvendor, PCI-X, PCIe).
 *
 * NOTE(review): this listing has many dropped lines (opening/closing braces,
 * `break' statements, the #else/#endif arms of the arch conditionals, the
 * pcie/pcix chipset flag updates).  Code below is preserved byte-for-byte;
 * recover the missing lines from the repository copy.
 */
678 pci_read_cap(device_t pcib, pcicfgregs *cfg)
680 #define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
681 #define WREG(n, v, w) PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w)
682 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
686 int ptr, nextptr, ptrptr;
/* Pick the capability-pointer register for this header type. */
688 switch (cfg->hdrtype & PCIM_HDRTYPE) {
689 case PCIM_HDRTYPE_NORMAL:
690 case PCIM_HDRTYPE_BRIDGE:
691 ptrptr = PCIR_CAP_PTR;
693 case PCIM_HDRTYPE_CARDBUS:
694 ptrptr = PCIR_CAP_PTR_2; /* cardbus capabilities ptr */
697 return; /* no extended capabilities support */
699 nextptr = REG(ptrptr, 1); /* sanity check? */
702 * Read capability entries.
704 while (nextptr != 0) {
707 printf("illegal PCI extended capability offset %d\n",
711 /* Find the next entry */
713 nextptr = REG(ptr + PCICAP_NEXTPTR, 1);
715 /* Process this entry */
716 switch (REG(ptr + PCICAP_ID, 1)) {
717 case PCIY_PMG: /* PCI power management */
718 if (cfg->pp.pp_cap == 0) {
719 cfg->pp.pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
720 cfg->pp.pp_status = ptr + PCIR_POWER_STATUS;
721 cfg->pp.pp_bse = ptr + PCIR_POWER_BSE;
722 if ((nextptr - ptr) > PCIR_POWER_DATA)
723 cfg->pp.pp_data = ptr + PCIR_POWER_DATA;
726 case PCIY_HT: /* HyperTransport */
727 /* Determine HT-specific capability type. */
728 val = REG(ptr + PCIR_HT_COMMAND, 2);
730 if ((val & 0xe000) == PCIM_HTCAP_SLAVE)
731 cfg->ht.ht_slave = ptr;
733 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
734 switch (val & PCIM_HTCMD_CAP_MASK) {
735 case PCIM_HTCAP_MSI_MAPPING:
736 if (!(val & PCIM_HTCMD_MSI_FIXED)) {
737 /* Sanity check the mapping window. */
738 addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI,
741 addr |= REG(ptr + PCIR_HTMSI_ADDRESS_LO,
743 if (addr != MSI_INTEL_ADDR_BASE)
745 "HT device at pci%d:%d:%d:%d has non-default MSI window 0x%llx\n",
746 cfg->domain, cfg->bus,
747 cfg->slot, cfg->func,
750 addr = MSI_INTEL_ADDR_BASE;
752 cfg->ht.ht_msimap = ptr;
753 cfg->ht.ht_msictrl = val;
754 cfg->ht.ht_msiaddr = addr;
759 case PCIY_MSI: /* PCI MSI */
760 cfg->msi.msi_location = ptr;
761 cfg->msi.msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2);
762 cfg->msi.msi_msgnum = 1 << ((cfg->msi.msi_ctrl &
763 PCIM_MSICTRL_MMC_MASK)>>1);
765 case PCIY_MSIX: /* PCI MSI-X */
766 cfg->msix.msix_location = ptr;
767 cfg->msix.msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2);
768 cfg->msix.msix_msgnum = (cfg->msix.msix_ctrl &
769 PCIM_MSIXCTRL_TABLE_SIZE) + 1;
770 val = REG(ptr + PCIR_MSIX_TABLE, 4);
771 cfg->msix.msix_table_bar = PCIR_BAR(val &
773 cfg->msix.msix_table_offset = val & ~PCIM_MSIX_BIR_MASK;
774 val = REG(ptr + PCIR_MSIX_PBA, 4);
775 cfg->msix.msix_pba_bar = PCIR_BAR(val &
777 cfg->msix.msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK;
779 case PCIY_VPD: /* PCI Vital Product Data */
780 cfg->vpd.vpd_reg = ptr;
783 /* Should always be true. */
784 if ((cfg->hdrtype & PCIM_HDRTYPE) ==
785 PCIM_HDRTYPE_BRIDGE) {
786 val = REG(ptr + PCIR_SUBVENDCAP_ID, 4);
787 cfg->subvendor = val & 0xffff;
788 cfg->subdevice = val >> 16;
791 case PCIY_PCIX: /* PCI-X */
793 * Assume we have a PCI-X chipset if we have
794 * at least one PCI-PCI bridge with a PCI-X
795 * capability. Note that some systems with
796 * PCI-express or HT chipsets might match on
797 * this check as well.
799 if ((cfg->hdrtype & PCIM_HDRTYPE) ==
802 cfg->pcix.pcix_location = ptr;
804 case PCIY_EXPRESS: /* PCI-express */
806 * Assume we have a PCI-express chipset if we have
807 * at least one PCI-express device.
810 cfg->pcie.pcie_location = ptr;
811 val = REG(ptr + PCIER_FLAGS, 2);
812 cfg->pcie.pcie_type = val & PCIEM_FLAGS_TYPE;
819 #if defined(__powerpc__)
821 * Enable the MSI mapping window for all HyperTransport
822 * slaves. PCI-PCI bridges have their windows enabled via
825 if (cfg->ht.ht_slave != 0 && cfg->ht.ht_msimap != 0 &&
826 !(cfg->ht.ht_msictrl & PCIM_HTCMD_MSI_ENABLE)) {
828 "Enabling MSI window for HyperTransport slave at pci%d:%d:%d:%d\n",
829 cfg->domain, cfg->bus, cfg->slot, cfg->func);
830 cfg->ht.ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
831 WREG(cfg->ht.ht_msimap + PCIR_HT_COMMAND, cfg->ht.ht_msictrl,
835 /* REG and WREG use carry through to next functions */
839 * PCI Vital Product Data
842 #define PCI_VPD_TIMEOUT 1000000
845 pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data)
847 int count = PCI_VPD_TIMEOUT;
849 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
851 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2);
853 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000) {
856 DELAY(1); /* limit looping */
858 *data = (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4));
865 pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data)
867 int count = PCI_VPD_TIMEOUT;
869 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
871 WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4);
872 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2);
873 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000) {
876 DELAY(1); /* limit looping */
883 #undef PCI_VPD_TIMEOUT
885 struct vpd_readstate {
895 vpd_nextbyte(struct vpd_readstate *vrs, uint8_t *data)
900 if (vrs->bytesinval == 0) {
901 if (pci_read_vpd_reg(vrs->pcib, vrs->cfg, vrs->off, ®))
903 vrs->val = le32toh(reg);
905 byte = vrs->val & 0xff;
908 vrs->val = vrs->val >> 8;
909 byte = vrs->val & 0xff;
/*
 * Parse the device's Vital Product Data into cfg->vpd: the identifier
 * string (vpd_ident), the read-only keyword array (vpd_ros) and the
 * read/write keyword array (vpd_w).  Implemented as a byte-at-a-time
 * state machine over vpd_nextbyte(): state 0 parses resource-item
 * headers, 1 the identifier string, 2/3 VPD-R keyword headers/values
 * (validating the RV checksum), 5/6 VPD-W keyword headers/values.
 * On checksum failure or malformed data the partially built arrays are
 * freed; on success vpd_cached is set.
 *
 * NOTE(review): roughly half the lines of this function were dropped by
 * the extraction (braces, state assignments, error paths).  Code below is
 * preserved byte-for-byte; recover the missing lines from the repository.
 */
919 pci_read_vpd(device_t pcib, pcicfgregs *cfg)
921 struct vpd_readstate vrs;
926 int alloc, off; /* alloc/off for RO/W arrays */
932 /* init vpd reader */
940 name = remain = i = 0; /* shut up stupid gcc */
941 alloc = off = 0; /* shut up stupid gcc */
942 dflen = 0; /* shut up stupid gcc */
945 if (vpd_nextbyte(&vrs, &byte)) {
950 printf("vpd: val: %#x, off: %d, bytesinval: %d, byte: %#hhx, " \
951 "state: %d, remain: %d, name: %#x, i: %d\n", vrs.val,
952 vrs.off, vrs.bytesinval, byte, state, remain, name, i);
955 case 0: /* item name */
957 if (vpd_nextbyte(&vrs, &byte2)) {
962 if (vpd_nextbyte(&vrs, &byte2)) {
966 remain |= byte2 << 8;
967 if (remain > (0x7f*4 - vrs.off)) {
970 "invalid VPD data, remain %#x\n",
976 name = (byte >> 3) & 0xf;
/* Dispatch on the resource item name parsed above. */
979 case 0x2: /* String */
980 cfg->vpd.vpd_ident = malloc(remain + 1,
988 case 0x10: /* VPD-R */
991 cfg->vpd.vpd_ros = malloc(alloc *
992 sizeof(*cfg->vpd.vpd_ros), M_DEVBUF,
996 case 0x11: /* VPD-W */
999 cfg->vpd.vpd_w = malloc(alloc *
1000 sizeof(*cfg->vpd.vpd_w), M_DEVBUF,
1004 default: /* Invalid data, abort */
1010 case 1: /* Identifier String */
1011 cfg->vpd.vpd_ident[i++] = byte;
1014 cfg->vpd.vpd_ident[i] = '\0';
1019 case 2: /* VPD-R Keyword Header */
1021 cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
1022 (alloc *= 2) * sizeof(*cfg->vpd.vpd_ros),
1023 M_DEVBUF, M_WAITOK | M_ZERO);
1025 cfg->vpd.vpd_ros[off].keyword[0] = byte;
1026 if (vpd_nextbyte(&vrs, &byte2)) {
1030 cfg->vpd.vpd_ros[off].keyword[1] = byte2;
1031 if (vpd_nextbyte(&vrs, &byte2)) {
1035 cfg->vpd.vpd_ros[off].len = dflen = byte2;
1037 strncmp(cfg->vpd.vpd_ros[off].keyword, "RV",
1040 * if this happens, we can't trust the rest
1043 pci_printf(cfg, "bad keyword length: %d\n",
1048 } else if (dflen == 0) {
1049 cfg->vpd.vpd_ros[off].value = malloc(1 *
1050 sizeof(*cfg->vpd.vpd_ros[off].value),
1051 M_DEVBUF, M_WAITOK);
1052 cfg->vpd.vpd_ros[off].value[0] = '\x00';
1054 cfg->vpd.vpd_ros[off].value = malloc(
1056 sizeof(*cfg->vpd.vpd_ros[off].value),
1057 M_DEVBUF, M_WAITOK);
1060 /* keep in sync w/ state 3's transistions */
1061 if (dflen == 0 && remain == 0)
1063 else if (dflen == 0)
1069 case 3: /* VPD-R Keyword Value */
1070 cfg->vpd.vpd_ros[off].value[i++] = byte;
1071 if (strncmp(cfg->vpd.vpd_ros[off].keyword,
1072 "RV", 2) == 0 && cksumvalid == -1) {
1078 "bad VPD cksum, remain %hhu\n",
1087 /* keep in sync w/ state 2's transistions */
1089 cfg->vpd.vpd_ros[off++].value[i++] = '\0';
1090 if (dflen == 0 && remain == 0) {
1091 cfg->vpd.vpd_rocnt = off;
1092 cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
1093 off * sizeof(*cfg->vpd.vpd_ros),
1094 M_DEVBUF, M_WAITOK | M_ZERO);
1096 } else if (dflen == 0)
1106 case 5: /* VPD-W Keyword Header */
1108 cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
1109 (alloc *= 2) * sizeof(*cfg->vpd.vpd_w),
1110 M_DEVBUF, M_WAITOK | M_ZERO);
1112 cfg->vpd.vpd_w[off].keyword[0] = byte;
1113 if (vpd_nextbyte(&vrs, &byte2)) {
1117 cfg->vpd.vpd_w[off].keyword[1] = byte2;
1118 if (vpd_nextbyte(&vrs, &byte2)) {
1122 cfg->vpd.vpd_w[off].len = dflen = byte2;
1123 cfg->vpd.vpd_w[off].start = vrs.off - vrs.bytesinval;
1124 cfg->vpd.vpd_w[off].value = malloc((dflen + 1) *
1125 sizeof(*cfg->vpd.vpd_w[off].value),
1126 M_DEVBUF, M_WAITOK);
1129 /* keep in sync w/ state 6's transistions */
1130 if (dflen == 0 && remain == 0)
1132 else if (dflen == 0)
1138 case 6: /* VPD-W Keyword Value */
1139 cfg->vpd.vpd_w[off].value[i++] = byte;
1142 /* keep in sync w/ state 5's transistions */
1144 cfg->vpd.vpd_w[off++].value[i++] = '\0';
1145 if (dflen == 0 && remain == 0) {
1146 cfg->vpd.vpd_wcnt = off;
1147 cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
1148 off * sizeof(*cfg->vpd.vpd_w),
1149 M_DEVBUF, M_WAITOK | M_ZERO);
1151 } else if (dflen == 0)
1156 pci_printf(cfg, "invalid state: %d\n", state);
/* Error/cleanup paths: discard partial data on checksum or I/O failure. */
1162 if (cksumvalid == 0 || state < -1) {
1163 /* read-only data bad, clean up */
1164 if (cfg->vpd.vpd_ros != NULL) {
1165 for (off = 0; cfg->vpd.vpd_ros[off].value; off++)
1166 free(cfg->vpd.vpd_ros[off].value, M_DEVBUF);
1167 free(cfg->vpd.vpd_ros, M_DEVBUF);
1168 cfg->vpd.vpd_ros = NULL;
1172 /* I/O error, clean up */
1173 pci_printf(cfg, "failed to read VPD data.\n");
1174 if (cfg->vpd.vpd_ident != NULL) {
1175 free(cfg->vpd.vpd_ident, M_DEVBUF);
1176 cfg->vpd.vpd_ident = NULL;
1178 if (cfg->vpd.vpd_w != NULL) {
1179 for (off = 0; cfg->vpd.vpd_w[off].value; off++)
1180 free(cfg->vpd.vpd_w[off].value, M_DEVBUF);
1181 free(cfg->vpd.vpd_w, M_DEVBUF);
1182 cfg->vpd.vpd_w = NULL;
1185 cfg->vpd.vpd_cached = 1;
1191 pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr)
1193 struct pci_devinfo *dinfo = device_get_ivars(child);
1194 pcicfgregs *cfg = &dinfo->cfg;
1196 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1197 pci_read_vpd(device_get_parent(dev), cfg);
1199 *identptr = cfg->vpd.vpd_ident;
1201 if (*identptr == NULL)
1208 pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw,
1211 struct pci_devinfo *dinfo = device_get_ivars(child);
1212 pcicfgregs *cfg = &dinfo->cfg;
1215 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1216 pci_read_vpd(device_get_parent(dev), cfg);
1218 for (i = 0; i < cfg->vpd.vpd_rocnt; i++)
1219 if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword,
1220 sizeof(cfg->vpd.vpd_ros[i].keyword)) == 0) {
1221 *vptr = cfg->vpd.vpd_ros[i].value;
1230 pci_fetch_vpd_list(device_t dev)
1232 struct pci_devinfo *dinfo = device_get_ivars(dev);
1233 pcicfgregs *cfg = &dinfo->cfg;
1235 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1236 pci_read_vpd(device_get_parent(device_get_parent(dev)), cfg);
/*
 * Find the requested HyperTransport capability and return the offset
 * in configuration space via the pointer provided. The function
 * returns 0 on success and an error code otherwise.
 *
 * NOTE(review): the outer loop, the 0xe000 slave/host masking branch, the
 * capreg store, and the inner-loop braces sit on lines dropped from this
 * garbled listing; the loop structure differs between FreeBSD versions, so
 * recover it from the repository copy rather than reconstructing it.
 */
1246 pci_find_htcap_method(device_t dev, device_t child, int capability, int *capreg)
1251 error = pci_find_cap(child, PCIY_HT, &ptr);
1256 * Traverse the capabilities list checking each HT capability
1257 * to see if it matches the requested HT capability.
1260 val = pci_read_config(child, ptr + PCIR_HT_COMMAND, 2);
1261 if (capability == PCIM_HTCAP_SLAVE ||
1262 capability == PCIM_HTCAP_HOST)
1265 val &= PCIM_HTCMD_CAP_MASK;
1266 if (val == capability) {
1272 /* Skip to the next HT capability. */
1274 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1275 if (pci_read_config(child, ptr + PCICAP_ID, 1) ==
1284 * Find the requested capability and return the offset in
1285 * configuration space via the pointer provided. The function returns
1286 * 0 on success and an error code otherwise.
1289 pci_find_cap_method(device_t dev, device_t child, int capability,
1292 struct pci_devinfo *dinfo = device_get_ivars(child);
1293 pcicfgregs *cfg = &dinfo->cfg;
1298 * Check the CAP_LIST bit of the PCI status register first.
1300 status = pci_read_config(child, PCIR_STATUS, 2);
1301 if (!(status & PCIM_STATUS_CAPPRESENT))
1305 * Determine the start pointer of the capabilities list.
/* The capabilities pointer register differs by header type:
 * normal/bridge headers share one location, CardBus uses another. */
1307 switch (cfg->hdrtype & PCIM_HDRTYPE) {
1308 case PCIM_HDRTYPE_NORMAL:
1309 case PCIM_HDRTYPE_BRIDGE:
1312 case PCIM_HDRTYPE_CARDBUS:
1313 ptr = PCIR_CAP_PTR_2;
1317 return (ENXIO); /* no extended capabilities support */
1319 ptr = pci_read_config(child, ptr, 1);
1322 * Traverse the capabilities list.
1325 if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
1330 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1337 * Find the requested extended capability and return the offset in
1338 * configuration space via the pointer provided. The function returns
1339 * 0 on success and an error code otherwise.
1342 pci_find_extcap_method(device_t dev, device_t child, int capability,
1345 struct pci_devinfo *dinfo = device_get_ivars(child);
1346 pcicfgregs *cfg = &dinfo->cfg;
1350 /* Only supported for PCI-express devices. */
1351 if (cfg->pcie.pcie_location == 0)
/* An all-ones or all-zero header means no extended capability list. */
1355 ecap = pci_read_config(child, ptr, 4);
1356 if (ecap == 0xffffffff || ecap == 0)
1359 if (PCI_EXTCAP_ID(ecap) == capability) {
/* Follow the next-pointer encoded in the extended capability header. */
1364 ptr = PCI_EXTCAP_NEXTPTR(ecap);
1367 ecap = pci_read_config(child, ptr, 4);
1374 * Support for MSI-X message interrupts.
/*
 * Program MSI-X table entry 'index' with the given message address/data
 * and propagate the address to the HT MSI mapping window if present.
 * Each table entry is 16 bytes: addr-lo, addr-hi, data, vector-control.
 */
1377 pci_enable_msix_method(device_t dev, device_t child, u_int index,
1378 uint64_t address, uint32_t data)
1380 struct pci_devinfo *dinfo = device_get_ivars(child);
1381 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1384 KASSERT(msix->msix_table_len > index, ("bogus index"));
1385 offset = msix->msix_table_offset + index * 16;
1386 bus_write_4(msix->msix_table_res, offset, address & 0xffffffff);
1387 bus_write_4(msix->msix_table_res, offset + 4, address >> 32);
1388 bus_write_4(msix->msix_table_res, offset + 8, data);
1390 /* Enable MSI -> HT mapping. */
1391 pci_ht_map_msi(child, address);
/*
 * Mask MSI-X message 'index' by setting the Vector Control mask bit in
 * its table entry (offset +12 within the 16-byte entry).  The write is
 * skipped if the vector is already masked.
 */
1395 pci_mask_msix(device_t dev, u_int index)
1397 struct pci_devinfo *dinfo = device_get_ivars(dev);
1398 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1399 uint32_t offset, val;
1401 KASSERT(msix->msix_msgnum > index, ("bogus index"));
1402 offset = msix->msix_table_offset + index * 16 + 12;
1403 val = bus_read_4(msix->msix_table_res, offset);
1404 if (!(val & PCIM_MSIX_VCTRL_MASK)) {
1405 val |= PCIM_MSIX_VCTRL_MASK;
1406 bus_write_4(msix->msix_table_res, offset, val);
/*
 * Unmask MSI-X message 'index' by clearing the Vector Control mask bit.
 * Mirror image of pci_mask_msix(); the write is skipped if the vector is
 * already unmasked.
 */
1411 pci_unmask_msix(device_t dev, u_int index)
1413 struct pci_devinfo *dinfo = device_get_ivars(dev);
1414 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1415 uint32_t offset, val;
1417 KASSERT(msix->msix_table_len > index, ("bogus index"));
1418 offset = msix->msix_table_offset + index * 16 + 12;
1419 val = bus_read_4(msix->msix_table_res, offset);
1420 if (val & PCIM_MSIX_VCTRL_MASK) {
1421 val &= ~PCIM_MSIX_VCTRL_MASK;
1422 bus_write_4(msix->msix_table_res, offset, val);
/*
 * Return non-zero if MSI-X message 'index' has its bit set in the
 * Pending Bit Array (one bit per message, packed 32 per dword).
 */
1427 pci_pending_msix(device_t dev, u_int index)
1429 struct pci_devinfo *dinfo = device_get_ivars(dev);
1430 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1431 uint32_t offset, bit;
1433 KASSERT(msix->msix_table_len > index, ("bogus index"));
1434 offset = msix->msix_pba_offset + (index / 32) * 4;
1435 bit = 1 << index % 32;
1436 return (bus_read_4(msix->msix_pba_res, offset) & bit);
1440 * Restore MSI-X registers and table during resume. If MSI-X is
1441 * enabled then walk the virtual table to restore the actual MSI-X
1445 pci_resume_msix(device_t dev)
1447 struct pci_devinfo *dinfo = device_get_ivars(dev);
1448 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1449 struct msix_table_entry *mte;
1450 struct msix_vector *mv;
1453 if (msix->msix_alloc > 0) {
1454 /* First, mask all vectors. */
1455 for (i = 0; i < msix->msix_msgnum; i++)
1456 pci_mask_msix(dev, i);
1458 /* Second, program any messages with at least one handler. */
1459 for (i = 0; i < msix->msix_table_len; i++) {
1460 mte = &msix->msix_table[i];
1461 if (mte->mte_vector == 0 || mte->mte_handlers == 0)
/* mte_vector is 1-based; translate to a msix_vectors[] slot. */
1463 mv = &msix->msix_vectors[mte->mte_vector - 1];
1464 pci_enable_msix(dev, i, mv->mv_address, mv->mv_data);
1465 pci_unmask_msix(dev, i);
/* Finally restore the cached MSI-X control register value. */
1468 pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
1469 msix->msix_ctrl, 2);
1473 * Attempt to allocate *count MSI-X messages. The actual number allocated is
1474 * returned in *count. After this function returns, each message will be
1475 * available to the driver as SYS_RES_IRQ resources starting at rid 1.
1478 pci_alloc_msix_method(device_t dev, device_t child, int *count)
1480 struct pci_devinfo *dinfo = device_get_ivars(child);
1481 pcicfgregs *cfg = &dinfo->cfg;
1482 struct resource_list_entry *rle;
1483 int actual, error, i, irq, max;
1485 /* Don't let count == 0 get us into trouble. */
1489 /* If rid 0 is allocated, then fail. */
1490 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
1491 if (rle != NULL && rle->res != NULL)
1494 /* Already have allocated messages? */
1495 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
1498 /* If MSI-X is blacklisted for this system, fail. */
1499 if (pci_msix_blacklisted())
1502 /* MSI-X capability present? */
1503 if (cfg->msix.msix_location == 0 || !pci_do_msix)
1506 /* Make sure the appropriate BARs are mapped. */
/* The MSI-X table BAR must already be allocated and active. */
1507 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1508 cfg->msix.msix_table_bar);
1509 if (rle == NULL || rle->res == NULL ||
1510 !(rman_get_flags(rle->res) & RF_ACTIVE))
1512 cfg->msix.msix_table_res = rle->res;
/* The PBA may live in a different BAR; check it separately if so. */
1513 if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
1514 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1515 cfg->msix.msix_pba_bar);
1516 if (rle == NULL || rle->res == NULL ||
1517 !(rman_get_flags(rle->res) & RF_ACTIVE))
1520 cfg->msix.msix_pba_res = rle->res;
1523 device_printf(child,
1524 "attempting to allocate %d MSI-X vectors (%d supported)\n",
1525 *count, cfg->msix.msix_msgnum);
/* Never request more than the capability advertises. */
1526 max = min(*count, cfg->msix.msix_msgnum);
1527 for (i = 0; i < max; i++) {
1528 /* Allocate a message. */
1529 error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq);
1535 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
1541 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1);
1543 device_printf(child, "using IRQ %lu for MSI-X\n",
1549 * Be fancy and try to print contiguous runs of
1550 * IRQ values as ranges. 'irq' is the previous IRQ.
1551 * 'run' is true if we are in a range.
1553 device_printf(child, "using IRQs %lu", rle->start);
1556 for (i = 1; i < actual; i++) {
1557 rle = resource_list_find(&dinfo->resources,
1558 SYS_RES_IRQ, i + 1);
1560 /* Still in a run? */
1561 if (rle->start == irq + 1) {
1567 /* Finish previous range. */
1573 /* Start new range. */
1574 printf(",%lu", rle->start);
1578 /* Unfinished range? */
1581 printf(" for MSI-X\n");
1585 /* Mask all vectors. */
1586 for (i = 0; i < cfg->msix.msix_msgnum; i++)
1587 pci_mask_msix(child, i);
1589 /* Allocate and initialize vector data and virtual table. */
1590 cfg->msix.msix_vectors = malloc(sizeof(struct msix_vector) * actual,
1591 M_DEVBUF, M_WAITOK | M_ZERO);
1592 cfg->msix.msix_table = malloc(sizeof(struct msix_table_entry) * actual,
1593 M_DEVBUF, M_WAITOK | M_ZERO);
/* Default 1:1 mapping: table entry i uses vector i+1 (1-based). */
1594 for (i = 0; i < actual; i++) {
1595 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1596 cfg->msix.msix_vectors[i].mv_irq = rle->start;
1597 cfg->msix.msix_table[i].mte_vector = i + 1;
1600 /* Update control register to enable MSI-X. */
1601 cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1602 pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
1603 cfg->msix.msix_ctrl, 2);
1605 /* Update counts of alloc'd messages. */
1606 cfg->msix.msix_alloc = actual;
1607 cfg->msix.msix_table_len = actual;
1613 * By default, pci_alloc_msix() will assign the allocated IRQ
1614 * resources consecutively to the first N messages in the MSI-X table.
1615 * However, device drivers may want to use different layouts if they
1616 * either receive fewer messages than they asked for, or they wish to
1617 * populate the MSI-X table sparsely. This method allows the driver
1618 * to specify what layout it wants. It must be called after a
1619 * successful pci_alloc_msix() but before any of the associated
1620 * SYS_RES_IRQ resources are allocated via bus_alloc_resource().
1622 * The 'vectors' array contains 'count' message vectors. The array
1623 * maps directly to the MSI-X table in that index 0 in the array
1624 * specifies the vector for the first message in the MSI-X table, etc.
1625 * The vector value in each array index can either be 0 to indicate
1626 * that no vector should be assigned to a message slot, or it can be a
1627 * number from 1 to N (where N is the count returned from a
1628 * succcessful call to pci_alloc_msix()) to indicate which message
1629 * vector (IRQ) to be used for the corresponding message.
1631 * On successful return, each message with a non-zero vector will have
1632 * an associated SYS_RES_IRQ whose rid is equal to the array index +
1633 * 1. Additionally, if any of the IRQs allocated via the previous
1634 * call to pci_alloc_msix() are not used in the mapping, those IRQs
1635 * will be freed back to the system automatically.
1637 * For example, suppose a driver has a MSI-X table with 6 messages and
1638 * asks for 6 messages, but pci_alloc_msix() only returns a count of
1639 * 3. Call the three vectors allocated by pci_alloc_msix() A, B, and
1640 * C. After the call to pci_alloc_msix(), the device will be setup to
1641 * have an MSI-X table of ABC--- (where - means no vector assigned).
1642 * If the driver then passes a vector array of { 1, 0, 1, 2, 0, 2 },
1643 * then the MSI-X table will look like A-AB-B, and the 'C' vector will
1644 * be freed back to the system. This device will also have valid
1645 * SYS_RES_IRQ rids of 1, 3, 4, and 6.
1647 * In any case, the SYS_RES_IRQ rid X will always map to the message
1648 * at MSI-X table index X - 1 and will only be valid if a vector is
1649 * assigned to that table entry.
/*
 * Re-layout the MSI-X virtual table according to the caller-supplied
 * 'vectors' array (see the block comment above for the contract).
 * Vector numbers in 'vectors' are 1-based: 0 means "no vector for this
 * slot", and value V refers to msix_vectors[V - 1].  Unused vectors are
 * released back to the parent bridge.
 */
1652 pci_remap_msix_method(device_t dev, device_t child, int count,
1653 const u_int *vectors)
1655 struct pci_devinfo *dinfo = device_get_ivars(child);
1656 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1657 struct resource_list_entry *rle;
1658 int i, irq, j, *used;
1661 * Have to have at least one message in the table but the
1662 * table can't be bigger than the actual MSI-X table in the
1665 if (count == 0 || count > msix->msix_msgnum)
1668 /* Sanity check the vectors. */
1669 for (i = 0; i < count; i++)
1670 if (vectors[i] > msix->msix_alloc)
1674 * Make sure there aren't any holes in the vectors to be used.
1675 * It's a big pain to support it, and it doesn't really make
1676 * sense anyway. Also, at least one vector must be used.
1678 used = malloc(sizeof(int) * msix->msix_alloc, M_DEVBUF, M_WAITOK |
1680 for (i = 0; i < count; i++)
1681 if (vectors[i] != 0)
1682 used[vectors[i] - 1] = 1;
1683 for (i = 0; i < msix->msix_alloc - 1; i++)
1684 if (used[i] == 0 && used[i + 1] == 1) {
1685 free(used, M_DEVBUF);
1689 free(used, M_DEVBUF);
1693 /* Make sure none of the resources are allocated. */
1694 for (i = 0; i < msix->msix_table_len; i++) {
1695 if (msix->msix_table[i].mte_vector == 0)
1697 if (msix->msix_table[i].mte_handlers > 0)
1699 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1700 KASSERT(rle != NULL, ("missing resource"));
1701 if (rle->res != NULL)
1705 /* Free the existing resource list entries. */
1706 for (i = 0; i < msix->msix_table_len; i++) {
1707 if (msix->msix_table[i].mte_vector == 0)
1709 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1713 * Build the new virtual table keeping track of which vectors are
1716 free(msix->msix_table, M_DEVBUF);
1717 msix->msix_table = malloc(sizeof(struct msix_table_entry) * count,
1718 M_DEVBUF, M_WAITOK | M_ZERO);
1719 for (i = 0; i < count; i++)
1720 msix->msix_table[i].mte_vector = vectors[i];
1721 msix->msix_table_len = count;
1723 /* Free any unused IRQs and resize the vectors array if necessary. */
1724 j = msix->msix_alloc - 1;
1726 struct msix_vector *vec;
1728 while (used[j] == 0) {
1729 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1730 msix->msix_vectors[j].mv_irq);
1733 vec = malloc(sizeof(struct msix_vector) * (j + 1), M_DEVBUF,
1735 bcopy(msix->msix_vectors, vec, sizeof(struct msix_vector) *
1737 free(msix->msix_vectors, M_DEVBUF);
1738 msix->msix_vectors = vec;
1739 msix->msix_alloc = j + 1;
1741 free(used, M_DEVBUF);
1743 /* Map the IRQs onto the rids. */
1744 for (i = 0; i < count; i++) {
1745 if (vectors[i] == 0)
/* BUGFIX: vectors[] is 1-based (see used[vectors[i] - 1] above),
 * so the vector array index must be vectors[i] - 1; without the
 * adjustment the wrong IRQ is used and vector N reads past the
 * end of msix_vectors[]. */
1747 irq = msix->msix_vectors[vectors[i] - 1].mv_irq;
1748 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
1753 device_printf(child, "Remapped MSI-X IRQs as: ");
1754 for (i = 0; i < count; i++) {
1757 if (vectors[i] == 0)
/* BUGFIX: same 1-based adjustment for the verbose printout. */
1761 msix->msix_vectors[vectors[i] - 1].mv_irq);
/*
 * Release all MSI-X messages allocated to 'child': disable MSI-X in the
 * control register, delete the SYS_RES_IRQ resource-list entries, and
 * hand every IRQ back to the parent bridge.  Fails if any message still
 * has handlers or an allocated resource.
 */
1770 pci_release_msix(device_t dev, device_t child)
1772 struct pci_devinfo *dinfo = device_get_ivars(child);
1773 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1774 struct resource_list_entry *rle;
1777 /* Do we have any messages to release? */
1778 if (msix->msix_alloc == 0)
1781 /* Make sure none of the resources are allocated. */
1782 for (i = 0; i < msix->msix_table_len; i++) {
1783 if (msix->msix_table[i].mte_vector == 0)
1785 if (msix->msix_table[i].mte_handlers > 0)
1787 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1788 KASSERT(rle != NULL, ("missing resource"));
1789 if (rle->res != NULL)
1793 /* Update control register to disable MSI-X. */
1794 msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE;
1795 pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL,
1796 msix->msix_ctrl, 2);
1798 /* Free the resource list entries. */
1799 for (i = 0; i < msix->msix_table_len; i++) {
1800 if (msix->msix_table[i].mte_vector == 0)
1802 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1804 free(msix->msix_table, M_DEVBUF);
1805 msix->msix_table_len = 0;
1807 /* Release the IRQs. */
1808 for (i = 0; i < msix->msix_alloc; i++)
1809 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1810 msix->msix_vectors[i].mv_irq);
1811 free(msix->msix_vectors, M_DEVBUF);
1812 msix->msix_alloc = 0;
1817 * Return the max supported MSI-X messages this device supports.
1818 * Basically, assuming the MD code can alloc messages, this function
1819 * should return the maximum value that pci_alloc_msix() can return.
1820 * Thus, it is subject to the tunables, etc.
1823 pci_msix_count_method(device_t dev, device_t child)
1825 struct pci_devinfo *dinfo = device_get_ivars(child);
1826 struct pcicfg_msix *msix = &dinfo->cfg.msix;
/* Zero if MSI-X is disabled by tunable or absent from the device. */
1828 if (pci_do_msix && msix->msix_location != 0)
1829 return (msix->msix_msgnum);
1834 * HyperTransport MSI mapping control
/*
 * Enable or disable the HT MSI mapping window for 'dev'.  A non-zero
 * 'addr' enables the mapping when the message address falls inside the
 * window (top bits compared at 1 MB granularity, hence the >> 20);
 * addr == 0 disables it.
 */
1837 pci_ht_map_msi(device_t dev, uint64_t addr)
1839 struct pci_devinfo *dinfo = device_get_ivars(dev);
1840 struct pcicfg_ht *ht = &dinfo->cfg.ht;
1845 if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) &&
1846 ht->ht_msiaddr >> 20 == addr >> 20) {
1847 /* Enable MSI -> HT mapping. */
1848 ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
1849 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1853 if (!addr && ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) {
1854 /* Disable MSI -> HT mapping. */
1855 ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE;
1856 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
/*
 * Return the PCIe Max Read Request Size in bytes (0 if the device has no
 * PCIe capability).  The 3-bit field encodes 128 << n bytes, hence the
 * 1 << (val + 7) after shifting the field into place.
 */
1862 pci_get_max_read_req(device_t dev)
1864 struct pci_devinfo *dinfo = device_get_ivars(dev);
1868 cap = dinfo->cfg.pcie.pcie_location;
1871 val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
1872 val &= PCIEM_CTL_MAX_READ_REQUEST;
1874 return (1 << (val + 7));
/*
 * Set the PCIe Max Read Request Size.  'size' is rounded down to a power
 * of two (fls-based) and encoded into bits 14:12 of the Device Control
 * register (128 bytes == encoding 0, hence fls(size) - 8).
 */
1878 pci_set_max_read_req(device_t dev, int size)
1880 struct pci_devinfo *dinfo = device_get_ivars(dev);
1884 cap = dinfo->cfg.pcie.pcie_location;
1891 size = (1 << (fls(size) - 1));
1892 val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
1893 val &= ~PCIEM_CTL_MAX_READ_REQUEST;
1894 val |= (fls(size) - 8) << 12;
1895 pci_write_config(dev, cap + PCIER_DEVICE_CTL, val, 2);
1900 * Support for MSI message signalled interrupts.
/*
 * Program the MSI address/data registers for 'child' and set the MSI
 * enable bit.  64-bit capable devices use the high-address register and
 * the 64-bit data register offset.
 */
1903 pci_enable_msi_method(device_t dev, device_t child, uint64_t address,
1906 struct pci_devinfo *dinfo = device_get_ivars(child);
1907 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1909 /* Write data and address values. */
1910 pci_write_config(child, msi->msi_location + PCIR_MSI_ADDR,
1911 address & 0xffffffff, 4);
1912 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1913 pci_write_config(child, msi->msi_location + PCIR_MSI_ADDR_HIGH,
1915 pci_write_config(child, msi->msi_location + PCIR_MSI_DATA_64BIT,
1918 pci_write_config(child, msi->msi_location + PCIR_MSI_DATA, data,
1921 /* Enable MSI in the control register. */
1922 msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE;
1923 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
1926 /* Enable MSI -> HT mapping. */
1927 pci_ht_map_msi(child, address);
/*
 * Disable MSI for 'child': tear down the HT mapping first (addr == 0
 * disables it), then clear the MSI enable bit in the control register.
 */
1931 pci_disable_msi_method(device_t dev, device_t child)
1933 struct pci_devinfo *dinfo = device_get_ivars(child);
1934 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1936 /* Disable MSI -> HT mapping. */
1937 pci_ht_map_msi(child, 0);
1939 /* Disable MSI in the control register. */
1940 msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE;
1941 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
1946 * Restore MSI registers during resume. If MSI is enabled then
1947 * restore the data and address registers in addition to the control
1951 pci_resume_msi(device_t dev)
1953 struct pci_devinfo *dinfo = device_get_ivars(dev);
1954 struct pcicfg_msi *msi = &dinfo->cfg.msi;
/* Only re-program address/data if MSI was enabled before suspend. */
1958 if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) {
1959 address = msi->msi_addr;
1960 data = msi->msi_data;
1961 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
1962 address & 0xffffffff, 4);
1963 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1964 pci_write_config(dev, msi->msi_location +
1965 PCIR_MSI_ADDR_HIGH, address >> 32, 4);
1966 pci_write_config(dev, msi->msi_location +
1967 PCIR_MSI_DATA_64BIT, data, 2);
1969 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA,
/* The control register is restored unconditionally. */
1972 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
/*
 * Re-program a moved IRQ 'irq' for 'dev': ask the parent bridge for a
 * fresh address/data pair via PCIB_MAP_MSI() and apply it to whichever
 * MSI message or MSI-X table slots currently use that IRQ.
 */
1977 pci_remap_intr_method(device_t bus, device_t dev, u_int irq)
1979 struct pci_devinfo *dinfo = device_get_ivars(dev);
1980 pcicfgregs *cfg = &dinfo->cfg;
1981 struct resource_list_entry *rle;
1982 struct msix_table_entry *mte;
1983 struct msix_vector *mv;
1989 * Handle MSI first. We try to find this IRQ among our list
1990 * of MSI IRQs. If we find it, we request updated address and
1991 * data registers and apply the results.
1993 if (cfg->msi.msi_alloc > 0) {
1995 /* If we don't have any active handlers, nothing to do. */
1996 if (cfg->msi.msi_handlers == 0)
1998 for (i = 0; i < cfg->msi.msi_alloc; i++) {
1999 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ,
2001 if (rle->start == irq) {
2002 error = PCIB_MAP_MSI(device_get_parent(bus),
2003 dev, irq, &addr, &data);
/* MSI must be disabled while the address/data change. */
2006 pci_disable_msi(dev);
2007 dinfo->cfg.msi.msi_addr = addr;
2008 dinfo->cfg.msi.msi_data = data;
2009 pci_enable_msi(dev, addr, data);
2017 * For MSI-X, we check to see if we have this IRQ. If we do,
2018 * we request the updated mapping info. If that works, we go
2019 * through all the slots that use this IRQ and update them.
2021 if (cfg->msix.msix_alloc > 0) {
2022 for (i = 0; i < cfg->msix.msix_alloc; i++) {
2023 mv = &cfg->msix.msix_vectors[i];
2024 if (mv->mv_irq == irq) {
2025 error = PCIB_MAP_MSI(device_get_parent(bus),
2026 dev, irq, &addr, &data);
2029 mv->mv_address = addr;
/* Update every table slot referencing vector i+1 (1-based). */
2031 for (j = 0; j < cfg->msix.msix_table_len; j++) {
2032 mte = &cfg->msix.msix_table[j];
2033 if (mte->mte_vector != i + 1)
2035 if (mte->mte_handlers == 0)
2037 pci_mask_msix(dev, j);
2038 pci_enable_msix(dev, j, addr, data);
2039 pci_unmask_msix(dev, j);
2050 * Returns true if the specified device is blacklisted because MSI
2054 pci_msi_device_blacklisted(device_t dev)
/* Honor the global tunable; otherwise consult the per-device quirk table. */
2057 if (!pci_honor_msi_blacklist)
2060 return (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSI));
2064 * Determine if MSI is blacklisted globally on this system. Currently,
2065 * we just check for blacklisted chipsets as represented by the
2066 * host-PCI bridge at device 0:0:0. In the future, it may become
2067 * necessary to check other system attributes, such as the kenv values
2068 * that give the motherboard manufacturer and model number.
2071 pci_msi_blacklisted(void)
2075 if (!pci_honor_msi_blacklist)
2078 /* Blacklist all non-PCI-express and non-PCI-X chipsets. */
2079 if (!(pcie_chipset || pcix_chipset)) {
2080 if (vm_guest != VM_GUEST_NO) {
2082 * Whitelist older chipsets in virtual
2083 * machines known to support MSI.
2085 dev = pci_find_bsf(0, 0, 0);
2087 return (!pci_has_quirk(pci_get_devid(dev),
2088 PCI_QUIRK_ENABLE_MSI_VM));
/* Otherwise key the decision off the host bridge's quirk entry. */
2093 dev = pci_find_bsf(0, 0, 0);
2095 return (pci_msi_device_blacklisted(dev));
2100 * Returns true if the specified device is blacklisted because MSI-X
2101 * doesn't work. Note that this assumes that if MSI doesn't work,
2102 * MSI-X doesn't either.
2105 pci_msix_device_blacklisted(device_t dev)
2108 if (!pci_honor_msi_blacklist)
/* An explicit MSI-X quirk wins; otherwise fall back to the MSI check. */
2111 if (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSIX))
2114 return (pci_msi_device_blacklisted(dev));
2118 * Determine if MSI-X is blacklisted globally on this system. If MSI
2119 * is blacklisted, assume that MSI-X is as well. Check for additional
2120 * chipsets where MSI works but MSI-X does not.
2123 pci_msix_blacklisted(void)
2127 if (!pci_honor_msi_blacklist)
/* Check the host bridge at 0:0:0 for an MSI-X-specific quirk first. */
2130 dev = pci_find_bsf(0, 0, 0);
2131 if (dev != NULL && pci_has_quirk(pci_get_devid(dev),
2132 PCI_QUIRK_DISABLE_MSIX))
2135 return (pci_msi_blacklisted());
2139 * Attempt to allocate *count MSI messages. The actual number allocated is
2140 * returned in *count. After this function returns, each message will be
2141 * available to the driver as SYS_RES_IRQ resources starting at a rid 1.
2144 pci_alloc_msi_method(device_t dev, device_t child, int *count)
2146 struct pci_devinfo *dinfo = device_get_ivars(child);
2147 pcicfgregs *cfg = &dinfo->cfg;
2148 struct resource_list_entry *rle;
2149 int actual, error, i, irqs[32];
2152 /* Don't let count == 0 get us into trouble. */
2156 /* If rid 0 is allocated, then fail. */
2157 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
2158 if (rle != NULL && rle->res != NULL)
2161 /* Already have allocated messages? */
2162 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
2165 /* If MSI is blacklisted for this system, fail. */
2166 if (pci_msi_blacklisted())
2169 /* MSI capability present? */
2170 if (cfg->msi.msi_location == 0 || !pci_do_msi)
2174 device_printf(child,
2175 "attempting to allocate %d MSI vectors (%d supported)\n",
2176 *count, cfg->msi.msi_msgnum);
2178 /* Don't ask for more than the device supports. */
2179 actual = min(*count, cfg->msi.msi_msgnum);
2181 /* Don't ask for more than 32 messages. */
2182 actual = min(actual, 32);
2184 /* MSI requires power of 2 number of messages. */
2185 if (!powerof2(actual))
2189 /* Try to allocate N messages. */
2190 error = PCIB_ALLOC_MSI(device_get_parent(dev), child, actual,
2202 * We now have N actual messages mapped onto SYS_RES_IRQ
2203 * resources in the irqs[] array, so add new resources
2204 * starting at rid 1.
2206 for (i = 0; i < actual; i++)
2207 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
2208 irqs[i], irqs[i], 1);
2212 device_printf(child, "using IRQ %d for MSI\n", irqs[0]);
2217 * Be fancy and try to print contiguous runs
2218 * of IRQ values as ranges. 'run' is true if
2219 * we are in a range.
2221 device_printf(child, "using IRQs %d", irqs[0]);
2223 for (i = 1; i < actual; i++) {
2225 /* Still in a run? */
2226 if (irqs[i] == irqs[i - 1] + 1) {
2231 /* Finish previous range. */
2233 printf("-%d", irqs[i - 1]);
2237 /* Start new range. */
2238 printf(",%d", irqs[i]);
2241 /* Unfinished range? */
2243 printf("-%d", irqs[actual - 1]);
2244 printf(" for MSI\n");
2248 /* Update control register with actual count. */
/* MME field encodes log2 of the message count in bits 6:4. */
2249 ctrl = cfg->msi.msi_ctrl;
2250 ctrl &= ~PCIM_MSICTRL_MME_MASK;
2251 ctrl |= (ffs(actual) - 1) << 4;
2252 cfg->msi.msi_ctrl = ctrl;
2253 pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2);
2255 /* Update counts of alloc'd messages. */
2256 cfg->msi.msi_alloc = actual;
2257 cfg->msi.msi_handlers = 0;
2262 /* Release the MSI messages associated with this device. */
2264 pci_release_msi_method(device_t dev, device_t child)
2266 struct pci_devinfo *dinfo = device_get_ivars(child);
2267 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2268 struct resource_list_entry *rle;
2269 int error, i, irqs[32];
2271 /* Try MSI-X first. */
/* ENODEV means "no MSI-X to release"; anything else is a real result. */
2272 error = pci_release_msix(dev, child);
2273 if (error != ENODEV)
2276 /* Do we have any messages to release? */
2277 if (msi->msi_alloc == 0)
2279 KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages"));
2281 /* Make sure none of the resources are allocated. */
2282 if (msi->msi_handlers > 0)
2284 for (i = 0; i < msi->msi_alloc; i++) {
2285 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
2286 KASSERT(rle != NULL, ("missing MSI resource"));
2287 if (rle->res != NULL)
2289 irqs[i] = rle->start;
2292 /* Update control register with 0 count. */
2293 KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE),
2294 ("%s: MSI still enabled", __func__));
2295 msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK;
2296 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
2299 /* Release the messages. */
2300 PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs);
2301 for (i = 0; i < msi->msi_alloc; i++)
2302 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
2304 /* Update alloc count. */
2312 * Return the max supported MSI messages this device supports.
2313 * Basically, assuming the MD code can alloc messages, this function
2314 * should return the maximum value that pci_alloc_msi() can return.
2315 * Thus, it is subject to the tunables, etc.
2318 pci_msi_count_method(device_t dev, device_t child)
2320 struct pci_devinfo *dinfo = device_get_ivars(child);
2321 struct pcicfg_msi *msi = &dinfo->cfg.msi;
/* Zero if MSI is disabled by tunable or the capability is absent. */
2323 if (pci_do_msi && msi->msi_location != 0)
2324 return (msi->msi_msgnum);
2328 /* free pcicfgregs structure and all depending data structures */
2331 pci_freecfg(struct pci_devinfo *dinfo)
2333 struct devlist *devlist_head;
2334 struct pci_map *pm, *next;
2337 devlist_head = &pci_devq;
/* Free cached VPD strings: ident, each RO/RW value, then the arrays. */
2339 if (dinfo->cfg.vpd.vpd_reg) {
2340 free(dinfo->cfg.vpd.vpd_ident, M_DEVBUF);
2341 for (i = 0; i < dinfo->cfg.vpd.vpd_rocnt; i++)
2342 free(dinfo->cfg.vpd.vpd_ros[i].value, M_DEVBUF);
2343 free(dinfo->cfg.vpd.vpd_ros, M_DEVBUF);
2344 for (i = 0; i < dinfo->cfg.vpd.vpd_wcnt; i++)
2345 free(dinfo->cfg.vpd.vpd_w[i].value, M_DEVBUF);
2346 free(dinfo->cfg.vpd.vpd_w, M_DEVBUF);
/* SAFE variant required: entries are freed while walking the list. */
2348 STAILQ_FOREACH_SAFE(pm, &dinfo->cfg.maps, pm_link, next) {
2351 STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links);
2352 free(dinfo, M_DEVBUF);
2354 /* increment the generation count */
2357 /* we're losing one device */
2363 * PCI power management
/*
 * Transition 'child' to PCI power state 'state' (D0..D3).  Returns
 * EOPNOTSUPP when the device lacks a PM capability or does not support
 * the requested state.  NOTE(review): the DELAY() calls implied by the
 * comment below are elided in this view.
 */
2366 pci_set_powerstate_method(device_t dev, device_t child, int state)
2368 struct pci_devinfo *dinfo = device_get_ivars(child);
2369 pcicfgregs *cfg = &dinfo->cfg;
2371 int result, oldstate, highest, delay;
2373 if (cfg->pp.pp_cap == 0)
2374 return (EOPNOTSUPP);
2377 * Optimize a no state change request away. While it would be OK to
2378 * write to the hardware in theory, some devices have shown odd
2379 * behavior when going from D3 -> D3.
2381 oldstate = pci_get_powerstate(child);
2382 if (oldstate == state)
2386 * The PCI power management specification states that after a state
2387 * transition between PCI power states, system software must
2388 * guarantee a minimal delay before the function accesses the device.
2389 * Compute the worst case delay that we need to guarantee before we
2390 * access the device. Many devices will be responsive much more
2391 * quickly than this delay, but there are some that don't respond
2392 * instantly to state changes. Transitions to/from D3 state require
2393 * 10ms, while D2 requires 200us, and D0/1 require none. The delay
2394 * is done below with DELAY rather than a sleeper function because
2395 * this function can be called from contexts where we cannot sleep.
2397 highest = (oldstate > state) ? oldstate : state;
2398 if (highest == PCI_POWERSTATE_D3)
2400 else if (highest == PCI_POWERSTATE_D2)
/* Read-modify-write: preserve everything except the power state field. */
2404 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2)
2405 & ~PCIM_PSTAT_DMASK;
2408 case PCI_POWERSTATE_D0:
2409 status |= PCIM_PSTAT_D0;
2411 case PCI_POWERSTATE_D1:
2412 if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0)
2413 return (EOPNOTSUPP);
2414 status |= PCIM_PSTAT_D1;
2416 case PCI_POWERSTATE_D2:
2417 if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0)
2418 return (EOPNOTSUPP);
2419 status |= PCIM_PSTAT_D2;
2421 case PCI_POWERSTATE_D3:
2422 status |= PCIM_PSTAT_D3;
2429 pci_printf(cfg, "Transition from D%d to D%d\n", oldstate,
2432 PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2);
/*
 * Return the current PCI power state of 'child' as read from its PM
 * status register; devices without a PM capability are reported as D0.
 */
2439 pci_get_powerstate_method(device_t dev, device_t child)
2441 struct pci_devinfo *dinfo = device_get_ivars(child);
2442 pcicfgregs *cfg = &dinfo->cfg;
2446 if (cfg->pp.pp_cap != 0) {
2447 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2);
2448 switch (status & PCIM_PSTAT_DMASK) {
2450 result = PCI_POWERSTATE_D0;
2453 result = PCI_POWERSTATE_D1;
2456 result = PCI_POWERSTATE_D2;
2459 result = PCI_POWERSTATE_D3;
2462 result = PCI_POWERSTATE_UNKNOWN;
2466 /* No support, device is always at D0 */
2467 result = PCI_POWERSTATE_D0;
2473 * Some convenience functions for PCI device drivers.
/* Read-modify-write helper: set 'bit' in the child's PCI command register. */
2476 static __inline void
2477 pci_set_command_bit(device_t dev, device_t child, uint16_t bit)
2481 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2483 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
/* Read-modify-write helper: clear 'bit' in the child's PCI command register. */
2486 static __inline void
2487 pci_clear_command_bit(device_t dev, device_t child, uint16_t bit)
2491 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2493 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
/* Enable bus mastering for 'child' by setting PCIM_CMD_BUSMASTEREN. */
2497 pci_enable_busmaster_method(device_t dev, device_t child)
2499 pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
/* Disable bus mastering for 'child' by clearing PCIM_CMD_BUSMASTEREN. */
2504 pci_disable_busmaster_method(device_t dev, device_t child)
2506 pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
/*
 * Enable I/O-port or memory decoding for 'child', selected by 'space'
 * (SYS_RES_IOPORT or SYS_RES_MEMORY).
 */
2511 pci_enable_io_method(device_t dev, device_t child, int space)
2516 case SYS_RES_IOPORT:
2517 bit = PCIM_CMD_PORTEN;
2519 case SYS_RES_MEMORY:
2520 bit = PCIM_CMD_MEMEN;
2525 pci_set_command_bit(dev, child, bit);
/*
 * Disable I/O-port or memory decoding for 'child', selected by 'space'
 * (SYS_RES_IOPORT or SYS_RES_MEMORY).
 */
2530 pci_disable_io_method(device_t dev, device_t child, int space)
2535 case SYS_RES_IOPORT:
2536 bit = PCIM_CMD_PORTEN;
2538 case SYS_RES_MEMORY:
2539 bit = PCIM_CMD_MEMEN;
2544 pci_clear_command_bit(dev, child, bit);
2549 * New style pci driver. Parent device is either a pci-host-bridge or a
2550 * pci-pci-bridge. Both kinds are represented by instances of pcib.
/*
 * Print a multi-line summary of a newly found device: IDs, location,
 * class, command/status, timing registers, interrupt routing, and the
 * power-management, MSI and MSI-X capabilities if present.
 */
2554 pci_print_verbose(struct pci_devinfo *dinfo)
2558 pcicfgregs *cfg = &dinfo->cfg;
2560 printf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n",
2561 cfg->vendor, cfg->device, cfg->revid);
2562 printf("\tdomain=%d, bus=%d, slot=%d, func=%d\n",
2563 cfg->domain, cfg->bus, cfg->slot, cfg->func);
2564 printf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n",
2565 cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype,
2567 printf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n",
2568 cfg->cmdreg, cfg->statreg, cfg->cachelnsz);
2569 printf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n",
2570 cfg->lattimer, cfg->lattimer * 30, cfg->mingnt,
2571 cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250);
/* Interrupt pin is 1-based (1 == INTA), hence the +'a' -1 letter math. */
2572 if (cfg->intpin > 0)
2573 printf("\tintpin=%c, irq=%d\n",
2574 cfg->intpin +'a' -1, cfg->intline);
2575 if (cfg->pp.pp_cap) {
2578 status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2);
2579 printf("\tpowerspec %d supports D0%s%s D3 current D%d\n",
2580 cfg->pp.pp_cap & PCIM_PCAP_SPEC,
2581 cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "",
2582 cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "",
2583 status & PCIM_PSTAT_DMASK);
2585 if (cfg->msi.msi_location) {
2588 ctrl = cfg->msi.msi_ctrl;
2589 printf("\tMSI supports %d message%s%s%s\n",
2590 cfg->msi.msi_msgnum,
2591 (cfg->msi.msi_msgnum == 1) ? "" : "s",
2592 (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "",
2593 (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":"");
2595 if (cfg->msix.msix_location) {
2596 printf("\tMSI-X supports %d message%s ",
2597 cfg->msix.msix_msgnum,
2598 (cfg->msix.msix_msgnum == 1) ? "" : "s");
2599 if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar)
2600 printf("in map 0x%x\n",
2601 cfg->msix.msix_table_bar);
2603 printf("in maps 0x%x and 0x%x\n",
2604 cfg->msix.msix_table_bar,
2605 cfg->msix.msix_pba_bar);
/* Return non-zero if I/O port decoding is enabled in the command register. */
2611 pci_porten(device_t dev)
2613 return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_PORTEN) != 0;
/* Return non-zero if memory decoding is enabled in the command register. */
2617 pci_memen(device_t dev)
2619 return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_MEMEN) != 0;
/*
 * Read a BAR's current value and size it by the standard write-all-ones
 * probe, returning the raw value in *mapp and the probe readback in
 * *testvalp; *bar64 (if non-NULL) reports whether this is a 64-bit BAR.
 * Decoding is disabled around the probe and the original value restored.
 */
2623 pci_read_bar(device_t dev, int reg, pci_addr_t *mapp, pci_addr_t *testvalp,
2626 struct pci_devinfo *dinfo;
2627 pci_addr_t map, testval;
2632 * The device ROM BAR is special. It is always a 32-bit
2633 * memory BAR. Bit 0 is special and should not be set when
2636 dinfo = device_get_ivars(dev);
2637 if (PCIR_IS_BIOS(&dinfo->cfg, reg)) {
2638 map = pci_read_config(dev, reg, 4);
/* 0xfffffffe keeps bit 0 (ROM enable) clear during the size probe. */
2639 pci_write_config(dev, reg, 0xfffffffe, 4);
2640 testval = pci_read_config(dev, reg, 4);
2641 pci_write_config(dev, reg, map, 4);
2643 *testvalp = testval;
2649 map = pci_read_config(dev, reg, 4);
2650 ln2range = pci_maprange(map);
/* 64-bit BAR: fold the upper dword into the value. */
2652 map |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32;
2655 * Disable decoding via the command register before
2656 * determining the BAR's length since we will be placing it in
2659 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2660 pci_write_config(dev, PCIR_COMMAND,
2661 cmd & ~(PCI_BAR_MEM(map) ? PCIM_CMD_MEMEN : PCIM_CMD_PORTEN), 2);
2664 * Determine the BAR's length by writing all 1's. The bottom
2665 * log_2(size) bits of the BAR will stick as 0 when we read
2668 pci_write_config(dev, reg, 0xffffffff, 4);
2669 testval = pci_read_config(dev, reg, 4);
2670 if (ln2range == 64) {
2671 pci_write_config(dev, reg + 4, 0xffffffff, 4);
2672 testval |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32;
2676 * Restore the original value of the BAR. We may have reprogrammed
2677 * the BAR of the low-level console device and when booting verbose,
2678 * we need the console device addressable.
2680 pci_write_config(dev, reg, map, 4);
2682 pci_write_config(dev, reg + 4, map >> 32, 4);
2683 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2686 *testvalp = testval;
2688 *bar64 = (ln2range == 64);
/*
 * Program a BAR with a new base address and refresh the cached value in
 * the struct pci_map from the hardware readback (writes the upper dword
 * too for 64-bit BARs).
 */
2692 pci_write_bar(device_t dev, struct pci_map *pm, pci_addr_t base)
2694 struct pci_devinfo *dinfo;
2697 /* The device ROM BAR is always a 32-bit memory BAR. */
2698 dinfo = device_get_ivars(dev);
2699 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg))
2702 ln2range = pci_maprange(pm->pm_value);
2703 pci_write_config(dev, pm->pm_reg, base, 4);
2705 pci_write_config(dev, pm->pm_reg + 4, base >> 32, 4);
/* Re-read so pm_value reflects what the device actually latched. */
2706 pm->pm_value = pci_read_config(dev, pm->pm_reg, 4);
2708 pm->pm_value |= (pci_addr_t)pci_read_config(dev,
2709 pm->pm_reg + 4, 4) << 32;
/*
 * Look up the struct pci_map tracking the BAR at config offset 'reg',
 * scanning the device's sorted BAR list.
 */
2713 pci_find_bar(device_t dev, int reg)
2715 struct pci_devinfo *dinfo;
2718 dinfo = device_get_ivars(dev);
2719 STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) {
2720 if (pm->pm_reg == reg)
/*
 * Return non-zero if decoding for this BAR is currently enabled: for the
 * ROM BAR both the ROM-enable bit and MEMEN must be set; otherwise check
 * MEMEN or PORTEN according to the BAR's type.
 */
2727 pci_bar_enabled(device_t dev, struct pci_map *pm)
2729 struct pci_devinfo *dinfo;
2732 dinfo = device_get_ivars(dev);
2733 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) &&
2734 !(pm->pm_value & PCIM_BIOS_ENABLE))
2736 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2737 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) || PCI_BAR_MEM(pm->pm_value))
2738 return ((cmd & PCIM_CMD_MEMEN) != 0);
2740 return ((cmd & PCIM_CMD_PORTEN) != 0);
/*
 * Allocate a struct pci_map describing a BAR and insert it into the
 * device's BAR list, kept sorted by config-space register offset.
 * Duplicate registrations are a bug (KASSERT).
 */
2744 pci_add_bar(device_t dev, int reg, pci_addr_t value, pci_addr_t size)
2746 struct pci_devinfo *dinfo;
2747 struct pci_map *pm, *prev;
2749 dinfo = device_get_ivars(dev);
2750 pm = malloc(sizeof(*pm), M_DEVBUF, M_WAITOK | M_ZERO);
2752 pm->pm_value = value;
/* Find the insertion point that keeps the list sorted by pm_reg. */
2754 STAILQ_FOREACH(prev, &dinfo->cfg.maps, pm_link) {
2755 KASSERT(prev->pm_reg != pm->pm_reg, ("duplicate map %02x",
2757 if (STAILQ_NEXT(prev, pm_link) == NULL ||
2758 STAILQ_NEXT(prev, pm_link)->pm_reg > pm->pm_reg)
2762 STAILQ_INSERT_AFTER(&dinfo->cfg.maps, prev, pm, pm_link);
2764 STAILQ_INSERT_TAIL(&dinfo->cfg.maps, pm, pm_link);
/*
 * Rewrite every tracked BAR from its cached pm_value, e.g. after a
 * power-state transition clobbered config space.  The ROM BAR is
 * skipped; 64-bit BARs get their upper dword written as well.
 */
2769 pci_restore_bars(device_t dev)
2771 struct pci_devinfo *dinfo;
2775 dinfo = device_get_ivars(dev);
2776 STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) {
2777 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg))
2780 ln2range = pci_maprange(pm->pm_value);
2781 pci_write_config(dev, pm->pm_reg, pm->pm_value, 4);
2783 pci_write_config(dev, pm->pm_reg + 4,
2784 pm->pm_value >> 32, 4);
2789 * Add a resource based on a pci map register. Return 1 if the map
2790 * register is a 32bit map register or 2 if it is a 64bit register.
/*
 * Sizes the BAR at 'reg', records it, optionally re-enables decoding,
 * adds it to the child's resource list and tries to reserve the range
 * from the parent bus.  Fix applied: the token "&reg" had been
 * mojibake-collapsed to "®" in two resource_list_reserve() calls.
 */
2793 pci_add_map(device_t bus, device_t dev, int reg, struct resource_list *rl,
2794 int force, int prefetch)
2797 pci_addr_t base, map, testval;
2798 pci_addr_t start, end, count;
2799 int barlen, basezero, flags, maprange, mapsize, type;
2801 struct resource *res;
2804 * The BAR may already exist if the device is a CardBus card
2805 * whose CIS is stored in this BAR.
2807 pm = pci_find_bar(dev, reg);
2809 maprange = pci_maprange(pm->pm_value);
2810 barlen = maprange == 64 ? 2 : 1;
2814 pci_read_bar(dev, reg, &map, &testval, NULL);
2815 if (PCI_BAR_MEM(map)) {
2816 type = SYS_RES_MEMORY;
2817 if (map & PCIM_BAR_MEM_PREFETCH)
2820 type = SYS_RES_IOPORT;
2821 mapsize = pci_mapsize(testval);
2822 base = pci_mapbase(map);
2823 #ifdef __PCI_BAR_ZERO_VALID
2826 basezero = base == 0;
2828 maprange = pci_maprange(map);
2829 barlen = maprange == 64 ? 2 : 1;
2832 * For I/O registers, if bottom bit is set, and the next bit up
2833 * isn't clear, we know we have a BAR that doesn't conform to the
2834 * spec, so ignore it. Also, sanity check the size of the data
2835 * areas to the type of memory involved. Memory must be at least
2836 * 16 bytes in size, while I/O ranges must be at least 4.
2838 if (PCI_BAR_IO(testval) && (testval & PCIM_BAR_IO_RESERVED) != 0)
2840 if ((type == SYS_RES_MEMORY && mapsize < 4) ||
2841 (type == SYS_RES_IOPORT && mapsize < 2))
2844 /* Save a record of this BAR. */
2845 pm = pci_add_bar(dev, reg, map, mapsize);
2847 printf("\tmap[%02x]: type %s, range %2d, base %#jx, size %2d",
2848 reg, pci_maptype(map), maprange, (uintmax_t)base, mapsize);
2849 if (type == SYS_RES_IOPORT && !pci_porten(dev))
2850 printf(", port disabled\n");
2851 else if (type == SYS_RES_MEMORY && !pci_memen(dev))
2852 printf(", memory disabled\n");
2854 printf(", enabled\n");
2858 * If base is 0, then we have problems if this architecture does
2859 * not allow that. It is best to ignore such entries for the
2860 * moment. These will be allocated later if the driver specifically
2861 * requests them. However, some removable busses look better when
2862 * all resources are allocated, so allow '0' to be overridden.
2864 * Similarly treat maps whose value is the same as the test value
2865 * read back. These maps have had all f's written to them by the
2866 * BIOS in an attempt to disable the resources.
2868 if (!force && (basezero || map == testval))
2870 if ((u_long)base != base) {
2872 "pci%d:%d:%d:%d bar %#x too many address bits",
2873 pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev),
2874 pci_get_function(dev), reg);
2879 * This code theoretically does the right thing, but has
2880 * undesirable side effects in some cases where peripherals
2881 * respond oddly to having these bits enabled. Let the user
2882 * be able to turn them off (since pci_enable_io_modes is 1 by
2885 if (pci_enable_io_modes) {
2886 /* Turn on resources that have been left off by a lazy BIOS */
2887 if (type == SYS_RES_IOPORT && !pci_porten(dev)) {
2888 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2889 cmd |= PCIM_CMD_PORTEN;
2890 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2892 if (type == SYS_RES_MEMORY && !pci_memen(dev)) {
2893 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2894 cmd |= PCIM_CMD_MEMEN;
2895 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2898 if (type == SYS_RES_IOPORT && !pci_porten(dev))
2900 if (type == SYS_RES_MEMORY && !pci_memen(dev))
2904 count = (pci_addr_t)1 << mapsize;
2905 flags = RF_ALIGNMENT_LOG2(mapsize);
2907 flags |= RF_PREFETCHABLE;
2908 if (basezero || base == pci_mapbase(testval) || pci_clear_bars) {
2909 start = 0; /* Let the parent decide. */
2913 end = base + count - 1;
2915 resource_list_add(rl, type, reg, start, end, count);
2918 * Try to allocate the resource for this BAR from our parent
2919 * so that this resource range is already reserved. The
2920 * driver for this device will later inherit this resource in
2921 * pci_alloc_resource().
2923 res = resource_list_reserve(rl, bus, dev, type, &reg, start, end, count,
2925 if (pci_do_realloc_bars && res == NULL && (start != 0 || end != ~0ul)) {
2927 * If the allocation fails, try to allocate a resource for
2928 * this BAR using any available range. The firmware felt
2929 * it was important enough to assign a resource, so don't
2930 * disable decoding if we can help it.
2932 resource_list_delete(rl, type, reg);
2933 resource_list_add(rl, type, reg, 0, ~0ul, count);
2934 res = resource_list_reserve(rl, bus, dev, type, &reg, 0, ~0ul,
2939 * If the allocation fails, delete the resource list entry
2940 * and disable decoding for this device.
2942 * If the driver requests this resource in the future,
2943 * pci_reserve_map() will try to allocate a fresh
2946 resource_list_delete(rl, type, reg);
2947 pci_disable_io(dev, type);
2950 "pci%d:%d:%d:%d bar %#x failed to allocate\n",
2951 pci_get_domain(dev), pci_get_bus(dev),
2952 pci_get_slot(dev), pci_get_function(dev), reg);
2954 start = rman_get_start(res);
2955 pci_write_bar(dev, pm, start);
2961 * For ATA devices we need to decide early what addressing mode to use.
2962 * Legacy demands that the primary and secondary ATA ports sit on the
2963 * same addresses that old ISA hardware did. This dictates that we use
2964 * those addresses and ignore the BAR's if we cannot set PCI native
/*
 * Map an ATA controller's resources: prefer PCI-native BARs when the
 * controller advertises them; otherwise reserve the legacy ISA-compat
 * port ranges (0x1f0/0x3f6 primary, 0x170/0x376 secondary).  BARs 4/5
 * (bus-master DMA, etc.) are always mapped.
 */
2968 pci_ata_maps(device_t bus, device_t dev, struct resource_list *rl, int force,
2969 uint32_t prefetchmask)
2972 int rid, type, progif;
2974 /* if this device supports PCI native addressing use it */
2975 progif = pci_read_config(dev, PCIR_PROGIF, 1);
2976 if ((progif & 0x8a) == 0x8a) {
2977 if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) &&
2978 pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) {
2979 printf("Trying ATA native PCI addressing mode\n");
2980 pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1);
2984 progif = pci_read_config(dev, PCIR_PROGIF, 1);
2985 type = SYS_RES_IOPORT;
/* Primary channel: native BARs 0/1, else legacy 0x1f0-0x1f7 + 0x3f6. */
2986 if (progif & PCIP_STORAGE_IDE_MODEPRIM) {
2987 pci_add_map(bus, dev, PCIR_BAR(0), rl, force,
2988 prefetchmask & (1 << 0));
2989 pci_add_map(bus, dev, PCIR_BAR(1), rl, force,
2990 prefetchmask & (1 << 1));
2993 resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8);
2994 r = resource_list_reserve(rl, bus, dev, type, &rid, 0x1f0,
2997 resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1);
2998 r = resource_list_reserve(rl, bus, dev, type, &rid, 0x3f6,
/* Secondary channel: native BARs 2/3, else legacy 0x170-0x177 + 0x376. */
3001 if (progif & PCIP_STORAGE_IDE_MODESEC) {
3002 pci_add_map(bus, dev, PCIR_BAR(2), rl, force,
3003 prefetchmask & (1 << 2));
3004 pci_add_map(bus, dev, PCIR_BAR(3), rl, force,
3005 prefetchmask & (1 << 3));
3008 resource_list_add(rl, type, rid, 0x170, 0x177, 8);
3009 r = resource_list_reserve(rl, bus, dev, type, &rid, 0x170,
3012 resource_list_add(rl, type, rid, 0x376, 0x376, 1);
3013 r = resource_list_reserve(rl, bus, dev, type, &rid, 0x376,
3016 pci_add_map(bus, dev, PCIR_BAR(4), rl, force,
3017 prefetchmask & (1 << 4));
3018 pci_add_map(bus, dev, PCIR_BAR(5), rl, force,
3019 prefetchmask & (1 << 5));
/*
 * Determine the device's legacy INTx IRQ and add it as the rid-0
 * SYS_RES_IRQ entry.  Priority: user tunable hw.pciD.B.S.INTx.irq,
 * then the intline register or a bus-routed interrupt depending on
 * force_route.  The intline register is updated if the value changed.
 */
3023 pci_assign_interrupt(device_t bus, device_t dev, int force_route)
3025 struct pci_devinfo *dinfo = device_get_ivars(dev);
3026 pcicfgregs *cfg = &dinfo->cfg;
3027 char tunable_name[64];
3030 /* Has to have an intpin to have an interrupt. */
3031 if (cfg->intpin == 0)
3034 /* Let the user override the IRQ with a tunable. */
3035 irq = PCI_INVALID_IRQ;
3036 snprintf(tunable_name, sizeof(tunable_name),
3037 "hw.pci%d.%d.%d.INT%c.irq",
3038 cfg->domain, cfg->bus, cfg->slot, cfg->intpin + 'A' - 1);
/* Reject out-of-range tunable values (valid IRQs are 1..254 here). */
3039 if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0))
3040 irq = PCI_INVALID_IRQ;
3043 * If we didn't get an IRQ via the tunable, then we either use the
3044 * IRQ value in the intline register or we ask the bus to route an
3045 * interrupt for us. If force_route is true, then we only use the
3046 * value in the intline register if the bus was unable to assign an
3049 if (!PCI_INTERRUPT_VALID(irq)) {
3050 if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route)
3051 irq = PCI_ASSIGN_INTERRUPT(bus, dev);
3052 if (!PCI_INTERRUPT_VALID(irq))
3056 /* If after all that we don't have an IRQ, just bail. */
3057 if (!PCI_INTERRUPT_VALID(irq))
3060 /* Update the config register if it changed. */
3061 if (irq != cfg->intline) {
3063 pci_write_config(dev, PCIR_INTLINE, irq, 1);
3066 /* Add this IRQ as rid 0 interrupt resource. */
3067 resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1);
3070 /* Perform early OHCI takeover from SMM. */
/*
 * If the BIOS/SMM owns the OHCI controller (OHCI_IR set), request an
 * ownership change, poll up to 100 iterations for it to complete, and
 * hard-reset the controller if SMM never responds.  Interrupts are
 * disabled before releasing the register resource.
 */
3072 ohci_early_takeover(device_t self)
3074 struct resource *res;
3080 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
3084 ctl = bus_read_4(res, OHCI_CONTROL);
3085 if (ctl & OHCI_IR) {
3087 printf("ohci early: "
3088 "SMM active, request owner change\n");
3089 bus_write_4(res, OHCI_COMMAND_STATUS, OHCI_OCR);
3090 for (i = 0; (i < 100) && (ctl & OHCI_IR); i++) {
3092 ctl = bus_read_4(res, OHCI_CONTROL);
3094 if (ctl & OHCI_IR) {
3096 printf("ohci early: "
3097 "SMM does not respond, resetting\n");
3098 bus_write_4(res, OHCI_CONTROL, OHCI_HCFS_RESET);
3100 /* Disable interrupts */
3101 bus_write_4(res, OHCI_INTERRUPT_DISABLE, OHCI_ALL_INTRS);
3104 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
3107 /* Perform early UHCI takeover from SMM. */
/*
 * Claim a UHCI controller from the BIOS: turn off legacy USB keyboard/
 * mouse emulation via the LEGSUP register (keeping only PIRQD enable)
 * and mask the controller's interrupt register.
 */
3109 uhci_early_takeover(device_t self)
3111 struct resource *res;
3115 * Set the PIRQD enable bit and switch off all the others. We don't
3116 * want legacy support to interfere with us XXX Does this also mean
3117 * that the BIOS won't touch the keyboard anymore if it is connected
3118 * to the ports of the root hub?
3120 pci_write_config(self, PCI_LEGSUP, PCI_LEGSUP_USBPIRQDEN, 2);
3122 /* Disable interrupts */
3123 rid = PCI_UHCI_BASE_REG;
3124 res = bus_alloc_resource_any(self, SYS_RES_IOPORT, &rid, RF_ACTIVE);
3126 bus_write_2(res, UHCI_INTR, 0);
3127 bus_release_resource(self, SYS_RES_IOPORT, rid, res);
3131 /* Perform early EHCI takeover from SMM. */
/*
 * Walk the EHCI extended-capability list looking for the legacy-support
 * capability; if the BIOS semaphore is held, set the OS semaphore and
 * poll up to 100 iterations for the BIOS to release ownership, then
 * disable the controller's interrupts.
 */
3133 ehci_early_takeover(device_t self)
3135 struct resource *res;
3145 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
3149 cparams = bus_read_4(res, EHCI_HCCPARAMS);
3151 /* Synchronise with the BIOS if it owns the controller. */
3152 for (eecp = EHCI_HCC_EECP(cparams); eecp != 0;
3153 eecp = EHCI_EECP_NEXT(eec)) {
3154 eec = pci_read_config(self, eecp, 4);
3155 if (EHCI_EECP_ID(eec) != EHCI_EC_LEGSUP) {
3158 bios_sem = pci_read_config(self, eecp +
3159 EHCI_LEGSUP_BIOS_SEM, 1);
3160 if (bios_sem == 0) {
3164 printf("ehci early: "
3165 "SMM active, request owner change\n");
3167 pci_write_config(self, eecp + EHCI_LEGSUP_OS_SEM, 1, 1);
3169 for (i = 0; (i < 100) && (bios_sem != 0); i++) {
3171 bios_sem = pci_read_config(self, eecp +
3172 EHCI_LEGSUP_BIOS_SEM, 1);
3175 if (bios_sem != 0) {
3177 printf("ehci early: "
3178 "SMM does not respond\n");
3180 /* Disable interrupts */
3181 offs = EHCI_CAPLENGTH(bus_read_4(res, EHCI_CAPLEN_HCIVERSION));
3182 bus_write_4(res, offs + EHCI_USBINTR, 0);
3184 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
3187 /* Perform early XHCI takeover from SMM. */
/*
 * Walk the xHCI extended-capability list (offsets are in dwords, hence
 * the << 2) for the USB-legacy capability; if the BIOS semaphore is
 * held, set the OS semaphore and poll for release, then clear USBCMD
 * to stop the controller and disable its interrupts.
 */
3189 xhci_early_takeover(device_t self)
3191 struct resource *res;
3201 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
3205 cparams = bus_read_4(res, XHCI_HCSPARAMS0);
3209 /* Synchronise with the BIOS if it owns the controller. */
3210 for (eecp = XHCI_HCS0_XECP(cparams) << 2; eecp != 0 && XHCI_XECP_NEXT(eec);
3211 eecp += XHCI_XECP_NEXT(eec) << 2) {
3212 eec = bus_read_4(res, eecp);
3214 if (XHCI_XECP_ID(eec) != XHCI_ID_USB_LEGACY)
3217 bios_sem = bus_read_1(res, eecp + XHCI_XECP_BIOS_SEM);
3222 printf("xhci early: "
3223 "SMM active, request owner change\n");
3225 bus_write_1(res, eecp + XHCI_XECP_OS_SEM, 1);
3227 /* wait a maximum of 5 seconds */
3229 for (i = 0; (i < 5000) && (bios_sem != 0); i++) {
3231 bios_sem = bus_read_1(res, eecp +
3232 XHCI_XECP_BIOS_SEM);
3235 if (bios_sem != 0) {
3237 printf("xhci early: "
3238 "SMM does not respond\n");
3241 /* Disable interrupts */
3242 offs = bus_read_1(res, XHCI_CAPLENGTH);
3243 bus_write_4(res, offs + XHCI_USBCMD, 0);
3244 bus_read_4(res, offs + XHCI_USBSTS);
3246 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
3249 #if defined(NEW_PCIB) && defined(PCI_RES_BUS)
/*
 * For PCI-PCI and CardBus bridges, try to reserve the existing
 * secondary..subordinate bus-number range from the parent as a
 * PCI_RES_BUS resource, applying device/BIOS quirks first; on failure
 * (or when pci_clear_buses is set) the secbus/subbus registers are
 * cleared so the range can be renumbered later.
 */
3251 pci_reserve_secbus(device_t bus, device_t dev, pcicfgregs *cfg,
3252 struct resource_list *rl)
3254 struct resource *res;
3256 u_long start, end, count;
3257 int rid, sec_bus, sec_reg, sub_bus, sub_reg, sup_bus;
/* Pick the secbus/subbus register offsets for this header type. */
3259 switch (cfg->hdrtype & PCIM_HDRTYPE) {
3260 case PCIM_HDRTYPE_BRIDGE:
3261 sec_reg = PCIR_SECBUS_1;
3262 sub_reg = PCIR_SUBBUS_1;
3264 case PCIM_HDRTYPE_CARDBUS:
3265 sec_reg = PCIR_SECBUS_2;
3266 sub_reg = PCIR_SUBBUS_2;
3273 * If the existing bus range is valid, attempt to reserve it
3274 * from our parent. If this fails for any reason, clear the
3275 * secbus and subbus registers.
3277 * XXX: Should we reset sub_bus to sec_bus if it is < sec_bus?
3278 * This would at least preserve the existing sec_bus if it is
3281 sec_bus = PCI_READ_CONFIG(bus, dev, sec_reg, 1);
3282 sub_bus = PCI_READ_CONFIG(bus, dev, sub_reg, 1);
3284 /* Quirk handling. */
3285 switch (pci_get_devid(dev)) {
3286 case 0x12258086: /* Intel 82454KX/GX (Orion) */
3287 sup_bus = pci_read_config(dev, 0x41, 1);
3288 if (sup_bus != 0xff) {
3289 sec_bus = sup_bus + 1;
3290 sub_bus = sup_bus + 1;
3291 PCI_WRITE_CONFIG(bus, dev, sec_reg, sec_bus, 1);
3292 PCI_WRITE_CONFIG(bus, dev, sub_reg, sub_bus, 1);
3297 /* Compaq R3000 BIOS sets wrong subordinate bus number. */
3298 if ((cp = kern_getenv("smbios.planar.maker")) == NULL)
3300 if (strncmp(cp, "Compal", 6) != 0) {
3305 if ((cp = kern_getenv("smbios.planar.product")) == NULL)
3307 if (strncmp(cp, "08A0", 4) != 0) {
3312 if (sub_bus < 0xa) {
3314 PCI_WRITE_CONFIG(bus, dev, sub_reg, sub_bus, 1);
3320 printf("\tsecbus=%d, subbus=%d\n", sec_bus, sub_bus);
3321 if (sec_bus > 0 && sub_bus >= sec_bus) {
3324 count = end - start + 1;
3326 resource_list_add(rl, PCI_RES_BUS, 0, 0ul, ~0ul, count);
3329 * If requested, clear secondary bus registers in
3330 * bridge devices to force a complete renumbering
3331 * rather than reserving the existing range. However,
3332 * preserve the existing size.
3334 if (pci_clear_buses)
3338 res = resource_list_reserve(rl, bus, dev, PCI_RES_BUS, &rid,
3339 start, end, count, 0);
3345 "pci%d:%d:%d:%d secbus failed to allocate\n",
3346 pci_get_domain(dev), pci_get_bus(dev),
3347 pci_get_slot(dev), pci_get_function(dev));
3351 PCI_WRITE_CONFIG(bus, dev, sec_reg, 0, 1);
3352 PCI_WRITE_CONFIG(bus, dev, sub_reg, 0, 1);
/*
 * Allocate a PCI_RES_BUS range for a bridge child, lazily reserving it
 * from this bus first if no reservation exists yet; on a lazy
 * reservation the child's secbus/subbus registers are programmed from
 * the allocated range before the final resource_list_alloc().
 */
3355 static struct resource *
3356 pci_alloc_secbus(device_t dev, device_t child, int *rid, u_long start,
3357 u_long end, u_long count, u_int flags)
3359 struct pci_devinfo *dinfo;
3361 struct resource_list *rl;
3362 struct resource *res;
3363 int sec_reg, sub_reg;
3365 dinfo = device_get_ivars(child);
3367 rl = &dinfo->resources;
/* Same header-type dispatch as pci_reserve_secbus(). */
3368 switch (cfg->hdrtype & PCIM_HDRTYPE) {
3369 case PCIM_HDRTYPE_BRIDGE:
3370 sec_reg = PCIR_SECBUS_1;
3371 sub_reg = PCIR_SUBBUS_1;
3373 case PCIM_HDRTYPE_CARDBUS:
3374 sec_reg = PCIR_SECBUS_2;
3375 sub_reg = PCIR_SUBBUS_2;
3384 if (resource_list_find(rl, PCI_RES_BUS, *rid) == NULL)
3385 resource_list_add(rl, PCI_RES_BUS, *rid, start, end, count);
3386 if (!resource_list_reserved(rl, PCI_RES_BUS, *rid)) {
3387 res = resource_list_reserve(rl, dev, child, PCI_RES_BUS, rid,
3388 start, end, count, flags & ~RF_ACTIVE);
3390 resource_list_delete(rl, PCI_RES_BUS, *rid);
3391 device_printf(child, "allocating %lu bus%s failed\n",
3392 count, count == 1 ? "" : "es");
3396 device_printf(child,
3397 "Lazy allocation of %lu bus%s at %lu\n", count,
3398 count == 1 ? "" : "es", rman_get_start(res));
3399 PCI_WRITE_CONFIG(dev, child, sec_reg, rman_get_start(res), 1);
3400 PCI_WRITE_CONFIG(dev, child, sub_reg, rman_get_end(res), 1);
3402 return (resource_list_alloc(rl, dev, child, PCI_RES_BUS, rid, start,
3403 end, count, flags));
/*
 * Populate a new child's resource list: map its BARs (with special
 * handling for legacy-mode ATA controllers and quirked registers),
 * assign its INTx interrupt, perform early USB controller takeover
 * from the BIOS, and reserve bridge secondary-bus ranges.
 */
3408 pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask)
3410 struct pci_devinfo *dinfo;
3412 struct resource_list *rl;
3413 const struct pci_quirk *q;
3417 dinfo = device_get_ivars(dev);
3419 rl = &dinfo->resources;
3420 devid = (cfg->device << 16) | cfg->vendor;
3422 /* ATA devices needs special map treatment */
3423 if ((pci_get_class(dev) == PCIC_STORAGE) &&
3424 (pci_get_subclass(dev) == PCIS_STORAGE_IDE) &&
3425 ((pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV) ||
3426 (!pci_read_config(dev, PCIR_BAR(0), 4) &&
3427 !pci_read_config(dev, PCIR_BAR(2), 4))) )
3428 pci_ata_maps(bus, dev, rl, force, prefetchmask);
/* Walk the BARs; pci_add_map() returns how many registers each BAR used. */
3430 for (i = 0; i < cfg->nummaps;) {
3432 * Skip quirked resources.
3434 for (q = &pci_quirks[0]; q->devid != 0; q++)
3435 if (q->devid == devid &&
3436 q->type == PCI_QUIRK_UNMAP_REG &&
3437 q->arg1 == PCIR_BAR(i))
3439 if (q->devid != 0) {
3443 i += pci_add_map(bus, dev, PCIR_BAR(i), rl, force,
3444 prefetchmask & (1 << i));
3448 * Add additional, quirked resources.
3450 for (q = &pci_quirks[0]; q->devid != 0; q++)
3451 if (q->devid == devid && q->type == PCI_QUIRK_MAP_REG)
3452 pci_add_map(bus, dev, q->arg1, rl, force, 0);
3454 if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) {
3455 #ifdef __PCI_REROUTE_INTERRUPT
3457 * Try to re-route interrupts. Sometimes the BIOS or
3458 * firmware may leave bogus values in these registers.
3459 * If the re-route fails, then just stick with what we
3462 pci_assign_interrupt(bus, dev, 1);
3464 pci_assign_interrupt(bus, dev, 0);
/* Hand USB controllers over from BIOS/SMM before drivers attach. */
3468 if (pci_usb_takeover && pci_get_class(dev) == PCIC_SERIALBUS &&
3469 pci_get_subclass(dev) == PCIS_SERIALBUS_USB) {
3470 if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_XHCI)
3471 xhci_early_takeover(dev);
3472 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_EHCI)
3473 ehci_early_takeover(dev);
3474 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_OHCI)
3475 ohci_early_takeover(dev);
3476 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_UHCI)
3477 uhci_early_takeover(dev);
3480 #if defined(NEW_PCIB) && defined(PCI_RES_BUS)
3482 * Reserve resources for secondary bus ranges behind bridge
3485 pci_reserve_secbus(bus, dev, cfg, rl);
/*
 * Probe one domain/bus/slot/function for a device; if config space
 * reads back a device, add it as a child of 'dev' and return its
 * pci_devinfo (NULL otherwise, per pci_read_device()'s result).
 */
3489 static struct pci_devinfo *
3490 pci_identify_function(device_t pcib, device_t dev, int domain, int busno,
3491 int slot, int func, size_t dinfo_size)
3493 struct pci_devinfo *dinfo;
3495 dinfo = pci_read_device(pcib, domain, busno, slot, func, dinfo_size);
3497 pci_add_child(dev, dinfo);
/*
 * Enumerate every slot/function on this bus and add the devices found.
 * Slot 0 function 0 is probed first so ARI can be enabled before the
 * rest of the scan, since ARI changes which slot/function combinations
 * are legal.  Multi-function devices extend the function scan via
 * PCIB_MAXFUNCS().
 */
3503 pci_add_children(device_t dev, int domain, int busno, size_t dinfo_size)
3505 #define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
3506 device_t pcib = device_get_parent(dev);
3507 struct pci_devinfo *dinfo;
3509 int s, f, pcifunchigh;
3514 * Try to detect a device at slot 0, function 0. If it exists, try to
3515 * enable ARI. We must enable ARI before detecting the rest of the
3516 * functions on this bus as ARI changes the set of slots and functions
3517 * that are legal on this bus.
3519 dinfo = pci_identify_function(pcib, dev, domain, busno, 0, 0,
3521 if (dinfo != NULL && pci_enable_ari)
3522 PCIB_TRY_ENABLE_ARI(pcib, dinfo->cfg.dev);
3525 * Start looking for new devices on slot 0 at function 1 because we
3526 * just identified the device at slot 0, function 0.
3530 KASSERT(dinfo_size >= sizeof(struct pci_devinfo),
3531 ("dinfo_size too small"));
3532 maxslots = PCIB_MAXSLOTS(pcib);
3533 for (s = 0; s <= maxslots; s++, first_func = 0) {
3537 hdrtype = REG(PCIR_HDRTYPE, 1);
3538 if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
3540 if (hdrtype & PCIM_MFDEV)
3541 pcifunchigh = PCIB_MAXFUNCS(pcib);
3542 for (f = first_func; f <= pcifunchigh; f++)
3543 pci_identify_function(pcib, dev, domain, busno, s, f,
/*
 * Create the newbus child for a discovered PCI function: attach the
 * devinfo as ivars, init its resource list, snapshot/restore config
 * space, print verbose info, add its resources and notify the bus.
 */
3550 pci_add_child(device_t bus, struct pci_devinfo *dinfo)
3552 dinfo->cfg.dev = device_add_child(bus, NULL, -1);
3553 device_set_ivars(dinfo->cfg.dev, dinfo);
3554 resource_list_init(&dinfo->resources);
3555 pci_cfg_save(dinfo->cfg.dev, dinfo, 0);
3556 pci_cfg_restore(dinfo->cfg.dev, dinfo);
3557 pci_print_verbose(dinfo);
3558 pci_add_resources(bus, dinfo->cfg.dev, 0, 0);
3559 pci_child_added(dinfo->cfg.dev);
/*
 * Bus-method hook run when a child has been added (invoked via
 * pci_child_added() in pci_add_child()); default implementation.
 */
3563 pci_child_added_method(device_t dev, device_t child)
/*
 * Generic probe for the PCI bus device; returns BUS_PROBE_GENERIC so
 * more specific bus subclasses can win the attachment.
 */
3569 pci_probe(device_t dev)
3572 device_set_desc(dev, "PCI bus");
3574 /* Allow other subclasses to override this driver. */
3575 return (BUS_PROBE_GENERIC);
/*
 * Shared attach work for pci and its subclasses: reserve this bus's
 * own bus number from the parent, report domain/bus, and (when
 * PCI_DMA_BOUNDARY is configured) create a boundary-constrained DMA
 * tag for the top-level PCI bus only.
 */
3579 pci_attach_common(device_t dev)
3581 struct pci_softc *sc;
3583 #ifdef PCI_DMA_BOUNDARY
3584 int error, tag_valid;
3590 sc = device_get_softc(dev);
3591 domain = pcib_get_domain(dev);
3592 busno = pcib_get_bus(dev);
3595 sc->sc_bus = bus_alloc_resource(dev, PCI_RES_BUS, &rid, busno, busno,
3597 if (sc->sc_bus == NULL) {
3598 device_printf(dev, "failed to allocate bus number\n");
3603 device_printf(dev, "domain=%d, physical bus=%d\n",
3605 #ifdef PCI_DMA_BOUNDARY
/* Only the top-level bus (parent not a pci devclass) gets its own tag. */
3607 if (device_get_devclass(device_get_parent(device_get_parent(dev))) !=
3608 devclass_find("pci")) {
3609 error = bus_dma_tag_create(bus_get_dma_tag(dev), 1,
3610 PCI_DMA_BOUNDARY, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3611 NULL, NULL, BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED,
3612 BUS_SPACE_MAXSIZE, 0, NULL, NULL, &sc->sc_dma_tag);
3614 device_printf(dev, "Failed to create DMA tag: %d\n",
3621 sc->sc_dma_tag = bus_get_dma_tag(dev);
/*
 * Attach the PCI bus: run the common setup, ask the parent bridge for
 * our domain and bus number, enumerate children and attach them.
 */
3626 pci_attach(device_t dev)
3628 int busno, domain, error;
3630 error = pci_attach_common(dev);
3635 * Since there can be multiple independently numbered PCI
3636 * busses on systems with multiple PCI domains, we can't use
3637 * the unit number to decide which bus we are probing. We ask
3638 * the parent pcib what our domain and bus numbers are.
3640 domain = pcib_get_domain(dev);
3641 busno = pcib_get_bus(dev);
3642 pci_add_children(dev, domain, busno, sizeof(struct pci_devinfo));
3643 return (bus_generic_attach(dev));
/*
 * Detach the PCI bus: detach children generically, then release the
 * bus-number resource reserved in pci_attach_common().
 */
3648 pci_detach(device_t dev)
3650 struct pci_softc *sc;
3653 error = bus_generic_detach(dev);
3656 sc = device_get_softc(dev);
3657 return (bus_release_resource(dev, PCI_RES_BUS, 0, sc->sc_bus));
/*
 * Put a child in the requested power state, letting the parent bridge
 * (via PCIB_POWER_FOR_SLEEP) override the D-state; only attached
 * children are touched.
 */
3662 pci_set_power_child(device_t dev, device_t child, int state)
3664 struct pci_devinfo *dinfo;
3669 * Set the device to the given state. If the firmware suggests
3670 * a different power state, use it instead. If power management
3671 * is not present, the firmware is responsible for managing
3672 * device power. Skip children who aren't attached since they
3673 * are handled separately.
3675 pcib = device_get_parent(dev);
3676 dinfo = device_get_ivars(child);
3678 if (device_is_attached(child) &&
3679 PCIB_POWER_FOR_SLEEP(pcib, child, &dstate) == 0)
3680 pci_set_powerstate(child, dstate);
/*
 * Suspend a child: save its config space, run the generic suspend,
 * then (if pci_do_power_suspend) drop it to D3.  Ordering matters —
 * the driver suspends before power is removed.
 */
3684 pci_suspend_child(device_t dev, device_t child)
3686 struct pci_devinfo *dinfo;
3689 dinfo = device_get_ivars(child);
3692 * Save the PCI configuration space for the child and set the
3693 * device in the appropriate power state for this sleep state.
3695 pci_cfg_save(child, dinfo, 0);
3697 /* Suspend devices before potentially powering them down. */
3698 error = bus_generic_suspend_child(dev, child);
3703 if (pci_do_power_suspend)
3704 pci_set_power_child(dev, child, PCI_POWERSTATE_D3);
/*
 * Resume a child: power it back to D0 (if pci_do_power_resume),
 * restore its saved config space, and run the generic resume.
 * Unattached children just get their config re-saved.
 */
3710 pci_resume_child(device_t dev, device_t child)
3712 struct pci_devinfo *dinfo;
3714 if (pci_do_power_resume)
3715 pci_set_power_child(dev, child, PCI_POWERSTATE_D0);
3717 dinfo = device_get_ivars(child);
3718 pci_cfg_restore(child, dinfo);
3719 if (!device_is_attached(child))
3720 pci_cfg_save(child, dinfo, 1);
3722 bus_generic_resume_child(dev, child);
/*
 * Resume the whole bus in two passes: base-peripheral class devices
 * (e.g. interrupt controllers) first, everything else second.
 */
3728 pci_resume(device_t dev)
3730 device_t child, *devlist;
3731 int error, i, numdevs;
3733 if ((error = device_get_children(dev, &devlist, &numdevs)) != 0)
3737 * Resume critical devices first, then everything else later.
3739 for (i = 0; i < numdevs; i++) {
3741 switch (pci_get_class(child)) {
3745 case PCIC_BASEPERIPH:
3746 BUS_RESUME_CHILD(dev, child);
/* Second pass: all classes not handled above. */
3750 for (i = 0; i < numdevs; i++) {
3752 switch (pci_get_class(child)) {
3756 case PCIC_BASEPERIPH:
3759 BUS_RESUME_CHILD(dev, child);
3762 free(devlist, M_TEMP);
/*
 * Locate a preloaded "pci_vendor_data" module (the vendor/device name
 * database) and publish its address/size in pci_vendordata globals,
 * terminating the buffer with a newline.
 */
3767 pci_load_vendor_data(void)
3773 data = preload_search_by_type("pci_vendor_data");
3775 ptr = preload_fetch_addr(data);
3776 sz = preload_fetch_size(data);
3777 if (ptr != NULL && sz != 0) {
3778 pci_vendordata = ptr;
3779 pci_vendordata_size = sz;
3780 /* terminate the database */
3781 pci_vendordata[pci_vendordata_size] = '\n';
/*
 * Called when a new driver is loaded: re-probe every driverless child
 * (restoring its config space first) so the new driver can claim it;
 * a failed probe/attach is reported via pci_child_detached().
 */
3787 pci_driver_added(device_t dev, driver_t *driver)
3792 struct pci_devinfo *dinfo;
3796 device_printf(dev, "driver added\n");
3797 DEVICE_IDENTIFY(driver, dev);
3798 if (device_get_children(dev, &devlist, &numdevs) != 0)
3800 for (i = 0; i < numdevs; i++) {
/* Only re-probe children that have no driver attached yet. */
3802 if (device_get_state(child) != DS_NOTPRESENT)
3804 dinfo = device_get_ivars(child);
3805 pci_print_verbose(dinfo);
3807 pci_printf(&dinfo->cfg, "reprobing on driver added\n");
3808 pci_cfg_restore(child, dinfo);
3809 if (device_probe_and_attach(child) != 0)
3810 pci_child_detached(dev, child);
3812 free(devlist, M_TEMP);
/*
 * Bus setup_intr method: install the handler generically, then for
 * direct children manage INTx vs. MSI/MSI-X — map the message address/
 * data via the parent bridge on first use, bump the per-message handler
 * count, and toggle the command register's INTxDIS bit appropriately
 * (inverted for devices with PCI_QUIRK_MSI_INTX_BUG).  On mapping
 * failure the just-installed handler is torn down.
 */
3816 pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags,
3817 driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep)
3819 struct pci_devinfo *dinfo;
3820 struct msix_table_entry *mte;
3821 struct msix_vector *mv;
3827 error = bus_generic_setup_intr(dev, child, irq, flags, filter, intr,
3832 /* If this is not a direct child, just bail out. */
3833 if (device_get_parent(child) != dev) {
3838 rid = rman_get_rid(irq);
3840 /* Make sure that INTx is enabled */
3841 pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
3844 * Check to see if the interrupt is MSI or MSI-X.
3845 * Ask our parent to map the MSI and give
3846 * us the address and data register values.
3847 * If we fail for some reason, teardown the
3848 * interrupt handler.
3850 dinfo = device_get_ivars(child);
3851 if (dinfo->cfg.msi.msi_alloc > 0) {
/* MSI: lazily map the message on first handler installation. */
3852 if (dinfo->cfg.msi.msi_addr == 0) {
3853 KASSERT(dinfo->cfg.msi.msi_handlers == 0,
3854 ("MSI has handlers, but vectors not mapped"));
3855 error = PCIB_MAP_MSI(device_get_parent(dev),
3856 child, rman_get_start(irq), &addr, &data);
3859 dinfo->cfg.msi.msi_addr = addr;
3860 dinfo->cfg.msi.msi_data = data;
3862 if (dinfo->cfg.msi.msi_handlers == 0)
3863 pci_enable_msi(child, dinfo->cfg.msi.msi_addr,
3864 dinfo->cfg.msi.msi_data);
3865 dinfo->cfg.msi.msi_handlers++;
/* MSI-X: rid indexes the message table (1-based). */
3867 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
3868 ("No MSI or MSI-X interrupts allocated"));
3869 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
3870 ("MSI-X index too high"));
3871 mte = &dinfo->cfg.msix.msix_table[rid - 1];
3872 KASSERT(mte->mte_vector != 0, ("no message vector"));
3873 mv = &dinfo->cfg.msix.msix_vectors[mte->mte_vector - 1];
3874 KASSERT(mv->mv_irq == rman_get_start(irq),
3876 if (mv->mv_address == 0) {
3877 KASSERT(mte->mte_handlers == 0,
3878 ("MSI-X table entry has handlers, but vector not mapped"));
3879 error = PCIB_MAP_MSI(device_get_parent(dev),
3880 child, rman_get_start(irq), &addr, &data);
3883 mv->mv_address = addr;
3886 if (mte->mte_handlers == 0) {
3887 pci_enable_msix(child, rid - 1, mv->mv_address,
3889 pci_unmask_msix(child, rid - 1);
3891 mte->mte_handlers++;
3895 * Make sure that INTx is disabled if we are using MSI/MSI-X,
3896 * unless the device is affected by PCI_QUIRK_MSI_INTX_BUG,
3897 * in which case we "enable" INTx so MSI/MSI-X actually works.
3899 if (!pci_has_quirk(pci_get_devid(child),
3900 PCI_QUIRK_MSI_INTX_BUG))
3901 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
3903 pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
/* Error path: undo the generic handler installation. */
3906 (void)bus_generic_teardown_intr(dev, child, irq,
/*
 * Tear down an interrupt handler for a direct PCI child.  Re-enables INTx
 * decoding, decrements the MSI/MSI-X handler count for the vector and
 * disables/masks the message when the count drops to zero, then hands off
 * to bus_generic_teardown_intr().
 * NOTE(review): extraction-garbled — embedded line numbers and missing
 * lines (braces, returns); do not compile as-is.
 */
3916 pci_teardown_intr(device_t dev, device_t child, struct resource *irq,
3919 struct msix_table_entry *mte;
3920 struct resource_list_entry *rle;
3921 struct pci_devinfo *dinfo;
/* Nothing to do for an inactive or absent IRQ resource. */
3924 if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE))
3927 /* If this isn't a direct child, just bail out */
3928 if (device_get_parent(child) != dev)
3929 return(bus_generic_teardown_intr(dev, child, irq, cookie));
3931 rid = rman_get_rid(irq);
/* rid 0 is legacy INTx; re-enable INTx decoding on teardown. */
3934 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
3937 * Check to see if the interrupt is MSI or MSI-X. If so,
3938 * decrement the appropriate handlers count and mask the
3939 * MSI-X message, or disable MSI messages if the count
3942 dinfo = device_get_ivars(child);
3943 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
3944 if (rle->res != irq)
3946 if (dinfo->cfg.msi.msi_alloc > 0) {
3947 KASSERT(rid <= dinfo->cfg.msi.msi_alloc,
/* NOTE(review): panic string says "MSI-X" but this is the MSI path. */
3948 ("MSI-X index too high"));
3949 if (dinfo->cfg.msi.msi_handlers == 0)
3951 dinfo->cfg.msi.msi_handlers--;
/* Last MSI handler gone: disable MSI on the device. */
3952 if (dinfo->cfg.msi.msi_handlers == 0)
3953 pci_disable_msi(child);
3955 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
3956 ("No MSI or MSI-X interrupts allocated"));
3957 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
3958 ("MSI-X index too high"));
/* MSI-X table entries are indexed by rid - 1. */
3959 mte = &dinfo->cfg.msix.msix_table[rid - 1];
3960 if (mte->mte_handlers == 0)
3962 mte->mte_handlers--;
/* Last handler on this vector: mask the MSI-X message. */
3963 if (mte->mte_handlers == 0)
3964 pci_mask_msix(child, rid - 1);
3967 error = bus_generic_teardown_intr(dev, child, irq, cookie);
3970 ("%s: generic teardown failed for MSI/MSI-X", __func__));
/*
 * Print the boot-time description line for a PCI child: resources
 * (ports, memory, IRQs), flags, slot.function, domain, then the footer.
 * Accumulates and (presumably) returns the character count in retval —
 * the declaration/return lines are missing from this extraction.
 */
3975 pci_print_child(device_t dev, device_t child)
3977 struct pci_devinfo *dinfo;
3978 struct resource_list *rl;
3981 dinfo = device_get_ivars(child);
3982 rl = &dinfo->resources;
3984 retval += bus_print_child_header(dev, child);
3986 retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx");
3987 retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx");
3988 retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld");
3989 if (device_get_flags(dev))
3990 retval += printf(" flags %#x", device_get_flags(dev));
3992 retval += printf(" at device %d.%d", pci_get_slot(child),
3993 pci_get_function(child));
3995 retval += bus_print_child_domain(dev, child);
3996 retval += bus_print_child_footer(dev, child);
/*
 * Table mapping PCI class/subclass codes to human-readable descriptions,
 * used by pci_probe_nomatch() to describe devices with no attached driver.
 * subclass == -1 is the per-class fallback entry; "report" selects whether
 * the description is printed always (1) or only under bootverbose (0).
 * NOTE(review): the struct head and class/subclass field lines were lost
 * in extraction; only the tail of the declaration is visible.
 */
4005 int report; /* 0 = bootverbose, 1 = always */
4007 } pci_nomatch_tab[] = {
4008 {PCIC_OLD, -1, 1, "old"},
4009 {PCIC_OLD, PCIS_OLD_NONVGA, 1, "non-VGA display device"},
4010 {PCIC_OLD, PCIS_OLD_VGA, 1, "VGA-compatible display device"},
4011 {PCIC_STORAGE, -1, 1, "mass storage"},
4012 {PCIC_STORAGE, PCIS_STORAGE_SCSI, 1, "SCSI"},
4013 {PCIC_STORAGE, PCIS_STORAGE_IDE, 1, "ATA"},
4014 {PCIC_STORAGE, PCIS_STORAGE_FLOPPY, 1, "floppy disk"},
4015 {PCIC_STORAGE, PCIS_STORAGE_IPI, 1, "IPI"},
4016 {PCIC_STORAGE, PCIS_STORAGE_RAID, 1, "RAID"},
4017 {PCIC_STORAGE, PCIS_STORAGE_ATA_ADMA, 1, "ATA (ADMA)"},
4018 {PCIC_STORAGE, PCIS_STORAGE_SATA, 1, "SATA"},
4019 {PCIC_STORAGE, PCIS_STORAGE_SAS, 1, "SAS"},
4020 {PCIC_STORAGE, PCIS_STORAGE_NVM, 1, "NVM"},
4021 {PCIC_NETWORK, -1, 1, "network"},
4022 {PCIC_NETWORK, PCIS_NETWORK_ETHERNET, 1, "ethernet"},
4023 {PCIC_NETWORK, PCIS_NETWORK_TOKENRING, 1, "token ring"},
4024 {PCIC_NETWORK, PCIS_NETWORK_FDDI, 1, "fddi"},
4025 {PCIC_NETWORK, PCIS_NETWORK_ATM, 1, "ATM"},
4026 {PCIC_NETWORK, PCIS_NETWORK_ISDN, 1, "ISDN"},
4027 {PCIC_DISPLAY, -1, 1, "display"},
4028 {PCIC_DISPLAY, PCIS_DISPLAY_VGA, 1, "VGA"},
4029 {PCIC_DISPLAY, PCIS_DISPLAY_XGA, 1, "XGA"},
4030 {PCIC_DISPLAY, PCIS_DISPLAY_3D, 1, "3D"},
4031 {PCIC_MULTIMEDIA, -1, 1, "multimedia"},
4032 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_VIDEO, 1, "video"},
4033 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_AUDIO, 1, "audio"},
4034 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_TELE, 1, "telephony"},
4035 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_HDA, 1, "HDA"},
4036 {PCIC_MEMORY, -1, 1, "memory"},
4037 {PCIC_MEMORY, PCIS_MEMORY_RAM, 1, "RAM"},
4038 {PCIC_MEMORY, PCIS_MEMORY_FLASH, 1, "flash"},
4039 {PCIC_BRIDGE, -1, 1, "bridge"},
4040 {PCIC_BRIDGE, PCIS_BRIDGE_HOST, 1, "HOST-PCI"},
4041 {PCIC_BRIDGE, PCIS_BRIDGE_ISA, 1, "PCI-ISA"},
4042 {PCIC_BRIDGE, PCIS_BRIDGE_EISA, 1, "PCI-EISA"},
4043 {PCIC_BRIDGE, PCIS_BRIDGE_MCA, 1, "PCI-MCA"},
4044 {PCIC_BRIDGE, PCIS_BRIDGE_PCI, 1, "PCI-PCI"},
4045 {PCIC_BRIDGE, PCIS_BRIDGE_PCMCIA, 1, "PCI-PCMCIA"},
4046 {PCIC_BRIDGE, PCIS_BRIDGE_NUBUS, 1, "PCI-NuBus"},
4047 {PCIC_BRIDGE, PCIS_BRIDGE_CARDBUS, 1, "PCI-CardBus"},
4048 {PCIC_BRIDGE, PCIS_BRIDGE_RACEWAY, 1, "PCI-RACEway"},
4049 {PCIC_SIMPLECOMM, -1, 1, "simple comms"},
4050 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_UART, 1, "UART"}, /* could detect 16550 */
4051 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_PAR, 1, "parallel port"},
4052 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MULSER, 1, "multiport serial"},
4053 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MODEM, 1, "generic modem"},
4054 {PCIC_BASEPERIPH, -1, 0, "base peripheral"},
4055 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PIC, 1, "interrupt controller"},
4056 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_DMA, 1, "DMA controller"},
4057 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_TIMER, 1, "timer"},
4058 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_RTC, 1, "realtime clock"},
4059 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PCIHOT, 1, "PCI hot-plug controller"},
4060 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_SDHC, 1, "SD host controller"},
4061 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_IOMMU, 1, "IOMMU"},
4062 {PCIC_INPUTDEV, -1, 1, "input device"},
4063 {PCIC_INPUTDEV, PCIS_INPUTDEV_KEYBOARD, 1, "keyboard"},
4064 {PCIC_INPUTDEV, PCIS_INPUTDEV_DIGITIZER,1, "digitizer"},
4065 {PCIC_INPUTDEV, PCIS_INPUTDEV_MOUSE, 1, "mouse"},
4066 {PCIC_INPUTDEV, PCIS_INPUTDEV_SCANNER, 1, "scanner"},
4067 {PCIC_INPUTDEV, PCIS_INPUTDEV_GAMEPORT, 1, "gameport"},
4068 {PCIC_DOCKING, -1, 1, "docking station"},
4069 {PCIC_PROCESSOR, -1, 1, "processor"},
4070 {PCIC_SERIALBUS, -1, 1, "serial bus"},
4071 {PCIC_SERIALBUS, PCIS_SERIALBUS_FW, 1, "FireWire"},
4072 {PCIC_SERIALBUS, PCIS_SERIALBUS_ACCESS, 1, "AccessBus"},
4073 {PCIC_SERIALBUS, PCIS_SERIALBUS_SSA, 1, "SSA"},
4074 {PCIC_SERIALBUS, PCIS_SERIALBUS_USB, 1, "USB"},
4075 {PCIC_SERIALBUS, PCIS_SERIALBUS_FC, 1, "Fibre Channel"},
4076 {PCIC_SERIALBUS, PCIS_SERIALBUS_SMBUS, 0, "SMBus"},
4077 {PCIC_WIRELESS, -1, 1, "wireless controller"},
4078 {PCIC_WIRELESS, PCIS_WIRELESS_IRDA, 1, "iRDA"},
4079 {PCIC_WIRELESS, PCIS_WIRELESS_IR, 1, "IR"},
4080 {PCIC_WIRELESS, PCIS_WIRELESS_RF, 1, "RF"},
4081 {PCIC_INTELLIIO, -1, 1, "intelligent I/O controller"},
4082 {PCIC_INTELLIIO, PCIS_INTELLIIO_I2O, 1, "I2O"},
4083 {PCIC_SATCOM, -1, 1, "satellite communication"},
4084 {PCIC_SATCOM, PCIS_SATCOM_TV, 1, "sat TV"},
4085 {PCIC_SATCOM, PCIS_SATCOM_AUDIO, 1, "sat audio"},
4086 {PCIC_SATCOM, PCIS_SATCOM_VOICE, 1, "sat voice"},
4087 {PCIC_SATCOM, PCIS_SATCOM_DATA, 1, "sat data"},
4088 {PCIC_CRYPTO, -1, 1, "encrypt/decrypt"},
4089 {PCIC_CRYPTO, PCIS_CRYPTO_NETCOMP, 1, "network/computer crypto"},
4090 {PCIC_CRYPTO, PCIS_CRYPTO_ENTERTAIN, 1, "entertainment crypto"},
4091 {PCIC_DASP, -1, 0, "dasp"},
4092 {PCIC_DASP, PCIS_DASP_DPIO, 1, "DPIO module"},
/*
 * Report a PCI device that no driver claimed.  Prefer a description from
 * the loaded vendor database (pci_describe_device); otherwise look up a
 * class/subclass description in pci_nomatch_tab.  Finally saves config
 * state via pci_cfg_save() so the device can be powered down safely.
 */
4097 pci_probe_nomatch(device_t dev, device_t child)
4100 const char *cp, *scp;
4104 * Look for a listing for this device in a loaded device database.
/* Database hit: print it and free the malloc'd description. */
4107 if ((device = pci_describe_device(child)) != NULL) {
4108 device_printf(dev, "<%s>", device);
4109 free(device, M_DEVBUF);
4112 * Scan the class/subclass descriptions for a general
/* cp = class-level description, scp = subclass-level description. */
4117 for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) {
4118 if (pci_nomatch_tab[i].class == pci_get_class(child)) {
4119 if (pci_nomatch_tab[i].subclass == -1) {
4120 cp = pci_nomatch_tab[i].desc;
4121 report = pci_nomatch_tab[i].report;
4122 } else if (pci_nomatch_tab[i].subclass ==
4123 pci_get_subclass(child)) {
4124 scp = pci_nomatch_tab[i].desc;
4125 report = pci_nomatch_tab[i].report;
4129 if (report || bootverbose) {
4130 device_printf(dev, "<%s%s%s>",
4132 ((cp != NULL) && (scp != NULL)) ? ", " : "",
4136 if (report || bootverbose) {
4137 printf(" at device %d.%d (no driver attached)\n",
4138 pci_get_slot(child), pci_get_function(child));
/* Save config and optionally power down (setstate = 1). */
4140 pci_cfg_save(child, device_get_ivars(child), 1);
/*
 * Clean up after a detached child: release any resources the driver
 * leaked, complaining about each kind.  Ordering matters: IRQs before
 * MSI vectors, MSI vectors before memory (see comment at 4153).
 */
4144 pci_child_detached(device_t dev, device_t child)
4146 struct pci_devinfo *dinfo;
4147 struct resource_list *rl;
4149 dinfo = device_get_ivars(child);
4150 rl = &dinfo->resources;
4153 * Have to deallocate IRQs before releasing any MSI messages and
4154 * have to release MSI messages before deallocating any memory
4157 if (resource_list_release_active(rl, dev, child, SYS_RES_IRQ) != 0)
4158 pci_printf(&dinfo->cfg, "Device leaked IRQ resources\n");
4159 if (dinfo->cfg.msi.msi_alloc != 0 || dinfo->cfg.msix.msix_alloc != 0) {
4160 pci_printf(&dinfo->cfg, "Device leaked MSI vectors\n");
/* Best-effort: ignore release errors, we are cleaning up anyway. */
4161 (void)pci_release_msi(child);
4163 if (resource_list_release_active(rl, dev, child, SYS_RES_MEMORY) != 0)
4164 pci_printf(&dinfo->cfg, "Device leaked memory resources\n");
4165 if (resource_list_release_active(rl, dev, child, SYS_RES_IOPORT) != 0)
4166 pci_printf(&dinfo->cfg, "Device leaked I/O resources\n")\n
4168 if (resource_list_release_active(rl, dev, child, PCI_RES_BUS) != 0)
4169 pci_printf(&dinfo->cfg, "Device leaked PCI bus numbers\n")\n
4172 pci_cfg_save(child, dinfo, 1);
4176 * Parse the PCI device database, if loaded, and return a pointer to a
4177 * description of the device.
4179 * The database is flat text formatted as follows:
4181 * Any line not in a valid format is ignored.
4182 * Lines are terminated with newline '\n' characters.
4184 * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then
4187 * A DEVICE line is entered immediately below the corresponding VENDOR ID.
4188 * - devices cannot be listed without a corresponding VENDOR line.
4189 * A DEVICE line consists of a TAB, the 4 digit (hex) device code,
4190 * another TAB, then the device name.
4194 * Assuming (ptr) points to the beginning of a line in the database,
4195 * return the vendor or device and description of the next entry.
4196 * The value of (vendor) or (device) inappropriate for the entry type
4197 * is set to -1. Returns nonzero at the end of the database.
4199 * Note that this is slightly unrobust in the face of corrupt data;
4200 * we attempt to safeguard against this by spamming the end of the
4201 * database with a newline when we initialise.
/*
 * Parse one line of the flat-text PCI vendor database (format documented
 * above).  Vendor lines start with a hex code; device lines are TAB-
 * indented.  Advances *ptr past the parsed line.
 * NOTE(review): most of this function's body was lost in extraction —
 * only the sscanf patterns and line-skipping loops remain.
 */
4204 pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc)
4213 left = pci_vendordata_size - (cp - pci_vendordata);
/* Vendor entry: "<hexcode>\t<description>" (desc capped at 80 chars). */
4221 sscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2)
/* Device entry: same shape, but TAB-prefixed in the database. */
4225 sscanf(cp, "%x\t%80[^\n]", device, *desc) == 2)
4228 /* skip to next line */
4229 while (*cp != '\n' && left > 0) {
4238 /* skip to next line */
4239 while (*cp != '\n' && left > 0) {
4243 if (*cp == '\n' && left > 0)
/*
 * Build a "vendor, device" description string for dev from the loaded
 * vendor database.  Returns a malloc'd (M_DEVBUF) string the caller must
 * free, or NULL if no database is loaded / allocation fails.  A matched
 * vendor with an unmatched device falls back to the hex device ID.
 */
4250 pci_describe_device(device_t dev)
4253 char *desc, *vp, *dp, *line;
4255 desc = vp = dp = NULL;
4258 * If we have no vendor data, we can't do anything.
4260 if (pci_vendordata == NULL)
4264 * Scan the vendor data looking for this device
4266 line = pci_vendordata
4267 if ((vp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
4270 if (pci_describe_parse_line(&line, &vendor, &device, &vp))
4272 if (vendor == pci_get_vendor(dev))
4275 if ((dp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
4278 if (pci_describe_parse_line(&line, &vendor, &device, &dp)) {
4286 if (device == pci_get_device(dev))
/* No device match: substitute the raw hex device ID. */
4290 snprintf(dp, 80, "0x%x", pci_get_device(dev));
/* " + 3" covers ", " separator plus the NUL terminator. */
4291 if ((desc = malloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) !=
4293 sprintf(desc, "%s, %s", vp, dp);
/*
 * Bus-method backend for pci_get_*() instance-variable accessors.
 * Copies the requested field from the child's cached pcicfgregs into
 * *result.  PCI_IVAR_ETHADDR is unsupported here and reports failure.
 */
4303 pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
4305 struct pci_devinfo *dinfo;
4308 dinfo = device_get_ivars(child);
4312 case PCI_IVAR_ETHADDR:
4314 * The generic accessor doesn't deal with failure, so
4315 * we set the return value, then return an error.
4317 *((uint8_t **) result) = NULL;
4319 case PCI_IVAR_SUBVENDOR:
4320 *result = cfg->subvendor;
4322 case PCI_IVAR_SUBDEVICE:
4323 *result = cfg->subdevice;
4325 case PCI_IVAR_VENDOR:
4326 *result = cfg->vendor;
4328 case PCI_IVAR_DEVICE:
4329 *result = cfg->device;
4331 case PCI_IVAR_DEVID:
/* devid packs device in the high 16 bits, vendor in the low 16. */
4332 *result = (cfg->device << 16) | cfg->vendor;
4334 case PCI_IVAR_CLASS:
4335 *result = cfg->baseclass;
4337 case PCI_IVAR_SUBCLASS:
4338 *result = cfg->subclass;
4340 case PCI_IVAR_PROGIF:
4341 *result = cfg->progif;
4343 case PCI_IVAR_REVID:
4344 *result = cfg->revid;
4346 case PCI_IVAR_INTPIN:
4347 *result = cfg->intpin;
4350 *result = cfg->intline;
4352 case PCI_IVAR_DOMAIN:
4353 *result = cfg->domain;
4359 *result = cfg->slot;
4361 case PCI_IVAR_FUNCTION:
4362 *result = cfg->func;
4364 case PCI_IVAR_CMDREG:
4365 *result = cfg->cmdreg;
4367 case PCI_IVAR_CACHELNSZ:
4368 *result = cfg->cachelnsz;
4370 case PCI_IVAR_MINGNT:
4371 *result = cfg->mingnt;
4373 case PCI_IVAR_MAXLAT:
4374 *result = cfg->maxlat;
4376 case PCI_IVAR_LATTIMER:
4377 *result = cfg->lattimer;
/*
 * Bus-method backend for pci_set_*() accessors.  Only PCI_IVAR_INTPIN is
 * writable; all identity/topology ivars are rejected with EINVAL since
 * they mirror read-only hardware configuration.
 */
4386 pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
4388 struct pci_devinfo *dinfo;
4390 dinfo = device_get_ivars(child);
4393 case PCI_IVAR_INTPIN:
4394 dinfo->cfg.intpin = value;
/* All of the following are read-only: fall through to EINVAL. */
4396 case PCI_IVAR_ETHADDR:
4397 case PCI_IVAR_SUBVENDOR:
4398 case PCI_IVAR_SUBDEVICE:
4399 case PCI_IVAR_VENDOR:
4400 case PCI_IVAR_DEVICE:
4401 case PCI_IVAR_DEVID:
4402 case PCI_IVAR_CLASS:
4403 case PCI_IVAR_SUBCLASS:
4404 case PCI_IVAR_PROGIF:
4405 case PCI_IVAR_REVID:
4407 case PCI_IVAR_DOMAIN:
4410 case PCI_IVAR_FUNCTION:
4411 return (EINVAL); /* disallow for now */
4418 #include "opt_ddb.h"
4420 #include <ddb/ddb.h>
4421 #include <sys/cons.h>
4424 * List resources based on pci map registers, used for within ddb
/*
 * DDB "show pciregs" command: walk the global pci_devq list and print a
 * one-line summary (selector, class, card/chip IDs, revision, header
 * type) for every PCI device.  Honors the debugger pager quit flag.
 */
4427 DB_SHOW_COMMAND(pciregs, db_pci_dump)
4429 struct pci_devinfo *dinfo;
4430 struct devlist *devlist_head;
4433 int i, error, none_count;
4436 /* get the head of the device queue */
4437 devlist_head = &pci_devq;
4440 * Go through the list of devices and print out devices
4442 for (error = 0, i = 0,
4443 dinfo = STAILQ_FIRST(devlist_head);
/* Stop early if the pager was quit or the count looks inconsistent. */
4444 (dinfo != NULL) && (error == 0) && (i < pci_numdevs) && !db_pager_quit;
4445 dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {
4447 /* Populate pd_name and pd_unit */
4450 name = device_get_name(dinfo->cfg.dev);
4453 db_printf("%s%d@pci%d:%d:%d:%d:\tclass=0x%06x card=0x%08x "
4454 "chip=0x%08x rev=0x%02x hdr=0x%02x\n",
4455 (name && *name) ? name : "none",
4456 (name && *name) ? (int)device_get_unit(dinfo->cfg.dev) :
4458 p->pc_sel.pc_domain, p->pc_sel.pc_bus, p->pc_sel.pc_dev,
4459 p->pc_sel.pc_func, (p->pc_class << 16) |
4460 (p->pc_subclass << 8) | p->pc_progif,
4461 (p->pc_subdevice << 16) | p->pc_subvendor,
4462 (p->pc_device << 16) | p->pc_vendor,
4463 p->pc_revid, p->pc_hdr);
/*
 * Lazily size and reserve a resource backing a BAR on first allocation.
 * Probes the BAR (pci_read_bar) to learn its true size and type, verifies
 * the caller asked for the matching resource type, overrides the caller's
 * count/alignment with the hardware's, reserves the range in the child's
 * resource list, and programs the BAR with the address that was assigned.
 */
4468 static struct resource *
4469 pci_reserve_map(device_t dev, device_t child, int type, int *rid,
4470 u_long start, u_long end, u_long count, u_int num, u_int flags)
4472 struct pci_devinfo *dinfo = device_get_ivars(child);
4473 struct resource_list *rl = &dinfo->resources;
4474 struct resource *res;
4476 pci_addr_t map, testval;
4480 pm = pci_find_bar(child, *rid);
4482 /* This is a BAR that we failed to allocate earlier. */
4483 mapsize = pm->pm_size;
4487 * Weed out the bogons, and figure out how large the
4488 * BAR/map is. BARs that read back 0 here are bogus
4489 * and unimplemented. Note: atapci in legacy mode are
4490 * special and handled elsewhere in the code. If you
4491 * have a atapci device in legacy mode and it fails
4492 * here, that other code is broken.
4494 pci_read_bar(child, *rid, &map, &testval, NULL);
4497 * Determine the size of the BAR and ignore BARs with a size
4498 * of 0. Device ROM BARs use a different mask value.
4500 if (PCIR_IS_BIOS(&dinfo->cfg, *rid))
4501 mapsize = pci_romsize(testval);
4503 mapsize = pci_mapsize(testval);
4506 pm = pci_add_bar(child, *rid, map, mapsize);
/* Type check: memory BARs (and ROMs) need SYS_RES_MEMORY, I/O needs IOPORT. */
4509 if (PCI_BAR_MEM(map) || PCIR_IS_BIOS(&dinfo->cfg, *rid)) {
4510 if (type != SYS_RES_MEMORY) {
4513 "child %s requested type %d for rid %#x,"
4514 " but the BAR says it is an memio\n",
4515 device_get_nameunit(child), type, *rid);
4519 if (type != SYS_RES_IOPORT) {
4522 "child %s requested type %d for rid %#x,"
4523 " but the BAR says it is an ioport\n",
4524 device_get_nameunit(child), type, *rid);
4530 * For real BARs, we need to override the size that
4531 * the driver requests, because that's what the BAR
4532 * actually uses and we would otherwise have a
4533 * situation where we might allocate the excess to
4534 * another driver, which won't work.
/* mapsize is log2 of the BAR size; BAR decoding requires natural alignment. */
4536 count = ((pci_addr_t)1 << mapsize) * num;
4537 if (RF_ALIGNMENT(flags) < mapsize)
4538 flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize);
4539 if (PCI_BAR_MEM(map) && (map & PCIM_BAR_MEM_PREFETCH))
4540 flags |= RF_PREFETCHABLE;
4543 * Allocate enough resource, and then write back the
4544 * appropriate BAR for that resource.
4546 resource_list_add(rl, type, *rid, start, end, count);
4547 res = resource_list_reserve(rl, dev, child, type, rid, start, end,
4548 count, flags & ~RF_ACTIVE);
/* Reservation failed: undo the resource-list entry and diagnose. */
4550 resource_list_delete(rl, type, *rid);
4551 device_printf(child,
4552 "%#lx bytes of rid %#x res %d failed (%#lx, %#lx).\n",
4553 count, *rid, type, start, end);
4557 device_printf(child,
4558 "Lazy allocation of %#lx bytes rid %#x type %d at %#lx\n",
4559 count, *rid, type, rman_get_start(res));
/* Program the BAR with the address we actually got. */
4560 map = rman_get_start(res);
4561 pci_write_bar(child, pm, map);
/*
 * Core lazy resource allocator for PCI children.  For IRQs: refuses rid 0
 * (INTx) once MSI/MSI-X vectors exist, and routes an interrupt on demand.
 * For I/O and memory: passes bridge-window rids up the tree, otherwise
 * reserves the BAR via pci_reserve_map() before the normal list alloc.
 */
4567 pci_alloc_multi_resource(device_t dev, device_t child, int type, int *rid,
4568 u_long start, u_long end, u_long count, u_long num, u_int flags)
4570 struct pci_devinfo *dinfo;
4571 struct resource_list *rl;
4572 struct resource_list_entry *rle;
4573 struct resource *res;
4577 * Perform lazy resource allocation
4579 dinfo = device_get_ivars(child);
4580 rl = &dinfo->resources;
4583 #if defined(NEW_PCIB) && defined(PCI_RES_BUS)
4585 return (pci_alloc_secbus(dev, child, rid, start, end, count,
4590 * Can't alloc legacy interrupt once MSI messages have
4593 if (*rid == 0 && (cfg->msi.msi_alloc > 0 ||
4594 cfg->msix.msix_alloc > 0))
4598 * If the child device doesn't have an interrupt
4599 * routed and is deserving of an interrupt, try to
4602 if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) &&
4604 pci_assign_interrupt(dev, child, 0);
4606 case SYS_RES_IOPORT:
4607 case SYS_RES_MEMORY:
4610 * PCI-PCI bridge I/O window resources are not BARs.
4611 * For those allocations just pass the request up the
4614 if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE) {
4616 case PCIR_IOBASEL_1:
4617 case PCIR_MEMBASE_1:
4618 case PCIR_PMBASEL_1:
4620 * XXX: Should we bother creating a resource
4623 return (bus_generic_alloc_resource(dev, child,
4624 type, rid, start, end, count, flags));
4628 /* Reserve resources for this BAR if needed. */
4629 rle = resource_list_find(rl, type, *rid);
4631 res = pci_reserve_map(dev, child, type, rid, start, end,
4637 return (resource_list_alloc(rl, dev, child, type, rid,
4638 start, end, count, flags));
/*
 * BUS_ALLOC_RESOURCE method: delegate grandchildren to the parent bridge,
 * handle direct children through pci_alloc_multi_resource().
 */
4642 pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
4643 u_long start, u_long end, u_long count, u_int flags)
4646 if (device_get_parent(child) != dev)
4647 return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child,
4648 type, rid, start, end, count, flags));
4650 return (pci_alloc_multi_resource(dev, child, type, rid, start, end,
/*
 * BUS_RELEASE_RESOURCE method.  Grandchildren go to the parent bridge;
 * bridge I/O-window rids go up the generic path (they are not BARs);
 * everything else is released from the child's resource list.
 */
4655 pci_release_resource(device_t dev, device_t child, int type, int rid,
4658 struct pci_devinfo *dinfo;
4659 struct resource_list *rl;
4662 if (device_get_parent(child) != dev)
4663 return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child,
4666 dinfo = device_get_ivars(child);
4670 * PCI-PCI bridge I/O window resources are not BARs. For
4671 * those allocations just pass the request up the tree.
4673 if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE &&
4674 (type == SYS_RES_IOPORT || type == SYS_RES_MEMORY)) {
4676 case PCIR_IOBASEL_1:
4677 case PCIR_MEMBASE_1:
4678 case PCIR_PMBASEL_1:
4679 return (bus_generic_release_resource(dev, child, type,
4685 rl = &dinfo->resources;
4686 return (resource_list_release(rl, dev, child, type, rid, r));
/*
 * BUS_ACTIVATE_RESOURCE method.  After generic activation succeeds, for
 * direct children: ROM BARs get their decode-enable bit set explicitly,
 * and I/O-port / memory decoding is enabled in the command register.
 */
4690 pci_activate_resource(device_t dev, device_t child, int type, int rid,
4693 struct pci_devinfo *dinfo;
4696 error = bus_generic_activate_resource(dev, child, type, rid, r);
4700 /* Enable decoding in the command register when activating BARs. */
4701 if (device_get_parent(child) == dev) {
4702 /* Device ROMs need their decoding explicitly enabled. */
4703 dinfo = device_get_ivars(child);
4704 if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
4705 pci_write_bar(child, pci_find_bar(child, rid),
4706 rman_get_start(r) | PCIM_BIOS_ENABLE);
4708 case SYS_RES_IOPORT:
4709 case SYS_RES_MEMORY:
4710 error = PCI_ENABLE_IO(dev, child, type);
/*
 * BUS_DEACTIVATE_RESOURCE method.  Mirrors pci_activate_resource(): after
 * generic deactivation, clear the ROM BAR's decode-enable bit for direct
 * children so a disabled ROM stops decoding.
 */
4718 pci_deactivate_resource(device_t dev, device_t child, int type,
4719 int rid, struct resource *r)
4721 struct pci_devinfo *dinfo;
4724 error = bus_generic_deactivate_resource(dev, child, type, rid, r);
4728 /* Disable decoding for device ROMs. */
4729 if (device_get_parent(child) == dev) {
4730 dinfo = device_get_ivars(child);
4731 if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
4732 pci_write_bar(child, pci_find_bar(child, rid),
/*
 * Destroy a PCI child device: detach it if attached, turn off memory and
 * I/O decoding, release every resource in its list (complaining if a
 * resource is still active/busy), then delete the device itself.
 */
4739 pci_delete_child(device_t dev, device_t child)
4741 struct resource_list_entry *rle;
4742 struct resource_list *rl;
4743 struct pci_devinfo *dinfo;
4745 dinfo = device_get_ivars(child);
4746 rl = &dinfo->resources;
4748 if (device_is_attached(child))
4749 device_detach(child);
4751 /* Turn off access to resources we're about to free */
4752 pci_write_config(child, PCIR_COMMAND, pci_read_config(child,
4753 PCIR_COMMAND, 2) & ~(PCIM_CMD_MEMEN | PCIM_CMD_PORTEN), 2);
4755 /* Free all allocated resources */
4756 STAILQ_FOREACH(rle, rl, link) {
/* Still active or busy: the driver leaked it — force the release. */
4758 if (rman_get_flags(rle->res) & RF_ACTIVE ||
4759 resource_list_busy(rl, rle->type, rle->rid)) {
4760 pci_printf(&dinfo->cfg,
4761 "Resource still owned, oops. "
4762 "(type=%d, rid=%d, addr=%lx)\n",
4763 rle->type, rle->rid,
4764 rman_get_start(rle->res));
4765 bus_release_resource(child, rle->type, rle->rid,
4768 resource_list_unreserve(rl, dev, child, rle->type,
4772 resource_list_free(rl);
4774 device_delete_child(dev, child);
/*
 * BUS_DELETE_RESOURCE method: remove one entry from a direct child's
 * resource list.  Refuses (with a diagnostic) if the resource is still
 * active or busy; otherwise unreserves and deletes the list entry.
 */
4779 pci_delete_resource(device_t dev, device_t child, int type, int rid)
4781 struct pci_devinfo *dinfo;
4782 struct resource_list *rl;
4783 struct resource_list_entry *rle;
/* Only direct children have entries in our resource lists. */
4785 if (device_get_parent(child) != dev)
4788 dinfo = device_get_ivars(child);
4789 rl = &dinfo->resources;
4790 rle = resource_list_find(rl, type, rid);
4795 if (rman_get_flags(rle->res) & RF_ACTIVE ||
4796 resource_list_busy(rl, type, rid)) {
4797 device_printf(dev, "delete_resource: "
4798 "Resource still owned by child, oops. "
4799 "(type=%d, rid=%d, addr=%lx)\n",
4800 type, rid, rman_get_start(rle->res));
4803 resource_list_unreserve(rl, dev, child, type, rid);
4805 resource_list_delete(rl, type, rid);
/* BUS_GET_RESOURCE_LIST method: return the child's per-device list. */
4808 struct resource_list *
4809 pci_get_resource_list (device_t dev, device_t child)
4811 struct pci_devinfo *dinfo = device_get_ivars(child);
4813 return (&dinfo->resources);
/* BUS_GET_DMA_TAG method: all children share the bus's DMA tag. */
4817 pci_get_dma_tag(device_t bus, device_t dev)
4819 struct pci_softc *sc = device_get_softc(bus);
4821 return (sc->sc_dma_tag);
/*
 * PCI_READ_CONFIG method: forward a config-space read to the parent
 * bridge using the child's cached bus/slot/function selector.
 */
4825 pci_read_config_method(device_t dev, device_t child, int reg, int width)
4827 struct pci_devinfo *dinfo = device_get_ivars(child);
4828 pcicfgregs *cfg = &dinfo->cfg;
4830 return (PCIB_READ_CONFIG(device_get_parent(dev),
4831 cfg->bus, cfg->slot, cfg->func, reg, width));
/*
 * PCI_WRITE_CONFIG method: forward a config-space write to the parent
 * bridge using the child's cached bus/slot/function selector.
 */
4835 pci_write_config_method(device_t dev, device_t child, int reg,
4836 uint32_t val, int width)
4838 struct pci_devinfo *dinfo = device_get_ivars(child);
4839 pcicfgregs *cfg = &dinfo->cfg;
4841 PCIB_WRITE_CONFIG(device_get_parent(dev),
4842 cfg->bus, cfg->slot, cfg->func, reg, val, width);
/* Format the child's location as "pci<domain>:<bus>:<slot>:<func>". */
4846 pci_child_location_str_method(device_t dev, device_t child, char *buf,
4850 snprintf(buf, buflen, "pci%d:%d:%d:%d", pci_get_domain(child),
4851 pci_get_bus(child), pci_get_slot(child), pci_get_function(child));
/*
 * Format the child's plug-and-play identity string (vendor, device,
 * subsystem IDs and the 24-bit class code) for devctl/devmatch use.
 */
4856 pci_child_pnpinfo_str_method(device_t dev, device_t child, char *buf,
4859 struct pci_devinfo *dinfo;
4862 dinfo = device_get_ivars(child);
4864 snprintf(buf, buflen, "vendor=0x%04x device=0x%04x subvendor=0x%04x "
4865 "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device,
4866 cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass,
/* PCI_ASSIGN_INTERRUPT method: ask the parent bridge to route INTx. */
4872 pci_assign_interrupt_method(device_t dev, device_t child)
4874 struct pci_devinfo *dinfo = device_get_ivars(child);
4875 pcicfgregs *cfg = &dinfo->cfg;
4877 return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child,
/*
 * dev_lookup eventhandler: translate a pciconf-style selector string
 * ("pciD:B:S:F" or "pciB:S:F", decimal fields) into a device_t via
 * pci_find_dbsf().  Each field is range-checked; in the 3-field form the
 * domain is assumed (per the comment at 4893).
 */
4882 pci_lookup(void *arg, const char *name, device_t *dev)
4886 int domain, bus, slot, func;
4892 * Accept pciconf-style selectors of either pciD:B:S:F or
4893 * pciB:S:F. In the latter case, the domain is assumed to
4896 if (strncmp(name, "pci", 3) != 0)
4898 val = strtol(name + 3, &end, 10);
4899 if (val < 0 || val > INT_MAX || *end != ':')
4902 val = strtol(end + 1, &end, 10);
4903 if (val < 0 || val > INT_MAX || *end != ':')
4906 val = strtol(end + 1, &end, 10);
4907 if (val < 0 || val > INT_MAX)
4911 val = strtol(end + 1, &end, 10);
4912 if (val < 0 || val > INT_MAX || *end != '\0')
4915 } else if (*end == '\0') {
/* Final sanity check against the PCI/PCIe addressing limits. */
4923 if (domain > PCI_DOMAINMAX || bus > PCI_BUSMAX || slot > PCI_SLOTMAX ||
4924 func > PCIE_ARI_FUNCMAX || (slot != 0 && func > PCI_FUNCMAX))
4927 *dev = pci_find_dbsf(domain, bus, slot, func);
/*
 * Module event handler.  On load: initialize the global device queue,
 * create /dev entry for pciconf, load the vendor database, and register
 * the dev_lookup hook.  On unload: deregister the hook and destroy the
 * cdev.  (The MOD_LOAD/MOD_UNLOAD case labels were lost in extraction.)
 */
4931 pci_modevent(module_t mod, int what, void *arg)
4933 static struct cdev *pci_cdev;
4934 static eventhandler_tag tag;
4938 STAILQ_INIT(&pci_devq);
4940 pci_cdev = make_dev(&pcicdev, 0, UID_ROOT, GID_WHEEL, 0644,
4942 pci_load_vendor_data();
4943 tag = EVENTHANDLER_REGISTER(dev_lookup, pci_lookup, NULL,
4949 EVENTHANDLER_DEREGISTER(dev_lookup, tag);
4950 destroy_dev(pci_cdev);
/*
 * Restore the saved PCI Express capability control registers.  Registers
 * that only exist for certain port types (link, slot, root) are written
 * conditionally; the "v2" registers are restored only for capability
 * version > 1 (the version check line itself was lost in extraction).
 * Mirror of pci_cfg_save_pcie().
 */
4958 pci_cfg_restore_pcie(device_t dev, struct pci_devinfo *dinfo)
4960 #define WREG(n, v) pci_write_config(dev, pos + (n), (v), 2)
4961 struct pcicfg_pcie *cfg;
4964 cfg = &dinfo->cfg.pcie;
4965 pos = cfg->pcie_location;
4967 version = cfg->pcie_flags & PCIEM_FLAGS_VERSION;
4969 WREG(PCIER_DEVICE_CTL, cfg->pcie_device_ctl);
4971 if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
4972 cfg->pcie_type == PCIEM_TYPE_ENDPOINT ||
4973 cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT)
4974 WREG(PCIER_LINK_CTL, cfg->pcie_link_ctl);
4976 if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
4977 (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT &&
4978 (cfg->pcie_flags & PCIEM_FLAGS_SLOT))))
4979 WREG(PCIER_SLOT_CTL, cfg->pcie_slot_ctl);
4981 if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
4982 cfg->pcie_type == PCIEM_TYPE_ROOT_EC)
4983 WREG(PCIER_ROOT_CTL, cfg->pcie_root_ctl);
4986 WREG(PCIER_DEVICE_CTL2, cfg->pcie_device_ctl2);
4987 WREG(PCIER_LINK_CTL2, cfg->pcie_link_ctl2);
4988 WREG(PCIER_SLOT_CTL2, cfg->pcie_slot_ctl2);
/* Restore the saved PCI-X command register (see pci_cfg_save_pcix). */
4994 pci_cfg_restore_pcix(device_t dev, struct pci_devinfo *dinfo)
4996 pci_write_config(dev, dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND,
4997 dinfo->cfg.pcix.pcix_command, 2);
/*
 * Restore a type-0 device's saved config state: power it up to D0 first
 * (D3 exit resets BARs), then rewrite BARs, command/interrupt/timing
 * registers, PCIe/PCI-X capability state, and MSI/MSI-X configuration.
 * Bridges (type 1) and cardbus (type 2) are intentionally skipped.
 */
5001 pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo)
5005 * Only do header type 0 devices. Type 1 devices are bridges,
5006 * which we know need special treatment. Type 2 devices are
5007 * cardbus bridges which also require special treatment.
5008 * Other types are unknown, and we err on the side of safety
5011 if ((dinfo->cfg.hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)
5015 * Restore the device to full power mode. We must do this
5016 * before we restore the registers because moving from D3 to
5017 * D0 will cause the chip's BARs and some other registers to
5018 * be reset to some unknown power on reset values. Cut down
5019 * the noise on boot by doing nothing if we are already in
5022 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0)
5023 pci_set_powerstate(dev, PCI_POWERSTATE_D0)
5024 pci_restore_bars(dev);
5025 pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2);
5026 pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1);
5027 pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1);
5028 pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1);
5029 pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1);
5030 pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1);
5031 pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1);
5032 pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1);
5033 pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1);
5036 * Restore extended capabilities for PCI-Express and PCI-X
/* Capability offset 0 means the capability is absent. */
5038 if (dinfo->cfg.pcie.pcie_location != 0)
5039 pci_cfg_restore_pcie(dev, dinfo);
5040 if (dinfo->cfg.pcix.pcix_location != 0)
5041 pci_cfg_restore_pcix(dev, dinfo);
5043 /* Restore MSI and MSI-X configurations if they are present. */
5044 if (dinfo->cfg.msi.msi_location != 0)
5045 pci_resume_msi(dev);
5046 if (dinfo->cfg.msix.msix_location != 0)
5047 pci_resume_msix(dev);
/*
 * Save the PCI Express capability control registers into dinfo, using
 * the same port-type/version conditions as pci_cfg_restore_pcie() so
 * save and restore stay symmetric.
 */
5051 pci_cfg_save_pcie(device_t dev, struct pci_devinfo *dinfo)
5053 #define RREG(n) pci_read_config(dev, pos + (n), 2)
5054 struct pcicfg_pcie *cfg;
5057 cfg = &dinfo->cfg.pcie;
5058 pos = cfg->pcie_location;
5060 cfg->pcie_flags = RREG(PCIER_FLAGS);
5062 version = cfg->pcie_flags & PCIEM_FLAGS_VERSION;
5064 cfg->pcie_device_ctl = RREG(PCIER_DEVICE_CTL);
5066 if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
5067 cfg->pcie_type == PCIEM_TYPE_ENDPOINT ||
5068 cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT)
5069 cfg->pcie_link_ctl = RREG(PCIER_LINK_CTL);
5071 if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
5072 (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT &&
5073 (cfg->pcie_flags & PCIEM_FLAGS_SLOT))))
5074 cfg->pcie_slot_ctl = RREG(PCIER_SLOT_CTL);
5076 if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
5077 cfg->pcie_type == PCIEM_TYPE_ROOT_EC)
5078 cfg->pcie_root_ctl = RREG(PCIER_ROOT_CTL);
5081 cfg->pcie_device_ctl2 = RREG(PCIER_DEVICE_CTL2);
5082 cfg->pcie_link_ctl2 = RREG(PCIER_LINK_CTL2);
5083 cfg->pcie_slot_ctl2 = RREG(PCIER_SLOT_CTL2);
/* Save the PCI-X command register (restored by pci_cfg_restore_pcix). */
5089 pci_cfg_save_pcix(device_t dev, struct pci_devinfo *dinfo)
5091 dinfo->cfg.pcix.pcix_command = pci_read_config(dev,
5092 dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND, 2);
/*
 * Snapshot a type-0 device's writable config registers into dinfo, then
 * optionally (setstate != 0) power the device down to D3 subject to the
 * pci_do_power_nodriver policy.  Bridges/cardbus are skipped entirely.
 */
5096 pci_cfg_save(device_t dev, struct pci_devinfo *dinfo, int setstate)
5102 * Only do header type 0 devices. Type 1 devices are bridges, which
5103 * we know need special treatment. Type 2 devices are cardbus bridges
5104 * which also require special treatment. Other types are unknown, and
5105 * we err on the side of safety by ignoring them. Powering down
5106 * bridges should not be undertaken lightly.
5108 if ((dinfo->cfg.hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)
5112 * Some drivers apparently write to these registers w/o updating our
5113 * cached copy. No harm happens if we update the copy, so do so here
5114 * so we can restore them. The COMMAND register is modified by the
5115 * bus w/o updating the cache. This should represent the normally
5116 * writable portion of the 'defined' part of type 0 headers. In
5117 * theory we also need to save/restore the PCI capability structures
5118 * we know about, but apart from power we don't know any that are
5121 dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
5122 dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2);
5123 dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2);
5124 dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2);
5125 dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2);
5126 dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1);
5127 dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1);
5128 dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1);
5129 dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1);
5130 dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
5131 dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
5132 dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1);
5133 dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1);
5134 dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1);
5135 dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1);
5137 if (dinfo->cfg.pcie.pcie_location != 0)
5138 pci_cfg_save_pcie(dev, dinfo);
5140 if (dinfo->cfg.pcix.pcix_location != 0)
5141 pci_cfg_save_pcix(dev, dinfo);
5144 * don't set the state for display devices, base peripherals and
5145 * memory devices since bad things happen when they are powered down.
5146 * We should (a) have drivers that can easily detach and (b) use
5147 * generic drivers for these devices so that some device actually
5148 * attaches. We need to make sure that when we implement (a) we don't
5149 * power the device down on a reattach.
5151 cls = pci_get_class(dev);
/* pci_do_power_nodriver policy: 0 never, 1 conservative, 2 aggressive, 3 all. */
5154 switch (pci_do_power_nodriver)
5156 case 0: /* NO powerdown at all */
5158 case 1: /* Conservative about what to power down */
5159 if (cls == PCIC_STORAGE)
5162 case 2: /* Agressive about what to power down */
5163 if (cls == PCIC_DISPLAY || cls == PCIC_MEMORY ||
5164 cls == PCIC_BASEPERIPH)
5167 case 3: /* Power down everything */
5171 * PCI spec says we can only go into D3 state from D0 state.
5172 * Transition from D[12] into D0 before going to D3 state.
5174 ps = pci_get_powerstate(dev);
5175 if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3)
5176 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
5177 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3)
5178 pci_set_powerstate(dev, PCI_POWERSTATE_D3);
5181 /* Wrapper APIs suitable for device driver use. */
5183 pci_save_state(device_t dev)
5185 struct pci_devinfo *dinfo;
5187 dinfo = device_get_ivars(dev);
5188 pci_cfg_save(dev, dinfo, 0);
/* Driver-facing wrapper around pci_cfg_restore(). */
5192 pci_restore_state(device_t dev)
5194 struct pci_devinfo *dinfo;
5196 dinfo = device_get_ivars(dev);
5197 pci_cfg_restore(dev, dinfo);
5201 pci_get_rid_method(device_t dev, device_t child)
5204 return (PCIB_GET_RID(device_get_parent(dev), child));