2 * Copyright (c) 1997, Stefan Esser <se@kfreebsd.org>
3 * Copyright (c) 2000, Michael Smith <msmith@kfreebsd.org>
4 * Copyright (c) 2000, BSDi
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 * $FreeBSD: src/sys/dev/pci/pci.c,v 1.355.2.9.2.1 2009/04/15 03:14:26 kensmith Exp $
33 #include "opt_compat_oldpci.h"
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/malloc.h>
38 #include <sys/module.h>
39 #include <sys/linker.h>
40 #include <sys/fcntl.h>
42 #include <sys/kernel.h>
43 #include <sys/queue.h>
44 #include <sys/sysctl.h>
45 #include <sys/endian.h>
46 #include <sys/machintr.h>
48 #include <machine/msi_machdep.h>
52 #include <vm/vm_extern.h>
56 #include <sys/device.h>
58 #include <sys/pciio.h>
59 #include <bus/pci/pcireg.h>
60 #include <bus/pci/pcivar.h>
61 #include <bus/pci/pci_private.h>
67 #include <contrib/dev/acpica/acpi.h>
70 #define ACPI_PWR_FOR_SLEEP(x, y, z)
73 extern struct dev_ops pcic_ops; /* XXX */
75 typedef void (*pci_read_cap_t)(device_t, int, int, pcicfgregs *);
77 static uint32_t pci_mapbase(unsigned mapreg);
78 static const char *pci_maptype(unsigned mapreg);
79 static int pci_mapsize(unsigned testval);
80 static int pci_maprange(unsigned mapreg);
81 static void pci_fixancient(pcicfgregs *cfg);
83 static int pci_porten(device_t pcib, int b, int s, int f);
84 static int pci_memen(device_t pcib, int b, int s, int f);
85 static void pci_assign_interrupt(device_t bus, device_t dev,
87 static int pci_add_map(device_t pcib, device_t bus, device_t dev,
88 int b, int s, int f, int reg,
89 struct resource_list *rl, int force, int prefetch);
90 static int pci_probe(device_t dev);
91 static int pci_attach(device_t dev);
92 static void pci_child_detached(device_t, device_t);
93 static void pci_load_vendor_data(void);
94 static int pci_describe_parse_line(char **ptr, int *vendor,
95 int *device, char **desc);
96 static char *pci_describe_device(device_t dev);
97 static int pci_modevent(module_t mod, int what, void *arg);
98 static void pci_hdrtypedata(device_t pcib, int b, int s, int f,
100 static void pci_read_capabilities(device_t pcib, pcicfgregs *cfg);
101 static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg,
102 int reg, uint32_t *data);
104 static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg,
105 int reg, uint32_t data);
107 static void pci_read_vpd(device_t pcib, pcicfgregs *cfg);
108 static void pci_disable_msi(device_t dev);
109 static void pci_enable_msi(device_t dev, uint64_t address,
111 static void pci_setup_msix_vector(device_t dev, u_int index,
112 uint64_t address, uint32_t data);
113 static void pci_mask_msix_vector(device_t dev, u_int index);
114 static void pci_unmask_msix_vector(device_t dev, u_int index);
115 static int pci_msi_blacklisted(void);
116 static void pci_resume_msi(device_t dev);
117 static void pci_resume_msix(device_t dev);
118 static int pcie_slotimpl(const pcicfgregs *);
119 static void pci_print_verbose_expr(const pcicfgregs *);
121 static void pci_read_cap_pmgt(device_t, int, int, pcicfgregs *);
122 static void pci_read_cap_ht(device_t, int, int, pcicfgregs *);
123 static void pci_read_cap_msi(device_t, int, int, pcicfgregs *);
124 static void pci_read_cap_msix(device_t, int, int, pcicfgregs *);
125 static void pci_read_cap_vpd(device_t, int, int, pcicfgregs *);
126 static void pci_read_cap_subvendor(device_t, int, int,
128 static void pci_read_cap_pcix(device_t, int, int, pcicfgregs *);
129 static void pci_read_cap_express(device_t, int, int, pcicfgregs *);
131 static device_method_t pci_methods[] = {
132 /* Device interface */
133 DEVMETHOD(device_probe, pci_probe),
134 DEVMETHOD(device_attach, pci_attach),
135 DEVMETHOD(device_detach, bus_generic_detach),
136 DEVMETHOD(device_shutdown, bus_generic_shutdown),
137 DEVMETHOD(device_suspend, pci_suspend),
138 DEVMETHOD(device_resume, pci_resume),
141 DEVMETHOD(bus_print_child, pci_print_child),
142 DEVMETHOD(bus_probe_nomatch, pci_probe_nomatch),
143 DEVMETHOD(bus_read_ivar, pci_read_ivar),
144 DEVMETHOD(bus_write_ivar, pci_write_ivar),
145 DEVMETHOD(bus_driver_added, pci_driver_added),
146 DEVMETHOD(bus_child_detached, pci_child_detached),
147 DEVMETHOD(bus_setup_intr, pci_setup_intr),
148 DEVMETHOD(bus_teardown_intr, pci_teardown_intr),
150 DEVMETHOD(bus_get_resource_list,pci_get_resource_list),
151 DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
152 DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
153 DEVMETHOD(bus_delete_resource, pci_delete_resource),
154 DEVMETHOD(bus_alloc_resource, pci_alloc_resource),
155 DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource),
156 DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
157 DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
158 DEVMETHOD(bus_child_pnpinfo_str, pci_child_pnpinfo_str_method),
159 DEVMETHOD(bus_child_location_str, pci_child_location_str_method),
162 DEVMETHOD(pci_read_config, pci_read_config_method),
163 DEVMETHOD(pci_write_config, pci_write_config_method),
164 DEVMETHOD(pci_enable_busmaster, pci_enable_busmaster_method),
165 DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method),
166 DEVMETHOD(pci_enable_io, pci_enable_io_method),
167 DEVMETHOD(pci_disable_io, pci_disable_io_method),
168 DEVMETHOD(pci_get_vpd_ident, pci_get_vpd_ident_method),
169 DEVMETHOD(pci_get_vpd_readonly, pci_get_vpd_readonly_method),
170 DEVMETHOD(pci_get_powerstate, pci_get_powerstate_method),
171 DEVMETHOD(pci_set_powerstate, pci_set_powerstate_method),
172 DEVMETHOD(pci_assign_interrupt, pci_assign_interrupt_method),
173 DEVMETHOD(pci_find_extcap, pci_find_extcap_method),
174 DEVMETHOD(pci_alloc_msi, pci_alloc_msi_method),
175 DEVMETHOD(pci_alloc_msix, pci_alloc_msix_method),
176 DEVMETHOD(pci_release_msi, pci_release_msi_method),
177 DEVMETHOD(pci_msi_count, pci_msi_count_method),
178 DEVMETHOD(pci_msix_count, pci_msix_count_method),
183 DEFINE_CLASS_0(pci, pci_driver, pci_methods, 0);
185 static devclass_t pci_devclass;
186 DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, NULL);
187 MODULE_VERSION(pci, 1);
189 static char *pci_vendordata;
190 static size_t pci_vendordata_size;
193 static const struct pci_read_cap {
195 pci_read_cap_t read_cap;
196 } pci_read_caps[] = {
197 { PCIY_PMG, pci_read_cap_pmgt },
198 { PCIY_HT, pci_read_cap_ht },
199 { PCIY_MSI, pci_read_cap_msi },
200 { PCIY_MSIX, pci_read_cap_msix },
201 { PCIY_VPD, pci_read_cap_vpd },
202 { PCIY_SUBVENDOR, pci_read_cap_subvendor },
203 { PCIY_PCIX, pci_read_cap_pcix },
204 { PCIY_EXPRESS, pci_read_cap_express },
205 { 0, NULL } /* required last entry */
209 uint32_t devid; /* Vendor/device of the card */
211 #define PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */
212 #define PCI_QUIRK_DISABLE_MSI 2 /* MSI/MSI-X doesn't work */
217 struct pci_quirk pci_quirks[] = {
218 /* The Intel 82371AB and 82443MX has a map register at offset 0x90. */
219 { 0x71138086, PCI_QUIRK_MAP_REG, 0x90, 0 },
220 { 0x719b8086, PCI_QUIRK_MAP_REG, 0x90, 0 },
221 /* As does the Serverworks OSB4 (the SMBus mapping register) */
222 { 0x02001166, PCI_QUIRK_MAP_REG, 0x90, 0 },
225 * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge
226 * or the CMIC-SL (AKA ServerWorks GC_LE).
228 { 0x00141166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
229 { 0x00171166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
232 * MSI doesn't work on earlier Intel chipsets including
233 * E7500, E7501, E7505, 845, 865, 875/E7210, and 855.
235 { 0x25408086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
236 { 0x254c8086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
237 { 0x25508086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
238 { 0x25608086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
239 { 0x25708086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
240 { 0x25788086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
241 { 0x35808086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
244 * MSI doesn't work with devices behind the AMD 8131 HT-PCIX
247 { 0x74501022, PCI_QUIRK_DISABLE_MSI, 0, 0 },
252 /* map register information */
253 #define PCI_MAPMEM 0x01 /* memory map */
254 #define PCI_MAPMEMP 0x02 /* prefetchable memory map */
255 #define PCI_MAPPORT 0x04 /* port map */
257 struct devlist pci_devq;
258 uint32_t pci_generation;
259 uint32_t pci_numdevs = 0;
260 static int pcie_chipset, pcix_chipset;
263 SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD, 0, "PCI bus tuning parameters");
265 static int pci_enable_io_modes = 1;
266 TUNABLE_INT("hw.pci.enable_io_modes", &pci_enable_io_modes);
267 SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RW,
268 &pci_enable_io_modes, 1,
269 "Enable I/O and memory bits in the config register. Some BIOSes do not\n\
270 enable these bits correctly. We'd like to do this all the time, but there\n\
271 are some peripherals that this causes problems with.");
273 static int pci_do_power_nodriver = 0;
274 TUNABLE_INT("hw.pci.do_power_nodriver", &pci_do_power_nodriver);
275 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RW,
276 &pci_do_power_nodriver, 0,
277 "Place a function into D3 state when no driver attaches to it. 0 means\n\
278 disable. 1 means conservatively place devices into D3 state. 2 means\n\
279 aggressively place devices into D3 state. 3 means put absolutely everything\n\
282 static int pci_do_power_resume = 1;
283 TUNABLE_INT("hw.pci.do_power_resume", &pci_do_power_resume);
284 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RW,
285 &pci_do_power_resume, 1,
286 "Transition from D3 -> D0 on resume.");
288 static int pci_do_msi = 1;
289 TUNABLE_INT("hw.pci.enable_msi", &pci_do_msi);
290 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RW, &pci_do_msi, 1,
291 "Enable support for MSI interrupts");
293 static int pci_do_msix = 0;
295 TUNABLE_INT("hw.pci.enable_msix", &pci_do_msix);
296 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RW, &pci_do_msix, 1,
297 "Enable support for MSI-X interrupts");
300 static int pci_honor_msi_blacklist = 1;
301 TUNABLE_INT("hw.pci.honor_msi_blacklist", &pci_honor_msi_blacklist);
302 SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RD,
303 &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI");
305 static int pci_msi_cpuid;
307 /* Find a device_t by bus/slot/function in domain 0 */
/*
 * Thin convenience wrapper: delegates to pci_find_dbsf() with domain 0.
 * NOTE(review): return type and braces are elided in this extract.
 */
310 pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func)
313 return (pci_find_dbsf(0, bus, slot, func));
316 /* Find a device_t by domain/bus/slot/function */
/*
 * Linear scan of the global pci_devq device list; returns the cached
 * device_t of the first entry whose domain/bus/slot/func all match.
 * NOTE(review): the no-match return path is elided in this extract.
 */
319 pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func)
321 struct pci_devinfo *dinfo;
323 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
324 if ((dinfo->cfg.domain == domain) &&
325 (dinfo->cfg.bus == bus) &&
326 (dinfo->cfg.slot == slot) &&
327 (dinfo->cfg.func == func)) {
328 return (dinfo->cfg.dev);
335 /* Find a device_t by vendor/device ID */
/*
 * Linear scan of pci_devq; returns the first device whose PCI vendor
 * and device IDs match.  Only the first match is returned even if
 * several identical cards are present.
 */
338 pci_find_device(uint16_t vendor, uint16_t device)
340 struct pci_devinfo *dinfo;
342 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
343 if ((dinfo->cfg.vendor == vendor) &&
344 (dinfo->cfg.device == device)) {
345 return (dinfo->cfg.dev);
352 /* return base address of memory or port map */
/*
 * Strip the type/attribute bits from a BAR value, leaving only the
 * base address: memory BARs keep PCIM_BAR_MEM_BASE bits, I/O BARs
 * keep PCIM_BAR_IO_BASE bits.
 */
355 pci_mapbase(uint32_t mapreg)
358 if (PCI_BAR_MEM(mapreg))
359 return (mapreg & PCIM_BAR_MEM_BASE);
361 return (mapreg & PCIM_BAR_IO_BASE);
364 /* return map type of memory or port map */
/*
 * Return a human-readable string describing a BAR's type.  Memory
 * BARs with the prefetch bit set report "Prefetchable Memory".
 * NOTE(review): the I/O and plain-memory return strings are elided
 * in this extract.
 */
367 pci_maptype(unsigned mapreg)
370 if (PCI_BAR_IO(mapreg))
372 if (mapreg & PCIM_BAR_MEM_PREFETCH)
373 return ("Prefetchable Memory");
377 /* return log2 of map size decoded for memory or port map */
/*
 * After writing all-ones to a BAR, the read-back value encodes the
 * decoded size: strip type bits, then count trailing zero bits to
 * obtain log2 of the size.  NOTE(review): loop body and return are
 * elided in this extract.
 */
380 pci_mapsize(uint32_t testval)
384 testval = pci_mapbase(testval);
387 while ((testval & 1) == 0)
396 /* return log2 of address range supported by map register */
/*
 * Classify a BAR's addressable range: I/O BARs are handled first,
 * then memory BARs dispatch on the PCIM_BAR_MEM_TYPE field
 * (32-bit, below-1MB, or 64-bit).  NOTE(review): the per-case
 * return values are elided in this extract.
 */
399 pci_maprange(unsigned mapreg)
403 if (PCI_BAR_IO(mapreg))
406 switch (mapreg & PCIM_BAR_MEM_TYPE) {
407 case PCIM_BAR_MEM_32:
410 case PCIM_BAR_MEM_1MB:
413 case PCIM_BAR_MEM_64:
420 /* adjust some values from PCI 1.0 devices to match 2.0 standards ... */
/*
 * Pre-2.0 devices may report header type 0 while actually being
 * PCI-PCI bridges; detect that via class/subclass and fix hdrtype.
 * Only header type 0 devices are considered.
 */
423 pci_fixancient(pcicfgregs *cfg)
425 if (cfg->hdrtype != 0)
428 /* PCI to PCI bridges use header type 1 */
429 if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI)
433 /* extract header type specific config data */
/*
 * Fill in the pcicfgregs fields whose config-space location depends
 * on the header type: subvendor/subdevice, number of BARs, and (for
 * bridge/cardbus headers) the secondary bus number.
 * NOTE(review): the case labels themselves are elided in this
 * extract; the groups below correspond to header types 0, 1 and 2.
 */
436 pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg)
438 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
439 switch (cfg->hdrtype) {
441 cfg->subvendor = REG(PCIR_SUBVEND_0, 2);
442 cfg->subdevice = REG(PCIR_SUBDEV_0, 2);
443 cfg->nummaps = PCI_MAXMAPS_0;
446 cfg->nummaps = PCI_MAXMAPS_1;
448 cfg->secondarybus = REG(PCIR_SECBUS_1, 1);
452 cfg->subvendor = REG(PCIR_SUBVEND_2, 2);
453 cfg->subdevice = REG(PCIR_SUBDEV_2, 2);
454 cfg->nummaps = PCI_MAXMAPS_2;
456 cfg->secondarybus = REG(PCIR_SECBUS_2, 1);
463 /* read configuration header into pcicfgregs structure */
/*
 * Probe one bus/slot/function.  If a device responds (DEVVENDOR
 * register reads something other than all-ones), allocate a
 * pci_devinfo of the caller-supplied size, populate its pcicfgregs
 * from config space, parse header-type data and capabilities, link
 * it onto the global device list, and mirror the fields into the
 * pciconf structure used by the pciio ioctl interface.  Returns the
 * new entry, or NULL (via the zero-initialized devlist_entry) when
 * nothing responds.
 */
465 pci_read_device(device_t pcib, int d, int b, int s, int f, size_t size)
467 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
468 pcicfgregs *cfg = NULL;
469 struct pci_devinfo *devlist_entry;
470 struct devlist *devlist_head;
472 devlist_head = &pci_devq;
474 devlist_entry = NULL;
476 if (REG(PCIR_DEVVENDOR, 4) != -1) {
477 devlist_entry = kmalloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
479 cfg = &devlist_entry->cfg;
485 cfg->vendor = REG(PCIR_VENDOR, 2);
486 cfg->device = REG(PCIR_DEVICE, 2);
487 cfg->cmdreg = REG(PCIR_COMMAND, 2);
488 cfg->statreg = REG(PCIR_STATUS, 2);
489 cfg->baseclass = REG(PCIR_CLASS, 1);
490 cfg->subclass = REG(PCIR_SUBCLASS, 1);
491 cfg->progif = REG(PCIR_PROGIF, 1);
492 cfg->revid = REG(PCIR_REVID, 1);
493 cfg->hdrtype = REG(PCIR_HDRTYPE, 1);
494 cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1);
495 cfg->lattimer = REG(PCIR_LATTIMER, 1);
496 cfg->intpin = REG(PCIR_INTPIN, 1);
497 cfg->intline = REG(PCIR_INTLINE, 1);
499 cfg->mingnt = REG(PCIR_MINGNT, 1);
500 cfg->maxlat = REG(PCIR_MAXLAT, 1);
/* Multi-function bit lives in the header type register; split it out. */
502 cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0;
503 cfg->hdrtype &= ~PCIM_MFDEV;
506 pci_hdrtypedata(pcib, b, s, f, cfg);
508 pci_read_capabilities(pcib, cfg);
510 STAILQ_INSERT_TAIL(devlist_head, devlist_entry, pci_links);
/* Mirror parsed config into the pciconf view used by userland ioctls. */
512 devlist_entry->conf.pc_sel.pc_domain = cfg->domain;
513 devlist_entry->conf.pc_sel.pc_bus = cfg->bus;
514 devlist_entry->conf.pc_sel.pc_dev = cfg->slot;
515 devlist_entry->conf.pc_sel.pc_func = cfg->func;
516 devlist_entry->conf.pc_hdr = cfg->hdrtype;
518 devlist_entry->conf.pc_subvendor = cfg->subvendor;
519 devlist_entry->conf.pc_subdevice = cfg->subdevice;
520 devlist_entry->conf.pc_vendor = cfg->vendor;
521 devlist_entry->conf.pc_device = cfg->device;
523 devlist_entry->conf.pc_class = cfg->baseclass;
524 devlist_entry->conf.pc_subclass = cfg->subclass;
525 devlist_entry->conf.pc_progif = cfg->progif;
526 devlist_entry->conf.pc_revid = cfg->revid;
531 return (devlist_entry);
/*
 * Sanitize a capability-list "next pointer" in place: mask off the
 * two reserved low bits (per PCI local bus spec 3.0) and reject
 * pointers below 0x40, which would point into the standard header.
 * Presumably returns whether the pointer is still usable so the
 * caller's capability walk can terminate -- TODO confirm, the
 * return statements are elided in this extract.
 */
536 pci_fixup_nextptr(int *nextptr0)
538 int nextptr = *nextptr0;
540 /* "Next pointer" is only one byte */
541 KASSERT(nextptr <= 0xff, ("Illegal next pointer %d\n", nextptr));
545 * PCI local bus spec 3.0:
547 * "... The bottom two bits of all pointers are reserved
548 * and must be implemented as 00b although software must
549 * mask them to allow for future uses of these bits ..."
552 kprintf("Illegal PCI extended capability "
553 "offset, fixup 0x%02x -> 0x%02x\n",
554 nextptr, nextptr & ~0x3);
560 if (nextptr < 0x40) {
562 kprintf("Illegal PCI extended capability "
563 "offset 0x%02x", nextptr);
/*
 * Parse the power management capability (PCIY_PMG): record the PM
 * capability word and the config-space offsets of the status and
 * PMCSR registers.  The optional data register offset is only
 * recorded if the capability is long enough to contain it.
 */
571 pci_read_cap_pmgt(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
574 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
576 struct pcicfg_pp *pp = &cfg->pp;
581 pp->pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
582 pp->pp_status = ptr + PCIR_POWER_STATUS;
583 pp->pp_pmcsr = ptr + PCIR_POWER_PMCSR;
585 if ((nextptr - ptr) > PCIR_POWER_DATA) {
588 * We should write to data_select and read back from
589 * data_scale to determine whether data register is
593 pp->pp_data = ptr + PCIR_POWER_DATA;
/*
 * Parse a HyperTransport capability (x86 only).  Records the slave
 * capability location, and for MSI-mapping capabilities validates
 * the mapping window address: a non-FIXED window must point at the
 * architectural MSI base (MSI_X86_ADDR_BASE); anything else is
 * reported and the default is assumed.  The command word and window
 * address are cached in cfg->ht for later use by pci_ht_map_msi().
 */
603 pci_read_cap_ht(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
605 #if defined(__i386__) || defined(__x86_64__)
608 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
610 struct pcicfg_ht *ht = &cfg->ht;
614 /* Determine HT-specific capability type. */
615 val = REG(ptr + PCIR_HT_COMMAND, 2);
617 if ((val & 0xe000) == PCIM_HTCAP_SLAVE)
618 cfg->ht.ht_slave = ptr;
620 if ((val & PCIM_HTCMD_CAP_MASK) != PCIM_HTCAP_MSI_MAPPING)
623 if (!(val & PCIM_HTCMD_MSI_FIXED)) {
624 /* Sanity check the mapping window. */
625 addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI, 4);
627 addr |= REG(ptr + PCIR_HTMSI_ADDRESS_LO, 4);
628 if (addr != MSI_X86_ADDR_BASE) {
629 device_printf(pcib, "HT Bridge at pci%d:%d:%d:%d "
630 "has non-default MSI window 0x%llx\n",
631 cfg->domain, cfg->bus, cfg->slot, cfg->func,
635 addr = MSI_X86_ADDR_BASE;
639 ht->ht_msictrl = val;
640 ht->ht_msiaddr = addr;
644 #endif /* __i386__ || __x86_64__ */
/*
 * Parse the MSI capability: cache its location and control word,
 * and decode the Multiple Message Capable field (a power-of-two
 * exponent) into the supported message count.
 */
648 pci_read_cap_msi(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
651 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
653 struct pcicfg_msi *msi = &cfg->msi;
655 msi->msi_location = ptr;
656 msi->msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2);
/* MMC field encodes log2 of the message count. */
657 msi->msi_msgnum = 1 << ((msi->msi_ctrl & PCIM_MSICTRL_MMC_MASK) >> 1);
/*
 * Parse the MSI-X capability: cache location, control word and
 * table size (the TABLE_SIZE field is N-1 encoded), then decode the
 * table and PBA registers, each of which packs a BAR index (BIR) in
 * the low bits and an offset within that BAR in the rest.
 */
663 pci_read_cap_msix(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
666 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
668 struct pcicfg_msix *msix = &cfg->msix;
671 msix->msix_location = ptr;
672 msix->msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2);
673 msix->msix_msgnum = (msix->msix_ctrl & PCIM_MSIXCTRL_TABLE_SIZE) + 1;
675 val = REG(ptr + PCIR_MSIX_TABLE, 4);
676 msix->msix_table_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK);
677 msix->msix_table_offset = val & ~PCIM_MSIX_BIR_MASK;
679 val = REG(ptr + PCIR_MSIX_PBA, 4);
680 msix->msix_pba_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK);
681 msix->msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK;
/*
 * VPD capability: just remember where it lives; the actual VPD data
 * is read lazily by pci_read_vpd() on first use.
 */
687 pci_read_cap_vpd(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
689 cfg->vpd.vpd_reg = ptr;
/*
 * Subvendor capability (PCI-PCI bridges only, hence the header type
 * 1 check): a single 32-bit register holding subvendor in the low
 * half and subdevice in the high half.
 */
693 pci_read_cap_subvendor(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
696 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
698 /* Should always be true. */
699 if ((cfg->hdrtype & PCIM_HDRTYPE) == 1) {
702 val = REG(ptr + PCIR_SUBVENDCAP_ID, 4);
703 cfg->subvendor = val & 0xffff;
704 cfg->subdevice = val >> 16;
/*
 * PCI-X capability: record its offset; seeing one on a type-1
 * (bridge) header is also used as a heuristic that the chipset is
 * PCI-X (presumably setting pcix_chipset -- the assignment line is
 * elided in this extract, TODO confirm).
 */
711 pci_read_cap_pcix(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
714 * Assume we have a PCI-X chipset if we have
715 * at least one PCI-PCI bridge with a PCI-X
716 * capability. Note that some systems with
717 * PCI-express or HT chipsets might match on
718 * this check as well.
720 if ((cfg->hdrtype & PCIM_HDRTYPE) == 1)
723 cfg->pcix.pcix_ptr = ptr;
/*
 * Decide whether this PCIe port implements a slot.  Requires a
 * version-1 express capability; the "slot implemented" bit is only
 * meaningful on root ports and downstream ports, which in turn only
 * exist on type-1 (bridge) headers.  NOTE(review): the individual
 * return statements are elided in this extract.
 */
727 pcie_slotimpl(const pcicfgregs *cfg)
729 const struct pcicfg_expr *expr = &cfg->expr;
733 * Only version 1 can be parsed currently
735 if ((expr->expr_cap & PCIEM_CAP_VER_MASK) != PCIEM_CAP_VER_1)
739 * - Slot implemented bit is meaningful iff current port is
740 * root port or down stream port.
741 * - Testing for root port or down stream port is meanningful
742 * iff PCI configure has type 1 header.
745 if (cfg->hdrtype != 1)
748 port_type = expr->expr_cap & PCIEM_CAP_PORT_TYPE;
749 if (port_type != PCIE_ROOT_PORT && port_type != PCIE_DOWN_STREAM_PORT)
752 if (!(expr->expr_cap & PCIEM_CAP_SLOT_IMPL))
/*
 * Parse the PCI Express capability: cache its offset and capability
 * word.  For version-1 capabilities, also read the slot capability
 * register when the port actually implements a slot.
 */
759 pci_read_cap_express(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
762 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
764 struct pcicfg_expr *expr = &cfg->expr;
767 * Assume we have a PCI-express chipset if we have
768 * at least one PCI-express device.
772 expr->expr_ptr = ptr;
773 expr->expr_cap = REG(ptr + PCIER_CAPABILITY, 2);
776 * Only version 1 can be parsed currently
778 if ((expr->expr_cap & PCIEM_CAP_VER_MASK) != PCIEM_CAP_VER_1)
782 * Read slot capabilities. Slot capabilities exists iff
783 * current port's slot is implemented
785 if (pcie_slotimpl(cfg))
786 expr->expr_slotcap = REG(ptr + PCIER_SLOTCAP, 4);
/*
 * Walk the device's capability list and dispatch each entry to its
 * handler from the pci_read_caps[] table.  Bails out early if the
 * status register says no capabilities are present, and picks the
 * list-head pointer location according to header type.  Each next
 * pointer is sanitized by pci_fixup_nextptr() before use.  On x86,
 * finishes by enabling the MSI mapping window on any HyperTransport
 * slave that has one but does not have it enabled.
 */
792 pci_read_capabilities(device_t pcib, pcicfgregs *cfg)
794 #define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
795 #define WREG(n, v, w) PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w)
800 if ((REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT) == 0) {
801 /* No capabilities */
805 switch (cfg->hdrtype & PCIM_HDRTYPE) {
808 ptrptr = PCIR_CAP_PTR;
811 ptrptr = PCIR_CAP_PTR_2; /* cardbus capabilities ptr */
814 return; /* no capabilities support */
816 nextptr = REG(ptrptr, 1); /* sanity check? */
819 * Read capability entries.
821 while (pci_fixup_nextptr(&nextptr)) {
822 const struct pci_read_cap *rc;
825 /* Find the next entry */
826 nextptr = REG(ptr + PCICAP_NEXTPTR, 1);
828 /* Process this entry */
829 val = REG(ptr + PCICAP_ID, 1);
830 for (rc = pci_read_caps; rc->read_cap != NULL; ++rc) {
831 if (rc->cap == val) {
832 rc->read_cap(pcib, ptr, nextptr, cfg);
838 #if defined(__i386__) || defined(__x86_64__)
840 * Enable the MSI mapping window for all HyperTransport
841 * slaves. PCI-PCI bridges have their windows enabled via
844 if (cfg->ht.ht_slave != 0 && cfg->ht.ht_msimap != 0 &&
845 !(cfg->ht.ht_msictrl & PCIM_HTCMD_MSI_ENABLE)) {
847 "Enabling MSI window for HyperTransport slave at pci%d:%d:%d:%d\n",
848 cfg->domain, cfg->bus, cfg->slot, cfg->func);
849 cfg->ht.ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
850 WREG(cfg->ht.ht_msimap + PCIR_HT_COMMAND, cfg->ht.ht_msictrl,
855 /* REG and WREG use carry through to next functions */
859 * PCI Vital Product Data
862 #define PCI_VPD_TIMEOUT 1000000
/*
 * Read one 32-bit word of VPD at 4-byte-aligned offset 'reg': write
 * the address with the flag bit clear, poll bit 15 of the address
 * register until the hardware sets it (completion), then fetch the
 * data register.  'count' bounds the polling loop via
 * PCI_VPD_TIMEOUT; the decrement/timeout-return lines are elided in
 * this extract.  Presumably returns 0 on success, non-zero on
 * timeout -- TODO confirm against callers.
 */
865 pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data)
867 int count = PCI_VPD_TIMEOUT;
869 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
871 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2);
873 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000) {
876 DELAY(1); /* limit looping */
878 *data = (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4));
/*
 * Write one 32-bit word of VPD: load the data register, then write
 * the address with the flag bit (0x8000) set and poll until the
 * hardware clears it, signalling completion.  Mirror image of
 * pci_read_vpd_reg(); same bounded-poll/timeout structure (the
 * decrement and return lines are elided in this extract).
 */
885 pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data)
887 int count = PCI_VPD_TIMEOUT;
889 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
891 WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4);
892 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2);
893 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000) {
896 DELAY(1); /* limit looping */
903 #undef PCI_VPD_TIMEOUT
905 struct vpd_readstate {
/*
 * Return the next VPD byte through *data, refilling the 32-bit
 * read-ahead buffer (vrs->val) from the device when it runs dry and
 * otherwise shifting the cached little-endian word right one byte.
 * NOTE(review): the '®' below is a mojibake artifact of this
 * extract -- it was presumably '&reg' (address of the local word
 * buffer); do not take it literally.
 */
915 vpd_nextbyte(struct vpd_readstate *vrs, uint8_t *data)
920 if (vrs->bytesinval == 0) {
921 if (pci_read_vpd_reg(vrs->pcib, vrs->cfg, vrs->off, ®))
923 vrs->val = le32toh(reg);
925 byte = vrs->val & 0xff;
928 vrs->val = vrs->val >> 8;
929 byte = vrs->val & 0xff;
/*
 * Public wrapper: does this PCIe device's port implement a slot?
 * Delegates to pcie_slotimpl() on the device's cached config.
 */
939 pcie_slot_implemented(device_t dev)
941 struct pci_devinfo *dinfo = device_get_ivars(dev);
943 return pcie_slotimpl(&dinfo->cfg);
/*
 * Program the PCIe Max Read Request Size field of the device
 * control register.  The requested size is masked to the field and
 * validated (panic on out-of-range or non-PCIe device); the
 * register is only rewritten when the field actually changes, with
 * a bootverbose-style before/after printout.
 */
947 pcie_set_max_readrq(device_t dev, uint16_t rqsize)
952 rqsize &= PCIEM_DEVCTL_MAX_READRQ_MASK;
953 if (rqsize > PCIEM_DEVCTL_MAX_READRQ_4096) {
954 panic("%s: invalid max read request size 0x%02x\n",
955 device_get_nameunit(dev), rqsize);
958 expr_ptr = pci_get_pciecap_ptr(dev);
960 panic("%s: not PCIe device\n", device_get_nameunit(dev));
962 val = pci_read_config(dev, expr_ptr + PCIER_DEVCTRL, 2);
963 if ((val & PCIEM_DEVCTL_MAX_READRQ_MASK) != rqsize) {
965 device_printf(dev, "adjust device control 0x%04x", val);
967 val &= ~PCIEM_DEVCTL_MAX_READRQ_MASK;
969 pci_write_config(dev, expr_ptr + PCIER_DEVCTRL, val, 2);
972 kprintf(" -> 0x%04x\n", val);
/*
 * Return the current Max Read Request Size field from the PCIe
 * device control register; panics if the device has no express
 * capability.
 */
977 pcie_get_max_readrq(device_t dev)
982 expr_ptr = pci_get_pciecap_ptr(dev);
984 panic("%s: not PCIe device\n", device_get_nameunit(dev));
986 val = pci_read_config(dev, expr_ptr + PCIER_DEVCTRL, 2);
987 return (val & PCIEM_DEVCTL_MAX_READRQ_MASK);
/*
 * Read and parse the device's Vital Product Data into cfg->vpd.
 *
 * This is a byte-driven state machine fed by vpd_nextbyte():
 *   state 0  - resource item header (small/large form, length, name)
 *   state 1  - Identifier String bytes -> cfg->vpd.vpd_ident
 *   state 2  - VPD-R keyword header (2-byte keyword + length)
 *   state 3  - VPD-R keyword value bytes; the "RV" keyword carries
 *              the checksum byte which validates the RO section
 *   state 5  - VPD-W keyword header
 *   state 6  - VPD-W keyword value bytes
 * Negative states signal errors (I/O failure or malformed data).
 * The vpd_ros/vpd_w arrays grow by doubling and are trimmed to the
 * final count with krealloc when their section ends.  On a bad
 * checksum the RO data is freed; on I/O error everything parsed so
 * far is freed.  Finally vpd_cached is set so callers never re-read.
 * NOTE(review): many interior lines (state assignments, case 4,
 * braces) are elided in this extract; the states above are inferred
 * from the visible case labels and their comments.
 */
991 pci_read_vpd(device_t pcib, pcicfgregs *cfg)
993 struct vpd_readstate vrs;
998 int alloc, off; /* alloc/off for RO/W arrays */
1004 /* init vpd reader */
1012 name = remain = i = 0; /* shut up stupid gcc */
1013 alloc = off = 0; /* shut up stupid gcc */
1014 dflen = 0; /* shut up stupid gcc */
1016 while (state >= 0) {
1017 if (vpd_nextbyte(&vrs, &byte)) {
1022 kprintf("vpd: val: %#x, off: %d, bytesinval: %d, byte: %#hhx, " \
1023 "state: %d, remain: %d, name: %#x, i: %d\n", vrs.val,
1024 vrs.off, vrs.bytesinval, byte, state, remain, name, i);
1027 case 0: /* item name */
1029 if (vpd_nextbyte(&vrs, &byte2)) {
1034 if (vpd_nextbyte(&vrs, &byte2)) {
1038 remain |= byte2 << 8;
/* VPD window is at most 0x7f words of 4 bytes; reject overlong items. */
1039 if (remain > (0x7f*4 - vrs.off)) {
1042 "pci%d:%d:%d:%d: invalid VPD data, remain %#x\n",
1043 cfg->domain, cfg->bus, cfg->slot,
/* Small resource form: 3-bit length, 4-bit name. */
1048 remain = byte & 0x7;
1049 name = (byte >> 3) & 0xf;
1052 case 0x2: /* String */
1053 cfg->vpd.vpd_ident = kmalloc(remain + 1,
1054 M_DEVBUF, M_WAITOK);
1061 case 0x10: /* VPD-R */
1064 cfg->vpd.vpd_ros = kmalloc(alloc *
1065 sizeof(*cfg->vpd.vpd_ros), M_DEVBUF,
1069 case 0x11: /* VPD-W */
1072 cfg->vpd.vpd_w = kmalloc(alloc *
1073 sizeof(*cfg->vpd.vpd_w), M_DEVBUF,
1077 default: /* Invalid data, abort */
1083 case 1: /* Identifier String */
1084 cfg->vpd.vpd_ident[i++] = byte;
1087 cfg->vpd.vpd_ident[i] = '\0';
1092 case 2: /* VPD-R Keyword Header */
1094 cfg->vpd.vpd_ros = krealloc(cfg->vpd.vpd_ros,
1095 (alloc *= 2) * sizeof(*cfg->vpd.vpd_ros),
1096 M_DEVBUF, M_WAITOK | M_ZERO);
1098 cfg->vpd.vpd_ros[off].keyword[0] = byte;
1099 if (vpd_nextbyte(&vrs, &byte2)) {
1103 cfg->vpd.vpd_ros[off].keyword[1] = byte2;
1104 if (vpd_nextbyte(&vrs, &byte2)) {
1110 strncmp(cfg->vpd.vpd_ros[off].keyword, "RV",
1113 * if this happens, we can't trust the rest
1117 "pci%d:%d:%d:%d: bad keyword length: %d\n",
1118 cfg->domain, cfg->bus, cfg->slot,
1123 } else if (dflen == 0) {
1124 cfg->vpd.vpd_ros[off].value = kmalloc(1 *
1125 sizeof(*cfg->vpd.vpd_ros[off].value),
1126 M_DEVBUF, M_WAITOK);
1127 cfg->vpd.vpd_ros[off].value[0] = '\x00';
1129 cfg->vpd.vpd_ros[off].value = kmalloc(
1131 sizeof(*cfg->vpd.vpd_ros[off].value),
1132 M_DEVBUF, M_WAITOK);
1135 /* keep in sync w/ state 3's transistions */
1136 if (dflen == 0 && remain == 0)
1138 else if (dflen == 0)
1144 case 3: /* VPD-R Keyword Value */
1145 cfg->vpd.vpd_ros[off].value[i++] = byte;
1146 if (strncmp(cfg->vpd.vpd_ros[off].keyword,
1147 "RV", 2) == 0 && cksumvalid == -1) {
1153 "pci%d:%d:%d:%d: bad VPD cksum, remain %hhu\n",
1154 cfg->domain, cfg->bus,
1155 cfg->slot, cfg->func,
1164 /* keep in sync w/ state 2's transistions */
1166 cfg->vpd.vpd_ros[off++].value[i++] = '\0';
1167 if (dflen == 0 && remain == 0) {
1168 cfg->vpd.vpd_rocnt = off;
1169 cfg->vpd.vpd_ros = krealloc(cfg->vpd.vpd_ros,
1170 off * sizeof(*cfg->vpd.vpd_ros),
1171 M_DEVBUF, M_WAITOK | M_ZERO);
1173 } else if (dflen == 0)
1183 case 5: /* VPD-W Keyword Header */
1185 cfg->vpd.vpd_w = krealloc(cfg->vpd.vpd_w,
1186 (alloc *= 2) * sizeof(*cfg->vpd.vpd_w),
1187 M_DEVBUF, M_WAITOK | M_ZERO);
1189 cfg->vpd.vpd_w[off].keyword[0] = byte;
1190 if (vpd_nextbyte(&vrs, &byte2)) {
1194 cfg->vpd.vpd_w[off].keyword[1] = byte2;
1195 if (vpd_nextbyte(&vrs, &byte2)) {
1199 cfg->vpd.vpd_w[off].len = dflen = byte2;
1200 cfg->vpd.vpd_w[off].start = vrs.off - vrs.bytesinval;
1201 cfg->vpd.vpd_w[off].value = kmalloc((dflen + 1) *
1202 sizeof(*cfg->vpd.vpd_w[off].value),
1203 M_DEVBUF, M_WAITOK);
1206 /* keep in sync w/ state 6's transistions */
1207 if (dflen == 0 && remain == 0)
1209 else if (dflen == 0)
1215 case 6: /* VPD-W Keyword Value */
1216 cfg->vpd.vpd_w[off].value[i++] = byte;
1219 /* keep in sync w/ state 5's transistions */
1221 cfg->vpd.vpd_w[off++].value[i++] = '\0';
1222 if (dflen == 0 && remain == 0) {
1223 cfg->vpd.vpd_wcnt = off;
1224 cfg->vpd.vpd_w = krealloc(cfg->vpd.vpd_w,
1225 off * sizeof(*cfg->vpd.vpd_w),
1226 M_DEVBUF, M_WAITOK | M_ZERO);
1228 } else if (dflen == 0)
1233 kprintf("pci%d:%d:%d:%d: invalid state: %d\n",
1234 cfg->domain, cfg->bus, cfg->slot, cfg->func,
1241 if (cksumvalid == 0 || state < -1) {
1242 /* read-only data bad, clean up */
1243 if (cfg->vpd.vpd_ros != NULL) {
1244 for (off = 0; cfg->vpd.vpd_ros[off].value; off++)
1245 kfree(cfg->vpd.vpd_ros[off].value, M_DEVBUF);
1246 kfree(cfg->vpd.vpd_ros, M_DEVBUF);
1247 cfg->vpd.vpd_ros = NULL;
1251 /* I/O error, clean up */
1252 kprintf("pci%d:%d:%d:%d: failed to read VPD data.\n",
1253 cfg->domain, cfg->bus, cfg->slot, cfg->func);
1254 if (cfg->vpd.vpd_ident != NULL) {
1255 kfree(cfg->vpd.vpd_ident, M_DEVBUF);
1256 cfg->vpd.vpd_ident = NULL;
1258 if (cfg->vpd.vpd_w != NULL) {
1259 for (off = 0; cfg->vpd.vpd_w[off].value; off++)
1260 kfree(cfg->vpd.vpd_w[off].value, M_DEVBUF);
1261 kfree(cfg->vpd.vpd_w, M_DEVBUF);
1262 cfg->vpd.vpd_w = NULL;
1265 cfg->vpd.vpd_cached = 1;
/*
 * Bus-method: return the VPD identifier string via *identptr,
 * lazily reading VPD on first access (only when the device has a
 * VPD capability and nothing is cached yet).  NOTE(review): the
 * error/success return lines are elided in this extract.
 */
1271 pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr)
1273 struct pci_devinfo *dinfo = device_get_ivars(child);
1274 pcicfgregs *cfg = &dinfo->cfg;
1276 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1277 pci_read_vpd(device_get_parent(dev), cfg);
1279 *identptr = cfg->vpd.vpd_ident;
1281 if (*identptr == NULL)
/*
 * Bus-method: look up a VPD read-only keyword (2-byte code such as
 * "PN" or "SN") and return its value via *vptr.  VPD is read lazily
 * on first access; the keyword array is searched linearly.
 * NOTE(review): the final return lines are elided in this extract.
 */
1288 pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw,
1291 struct pci_devinfo *dinfo = device_get_ivars(child);
1292 pcicfgregs *cfg = &dinfo->cfg;
1295 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1296 pci_read_vpd(device_get_parent(dev), cfg);
1298 for (i = 0; i < cfg->vpd.vpd_rocnt; i++)
1299 if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword,
1300 sizeof(cfg->vpd.vpd_ros[i].keyword)) == 0) {
1301 *vptr = cfg->vpd.vpd_ros[i].value;
1304 if (i != cfg->vpd.vpd_rocnt)
1312 * Return the offset in configuration space of the requested extended
1313 * capability entry or 0 if the specified capability was not found.
/*
 * Bus-method backing pci_find_extcap(): verify the CAP_LIST status
 * bit, locate the list head for this header type, then walk the
 * chain comparing each entry's ID against 'capability'.  Returns
 * ENXIO for header types without capability support.
 * NOTE(review): the type-0/1 case labels and the found/not-found
 * return lines are elided in this extract.
 */
1316 pci_find_extcap_method(device_t dev, device_t child, int capability,
1319 struct pci_devinfo *dinfo = device_get_ivars(child);
1320 pcicfgregs *cfg = &dinfo->cfg;
1325 * Check the CAP_LIST bit of the PCI status register first.
1327 status = pci_read_config(child, PCIR_STATUS, 2);
1328 if (!(status & PCIM_STATUS_CAPPRESENT))
1332 * Determine the start pointer of the capabilities list.
1334 switch (cfg->hdrtype & PCIM_HDRTYPE) {
1340 ptr = PCIR_CAP_PTR_2;
1344 return (ENXIO); /* no extended capabilities support */
1346 ptr = pci_read_config(child, ptr, 1);
1349 * Traverse the capabilities list.
1352 if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
1357 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1364 * Support for MSI-X message interrupts.
/*
 * Program one 16-byte MSI-X table entry: low/high halves of the
 * message address, then the message data word.  Also updates the
 * HyperTransport MSI mapping for the new address.  The vector
 * control word (offset +12) is managed separately by the
 * mask/unmask helpers below.
 */
1367 pci_setup_msix_vector(device_t dev, u_int index, uint64_t address,
1370 struct pci_devinfo *dinfo = device_get_ivars(dev);
1371 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1374 KASSERT(msix->msix_table_len > index, ("bogus index"));
1375 offset = msix->msix_table_offset + index * 16;
1376 bus_write_4(msix->msix_table_res, offset, address & 0xffffffff);
1377 bus_write_4(msix->msix_table_res, offset + 4, address >> 32);
1378 bus_write_4(msix->msix_table_res, offset + 8, data);
1380 /* Enable MSI -> HT mapping. */
1381 pci_ht_map_msi(dev, address);
/*
 * Set the mask bit in an MSI-X entry's vector control word
 * (table entry offset +12); read-modify-write only when the bit is
 * not already set, to avoid a redundant MMIO write.
 */
1385 pci_mask_msix_vector(device_t dev, u_int index)
1387 struct pci_devinfo *dinfo = device_get_ivars(dev);
1388 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1389 uint32_t offset, val;
1391 KASSERT(msix->msix_msgnum > index, ("bogus index"));
1392 offset = msix->msix_table_offset + index * 16 + 12;
1393 val = bus_read_4(msix->msix_table_res, offset);
1394 if (!(val & PCIM_MSIX_VCTRL_MASK)) {
1395 val |= PCIM_MSIX_VCTRL_MASK;
1396 bus_write_4(msix->msix_table_res, offset, val);
/*
 * Clear the Vector Control mask bit of one MSI-X table entry,
 * allowing the device to deliver that message.  Only allocated
 * vectors may be unmasked, hence the msix_table_len bound.  The write
 * is skipped when the entry is already unmasked.
 */
1401 pci_unmask_msix_vector(device_t dev, u_int index)
1403 struct pci_devinfo *dinfo = device_get_ivars(dev);
1404 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1405 uint32_t offset, val;
1407 KASSERT(msix->msix_table_len > index, ("bogus index"));
1408 offset = msix->msix_table_offset + index * 16 + 12;
1409 val = bus_read_4(msix->msix_table_res, offset);
1410 if (val & PCIM_MSIX_VCTRL_MASK) {
1411 val &= ~PCIM_MSIX_VCTRL_MASK;
1412 bus_write_4(msix->msix_table_res, offset, val);
/*
 * Return non-zero if the given vector has an interrupt pending, by
 * testing its bit in the MSI-X Pending Bit Array (one bit per vector,
 * 32 vectors per dword).
 */
1417 pci_pending_msix_vector(device_t dev, u_int index)
1419 struct pci_devinfo *dinfo = device_get_ivars(dev);
1420 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1421 uint32_t offset, bit;
1423 KASSERT(msix->msix_table_len > index, ("bogus index"));
1424 offset = msix->msix_pba_offset + (index / 32) * 4;
1425 bit = 1 << index % 32;
1426 return (bus_read_4(msix->msix_pba_res, offset) & bit);
1430 * Restore MSI-X registers and table during resume. If MSI-X is
1431 * enabled then walk the virtual table to restore the actual MSI-X
/*
 * After a suspend/resume cycle the device's MSI-X table contents are
 * lost, so mask every hardware vector, then reprogram and unmask only
 * the table entries that have a vector assigned and at least one
 * handler, and finally rewrite the saved MSI-X control register.
 */
1435 pci_resume_msix(device_t dev)
1437 struct pci_devinfo *dinfo = device_get_ivars(dev);
1438 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1439 struct msix_table_entry *mte;
1440 struct msix_vector *mv;
1443 if (msix->msix_alloc > 0) {
1444 /* First, mask all vectors. */
1445 for (i = 0; i < msix->msix_msgnum; i++)
1446 pci_mask_msix_vector(dev, i);
1448 /* Second, program any messages with at least one handler. */
1449 for (i = 0; i < msix->msix_table_len; i++) {
1450 mte = &msix->msix_table[i];
1451 if (mte->mte_vector == 0 || mte->mte_handlers == 0)
/* mte_vector is a 1-based index into msix_vectors[]. */
1453 mv = &msix->msix_vectors[mte->mte_vector - 1];
1454 pci_setup_msix_vector(dev, i, mv->mv_address,
1456 pci_unmask_msix_vector(dev, i);
1459 pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
1460 msix->msix_ctrl, 2);
1464 * Attempt to allocate *count MSI-X messages. The actual number allocated is
1465 * returned in *count. After this function returns, each message will be
1466 * available to the driver as SYS_RES_IRQ resources starting at rid 1.
/*
 * Preconditions checked below: rid 0 (the INTx resource) must not be
 * in use, no MSI or MSI-X messages may already be allocated, MSI must
 * not be blacklisted, the device must have an MSI-X capability, and
 * the BAR(s) holding the MSI-X table and PBA must already be mapped
 * and active.  On success the MSI-X enable bit is set and the
 * bookkeeping (msix_alloc/msix_table_len) updated.
 */
1469 pci_alloc_msix_method(device_t dev, device_t child, int *count)
1471 struct pci_devinfo *dinfo = device_get_ivars(child);
1472 pcicfgregs *cfg = &dinfo->cfg;
1473 struct resource_list_entry *rle;
1474 int actual, error, i, irq, max;
1476 /* Don't let count == 0 get us into trouble. */
1480 /* If rid 0 is allocated, then fail. */
1481 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
1482 if (rle != NULL && rle->res != NULL)
1485 /* Already have allocated messages? */
1486 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
1489 /* If MSI is blacklisted for this system, fail. */
1490 if (pci_msi_blacklisted())
1493 /* MSI-X capability present? */
1494 if (cfg->msix.msix_location == 0 || !pci_do_msix)
1497 /* Make sure the appropriate BARs are mapped. */
1498 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1499 cfg->msix.msix_table_bar);
1500 if (rle == NULL || rle->res == NULL ||
1501 !(rman_get_flags(rle->res) & RF_ACTIVE))
1503 cfg->msix.msix_table_res = rle->res;
/* The PBA may live in a different BAR than the vector table. */
1504 if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
1505 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1506 cfg->msix.msix_pba_bar);
1507 if (rle == NULL || rle->res == NULL ||
1508 !(rman_get_flags(rle->res) & RF_ACTIVE))
1511 cfg->msix.msix_pba_res = rle->res;
1514 device_printf(child,
1515 "attempting to allocate %d MSI-X vectors (%d supported)\n",
1516 *count, cfg->msix.msix_msgnum);
/* Never ask the parent bridge for more than the device supports. */
1517 max = min(*count, cfg->msix.msix_msgnum);
1518 for (i = 0; i < max; i++) {
1519 /* Allocate a message. */
1520 error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq,
/* Each allocated IRQ becomes SYS_RES_IRQ rid i + 1. */
1524 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
1531 device_printf(child,
1532 "could not allocate any MSI-X vectors\n");
1538 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1);
1540 device_printf(child, "using IRQ %lu for MSI-X\n",
1546 * Be fancy and try to print contiguous runs of
1547 * IRQ values as ranges. 'irq' is the previous IRQ.
1548 * 'run' is true if we are in a range.
1550 device_printf(child, "using IRQs %lu", rle->start);
1553 for (i = 1; i < actual; i++) {
1554 rle = resource_list_find(&dinfo->resources,
1555 SYS_RES_IRQ, i + 1);
1557 /* Still in a run? */
1558 if (rle->start == irq + 1) {
1564 /* Finish previous range. */
1566 kprintf("-%d", irq);
1570 /* Start new range. */
1571 kprintf(",%lu", rle->start);
1575 /* Unfinished range? */
1577 kprintf("-%d", irq);
1578 kprintf(" for MSI-X\n");
1582 /* Mask all vectors. */
1583 for (i = 0; i < cfg->msix.msix_msgnum; i++)
1584 pci_mask_msix_vector(child, i);
1586 /* Allocate and initialize vector data and virtual table. */
1587 cfg->msix.msix_vectors = kmalloc(sizeof(struct msix_vector) * actual,
1588 M_DEVBUF, M_WAITOK | M_ZERO);
1589 cfg->msix.msix_table = kmalloc(sizeof(struct msix_table_entry) * actual,
1590 M_DEVBUF, M_WAITOK | M_ZERO);
1591 for (i = 0; i < actual; i++) {
1592 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1593 cfg->msix.msix_vectors[i].mv_irq = rle->start;
1594 cfg->msix.msix_table[i].mte_vector = i + 1;
1597 /* Update control register to enable MSI-X. */
1598 cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1599 pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
1600 cfg->msix.msix_ctrl, 2);
1602 /* Update counts of alloc'd messages. */
1603 cfg->msix.msix_alloc = actual;
1604 cfg->msix.msix_table_len = actual;
/*
 * Release all MSI-X messages of 'child': verify nothing is still in
 * use (no handlers, no allocated SYS_RES_IRQ resources), clear the
 * MSI-X enable bit, delete the rid 1..N resource list entries, hand
 * the IRQs back to the parent bridge, and free the bookkeeping
 * tables.  Fails (path elided here) if any vector is still busy.
 */
1611 pci_release_msix(device_t dev, device_t child)
1613 struct pci_devinfo *dinfo = device_get_ivars(child);
1614 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1615 struct resource_list_entry *rle;
1618 /* Do we have any messages to release? */
1619 if (msix->msix_alloc == 0)
1622 /* Make sure none of the resources are allocated. */
1623 for (i = 0; i < msix->msix_table_len; i++) {
1624 if (msix->msix_table[i].mte_vector == 0)
1626 if (msix->msix_table[i].mte_handlers > 0)
1628 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1629 KASSERT(rle != NULL, ("missing resource"));
1630 if (rle->res != NULL)
1634 /* Update control register to disable MSI-X. */
1635 msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE;
1636 pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL,
1637 msix->msix_ctrl, 2);
1639 /* Free the resource list entries. */
1640 for (i = 0; i < msix->msix_table_len; i++) {
1641 if (msix->msix_table[i].mte_vector == 0)
1643 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1645 kfree(msix->msix_table, M_DEVBUF);
1646 msix->msix_table_len = 0;
1648 /* Release the IRQs. */
1649 for (i = 0; i < msix->msix_alloc; i++)
1650 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1651 msix->msix_vectors[i].mv_irq, -1 /* XXX */);
1652 kfree(msix->msix_vectors, M_DEVBUF);
1653 msix->msix_alloc = 0;
1659 * Return the max supported MSI-X messages this device supports.
1660 * Basically, assuming the MD code can alloc messages, this function
1661 * should return the maximum value that pci_alloc_msix() can return.
1662 * Thus, it is subject to the tunables, etc.
/*
 * Returns 0 (path elided here) when MSI-X is disabled by the
 * pci_do_msix tunable or the device lacks the capability.
 */
1665 pci_msix_count_method(device_t dev, device_t child)
1667 struct pci_devinfo *dinfo = device_get_ivars(child);
1668 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1670 if (pci_do_msix && msix->msix_location != 0)
1671 return (msix->msix_msgnum);
1676 * HyperTransport MSI mapping control
/*
 * Enable or disable the HyperTransport MSI address window for this
 * device.  A non-zero 'addr' enables the mapping, but only when the
 * window is currently disabled and the address falls inside the
 * mapping's 1MB-aligned window (compared by dropping the low 20
 * bits).  addr == 0 disables an enabled mapping.  Devices without an
 * HT MSI mapping capability are handled by an elided early return.
 */
1679 pci_ht_map_msi(device_t dev, uint64_t addr)
1681 struct pci_devinfo *dinfo = device_get_ivars(dev);
1682 struct pcicfg_ht *ht = &dinfo->cfg.ht;
1687 if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) &&
1688 ht->ht_msiaddr >> 20 == addr >> 20) {
1689 /* Enable MSI -> HT mapping. */
1690 ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
1691 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1695 if (!addr && ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) {
1696 /* Disable MSI -> HT mapping. */
1697 ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE;
1698 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1704 * Support for MSI message signalled interrupts.
/*
 * Program the device's MSI capability with the given message address
 * and data, then set the MSI enable bit.  For 64-bit-capable devices
 * the high address dword and the relocated data register
 * (PCIR_MSI_DATA_64BIT) are used; otherwise data goes to the 32-bit
 * data register.  Finishes by enabling any HT MSI mapping.
 */
1707 pci_enable_msi(device_t dev, uint64_t address, uint16_t data)
1709 struct pci_devinfo *dinfo = device_get_ivars(dev);
1710 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1712 /* Write data and address values. */
1713 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
1714 address & 0xffffffff, 4);
1715 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1716 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR_HIGH,
1718 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA_64BIT,
1721 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA, data,
1724 /* Enable MSI in the control register. */
1725 msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE;
1726 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1729 /* Enable MSI -> HT mapping. */
1730 pci_ht_map_msi(dev, address);
/*
 * Turn off MSI for the device: tear down any HT MSI mapping first
 * (pci_ht_map_msi with addr 0), then clear the MSI enable bit in the
 * cached control value and write it back.
 */
1734 pci_disable_msi(device_t dev)
1736 struct pci_devinfo *dinfo = device_get_ivars(dev);
1737 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1739 /* Disable MSI -> HT mapping. */
1740 pci_ht_map_msi(dev, 0);
1742 /* Disable MSI in the control register. */
1743 msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE;
1744 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1749 * Restore MSI registers during resume. If MSI is enabled then
1750 * restore the data and address registers in addition to the control
/*
 * The address/data values are replayed from the cached msi_addr and
 * msi_data; the control register is rewritten unconditionally at the
 * end so the enable/MME state survives the resume.
 */
1754 pci_resume_msi(device_t dev)
1756 struct pci_devinfo *dinfo = device_get_ivars(dev);
1757 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1761 if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) {
1762 address = msi->msi_addr;
1763 data = msi->msi_data;
1764 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
1765 address & 0xffffffff, 4);
1766 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1767 pci_write_config(dev, msi->msi_location +
1768 PCIR_MSI_ADDR_HIGH, address >> 32, 4);
1769 pci_write_config(dev, msi->msi_location +
1770 PCIR_MSI_DATA_64BIT, data, 2);
1772 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA,
1775 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1780 * Returns true if the specified device is blacklisted because MSI
/*
 * Scans the pci_quirks[] table (terminated by a zero devid) for a
 * PCI_QUIRK_DISABLE_MSI entry matching this device's vendor/device
 * ID.  The check is skipped entirely when the
 * pci_honor_msi_blacklist tunable is off.
 */
1784 pci_msi_device_blacklisted(device_t dev)
1786 struct pci_quirk *q;
1788 if (!pci_honor_msi_blacklist)
1791 for (q = &pci_quirks[0]; q->devid; q++) {
1792 if (q->devid == pci_get_devid(dev) &&
1793 q->type == PCI_QUIRK_DISABLE_MSI)
1800 * Determine if MSI is blacklisted globally on this system. Currently,
1801 * we just check for blacklisted chipsets as represented by the
1802 * host-PCI bridge at device 0:0:0. In the future, it may become
1803 * necessary to check other system attributes, such as the kenv values
1804 * that give the motherboard manufacturer and model number.
1807 pci_msi_blacklisted(void)
1811 if (!pci_honor_msi_blacklist)
1814 /* Blacklist all non-PCI-express and non-PCI-X chipsets. */
1815 if (!(pcie_chipset || pcix_chipset))
/* Judge the whole system by its host bridge at domain 0, 0:0:0. */
1818 dev = pci_find_bsf(0, 0, 0);
1820 return (pci_msi_device_blacklisted(dev));
1825 * Attempt to allocate count MSI messages on start_cpuid.
1827 * If start_cpuid < 0, then the MSI messages' target CPU will be
1828 * selected automaticly.
1830 * If the caller explicitly specified the MSI messages' target CPU,
1831 * i.e. start_cpuid >= 0, then we will try to allocate the count MSI
1832 * messages on the specified CPU, if the allocation fails due to MD
1833 * does not have enough vectors (EMSGSIZE), then we will try next
1834 * available CPU, until the allocation fails on all CPUs.
1836 * EMSGSIZE will be returned, if all available CPUs does not have
1837 * enough vectors for the requested amount of MSI messages. Caller
1838 * should either reduce the amount of MSI messages to be requested,
1839 * or simply giving up using MSI.
1841 * The available SYS_RES_IRQ resources' rids, which are >= 1, are
1842 * returned in 'rid' array, if the allocation succeeds.
1845 pci_alloc_msi_method(device_t dev, device_t child, int *rid, int count,
1848 struct pci_devinfo *dinfo = device_get_ivars(child);
1849 pcicfgregs *cfg = &dinfo->cfg;
1850 struct resource_list_entry *rle;
1851 int error, i, irqs[32], cpuid = 0;
/* MSI allows at most 32 messages and requires a power-of-2 count. */
1854 KASSERT(count != 0 && count <= 32 && powerof2(count),
1855 ("invalid MSI count %d\n", count));
1856 KASSERT(start_cpuid < ncpus, ("invalid cpuid %d\n", start_cpuid));
1858 /* If rid 0 is allocated, then fail. */
1859 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
1860 if (rle != NULL && rle->res != NULL)
1863 /* Already have allocated messages? */
1864 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
1867 /* If MSI is blacklisted for this system, fail. */
1868 if (pci_msi_blacklisted())
1871 /* MSI capability present? */
1872 if (cfg->msi.msi_location == 0 || !pci_do_msi)
1875 KASSERT(count <= cfg->msi.msi_msgnum, ("large MSI count %d, max %d\n",
1876 count, cfg->msi.msi_msgnum));
1879 device_printf(child,
1880 "attempting to allocate %d MSI vectors (%d supported)\n",
1881 count, cfg->msi.msi_msgnum);
/* Auto-select: round-robin the starting CPU across allocations. */
1884 if (start_cpuid < 0)
1885 start_cpuid = atomic_fetchadd_int(&pci_msi_cpuid, 1) % ncpus;
/*
 * Walk the CPUs starting at start_cpuid; EMSGSIZE from the bridge
 * means "not enough vectors on this CPU", so try the next one.
 */
1888 for (i = 0; i < ncpus; ++i) {
1889 cpuid = (start_cpuid + i) % ncpus;
1891 error = PCIB_ALLOC_MSI(device_get_parent(dev), child, count,
1892 cfg->msi.msi_msgnum, irqs, cpuid);
1895 else if (error != EMSGSIZE)
1902 * We now have N messages mapped onto SYS_RES_IRQ resources in
1903 * the irqs[] array, so add new resources starting at rid 1.
1905 for (i = 0; i < count; i++) {
1907 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
1908 irqs[i], irqs[i], 1, cpuid);
1913 device_printf(child, "using IRQ %d on cpu%d for MSI\n",
1919 * Be fancy and try to print contiguous runs
1920 * of IRQ values as ranges. 'run' is true if
1921 * we are in a range.
1923 device_printf(child, "using IRQs %d", irqs[0]);
1925 for (i = 1; i < count; i++) {
1927 /* Still in a run? */
1928 if (irqs[i] == irqs[i - 1] + 1) {
1933 /* Finish previous range. */
1935 kprintf("-%d", irqs[i - 1]);
1939 /* Start new range. */
1940 kprintf(",%d", irqs[i]);
1943 /* Unfinished range? */
1945 kprintf("-%d", irqs[count - 1]);
1946 kprintf(" for MSI on cpu%d\n", cpuid);
1950 /* Update control register with count. */
1951 ctrl = cfg->msi.msi_ctrl;
1952 ctrl &= ~PCIM_MSICTRL_MME_MASK;
/* Multiple Message Enable field encodes log2(count) at bits 6:4. */
1953 ctrl |= (ffs(count) - 1) << 4;
1954 cfg->msi.msi_ctrl = ctrl;
1955 pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2);
1957 /* Update counts of alloc'd messages. */
1958 cfg->msi.msi_alloc = count;
1959 cfg->msi.msi_handlers = 0;
1963 /* Release the MSI messages associated with this device. */
/*
 * Verifies no handlers remain and no rid 1..N IRQ resources are still
 * allocated, checks all messages target the same CPU, clears the MME
 * field (MSI enable must already be off — asserted below), returns
 * the IRQs to the parent bridge and deletes the resource entries.
 */
1965 pci_release_msi_method(device_t dev, device_t child)
1967 struct pci_devinfo *dinfo = device_get_ivars(child);
1968 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1969 struct resource_list_entry *rle;
1970 int i, irqs[32], cpuid = -1;
1972 /* Do we have any messages to release? */
1973 if (msi->msi_alloc == 0)
1975 KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages"));
1977 /* Make sure none of the resources are allocated. */
1978 if (msi->msi_handlers > 0)
1980 for (i = 0; i < msi->msi_alloc; i++) {
1981 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1982 KASSERT(rle != NULL, ("missing MSI resource"));
1983 if (rle->res != NULL)
/* All messages were allocated on one CPU; verify they agree. */
1987 KASSERT(cpuid >= 0 && cpuid < ncpus,
1988 ("invalid MSI target cpuid %d\n", cpuid));
1990 KASSERT(rle->cpuid == cpuid,
1991 ("MSI targets different cpus, "
1992 "was cpu%d, now cpu%d", cpuid, rle->cpuid));
1994 irqs[i] = rle->start;
1997 /* Update control register with 0 count. */
1998 KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE),
1999 ("%s: MSI still enabled", __func__));
2000 msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK;
2001 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
2004 /* Release the messages. */
2005 PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs,
2007 for (i = 0; i < msi->msi_alloc; i++)
2008 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
2010 /* Update alloc count. */
2018 * Return the max supported MSI messages this device supports.
2019 * Basically, assuming the MD code can alloc messages, this function
2020 * should return the maximum value that pci_alloc_msi() can return.
2021 * Thus, it is subject to the tunables, etc.
/*
 * Returns 0 (path elided here) when MSI is disabled via the
 * pci_do_msi tunable or the device has no MSI capability.
 */
2024 pci_msi_count_method(device_t dev, device_t child)
2026 struct pci_devinfo *dinfo = device_get_ivars(child);
2027 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2029 if (pci_do_msi && msi->msi_location != 0)
2030 return (msi->msi_msgnum);
2034 /* kfree pcicfgregs structure and all depending data structures */
/*
 * Frees the VPD strings (identifier, read-only and writable keyword
 * values and their arrays) when VPD was present, unlinks the devinfo
 * from the global pci_devq list, and frees the devinfo itself.  The
 * generation/device-count bookkeeping follows (elided here).
 */
2037 pci_freecfg(struct pci_devinfo *dinfo)
2039 struct devlist *devlist_head;
2042 devlist_head = &pci_devq;
2044 if (dinfo->cfg.vpd.vpd_reg) {
2045 kfree(dinfo->cfg.vpd.vpd_ident, M_DEVBUF);
2046 for (i = 0; i < dinfo->cfg.vpd.vpd_rocnt; i++)
2047 kfree(dinfo->cfg.vpd.vpd_ros[i].value, M_DEVBUF);
2048 kfree(dinfo->cfg.vpd.vpd_ros, M_DEVBUF);
2049 for (i = 0; i < dinfo->cfg.vpd.vpd_wcnt; i++)
2050 kfree(dinfo->cfg.vpd.vpd_w[i].value, M_DEVBUF);
2051 kfree(dinfo->cfg.vpd.vpd_w, M_DEVBUF);
2053 STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links);
2054 kfree(dinfo, M_DEVBUF);
2056 /* increment the generation count */
2059 /* we're losing one device */
2065 * PCI power management
/*
 * Transition 'child' to the requested D-state via its power
 * management capability.  Returns EOPNOTSUPP when the device lacks
 * the capability or does not support the requested state (D1/D2 are
 * optional).  The required settle delay after the write is computed
 * from the deeper of the two states involved.
 */
2068 pci_set_powerstate_method(device_t dev, device_t child, int state)
2070 struct pci_devinfo *dinfo = device_get_ivars(child);
2071 pcicfgregs *cfg = &dinfo->cfg;
2073 int result, oldstate, highest, delay;
2075 if (cfg->pp.pp_cap == 0)
2076 return (EOPNOTSUPP);
2079 * Optimize a no state change request away. While it would be OK to
2080 * write to the hardware in theory, some devices have shown odd
2081 * behavior when going from D3 -> D3.
2083 oldstate = pci_get_powerstate(child);
2084 if (oldstate == state)
2088 * The PCI power management specification states that after a state
2089 * transition between PCI power states, system software must
2090 * guarantee a minimal delay before the function accesses the device.
2091 * Compute the worst case delay that we need to guarantee before we
2092 * access the device. Many devices will be responsive much more
2093 * quickly than this delay, but there are some that don't respond
2094 * instantly to state changes. Transitions to/from D3 state require
2095 * 10ms, while D2 requires 200us, and D0/1 require none. The delay
2096 * is done below with DELAY rather than a sleeper function because
2097 * this function can be called from contexts where we cannot sleep.
2099 highest = (oldstate > state) ? oldstate : state;
2100 if (highest == PCI_POWERSTATE_D3)
2102 else if (highest == PCI_POWERSTATE_D2)
/* Preserve the non-state bits of PMCSR; replace only the D-state. */
2106 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2)
2107 & ~PCIM_PSTAT_DMASK;
2110 case PCI_POWERSTATE_D0:
2111 status |= PCIM_PSTAT_D0;
2113 case PCI_POWERSTATE_D1:
2114 if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0)
2115 return (EOPNOTSUPP);
2116 status |= PCIM_PSTAT_D1;
2118 case PCI_POWERSTATE_D2:
2119 if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0)
2120 return (EOPNOTSUPP);
2121 status |= PCIM_PSTAT_D2;
2123 case PCI_POWERSTATE_D3:
2124 status |= PCIM_PSTAT_D3;
2132 "pci%d:%d:%d:%d: Transition from D%d to D%d\n",
2133 dinfo->cfg.domain, dinfo->cfg.bus, dinfo->cfg.slot,
2134 dinfo->cfg.func, oldstate, state);
2136 PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2);
/*
 * Read the device's current power state from the PMCSR register of
 * its power management capability.  Devices without the capability
 * are, by definition, always in D0.
 */
2143 pci_get_powerstate_method(device_t dev, device_t child)
2145 struct pci_devinfo *dinfo = device_get_ivars(child);
2146 pcicfgregs *cfg = &dinfo->cfg;
2150 if (cfg->pp.pp_cap != 0) {
2151 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2);
2152 switch (status & PCIM_PSTAT_DMASK) {
2154 result = PCI_POWERSTATE_D0;
2157 result = PCI_POWERSTATE_D1;
2160 result = PCI_POWERSTATE_D2;
2163 result = PCI_POWERSTATE_D3;
2166 result = PCI_POWERSTATE_UNKNOWN;
2170 /* No support, device is always at D0 */
2171 result = PCI_POWERSTATE_D0;
2177 * Some convenience functions for PCI device drivers.
/* Read-modify-write helper: OR 'bit' into the PCI command register. */
2180 static __inline void
2181 pci_set_command_bit(device_t dev, device_t child, uint16_t bit)
2185 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2187 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
/* Read-modify-write helper: clear 'bit' in the PCI command register. */
2190 static __inline void
2191 pci_clear_command_bit(device_t dev, device_t child, uint16_t bit)
2195 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2197 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
/* Set the bus-master enable bit in the child's command register. */
2201 pci_enable_busmaster_method(device_t dev, device_t child)
2203 pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
/* Clear the bus-master enable bit in the child's command register. */
2208 pci_disable_busmaster_method(device_t dev, device_t child)
2210 pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
/*
 * Enable I/O port or memory decoding for the child, selected by
 * 'space' (SYS_RES_IOPORT or SYS_RES_MEMORY), then read the command
 * register back to verify the bit actually stuck.
 */
2215 pci_enable_io_method(device_t dev, device_t child, int space)
2225 case SYS_RES_IOPORT:
2226 bit = PCIM_CMD_PORTEN;
2229 case SYS_RES_MEMORY:
2230 bit = PCIM_CMD_MEMEN;
2236 pci_set_command_bit(dev, child, bit);
2237 /* Some devices seem to need a brief stall here, what to do? */
2238 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2241 device_printf(child, "failed to enable %s mapping!\n", error);
/*
 * Disable I/O port or memory decoding for the child, selected by
 * 'space', and verify via read-back that the bit cleared.
 */
2246 pci_disable_io_method(device_t dev, device_t child, int space)
2256 case SYS_RES_IOPORT:
2257 bit = PCIM_CMD_PORTEN;
2260 case SYS_RES_MEMORY:
2261 bit = PCIM_CMD_MEMEN;
2267 pci_clear_command_bit(dev, child, bit);
2268 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2269 if (command & bit) {
2270 device_printf(child, "failed to disable %s mapping!\n", error);
2277 * New style pci driver. Parent device is either a pci-host-bridge or a
2278 * pci-pci-bridge. Both kinds are represented by instances of pcib.
/*
 * Dump a multi-line bootverbose description of a probed device:
 * IDs, location, class, command/status, timing registers, interrupt
 * pin/line, and any power-management, MSI, MSI-X and PCI Express
 * capabilities found in its config header.
 */
2282 pci_print_verbose(struct pci_devinfo *dinfo)
2286 pcicfgregs *cfg = &dinfo->cfg;
2288 kprintf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n",
2289 cfg->vendor, cfg->device, cfg->revid);
2290 kprintf("\tdomain=%d, bus=%d, slot=%d, func=%d\n",
2291 cfg->domain, cfg->bus, cfg->slot, cfg->func);
2292 kprintf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n",
2293 cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype,
2295 kprintf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n",
2296 cfg->cmdreg, cfg->statreg, cfg->cachelnsz);
2297 kprintf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n",
2298 cfg->lattimer, cfg->lattimer * 30, cfg->mingnt,
2299 cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250);
2300 if (cfg->intpin > 0)
2301 kprintf("\tintpin=%c, irq=%d\n",
2302 cfg->intpin +'a' -1, cfg->intline);
2303 if (cfg->pp.pp_cap) {
2306 status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2);
2307 kprintf("\tpowerspec %d supports D0%s%s D3 current D%d\n",
2308 cfg->pp.pp_cap & PCIM_PCAP_SPEC,
2309 cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "",
2310 cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "",
2311 status & PCIM_PSTAT_DMASK);
2313 if (cfg->msi.msi_location) {
2316 ctrl = cfg->msi.msi_ctrl;
2317 kprintf("\tMSI supports %d message%s%s%s\n",
2318 cfg->msi.msi_msgnum,
2319 (cfg->msi.msi_msgnum == 1) ? "" : "s",
2320 (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "",
2321 (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":"");
2323 if (cfg->msix.msix_location) {
2324 kprintf("\tMSI-X supports %d message%s ",
2325 cfg->msix.msix_msgnum,
2326 (cfg->msix.msix_msgnum == 1) ? "" : "s");
/* Table and PBA may share a single BAR or use two distinct BARs. */
2327 if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar)
2328 kprintf("in map 0x%x\n",
2329 cfg->msix.msix_table_bar);
2331 kprintf("in maps 0x%x and 0x%x\n",
2332 cfg->msix.msix_table_bar,
2333 cfg->msix.msix_pba_bar);
2335 pci_print_verbose_expr(cfg);
/*
 * Print the PCI Express capability details for a device: capability
 * version, decoded port type, and (for ports that implement a slot)
 * the slot capabilities including hot-plug support.  Returns silently
 * when the device has no PCIe capability.
 */
2340 pci_print_verbose_expr(const pcicfgregs *cfg)
2342 const struct pcicfg_expr *expr = &cfg->expr;
2343 const char *port_name;
2349 if (expr->expr_ptr == 0) /* No PCI Express capability */
2352 kprintf("\tPCI Express ver.%d cap=0x%04x",
2353 expr->expr_cap & PCIEM_CAP_VER_MASK, expr->expr_cap);
/* Only capability version 1 layouts are decoded further here. */
2354 if ((expr->expr_cap & PCIEM_CAP_VER_MASK) != PCIEM_CAP_VER_1)
2357 port_type = expr->expr_cap & PCIEM_CAP_PORT_TYPE;
2359 switch (port_type) {
2360 case PCIE_END_POINT:
2361 port_name = "DEVICE";
2363 case PCIE_LEG_END_POINT:
2364 port_name = "LEGDEV";
2366 case PCIE_ROOT_PORT:
2369 case PCIE_UP_STREAM_PORT:
2370 port_name = "UPSTREAM";
2372 case PCIE_DOWN_STREAM_PORT:
2373 port_name = "DOWNSTRM";
2375 case PCIE_PCIE2PCI_BRIDGE:
2376 port_name = "PCIE2PCI";
2378 case PCIE_PCI2PCIE_BRIDGE:
2379 port_name = "PCI2PCIE";
/* Root/downstream ports without SLOT_IMPL have no slot to report. */
2385 if ((port_type == PCIE_ROOT_PORT ||
2386 port_type == PCIE_DOWN_STREAM_PORT) &&
2387 !(expr->expr_cap & PCIEM_CAP_SLOT_IMPL))
2389 if (port_name != NULL)
2390 kprintf("[%s]", port_name);
2392 if (pcie_slotimpl(cfg)) {
2393 kprintf(", slotcap=0x%08x", expr->expr_slotcap);
2394 if (expr->expr_slotcap & PCIEM_SLTCAP_HP_CAP)
2395 kprintf("[HOTPLUG]");
/* Return non-zero if I/O port decoding is enabled for b:s:f. */
2402 pci_porten(device_t pcib, int b, int s, int f)
2404 return (PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2)
2405 & PCIM_CMD_PORTEN) != 0;
/* Return non-zero if memory decoding is enabled for b:s:f. */
2409 pci_memen(device_t pcib, int b, int s, int f)
2411 return (PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2)
2412 & PCIM_CMD_MEMEN) != 0;
2416 * Add a resource based on a pci map register. Return 1 if the map
2417 * register is a 32bit map register or 2 if it is a 64bit register.
/*
 * Probes one BAR by the classic write-all-ones technique: save the
 * BAR, write 0xffffffff, read back the size mask, restore the BAR.
 * Validates the BAR, optionally re-enables decoding that a lazy BIOS
 * left off, adds the range to the device's resource list, and tries
 * to pre-reserve it from the parent so pci_alloc_resource() can later
 * hand it out.  'force' allows adding BARs with base 0 or all-ones
 * contents that would normally be ignored.
 */
2420 pci_add_map(device_t pcib, device_t bus, device_t dev,
2421 int b, int s, int f, int reg, struct resource_list *rl, int force,
2426 pci_addr_t start, end, count;
2433 struct resource *res;
2435 map = PCIB_READ_CONFIG(pcib, b, s, f, reg, 4);
2436 PCIB_WRITE_CONFIG(pcib, b, s, f, reg, 0xffffffff, 4);
2437 testval = PCIB_READ_CONFIG(pcib, b, s, f, reg, 4);
2438 PCIB_WRITE_CONFIG(pcib, b, s, f, reg, map, 4);
2440 if (PCI_BAR_MEM(map)) {
2441 type = SYS_RES_MEMORY;
2442 if (map & PCIM_BAR_MEM_PREFETCH)
2445 type = SYS_RES_IOPORT;
2446 ln2size = pci_mapsize(testval);
2447 ln2range = pci_maprange(testval);
2448 base = pci_mapbase(map);
/* A 64-bit BAR consumes two consecutive 32-bit map registers. */
2449 barlen = ln2range == 64 ? 2 : 1;
2452 * For I/O registers, if bottom bit is set, and the next bit up
2453 * isn't clear, we know we have a BAR that doesn't conform to the
2454 * spec, so ignore it. Also, sanity check the size of the data
2455 * areas to the type of memory involved. Memory must be at least
2456 * 16 bytes in size, while I/O ranges must be at least 4.
2458 if (PCI_BAR_IO(testval) && (testval & PCIM_BAR_IO_RESERVED) != 0)
2460 if ((type == SYS_RES_MEMORY && ln2size < 4) ||
2461 (type == SYS_RES_IOPORT && ln2size < 2))
2465 /* Read the other half of a 64bit map register */
2466 base |= (uint64_t) PCIB_READ_CONFIG(pcib, b, s, f, reg + 4, 4) << 32;
2468 kprintf("\tmap[%02x]: type %s, range %2d, base %#jx, size %2d",
2469 reg, pci_maptype(map), ln2range, (uintmax_t)base, ln2size);
2470 if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f))
2471 kprintf(", port disabled\n");
2472 else if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f))
2473 kprintf(", memory disabled\n");
2475 kprintf(", enabled\n");
2479 * If base is 0, then we have problems. It is best to ignore
2480 * such entries for the moment. These will be allocated later if
2481 * the driver specifically requests them. However, some
2482 * removable busses look better when all resources are allocated,
2483 * so allow '0' to be overriden.
2485 * Similarly treat maps whose values is the same as the test value
2486 * read back. These maps have had all f's written to them by the
2487 * BIOS in an attempt to disable the resources.
2489 if (!force && (base == 0 || map == testval))
/* Reject BARs whose address doesn't fit this platform's u_long. */
2491 if ((u_long)base != base) {
2493 "pci%d:%d:%d:%d bar %#x too many address bits",
2494 pci_get_domain(dev), b, s, f, reg);
2499 * This code theoretically does the right thing, but has
2500 * undesirable side effects in some cases where peripherals
2501 * respond oddly to having these bits enabled. Let the user
2502 * be able to turn them off (since pci_enable_io_modes is 1 by
2505 if (pci_enable_io_modes) {
2506 /* Turn on resources that have been left off by a lazy BIOS */
2507 if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f)) {
2508 cmd = PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2);
2509 cmd |= PCIM_CMD_PORTEN;
2510 PCIB_WRITE_CONFIG(pcib, b, s, f, PCIR_COMMAND, cmd, 2);
2512 if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f)) {
2513 cmd = PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2);
2514 cmd |= PCIM_CMD_MEMEN;
2515 PCIB_WRITE_CONFIG(pcib, b, s, f, PCIR_COMMAND, cmd, 2);
2518 if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f))
2520 if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f))
2524 count = 1 << ln2size;
2525 if (base == 0 || base == pci_mapbase(testval)) {
2526 start = 0; /* Let the parent decide. */
2530 end = base + (1 << ln2size) - 1;
2532 resource_list_add(rl, type, reg, start, end, count, -1);
2535 * Try to allocate the resource for this BAR from our parent
2536 * so that this resource range is already reserved. The
2537 * driver for this device will later inherit this resource in
2538 * pci_alloc_resource().
2540 res = resource_list_alloc(rl, bus, dev, type, &reg, start, end, count,
2541 prefetch ? RF_PREFETCHABLE : 0, -1);
2544 * If the allocation fails, delete the resource list
2545 * entry to force pci_alloc_resource() to allocate
2546 * resources from the parent.
2548 resource_list_delete(rl, type, reg);
2549 #ifdef PCI_BAR_CLEAR
2552 #else /* !PCI_BAR_CLEAR */
2554 * Don't clear BAR here. Some BIOS lists HPET as a
2555 * PCI function, clearing the BAR causes HPET timer
2559 kprintf("pci:%d:%d:%d: resource reservation failed "
2560 "%#jx - %#jx\n", b, s, f,
2561 (intmax_t)start, (intmax_t)end);
2564 #endif /* PCI_BAR_CLEAR */
/* Write the (possibly relocated) address back into the BAR. */
2566 start = rman_get_start(res);
2568 pci_write_config(dev, reg, start, 4);
2570 pci_write_config(dev, reg + 4, start >> 32, 4);
2575 * For ATA devices we need to decide early what addressing mode to use.
2576 * Legacy demands that the primary and secondary ATA ports sits on the
2577 * same addresses that old ISA hardware did. This dictates that we use
2578 * those addresses and ignore the BAR's if we cannot set PCI native
/*
 * Tries to switch the controller to PCI-native mode (progif bits 0/2
 * writable, indicated by 0x8a mask) when both BAR 0 and BAR 2 look
 * usable.  Channels left in legacy/compatibility mode get the fixed
 * ISA-era port ranges (0x1f0/0x3f6 primary, 0x170/0x376 secondary)
 * instead of their BARs; BARs 4 and 5 (bus-master DMA etc.) are
 * always added normally.
 */
2582 pci_ata_maps(device_t pcib, device_t bus, device_t dev, int b,
2583 int s, int f, struct resource_list *rl, int force, uint32_t prefetchmask)
2585 int rid, type, progif;
2587 /* if this device supports PCI native addressing use it */
2588 progif = pci_read_config(dev, PCIR_PROGIF, 1);
2589 if ((progif & 0x8a) == 0x8a) {
2590 if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) &&
2591 pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) {
2592 kprintf("Trying ATA native PCI addressing mode\n");
2593 pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1);
2597 progif = pci_read_config(dev, PCIR_PROGIF, 1);
2598 type = SYS_RES_IOPORT;
2599 if (progif & PCIP_STORAGE_IDE_MODEPRIM) {
/* Primary channel in native mode: use BAR 0/1. */
2600 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(0), rl, force,
2601 prefetchmask & (1 << 0));
2602 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(1), rl, force,
2603 prefetchmask & (1 << 1));
/* Primary channel in legacy mode: fixed ISA-compatible ports. */
2606 resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8, -1);
2607 resource_list_alloc(rl, bus, dev, type, &rid, 0x1f0, 0x1f7, 8,
2610 resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1, -1);
2611 resource_list_alloc(rl, bus, dev, type, &rid, 0x3f6, 0x3f6, 1,
2614 if (progif & PCIP_STORAGE_IDE_MODESEC) {
/* Secondary channel in native mode: use BAR 2/3. */
2615 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(2), rl, force,
2616 prefetchmask & (1 << 2));
2617 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(3), rl, force,
2618 prefetchmask & (1 << 3));
/* Secondary channel in legacy mode: fixed ISA-compatible ports. */
2621 resource_list_add(rl, type, rid, 0x170, 0x177, 8, -1);
2622 resource_list_alloc(rl, bus, dev, type, &rid, 0x170, 0x177, 8,
2625 resource_list_add(rl, type, rid, 0x376, 0x376, 1, -1);
2626 resource_list_alloc(rl, bus, dev, type, &rid, 0x376, 0x376, 1,
2629 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(4), rl, force,
2630 prefetchmask & (1 << 4));
2631 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(5), rl, force,
2632 prefetchmask & (1 << 5));
/*
 * Work out which legacy IRQ a device should use and register it as
 * SYS_RES_IRQ rid 0.  Precedence: a user tunable
 * (hw.pci<dom>.<bus>.<slot>.INT<pin>.irq), then the intline register
 * or a bus-routed interrupt (routing forced when 'force_route' is
 * set or intline is invalid).  Devices without an interrupt pin, or
 * for which no valid IRQ can be found, get no resource (early
 * returns elided).  The config intline register is updated when the
 * chosen IRQ differs.
 */
2636 pci_assign_interrupt(device_t bus, device_t dev, int force_route)
2638 struct pci_devinfo *dinfo = device_get_ivars(dev);
2639 pcicfgregs *cfg = &dinfo->cfg;
2640 char tunable_name[64];
2643 /* Has to have an intpin to have an interrupt. */
2644 if (cfg->intpin == 0)
2647 /* Let the user override the IRQ with a tunable. */
2648 irq = PCI_INVALID_IRQ;
2649 ksnprintf(tunable_name, sizeof(tunable_name),
2650 "hw.pci%d.%d.%d.INT%c.irq",
2651 cfg->domain, cfg->bus, cfg->slot, cfg->intpin + 'A' - 1);
/* Reject tunable values outside the valid legacy IRQ range. */
2652 if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0))
2653 irq = PCI_INVALID_IRQ;
2656 * If we didn't get an IRQ via the tunable, then we either use the
2657 * IRQ value in the intline register or we ask the bus to route an
2658 * interrupt for us. If force_route is true, then we only use the
2659 * value in the intline register if the bus was unable to assign an
2662 if (!PCI_INTERRUPT_VALID(irq)) {
2663 if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route)
2664 irq = PCI_ASSIGN_INTERRUPT(bus, dev);
2665 if (!PCI_INTERRUPT_VALID(irq))
2669 /* If after all that we don't have an IRQ, just bail. */
2670 if (!PCI_INTERRUPT_VALID(irq))
2673 /* Update the config register if it changed. */
2674 if (irq != cfg->intline) {
2676 pci_write_config(dev, PCIR_INTLINE, irq, 1);
2679 /* Add this IRQ as rid 0 interrupt resource. */
2680 resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1,
2681 machintr_legacy_intr_cpuid(irq));
/*
 * Populate a child device's resource list from its BARs.
 *
 * Legacy/native-capable IDE controllers get special BAR handling via
 * pci_ata_maps(); all other devices have each BAR added through
 * pci_add_map().  Device-specific quirk BARs (PCI_QUIRK_MAP_REG) are
 * added afterwards, and a device that already has a routed intline is
 * asked to re-route it in case the firmware left a bogus value.
 */
2685 pci_add_resources(device_t pcib, device_t bus, device_t dev, int force, uint32_t prefetchmask)
2687 	struct pci_devinfo *dinfo = device_get_ivars(dev);
2688 	pcicfgregs *cfg = &dinfo->cfg;
2689 	struct resource_list *rl = &dinfo->resources;
2690 	struct pci_quirk *q;
/* ATA devices need special map treatment */
2698 	if ((pci_get_class(dev) == PCIC_STORAGE) &&
2699 	    (pci_get_subclass(dev) == PCIS_STORAGE_IDE) &&
2700 	    ((pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV) ||
2701 	     (!pci_read_config(dev, PCIR_BAR(0), 4) &&
2702 	      !pci_read_config(dev, PCIR_BAR(2), 4))) )
2703 		pci_ata_maps(pcib, bus, dev, b, s, f, rl, force, prefetchmask);
/* pci_add_map() returns the number of BAR slots consumed (64-bit BARs use two). */
2705 	for (i = 0; i < cfg->nummaps;)
2706 		i += pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(i),
2707 		    rl, force, prefetchmask & (1 << i));
2710 	 * Add additional, quirked resources.
2712 	for (q = &pci_quirks[0]; q->devid; q++) {
2713 		if (q->devid == ((cfg->device << 16) | cfg->vendor)
2714 		    && q->type == PCI_QUIRK_MAP_REG)
2715 			pci_add_map(pcib, bus, dev, b, s, f, q->arg1, rl,
2719 	if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) {
2721 		 * Try to re-route interrupts.  Sometimes the BIOS or
2722 		 * firmware may leave bogus values in these registers.
2723 		 * If the re-route fails, then just stick with what we
2726 		pci_assign_interrupt(bus, dev, 1);
/*
 * Scan every slot/function on the given bus and add a child device for
 * each PCI function found.  Header types above PCI_MAXHDRTYPE are
 * skipped; multi-function devices (PCIM_MFDEV) are probed on all
 * functions up to PCI_FUNCMAX.
 */
2731 pci_add_children(device_t dev, int domain, int busno, size_t dinfo_size)
2733 #define	REG(n, w)	PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
2734 	device_t pcib = device_get_parent(dev);
2735 	struct pci_devinfo *dinfo;
2737 	int s, f, pcifunchigh;
/* dinfo_size lets subclasses embed pci_devinfo in a larger structure. */
2740 	KASSERT(dinfo_size >= sizeof(struct pci_devinfo),
2741 	    ("dinfo_size too small"));
2742 	maxslots = PCIB_MAXSLOTS(pcib);
2743 	for (s = 0; s <= maxslots; s++) {
2747 		hdrtype = REG(PCIR_HDRTYPE, 1);
2748 		if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
2750 		if (hdrtype & PCIM_MFDEV)
2751 			pcifunchigh = PCI_FUNCMAX;
2752 		for (f = 0; f <= pcifunchigh; f++) {
2753 			dinfo = pci_read_device(pcib, domain, busno, s, f,
2755 			if (dinfo != NULL) {
2756 				pci_add_child(dev, dinfo);
/*
 * Attach a single discovered PCI function to the bus: create the newbus
 * child, hook up its ivars, initialize its resource list, snapshot and
 * then restore its config space (save/restore pair normalizes power
 * state and cached registers), and add its BAR resources.
 */
2764 pci_add_child(device_t bus, struct pci_devinfo *dinfo)
2768 	pcib = device_get_parent(bus);
2769 	dinfo->cfg.dev = device_add_child(bus, NULL, -1);
2770 	device_set_ivars(dinfo->cfg.dev, dinfo);
2771 	resource_list_init(&dinfo->resources);
2772 	pci_cfg_save(dinfo->cfg.dev, dinfo, 0);
2773 	pci_cfg_restore(dinfo->cfg.dev, dinfo);
2774 	pci_print_verbose(dinfo);
/* force=0, prefetchmask=0: plain resource discovery for a new child. */
2775 	pci_add_resources(pcib, bus, dinfo->cfg.dev, 0, 0);
/*
 * Generic PCI bus probe: always matches, setting a generic description.
 */
2779 pci_probe(device_t dev)
2781 	device_set_desc(dev, "PCI bus");
2783 	/* Allow other subclasses to override this driver. */
/*
 * Bus attach: obtain our domain/bus numbers from the parent bridge,
 * enumerate children, and let the generic bus code attach them.
 */
2788 pci_attach(device_t dev)
2793 	 * Since there can be multiple independently numbered PCI
2794 	 * busses on systems with multiple PCI domains, we can't use
2795 	 * the unit number to decide which bus we are probing.  We ask
2796 	 * the parent pcib what our domain and bus numbers are.
2798 	domain = pcib_get_domain(dev);
2799 	busno = pcib_get_bus(dev);
2801 		device_printf(dev, "domain=%d, physical bus=%d\n",
2804 	pci_add_children(dev, domain, busno, sizeof(struct pci_devinfo));
2806 	return (bus_generic_attach(dev));
/*
 * Bus suspend: save each child's config space, suspend the children via
 * bus_generic_suspend(), then (when ACPI is available and power
 * management on resume is enabled) place attached type-0 children into
 * D3 or whatever state ACPI recommends for this sleep state.
 */
2810 pci_suspend(device_t dev)
2812 	int dstate, error, i, numdevs;
2813 	device_t acpi_dev, child, *devlist;
2814 	struct pci_devinfo *dinfo;
2817 	 * Save the PCI configuration space for each child and set the
2818 	 * device in the appropriate power state for this sleep state.
/* acpi_dev stays NULL unless pci_do_power_resume is set — gates the D3 loop below. */
2821 	if (pci_do_power_resume)
2822 		acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
2823 	device_get_children(dev, &devlist, &numdevs);
2824 	for (i = 0; i < numdevs; i++) {
2826 		dinfo = (struct pci_devinfo *) device_get_ivars(child);
2827 		pci_cfg_save(child, dinfo, 0);
2830 	/* Suspend devices before potentially powering them down. */
2831 	error = bus_generic_suspend(dev);
2833 		kfree(devlist, M_TEMP);
2838 	 * Always set the device to D3.  If ACPI suggests a different
2839 	 * power state, use it instead.  If ACPI is not present, the
2840 	 * firmware is responsible for managing device power.  Skip
2841 	 * children who aren't attached since they are powered down
2842 	 * separately.  Only manage type 0 devices for now.
2844 	for (i = 0; acpi_dev && i < numdevs; i++) {
2846 		dinfo = (struct pci_devinfo *) device_get_ivars(child);
2847 		if (device_is_attached(child) && dinfo->cfg.hdrtype == 0) {
2848 			dstate = PCI_POWERSTATE_D3;
2849 			ACPI_PWR_FOR_SLEEP(acpi_dev, child, &dstate);
2850 			pci_set_powerstate(child, dstate);
2853 	kfree(devlist, M_TEMP);
/*
 * Bus resume: power each attached type-0 child back to D0 (notifying
 * ACPI when present), restore its saved config space, then resume the
 * children via bus_generic_resume().
 */
2858 pci_resume(device_t dev)
2861 	device_t acpi_dev, child, *devlist;
2862 	struct pci_devinfo *dinfo;
2865 	 * Set each child to D0 and restore its PCI configuration space.
2868 	if (pci_do_power_resume)
2869 		acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
2870 	device_get_children(dev, &devlist, &numdevs);
2871 	for (i = 0; i < numdevs; i++) {
2873 		 * Notify ACPI we're going to D0 but ignore the result.  If
2874 		 * ACPI is not present, the firmware is responsible for
2875 		 * managing device power.  Only manage type 0 devices for now.
2878 		dinfo = (struct pci_devinfo *) device_get_ivars(child);
2879 		if (acpi_dev && device_is_attached(child) &&
2880 		    dinfo->cfg.hdrtype == 0) {
2881 			ACPI_PWR_FOR_SLEEP(acpi_dev, child, NULL);
2882 			pci_set_powerstate(child, PCI_POWERSTATE_D0);
2885 		/* Now the device is powered up, restore its config space. */
2886 		pci_cfg_restore(child, dinfo);
2888 	kfree(devlist, M_TEMP);
2889 	return (bus_generic_resume(dev));
/*
 * Locate the preloaded "pci_vendor_data" module (the flat-text PCI
 * vendor/device database) and record its address and size, terminating
 * the buffer with a newline so the parser can't run off the end.
 */
2893 pci_load_vendor_data(void)
2895 	caddr_t vendordata, info;
2897 	if ((vendordata = preload_search_by_type("pci_vendor_data")) != NULL) {
2898 		info = preload_search_info(vendordata, MODINFO_ADDR);
2899 		pci_vendordata = *(char **)info;
2900 		info = preload_search_info(vendordata, MODINFO_SIZE);
2901 		pci_vendordata_size = *(size_t *)info;
2902 		/* terminate the database */
2903 		pci_vendordata[pci_vendordata_size] = '\n';
/*
 * Called when a new PCI driver is registered: re-probe every child
 * that currently has no driver (DS_NOTPRESENT).  Config space is
 * restored before the probe; if the probe/attach still fails, the
 * device's config is re-saved and it is powered back down.
 */
2908 pci_driver_added(device_t dev, driver_t *driver)
2913 	struct pci_devinfo *dinfo;
2917 		device_printf(dev, "driver added\n");
2918 	DEVICE_IDENTIFY(driver, dev);
2919 	device_get_children(dev, &devlist, &numdevs);
2920 	for (i = 0; i < numdevs; i++) {
2922 		if (device_get_state(child) != DS_NOTPRESENT)
2924 		dinfo = device_get_ivars(child);
2925 		pci_print_verbose(dinfo);
2927 			kprintf("pci%d:%d:%d:%d: reprobing on driver added\n",
2928 			    dinfo->cfg.domain, dinfo->cfg.bus, dinfo->cfg.slot,
2930 		pci_cfg_restore(child, dinfo);
2931 		if (device_probe_and_attach(child) != 0)
2932 			pci_cfg_save(child, dinfo, 1);
2934 	kfree(devlist, M_TEMP);
/*
 * Bus callback after a child driver detaches: save the child's config
 * space and power it down (setstate=1).
 */
2938 pci_child_detached(device_t parent __unused, device_t child)
2940 	/* Turn child's power off */
2941 	pci_cfg_save(child, device_get_ivars(child), 1);
/*
 * Set up an interrupt handler for a child.  The generic setup is done
 * first; for direct children the interrupt is then classified as INTx,
 * MSI, or MSI-X.  MSI/MSI-X vectors are lazily mapped through the
 * parent bridge (PCIB_MAP_MSI) on first use, per-vector/handler counts
 * are maintained, and the command register's INTxDIS bit is toggled so
 * that exactly one of INTx or MSI/MSI-X is active.
 * NOTE(review): error-path lines (goto/teardown on PCIB_MAP_MSI
 * failure) are partially elided in this excerpt.
 */
2945 pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags,
2946     driver_intr_t *intr, void *arg, void **cookiep, lwkt_serialize_t serializer)
2948 	struct pci_devinfo *dinfo;
2949 	struct msix_table_entry *mte;
2950 	struct msix_vector *mv;
2956 	error = bus_generic_setup_intr(dev, child, irq, flags, intr,
2957 	    arg, &cookie, serializer);
2961 	/* If this is not a direct child, just bail out. */
2962 	if (device_get_parent(child) != dev) {
2967 	rid = rman_get_rid(irq);
/* rid 0 is the legacy INTx interrupt; nonzero rids are MSI/MSI-X. */
2969 	/* Make sure that INTx is enabled */
2970 	pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
2973 	 * Check to see if the interrupt is MSI or MSI-X.
2974 	 * Ask our parent to map the MSI and give
2975 	 * us the address and data register values.
2976 	 * If we fail for some reason, teardown the
2977 	 * interrupt handler.
2979 	dinfo = device_get_ivars(child);
2980 	if (dinfo->cfg.msi.msi_alloc > 0) {
/* First MSI handler: map the vector and program the device's MSI registers. */
2981 		if (dinfo->cfg.msi.msi_addr == 0) {
2982 			KASSERT(dinfo->cfg.msi.msi_handlers == 0,
2983 			    ("MSI has handlers, but vectors not mapped"));
2984 			error = PCIB_MAP_MSI(device_get_parent(dev),
2985 			    child, rman_get_start(irq), &addr, &data,
2986 			    rman_get_cpuid(irq));
2989 			dinfo->cfg.msi.msi_addr = addr;
2990 			dinfo->cfg.msi.msi_data = data;
2991 			pci_enable_msi(child, addr, data);
2993 		dinfo->cfg.msi.msi_handlers++;
2995 		KASSERT(dinfo->cfg.msix.msix_alloc > 0,
2996 		    ("No MSI or MSI-X interrupts allocated"));
2997 		KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
2998 		    ("MSI-X index too high"));
/* MSI-X: rid N corresponds to table entry N-1. */
2999 		mte = &dinfo->cfg.msix.msix_table[rid - 1];
3000 		KASSERT(mte->mte_vector != 0, ("no message vector"));
3001 		mv = &dinfo->cfg.msix.msix_vectors[mte->mte_vector - 1];
3002 		KASSERT(mv->mv_irq == rman_get_start(irq),
3004 		if (mv->mv_address == 0) {
3005 			KASSERT(mte->mte_handlers == 0,
3006 			    ("MSI-X table entry has handlers, but vector not mapped"));
3007 			error = PCIB_MAP_MSI(device_get_parent(dev),
3008 			    child, rman_get_start(irq), &addr, &data,
3009 			    rman_get_cpuid(irq));
3012 			mv->mv_address = addr;
/* First handler on this table entry: program and unmask the vector. */
3015 		if (mte->mte_handlers == 0) {
3016 			pci_setup_msix_vector(child, rid - 1,
3017 			    mv->mv_address, mv->mv_data);
3018 			pci_unmask_msix_vector(child, rid - 1);
3020 		mte->mte_handlers++;
3023 	/* Make sure that INTx is disabled if we are using MSI/MSIX */
3024 	pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
/* Error path: undo the generic setup done above. */
3027 		(void)bus_generic_teardown_intr(dev, child, irq,
/*
 * Tear down an interrupt handler set up by pci_setup_intr().  For
 * direct children, the matching MSI/MSI-X handler count is decremented;
 * when it reaches zero the MSI messages are disabled or the MSI-X
 * vector is masked.  INTxDIS is set while tearing down.  Finishes with
 * the generic teardown.
 */
3037 pci_teardown_intr(device_t dev, device_t child, struct resource *irq,
3040 	struct msix_table_entry *mte;
3041 	struct resource_list_entry *rle;
3042 	struct pci_devinfo *dinfo;
/* Nothing to do unless the IRQ resource is active. */
3045 	if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE))
3048 	/* If this isn't a direct child, just bail out */
3049 	if (device_get_parent(child) != dev)
3050 		return(bus_generic_teardown_intr(dev, child, irq, cookie));
3052 	rid = rman_get_rid(irq);
3055 		pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
3058 	 * Check to see if the interrupt is MSI or MSI-X.  If so,
3059 	 * decrement the appropriate handlers count and mask the
3060 	 * MSI-X message, or disable MSI messages if the count
3063 	dinfo = device_get_ivars(child);
3064 	rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
/* Sanity check: the resource being torn down must be the one we track. */
3065 	if (rle->res != irq)
3067 	if (dinfo->cfg.msi.msi_alloc > 0) {
3068 		KASSERT(rid <= dinfo->cfg.msi.msi_alloc,
3069 		    ("MSI-X index too high"));
3070 		if (dinfo->cfg.msi.msi_handlers == 0)
3072 		dinfo->cfg.msi.msi_handlers--;
3073 		if (dinfo->cfg.msi.msi_handlers == 0)
3074 			pci_disable_msi(child);
3076 		KASSERT(dinfo->cfg.msix.msix_alloc > 0,
3077 		    ("No MSI or MSI-X interrupts allocated"));
3078 		KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
3079 		    ("MSI-X index too high"));
3080 		mte = &dinfo->cfg.msix.msix_table[rid - 1];
3081 		if (mte->mte_handlers == 0)
3083 		mte->mte_handlers--;
3084 		if (mte->mte_handlers == 0)
3085 			pci_mask_msix_vector(child, rid - 1);
3088 	error = bus_generic_teardown_intr(dev, child, irq, cookie);
3091 	    ("%s: generic teardown failed for MSI/MSI-X", __func__));
/*
 * Print the standard one-line summary for a child device: its port,
 * memory, and IRQ resources, any device flags, and its slot.function
 * location.  Returns the number of characters printed.
 */
3096 pci_print_child(device_t dev, device_t child)
3098 	struct pci_devinfo *dinfo;
3099 	struct resource_list *rl;
3102 	dinfo = device_get_ivars(child);
3103 	rl = &dinfo->resources;
3105 	retval += bus_print_child_header(dev, child);
3107 	retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx");
3108 	retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx");
3109 	retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld");
3110 	if (device_get_flags(dev))
3111 		retval += kprintf(" flags %#x", device_get_flags(dev));
3113 	retval += kprintf(" at device %d.%d", pci_get_slot(child),
3114 	    pci_get_function(child));
3116 	retval += bus_print_child_footer(dev, child);
/*
 * Class/subclass -> human-readable description table, consulted by
 * pci_probe_nomatch() when announcing devices that no driver claimed.
 * A subclass of -1 is the class-wide fallback description; specific
 * subclass entries refine it.  The table is terminated by an entry
 * whose desc is NULL (scan loop checks desc != NULL).
 */
3126 } pci_nomatch_tab[] = {
3127 	{PCIC_OLD,		-1,			"old"},
3128 	{PCIC_OLD,		PCIS_OLD_NONVGA,	"non-VGA display device"},
3129 	{PCIC_OLD,		PCIS_OLD_VGA,		"VGA-compatible display device"},
3130 	{PCIC_STORAGE,		-1,			"mass storage"},
3131 	{PCIC_STORAGE,		PCIS_STORAGE_SCSI,	"SCSI"},
3132 	{PCIC_STORAGE,		PCIS_STORAGE_IDE,	"ATA"},
3133 	{PCIC_STORAGE,		PCIS_STORAGE_FLOPPY,	"floppy disk"},
3134 	{PCIC_STORAGE,		PCIS_STORAGE_IPI,	"IPI"},
3135 	{PCIC_STORAGE,		PCIS_STORAGE_RAID,	"RAID"},
3136 	{PCIC_STORAGE,		PCIS_STORAGE_ATA_ADMA,	"ATA (ADMA)"},
3137 	{PCIC_STORAGE,		PCIS_STORAGE_SATA,	"SATA"},
3138 	{PCIC_STORAGE,		PCIS_STORAGE_SAS,	"SAS"},
3139 	{PCIC_NETWORK,		-1,			"network"},
3140 	{PCIC_NETWORK,		PCIS_NETWORK_ETHERNET,	"ethernet"},
3141 	{PCIC_NETWORK,		PCIS_NETWORK_TOKENRING,	"token ring"},
3142 	{PCIC_NETWORK,		PCIS_NETWORK_FDDI,	"fddi"},
3143 	{PCIC_NETWORK,		PCIS_NETWORK_ATM,	"ATM"},
3144 	{PCIC_NETWORK,		PCIS_NETWORK_ISDN,	"ISDN"},
3145 	{PCIC_DISPLAY,		-1,			"display"},
3146 	{PCIC_DISPLAY,		PCIS_DISPLAY_VGA,	"VGA"},
3147 	{PCIC_DISPLAY,		PCIS_DISPLAY_XGA,	"XGA"},
3148 	{PCIC_DISPLAY,		PCIS_DISPLAY_3D,	"3D"},
3149 	{PCIC_MULTIMEDIA,	-1,			"multimedia"},
3150 	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_VIDEO,	"video"},
3151 	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_AUDIO,	"audio"},
3152 	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_TELE,	"telephony"},
3153 	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_HDA,	"HDA"},
3154 	{PCIC_MEMORY,		-1,			"memory"},
3155 	{PCIC_MEMORY,		PCIS_MEMORY_RAM,	"RAM"},
3156 	{PCIC_MEMORY,		PCIS_MEMORY_FLASH,	"flash"},
3157 	{PCIC_BRIDGE,		-1,			"bridge"},
3158 	{PCIC_BRIDGE,		PCIS_BRIDGE_HOST,	"HOST-PCI"},
3159 	{PCIC_BRIDGE,		PCIS_BRIDGE_ISA,	"PCI-ISA"},
3160 	{PCIC_BRIDGE,		PCIS_BRIDGE_EISA,	"PCI-EISA"},
3161 	{PCIC_BRIDGE,		PCIS_BRIDGE_MCA,	"PCI-MCA"},
3162 	{PCIC_BRIDGE,		PCIS_BRIDGE_PCI,	"PCI-PCI"},
3163 	{PCIC_BRIDGE,		PCIS_BRIDGE_PCMCIA,	"PCI-PCMCIA"},
3164 	{PCIC_BRIDGE,		PCIS_BRIDGE_NUBUS,	"PCI-NuBus"},
3165 	{PCIC_BRIDGE,		PCIS_BRIDGE_CARDBUS,	"PCI-CardBus"},
3166 	{PCIC_BRIDGE,		PCIS_BRIDGE_RACEWAY,	"PCI-RACEway"},
3167 	{PCIC_SIMPLECOMM,	-1,			"simple comms"},
3168 	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_UART,	"UART"},	/* could detect 16550 */
3169 	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_PAR,	"parallel port"},
3170 	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_MULSER,	"multiport serial"},
3171 	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_MODEM,	"generic modem"},
3172 	{PCIC_BASEPERIPH,	-1,			"base peripheral"},
3173 	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_PIC,	"interrupt controller"},
3174 	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_DMA,	"DMA controller"},
3175 	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_TIMER,	"timer"},
3176 	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_RTC,	"realtime clock"},
3177 	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_PCIHOT,	"PCI hot-plug controller"},
3178 	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_SDHC,	"SD host controller"},
3179 	{PCIC_INPUTDEV,		-1,			"input device"},
3180 	{PCIC_INPUTDEV,		PCIS_INPUTDEV_KEYBOARD,	"keyboard"},
3181 	{PCIC_INPUTDEV,		PCIS_INPUTDEV_DIGITIZER,"digitizer"},
3182 	{PCIC_INPUTDEV,		PCIS_INPUTDEV_MOUSE,	"mouse"},
3183 	{PCIC_INPUTDEV,		PCIS_INPUTDEV_SCANNER,	"scanner"},
3184 	{PCIC_INPUTDEV,		PCIS_INPUTDEV_GAMEPORT,	"gameport"},
3185 	{PCIC_DOCKING,		-1,			"docking station"},
3186 	{PCIC_PROCESSOR,	-1,			"processor"},
3187 	{PCIC_SERIALBUS,	-1,			"serial bus"},
3188 	{PCIC_SERIALBUS,	PCIS_SERIALBUS_FW,	"FireWire"},
3189 	{PCIC_SERIALBUS,	PCIS_SERIALBUS_ACCESS,	"AccessBus"},
3190 	{PCIC_SERIALBUS,	PCIS_SERIALBUS_SSA,	"SSA"},
3191 	{PCIC_SERIALBUS,	PCIS_SERIALBUS_USB,	"USB"},
3192 	{PCIC_SERIALBUS,	PCIS_SERIALBUS_FC,	"Fibre Channel"},
3193 	{PCIC_SERIALBUS,	PCIS_SERIALBUS_SMBUS,	"SMBus"},
3194 	{PCIC_WIRELESS,		-1,			"wireless controller"},
3195 	{PCIC_WIRELESS,		PCIS_WIRELESS_IRDA,	"iRDA"},
3196 	{PCIC_WIRELESS,		PCIS_WIRELESS_IR,	"IR"},
3197 	{PCIC_WIRELESS,		PCIS_WIRELESS_RF,	"RF"},
3198 	{PCIC_INTELLIIO,	-1,			"intelligent I/O controller"},
3199 	{PCIC_INTELLIIO,	PCIS_INTELLIIO_I2O,	"I2O"},
3200 	{PCIC_SATCOM,		-1,			"satellite communication"},
3201 	{PCIC_SATCOM,		PCIS_SATCOM_TV,		"sat TV"},
3202 	{PCIC_SATCOM,		PCIS_SATCOM_AUDIO,	"sat audio"},
3203 	{PCIC_SATCOM,		PCIS_SATCOM_VOICE,	"sat voice"},
3204 	{PCIC_SATCOM,		PCIS_SATCOM_DATA,	"sat data"},
3205 	{PCIC_CRYPTO,		-1,			"encrypt/decrypt"},
3206 	{PCIC_CRYPTO,		PCIS_CRYPTO_NETCOMP,	"network/computer crypto"},
3207 	{PCIC_CRYPTO,		PCIS_CRYPTO_ENTERTAIN,	"entertainment crypto"},
3208 	{PCIC_DASP,		-1,			"dasp"},
3209 	{PCIC_DASP,		PCIS_DASP_DPIO,		"DPIO module"},
/*
 * Announce a device no driver claimed.  Prefer a description from the
 * loaded vendor database; otherwise fall back to the class/subclass
 * strings in pci_nomatch_tab.  Finishes by printing vendor/device IDs,
 * location, and IRQ (if any), then powers the device down via
 * pci_cfg_save(..., 1).
 */
3214 pci_probe_nomatch(device_t dev, device_t child)
3217 	char *cp, *scp, *device;
3220 	 * Look for a listing for this device in a loaded device database.
3222 	if ((device = pci_describe_device(child)) != NULL) {
3223 		device_printf(dev, "<%s>", device);
3224 		kfree(device, M_DEVBUF);
3227 		 * Scan the class/subclass descriptions for a general
/* cp: class-level description; scp: subclass-level refinement. */
3232 		for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) {
3233 			if (pci_nomatch_tab[i].class == pci_get_class(child)) {
3234 				if (pci_nomatch_tab[i].subclass == -1) {
3235 					cp = pci_nomatch_tab[i].desc;
3236 				} else if (pci_nomatch_tab[i].subclass ==
3237 				    pci_get_subclass(child)) {
3238 					scp = pci_nomatch_tab[i].desc;
3242 		device_printf(dev, "<%s%s%s>",
3244 		    ((cp != NULL) && (scp != NULL)) ? ", " : "",
3247 	kprintf(" (vendor 0x%04x, dev 0x%04x) at device %d.%d",
3248 	    pci_get_vendor(child), pci_get_device(child),
3249 	    pci_get_slot(child), pci_get_function(child));
3250 	if (pci_get_intpin(child) > 0) {
3253 		irq = pci_get_irq(child);
3254 		if (PCI_INTERRUPT_VALID(irq))
3255 			kprintf(" irq %d", irq);
/* Unclaimed device: save config and power it down. */
3259 	pci_cfg_save(child, (struct pci_devinfo *)device_get_ivars(child), 1);
3263 * Parse the PCI device database, if loaded, and return a pointer to a
3264 * description of the device.
3266 * The database is flat text formatted as follows:
3268 * Any line not in a valid format is ignored.
3269 * Lines are terminated with newline '\n' characters.
3271 * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then
3274 * A DEVICE line is entered immediately below the corresponding VENDOR ID.
3275 * - devices cannot be listed without a corresponding VENDOR line.
3276 * A DEVICE line consists of a TAB, the 4 digit (hex) device code,
3277 * another TAB, then the device name.
3281 * Assuming (ptr) points to the beginning of a line in the database,
3282 * return the vendor or device and description of the next entry.
3283 * The value of (vendor) or (device) inappropriate for the entry type
3284 * is set to -1. Returns nonzero at the end of the database.
3286 * Note that this is not fully robust in the face of corrupt data;
3287 * we attempt to safeguard against this by spamming the end of the
3288 * database with a newline when we initialise.
/*
 * Parse one line of the vendor database at *ptr, returning the vendor
 * or device id and its description (the comment block above documents
 * the file format).  *ptr is advanced past the line.
 * NOTE(review): most of this function's control flow is elided in this
 * excerpt; only the sscanf patterns and line-skip loops are visible.
 */
3291 pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc)
/* Bytes remaining in the database buffer — bounds all scanning below. */
3300 	left = pci_vendordata_size - (cp - pci_vendordata);
/* Vendor line: hex id, TAB, up-to-80-char description. */
3308 	    ksscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2)
/* Device line: same pattern, but fills *device instead. */
3312 	    ksscanf(cp, "%x\t%80[^\n]", device, *desc) == 2)
3315 	/* skip to next line */
3316 	while (*cp != '\n' && left > 0) {
3325 	/* skip to next line */
3326 	while (*cp != '\n' && left > 0) {
3330 	if (*cp == '\n' && left > 0)
/*
 * Look up a device in the loaded vendor database and return a
 * kmalloc'd "vendor, device" description string (caller frees with
 * M_DEVBUF), or NULL if no database is loaded or allocation fails.
 * If the vendor matches but the device does not, the device part is
 * rendered as its hex id.
 */
3337 pci_describe_device(device_t dev)
3340 	char *desc, *vp, *dp, *line;
3342 	desc = vp = dp = NULL;
3345 	 * If we have no vendor data, we can't do anything.
3347 	if (pci_vendordata == NULL)
3351 	 * Scan the vendor data looking for this device
3353 	line = pci_vendordata;
3354 	if ((vp = kmalloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
3357 		if (pci_describe_parse_line(&line, &vendor, &device, &vp))
3359 		if (vendor == pci_get_vendor(dev))
3362 	if ((dp = kmalloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
3365 		if (pci_describe_parse_line(&line, &vendor, &device, &dp)) {
3373 		if (device == pci_get_device(dev))
/* No device match: fall back to printing the raw device id. */
3377 		ksnprintf(dp, 80, "0x%x", pci_get_device(dev));
3378 	if ((desc = kmalloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) !=
3380 		ksprintf(desc, "%s, %s", vp, dp);
3383 		kfree(vp, M_DEVBUF);
3385 		kfree(dp, M_DEVBUF);
/*
 * Bus read_ivar method: export cached config-space fields from the
 * child's pci_devinfo to the generic PCI accessors (pci_get_vendor()
 * and friends).  Each case simply copies a cached pcicfgregs field.
 */
3390 pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
3392 	struct pci_devinfo *dinfo;
3395 	dinfo = device_get_ivars(child);
3399 	case PCI_IVAR_ETHADDR:
3401 		 * The generic accessor doesn't deal with failure, so
3402 		 * we set the return value, then return an error.
3404 		*((uint8_t **) result) = NULL;
3406 	case PCI_IVAR_SUBVENDOR:
3407 		*result = cfg->subvendor;
3409 	case PCI_IVAR_SUBDEVICE:
3410 		*result = cfg->subdevice;
3412 	case PCI_IVAR_VENDOR:
3413 		*result = cfg->vendor;
3415 	case PCI_IVAR_DEVICE:
3416 		*result = cfg->device;
3418 	case PCI_IVAR_DEVID:
/* Combined 32-bit id: device in the high word, vendor in the low. */
3419 		*result = (cfg->device << 16) | cfg->vendor;
3421 	case PCI_IVAR_CLASS:
3422 		*result = cfg->baseclass;
3424 	case PCI_IVAR_SUBCLASS:
3425 		*result = cfg->subclass;
3427 	case PCI_IVAR_PROGIF:
3428 		*result = cfg->progif;
3430 	case PCI_IVAR_REVID:
3431 		*result = cfg->revid;
3433 	case PCI_IVAR_INTPIN:
3434 		*result = cfg->intpin;
3437 		*result = cfg->intline;
3439 	case PCI_IVAR_DOMAIN:
3440 		*result = cfg->domain;
3446 		*result = cfg->slot;
3448 	case PCI_IVAR_FUNCTION:
3449 		*result = cfg->func;
3451 	case PCI_IVAR_CMDREG:
3452 		*result = cfg->cmdreg;
3454 	case PCI_IVAR_CACHELNSZ:
3455 		*result = cfg->cachelnsz;
3457 	case PCI_IVAR_MINGNT:
3458 		*result = cfg->mingnt;
3460 	case PCI_IVAR_MAXLAT:
3461 		*result = cfg->maxlat;
3463 	case PCI_IVAR_LATTIMER:
3464 		*result = cfg->lattimer;
3466 	case PCI_IVAR_PCIXCAP_PTR:
3467 		*result = cfg->pcix.pcix_ptr;
3469 	case PCI_IVAR_PCIECAP_PTR:
3470 		*result = cfg->expr.expr_ptr;
3472 	case PCI_IVAR_VPDCAP_PTR:
3473 		*result = cfg->vpd.vpd_reg;
/*
 * Bus write_ivar method: only the interrupt pin may be changed; every
 * identification field is read-only and rejected with EINVAL.
 */
3482 pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
3484 	struct pci_devinfo *dinfo;
3486 	dinfo = device_get_ivars(child);
3489 	case PCI_IVAR_INTPIN:
3490 		dinfo->cfg.intpin = value;
3492 	case PCI_IVAR_ETHADDR:
3493 	case PCI_IVAR_SUBVENDOR:
3494 	case PCI_IVAR_SUBDEVICE:
3495 	case PCI_IVAR_VENDOR:
3496 	case PCI_IVAR_DEVICE:
3497 	case PCI_IVAR_DEVID:
3498 	case PCI_IVAR_CLASS:
3499 	case PCI_IVAR_SUBCLASS:
3500 	case PCI_IVAR_PROGIF:
3501 	case PCI_IVAR_REVID:
3503 	case PCI_IVAR_DOMAIN:
3506 	case PCI_IVAR_FUNCTION:
3507 		return (EINVAL);	/* disallow for now */
3514 #include "opt_ddb.h"
3516 #include <ddb/ddb.h>
3517 #include <sys/cons.h>
3520 * List resources based on pci map registers, for use within ddb
/*
 * ddb "show pciregs" command: walk the global pci_devq device list and
 * print one summary line per device (driver name/unit, location,
 * class, subsystem, chip id, revision, header type).  Stops early if
 * the ddb pager is quit.
 */
3523 DB_SHOW_COMMAND(pciregs, db_pci_dump)
3525 	struct pci_devinfo *dinfo;
3526 	struct devlist *devlist_head;
3529 	int i, error, none_count;
3532 	/* get the head of the device queue */
3533 	devlist_head = &pci_devq;
3536 	 * Go through the list of devices and print out devices
3538 	for (error = 0, i = 0,
3539 	     dinfo = STAILQ_FIRST(devlist_head);
3540 	     (dinfo != NULL) && (error == 0) && (i < pci_numdevs) && !db_pager_quit;
3541 	     dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {
3543 		/* Populate pd_name and pd_unit */
3546 		name = device_get_name(dinfo->cfg.dev);
3549 		db_kprintf("%s%d@pci%d:%d:%d:%d:\tclass=0x%06x card=0x%08x "
3550 		    "chip=0x%08x rev=0x%02x hdr=0x%02x\n",
3551 		    (name && *name) ? name : "none",
3552 		    (name && *name) ? (int)device_get_unit(dinfo->cfg.dev) :
3554 		    p->pc_sel.pc_domain, p->pc_sel.pc_bus, p->pc_sel.pc_dev,
3555 		    p->pc_sel.pc_func, (p->pc_class << 16) |
3556 		    (p->pc_subclass << 8) | p->pc_progif,
3557 		    (p->pc_subdevice << 16) | p->pc_subvendor,
3558 		    (p->pc_device << 16) | p->pc_vendor,
3559 		    p->pc_revid, p->pc_hdr);
/*
 * Lazily allocate a resource backing a BAR that has no entry in the
 * child's resource list yet.  The BAR is sized by the classic
 * write-all-ones probe, the requested type is cross-checked against
 * what the BAR reports (memory vs. I/O), and the requested count/
 * alignment are overridden by the BAR's real size.  On success the
 * allocated range is programmed back into the BAR (both halves for a
 * 64-bit BAR) and recorded in the resource list.
 */
3565 static struct resource *
3566 pci_alloc_map(device_t dev, device_t child, int type, int *rid,
3567     u_long start, u_long end, u_long count, u_int flags)
3569 	struct pci_devinfo *dinfo = device_get_ivars(child);
3570 	struct resource_list *rl = &dinfo->resources;
3571 	struct resource_list_entry *rle;
3572 	struct resource *res;
3573 	pci_addr_t map, testval;
3577 	 * Weed out the bogons, and figure out how large the BAR/map
3578 	 * is.  Bars that read back 0 here are bogus and unimplemented.
3579 	 * Note: atapci in legacy mode are special and handled elsewhere
3580 	 * in the code.  If you have a atapci device in legacy mode and
3581 	 * it fails here, that other code is broken.
3584 	map = pci_read_config(child, *rid, 4);
/* Size probe: write all-ones, read back the writable mask. */
3585 	pci_write_config(child, *rid, 0xffffffff, 4);
3586 	testval = pci_read_config(child, *rid, 4);
3587 	if (pci_maprange(testval) == 64)
3588 		map |= (pci_addr_t)pci_read_config(child, *rid + 4, 4) << 32;
3589 	if (pci_mapbase(testval) == 0)
3593 	 * Restore the original value of the BAR.  We may have reprogrammed
3594 	 * the BAR of the low-level console device and when booting verbose,
3595 	 * we need the console device addressable.
3597 	pci_write_config(child, *rid, map, 4);
3599 	if (PCI_BAR_MEM(testval)) {
3600 		if (type != SYS_RES_MEMORY) {
3603 				"child %s requested type %d for rid %#x,"
3604 				" but the BAR says it is an memio\n",
3605 				device_get_nameunit(child), type, *rid);
3609 		if (type != SYS_RES_IOPORT) {
3612 				"child %s requested type %d for rid %#x,"
3613 				" but the BAR says it is an ioport\n",
3614 				device_get_nameunit(child), type, *rid);
3619 	 * For real BARs, we need to override the size that
3620 	 * the driver requests, because that's what the BAR
3621 	 * actually uses and we would otherwise have a
3622 	 * situation where we might allocate the excess to
3623 	 * another driver, which won't work.
3625 	mapsize = pci_mapsize(testval);
3626 	count = 1UL << mapsize;
/* BARs are naturally aligned: alignment must be at least the size. */
3627 	if (RF_ALIGNMENT(flags) < mapsize)
3628 		flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize);
3629 	if (PCI_BAR_MEM(testval) && (testval & PCIM_BAR_MEM_PREFETCH))
3630 		flags |= RF_PREFETCHABLE;
3633 	 * Allocate enough resource, and then write back the
3634 	 * appropriate bar for that resource.
3636 	res = BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid,
3637 	    start, end, count, flags, -1);
3639 		device_printf(child,
3640 		    "%#lx bytes of rid %#x res %d failed (%#lx, %#lx).\n",
3641 		    count, *rid, type, start, end);
3644 	resource_list_add(rl, type, *rid, start, end, count, -1);
3645 	rle = resource_list_find(rl, type, *rid);
3647 		panic("pci_alloc_map: unexpectedly can't find resource.");
3649 	rle->start = rman_get_start(res);
3650 	rle->end = rman_get_end(res);
3653 		device_printf(child,
3654 		    "Lazy allocation of %#lx bytes rid %#x type %d at %#lx\n",
3655 		    count, *rid, type, rman_get_start(res));
3656 	map = rman_get_start(res);
/* Program the device with the address actually allocated. */
3658 	pci_write_config(child, *rid, map, 4);
3659 	if (pci_maprange(testval) == 64)
3660 		pci_write_config(child, *rid + 4, map >> 32, 4);
/*
 * Bus alloc_resource method.  For direct children this performs the
 * PCI-specific lazy work: refuse the legacy IRQ once MSI/MSI-X has
 * been allocated, route an interrupt on demand, enable I/O or memory
 * decoding for BAR rids, and fall into pci_alloc_map() when a BAR has
 * no resource-list entry yet.  An already-reserved entry is returned
 * (and activated if RF_ACTIVE was requested); everything else goes
 * through the standard resource_list_alloc() path.
 */
3666 pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
3667     u_long start, u_long end, u_long count, u_int flags, int cpuid)
3669 	struct pci_devinfo *dinfo = device_get_ivars(child);
3670 	struct resource_list *rl = &dinfo->resources;
3671 	struct resource_list_entry *rle;
3672 	pcicfgregs *cfg = &dinfo->cfg;
3675 	 * Perform lazy resource allocation
3677 	if (device_get_parent(child) == dev) {
3681 			 * Can't alloc legacy interrupt once MSI messages
3682 			 * have been allocated.
3684 			if (*rid == 0 && (cfg->msi.msi_alloc > 0 ||
3685 			    cfg->msix.msix_alloc > 0))
3688 			 * If the child device doesn't have an
3689 			 * interrupt routed and is deserving of an
3690 			 * interrupt, try to assign it one.
3692 			if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) &&
3694 				pci_assign_interrupt(dev, child, 0);
3696 		case SYS_RES_IOPORT:
3697 		case SYS_RES_MEMORY:
/* Only rids inside the BAR window get the lazy BAR treatment. */
3698 			if (*rid < PCIR_BAR(cfg->nummaps)) {
3700 				 * Enable the I/O mode.  We should
3701 				 * also be assigning resources too
3702 				 * when none are present.  The
3703 				 * resource_list_alloc kind of sorta does
3706 				if (PCI_ENABLE_IO(dev, child, type))
3709 				rle = resource_list_find(rl, type, *rid);
3711 					return (pci_alloc_map(dev, child, type, rid,
3712 					    start, end, count, flags));
3716 		 * If we've already allocated the resource, then
3717 		 * return it now.  But first we may need to activate
3718 		 * it, since we don't allocate the resource as active
3719 		 * above.  Normally this would be done down in the
3720 		 * nexus, but since we short-circuit that path we have
3721 		 * to do its job here.  Not sure if we should kfree the
3722 		 * resource if it fails to activate.
3724 		rle = resource_list_find(rl, type, *rid);
3725 		if (rle != NULL && rle->res != NULL) {
3727 				device_printf(child,
3728 			    "Reserved %#lx bytes for rid %#x type %d at %#lx\n",
3729 				    rman_get_size(rle->res), *rid, type,
3730 				    rman_get_start(rle->res));
3731 			if ((flags & RF_ACTIVE) &&
3732 			    bus_generic_activate_resource(dev, child, type,
3733 			    *rid, rle->res) != 0)
3738 	return (resource_list_alloc(rl, dev, child, type, rid,
3739 	    start, end, count, flags, cpuid));
/*
 * Bus delete_resource method for direct children: release and remove
 * the resource-list entry (refusing if the child still owns or has
 * activated it), zero the corresponding BAR in config space, and
 * propagate the deletion up to the parent.
 */
3743 pci_delete_resource(device_t dev, device_t child, int type, int rid)
3745 	struct pci_devinfo *dinfo;
3746 	struct resource_list *rl;
3747 	struct resource_list_entry *rle;
3749 	if (device_get_parent(child) != dev)
3752 	dinfo = device_get_ivars(child);
3753 	rl = &dinfo->resources;
3754 	rle = resource_list_find(rl, type, rid);
/* Refuse while the resource is still held or active in the child. */
3757 		if (rman_get_device(rle->res) != dev ||
3758 		    rman_get_flags(rle->res) & RF_ACTIVE) {
3759 			device_printf(dev, "delete_resource: "
3760 			    "Resource still owned by child, oops. "
3761 			    "(type=%d, rid=%d, addr=%lx)\n",
3762 			    rle->type, rle->rid,
3763 			    rman_get_start(rle->res));
3766 		bus_release_resource(dev, type, rid, rle->res);
3768 	resource_list_delete(rl, type, rid);
3771 	 * Why do we turn off the PCI configuration BAR when we delete a
3774 	pci_write_config(child, rid, 0, 4);
3775 	BUS_DELETE_RESOURCE(device_get_parent(dev), child, type, rid);
/*
 * Bus get_resource_list method: hand back the child's per-device
 * resource list stored in its pci_devinfo.
 */
3778 struct resource_list *
3779 pci_get_resource_list (device_t dev, device_t child)
3781 	struct pci_devinfo *dinfo = device_get_ivars(child);
3786 	return (&dinfo->resources);
/*
 * Bus read_config method: forward a config-space read to the parent
 * bridge using the child's cached bus/slot/function address.
 */
3790 pci_read_config_method(device_t dev, device_t child, int reg, int width)
3792 	struct pci_devinfo *dinfo = device_get_ivars(child);
3793 	pcicfgregs *cfg = &dinfo->cfg;
3795 	return (PCIB_READ_CONFIG(device_get_parent(dev),
3796 	    cfg->bus, cfg->slot, cfg->func, reg, width));
/*
 * Bus write_config method: forward a config-space write to the parent
 * bridge using the child's cached bus/slot/function address.
 */
3800 pci_write_config_method(device_t dev, device_t child, int reg,
3801     uint32_t val, int width)
3803 	struct pci_devinfo *dinfo = device_get_ivars(child);
3804 	pcicfgregs *cfg = &dinfo->cfg;
3806 	PCIB_WRITE_CONFIG(device_get_parent(dev),
3807 	    cfg->bus, cfg->slot, cfg->func, reg, val, width);
/*
 * Format the child's bus location ("slot=%d function=%d") into buf for
 * devctl/device-tree reporting.
 */
3811 pci_child_location_str_method(device_t dev, device_t child, char *buf,
3815 	ksnprintf(buf, buflen, "slot=%d function=%d", pci_get_slot(child),
3816 	    pci_get_function(child));
/*
 * Format the child's plug-and-play identification (vendor/device,
 * subsystem ids, class triple) into buf for driver matching.
 */
3821 pci_child_pnpinfo_str_method(device_t dev, device_t child, char *buf,
3824 	struct pci_devinfo *dinfo;
3827 	dinfo = device_get_ivars(child);
3829 	ksnprintf(buf, buflen, "vendor=0x%04x device=0x%04x subvendor=0x%04x "
3830 	    "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device,
3831 	    cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass,
/*
 * Bus assign_interrupt method: delegate interrupt routing for the
 * child to the parent bridge (PCIB_ROUTE_INTERRUPT).
 */
3837 pci_assign_interrupt_method(device_t dev, device_t child)
3839 	struct pci_devinfo *dinfo = device_get_ivars(child);
3840 	pcicfgregs *cfg = &dinfo->cfg;
3842 	return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child,
/*
 * Module event handler: on load, initialize the global device queue,
 * create the /dev control node, and pull in the vendor database; on
 * unload, destroy the control node.
 */
3847 pci_modevent(module_t mod, int what, void *arg)
3849 	static struct cdev *pci_cdev;
3853 		STAILQ_INIT(&pci_devq);
3855 		pci_cdev = make_dev(&pcic_ops, 0, UID_ROOT, GID_WHEEL, 0644,
3857 		pci_load_vendor_data();
3861 		destroy_dev(pci_cdev);
/*
 * Restore a type-0 device's config space from the copy cached in its
 * pci_devinfo: power it up to D0 first (BARs reset on the D3->D0
 * transition), then rewrite BARs, BIOS ROM, command, interrupt, and
 * timing registers, and re-establish MSI/MSI-X state if present.
 * Bridges (type 1) and cardbus (type 2) are deliberately left alone.
 */
3869 pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo)
3874 	 * Only do header type 0 devices.  Type 1 devices are bridges,
3875 	 * which we know need special treatment.  Type 2 devices are
3876 	 * cardbus bridges which also require special treatment.
3877 	 * Other types are unknown, and we err on the side of safety
3880 	if (dinfo->cfg.hdrtype != 0)
3884 	 * Restore the device to full power mode.  We must do this
3885 	 * before we restore the registers because moving from D3 to
3886 	 * D0 will cause the chip's BARs and some other registers to
3887 	 * be reset to some unknown power on reset values.  Cut down
3888 	 * the noise on boot by doing nothing if we are already in
3891 	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
3892 		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
3894 	for (i = 0; i < dinfo->cfg.nummaps; i++)
3895 		pci_write_config(dev, PCIR_BAR(i), dinfo->cfg.bar[i], 4);
3896 	pci_write_config(dev, PCIR_BIOS, dinfo->cfg.bios, 4);
3897 	pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2);
3898 	pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1);
3899 	pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1);
3900 	pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1);
3901 	pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1);
3902 	pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1);
3903 	pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1);
3904 	pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1);
3905 	pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1);
3907 	/* Restore MSI and MSI-X configurations if they are present. */
3908 	if (dinfo->cfg.msi.msi_location != 0)
3909 		pci_resume_msi(dev);
3910 	if (dinfo->cfg.msix.msix_location != 0)
3911 		pci_resume_msix(dev);
/*
 * Snapshot a device's type-0 config header into dinfo->cfg so it can be
 * replayed by pci_cfg_restore() after a power transition.  When setstate
 * is nonzero, also power the device down to D3 — subject to the
 * pci_do_power_nodriver policy tunable and class-based exclusions for
 * devices known to misbehave when powered down.
 */
3915 pci_cfg_save(device_t dev, struct pci_devinfo *dinfo, int setstate)
3922 	 * Only do header type 0 devices.  Type 1 devices are bridges, which
3923 	 * we know need special treatment.  Type 2 devices are cardbus bridges
3924 	 * which also require special treatment.  Other types are unknown, and
3925 	 * we err on the side of safety by ignoring them.  Powering down
3926 	 * bridges should not be undertaken lightly.
3928 	if (dinfo->cfg.hdrtype != 0)
	/* Capture every BAR plus the expansion-ROM base register. */
3930 	for (i = 0; i < dinfo->cfg.nummaps; i++)
3931 		dinfo->cfg.bar[i] = pci_read_config(dev, PCIR_BAR(i), 4);
3932 	dinfo->cfg.bios = pci_read_config(dev, PCIR_BIOS, 4);
3935 	 * Some drivers apparently write to these registers w/o updating our
3936 	 * cached copy.  No harm happens if we update the copy, so do so here
3937 	 * so we can restore them.  The COMMAND register is modified by the
3938 	 * bus w/o updating the cache.  This should represent the normally
3939 	 * writable portion of the 'defined' part of type 0 headers.  In
3940 	 * theory we also need to save/restore the PCI capability structures
3941 	 * we know about, but apart from power we don't know any that are
3944 	dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
3945 	dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2);
3946 	dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2);
3947 	dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2);
3948 	dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2);
3949 	dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1);
3950 	dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1);
3951 	dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1);
3952 	dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1);
3953 	dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
3954 	dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
3955 	dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1);
3956 	dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1);
3957 	dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1);
3958 	dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1);
3961 	 * don't set the state for display devices, base peripherals and
3962 	 * memory devices since bad things happen when they are powered down.
3963 	 * We should (a) have drivers that can easily detach and (b) use
3964 	 * generic drivers for these devices so that some device actually
3965 	 * attaches.  We need to make sure that when we implement (a) we don't
3966 	 * power the device down on a reattach.
3968 	cls = pci_get_class(dev);
	/*
	 * Policy switch: each case may bail out of the powerdown path for
	 * its excluded classes (the return/break lines are elided here);
	 * case 3 powers down unconditionally.
	 */
3971 	switch (pci_do_power_nodriver)
3973 	case 0:		/* NO powerdown at all */
3975 	case 1:		/* Conservative about what to power down */
3976 		if (cls == PCIC_STORAGE)
3979 	case 2:		/* Agressive about what to power down */
3980 		if (cls == PCIC_DISPLAY || cls == PCIC_MEMORY ||
3981 		    cls == PCIC_BASEPERIPH)
3984 	case 3:		/* Power down everything */
3988 	 * PCI spec says we can only go into D3 state from D0 state.
3989 	 * Transition from D[12] into D0 before going to D3 state.
3991 	ps = pci_get_powerstate(dev);
3992 	if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3)
3993 		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
3994 	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3)
3995 		pci_set_powerstate(dev, PCI_POWERSTATE_D3);
3998 #ifdef COMPAT_OLDPCI
4001  * Locate the parent of a PCI device by scanning the PCI devlist
4002  * and return the entry for the parent.
4003  * For devices on PCI Bus 0 (the host bus), this is the PCI Host.
4004  * For devices on secondary PCI busses, this is that bus' PCI-PCI Bridge.
4007 pci_devlist_get_parent(pcicfgregs *cfg)
4009 	struct devlist *devlist_head;
4010 	struct pci_devinfo *dinfo;
4011 	pcicfgregs *bridge_cfg;
	/* Walk the global device queue from the head. */
4014 	dinfo = STAILQ_FIRST(devlist_head = &pci_devq);
4016 	/* If the device is on PCI bus 0, look for the host */
4017 	if (cfg->bus == 0) {
		/* i < pci_numdevs bounds the walk in case the list is stale. */
4018 		for (i = 0; (dinfo != NULL) && (i < pci_numdevs);
4019 		     dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {
4020 			bridge_cfg = &dinfo->cfg;
			/* Match: host bridge on the same bus number. */
4021 			if (bridge_cfg->baseclass == PCIC_BRIDGE
4022 			    && bridge_cfg->subclass == PCIS_BRIDGE_HOST
4023 			    && bridge_cfg->bus == cfg->bus) {
4029 	/* If the device is not on PCI bus 0, look for the PCI-PCI bridge */
4031 		for (i = 0; (dinfo != NULL) && (i < pci_numdevs);
4032 		     dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {
4033 			bridge_cfg = &dinfo->cfg;
			/* Match: PCI-PCI bridge whose secondary bus is ours. */
4034 			if (bridge_cfg->baseclass == PCIC_BRIDGE
4035 			    && bridge_cfg->subclass == PCIS_BRIDGE_PCI
4036 			    && bridge_cfg->secondarybus == cfg->bus) {
	/*
	 * NOTE(review): the return statements inside the match blocks and the
	 * final not-found return are elided from this chunk — presumably each
	 * match returns the bridge's cfg and the fallthrough returns NULL;
	 * confirm against the full source.
	 */
4045 #endif /* COMPAT_OLDPCI */
4048 pci_alloc_1intr(device_t dev, int msi_enable, int *rid0, u_int *flags0)
4055 type = PCI_INTR_TYPE_LEGACY;
4056 flags = RF_SHAREABLE | RF_ACTIVE;
4058 ksnprintf(env, sizeof(env), "hw.%s.msi.enable",
4059 device_get_nameunit(dev));
4060 kgetenv_int(env, &msi_enable);
4065 ksnprintf(env, sizeof(env), "hw.%s.msi.cpu",
4066 device_get_nameunit(dev));
4067 kgetenv_int(env, &cpu);
4071 if (pci_alloc_msi(dev, &rid, 1, cpu) == 0) {
4072 flags &= ~RF_SHAREABLE;
4073 type = PCI_INTR_TYPE_MSI;