2 * Copyright (c) 1997, Stefan Esser <se@kfreebsd.org>
3 * Copyright (c) 2000, Michael Smith <msmith@kfreebsd.org>
4 * Copyright (c) 2000, BSDi
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 * $FreeBSD: src/sys/dev/pci/pci.c,v 1.355.2.9.2.1 2009/04/15 03:14:26 kensmith Exp $
33 #include "opt_compat_oldpci.h"
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/malloc.h>
38 #include <sys/module.h>
39 #include <sys/linker.h>
40 #include <sys/fcntl.h>
42 #include <sys/kernel.h>
43 #include <sys/queue.h>
44 #include <sys/sysctl.h>
45 #include <sys/endian.h>
46 #include <sys/machintr.h>
48 #include <machine/msi_machdep.h>
52 #include <vm/vm_extern.h>
56 #include <sys/device.h>
58 #include <sys/pciio.h>
59 #include <bus/pci/pcireg.h>
60 #include <bus/pci/pcivar.h>
61 #include <bus/pci/pci_private.h>
67 #include <contrib/dev/acpica/acpi.h>
70 #define ACPI_PWR_FOR_SLEEP(x, y, z)
73 extern struct dev_ops pcic_ops; /* XXX */
75 typedef void (*pci_read_cap_t)(device_t, int, int, pcicfgregs *);
77 static uint32_t pci_mapbase(unsigned mapreg);
78 static const char *pci_maptype(unsigned mapreg);
79 static int pci_mapsize(unsigned testval);
80 static int pci_maprange(unsigned mapreg);
81 static void pci_fixancient(pcicfgregs *cfg);
83 static int pci_porten(device_t pcib, int b, int s, int f);
84 static int pci_memen(device_t pcib, int b, int s, int f);
85 static void pci_assign_interrupt(device_t bus, device_t dev,
87 static int pci_add_map(device_t pcib, device_t bus, device_t dev,
88 int b, int s, int f, int reg,
89 struct resource_list *rl, int force, int prefetch);
90 static int pci_probe(device_t dev);
91 static int pci_attach(device_t dev);
92 static void pci_child_detached(device_t, device_t);
93 static void pci_load_vendor_data(void);
94 static int pci_describe_parse_line(char **ptr, int *vendor,
95 int *device, char **desc);
96 static char *pci_describe_device(device_t dev);
97 static int pci_modevent(module_t mod, int what, void *arg);
98 static void pci_hdrtypedata(device_t pcib, int b, int s, int f,
100 static void pci_read_capabilities(device_t pcib, pcicfgregs *cfg);
101 static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg,
102 int reg, uint32_t *data);
104 static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg,
105 int reg, uint32_t data);
107 static void pci_read_vpd(device_t pcib, pcicfgregs *cfg);
108 static void pci_disable_msi(device_t dev);
109 static void pci_enable_msi(device_t dev, uint64_t address,
111 static void pci_setup_msix_vector(device_t dev, u_int index,
112 uint64_t address, uint32_t data);
113 static void pci_mask_msix_vector(device_t dev, u_int index);
114 static void pci_unmask_msix_vector(device_t dev, u_int index);
115 static int pci_msi_blacklisted(void);
116 static void pci_resume_msi(device_t dev);
117 static void pci_resume_msix(device_t dev);
118 static int pcie_slotimpl(const pcicfgregs *);
119 static void pci_print_verbose_expr(const pcicfgregs *);
121 static void pci_read_cap_pmgt(device_t, int, int, pcicfgregs *);
122 static void pci_read_cap_ht(device_t, int, int, pcicfgregs *);
123 static void pci_read_cap_msi(device_t, int, int, pcicfgregs *);
124 static void pci_read_cap_msix(device_t, int, int, pcicfgregs *);
125 static void pci_read_cap_vpd(device_t, int, int, pcicfgregs *);
126 static void pci_read_cap_subvendor(device_t, int, int,
128 static void pci_read_cap_pcix(device_t, int, int, pcicfgregs *);
129 static void pci_read_cap_express(device_t, int, int, pcicfgregs *);
/*
 * Method dispatch table binding the generic newbus device/bus interfaces
 * and the PCI-specific kobj interfaces to this driver's implementations.
 * NOTE(review): several interior lines (section comments, terminator
 * entry) appear to be missing from this extract of the file.
 */
131 static device_method_t pci_methods[] = {
132 /* Device interface */
133 DEVMETHOD(device_probe, pci_probe),
134 DEVMETHOD(device_attach, pci_attach),
135 DEVMETHOD(device_detach, bus_generic_detach),
136 DEVMETHOD(device_shutdown, bus_generic_shutdown),
137 DEVMETHOD(device_suspend, pci_suspend),
138 DEVMETHOD(device_resume, pci_resume),
/* Bus interface: child printing, ivar access, interrupt plumbing. */
141 DEVMETHOD(bus_print_child, pci_print_child),
142 DEVMETHOD(bus_probe_nomatch, pci_probe_nomatch),
143 DEVMETHOD(bus_read_ivar, pci_read_ivar),
144 DEVMETHOD(bus_write_ivar, pci_write_ivar),
145 DEVMETHOD(bus_driver_added, pci_driver_added),
146 DEVMETHOD(bus_child_detached, pci_child_detached),
147 DEVMETHOD(bus_setup_intr, pci_setup_intr),
148 DEVMETHOD(bus_teardown_intr, pci_teardown_intr),
/* Resource management: mix of PCI-specific and generic rl helpers. */
150 DEVMETHOD(bus_get_resource_list,pci_get_resource_list),
151 DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
152 DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
153 DEVMETHOD(bus_delete_resource, pci_delete_resource),
154 DEVMETHOD(bus_alloc_resource, pci_alloc_resource),
155 DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource),
156 DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
157 DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
158 DEVMETHOD(bus_child_pnpinfo_str, pci_child_pnpinfo_str_method),
159 DEVMETHOD(bus_child_location_str, pci_child_location_str_method),
/* PCI interface exported to child drivers. */
162 DEVMETHOD(pci_read_config, pci_read_config_method),
163 DEVMETHOD(pci_write_config, pci_write_config_method),
164 DEVMETHOD(pci_enable_busmaster, pci_enable_busmaster_method),
165 DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method),
166 DEVMETHOD(pci_enable_io, pci_enable_io_method),
167 DEVMETHOD(pci_disable_io, pci_disable_io_method),
168 DEVMETHOD(pci_get_vpd_ident, pci_get_vpd_ident_method),
169 DEVMETHOD(pci_get_vpd_readonly, pci_get_vpd_readonly_method),
170 DEVMETHOD(pci_get_powerstate, pci_get_powerstate_method),
171 DEVMETHOD(pci_set_powerstate, pci_set_powerstate_method),
172 DEVMETHOD(pci_assign_interrupt, pci_assign_interrupt_method),
173 DEVMETHOD(pci_find_extcap, pci_find_extcap_method),
174 DEVMETHOD(pci_alloc_msi, pci_alloc_msi_method),
175 DEVMETHOD(pci_alloc_msix, pci_alloc_msix_method),
176 DEVMETHOD(pci_release_msi, pci_release_msi_method),
177 DEVMETHOD(pci_msi_count, pci_msi_count_method),
178 DEVMETHOD(pci_msix_count, pci_msix_count_method),
/* Register the "pci" class and hang it off the pcib (bridge) driver. */
183 DEFINE_CLASS_0(pci, pci_driver, pci_methods, 0);
185 static devclass_t pci_devclass;
186 DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, NULL);
187 MODULE_VERSION(pci, 1);
189 static char *pci_vendordata;
190 static size_t pci_vendordata_size;
/*
 * Dispatch table mapping a PCI capability ID (PCIY_*) to the parser
 * that decodes it into pcicfgregs.  Walked by pci_read_capabilities();
 * the { 0, NULL } entry terminates the scan.
 * NOTE(review): the capability-ID field declaration (original line 194)
 * is missing from this extract.
 */
193 static const struct pci_read_cap {
195 pci_read_cap_t read_cap;
196 } pci_read_caps[] = {
197 { PCIY_PMG, pci_read_cap_pmgt },
198 { PCIY_HT, pci_read_cap_ht },
199 { PCIY_MSI, pci_read_cap_msi },
200 { PCIY_MSIX, pci_read_cap_msix },
201 { PCIY_VPD, pci_read_cap_vpd },
202 { PCIY_SUBVENDOR, pci_read_cap_subvendor },
203 { PCIY_PCIX, pci_read_cap_pcix },
204 { PCIY_EXPRESS, pci_read_cap_express },
205 { 0, NULL } /* required last entry */
/*
 * Per-device quirk table, keyed by the combined vendor/device ID
 * (device in the high 16 bits, vendor in the low 16 bits — e.g.
 * 0x71138086 is Intel 82371AB).  PCI_QUIRK_MAP_REG marks devices whose
 * BAR lives at a non-standard offset; PCI_QUIRK_DISABLE_MSI marks
 * chipsets on which MSI/MSI-X is known broken.
 * NOTE(review): the struct pci_quirk declaration lines and the table
 * terminator are partially missing from this extract.
 */
209 uint32_t devid; /* Vendor/device of the card */
211 #define PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */
212 #define PCI_QUIRK_DISABLE_MSI 2 /* MSI/MSI-X doesn't work */
217 struct pci_quirk pci_quirks[] = {
218 /* The Intel 82371AB and 82443MX has a map register at offset 0x90. */
219 { 0x71138086, PCI_QUIRK_MAP_REG, 0x90, 0 },
220 { 0x719b8086, PCI_QUIRK_MAP_REG, 0x90, 0 },
221 /* As does the Serverworks OSB4 (the SMBus mapping register) */
222 { 0x02001166, PCI_QUIRK_MAP_REG, 0x90, 0 },
225 * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge
226 * or the CMIC-SL (AKA ServerWorks GC_LE).
228 { 0x00141166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
229 { 0x00171166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
232 * MSI doesn't work on earlier Intel chipsets including
233 * E7500, E7501, E7505, 845, 865, 875/E7210, and 855.
235 { 0x25408086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
236 { 0x254c8086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
237 { 0x25508086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
238 { 0x25608086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
239 { 0x25708086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
240 { 0x25788086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
241 { 0x35808086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
244 * MSI doesn't work with devices behind the AMD 8131 HT-PCIX
247 { 0x74501022, PCI_QUIRK_DISABLE_MSI, 0, 0 },
252 /* map register information */
253 #define PCI_MAPMEM 0x01 /* memory map */
254 #define PCI_MAPMEMP 0x02 /* prefetchable memory map */
255 #define PCI_MAPPORT 0x04 /* port map */
257 struct devlist pci_devq;
258 uint32_t pci_generation;
259 uint32_t pci_numdevs = 0;
260 static int pcie_chipset, pcix_chipset;
263 SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD, 0, "PCI bus tuning parameters");
265 static int pci_enable_io_modes = 1;
266 TUNABLE_INT("hw.pci.enable_io_modes", &pci_enable_io_modes);
267 SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RW,
268 &pci_enable_io_modes, 1,
269 "Enable I/O and memory bits in the config register. Some BIOSes do not\n\
270 enable these bits correctly. We'd like to do this all the time, but there\n\
271 are some peripherals that this causes problems with.");
273 static int pci_do_power_nodriver = 0;
274 TUNABLE_INT("hw.pci.do_power_nodriver", &pci_do_power_nodriver);
275 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RW,
276 &pci_do_power_nodriver, 0,
277 "Place a function into D3 state when no driver attaches to it. 0 means\n\
278 disable. 1 means conservatively place devices into D3 state. 2 means\n\
279 aggressively place devices into D3 state. 3 means put absolutely everything\n\
282 static int pci_do_power_resume = 1;
283 TUNABLE_INT("hw.pci.do_power_resume", &pci_do_power_resume);
284 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RW,
285 &pci_do_power_resume, 1,
286 "Transition from D3 -> D0 on resume.");
288 static int pci_do_msi = 1;
289 TUNABLE_INT("hw.pci.enable_msi", &pci_do_msi);
290 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RW, &pci_do_msi, 1,
291 "Enable support for MSI interrupts");
293 static int pci_do_msix = 0;
295 TUNABLE_INT("hw.pci.enable_msix", &pci_do_msix);
296 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RW, &pci_do_msix, 1,
297 "Enable support for MSI-X interrupts");
300 static int pci_honor_msi_blacklist = 1;
301 TUNABLE_INT("hw.pci.honor_msi_blacklist", &pci_honor_msi_blacklist);
302 SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RD,
303 &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI");
305 static int pci_msi_cpuid;
307 /* Find a device_t by bus/slot/function in domain 0 */
/*
 * Look up a device_t by bus/slot/function, assuming PCI domain 0.
 * Thin convenience wrapper around pci_find_dbsf().
 * NOTE(review): return type and braces are missing from this extract.
 */
310 pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func)
313 	return (pci_find_dbsf(0, bus, slot, func));
316 /* Find a device_t by domain/bus/slot/function */
319 pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func)
321 struct pci_devinfo *dinfo;
323 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
324 if ((dinfo->cfg.domain == domain) &&
325 (dinfo->cfg.bus == bus) &&
326 (dinfo->cfg.slot == slot) &&
327 (dinfo->cfg.func == func)) {
328 return (dinfo->cfg.dev);
335 /* Find a device_t by vendor/device ID */
338 pci_find_device(uint16_t vendor, uint16_t device)
340 struct pci_devinfo *dinfo;
342 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
343 if ((dinfo->cfg.vendor == vendor) &&
344 (dinfo->cfg.device == device)) {
345 return (dinfo->cfg.dev);
352 /* return base address of memory or port map */
/*
 * Strip the type/flag bits from a raw BAR value, leaving only the base
 * address: PCIM_BAR_MEM_BASE mask for memory BARs, PCIM_BAR_IO_BASE
 * for I/O BARs.
 */
355 pci_mapbase(uint32_t mapreg)
358 if (PCI_BAR_MEM(mapreg))
359 return (mapreg & PCIM_BAR_MEM_BASE);
/* Otherwise it is an I/O BAR (an `else` line is elided in this extract). */
361 return (mapreg & PCIM_BAR_IO_BASE);
364 /* return map type of memory or port map */
/*
 * Return a human-readable string for the BAR type.  The "I/O Port" and
 * plain "Memory" return lines are missing from this extract; only the
 * prefetchable-memory branch is visible.
 */
367 pci_maptype(unsigned mapreg)
370 if (PCI_BAR_IO(mapreg))
372 if (mapreg & PCIM_BAR_MEM_PREFETCH)
373 return ("Prefetchable Memory");
377 /* return log2 of map size decoded for memory or port map */
/*
 * Given the value read back after writing all-ones to a BAR, compute
 * log2 of the decoded window size by counting trailing zero bits of
 * the masked base.  Loop body/counter lines are missing from this
 * extract.
 */
380 pci_mapsize(uint32_t testval)
384 testval = pci_mapbase(testval);
387 while ((testval & 1) == 0)
396 /* return log2 of address range supported by map register */
/*
 * Return log2 of the address range a BAR can decode: I/O BARs and
 * 32-bit/1MB/64-bit memory BARs each map to a fixed width.  The return
 * statements for each case are missing from this extract (presumably
 * 32, 20, and 64 respectively — confirm against the full source).
 */
399 pci_maprange(unsigned mapreg)
403 if (PCI_BAR_IO(mapreg))
406 switch (mapreg & PCIM_BAR_MEM_TYPE) {
407 case PCIM_BAR_MEM_32:
410 case PCIM_BAR_MEM_1MB:
413 case PCIM_BAR_MEM_64:
420 /* adjust some values from PCI 1.0 devices to match 2.0 standards ... */
/*
 * Pre-2.0 devices report header type 0 even when they are bridges;
 * if the class/subclass identify a PCI-PCI bridge, force hdrtype to 1
 * (the assignment line is elided in this extract).
 */
423 pci_fixancient(pcicfgregs *cfg)
425 if (cfg->hdrtype != 0)
428 /* PCI to PCI bridges use header type 1 */
429 if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI)
433 /* extract header type specific config data */
/*
 * Fill in the header-type-dependent fields of *cfg: subvendor/subdevice
 * IDs, BAR count (nummaps), and for bridge/cardbus headers the
 * secondary bus number.  The case labels (0/1/2) and #undef REG are
 * missing from this extract.
 */
436 pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg)
438 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
439 switch (cfg->hdrtype) {
441 cfg->subvendor = REG(PCIR_SUBVEND_0, 2);
442 cfg->subdevice = REG(PCIR_SUBDEV_0, 2);
443 cfg->nummaps = PCI_MAXMAPS_0;
446 cfg->nummaps = PCI_MAXMAPS_1;
448 cfg->secondarybus = REG(PCIR_SECBUS_1, 1);
452 cfg->subvendor = REG(PCIR_SUBVEND_2, 2);
453 cfg->subdevice = REG(PCIR_SUBDEV_2, 2);
454 cfg->nummaps = PCI_MAXMAPS_2;
456 cfg->secondarybus = REG(PCIR_SECBUS_2, 1);
463 /* read configuration header into pcicfgregs structure */
/*
 * Probe config space at domain d, bus b, slot s, function f.  If the
 * vendor/device register reads as -1 the slot is empty and NULL is
 * (presumably) returned.  Otherwise allocate a pci_devinfo of `size`
 * bytes (callers may embed pci_devinfo in a larger structure), read
 * the standard header fields, parse capabilities, link the entry onto
 * pci_devq, and mirror the data into the pciio `conf` snapshot.
 * Returns the new devlist_entry.
 */
465 pci_read_device(device_t pcib, int d, int b, int s, int f, size_t size)
467 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
468 pcicfgregs *cfg = NULL;
469 struct pci_devinfo *devlist_entry;
470 struct devlist *devlist_head;
472 devlist_head = &pci_devq;
474 devlist_entry = NULL;
/* An all-ones read means no device responds at this address. */
476 if (REG(PCIR_DEVVENDOR, 4) != -1) {
477 devlist_entry = kmalloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
479 cfg = &devlist_entry->cfg;
/* Standard type-0 header fields (domain/bus/slot/func assignment
 * lines are elided in this extract). */
485 cfg->vendor = REG(PCIR_VENDOR, 2);
486 cfg->device = REG(PCIR_DEVICE, 2);
487 cfg->cmdreg = REG(PCIR_COMMAND, 2);
488 cfg->statreg = REG(PCIR_STATUS, 2);
489 cfg->baseclass = REG(PCIR_CLASS, 1);
490 cfg->subclass = REG(PCIR_SUBCLASS, 1);
491 cfg->progif = REG(PCIR_PROGIF, 1);
492 cfg->revid = REG(PCIR_REVID, 1);
493 cfg->hdrtype = REG(PCIR_HDRTYPE, 1);
494 cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1);
495 cfg->lattimer = REG(PCIR_LATTIMER, 1);
496 cfg->intpin = REG(PCIR_INTPIN, 1);
497 cfg->intline = REG(PCIR_INTLINE, 1);
499 cfg->mingnt = REG(PCIR_MINGNT, 1);
500 cfg->maxlat = REG(PCIR_MAXLAT, 1);
/* Split the multi-function bit out of the header type byte. */
502 cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0;
503 cfg->hdrtype &= ~PCIM_MFDEV;
506 pci_hdrtypedata(pcib, b, s, f, cfg);
508 pci_read_capabilities(pcib, cfg);
510 STAILQ_INSERT_TAIL(devlist_head, devlist_entry, pci_links);
/* Mirror cfg into the pciio(4)-visible pci_conf record. */
512 devlist_entry->conf.pc_sel.pc_domain = cfg->domain;
513 devlist_entry->conf.pc_sel.pc_bus = cfg->bus;
514 devlist_entry->conf.pc_sel.pc_dev = cfg->slot;
515 devlist_entry->conf.pc_sel.pc_func = cfg->func;
516 devlist_entry->conf.pc_hdr = cfg->hdrtype;
518 devlist_entry->conf.pc_subvendor = cfg->subvendor;
519 devlist_entry->conf.pc_subdevice = cfg->subdevice;
520 devlist_entry->conf.pc_vendor = cfg->vendor;
521 devlist_entry->conf.pc_device = cfg->device;
523 devlist_entry->conf.pc_class = cfg->baseclass;
524 devlist_entry->conf.pc_subclass = cfg->subclass;
525 devlist_entry->conf.pc_progif = cfg->progif;
526 devlist_entry->conf.pc_revid = cfg->revid;
531 return (devlist_entry);
/*
 * Sanitize a capability-list "next pointer" in place.  Per PCI 3.0 the
 * low two bits are reserved and must be masked off by software; a
 * pointer below 0x40 (inside the standard header) is illegal and
 * terminates the walk.  Presumably returns nonzero when the pointer is
 * usable and zero to stop iteration — the return statements are
 * missing from this extract, so confirm against the full source.
 */
536 pci_fixup_nextptr(int *nextptr0)
538 int nextptr = *nextptr0;
540 /* "Next pointer" is only one byte */
541 KASSERT(nextptr <= 0xff, ("Illegal next pointer %d\n", nextptr));
545 * PCI local bus spec 3.0:
547 * "... The bottom two bits of all pointers are reserved
548 * and must be implemented as 00b although software must
549 * mask them to allow for future uses of these bits ..."
552 kprintf("Illegal PCI extended capability "
553 "offset, fixup 0x%02x -> 0x%02x\n",
554 nextptr, nextptr & ~0x3);
560 if (nextptr < 0x40) {
562 kprintf("Illegal PCI extended capability "
563 "offset 0x%02x", nextptr);
/*
 * Parse the power-management capability (PCIY_PMG) at `ptr`: record
 * the capability word and the offsets of the status and PMCSR
 * registers.  The optional data register is only recorded when the
 * capability block is large enough to contain it.
 */
571 pci_read_cap_pmgt(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
574 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
576 struct pcicfg_pp *pp = &cfg->pp;
581 pp->pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
582 pp->pp_status = ptr + PCIR_POWER_STATUS;
583 pp->pp_pmcsr = ptr + PCIR_POWER_PMCSR;
585 if ((nextptr - ptr) > PCIR_POWER_DATA) {
588 * We should write to data_select and read back from
589 * data_scale to determine whether data register is
593 pp->pp_data = ptr + PCIR_POWER_DATA;
/*
 * Parse a HyperTransport capability (x86 only).  Records the slave
 * capability offset, and for an MSI-mapping capability validates that
 * a non-fixed mapping window sits at the architectural MSI address
 * base (MSI_X86_ADDR_BASE); a non-default window is reported and
 * (per the visible fallthrough) the default base is used instead.
 * NOTE(review): the ht_msimap assignment and some control flow are
 * missing from this extract.
 */
603 pci_read_cap_ht(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
605 #if defined(__i386__) || defined(__x86_64__)
608 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
610 struct pcicfg_ht *ht = &cfg->ht;
614 /* Determine HT-specific capability type. */
615 val = REG(ptr + PCIR_HT_COMMAND, 2);
617 if ((val & 0xe000) == PCIM_HTCAP_SLAVE)
618 cfg->ht.ht_slave = ptr;
/* Only the MSI-mapping sub-capability is parsed further. */
620 if ((val & PCIM_HTCMD_CAP_MASK) != PCIM_HTCAP_MSI_MAPPING)
623 if (!(val & PCIM_HTCMD_MSI_FIXED)) {
624 /* Sanity check the mapping window. */
625 addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI, 4);
/* Combine into a 64-bit window address (shift line elided here). */
627 addr |= REG(ptr + PCIR_HTMSI_ADDRESS_LO, 4);
628 if (addr != MSI_X86_ADDR_BASE) {
629 device_printf(pcib, "HT Bridge at pci%d:%d:%d:%d "
630 "has non-default MSI window 0x%llx\n",
631 cfg->domain, cfg->bus, cfg->slot, cfg->func,
635 addr = MSI_X86_ADDR_BASE;
639 ht->ht_msictrl = val;
640 ht->ht_msiaddr = addr;
644 #endif /* __i386__ || __x86_64__ */
/*
 * Parse the MSI capability: record its location, the control word,
 * and the supported message count, which is encoded as a power of two
 * in the MMC field (bits 3:1 of the control register).
 */
648 pci_read_cap_msi(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
651 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
653 struct pcicfg_msi *msi = &cfg->msi;
655 msi->msi_location = ptr;
656 msi->msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2);
657 msi->msi_msgnum = 1 << ((msi->msi_ctrl & PCIM_MSICTRL_MMC_MASK) >> 1);
/*
 * Parse the MSI-X capability: message count is table-size + 1 from the
 * control register, and the table and PBA locations are each decoded
 * into a BAR index (low 3 "BIR" bits) plus an offset within that BAR.
 */
663 pci_read_cap_msix(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
666 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
668 struct pcicfg_msix *msix = &cfg->msix;
671 msix->msix_location = ptr;
672 msix->msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2);
673 msix->msix_msgnum = (msix->msix_ctrl & PCIM_MSIXCTRL_TABLE_SIZE) + 1;
675 val = REG(ptr + PCIR_MSIX_TABLE, 4);
676 msix->msix_table_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK);
677 msix->msix_table_offset = val & ~PCIM_MSIX_BIR_MASK;
679 val = REG(ptr + PCIR_MSIX_PBA, 4);
680 msix->msix_pba_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK);
681 msix->msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK;
/*
 * VPD capability: just remember where it lives; the actual VPD data is
 * read lazily by pci_read_vpd() on first use.
 */
687 pci_read_cap_vpd(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
689 cfg->vpd.vpd_reg = ptr;
/*
 * Subvendor capability (bridges only — type-1 headers have no
 * subvendor field in the standard header): read the 32-bit ID word and
 * split it into subvendor (low 16) and subdevice (high 16).
 */
693 pci_read_cap_subvendor(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
696 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
698 /* Should always be true. */
699 if ((cfg->hdrtype & PCIM_HDRTYPE) == 1) {
702 val = REG(ptr + PCIR_SUBVENDCAP_ID, 4);
703 cfg->subvendor = val & 0xffff;
704 cfg->subdevice = val >> 16;
/*
 * PCI-X capability: remember its offset, and flag the whole system as
 * having a PCI-X chipset when the capability appears on a bridge
 * (the pcix_chipset assignment line is elided in this extract).
 */
711 pci_read_cap_pcix(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
714 * Assume we have a PCI-X chipset if we have
715 * at least one PCI-PCI bridge with a PCI-X
716 * capability. Note that some systems with
717 * PCI-express or HT chipsets might match on
718 * this check as well.
720 if ((cfg->hdrtype & PCIM_HDRTYPE) == 1)
723 cfg->pcix.pcix_ptr = ptr;
727 pcie_slotimpl(const pcicfgregs *cfg)
729 const struct pcicfg_expr *expr = &cfg->expr;
733 * Only version 1 can be parsed currently
735 if ((expr->expr_cap & PCIEM_CAP_VER_MASK) != PCIEM_CAP_VER_1)
739 * - Slot implemented bit is meaningful iff current port is
740 * root port or down stream port.
741 * - Testing for root port or down stream port is meanningful
742 * iff PCI configure has type 1 header.
745 if (cfg->hdrtype != 1)
748 port_type = expr->expr_cap & PCIEM_CAP_PORT_TYPE;
749 if (port_type != PCIE_ROOT_PORT && port_type != PCIE_DOWN_STREAM_PORT)
752 if (!(expr->expr_cap & PCIEM_CAP_SLOT_IMPL))
/*
 * Parse the PCI Express capability: record its offset and capability
 * word (the pcie_chipset flag assignment is elided in this extract).
 * Only capability version 1 is decoded further; slot capabilities are
 * read only when pcie_slotimpl() says a slot exists.
 */
759 pci_read_cap_express(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
762 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
764 struct pcicfg_expr *expr = &cfg->expr;
767 * Assume we have a PCI-express chipset if we have
768 * at least one PCI-express device.
772 expr->expr_ptr = ptr;
773 expr->expr_cap = REG(ptr + PCIER_CAPABILITY, 2);
776 * Only version 1 can be parsed currently
778 if ((expr->expr_cap & PCIEM_CAP_VER_MASK) != PCIEM_CAP_VER_1)
782 * Read slot capabilities. Slot capabilities exists iff
783 * current port's slot is implemented
785 if (pcie_slotimpl(cfg))
786 expr->expr_slotcap = REG(ptr + PCIER_SLOTCAP, 4);
/*
 * Walk the device's capability list and dispatch each entry through
 * the pci_read_caps table.  The start pointer depends on the header
 * type (standard vs. cardbus); pci_fixup_nextptr() sanitizes each
 * next-pointer and terminates the loop.  On x86, HT slaves with an MSI
 * mapping capability get their MSI window enabled here.
 * NOTE(review): case labels, local declarations, and the ptr/nextptr
 * advance are missing from this extract.
 */
792 pci_read_capabilities(device_t pcib, pcicfgregs *cfg)
794 #define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
795 #define WREG(n, v, w) PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w)
800 if ((REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT) == 0) {
801 /* No capabilities */
805 switch (cfg->hdrtype & PCIM_HDRTYPE) {
808 ptrptr = PCIR_CAP_PTR;
811 ptrptr = PCIR_CAP_PTR_2; /* cardbus capabilities ptr */
814 return; /* no capabilities support */
816 nextptr = REG(ptrptr, 1); /* sanity check? */
819 * Read capability entries.
821 while (pci_fixup_nextptr(&nextptr)) {
822 const struct pci_read_cap *rc;
825 /* Find the next entry */
826 nextptr = REG(ptr + PCICAP_NEXTPTR, 1);
828 /* Process this entry */
829 val = REG(ptr + PCICAP_ID, 1);
830 for (rc = pci_read_caps; rc->read_cap != NULL; ++rc) {
831 if (rc->cap == val) {
832 rc->read_cap(pcib, ptr, nextptr, cfg);
838 #if defined(__i386__) || defined(__x86_64__)
840 * Enable the MSI mapping window for all HyperTransport
841 * slaves. PCI-PCI bridges have their windows enabled via
844 if (cfg->ht.ht_slave != 0 && cfg->ht.ht_msimap != 0 &&
845 !(cfg->ht.ht_msictrl & PCIM_HTCMD_MSI_ENABLE)) {
847 "Enabling MSI window for HyperTransport slave at pci%d:%d:%d:%d\n",
848 cfg->domain, cfg->bus, cfg->slot, cfg->func);
849 cfg->ht.ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
850 WREG(cfg->ht.ht_msimap + PCIR_HT_COMMAND, cfg->ht.ht_msictrl,
855 /* REG and WREG use carry through to next functions */
859 * PCI Vital Product Data
/* Max polls of the VPD flag bit before declaring the device hung. */
862 #define PCI_VPD_TIMEOUT 1000000
/*
 * Read one 32-bit word of VPD at 4-byte-aligned offset `reg`: write
 * the address with flag bit (0x8000) clear, poll until the hardware
 * sets the flag to signal completion, then fetch the data register.
 * Presumably returns 0 on success and nonzero on timeout — the
 * timeout-check and return lines are missing from this extract.
 */
865 pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data)
867 int count = PCI_VPD_TIMEOUT;
869 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
871 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2);
873 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000) {
876 DELAY(1); /* limit looping */
878 *data = (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4));
/*
 * Write one 32-bit word of VPD at 4-byte-aligned offset `reg`: load the
 * data register, write the address with the flag bit (0x8000) SET, and
 * poll until hardware clears the flag to signal completion — the
 * inverse handshake of pci_read_vpd_reg().  Timeout/return lines are
 * missing from this extract.
 */
885 pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data)
887 int count = PCI_VPD_TIMEOUT;
889 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
891 WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4);
892 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2);
893 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000) {
896 DELAY(1); /* limit looping */
903 #undef PCI_VPD_TIMEOUT
/*
 * Cursor state for sequential VPD byte reads: buffers one 32-bit word
 * (val) and hands it out a byte at a time.  Most field declarations
 * are missing from this extract.
 */
905 struct vpd_readstate {
/*
 * Fetch the next VPD byte through the cursor, refilling the 4-byte
 * buffer (little-endian) from pci_read_vpd_reg() when it runs dry.
 * Presumably returns 0 on success, nonzero on a read failure — the
 * return/bookkeeping lines are missing from this extract.
 */
915 vpd_nextbyte(struct vpd_readstate *vrs, uint8_t *data)
920 if (vrs->bytesinval == 0) {
921 if (pci_read_vpd_reg(vrs->pcib, vrs->cfg, vrs->off, &reg))
923 vrs->val = le32toh(reg);
925 byte = vrs->val & 0xff;
/* Buffer not empty: shift out the next byte. */
928 vrs->val = vrs->val >> 8;
929 byte = vrs->val & 0xff;
/*
 * Public wrapper: report whether the given PCIe device's port
 * implements a slot (see pcie_slotimpl()).
 */
939 pcie_slot_implemented(device_t dev)
941 struct pci_devinfo *dinfo = device_get_ivars(dev);
943 return pcie_slotimpl(&dinfo->cfg);
/*
 * Program the PCIe Max Read Request Size field of the device control
 * register.  `rqsize` must already be encoded in the DEVCTL field
 * position (masked against PCIEM_DEVCTL_MAX_READRQ_MASK); values above
 * the 4096 encoding panic, as does calling this on a non-PCIe device.
 * The register is only rewritten when the current value differs, with
 * an informational printf around the change (OR-in of the new value is
 * elided in this extract).
 */
947 pcie_set_max_readrq(device_t dev, uint16_t rqsize)
952 rqsize &= PCIEM_DEVCTL_MAX_READRQ_MASK;
953 if (rqsize > PCIEM_DEVCTL_MAX_READRQ_4096) {
954 panic("%s: invalid max read request size 0x%02x\n",
955 device_get_nameunit(dev), rqsize);
958 expr_ptr = pci_get_pciecap_ptr(dev);
960 panic("%s: not PCIe device\n", device_get_nameunit(dev));
962 val = pci_read_config(dev, expr_ptr + PCIER_DEVCTRL, 2);
963 if ((val & PCIEM_DEVCTL_MAX_READRQ_MASK) != rqsize) {
965 device_printf(dev, "adjust device control 0x%04x", val);
967 val &= ~PCIEM_DEVCTL_MAX_READRQ_MASK;
969 pci_write_config(dev, expr_ptr + PCIER_DEVCTRL, val, 2);
972 kprintf(" -> 0x%04x\n", val);
/*
 * Return the raw Max Read Request Size field (still in its DEVCTL bit
 * position) from the PCIe device control register.  Panics if the
 * device has no PCIe capability.
 */
977 pcie_get_max_readrq(device_t dev)
982 expr_ptr = pci_get_pciecap_ptr(dev);
984 panic("%s: not PCIe device\n", device_get_nameunit(dev));
986 val = pci_read_config(dev, expr_ptr + PCIER_DEVCTRL, 2);
987 return (val & PCIEM_DEVCTL_MAX_READRQ_MASK);
/*
 * Parse the device's Vital Product Data into cfg->vpd: the identifier
 * string (vpd_ident), the read-only keyword array (vpd_ros, including
 * checksum validation via the "RV" keyword), and the read/write
 * keyword array (vpd_w).  Implemented as a byte-driven state machine
 * fed by vpd_nextbyte(); state numbers are annotated on each case.
 * On a bad checksum the RO data is discarded; on an I/O error all
 * parsed data is discarded.  Sets vpd_cached so the parse runs once.
 * NOTE(review): many interior lines (state transitions, checksum
 * accumulation, several local declarations) are missing from this
 * extract — do not infer exact control flow from what is visible.
 */
991 pci_read_vpd(device_t pcib, pcicfgregs *cfg)
993 struct vpd_readstate vrs;
998 int alloc, off; /* alloc/off for RO/W arrays */
1004 /* init vpd reader */
1012 name = remain = i = 0; /* shut up stupid gcc */
1013 alloc = off = 0; /* shut up stupid gcc */
1014 dflen = 0; /* shut up stupid gcc */
1016 while (state >= 0) {
1017 if (vpd_nextbyte(&vrs, &byte)) {
1022 kprintf("vpd: val: %#x, off: %d, bytesinval: %d, byte: %#hhx, " \
1023 "state: %d, remain: %d, name: %#x, i: %d\n", vrs.val,
1024 vrs.off, vrs.bytesinval, byte, state, remain, name, i);
1027 case 0: /* item name */
1029 if (vpd_nextbyte(&vrs, &byte2)) {
1034 if (vpd_nextbyte(&vrs, &byte2)) {
1038 remain |= byte2 << 8;
1039 if (remain > (0x7f*4 - vrs.off)) {
1042 "pci%d:%d:%d:%d: invalid VPD data, remain %#x\n",
1043 cfg->domain, cfg->bus, cfg->slot,
1048 remain = byte & 0x7;
1049 name = (byte >> 3) & 0xf;
1052 case 0x2: /* String */
1053 cfg->vpd.vpd_ident = kmalloc(remain + 1,
1054 M_DEVBUF, M_WAITOK);
1061 case 0x10: /* VPD-R */
1064 cfg->vpd.vpd_ros = kmalloc(alloc *
1065 sizeof(*cfg->vpd.vpd_ros), M_DEVBUF,
1069 case 0x11: /* VPD-W */
1072 cfg->vpd.vpd_w = kmalloc(alloc *
1073 sizeof(*cfg->vpd.vpd_w), M_DEVBUF,
1077 default: /* Invalid data, abort */
1083 case 1: /* Identifier String */
1084 cfg->vpd.vpd_ident[i++] = byte;
1087 cfg->vpd.vpd_ident[i] = '\0';
1092 case 2: /* VPD-R Keyword Header */
1094 cfg->vpd.vpd_ros = krealloc(cfg->vpd.vpd_ros,
1095 (alloc *= 2) * sizeof(*cfg->vpd.vpd_ros),
1096 M_DEVBUF, M_WAITOK | M_ZERO);
1098 cfg->vpd.vpd_ros[off].keyword[0] = byte;
1099 if (vpd_nextbyte(&vrs, &byte2)) {
1103 cfg->vpd.vpd_ros[off].keyword[1] = byte2;
1104 if (vpd_nextbyte(&vrs, &byte2)) {
1110 strncmp(cfg->vpd.vpd_ros[off].keyword, "RV",
1113 * if this happens, we can't trust the rest
1117 "pci%d:%d:%d:%d: bad keyword length: %d\n",
1118 cfg->domain, cfg->bus, cfg->slot,
1123 } else if (dflen == 0) {
1124 cfg->vpd.vpd_ros[off].value = kmalloc(1 *
1125 sizeof(*cfg->vpd.vpd_ros[off].value),
1126 M_DEVBUF, M_WAITOK);
1127 cfg->vpd.vpd_ros[off].value[0] = '\x00';
1129 cfg->vpd.vpd_ros[off].value = kmalloc(
1131 sizeof(*cfg->vpd.vpd_ros[off].value),
1132 M_DEVBUF, M_WAITOK);
1135 /* keep in sync w/ state 3's transistions */
1136 if (dflen == 0 && remain == 0)
1138 else if (dflen == 0)
1144 case 3: /* VPD-R Keyword Value */
1145 cfg->vpd.vpd_ros[off].value[i++] = byte;
1146 if (strncmp(cfg->vpd.vpd_ros[off].keyword,
1147 "RV", 2) == 0 && cksumvalid == -1) {
1153 "pci%d:%d:%d:%d: bad VPD cksum, remain %hhu\n",
1154 cfg->domain, cfg->bus,
1155 cfg->slot, cfg->func,
1164 /* keep in sync w/ state 2's transistions */
1166 cfg->vpd.vpd_ros[off++].value[i++] = '\0';
1167 if (dflen == 0 && remain == 0) {
1168 cfg->vpd.vpd_rocnt = off;
1169 cfg->vpd.vpd_ros = krealloc(cfg->vpd.vpd_ros,
1170 off * sizeof(*cfg->vpd.vpd_ros),
1171 M_DEVBUF, M_WAITOK | M_ZERO);
1173 } else if (dflen == 0)
1183 case 5: /* VPD-W Keyword Header */
1185 cfg->vpd.vpd_w = krealloc(cfg->vpd.vpd_w,
1186 (alloc *= 2) * sizeof(*cfg->vpd.vpd_w),
1187 M_DEVBUF, M_WAITOK | M_ZERO);
1189 cfg->vpd.vpd_w[off].keyword[0] = byte;
1190 if (vpd_nextbyte(&vrs, &byte2)) {
1194 cfg->vpd.vpd_w[off].keyword[1] = byte2;
1195 if (vpd_nextbyte(&vrs, &byte2)) {
1199 cfg->vpd.vpd_w[off].len = dflen = byte2;
1200 cfg->vpd.vpd_w[off].start = vrs.off - vrs.bytesinval;
1201 cfg->vpd.vpd_w[off].value = kmalloc((dflen + 1) *
1202 sizeof(*cfg->vpd.vpd_w[off].value),
1203 M_DEVBUF, M_WAITOK);
1206 /* keep in sync w/ state 6's transistions */
1207 if (dflen == 0 && remain == 0)
1209 else if (dflen == 0)
1215 case 6: /* VPD-W Keyword Value */
1216 cfg->vpd.vpd_w[off].value[i++] = byte;
1219 /* keep in sync w/ state 5's transistions */
1221 cfg->vpd.vpd_w[off++].value[i++] = '\0';
1222 if (dflen == 0 && remain == 0) {
1223 cfg->vpd.vpd_wcnt = off;
1224 cfg->vpd.vpd_w = krealloc(cfg->vpd.vpd_w,
1225 off * sizeof(*cfg->vpd.vpd_w),
1226 M_DEVBUF, M_WAITOK | M_ZERO);
1228 } else if (dflen == 0)
1233 kprintf("pci%d:%d:%d:%d: invalid state: %d\n",
1234 cfg->domain, cfg->bus, cfg->slot, cfg->func,
1241 if (cksumvalid == 0 || state < -1) {
1242 /* read-only data bad, clean up */
1243 if (cfg->vpd.vpd_ros != NULL) {
1244 for (off = 0; cfg->vpd.vpd_ros[off].value; off++)
1245 kfree(cfg->vpd.vpd_ros[off].value, M_DEVBUF);
1246 kfree(cfg->vpd.vpd_ros, M_DEVBUF);
1247 cfg->vpd.vpd_ros = NULL;
1251 /* I/O error, clean up */
1252 kprintf("pci%d:%d:%d:%d: failed to read VPD data.\n",
1253 cfg->domain, cfg->bus, cfg->slot, cfg->func);
1254 if (cfg->vpd.vpd_ident != NULL) {
1255 kfree(cfg->vpd.vpd_ident, M_DEVBUF);
1256 cfg->vpd.vpd_ident = NULL;
1258 if (cfg->vpd.vpd_w != NULL) {
1259 for (off = 0; cfg->vpd.vpd_w[off].value; off++)
1260 kfree(cfg->vpd.vpd_w[off].value, M_DEVBUF);
1261 kfree(cfg->vpd.vpd_w, M_DEVBUF);
1262 cfg->vpd.vpd_w = NULL;
1265 cfg->vpd.vpd_cached = 1;
/*
 * Bus method: return the child's VPD identifier string via *identptr,
 * triggering a lazy pci_read_vpd() parse on first use.  Presumably
 * returns ENXIO when no identifier exists and 0 on success — the
 * return lines are missing from this extract.
 */
1271 pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr)
1273 struct pci_devinfo *dinfo = device_get_ivars(child);
1274 pcicfgregs *cfg = &dinfo->cfg;
1276 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1277 pci_read_vpd(device_get_parent(dev), cfg);
1279 *identptr = cfg->vpd.vpd_ident;
1281 if (*identptr == NULL)
/*
 * Bus method: look up a VPD read-only keyword (2-byte code, e.g. "PN")
 * and hand back its value string via *vptr, lazily parsing VPD first.
 * The success/failure return lines are missing from this extract.
 */
1288 pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw,
1291 struct pci_devinfo *dinfo = device_get_ivars(child);
1292 pcicfgregs *cfg = &dinfo->cfg;
1295 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1296 pci_read_vpd(device_get_parent(dev), cfg);
1298 for (i = 0; i < cfg->vpd.vpd_rocnt; i++)
1299 if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword,
1300 sizeof(cfg->vpd.vpd_ros[i].keyword)) == 0) {
1301 *vptr = cfg->vpd.vpd_ros[i].value;
1304 if (i != cfg->vpd.vpd_rocnt)
1312 * Return the offset in configuration space of the requested extended
1313 * capability entry or 0 if the specified capability was not found.
/*
 * Bus method: walk the child's capability list looking for
 * `capability`.  Returns ENXIO for header types without capability
 * support; the loop's success path (store offset, return 0) and
 * termination are partially missing from this extract.
 */
1316 pci_find_extcap_method(device_t dev, device_t child, int capability,
1319 struct pci_devinfo *dinfo = device_get_ivars(child);
1320 pcicfgregs *cfg = &dinfo->cfg;
1325 * Check the CAP_LIST bit of the PCI status register first.
1327 status = pci_read_config(child, PCIR_STATUS, 2);
1328 if (!(status & PCIM_STATUS_CAPPRESENT))
1332 * Determine the start pointer of the capabilities list.
1334 switch (cfg->hdrtype & PCIM_HDRTYPE) {
1340 ptr = PCIR_CAP_PTR_2;
1344 return (ENXIO); /* no extended capabilities support */
1346 ptr = pci_read_config(child, ptr, 1);
1349 * Traverse the capabilities list.
1352 if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
1357 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1364 * Support for MSI-X message interrupts.
/*
 * Program one 16-byte MSI-X table entry (address low/high at offsets
 * 0/4, data at offset 8) via the mapped table BAR, then enable the
 * HT MSI mapping window if the device sits behind an HT bridge.
 */
1367 pci_setup_msix_vector(device_t dev, u_int index, uint64_t address,
1370 struct pci_devinfo *dinfo = device_get_ivars(dev);
1371 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1374 KASSERT(msix->msix_table_len > index, ("bogus index"));
1375 offset = msix->msix_table_offset + index * 16;
1376 bus_write_4(msix->msix_table_res, offset, address & 0xffffffff);
1377 bus_write_4(msix->msix_table_res, offset + 4, address >> 32);
1378 bus_write_4(msix->msix_table_res, offset + 8, data);
1380 /* Enable MSI -> HT mapping. */
1381 pci_ht_map_msi(dev, address);
/*
 * Set the per-vector mask bit in the vector-control dword (offset 12
 * of the 16-byte table entry), skipping the write if already masked.
 */
1385 pci_mask_msix_vector(device_t dev, u_int index)
1387 struct pci_devinfo *dinfo = device_get_ivars(dev);
1388 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1389 uint32_t offset, val;
1391 KASSERT(msix->msix_msgnum > index, ("bogus index"));
1392 offset = msix->msix_table_offset + index * 16 + 12;
1393 val = bus_read_4(msix->msix_table_res, offset);
1394 if (!(val & PCIM_MSIX_VCTRL_MASK)) {
1395 val |= PCIM_MSIX_VCTRL_MASK;
1396 bus_write_4(msix->msix_table_res, offset, val);
/*
 * Clear the per-vector mask bit for MSI-X vector `index' (inverse of
 * pci_mask_msix_vector); writes the vector control dword only when the
 * mask bit is currently set.
 */
1401 pci_unmask_msix_vector(device_t dev, u_int index)
1403 struct pci_devinfo *dinfo = device_get_ivars(dev);
1404 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1405 uint32_t offset, val;
1407 KASSERT(msix->msix_table_len > index, ("bogus index"));
1408 offset = msix->msix_table_offset + index * 16 + 12;
1409 val = bus_read_4(msix->msix_table_res, offset);
1410 if (val & PCIM_MSIX_VCTRL_MASK) {
1411 val &= ~PCIM_MSIX_VCTRL_MASK;
1412 bus_write_4(msix->msix_table_res, offset, val);
/*
 * Test vector `index''s bit in the MSI-X Pending Bit Array; returns
 * non-zero when an interrupt is pending for that vector.  The PBA is
 * read in 32-bit chunks, one bit per vector.
 */
1417 pci_pending_msix_vector(device_t dev, u_int index)
1419 struct pci_devinfo *dinfo = device_get_ivars(dev);
1420 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1421 uint32_t offset, bit;
1423 KASSERT(msix->msix_table_len > index, ("bogus index"));
1424 offset = msix->msix_pba_offset + (index / 32) * 4;
1425 bit = 1 << index % 32;
1426 return (bus_read_4(msix->msix_pba_res, offset) & bit);
1430 * Restore MSI-X registers and table during resume. If MSI-X is
1431 * enabled then walk the virtual table to restore the actual MSI-X
/* Re-programs the hardware from the software copies kept in cfg.msix. */
1435 pci_resume_msix(device_t dev)
1437 struct pci_devinfo *dinfo = device_get_ivars(dev);
1438 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1439 struct msix_table_entry *mte;
1440 struct msix_vector *mv;
1443 if (msix->msix_alloc > 0) {
1444 /* First, mask all vectors. */
1445 for (i = 0; i < msix->msix_msgnum; i++)
1446 pci_mask_msix_vector(dev, i);
1448 /* Second, program any messages with at least one handler. */
1449 for (i = 0; i < msix->msix_table_len; i++) {
1450 mte = &msix->msix_table[i];
1451 if (mte->mte_vector == 0 || mte->mte_handlers == 0)
/* mte_vector is 1-based; 0 means "unused table slot". */
1453 mv = &msix->msix_vectors[mte->mte_vector - 1];
1454 pci_setup_msix_vector(dev, i, mv->mv_address,
1456 pci_unmask_msix_vector(dev, i);
/* Finally restore the saved MSI-X control register. */
1459 pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
1460 msix->msix_ctrl, 2);
1464 * Attempt to allocate *count MSI-X messages. The actual number allocated is
1465 * returned in *count. After this function returns, each message will be
1466 * available to the driver as SYS_RES_IRQ resources starting at rid 1.
/*
 * NOTE(review): several error-return and bookkeeping lines are elided in
 * this extract (e.g. where `actual' is set); confirm against full source.
 */
1469 pci_alloc_msix_method(device_t dev, device_t child, int *count)
1471 struct pci_devinfo *dinfo = device_get_ivars(child);
1472 pcicfgregs *cfg = &dinfo->cfg;
1473 struct resource_list_entry *rle;
1474 int actual, error, i, irq, max;
1476 /* Don't let count == 0 get us into trouble. */
1480 /* If rid 0 is allocated, then fail. */
1481 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
1482 if (rle != NULL && rle->res != NULL)
1485 /* Already have allocated messages? */
1486 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
1489 /* If MSI is blacklisted for this system, fail. */
1490 if (pci_msi_blacklisted())
1493 /* MSI-X capability present? */
1494 if (cfg->msix.msix_location == 0 || !pci_do_msix)
1497 /* Make sure the appropriate BARs are mapped. */
1498 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1499 cfg->msix.msix_table_bar);
1500 if (rle == NULL || rle->res == NULL ||
1501 !(rman_get_flags(rle->res) & RF_ACTIVE))
1503 cfg->msix.msix_table_res = rle->res;
/* The PBA may live in a different BAR than the vector table. */
1504 if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
1505 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1506 cfg->msix.msix_pba_bar);
1507 if (rle == NULL || rle->res == NULL ||
1508 !(rman_get_flags(rle->res) & RF_ACTIVE))
1511 cfg->msix.msix_pba_res = rle->res;
1514 device_printf(child,
1515 "attempting to allocate %d MSI-X vectors (%d supported)\n",
1516 *count, cfg->msix.msix_msgnum);
1517 max = min(*count, cfg->msix.msix_msgnum);
1518 for (i = 0; i < max; i++) {
1519 /* Allocate a message. */
1520 error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq);
1523 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
1530 device_printf(child,
1531 "could not allocate any MSI-X vectors\n");
/* Report the allocated IRQs (single IRQ, or ranges when several). */
1537 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1);
1539 device_printf(child, "using IRQ %lu for MSI-X\n",
1545 * Be fancy and try to print contiguous runs of
1546 * IRQ values as ranges. 'irq' is the previous IRQ.
1547 * 'run' is true if we are in a range.
1549 device_printf(child, "using IRQs %lu", rle->start);
1552 for (i = 1; i < actual; i++) {
1553 rle = resource_list_find(&dinfo->resources,
1554 SYS_RES_IRQ, i + 1);
1556 /* Still in a run? */
1557 if (rle->start == irq + 1) {
1563 /* Finish previous range. */
1565 kprintf("-%d", irq);
1569 /* Start new range. */
1570 kprintf(",%lu", rle->start);
1574 /* Unfinished range? */
1576 kprintf("-%d", irq);
1577 kprintf(" for MSI-X\n");
1581 /* Mask all vectors. */
1582 for (i = 0; i < cfg->msix.msix_msgnum; i++)
1583 pci_mask_msix_vector(child, i);
1585 /* Allocate and initialize vector data and virtual table. */
1586 cfg->msix.msix_vectors = kmalloc(sizeof(struct msix_vector) * actual,
1587 M_DEVBUF, M_WAITOK | M_ZERO);
1588 cfg->msix.msix_table = kmalloc(sizeof(struct msix_table_entry) * actual,
1589 M_DEVBUF, M_WAITOK | M_ZERO);
1590 for (i = 0; i < actual; i++) {
1591 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1592 cfg->msix.msix_vectors[i].mv_irq = rle->start;
1593 cfg->msix.msix_table[i].mte_vector = i + 1;
1596 /* Update control register to enable MSI-X. */
1597 cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1598 pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
1599 cfg->msix.msix_ctrl, 2);
1601 /* Update counts of alloc'd messages. */
1602 cfg->msix.msix_alloc = actual;
1603 cfg->msix.msix_table_len = actual;
/*
 * Tear down all MSI-X state for `child': verify no vector is still in
 * use, disable MSI-X in the control register, delete the SYS_RES_IRQ
 * resource-list entries, return the IRQs to the parent bridge, and free
 * the software table/vector arrays.
 */
1610 pci_release_msix(device_t dev, device_t child)
1612 struct pci_devinfo *dinfo = device_get_ivars(child);
1613 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1614 struct resource_list_entry *rle;
1617 /* Do we have any messages to release? */
1618 if (msix->msix_alloc == 0)
1621 /* Make sure none of the resources are allocated. */
1622 for (i = 0; i < msix->msix_table_len; i++) {
1623 if (msix->msix_table[i].mte_vector == 0)
1625 if (msix->msix_table[i].mte_handlers > 0)
1627 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1628 KASSERT(rle != NULL, ("missing resource"));
1629 if (rle->res != NULL)
1633 /* Update control register to disable MSI-X. */
1634 msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE;
1635 pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL,
1636 msix->msix_ctrl, 2);
1638 /* Free the resource list entries. */
1639 for (i = 0; i < msix->msix_table_len; i++) {
1640 if (msix->msix_table[i].mte_vector == 0)
1642 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1644 kfree(msix->msix_table, M_DEVBUF);
1645 msix->msix_table_len = 0;
1647 /* Release the IRQs. */
1648 for (i = 0; i < msix->msix_alloc; i++)
1649 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1650 msix->msix_vectors[i].mv_irq);
1651 kfree(msix->msix_vectors, M_DEVBUF);
1652 msix->msix_alloc = 0;
1658 * Return the max supported MSI-X messages this device supports.
1659 * Basically, assuming the MD code can alloc messages, this function
1660 * should return the maximum value that pci_alloc_msix() can return.
1661 * Thus, it is subject to the tunables, etc.
1664 pci_msix_count_method(device_t dev, device_t child)
1666 struct pci_devinfo *dinfo = device_get_ivars(child);
1667 struct pcicfg_msix *msix = &dinfo->cfg.msix;
/* Only meaningful when the capability exists and pci_do_msix allows it. */
1669 if (pci_do_msix && msix->msix_location != 0)
1670 return (msix->msix_msgnum);
1675 * HyperTransport MSI mapping control
/*
 * Enable or disable the HT MSI-mapping capability depending on `addr':
 * a non-zero MSI address enables the mapping (only when the address
 * falls in the HT mapping window -- the >> 20 comparison), addr == 0
 * disables it.  No-op for devices without the HT MSI capability.
 */
1678 pci_ht_map_msi(device_t dev, uint64_t addr)
1680 struct pci_devinfo *dinfo = device_get_ivars(dev);
1681 struct pcicfg_ht *ht = &dinfo->cfg.ht;
1686 if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) &&
1687 ht->ht_msiaddr >> 20 == addr >> 20) {
1688 /* Enable MSI -> HT mapping. */
1689 ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
1690 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1694 if (!addr && ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) {
1695 /* Disable MSI -> HT mapping. */
1696 ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE;
1697 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1703 * Support for MSI message signalled interrupts.
/*
 * Program the MSI address/data registers and set the MSI enable bit.
 * 64-bit capable devices use the high-address register and the 64-bit
 * data register offset; 32-bit devices use PCIR_MSI_DATA.
 */
1706 pci_enable_msi(device_t dev, uint64_t address, uint16_t data)
1708 struct pci_devinfo *dinfo = device_get_ivars(dev);
1709 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1711 /* Write data and address values. */
1712 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
1713 address & 0xffffffff, 4);
1714 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1715 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR_HIGH,
1717 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA_64BIT,
1720 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA, data,
1723 /* Enable MSI in the control register. */
1724 msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE;
1725 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1728 /* Enable MSI -> HT mapping. */
1729 pci_ht_map_msi(dev, address);
/*
 * Disable MSI for `dev': tear down any HT MSI mapping first (address 0
 * means "disable"), then clear the MSI enable bit in the control
 * register.
 */
1733 pci_disable_msi(device_t dev)
1735 struct pci_devinfo *dinfo = device_get_ivars(dev);
1736 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1738 /* Disable MSI -> HT mapping. */
1739 pci_ht_map_msi(dev, 0);
1741 /* Disable MSI in the control register. */
1742 msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE;
1743 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1748 * Restore MSI registers during resume. If MSI is enabled then
1749 * restore the data and address registers in addition to the control
/* Re-programs hardware from the software copies saved in cfg.msi. */
1753 pci_resume_msi(device_t dev)
1755 struct pci_devinfo *dinfo = device_get_ivars(dev);
1756 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1760 if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) {
1761 address = msi->msi_addr;
1762 data = msi->msi_data;
1763 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
1764 address & 0xffffffff, 4);
1765 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1766 pci_write_config(dev, msi->msi_location +
1767 PCIR_MSI_ADDR_HIGH, address >> 32, 4);
1768 pci_write_config(dev, msi->msi_location +
1769 PCIR_MSI_DATA_64BIT, data, 2);
1771 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA,
/* The control register is restored unconditionally. */
1774 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1779 * Returns true if the specified device is blacklisted because MSI
/* Scans the pci_quirks table for a PCI_QUIRK_DISABLE_MSI entry matching
 * this device's devid; the blacklist can be ignored entirely via the
 * pci_honor_msi_blacklist tunable. */
1783 pci_msi_device_blacklisted(device_t dev)
1785 struct pci_quirk *q;
1787 if (!pci_honor_msi_blacklist)
1790 for (q = &pci_quirks[0]; q->devid; q++) {
1791 if (q->devid == pci_get_devid(dev) &&
1792 q->type == PCI_QUIRK_DISABLE_MSI)
1799 * Determine if MSI is blacklisted globally on this system. Currently,
1800 * we just check for blacklisted chipsets as represented by the
1801 * host-PCI bridge at device 0:0:0. In the future, it may become
1802 * necessary to check other system attributes, such as the kenv values
1803 * that give the motherboard manufacturer and model number.
1806 pci_msi_blacklisted(void)
1810 if (!pci_honor_msi_blacklist)
1813 /* Blacklist all non-PCI-express and non-PCI-X chipsets. */
1814 if (!(pcie_chipset || pcix_chipset))
/* Check the host bridge at 0:0:0 against the per-device quirk table. */
1817 dev = pci_find_bsf(0, 0, 0);
1819 return (pci_msi_device_blacklisted(dev));
1824 * Attempt to allocate count MSI messages on start_cpuid.
1826 * If start_cpuid < 0, then the MSI messages' target CPU will be
1827 * selected automatically.
1829 * If the caller explicitly specified the MSI messages' target CPU,
1830 * i.e. start_cpuid >= 0, then we will try to allocate the count MSI
1831 * messages on the specified CPU, if the allocation fails due to MD
1832 * does not have enough vectors (EMSGSIZE), then we will try next
1833 * available CPU, until the allocation fails on all CPUs.
1835 * EMSGSIZE will be returned, if all available CPUs does not have
1836 * enough vectors for the requested amount of MSI messages. Caller
1837 * should either reduce the amount of MSI messages to be requested,
1838 * or simply giving up using MSI.
1840 * The available SYS_RES_IRQ resources' rids, which are >= 1, are
1841 * returned in 'rid' array, if the allocation succeeds.
1844 pci_alloc_msi_method(device_t dev, device_t child, int *rid, int count,
1847 struct pci_devinfo *dinfo = device_get_ivars(child);
1848 pcicfgregs *cfg = &dinfo->cfg;
1849 struct resource_list_entry *rle;
1850 int error, i, irqs[32], cpuid = 0;
/* MSI allows at most 32 messages and the count must be a power of 2. */
1853 KASSERT(count != 0 && count <= 32 && powerof2(count),
1854 ("invalid MSI count %d\n", count));
1855 KASSERT(start_cpuid < ncpus, ("invalid cpuid %d\n", start_cpuid));
1857 /* If rid 0 is allocated, then fail. */
1858 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
1859 if (rle != NULL && rle->res != NULL)
1862 /* Already have allocated messages? */
1863 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
1866 /* If MSI is blacklisted for this system, fail. */
1867 if (pci_msi_blacklisted())
1870 /* MSI capability present? */
1871 if (cfg->msi.msi_location == 0 || !pci_do_msi)
1874 KASSERT(count <= cfg->msi.msi_msgnum, ("large MSI count %d, max %d\n",
1875 count, cfg->msi.msi_msgnum));
1878 device_printf(child,
1879 "attempting to allocate %d MSI vectors (%d supported)\n",
1880 count, cfg->msi.msi_msgnum);
/* Round-robin the starting CPU when the caller did not pick one. */
1883 if (start_cpuid < 0)
1884 start_cpuid = atomic_fetchadd_int(&pci_msi_cpuid, 1) % ncpus;
/* Try each CPU in turn; EMSGSIZE means "not enough vectors here". */
1887 for (i = 0; i < ncpus; ++i) {
1888 cpuid = (start_cpuid + i) % ncpus;
1890 error = PCIB_ALLOC_MSI(device_get_parent(dev), child, count,
1891 cfg->msi.msi_msgnum, irqs, cpuid);
1894 else if (error != EMSGSIZE)
1901 * We now have N messages mapped onto SYS_RES_IRQ resources in
1902 * the irqs[] array, so add new resources starting at rid 1.
1904 for (i = 0; i < count; i++) {
1906 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
1907 irqs[i], irqs[i], 1, cpuid);
1912 device_printf(child, "using IRQ %d on cpu%d for MSI\n",
1918 * Be fancy and try to print contiguous runs
1919 * of IRQ values as ranges. 'run' is true if
1920 * we are in a range.
1922 device_printf(child, "using IRQs %d", irqs[0]);
1924 for (i = 1; i < count; i++) {
1926 /* Still in a run? */
1927 if (irqs[i] == irqs[i - 1] + 1) {
1932 /* Finish previous range. */
1934 kprintf("-%d", irqs[i - 1]);
1938 /* Start new range. */
1939 kprintf(",%d", irqs[i]);
1942 /* Unfinished range? */
1944 kprintf("-%d", irqs[count - 1]);
1945 kprintf(" for MSI on cpu%d\n", cpuid);
1949 /* Update control register with count. */
/* Multiple Message Enable field encodes log2(count) in bits 6:4. */
1950 ctrl = cfg->msi.msi_ctrl;
1951 ctrl &= ~PCIM_MSICTRL_MME_MASK;
1952 ctrl |= (ffs(count) - 1) << 4;
1953 cfg->msi.msi_ctrl = ctrl;
1954 pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2);
1956 /* Update counts of alloc'd messages. */
1957 cfg->msi.msi_alloc = count;
1958 cfg->msi.msi_handlers = 0;
1962 /* Release the MSI messages associated with this device. */
/*
 * Verifies no handler is attached and no IRQ resource is still held,
 * checks all messages target the same CPU, clears the MME field in the
 * control register, then returns the IRQs to the parent bridge and
 * deletes the resource-list entries.
 */
1964 pci_release_msi_method(device_t dev, device_t child)
1966 struct pci_devinfo *dinfo = device_get_ivars(child);
1967 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1968 struct resource_list_entry *rle;
1969 int i, irqs[32], cpuid = -1;
1971 /* Do we have any messages to release? */
1972 if (msi->msi_alloc == 0)
1974 KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages"));
1976 /* Make sure none of the resources are allocated. */
1977 if (msi->msi_handlers > 0)
1979 for (i = 0; i < msi->msi_alloc; i++) {
1980 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1981 KASSERT(rle != NULL, ("missing MSI resource"));
1982 if (rle->res != NULL)
1986 KASSERT(cpuid >= 0 && cpuid < ncpus,
1987 ("invalid MSI target cpuid %d\n", cpuid));
1989 KASSERT(rle->cpuid == cpuid,
1990 ("MSI targets different cpus, "
1991 "was cpu%d, now cpu%d", cpuid, rle->cpuid));
1993 irqs[i] = rle->start;
1996 /* Update control register with 0 count. */
1997 KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE),
1998 ("%s: MSI still enabled", __func__));
1999 msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK;
2000 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
2003 /* Release the messages. */
2004 PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs,
2006 for (i = 0; i < msi->msi_alloc; i++)
2007 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
2009 /* Update alloc count. */
2017 * Return the max supported MSI messages this device supports.
2018 * Basically, assuming the MD code can alloc messages, this function
2019 * should return the maximum value that pci_alloc_msi() can return.
2020 * Thus, it is subject to the tunables, etc.
2023 pci_msi_count_method(device_t dev, device_t child)
2025 struct pci_devinfo *dinfo = device_get_ivars(child);
2026 struct pcicfg_msi *msi = &dinfo->cfg.msi;
/* Only meaningful when the capability exists and pci_do_msi allows it. */
2028 if (pci_do_msi && msi->msi_location != 0)
2029 return (msi->msi_msgnum);
2033 /* kfree pcicfgregs structure and all depending data structures */
/*
 * Frees the VPD strings (only if the device actually had a VPD register),
 * unlinks the devinfo from the global pci_devq list, and frees it.
 */
2036 pci_freecfg(struct pci_devinfo *dinfo)
2038 struct devlist *devlist_head;
2041 devlist_head = &pci_devq;
2043 if (dinfo->cfg.vpd.vpd_reg) {
2044 kfree(dinfo->cfg.vpd.vpd_ident, M_DEVBUF);
2045 for (i = 0; i < dinfo->cfg.vpd.vpd_rocnt; i++)
2046 kfree(dinfo->cfg.vpd.vpd_ros[i].value, M_DEVBUF);
2047 kfree(dinfo->cfg.vpd.vpd_ros, M_DEVBUF);
2048 for (i = 0; i < dinfo->cfg.vpd.vpd_wcnt; i++)
2049 kfree(dinfo->cfg.vpd.vpd_w[i].value, M_DEVBUF);
2050 kfree(dinfo->cfg.vpd.vpd_w, M_DEVBUF);
2052 STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links);
2053 kfree(dinfo, M_DEVBUF);
2055 /* increment the generation count */
2058 /* we're losing one device */
2064 * PCI power management
2067 pci_set_powerstate_method(device_t dev, device_t child, int state)
2069 struct pci_devinfo *dinfo = device_get_ivars(child);
2070 pcicfgregs *cfg = &dinfo->cfg;
2072 int result, oldstate, highest, delay;
/* Device must have the power-management capability at all. */
2074 if (cfg->pp.pp_cap == 0)
2075 return (EOPNOTSUPP);
2078 * Optimize a no state change request away. While it would be OK to
2079 * write to the hardware in theory, some devices have shown odd
2080 * behavior when going from D3 -> D3.
2082 oldstate = pci_get_powerstate(child);
2083 if (oldstate == state)
2087 * The PCI power management specification states that after a state
2088 * transition between PCI power states, system software must
2089 * guarantee a minimal delay before the function accesses the device.
2090 * Compute the worst case delay that we need to guarantee before we
2091 * access the device. Many devices will be responsive much more
2092 * quickly than this delay, but there are some that don't respond
2093 * instantly to state changes. Transitions to/from D3 state require
2094 * 10ms, while D2 requires 200us, and D0/1 require none. The delay
2095 * is done below with DELAY rather than a sleeper function because
2096 * this function can be called from contexts where we cannot sleep.
2098 highest = (oldstate > state) ? oldstate : state;
2099 if (highest == PCI_POWERSTATE_D3)
2101 else if (highest == PCI_POWERSTATE_D2)
/* Preserve the non-power-state bits of the PM status register. */
2105 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2)
2106 & ~PCIM_PSTAT_DMASK;
2109 case PCI_POWERSTATE_D0:
2110 status |= PCIM_PSTAT_D0;
2112 case PCI_POWERSTATE_D1:
/* D1/D2 are optional; fail if the capability doesn't advertise them. */
2113 if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0)
2114 return (EOPNOTSUPP);
2115 status |= PCIM_PSTAT_D1;
2117 case PCI_POWERSTATE_D2:
2118 if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0)
2119 return (EOPNOTSUPP);
2120 status |= PCIM_PSTAT_D2;
2122 case PCI_POWERSTATE_D3:
2123 status |= PCIM_PSTAT_D3;
2131 "pci%d:%d:%d:%d: Transition from D%d to D%d\n",
2132 dinfo->cfg.domain, dinfo->cfg.bus, dinfo->cfg.slot,
2133 dinfo->cfg.func, oldstate, state);
2135 PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2);
/*
 * Read the current power state from the PM status register; devices
 * without the power-management capability are reported as D0.
 */
2142 pci_get_powerstate_method(device_t dev, device_t child)
2144 struct pci_devinfo *dinfo = device_get_ivars(child);
2145 pcicfgregs *cfg = &dinfo->cfg;
2149 if (cfg->pp.pp_cap != 0) {
2150 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2);
2151 switch (status & PCIM_PSTAT_DMASK) {
2153 result = PCI_POWERSTATE_D0;
2156 result = PCI_POWERSTATE_D1;
2159 result = PCI_POWERSTATE_D2;
2162 result = PCI_POWERSTATE_D3;
2165 result = PCI_POWERSTATE_UNKNOWN;
2169 /* No support, device is always at D0 */
2170 result = PCI_POWERSTATE_D0;
2176 * Some convenience functions for PCI device drivers.
/* Read-modify-write helper: OR `bit' into the PCI command register.
 * NOTE(review): the actual `command |= bit;' line is elided here. */
2179 static __inline void
2180 pci_set_command_bit(device_t dev, device_t child, uint16_t bit)
2184 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2186 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
/* Read-modify-write helper: clear `bit' in the PCI command register.
 * NOTE(review): the actual `command &= ~bit;' line is elided here. */
2189 static __inline void
2190 pci_clear_command_bit(device_t dev, device_t child, uint16_t bit)
2194 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2196 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
/* Turn on bus mastering in the child's PCI command register. */
2200 pci_enable_busmaster_method(device_t dev, device_t child)
2202 pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
/* Turn off bus mastering in the child's PCI command register. */
2207 pci_disable_busmaster_method(device_t dev, device_t child)
2209 pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
/*
 * Enable I/O port or memory decoding (per `space') in the child's
 * command register, then read it back to confirm the bit stuck.
 */
2214 pci_enable_io_method(device_t dev, device_t child, int space)
2224 case SYS_RES_IOPORT:
2225 bit = PCIM_CMD_PORTEN;
2228 case SYS_RES_MEMORY:
2229 bit = PCIM_CMD_MEMEN;
2235 pci_set_command_bit(dev, child, bit);
2236 /* Some devices seem to need a brief stall here; what to do? */
2237 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2240 device_printf(child, "failed to enable %s mapping!\n", error);
/*
 * Disable I/O port or memory decoding (per `space') in the child's
 * command register, then read it back to confirm the bit cleared.
 */
2245 pci_disable_io_method(device_t dev, device_t child, int space)
2255 case SYS_RES_IOPORT:
2256 bit = PCIM_CMD_PORTEN;
2259 case SYS_RES_MEMORY:
2260 bit = PCIM_CMD_MEMEN;
2266 pci_clear_command_bit(dev, child, bit);
2267 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2268 if (command & bit) {
2269 device_printf(child, "failed to disable %s mapping!\n", error);
2276 * New style pci driver. Parent device is either a pci-host-bridge or a
2277 * pci-pci-bridge. Both kinds are represented by instances of pcib.
/*
 * Dump a verbose description of a discovered device: ids, location,
 * class, command/status, timing, interrupt, and the power-management,
 * MSI, MSI-X, and PCI Express capabilities if present.
 */
2281 pci_print_verbose(struct pci_devinfo *dinfo)
2285 pcicfgregs *cfg = &dinfo->cfg;
2287 kprintf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n",
2288 cfg->vendor, cfg->device, cfg->revid);
2289 kprintf("\tdomain=%d, bus=%d, slot=%d, func=%d\n",
2290 cfg->domain, cfg->bus, cfg->slot, cfg->func);
2291 kprintf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n",
2292 cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype,
2294 kprintf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n",
2295 cfg->cmdreg, cfg->statreg, cfg->cachelnsz);
2296 kprintf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n",
2297 cfg->lattimer, cfg->lattimer * 30, cfg->mingnt,
2298 cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250);
2299 if (cfg->intpin > 0)
2300 kprintf("\tintpin=%c, irq=%d\n",
2301 cfg->intpin +'a' -1, cfg->intline);
2302 if (cfg->pp.pp_cap) {
2305 status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2);
2306 kprintf("\tpowerspec %d supports D0%s%s D3 current D%d\n",
2307 cfg->pp.pp_cap & PCIM_PCAP_SPEC,
2308 cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "",
2309 cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "",
2310 status & PCIM_PSTAT_DMASK);
2312 if (cfg->msi.msi_location) {
2315 ctrl = cfg->msi.msi_ctrl;
2316 kprintf("\tMSI supports %d message%s%s%s\n",
2317 cfg->msi.msi_msgnum,
2318 (cfg->msi.msi_msgnum == 1) ? "" : "s",
2319 (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "",
2320 (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":"");
2322 if (cfg->msix.msix_location) {
2323 kprintf("\tMSI-X supports %d message%s ",
2324 cfg->msix.msix_msgnum,
2325 (cfg->msix.msix_msgnum == 1) ? "" : "s");
2326 if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar)
2327 kprintf("in map 0x%x\n",
2328 cfg->msix.msix_table_bar);
2330 kprintf("in maps 0x%x and 0x%x\n",
2331 cfg->msix.msix_table_bar,
2332 cfg->msix.msix_pba_bar);
2334 pci_print_verbose_expr(cfg);
/*
 * Print the PCI Express capability details: spec version, capability
 * word, decoded port type, and (when the slot is implemented) the slot
 * capability including hot-plug support.
 */
2339 pci_print_verbose_expr(const pcicfgregs *cfg)
2341 const struct pcicfg_expr *expr = &cfg->expr;
2342 const char *port_name;
2348 if (expr->expr_ptr == 0) /* No PCI Express capability */
2351 kprintf("\tPCI Express ver.%d cap=0x%04x",
2352 expr->expr_cap & PCIEM_CAP_VER_MASK, expr->expr_cap);
/* Only version-1 capability layout is decoded further below. */
2353 if ((expr->expr_cap & PCIEM_CAP_VER_MASK) != PCIEM_CAP_VER_1)
2356 port_type = expr->expr_cap & PCIEM_CAP_PORT_TYPE;
2358 switch (port_type) {
2359 case PCIE_END_POINT:
2360 port_name = "DEVICE";
2362 case PCIE_LEG_END_POINT:
2363 port_name = "LEGDEV";
2365 case PCIE_ROOT_PORT:
2368 case PCIE_UP_STREAM_PORT:
2369 port_name = "UPSTREAM";
2371 case PCIE_DOWN_STREAM_PORT:
2372 port_name = "DOWNSTRM";
2374 case PCIE_PCIE2PCI_BRIDGE:
2375 port_name = "PCIE2PCI";
2377 case PCIE_PCI2PCIE_BRIDGE:
2378 port_name = "PCI2PCIE";
/* Root/downstream ports without an implemented slot print no name. */
2384 if ((port_type == PCIE_ROOT_PORT ||
2385 port_type == PCIE_DOWN_STREAM_PORT) &&
2386 !(expr->expr_cap & PCIEM_CAP_SLOT_IMPL))
2388 if (port_name != NULL)
2389 kprintf("[%s]", port_name);
2391 if (pcie_slotimpl(cfg)) {
2392 kprintf(", slotcap=0x%08x", expr->expr_slotcap);
2393 if (expr->expr_slotcap & PCIEM_SLTCAP_HP_CAP)
2394 kprintf("[HOTPLUG]");
/* Return non-zero if I/O port decoding is enabled for b:s:f. */
2401 pci_porten(device_t pcib, int b, int s, int f)
2403 return (PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2)
2404 & PCIM_CMD_PORTEN) != 0;
/* Return non-zero if memory decoding is enabled for b:s:f. */
2408 pci_memen(device_t pcib, int b, int s, int f)
2410 return (PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2)
2411 & PCIM_CMD_MEMEN) != 0;
2415 * Add a resource based on a pci map register. Return 1 if the map
2416 * register is a 32bit map register or 2 if it is a 64bit register.
2419 pci_add_map(device_t pcib, device_t bus, device_t dev,
2420 int b, int s, int f, int reg, struct resource_list *rl, int force,
2425 pci_addr_t start, end, count;
2432 struct resource *res;
/*
 * Size the BAR by writing all 1s and reading back (the classic probe),
 * then restore the original value.
 */
2434 map = PCIB_READ_CONFIG(pcib, b, s, f, reg, 4);
2435 PCIB_WRITE_CONFIG(pcib, b, s, f, reg, 0xffffffff, 4);
2436 testval = PCIB_READ_CONFIG(pcib, b, s, f, reg, 4);
2437 PCIB_WRITE_CONFIG(pcib, b, s, f, reg, map, 4);
2439 if (PCI_BAR_MEM(map)) {
2440 type = SYS_RES_MEMORY;
2441 if (map & PCIM_BAR_MEM_PREFETCH)
2444 type = SYS_RES_IOPORT;
2445 ln2size = pci_mapsize(testval);
2446 ln2range = pci_maprange(testval);
2447 base = pci_mapbase(map);
2448 barlen = ln2range == 64 ? 2 : 1;
2451 * For I/O registers, if bottom bit is set, and the next bit up
2452 * isn't clear, we know we have a BAR that doesn't conform to the
2453 * spec, so ignore it. Also, sanity check the size of the data
2454 * areas to the type of memory involved. Memory must be at least
2455 * 16 bytes in size, while I/O ranges must be at least 4.
2457 if (PCI_BAR_IO(testval) && (testval & PCIM_BAR_IO_RESERVED) != 0)
2459 if ((type == SYS_RES_MEMORY && ln2size < 4) ||
2460 (type == SYS_RES_IOPORT && ln2size < 2))
2464 /* Read the other half of a 64bit map register */
2465 base |= (uint64_t) PCIB_READ_CONFIG(pcib, b, s, f, reg + 4, 4) << 32;
2467 kprintf("\tmap[%02x]: type %s, range %2d, base %#jx, size %2d",
2468 reg, pci_maptype(map), ln2range, (uintmax_t)base, ln2size);
2469 if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f))
2470 kprintf(", port disabled\n");
2471 else if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f))
2472 kprintf(", memory disabled\n");
2474 kprintf(", enabled\n");
2478 * If base is 0, then we have problems. It is best to ignore
2479 * such entries for the moment. These will be allocated later if
2480 * the driver specifically requests them. However, some
2481 * removable busses look better when all resources are allocated,
2482 * so allow '0' to be overridden.
2484 * Similarly treat maps whose values is the same as the test value
2485 * read back. These maps have had all f's written to them by the
2486 * BIOS in an attempt to disable the resources.
2488 if (!force && (base == 0 || map == testval))
/* Reject BARs whose address does not fit in this kernel's u_long. */
2490 if ((u_long)base != base) {
2492 "pci%d:%d:%d:%d bar %#x too many address bits",
2493 pci_get_domain(dev), b, s, f, reg);
2498 * This code theoretically does the right thing, but has
2499 * undesirable side effects in some cases where peripherals
2500 * respond oddly to having these bits enabled. Let the user
2501 * be able to turn them off (since pci_enable_io_modes is 1 by
2504 if (pci_enable_io_modes) {
2505 /* Turn on resources that have been left off by a lazy BIOS */
2506 if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f)) {
2507 cmd = PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2);
2508 cmd |= PCIM_CMD_PORTEN;
2509 PCIB_WRITE_CONFIG(pcib, b, s, f, PCIR_COMMAND, cmd, 2);
2511 if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f)) {
2512 cmd = PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2);
2513 cmd |= PCIM_CMD_MEMEN;
2514 PCIB_WRITE_CONFIG(pcib, b, s, f, PCIR_COMMAND, cmd, 2);
2517 if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f))
2519 if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f))
2523 count = 1 << ln2size;
2524 if (base == 0 || base == pci_mapbase(testval)) {
2525 start = 0; /* Let the parent decide. */
2529 end = base + (1 << ln2size) - 1;
2531 resource_list_add(rl, type, reg, start, end, count, -1);
2534 * Try to allocate the resource for this BAR from our parent
2535 * so that this resource range is already reserved. The
2536 * driver for this device will later inherit this resource in
2537 * pci_alloc_resource().
2539 res = resource_list_alloc(rl, bus, dev, type, &reg, start, end, count,
2540 prefetch ? RF_PREFETCHABLE : 0, -1);
2543 * If the allocation fails, delete the resource list
2544 * entry to force pci_alloc_resource() to allocate
2545 * resources from the parent.
2547 resource_list_delete(rl, type, reg);
2548 #ifdef PCI_BAR_CLEAR
2551 #else /* !PCI_BAR_CLEAR */
2553 * Don't clear BAR here. Some BIOS lists HPET as a
2554 * PCI function, clearing the BAR causes HPET timer
2558 kprintf("pci:%d:%d:%d: resource reservation failed "
2559 "%#jx - %#jx\n", b, s, f,
2560 (intmax_t)start, (intmax_t)end);
2563 #endif /* PCI_BAR_CLEAR */
/* Write the (possibly relocated) address back to the BAR. */
2565 start = rman_get_start(res);
2567 pci_write_config(dev, reg, start, 4);
2569 pci_write_config(dev, reg + 4, start >> 32, 4);
2574 * For ATA devices we need to decide early what addressing mode to use.
2575 * Legacy demands that the primary and secondary ATA ports sits on the
2576 * same addresses that old ISA hardware did. This dictates that we use
2577 * those addresses and ignore the BAR's if we cannot set PCI native
2581 pci_ata_maps(device_t pcib, device_t bus, device_t dev, int b,
2582 int s, int f, struct resource_list *rl, int force, uint32_t prefetchmask)
2584 int rid, type, progif;
2586 /* if this device supports PCI native addressing use it */
2587 progif = pci_read_config(dev, PCIR_PROGIF, 1);
2588 if ((progif & 0x8a) == 0x8a) {
2589 if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) &&
2590 pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) {
2591 kprintf("Trying ATA native PCI addressing mode\n");
2592 pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1);
/* Re-read progif: per-channel mode bits decide BARs vs. ISA addresses. */
2596 progif = pci_read_config(dev, PCIR_PROGIF, 1);
2597 type = SYS_RES_IOPORT;
2598 if (progif & PCIP_STORAGE_IDE_MODEPRIM) {
2599 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(0), rl, force,
2600 prefetchmask & (1 << 0));
2601 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(1), rl, force,
2602 prefetchmask & (1 << 1));
/* Legacy primary channel: fixed ISA-compatible 0x1f0-0x1f7/0x3f6. */
2605 resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8, -1);
2606 resource_list_alloc(rl, bus, dev, type, &rid, 0x1f0, 0x1f7, 8,
2609 resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1, -1);
2610 resource_list_alloc(rl, bus, dev, type, &rid, 0x3f6, 0x3f6, 1,
2613 if (progif & PCIP_STORAGE_IDE_MODESEC) {
2614 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(2), rl, force,
2615 prefetchmask & (1 << 2));
2616 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(3), rl, force,
2617 prefetchmask & (1 << 3));
/* Legacy secondary channel: fixed ISA-compatible 0x170-0x177/0x376. */
2620 resource_list_add(rl, type, rid, 0x170, 0x177, 8, -1);
2621 resource_list_alloc(rl, bus, dev, type, &rid, 0x170, 0x177, 8,
2624 resource_list_add(rl, type, rid, 0x376, 0x376, 1, -1);
2625 resource_list_alloc(rl, bus, dev, type, &rid, 0x376, 0x376, 1,
/* BAR(4)/BAR(5) (bus-master DMA etc.) are mapped normally either way. */
2628 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(4), rl, force,
2629 prefetchmask & (1 << 4));
2630 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(5), rl, force,
2631 prefetchmask & (1 << 5));
/*
 * Determine the legacy INTx IRQ for `dev' (tunable override first, then
 * the intline register or a bus routing request) and register it as the
 * rid-0 SYS_RES_IRQ resource.
 */
2635 pci_assign_interrupt(device_t bus, device_t dev, int force_route)
2637 struct pci_devinfo *dinfo = device_get_ivars(dev);
2638 pcicfgregs *cfg = &dinfo->cfg;
2639 char tunable_name[64];
2642 /* Has to have an intpin to have an interrupt. */
2643 if (cfg->intpin == 0)
2646 /* Let the user override the IRQ with a tunable. */
2647 irq = PCI_INVALID_IRQ;
2648 ksnprintf(tunable_name, sizeof(tunable_name),
2649 "hw.pci%d.%d.%d.INT%c.irq",
2650 cfg->domain, cfg->bus, cfg->slot, cfg->intpin + 'A' - 1);
/* Only IRQs 1..254 are accepted from the tunable. */
2651 if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0))
2652 irq = PCI_INVALID_IRQ;
2655 * If we didn't get an IRQ via the tunable, then we either use the
2656 * IRQ value in the intline register or we ask the bus to route an
2657 * interrupt for us. If force_route is true, then we only use the
2658 * value in the intline register if the bus was unable to assign an
2661 if (!PCI_INTERRUPT_VALID(irq)) {
2662 if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route)
2663 irq = PCI_ASSIGN_INTERRUPT(bus, dev);
2664 if (!PCI_INTERRUPT_VALID(irq))
2668 /* If after all that we don't have an IRQ, just bail. */
2669 if (!PCI_INTERRUPT_VALID(irq))
2672 /* Update the config register if it changed. */
2673 if (irq != cfg->intline) {
2675 pci_write_config(dev, PCIR_INTLINE, irq, 1);
2678 /* Add this IRQ as rid 0 interrupt resource. */
2679 resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1,
2680 machintr_legacy_intr_cpuid(irq));
/*
 * Populate the resource list for a newly discovered child: BAR maps,
 * quirk-listed extra registers, and (if the BIOS routed one) its INTx
 * interrupt.  ATA controllers in legacy mode get fixed compat ranges
 * via pci_ata_maps() instead of plain BAR probing.
 * NOTE(review): interior lines are elided in this excerpt.
 */
2684 pci_add_resources(device_t pcib, device_t bus, device_t dev, int force, uint32_t prefetchmask)
2686 struct pci_devinfo *dinfo = device_get_ivars(dev);
2687 pcicfgregs *cfg = &dinfo->cfg;
2688 struct resource_list *rl = &dinfo->resources;
2689 struct pci_quirk *q;
2696 /* ATA devices needs special map treatment */
2697 if ((pci_get_class(dev) == PCIC_STORAGE) &&
2698 (pci_get_subclass(dev) == PCIS_STORAGE_IDE) &&
/* legacy-mode master device, or both channel BARs unimplemented */
2699 ((pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV) ||
2700 (!pci_read_config(dev, PCIR_BAR(0), 4) &&
2701 !pci_read_config(dev, PCIR_BAR(2), 4))) )
2702 pci_ata_maps(pcib, bus, dev, b, s, f, rl, force, prefetchmask);
/* pci_add_map() returns how many BAR dwords it consumed (64-bit BARs use 2) */
2704 for (i = 0; i < cfg->nummaps;)
2705 i += pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(i),
2706 rl, force, prefetchmask & (1 << i));
2709 * Add additional, quirked resources.
2711 for (q = &pci_quirks[0]; q->devid; q++) {
2712 if (q->devid == ((cfg->device << 16) | cfg->vendor)
2713 && q->type == PCI_QUIRK_MAP_REG)
2714 pci_add_map(pcib, bus, dev, b, s, f, q->arg1, rl,
2718 if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) {
2720 * Try to re-route interrupts. Sometimes the BIOS or
2721 * firmware may leave bogus values in these registers.
2722 * If the re-route fails, then just stick with what we
2725 pci_assign_interrupt(bus, dev, 1);
/*
 * Enumerate all slots/functions on bus 'busno' in 'domain', reading the
 * config header of each present device and attaching a child for it.
 * Multi-function scan of functions > 0 happens only when the header's
 * MFDEV bit is set.  NOTE(review): interior lines are elided here.
 */
2730 pci_add_children(device_t dev, int domain, int busno, size_t dinfo_size)
/* Shorthand for a config read at the current slot/function. */
2732 #define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
2733 device_t pcib = device_get_parent(dev);
2734 struct pci_devinfo *dinfo;
2736 int s, f, pcifunchigh;
2739 KASSERT(dinfo_size >= sizeof(struct pci_devinfo),
2740 ("dinfo_size too small"));
2741 maxslots = PCIB_MAXSLOTS(pcib);
2742 for (s = 0; s <= maxslots; s++) {
2746 hdrtype = REG(PCIR_HDRTYPE, 1);
/* Skip header types we don't understand. */
2747 if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
2749 if (hdrtype & PCIM_MFDEV)
2750 pcifunchigh = PCI_FUNCMAX;
2751 for (f = 0; f <= pcifunchigh; f++) {
2752 dinfo = pci_read_device(pcib, domain, busno, s, f,
2754 if (dinfo != NULL) {
2755 pci_add_child(dev, dinfo);
/*
 * Create a newbus child for 'dinfo', snapshot then restore its config
 * space (save/restore pair normalizes power state bookkeeping), and
 * populate its resource list.
 */
2763 pci_add_child(device_t bus, struct pci_devinfo *dinfo)
2767 pcib = device_get_parent(bus);
2768 dinfo->cfg.dev = device_add_child(bus, NULL, -1);
2769 device_set_ivars(dinfo->cfg.dev, dinfo);
2770 resource_list_init(&dinfo->resources);
2771 pci_cfg_save(dinfo->cfg.dev, dinfo, 0);
2772 pci_cfg_restore(dinfo->cfg.dev, dinfo);
2773 pci_print_verbose(dinfo);
2774 pci_add_resources(pcib, bus, dinfo->cfg.dev, 0, 0);
/* Generic PCI bus probe: always matches, at low priority. */
2778 pci_probe(device_t dev)
2780 device_set_desc(dev, "PCI bus");
2782 /* Allow other subclasses to override this driver. */
/*
 * Attach the PCI bus: query domain/bus numbers from the parent bridge
 * (unit numbers are meaningless with multiple domains), enumerate
 * children, then run the generic attach over them.
 */
2787 pci_attach(device_t dev)
2792 * Since there can be multiple independantly numbered PCI
2793 * busses on systems with multiple PCI domains, we can't use
2794 * the unit number to decide which bus we are probing. We ask
2795 * the parent pcib what our domain and bus numbers are.
2797 domain = pcib_get_domain(dev);
2798 busno = pcib_get_bus(dev);
2800 device_printf(dev, "domain=%d, physical bus=%d\n",
2803 pci_add_children(dev, domain, busno, sizeof(struct pci_devinfo));
2805 return (bus_generic_attach(dev));
/*
 * Bus suspend: save every child's config space, suspend children, then
 * (when ACPI is present and power management enabled) drop attached
 * type-0 children to D3 or the ACPI-suggested sleep state.
 * NOTE(review): interior lines are elided in this excerpt.
 */
2809 pci_suspend(device_t dev)
2811 int dstate, error, i, numdevs;
2812 device_t acpi_dev, child, *devlist;
2813 struct pci_devinfo *dinfo;
2816 * Save the PCI configuration space for each child and set the
2817 * device in the appropriate power state for this sleep state.
2820 if (pci_do_power_resume)
2821 acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
2822 device_get_children(dev, &devlist, &numdevs);
2823 for (i = 0; i < numdevs; i++) {
2825 dinfo = (struct pci_devinfo *) device_get_ivars(child);
2826 pci_cfg_save(child, dinfo, 0);
2829 /* Suspend devices before potentially powering them down. */
2830 error = bus_generic_suspend(dev);
/* Suspend failed: free the list (an early-return path, presumably). */
2832 kfree(devlist, M_TEMP);
2837 * Always set the device to D3. If ACPI suggests a different
2838 * power state, use it instead. If ACPI is not present, the
2839 * firmware is responsible for managing device power. Skip
2840 * children who aren't attached since they are powered down
2841 * separately. Only manage type 0 devices for now.
2843 for (i = 0; acpi_dev && i < numdevs; i++) {
2845 dinfo = (struct pci_devinfo *) device_get_ivars(child);
2846 if (device_is_attached(child) && dinfo->cfg.hdrtype == 0) {
2847 dstate = PCI_POWERSTATE_D3;
2848 ACPI_PWR_FOR_SLEEP(acpi_dev, child, &dstate);
2849 pci_set_powerstate(child, dstate);
2852 kfree(devlist, M_TEMP);
/*
 * Bus resume: power each attached type-0 child back to D0 (via ACPI
 * when available), restore its saved config space, then run the
 * generic resume.  NOTE(review): interior lines are elided here.
 */
2857 pci_resume(device_t dev)
2860 device_t acpi_dev, child, *devlist;
2861 struct pci_devinfo *dinfo;
2864 * Set each child to D0 and restore its PCI configuration space.
2867 if (pci_do_power_resume)
2868 acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
2869 device_get_children(dev, &devlist, &numdevs);
2870 for (i = 0; i < numdevs; i++) {
2872 * Notify ACPI we're going to D0 but ignore the result. If
2873 * ACPI is not present, the firmware is responsible for
2874 * managing device power. Only manage type 0 devices for now.
2877 dinfo = (struct pci_devinfo *) device_get_ivars(child);
2878 if (acpi_dev && device_is_attached(child) &&
2879 dinfo->cfg.hdrtype == 0) {
2880 ACPI_PWR_FOR_SLEEP(acpi_dev, child, NULL);
2881 pci_set_powerstate(child, PCI_POWERSTATE_D0);
2884 /* Now the device is powered up, restore its config space. */
2885 pci_cfg_restore(child, dinfo);
2887 kfree(devlist, M_TEMP);
2888 return (bus_generic_resume(dev));
/*
 * Locate a preloaded "pci_vendor_data" module (the flat-text vendor/
 * device database) and publish its address/size in pci_vendordata /
 * pci_vendordata_size.  The database is newline-terminated here so the
 * parser in pci_describe_parse_line() can't run off the end.
 */
2892 pci_load_vendor_data(void)
2894 caddr_t vendordata, info;
2896 if ((vendordata = preload_search_by_type("pci_vendor_data")) != NULL) {
2897 info = preload_search_info(vendordata, MODINFO_ADDR);
2898 pci_vendordata = *(char **)info;
2899 info = preload_search_info(vendordata, MODINFO_SIZE);
2900 pci_vendordata_size = *(size_t *)info;
2901 /* terminate the database */
2902 pci_vendordata[pci_vendordata_size] = '\n';
/*
 * Called when a new driver is registered with the PCI devclass: give
 * every still-unclaimed child another chance to probe/attach.  Config
 * space is restored before probing and saved (with power-down) again
 * if the child remains unattached.
 * NOTE(review): interior lines are elided in this excerpt.
 */
2907 pci_driver_added(device_t dev, driver_t *driver)
2912 struct pci_devinfo *dinfo;
2916 device_printf(dev, "driver added\n");
2917 DEVICE_IDENTIFY(driver, dev);
2918 device_get_children(dev, &devlist, &numdevs);
2919 for (i = 0; i < numdevs; i++) {
/* Skip children already claimed by some driver. */
2921 if (device_get_state(child) != DS_NOTPRESENT)
2923 dinfo = device_get_ivars(child);
2924 pci_print_verbose(dinfo);
2926 kprintf("pci%d:%d:%d:%d: reprobing on driver added\n",
2927 dinfo->cfg.domain, dinfo->cfg.bus, dinfo->cfg.slot,
2929 pci_cfg_restore(child, dinfo);
2930 if (device_probe_and_attach(child) != 0)
2931 pci_cfg_save(child, dinfo, 1);
2933 kfree(devlist, M_TEMP);
/* On child detach, save its config space and power it down (setstate=1). */
2937 pci_child_detached(device_t parent __unused, device_t child)
2939 /* Turn child's power off */
2940 pci_cfg_save(child, device_get_ivars(child), 1);
/*
 * Bus method: hook up an interrupt handler for a child.  For direct
 * children using MSI/MSI-X this also asks the parent bridge to map the
 * message (address/data pair), programs it into the device, tracks the
 * per-vector handler count, and flips INTxDIS accordingly.  On mapping
 * failure the just-installed handler is torn down again.
 * NOTE(review): interior lines are elided in this excerpt.
 */
2944 pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags,
2945 driver_intr_t *intr, void *arg, void **cookiep, lwkt_serialize_t serializer)
2947 struct pci_devinfo *dinfo;
2948 struct msix_table_entry *mte;
2949 struct msix_vector *mv;
2955 error = bus_generic_setup_intr(dev, child, irq, flags, intr,
2956 arg, &cookie, serializer);
2960 /* If this is not a direct child, just bail out. */
2961 if (device_get_parent(child) != dev) {
2966 rid = rman_get_rid(irq);
/* rid 0 == legacy INTx; rid > 0 == MSI/MSI-X (handled below) */
2968 /* Make sure that INTx is enabled */
2969 pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
2972 * Check to see if the interrupt is MSI or MSI-X.
2973 * Ask our parent to map the MSI and give
2974 * us the address and data register values.
2975 * If we fail for some reason, teardown the
2976 * interrupt handler.
2978 dinfo = device_get_ivars(child);
2979 if (dinfo->cfg.msi.msi_alloc > 0) {
/* First handler for MSI: map the vector now. */
2980 if (dinfo->cfg.msi.msi_addr == 0) {
2981 KASSERT(dinfo->cfg.msi.msi_handlers == 0,
2982 ("MSI has handlers, but vectors not mapped"));
2983 error = PCIB_MAP_MSI(device_get_parent(dev),
2984 child, rman_get_start(irq), &addr, &data,
2985 rman_get_cpuid(irq));
2988 dinfo->cfg.msi.msi_addr = addr;
2989 dinfo->cfg.msi.msi_data = data;
2990 pci_enable_msi(child, addr, data);
2992 dinfo->cfg.msi.msi_handlers++;
/* Otherwise this must be an MSI-X rid; rid N maps table entry N-1. */
2994 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
2995 ("No MSI or MSI-X interrupts allocated"));
2996 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
2997 ("MSI-X index too high"));
2998 mte = &dinfo->cfg.msix.msix_table[rid - 1];
2999 KASSERT(mte->mte_vector != 0, ("no message vector"));
3000 mv = &dinfo->cfg.msix.msix_vectors[mte->mte_vector - 1];
3001 KASSERT(mv->mv_irq == rman_get_start(irq),
3003 if (mv->mv_address == 0) {
3004 KASSERT(mte->mte_handlers == 0,
3005 ("MSI-X table entry has handlers, but vector not mapped"));
3006 error = PCIB_MAP_MSI(device_get_parent(dev),
3007 child, rman_get_start(irq), &addr, &data,
3008 rman_get_cpuid(irq));
3011 mv->mv_address = addr;
/* Program + unmask the table entry only for the first handler. */
3014 if (mte->mte_handlers == 0) {
3015 pci_setup_msix_vector(child, rid - 1,
3016 mv->mv_address, mv->mv_data);
3017 pci_unmask_msix_vector(child, rid - 1);
3019 mte->mte_handlers++;
3022 /* Make sure that INTx is disabled if we are using MSI/MSIX */
3023 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
/* Error path: undo the generic setup done above. */
3026 (void)bus_generic_teardown_intr(dev, child, irq,
/*
 * Bus method: remove an interrupt handler.  For direct children,
 * decrement the MSI/MSI-X handler count and disable/mask the message
 * when the last handler goes away, then do the generic teardown.
 * NOTE(review): interior lines are elided in this excerpt.
 */
3036 pci_teardown_intr(device_t dev, device_t child, struct resource *irq,
3039 struct msix_table_entry *mte;
3040 struct resource_list_entry *rle;
3041 struct pci_devinfo *dinfo;
3044 if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE))
3047 /* If this isn't a direct child, just bail out */
3048 if (device_get_parent(child) != dev)
3049 return(bus_generic_teardown_intr(dev, child, irq, cookie));
3051 rid = rman_get_rid(irq);
/* Presumably rid 0 (INTx) path above this point is elided. */
3054 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
3057 * Check to see if the interrupt is MSI or MSI-X. If so,
3058 * decrement the appropriate handlers count and mask the
3059 * MSI-X message, or disable MSI messages if the count
3062 dinfo = device_get_ivars(child);
3063 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
/* Sanity: the resource being torn down must be the one we track. */
3064 if (rle->res != irq)
3066 if (dinfo->cfg.msi.msi_alloc > 0) {
3067 KASSERT(rid <= dinfo->cfg.msi.msi_alloc,
3068 ("MSI-X index too high"));
3069 if (dinfo->cfg.msi.msi_handlers == 0)
3071 dinfo->cfg.msi.msi_handlers--;
/* Last MSI handler gone: disable MSI on the device. */
3072 if (dinfo->cfg.msi.msi_handlers == 0)
3073 pci_disable_msi(child);
3075 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
3076 ("No MSI or MSI-X interrupts allocated"));
3077 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
3078 ("MSI-X index too high"));
3079 mte = &dinfo->cfg.msix.msix_table[rid - 1];
3080 if (mte->mte_handlers == 0)
3082 mte->mte_handlers--;
/* Last handler on this vector: mask its MSI-X table entry. */
3083 if (mte->mte_handlers == 0)
3084 pci_mask_msix_vector(child, rid - 1);
3087 error = bus_generic_teardown_intr(dev, child, irq, cookie);
3090 ("%s: generic teardown failed for MSI/MSI-X", __func__));
/*
 * Bus method: print one child's line for the boot log — its port,
 * memory and IRQ resources, flags, and slot.function address.
 * Returns the character count, per bus_print_child convention.
 */
3095 pci_print_child(device_t dev, device_t child)
3097 struct pci_devinfo *dinfo;
3098 struct resource_list *rl;
3101 dinfo = device_get_ivars(child);
3102 rl = &dinfo->resources;
3104 retval += bus_print_child_header(dev, child);
3106 retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx");
3107 retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx");
3108 retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld");
3109 if (device_get_flags(dev))
3110 retval += kprintf(" flags %#x", device_get_flags(dev));
3112 retval += kprintf(" at device %d.%d", pci_get_slot(child),
3113 pci_get_function(child));
3115 retval += bus_print_child_footer(dev, child);
/*
 * Class/subclass -> human-readable description table used by
 * pci_probe_nomatch().  A subclass of -1 is the generic entry for the
 * whole class; subclass rows must follow their class's generic row.
 */
} pci_nomatch_tab[] = {
3126 {PCIC_OLD, -1, "old"},
3127 {PCIC_OLD, PCIS_OLD_NONVGA, "non-VGA display device"},
3128 {PCIC_OLD, PCIS_OLD_VGA, "VGA-compatible display device"},
3129 {PCIC_STORAGE, -1, "mass storage"},
3130 {PCIC_STORAGE, PCIS_STORAGE_SCSI, "SCSI"},
3131 {PCIC_STORAGE, PCIS_STORAGE_IDE, "ATA"},
3132 {PCIC_STORAGE, PCIS_STORAGE_FLOPPY, "floppy disk"},
3133 {PCIC_STORAGE, PCIS_STORAGE_IPI, "IPI"},
3134 {PCIC_STORAGE, PCIS_STORAGE_RAID, "RAID"},
3135 {PCIC_STORAGE, PCIS_STORAGE_ATA_ADMA, "ATA (ADMA)"},
3136 {PCIC_STORAGE, PCIS_STORAGE_SATA, "SATA"},
3137 {PCIC_STORAGE, PCIS_STORAGE_SAS, "SAS"},
3138 {PCIC_NETWORK, -1, "network"},
3139 {PCIC_NETWORK, PCIS_NETWORK_ETHERNET, "ethernet"},
3140 {PCIC_NETWORK, PCIS_NETWORK_TOKENRING, "token ring"},
3141 {PCIC_NETWORK, PCIS_NETWORK_FDDI, "fddi"},
3142 {PCIC_NETWORK, PCIS_NETWORK_ATM, "ATM"},
3143 {PCIC_NETWORK, PCIS_NETWORK_ISDN, "ISDN"},
3144 {PCIC_DISPLAY, -1, "display"},
3145 {PCIC_DISPLAY, PCIS_DISPLAY_VGA, "VGA"},
3146 {PCIC_DISPLAY, PCIS_DISPLAY_XGA, "XGA"},
3147 {PCIC_DISPLAY, PCIS_DISPLAY_3D, "3D"},
3148 {PCIC_MULTIMEDIA, -1, "multimedia"},
3149 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_VIDEO, "video"},
3150 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_AUDIO, "audio"},
3151 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_TELE, "telephony"},
3152 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_HDA, "HDA"},
3153 {PCIC_MEMORY, -1, "memory"},
3154 {PCIC_MEMORY, PCIS_MEMORY_RAM, "RAM"},
3155 {PCIC_MEMORY, PCIS_MEMORY_FLASH, "flash"},
3156 {PCIC_BRIDGE, -1, "bridge"},
3157 {PCIC_BRIDGE, PCIS_BRIDGE_HOST, "HOST-PCI"},
3158 {PCIC_BRIDGE, PCIS_BRIDGE_ISA, "PCI-ISA"},
3159 {PCIC_BRIDGE, PCIS_BRIDGE_EISA, "PCI-EISA"},
3160 {PCIC_BRIDGE, PCIS_BRIDGE_MCA, "PCI-MCA"},
3161 {PCIC_BRIDGE, PCIS_BRIDGE_PCI, "PCI-PCI"},
3162 {PCIC_BRIDGE, PCIS_BRIDGE_PCMCIA, "PCI-PCMCIA"},
3163 {PCIC_BRIDGE, PCIS_BRIDGE_NUBUS, "PCI-NuBus"},
3164 {PCIC_BRIDGE, PCIS_BRIDGE_CARDBUS, "PCI-CardBus"},
3165 {PCIC_BRIDGE, PCIS_BRIDGE_RACEWAY, "PCI-RACEway"},
3166 {PCIC_SIMPLECOMM, -1, "simple comms"},
3167 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_UART, "UART"}, /* could detect 16550 */
3168 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_PAR, "parallel port"},
3169 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MULSER, "multiport serial"},
3170 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MODEM, "generic modem"},
3171 {PCIC_BASEPERIPH, -1, "base peripheral"},
3172 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PIC, "interrupt controller"},
3173 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_DMA, "DMA controller"},
3174 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_TIMER, "timer"},
3175 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_RTC, "realtime clock"},
3176 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PCIHOT, "PCI hot-plug controller"},
3177 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_SDHC, "SD host controller"},
3178 {PCIC_INPUTDEV, -1, "input device"},
3179 {PCIC_INPUTDEV, PCIS_INPUTDEV_KEYBOARD, "keyboard"},
3180 {PCIC_INPUTDEV, PCIS_INPUTDEV_DIGITIZER,"digitizer"},
3181 {PCIC_INPUTDEV, PCIS_INPUTDEV_MOUSE, "mouse"},
3182 {PCIC_INPUTDEV, PCIS_INPUTDEV_SCANNER, "scanner"},
3183 {PCIC_INPUTDEV, PCIS_INPUTDEV_GAMEPORT, "gameport"},
3184 {PCIC_DOCKING, -1, "docking station"},
3185 {PCIC_PROCESSOR, -1, "processor"},
3186 {PCIC_SERIALBUS, -1, "serial bus"},
3187 {PCIC_SERIALBUS, PCIS_SERIALBUS_FW, "FireWire"},
3188 {PCIC_SERIALBUS, PCIS_SERIALBUS_ACCESS, "AccessBus"},
3189 {PCIC_SERIALBUS, PCIS_SERIALBUS_SSA, "SSA"},
3190 {PCIC_SERIALBUS, PCIS_SERIALBUS_USB, "USB"},
3191 {PCIC_SERIALBUS, PCIS_SERIALBUS_FC, "Fibre Channel"},
3192 {PCIC_SERIALBUS, PCIS_SERIALBUS_SMBUS, "SMBus"},
3193 {PCIC_WIRELESS, -1, "wireless controller"},
3194 {PCIC_WIRELESS, PCIS_WIRELESS_IRDA, "iRDA"},
3195 {PCIC_WIRELESS, PCIS_WIRELESS_IR, "IR"},
3196 {PCIC_WIRELESS, PCIS_WIRELESS_RF, "RF"},
3197 {PCIC_INTELLIIO, -1, "intelligent I/O controller"},
3198 {PCIC_INTELLIIO, PCIS_INTELLIIO_I2O, "I2O"},
3199 {PCIC_SATCOM, -1, "satellite communication"},
3200 {PCIC_SATCOM, PCIS_SATCOM_TV, "sat TV"},
3201 {PCIC_SATCOM, PCIS_SATCOM_AUDIO, "sat audio"},
3202 {PCIC_SATCOM, PCIS_SATCOM_VOICE, "sat voice"},
3203 {PCIC_SATCOM, PCIS_SATCOM_DATA, "sat data"},
3204 {PCIC_CRYPTO, -1, "encrypt/decrypt"},
3205 {PCIC_CRYPTO, PCIS_CRYPTO_NETCOMP, "network/computer crypto"},
3206 {PCIC_CRYPTO, PCIS_CRYPTO_ENTERTAIN, "entertainment crypto"},
3207 {PCIC_DASP, -1, "dasp"},
3208 {PCIC_DASP, PCIS_DASP_DPIO, "DPIO module"},
/*
 * Bus method: report a child no driver claimed.  Prefer a name from the
 * loaded vendor database; otherwise describe it by class/subclass from
 * pci_nomatch_tab.  Finally, save config and power the child down.
 * NOTE(review): interior lines are elided in this excerpt.
 */
3213 pci_probe_nomatch(device_t dev, device_t child)
3216 char *cp, *scp, *device;
3219 * Look for a listing for this device in a loaded device database.
3221 if ((device = pci_describe_device(child)) != NULL) {
3222 device_printf(dev, "<%s>", device);
/* pci_describe_device() kmalloc'ed the string; we own and free it. */
3223 kfree(device, M_DEVBUF);
3226 * Scan the class/subclass descriptions for a general
3231 for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) {
3232 if (pci_nomatch_tab[i].class == pci_get_class(child)) {
3233 if (pci_nomatch_tab[i].subclass == -1) {
/* Generic class row; may be refined by a subclass match below. */
3234 cp = pci_nomatch_tab[i].desc;
3235 } else if (pci_nomatch_tab[i].subclass ==
3236 pci_get_subclass(child)) {
3237 scp = pci_nomatch_tab[i].desc;
3241 device_printf(dev, "<%s%s%s>",
3243 ((cp != NULL) && (scp != NULL)) ? ", " : "",
3246 kprintf(" (vendor 0x%04x, dev 0x%04x) at device %d.%d",
3247 pci_get_vendor(child), pci_get_device(child),
3248 pci_get_slot(child), pci_get_function(child));
3249 if (pci_get_intpin(child) > 0) {
3252 irq = pci_get_irq(child);
3253 if (PCI_INTERRUPT_VALID(irq))
3254 kprintf(" irq %d", irq);
3258 pci_cfg_save(child, (struct pci_devinfo *)device_get_ivars(child), 1);
3262 * Parse the PCI device database, if loaded, and return a pointer to a
3263 * description of the device.
3265 * The database is flat text formatted as follows:
3267 * Any line not in a valid format is ignored.
3268 * Lines are terminated with newline '\n' characters.
3270 * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then
3273 * A DEVICE line is entered immediately below the corresponding VENDOR ID.
3274 * - devices cannot be listed without a corresponding VENDOR line.
3275 * A DEVICE line consists of a TAB, the 4 digit (hex) device code,
3276 * another TAB, then the device name.
3280 * Assuming (ptr) points to the beginning of a line in the database,
3281 * return the vendor or device and description of the next entry.
3282 * The value of (vendor) or (device) inappropriate for the entry type
3283 * is set to -1. Returns nonzero at the end of the database.
3285 * Note that this is slightly unrobust in the face of corrupt data;
3286 * we attempt to safeguard against this by spamming the end of the
3287 * database with a newline when we initialise.
/*
 * Parse one entry of the flat-text vendor database (format documented
 * above).  Sets *vendor or *device (the other is -1) and copies the
 * description into *desc; advances *ptr past the consumed line(s).
 * Returns nonzero at end of database.
 * NOTE(review): most of the body is elided in this excerpt.
 */
3290 pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc)
/* Bytes remaining before the end of the in-memory database. */
3299 left = pci_vendordata_size - (cp - pci_vendordata);
/* Vendor line: hex code, TAB, up-to-80-char description. */
3307 ksscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2)
/* Device line (leading TAB handled above, presumably). */
3311 ksscanf(cp, "%x\t%80[^\n]", device, *desc) == 2)
3314 /* skip to next line */
3315 while (*cp != '\n' && left > 0) {
3324 /* skip to next line */
3325 while (*cp != '\n' && left > 0) {
3329 if (*cp == '\n' && left > 0)
/*
 * Build a "vendor, device" description string for 'dev' from the
 * loaded vendor database.  Returns a kmalloc'ed string (caller frees
 * with M_DEVBUF) or NULL when no database/vendor entry exists.
 * NOTE(review): interior lines are elided in this excerpt.
 */
3336 pci_describe_device(device_t dev)
3339 char *desc, *vp, *dp, *line;
3341 desc = vp = dp = NULL;
3344 * If we have no vendor data, we can't do anything.
3346 if (pci_vendordata == NULL)
3350 * Scan the vendor data looking for this device
3352 line = pci_vendordata;
/* 80-byte scratch buffers match the %80[^\n] parse width above. */
3353 if ((vp = kmalloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
3356 if (pci_describe_parse_line(&line, &vendor, &device, &vp))
3358 if (vendor == pci_get_vendor(dev))
3361 if ((dp = kmalloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
3364 if (pci_describe_parse_line(&line, &vendor, &device, &dp)) {
3372 if (device == pci_get_device(dev))
/* No device entry found: fall back to the raw hex device id. */
3376 ksnprintf(dp, 80, "0x%x", pci_get_device(dev));
3377 if ((desc = kmalloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) !=
3379 ksprintf(desc, "%s, %s", vp, dp);
3382 kfree(vp, M_DEVBUF);
3384 kfree(dp, M_DEVBUF);
/*
 * Bus method: read a PCI instance variable for 'child'.  Most ivars
 * come straight from the cached config registers in dinfo->cfg.
 * NOTE(review): break statements / a few case labels are elided here.
 */
3389 pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
3391 struct pci_devinfo *dinfo;
3394 dinfo = device_get_ivars(child);
3398 case PCI_IVAR_ETHADDR:
3400 * The generic accessor doesn't deal with failure, so
3401 * we set the return value, then return an error.
3403 *((uint8_t **) result) = NULL;
3405 case PCI_IVAR_SUBVENDOR:
3406 *result = cfg->subvendor;
3408 case PCI_IVAR_SUBDEVICE:
3409 *result = cfg->subdevice;
3411 case PCI_IVAR_VENDOR:
3412 *result = cfg->vendor;
3414 case PCI_IVAR_DEVICE:
3415 *result = cfg->device;
3417 case PCI_IVAR_DEVID:
/* Combined device:vendor identifier, matching pci_quirks devid. */
3418 *result = (cfg->device << 16) | cfg->vendor;
3420 case PCI_IVAR_CLASS:
3421 *result = cfg->baseclass;
3423 case PCI_IVAR_SUBCLASS:
3424 *result = cfg->subclass;
3426 case PCI_IVAR_PROGIF:
3427 *result = cfg->progif;
3429 case PCI_IVAR_REVID:
3430 *result = cfg->revid;
3432 case PCI_IVAR_INTPIN:
3433 *result = cfg->intpin;
3436 *result = cfg->intline;
3438 case PCI_IVAR_DOMAIN:
3439 *result = cfg->domain;
3445 *result = cfg->slot;
3447 case PCI_IVAR_FUNCTION:
3448 *result = cfg->func;
3450 case PCI_IVAR_CMDREG:
3451 *result = cfg->cmdreg;
3453 case PCI_IVAR_CACHELNSZ:
3454 *result = cfg->cachelnsz;
3456 case PCI_IVAR_MINGNT:
3457 *result = cfg->mingnt;
3459 case PCI_IVAR_MAXLAT:
3460 *result = cfg->maxlat;
3462 case PCI_IVAR_LATTIMER:
3463 *result = cfg->lattimer;
3465 case PCI_IVAR_PCIXCAP_PTR:
3466 *result = cfg->pcix.pcix_ptr;
3468 case PCI_IVAR_PCIECAP_PTR:
3469 *result = cfg->expr.expr_ptr;
3471 case PCI_IVAR_VPDCAP_PTR:
3472 *result = cfg->vpd.vpd_reg;
/*
 * Bus method: write a PCI instance variable.  Only INTPIN is mutable;
 * the identification ivars are read-only and return EINVAL.
 * NOTE(review): break statements are elided in this excerpt.
 */
3481 pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
3483 struct pci_devinfo *dinfo;
3485 dinfo = device_get_ivars(child);
3488 case PCI_IVAR_INTPIN:
3489 dinfo->cfg.intpin = value;
3491 case PCI_IVAR_ETHADDR:
3492 case PCI_IVAR_SUBVENDOR:
3493 case PCI_IVAR_SUBDEVICE:
3494 case PCI_IVAR_VENDOR:
3495 case PCI_IVAR_DEVICE:
3496 case PCI_IVAR_DEVID:
3497 case PCI_IVAR_CLASS:
3498 case PCI_IVAR_SUBCLASS:
3499 case PCI_IVAR_PROGIF:
3500 case PCI_IVAR_REVID:
3502 case PCI_IVAR_DOMAIN:
3505 case PCI_IVAR_FUNCTION:
3506 return (EINVAL); /* disallow for now */
3513 #include "opt_ddb.h"
3515 #include <ddb/ddb.h>
3516 #include <sys/cons.h>
3519 * List resources based on pci map registers, used for within ddb
/*
 * ddb(4) "show pciregs" command: walk the global pci_devq list and
 * print one summary line (selector, class, card/chip ids, rev, header
 * type) per device.  NOTE(review): interior lines are elided here.
 */
3522 DB_SHOW_COMMAND(pciregs, db_pci_dump)
3524 struct pci_devinfo *dinfo;
3525 struct devlist *devlist_head;
3528 int i, error, none_count;
3531 /* get the head of the device queue */
3532 devlist_head = &pci_devq;
3535 * Go through the list of devices and print out devices
3537 for (error = 0, i = 0,
3538 dinfo = STAILQ_FIRST(devlist_head);
/* Stop early if the pager was quit or we ran past pci_numdevs. */
3539 (dinfo != NULL) && (error == 0) && (i < pci_numdevs) && !db_pager_quit;
3540 dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {
3542 /* Populate pd_name and pd_unit */
3545 name = device_get_name(dinfo->cfg.dev);
3548 db_kprintf("%s%d@pci%d:%d:%d:%d:\tclass=0x%06x card=0x%08x "
3549 "chip=0x%08x rev=0x%02x hdr=0x%02x\n",
3550 (name && *name) ? name : "none",
3551 (name && *name) ? (int)device_get_unit(dinfo->cfg.dev) :
3553 p->pc_sel.pc_domain, p->pc_sel.pc_bus, p->pc_sel.pc_dev,
3554 p->pc_sel.pc_func, (p->pc_class << 16) |
3555 (p->pc_subclass << 8) | p->pc_progif,
3556 (p->pc_subdevice << 16) | p->pc_subvendor,
3557 (p->pc_device << 16) | p->pc_vendor,
3558 p->pc_revid, p->pc_hdr);
/*
 * Lazily allocate the resource backing a BAR that has no entry in the
 * child's resource list yet: size the BAR by the write-ones/read-back
 * probe, validate the requested type against the BAR kind, allocate
 * from the parent, record it in the list, and program the (possibly
 * relocated) address back into the BAR.
 * NOTE(review): interior lines are elided in this excerpt.
 */
static struct resource *
3565 pci_alloc_map(device_t dev, device_t child, int type, int *rid,
3566 u_long start, u_long end, u_long count, u_int flags)
3568 struct pci_devinfo *dinfo = device_get_ivars(child);
3569 struct resource_list *rl = &dinfo->resources;
3570 struct resource_list_entry *rle;
3571 struct resource *res;
3572 pci_addr_t map, testval;
3576 * Weed out the bogons, and figure out how large the BAR/map
3577 * is. Bars that read back 0 here are bogus and unimplemented.
3578 * Note: atapci in legacy mode are special and handled elsewhere
3579 * in the code. If you have a atapci device in legacy mode and
3580 * it fails here, that other code is broken.
3583 map = pci_read_config(child, *rid, 4);
/* Standard BAR sizing: write all-ones, read back the size mask. */
3584 pci_write_config(child, *rid, 0xffffffff, 4);
3585 testval = pci_read_config(child, *rid, 4);
3586 if (pci_maprange(testval) == 64)
3587 map |= (pci_addr_t)pci_read_config(child, *rid + 4, 4) << 32;
3588 if (pci_mapbase(testval) == 0)
3592 * Restore the original value of the BAR. We may have reprogrammed
3593 * the BAR of the low-level console device and when booting verbose,
3594 * we need the console device addressable.
3596 pci_write_config(child, *rid, map, 4);
3598 if (PCI_BAR_MEM(testval)) {
3599 if (type != SYS_RES_MEMORY) {
3602 "child %s requested type %d for rid %#x,"
3603 " but the BAR says it is an memio\n",
3604 device_get_nameunit(child), type, *rid);
3608 if (type != SYS_RES_IOPORT) {
3611 "child %s requested type %d for rid %#x,"
3612 " but the BAR says it is an ioport\n",
3613 device_get_nameunit(child), type, *rid);
3618 * For real BARs, we need to override the size that
3619 * the driver requests, because that's what the BAR
3620 * actually uses and we would otherwise have a
3621 * situation where we might allocate the excess to
3622 * another driver, which won't work.
3624 mapsize = pci_mapsize(testval);
3625 count = 1UL << mapsize;
/* BARs are naturally aligned to their size. */
3626 if (RF_ALIGNMENT(flags) < mapsize)
3627 flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize);
3628 if (PCI_BAR_MEM(testval) && (testval & PCIM_BAR_MEM_PREFETCH))
3629 flags |= RF_PREFETCHABLE;
3632 * Allocate enough resource, and then write back the
3633 * appropriate bar for that resource.
3635 res = BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid,
3636 start, end, count, flags, -1);
3638 device_printf(child,
3639 "%#lx bytes of rid %#x res %d failed (%#lx, %#lx).\n",
3640 count, *rid, type, start, end);
3643 resource_list_add(rl, type, *rid, start, end, count, -1);
3644 rle = resource_list_find(rl, type, *rid);
3646 panic("pci_alloc_map: unexpectedly can't find resource.");
3648 rle->start = rman_get_start(res);
3649 rle->end = rman_get_end(res);
3652 device_printf(child,
3653 "Lazy allocation of %#lx bytes rid %#x type %d at %#lx\n",
3654 count, *rid, type, rman_get_start(res));
3655 map = rman_get_start(res);
/* Program the allocated address into the BAR (high dword for 64-bit). */
3657 pci_write_config(child, *rid, map, 4);
3658 if (pci_maprange(testval) == 64)
3659 pci_write_config(child, *rid + 4, map >> 32, 4);
/*
 * Bus method: allocate a resource for a child.  For direct children
 * this adds lazy behavior on top of resource_list_alloc(): routing an
 * IRQ on demand, refusing legacy INTx once MSI/MSI-X is allocated,
 * enabling I/O decoding, and lazily sizing/allocating BARs via
 * pci_alloc_map().  NOTE(review): interior lines are elided here.
 */
3665 pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
3666 u_long start, u_long end, u_long count, u_int flags, int cpuid)
3668 struct pci_devinfo *dinfo = device_get_ivars(child);
3669 struct resource_list *rl = &dinfo->resources;
3670 struct resource_list_entry *rle;
3671 pcicfgregs *cfg = &dinfo->cfg;
3674 * Perform lazy resource allocation
3676 if (device_get_parent(child) == dev) {
3680 * Can't alloc legacy interrupt once MSI messages
3681 * have been allocated.
3683 if (*rid == 0 && (cfg->msi.msi_alloc > 0 ||
3684 cfg->msix.msix_alloc > 0))
3687 * If the child device doesn't have an
3688 * interrupt routed and is deserving of an
3689 * interrupt, try to assign it one.
3691 if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) &&
3693 pci_assign_interrupt(dev, child, 0);
3695 case SYS_RES_IOPORT:
3696 case SYS_RES_MEMORY:
/* Only rids inside the device's BAR window get BAR handling. */
3697 if (*rid < PCIR_BAR(cfg->nummaps)) {
3699 * Enable the I/O mode. We should
3700 * also be assigning resources too
3701 * when none are present. The
3702 * resource_list_alloc kind of sorta does
3705 if (PCI_ENABLE_IO(dev, child, type))
3708 rle = resource_list_find(rl, type, *rid);
/* No entry yet: size and allocate the BAR lazily. */
3710 return (pci_alloc_map(dev, child, type, rid,
3711 start, end, count, flags));
3715 * If we've already allocated the resource, then
3716 * return it now. But first we may need to activate
3717 * it, since we don't allocate the resource as active
3718 * above. Normally this would be done down in the
3719 * nexus, but since we short-circuit that path we have
3720 * to do its job here. Not sure if we should kfree the
3721 * resource if it fails to activate.
3723 rle = resource_list_find(rl, type, *rid);
3724 if (rle != NULL && rle->res != NULL) {
3726 device_printf(child,
3727 "Reserved %#lx bytes for rid %#x type %d at %#lx\n",
3728 rman_get_size(rle->res), *rid, type,
3729 rman_get_start(rle->res));
3730 if ((flags & RF_ACTIVE) &&
3731 bus_generic_activate_resource(dev, child, type,
3732 *rid, rle->res) != 0)
/* Fall through to the generic list allocator for everything else. */
3737 return (resource_list_alloc(rl, dev, child, type, rid,
3738 start, end, count, flags, cpuid));
/*
 * Bus method: delete a child's resource.  Refuses while the resource
 * is still owned/active by the child; otherwise releases it, removes
 * the list entry, clears the BAR, and forwards the deletion upward.
 * NOTE(review): interior lines are elided in this excerpt.
 */
3742 pci_delete_resource(device_t dev, device_t child, int type, int rid)
3744 struct pci_devinfo *dinfo;
3745 struct resource_list *rl;
3746 struct resource_list_entry *rle;
3748 if (device_get_parent(child) != dev)
3751 dinfo = device_get_ivars(child);
3752 rl = &dinfo->resources;
3753 rle = resource_list_find(rl, type, rid);
3756 if (rman_get_device(rle->res) != dev ||
3757 rman_get_flags(rle->res) & RF_ACTIVE) {
3758 device_printf(dev, "delete_resource: "
3759 "Resource still owned by child, oops. "
3760 "(type=%d, rid=%d, addr=%lx)\n",
3761 rle->type, rle->rid,
3762 rman_get_start(rle->res));
3765 bus_release_resource(dev, type, rid, rle->res);
3767 resource_list_delete(rl, type, rid);
3770 * Why do we turn off the PCI configuration BAR when we delete a
3773 pci_write_config(child, rid, 0, 4);
3774 BUS_DELETE_RESOURCE(device_get_parent(dev), child, type, rid);
/* Bus method: hand back the child's per-device resource list. */
struct resource_list *
3778 pci_get_resource_list (device_t dev, device_t child)
3780 struct pci_devinfo *dinfo = device_get_ivars(child);
3785 return (&dinfo->resources);
/* Bus method: config-space read, delegated to the parent bridge. */
3789 pci_read_config_method(device_t dev, device_t child, int reg, int width)
3791 struct pci_devinfo *dinfo = device_get_ivars(child);
3792 pcicfgregs *cfg = &dinfo->cfg;
3794 return (PCIB_READ_CONFIG(device_get_parent(dev),
3795 cfg->bus, cfg->slot, cfg->func, reg, width));
/* Bus method: config-space write, delegated to the parent bridge. */
3799 pci_write_config_method(device_t dev, device_t child, int reg,
3800 uint32_t val, int width)
3802 struct pci_devinfo *dinfo = device_get_ivars(child);
3803 pcicfgregs *cfg = &dinfo->cfg;
3805 PCIB_WRITE_CONFIG(device_get_parent(dev),
3806 cfg->bus, cfg->slot, cfg->func, reg, val, width);
/* Bus method: format the child's location ("slot=S function=F") into buf. */
3810 pci_child_location_str_method(device_t dev, device_t child, char *buf,
3814 ksnprintf(buf, buflen, "slot=%d function=%d", pci_get_slot(child),
3815 pci_get_function(child));
/* Bus method: format the child's PnP match info (ids and class) into buf. */
3820 pci_child_pnpinfo_str_method(device_t dev, device_t child, char *buf,
3823 struct pci_devinfo *dinfo;
3826 dinfo = device_get_ivars(child);
3828 ksnprintf(buf, buflen, "vendor=0x%04x device=0x%04x subvendor=0x%04x "
3829 "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device,
3830 cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass,
/* Bus method: ask the parent bridge to route this child's INTx pin. */
3836 pci_assign_interrupt_method(device_t dev, device_t child)
3838 struct pci_devinfo *dinfo = device_get_ivars(child);
3839 pcicfgregs *cfg = &dinfo->cfg;
3841 return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child,
/*
 * Module event handler: on load, initialize the global device queue,
 * create /dev node for the pci control device, and load the vendor
 * database; on unload, destroy the device node.
 * NOTE(review): case labels are elided in this excerpt.
 */
3846 pci_modevent(module_t mod, int what, void *arg)
3848 static struct cdev *pci_cdev;
3852 STAILQ_INIT(&pci_devq);
3854 pci_cdev = make_dev(&pcic_ops, 0, UID_ROOT, GID_WHEEL, 0644,
3856 pci_load_vendor_data();
3860 destroy_dev(pci_cdev);
/*
 * Restore a (type 0 only) device's saved config registers after a
 * power transition: bring it to D0 first, then rewrite BARs, command,
 * interrupt, timing registers, and re-arm MSI/MSI-X if present.
 * NOTE(review): interior lines are elided in this excerpt.
 */
3868 pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo)
3873 * Only do header type 0 devices. Type 1 devices are bridges,
3874 * which we know need special treatment. Type 2 devices are
3875 * cardbus bridges which also require special treatment.
3876 * Other types are unknown, and we err on the side of safety
3879 if (dinfo->cfg.hdrtype != 0)
3883 * Restore the device to full power mode. We must do this
3884 * before we restore the registers because moving from D3 to
3885 * D0 will cause the chip's BARs and some other registers to
3886 * be reset to some unknown power on reset values. Cut down
3887 * the noise on boot by doing nothing if we are already in
3890 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
3891 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
3893 for (i = 0; i < dinfo->cfg.nummaps; i++)
3894 pci_write_config(dev, PCIR_BAR(i), dinfo->cfg.bar[i], 4);
3895 pci_write_config(dev, PCIR_BIOS, dinfo->cfg.bios, 4);
3896 pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2);
3897 pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1);
3898 pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1);
3899 pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1);
3900 pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1);
3901 pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1);
3902 pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1);
3903 pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1);
3904 pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1);
3906 /* Restore MSI and MSI-X configurations if they are present. */
3907 if (dinfo->cfg.msi.msi_location != 0)
3908 pci_resume_msi(dev);
3909 if (dinfo->cfg.msix.msix_location != 0)
3910 pci_resume_msix(dev);
/*
 * Snapshot a device's standard (header type 0) config-space registers into
 * dinfo->cfg so pci_cfg_restore() can replay them later, then — when
 * `setstate` permits (the guard lines are elided in this view) — optionally
 * power the device down to D3 according to the pci_do_power_nodriver
 * policy.  Bridges (type 1/2) are skipped entirely.
 */
3914 pci_cfg_save(device_t dev, struct pci_devinfo *dinfo, int setstate)
3921 	 * Only do header type 0 devices.  Type 1 devices are bridges, which
3922 	 * we know need special treatment.  Type 2 devices are cardbus bridges
3923 	 * which also require special treatment.  Other types are unknown, and
3924 	 * we err on the side of safety by ignoring them.  Powering down
3925 	 * bridges should not be undertaken lightly.
3927 	if (dinfo->cfg.hdrtype != 0)
3929 	for (i = 0; i < dinfo->cfg.nummaps; i++)
3930 		dinfo->cfg.bar[i] = pci_read_config(dev, PCIR_BAR(i), 4);
3931 	dinfo->cfg.bios = pci_read_config(dev, PCIR_BIOS, 4);
3934 	 * Some drivers apparently write to these registers w/o updating our
3935 	 * cached copy.  No harm happens if we update the copy, so do so here
3936 	 * so we can restore them.  The COMMAND register is modified by the
3937 	 * bus w/o updating the cache.  This should represent the normally
3938 	 * writable portion of the 'defined' part of type 0 headers.  In
3939 	 * theory we also need to save/restore the PCI capability structures
3940 	 * we know about, but apart from power we don't know any that are
3943 	dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
3944 	dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2);
3945 	dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2);
3946 	dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2);
3947 	dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2);
3948 	dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1);
3949 	dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1);
3950 	dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1);
3951 	dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1);
3952 	dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
3953 	dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
3954 	dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1);
3955 	dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1);
3956 	dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1);
3957 	dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1);
3960 	 * don't set the state for display devices, base peripherals and
3961 	 * memory devices since bad things happen when they are powered down.
3962 	 * We should (a) have drivers that can easily detach and (b) use
3963 	 * generic drivers for these devices so that some device actually
3964 	 * attaches.  We need to make sure that when we implement (a) we don't
3965 	 * power the device down on a reattach.
3967 	cls = pci_get_class(dev);
3970 	switch (pci_do_power_nodriver)
3972 	case 0:		/* NO powerdown at all */
3974 	case 1:		/* Conservative about what to power down */
3975 		if (cls == PCIC_STORAGE)
3978 	case 2:		/* Agressive about what to power down */
3979 		if (cls == PCIC_DISPLAY || cls == PCIC_MEMORY ||
3980 		    cls == PCIC_BASEPERIPH)
3983 	case 3:		/* Power down everything */
3987 	 * PCI spec says we can only go into D3 state from D0 state.
3988 	 * Transition from D[12] into D0 before going to D3 state.
3990 	ps = pci_get_powerstate(dev);
3991 	if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3)
3992 		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
3993 	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3)
3994 		pci_set_powerstate(dev, PCI_POWERSTATE_D3);
3997 #ifdef COMPAT_OLDPCI
4000  * Locate the parent of a PCI device by scanning the PCI devlist
4001  * and return the entry for the parent.
4002  * For devices on PCI Bus 0 (the host bus), this is the PCI Host.
4003  * For devices on secondary PCI busses, this is that bus' PCI-PCI Bridge.
/*
 * Linear scan of the global pci_devq list, bounded by pci_numdevs.
 * NOTE(review): the return statements inside the loops and at the end of
 * this function are elided in this view; the match conditions below are
 * the visible selection logic.
 */
4006 pci_devlist_get_parent(pcicfgregs *cfg)
4008 	struct devlist *devlist_head;
4009 	struct pci_devinfo *dinfo;
4010 	pcicfgregs *bridge_cfg;
4013 	dinfo = STAILQ_FIRST(devlist_head = &pci_devq);
4015 	/* If the device is on PCI bus 0, look for the host */
4016 	if (cfg->bus == 0) {
4017 		for (i = 0; (dinfo != NULL) && (i < pci_numdevs);
4018 		     dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {
4019 			bridge_cfg = &dinfo->cfg;
4020 			if (bridge_cfg->baseclass == PCIC_BRIDGE
4021 			    && bridge_cfg->subclass == PCIS_BRIDGE_HOST
4022 			    && bridge_cfg->bus == cfg->bus) {
4028 	/* If the device is not on PCI bus 0, look for the PCI-PCI bridge */
4030 	for (i = 0; (dinfo != NULL) && (i < pci_numdevs);
4031 	     dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {
4032 		bridge_cfg = &dinfo->cfg;
4033 		if (bridge_cfg->baseclass == PCIC_BRIDGE
4034 		    && bridge_cfg->subclass == PCIS_BRIDGE_PCI
4035 		    && bridge_cfg->secondarybus == cfg->bus) {
4044 #endif	/* COMPAT_OLDPCI */
4047 pci_alloc_1intr(device_t dev, int msi_enable, int *rid0, u_int *flags0)
4054 type = PCI_INTR_TYPE_LEGACY;
4055 flags = RF_SHAREABLE | RF_ACTIVE;
4057 ksnprintf(env, sizeof(env), "hw.%s.msi.enable",
4058 device_get_nameunit(dev));
4059 kgetenv_int(env, &msi_enable);
4064 ksnprintf(env, sizeof(env), "hw.%s.msi.cpu",
4065 device_get_nameunit(dev));
4066 kgetenv_int(env, &cpu);
4070 if (pci_alloc_msi(dev, &rid, 1, cpu) == 0) {
4071 flags &= ~RF_SHAREABLE;
4072 type = PCI_INTR_TYPE_MSI;