2 * Copyright (c) 1997, Stefan Esser <se@kfreebsd.org>
3 * Copyright (c) 2000, Michael Smith <msmith@kfreebsd.org>
4 * Copyright (c) 2000, BSDi
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 * $FreeBSD: src/sys/dev/pci/pci.c,v 1.355.2.9.2.1 2009/04/15 03:14:26 kensmith Exp $
33 #include "opt_compat_oldpci.h"
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/malloc.h>
38 #include <sys/module.h>
39 #include <sys/linker.h>
40 #include <sys/fcntl.h>
42 #include <sys/kernel.h>
43 #include <sys/queue.h>
44 #include <sys/sysctl.h>
45 #include <sys/endian.h>
46 #include <sys/machintr.h>
48 #include <machine/msi_machdep.h>
52 #include <vm/vm_extern.h>
56 #include <sys/device.h>
58 #include <sys/pciio.h>
59 #include <bus/pci/pcireg.h>
60 #include <bus/pci/pcivar.h>
61 #include <bus/pci/pci_private.h>
67 #include <contrib/dev/acpica/acpi.h>
70 #define ACPI_PWR_FOR_SLEEP(x, y, z)
/*
 * Forward declarations for the PCI bus driver's internal helpers.
 *
 * NOTE(review): this file is a numbered extraction of pci.c; several
 * continuation lines of these prototypes (trailing parameter lists of
 * pci_assign_interrupt, pci_hdrtypedata, pci_enable_msi,
 * pci_read_cap_subvendor) are missing from the extraction -- restore
 * them from the upstream source before compiling.
 */
73 extern struct dev_ops pcic_ops; /* XXX */
75 typedef void (*pci_read_cap_t)(device_t, int, int, pcicfgregs *);
77 static uint32_t pci_mapbase(unsigned mapreg);
78 static const char *pci_maptype(unsigned mapreg);
79 static int pci_mapsize(unsigned testval);
80 static int pci_maprange(unsigned mapreg);
81 static void pci_fixancient(pcicfgregs *cfg);
83 static int pci_porten(device_t pcib, int b, int s, int f);
84 static int pci_memen(device_t pcib, int b, int s, int f);
85 static void pci_assign_interrupt(device_t bus, device_t dev,
87 static int pci_add_map(device_t pcib, device_t bus, device_t dev,
88 int b, int s, int f, int reg,
89 struct resource_list *rl, int force, int prefetch);
90 static int pci_probe(device_t dev);
91 static int pci_attach(device_t dev);
92 static void pci_child_detached(device_t, device_t);
93 static void pci_load_vendor_data(void);
94 static int pci_describe_parse_line(char **ptr, int *vendor,
95 int *device, char **desc);
96 static char *pci_describe_device(device_t dev);
97 static int pci_modevent(module_t mod, int what, void *arg);
98 static void pci_hdrtypedata(device_t pcib, int b, int s, int f,
100 static void pci_read_capabilities(device_t pcib, pcicfgregs *cfg);
101 static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg,
102 int reg, uint32_t *data);
104 static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg,
105 int reg, uint32_t data);
107 static void pci_read_vpd(device_t pcib, pcicfgregs *cfg);
108 static void pci_disable_msi(device_t dev);
109 static void pci_enable_msi(device_t dev, uint64_t address,
111 static void pci_setup_msix_vector(device_t dev, u_int index,
112 uint64_t address, uint32_t data);
113 static void pci_mask_msix_vector(device_t dev, u_int index);
114 static void pci_unmask_msix_vector(device_t dev, u_int index);
115 static void pci_mask_msix_allvectors(device_t dev);
116 static struct msix_vector *pci_find_msix_vector(device_t dev, int rid);
117 static int pci_msi_blacklisted(void);
118 static void pci_resume_msi(device_t dev);
119 static void pci_resume_msix(device_t dev);
120 static int pcie_slotimpl(const pcicfgregs *);
121 static void pci_print_verbose_expr(const pcicfgregs *);
/* Per-capability parsers dispatched from the pci_read_caps[] table below. */
123 static void pci_read_cap_pmgt(device_t, int, int, pcicfgregs *);
124 static void pci_read_cap_ht(device_t, int, int, pcicfgregs *);
125 static void pci_read_cap_msi(device_t, int, int, pcicfgregs *);
126 static void pci_read_cap_msix(device_t, int, int, pcicfgregs *);
127 static void pci_read_cap_vpd(device_t, int, int, pcicfgregs *);
128 static void pci_read_cap_subvendor(device_t, int, int,
130 static void pci_read_cap_pcix(device_t, int, int, pcicfgregs *);
131 static void pci_read_cap_express(device_t, int, int, pcicfgregs *);
/*
 * newbus method table wiring the generic device/bus interfaces and the
 * PCI-specific kobj methods to this driver's implementations.
 *
 * NOTE(review): the table terminator entry (e.g. { 0, 0 } / DEVMETHOD_END)
 * and the closing "};" are missing from this extraction -- confirm against
 * the upstream source.
 */
133 static device_method_t pci_methods[] = {
134 /* Device interface */
135 DEVMETHOD(device_probe, pci_probe),
136 DEVMETHOD(device_attach, pci_attach),
137 DEVMETHOD(device_detach, bus_generic_detach),
138 DEVMETHOD(device_shutdown, bus_generic_shutdown),
139 DEVMETHOD(device_suspend, pci_suspend),
140 DEVMETHOD(device_resume, pci_resume),
/* Bus interface */
143 DEVMETHOD(bus_print_child, pci_print_child),
144 DEVMETHOD(bus_probe_nomatch, pci_probe_nomatch),
145 DEVMETHOD(bus_read_ivar, pci_read_ivar),
146 DEVMETHOD(bus_write_ivar, pci_write_ivar),
147 DEVMETHOD(bus_driver_added, pci_driver_added),
148 DEVMETHOD(bus_child_detached, pci_child_detached),
149 DEVMETHOD(bus_setup_intr, pci_setup_intr),
150 DEVMETHOD(bus_teardown_intr, pci_teardown_intr),
152 DEVMETHOD(bus_get_resource_list,pci_get_resource_list),
153 DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
154 DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
155 DEVMETHOD(bus_delete_resource, pci_delete_resource),
156 DEVMETHOD(bus_alloc_resource, pci_alloc_resource),
157 DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource),
158 DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
159 DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
160 DEVMETHOD(bus_child_pnpinfo_str, pci_child_pnpinfo_str_method),
161 DEVMETHOD(bus_child_location_str, pci_child_location_str_method),
/* PCI interface */
164 DEVMETHOD(pci_read_config, pci_read_config_method),
165 DEVMETHOD(pci_write_config, pci_write_config_method),
166 DEVMETHOD(pci_enable_busmaster, pci_enable_busmaster_method),
167 DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method),
168 DEVMETHOD(pci_enable_io, pci_enable_io_method),
169 DEVMETHOD(pci_disable_io, pci_disable_io_method),
170 DEVMETHOD(pci_get_vpd_ident, pci_get_vpd_ident_method),
171 DEVMETHOD(pci_get_vpd_readonly, pci_get_vpd_readonly_method),
172 DEVMETHOD(pci_get_powerstate, pci_get_powerstate_method),
173 DEVMETHOD(pci_set_powerstate, pci_set_powerstate_method),
174 DEVMETHOD(pci_assign_interrupt, pci_assign_interrupt_method),
175 DEVMETHOD(pci_find_extcap, pci_find_extcap_method),
176 DEVMETHOD(pci_alloc_msi, pci_alloc_msi_method),
177 DEVMETHOD(pci_release_msi, pci_release_msi_method),
178 DEVMETHOD(pci_alloc_msix_vector, pci_alloc_msix_vector_method),
179 DEVMETHOD(pci_release_msix_vector, pci_release_msix_vector_method),
180 DEVMETHOD(pci_msi_count, pci_msi_count_method),
181 DEVMETHOD(pci_msix_count, pci_msix_count_method),
/* Driver class and module glue for attaching under pcib. */
186 DEFINE_CLASS_0(pci, pci_driver, pci_methods, 0);
188 static devclass_t pci_devclass;
189 DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, NULL);
190 MODULE_VERSION(pci, 1);
/* Cached copy of the pcivendordata file used by pci_describe_device(). */
192 static char *pci_vendordata;
193 static size_t pci_vendordata_size;
/*
 * Capability-ID -> parser dispatch table walked by pci_read_capabilities().
 * NOTE(review): the "int cap;" member line of the struct and the closing
 * "};" are missing from this extraction.
 */
196 static const struct pci_read_cap {
198 pci_read_cap_t read_cap;
199 } pci_read_caps[] = {
200 { PCIY_PMG, pci_read_cap_pmgt },
201 { PCIY_HT, pci_read_cap_ht },
202 { PCIY_MSI, pci_read_cap_msi },
203 { PCIY_MSIX, pci_read_cap_msix },
204 { PCIY_VPD, pci_read_cap_vpd },
205 { PCIY_SUBVENDOR, pci_read_cap_subvendor },
206 { PCIY_PCIX, pci_read_cap_pcix },
207 { PCIY_EXPRESS, pci_read_cap_express },
208 { 0, NULL } /* required last entry */
/*
 * Per-device quirk table.
 * NOTE(review): the "struct pci_quirk {" opener and its type/arg members
 * are missing from this extraction; only the devid member is visible.
 */
212 uint32_t devid; /* Vendor/device of the card */
214 #define PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */
215 #define PCI_QUIRK_DISABLE_MSI 2 /* MSI/MSI-X doesn't work */
220 struct pci_quirk pci_quirks[] = {
221 /* The Intel 82371AB and 82443MX has a map register at offset 0x90. */
222 { 0x71138086, PCI_QUIRK_MAP_REG, 0x90, 0 },
223 { 0x719b8086, PCI_QUIRK_MAP_REG, 0x90, 0 },
224 /* As does the Serverworks OSB4 (the SMBus mapping register) */
225 { 0x02001166, PCI_QUIRK_MAP_REG, 0x90, 0 },
228 * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge
229 * or the CMIC-SL (AKA ServerWorks GC_LE).
231 { 0x00141166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
232 { 0x00171166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
235 * MSI doesn't work on earlier Intel chipsets including
236 * E7500, E7501, E7505, 845, 865, 875/E7210, and 855.
238 { 0x25408086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
239 { 0x254c8086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
240 { 0x25508086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
241 { 0x25608086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
242 { 0x25708086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
243 { 0x25788086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
244 { 0x35808086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
247 * MSI doesn't work with devices behind the AMD 8131 HT-PCIX
250 { 0x74501022, PCI_QUIRK_DISABLE_MSI, 0, 0 },
255 /* map register information */
256 #define PCI_MAPMEM 0x01 /* memory map */
257 #define PCI_MAPMEMP 0x02 /* prefetchable memory map */
258 #define PCI_MAPPORT 0x04 /* port map */
/* MSI-X resource IDs are 1-based; vector numbers are 0-based. */
260 #define PCI_MSIX_RID2VEC(rid) ((rid) - 1) /* rid -> MSI-X vector # */
261 #define PCI_MSIX_VEC2RID(vec) ((vec) + 1) /* MSI-X vector # -> rid */
/* Global list of all discovered PCI devices, plus generation counters. */
263 struct devlist pci_devq;
264 uint32_t pci_generation;
265 uint32_t pci_numdevs = 0;
/* Set when at least one PCIe / PCI-X device is found during cap scan. */
266 static int pcie_chipset, pcix_chipset;
269 SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD, 0, "PCI bus tuning parameters");
271 static int pci_enable_io_modes = 1;
272 TUNABLE_INT("hw.pci.enable_io_modes", &pci_enable_io_modes);
273 SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RW,
274 &pci_enable_io_modes, 1,
275 "Enable I/O and memory bits in the config register. Some BIOSes do not\n\
276 enable these bits correctly. We'd like to do this all the time, but there\n\
277 are some peripherals that this causes problems with.");
279 static int pci_do_power_nodriver = 0;
280 TUNABLE_INT("hw.pci.do_power_nodriver", &pci_do_power_nodriver);
281 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RW,
282 &pci_do_power_nodriver, 0,
283 "Place a function into D3 state when no driver attaches to it. 0 means\n\
284 disable. 1 means conservatively place devices into D3 state. 2 means\n\
285 aggressively place devices into D3 state. 3 means put absolutely everything\n\
288 static int pci_do_power_resume = 1;
289 TUNABLE_INT("hw.pci.do_power_resume", &pci_do_power_resume);
290 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RW,
291 &pci_do_power_resume, 1,
292 "Transition from D3 -> D0 on resume.");
294 static int pci_do_msi = 1;
295 TUNABLE_INT("hw.pci.enable_msi", &pci_do_msi);
296 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RW, &pci_do_msi, 1,
297 "Enable support for MSI interrupts");
299 static int pci_do_msix = 0;
301 TUNABLE_INT("hw.pci.enable_msix", &pci_do_msix);
302 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RW, &pci_do_msix, 1,
303 "Enable support for MSI-X interrupts");
306 static int pci_honor_msi_blacklist = 1;
307 TUNABLE_INT("hw.pci.honor_msi_blacklist", &pci_honor_msi_blacklist);
308 SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RD,
309 &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI");
/* Round-robin CPU id used when assigning MSI targets (assumed -- confirm). */
311 static int pci_msi_cpuid;
313 /* Find a device_t by bus/slot/function in domain 0 */
316 pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func)
319 return (pci_find_dbsf(0, bus, slot, func));
322 /* Find a device_t by domain/bus/slot/function */
325 pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func)
327 struct pci_devinfo *dinfo;
329 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
330 if ((dinfo->cfg.domain == domain) &&
331 (dinfo->cfg.bus == bus) &&
332 (dinfo->cfg.slot == slot) &&
333 (dinfo->cfg.func == func)) {
334 return (dinfo->cfg.dev);
341 /* Find a device_t by vendor/device ID */
344 pci_find_device(uint16_t vendor, uint16_t device)
346 struct pci_devinfo *dinfo;
348 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
349 if ((dinfo->cfg.vendor == vendor) &&
350 (dinfo->cfg.device == device)) {
351 return (dinfo->cfg.dev);
358 /* return base address of memory or port map */
361 pci_mapbase(uint32_t mapreg)
364 if (PCI_BAR_MEM(mapreg))
365 return (mapreg & PCIM_BAR_MEM_BASE);
367 return (mapreg & PCIM_BAR_IO_BASE);
370 /* return map type of memory or port map */
373 pci_maptype(unsigned mapreg)
376 if (PCI_BAR_IO(mapreg))
378 if (mapreg & PCIM_BAR_MEM_PREFETCH)
379 return ("Prefetchable Memory");
383 /* return log2 of map size decoded for memory or port map */
386 pci_mapsize(uint32_t testval)
390 testval = pci_mapbase(testval);
393 while ((testval & 1) == 0)
402 /* return log2 of address range supported by map register */
405 pci_maprange(unsigned mapreg)
409 if (PCI_BAR_IO(mapreg))
412 switch (mapreg & PCIM_BAR_MEM_TYPE) {
413 case PCIM_BAR_MEM_32:
416 case PCIM_BAR_MEM_1MB:
419 case PCIM_BAR_MEM_64:
426 /* adjust some values from PCI 1.0 devices to match 2.0 standards ... */
429 pci_fixancient(pcicfgregs *cfg)
431 if (cfg->hdrtype != 0)
434 /* PCI to PCI bridges use header type 1 */
435 if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI)
439 /* extract header type specific config data */
442 pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg)
444 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
445 switch (cfg->hdrtype) {
447 cfg->subvendor = REG(PCIR_SUBVEND_0, 2);
448 cfg->subdevice = REG(PCIR_SUBDEV_0, 2);
449 cfg->nummaps = PCI_MAXMAPS_0;
452 cfg->nummaps = PCI_MAXMAPS_1;
454 cfg->secondarybus = REG(PCIR_SECBUS_1, 1);
458 cfg->subvendor = REG(PCIR_SUBVEND_2, 2);
459 cfg->subdevice = REG(PCIR_SUBDEV_2, 2);
460 cfg->nummaps = PCI_MAXMAPS_2;
462 cfg->secondarybus = REG(PCIR_SECBUS_2, 1);
469 /* read configuration header into pcicfgregs structure */
471 pci_read_device(device_t pcib, int d, int b, int s, int f, size_t size)
473 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
474 pcicfgregs *cfg = NULL;
475 struct pci_devinfo *devlist_entry;
476 struct devlist *devlist_head;
478 devlist_head = &pci_devq;
480 devlist_entry = NULL;
482 if (REG(PCIR_DEVVENDOR, 4) != -1) {
483 devlist_entry = kmalloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
485 cfg = &devlist_entry->cfg;
491 cfg->vendor = REG(PCIR_VENDOR, 2);
492 cfg->device = REG(PCIR_DEVICE, 2);
493 cfg->cmdreg = REG(PCIR_COMMAND, 2);
494 cfg->statreg = REG(PCIR_STATUS, 2);
495 cfg->baseclass = REG(PCIR_CLASS, 1);
496 cfg->subclass = REG(PCIR_SUBCLASS, 1);
497 cfg->progif = REG(PCIR_PROGIF, 1);
498 cfg->revid = REG(PCIR_REVID, 1);
499 cfg->hdrtype = REG(PCIR_HDRTYPE, 1);
500 cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1);
501 cfg->lattimer = REG(PCIR_LATTIMER, 1);
502 cfg->intpin = REG(PCIR_INTPIN, 1);
503 cfg->intline = REG(PCIR_INTLINE, 1);
505 cfg->mingnt = REG(PCIR_MINGNT, 1);
506 cfg->maxlat = REG(PCIR_MAXLAT, 1);
508 cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0;
509 cfg->hdrtype &= ~PCIM_MFDEV;
512 pci_hdrtypedata(pcib, b, s, f, cfg);
514 pci_read_capabilities(pcib, cfg);
516 STAILQ_INSERT_TAIL(devlist_head, devlist_entry, pci_links);
518 devlist_entry->conf.pc_sel.pc_domain = cfg->domain;
519 devlist_entry->conf.pc_sel.pc_bus = cfg->bus;
520 devlist_entry->conf.pc_sel.pc_dev = cfg->slot;
521 devlist_entry->conf.pc_sel.pc_func = cfg->func;
522 devlist_entry->conf.pc_hdr = cfg->hdrtype;
524 devlist_entry->conf.pc_subvendor = cfg->subvendor;
525 devlist_entry->conf.pc_subdevice = cfg->subdevice;
526 devlist_entry->conf.pc_vendor = cfg->vendor;
527 devlist_entry->conf.pc_device = cfg->device;
529 devlist_entry->conf.pc_class = cfg->baseclass;
530 devlist_entry->conf.pc_subclass = cfg->subclass;
531 devlist_entry->conf.pc_progif = cfg->progif;
532 devlist_entry->conf.pc_revid = cfg->revid;
537 return (devlist_entry);
542 pci_fixup_nextptr(int *nextptr0)
544 int nextptr = *nextptr0;
546 /* "Next pointer" is only one byte */
547 KASSERT(nextptr <= 0xff, ("Illegal next pointer %d\n", nextptr));
551 * PCI local bus spec 3.0:
553 * "... The bottom two bits of all pointers are reserved
554 * and must be implemented as 00b although software must
555 * mask them to allow for future uses of these bits ..."
558 kprintf("Illegal PCI extended capability "
559 "offset, fixup 0x%02x -> 0x%02x\n",
560 nextptr, nextptr & ~0x3);
566 if (nextptr < 0x40) {
568 kprintf("Illegal PCI extended capability "
569 "offset 0x%02x", nextptr);
577 pci_read_cap_pmgt(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
580 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
582 struct pcicfg_pp *pp = &cfg->pp;
587 pp->pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
588 pp->pp_status = ptr + PCIR_POWER_STATUS;
589 pp->pp_pmcsr = ptr + PCIR_POWER_PMCSR;
591 if ((nextptr - ptr) > PCIR_POWER_DATA) {
594 * We should write to data_select and read back from
595 * data_scale to determine whether data register is
599 pp->pp_data = ptr + PCIR_POWER_DATA;
609 pci_read_cap_ht(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
611 #if defined(__i386__) || defined(__x86_64__)
614 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
616 struct pcicfg_ht *ht = &cfg->ht;
620 /* Determine HT-specific capability type. */
621 val = REG(ptr + PCIR_HT_COMMAND, 2);
623 if ((val & 0xe000) == PCIM_HTCAP_SLAVE)
624 cfg->ht.ht_slave = ptr;
626 if ((val & PCIM_HTCMD_CAP_MASK) != PCIM_HTCAP_MSI_MAPPING)
629 if (!(val & PCIM_HTCMD_MSI_FIXED)) {
630 /* Sanity check the mapping window. */
631 addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI, 4);
633 addr |= REG(ptr + PCIR_HTMSI_ADDRESS_LO, 4);
634 if (addr != MSI_X86_ADDR_BASE) {
635 device_printf(pcib, "HT Bridge at pci%d:%d:%d:%d "
636 "has non-default MSI window 0x%llx\n",
637 cfg->domain, cfg->bus, cfg->slot, cfg->func,
641 addr = MSI_X86_ADDR_BASE;
645 ht->ht_msictrl = val;
646 ht->ht_msiaddr = addr;
650 #endif /* __i386__ || __x86_64__ */
654 pci_read_cap_msi(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
657 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
659 struct pcicfg_msi *msi = &cfg->msi;
661 msi->msi_location = ptr;
662 msi->msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2);
663 msi->msi_msgnum = 1 << ((msi->msi_ctrl & PCIM_MSICTRL_MMC_MASK) >> 1);
669 pci_read_cap_msix(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
672 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
674 struct pcicfg_msix *msix = &cfg->msix;
677 msix->msix_location = ptr;
678 msix->msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2);
679 msix->msix_msgnum = (msix->msix_ctrl & PCIM_MSIXCTRL_TABLE_SIZE) + 1;
681 val = REG(ptr + PCIR_MSIX_TABLE, 4);
682 msix->msix_table_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK);
683 msix->msix_table_offset = val & ~PCIM_MSIX_BIR_MASK;
685 val = REG(ptr + PCIR_MSIX_PBA, 4);
686 msix->msix_pba_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK);
687 msix->msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK;
689 TAILQ_INIT(&msix->msix_vectors);
695 pci_read_cap_vpd(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
697 cfg->vpd.vpd_reg = ptr;
701 pci_read_cap_subvendor(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
704 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
706 /* Should always be true. */
707 if ((cfg->hdrtype & PCIM_HDRTYPE) == 1) {
710 val = REG(ptr + PCIR_SUBVENDCAP_ID, 4);
711 cfg->subvendor = val & 0xffff;
712 cfg->subdevice = val >> 16;
719 pci_read_cap_pcix(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
722 * Assume we have a PCI-X chipset if we have
723 * at least one PCI-PCI bridge with a PCI-X
724 * capability. Note that some systems with
725 * PCI-express or HT chipsets might match on
726 * this check as well.
728 if ((cfg->hdrtype & PCIM_HDRTYPE) == 1)
731 cfg->pcix.pcix_ptr = ptr;
735 pcie_slotimpl(const pcicfgregs *cfg)
737 const struct pcicfg_expr *expr = &cfg->expr;
741 * Only version 1 can be parsed currently
743 if ((expr->expr_cap & PCIEM_CAP_VER_MASK) != PCIEM_CAP_VER_1)
747 * - Slot implemented bit is meaningful iff current port is
748 * root port or down stream port.
749 * - Testing for root port or down stream port is meanningful
750 * iff PCI configure has type 1 header.
753 if (cfg->hdrtype != 1)
756 port_type = expr->expr_cap & PCIEM_CAP_PORT_TYPE;
757 if (port_type != PCIE_ROOT_PORT && port_type != PCIE_DOWN_STREAM_PORT)
760 if (!(expr->expr_cap & PCIEM_CAP_SLOT_IMPL))
767 pci_read_cap_express(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
770 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
772 struct pcicfg_expr *expr = &cfg->expr;
775 * Assume we have a PCI-express chipset if we have
776 * at least one PCI-express device.
780 expr->expr_ptr = ptr;
781 expr->expr_cap = REG(ptr + PCIER_CAPABILITY, 2);
784 * Only version 1 can be parsed currently
786 if ((expr->expr_cap & PCIEM_CAP_VER_MASK) != PCIEM_CAP_VER_1)
790 * Read slot capabilities. Slot capabilities exists iff
791 * current port's slot is implemented
793 if (pcie_slotimpl(cfg))
794 expr->expr_slotcap = REG(ptr + PCIER_SLOTCAP, 4);
800 pci_read_capabilities(device_t pcib, pcicfgregs *cfg)
802 #define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
803 #define WREG(n, v, w) PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w)
808 if ((REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT) == 0) {
809 /* No capabilities */
813 switch (cfg->hdrtype & PCIM_HDRTYPE) {
816 ptrptr = PCIR_CAP_PTR;
819 ptrptr = PCIR_CAP_PTR_2; /* cardbus capabilities ptr */
822 return; /* no capabilities support */
824 nextptr = REG(ptrptr, 1); /* sanity check? */
827 * Read capability entries.
829 while (pci_fixup_nextptr(&nextptr)) {
830 const struct pci_read_cap *rc;
833 /* Find the next entry */
834 nextptr = REG(ptr + PCICAP_NEXTPTR, 1);
836 /* Process this entry */
837 val = REG(ptr + PCICAP_ID, 1);
838 for (rc = pci_read_caps; rc->read_cap != NULL; ++rc) {
839 if (rc->cap == val) {
840 rc->read_cap(pcib, ptr, nextptr, cfg);
846 #if defined(__i386__) || defined(__x86_64__)
848 * Enable the MSI mapping window for all HyperTransport
849 * slaves. PCI-PCI bridges have their windows enabled via
852 if (cfg->ht.ht_slave != 0 && cfg->ht.ht_msimap != 0 &&
853 !(cfg->ht.ht_msictrl & PCIM_HTCMD_MSI_ENABLE)) {
855 "Enabling MSI window for HyperTransport slave at pci%d:%d:%d:%d\n",
856 cfg->domain, cfg->bus, cfg->slot, cfg->func);
857 cfg->ht.ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
858 WREG(cfg->ht.ht_msimap + PCIR_HT_COMMAND, cfg->ht.ht_msictrl,
863 /* REG and WREG use carry through to next functions */
867 * PCI Vital Product Data
870 #define PCI_VPD_TIMEOUT 1000000
873 pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data)
875 int count = PCI_VPD_TIMEOUT;
877 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
879 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2);
881 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000) {
884 DELAY(1); /* limit looping */
886 *data = (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4));
893 pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data)
895 int count = PCI_VPD_TIMEOUT;
897 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
899 WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4);
900 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2);
901 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000) {
904 DELAY(1); /* limit looping */
911 #undef PCI_VPD_TIMEOUT
913 struct vpd_readstate {
923 vpd_nextbyte(struct vpd_readstate *vrs, uint8_t *data)
928 if (vrs->bytesinval == 0) {
929 if (pci_read_vpd_reg(vrs->pcib, vrs->cfg, vrs->off, ®))
931 vrs->val = le32toh(reg);
933 byte = vrs->val & 0xff;
936 vrs->val = vrs->val >> 8;
937 byte = vrs->val & 0xff;
947 pcie_slot_implemented(device_t dev)
949 struct pci_devinfo *dinfo = device_get_ivars(dev);
951 return pcie_slotimpl(&dinfo->cfg);
955 pcie_set_max_readrq(device_t dev, uint16_t rqsize)
960 rqsize &= PCIEM_DEVCTL_MAX_READRQ_MASK;
961 if (rqsize > PCIEM_DEVCTL_MAX_READRQ_4096) {
962 panic("%s: invalid max read request size 0x%02x\n",
963 device_get_nameunit(dev), rqsize);
966 expr_ptr = pci_get_pciecap_ptr(dev);
968 panic("%s: not PCIe device\n", device_get_nameunit(dev));
970 val = pci_read_config(dev, expr_ptr + PCIER_DEVCTRL, 2);
971 if ((val & PCIEM_DEVCTL_MAX_READRQ_MASK) != rqsize) {
973 device_printf(dev, "adjust device control 0x%04x", val);
975 val &= ~PCIEM_DEVCTL_MAX_READRQ_MASK;
977 pci_write_config(dev, expr_ptr + PCIER_DEVCTRL, val, 2);
980 kprintf(" -> 0x%04x\n", val);
985 pcie_get_max_readrq(device_t dev)
990 expr_ptr = pci_get_pciecap_ptr(dev);
992 panic("%s: not PCIe device\n", device_get_nameunit(dev));
994 val = pci_read_config(dev, expr_ptr + PCIER_DEVCTRL, 2);
995 return (val & PCIEM_DEVCTL_MAX_READRQ_MASK);
/*
 * pci_read_vpd() -- parse a device's Vital Product Data into cfg->vpd.
 *
 * NOTE(review): this extraction is missing many interior lines of the
 * function (header/braces, several case bodies, state transitions), so
 * the fragment is kept byte-for-byte; only comments are added.  The
 * visible code is a state machine: state 0 parses a resource item name
 * and length, state 1 consumes the Identifier String, states 2-3 read
 * VPD-R keywords/values (validating the "RV" checksum), states 5-6 read
 * VPD-W keywords/values; the tail frees partially built arrays on a bad
 * checksum or I/O error before marking cfg->vpd.vpd_cached = 1.
 */
999 pci_read_vpd(device_t pcib, pcicfgregs *cfg)
1001 struct vpd_readstate vrs;
1006 int alloc, off; /* alloc/off for RO/W arrays */
1012 /* init vpd reader */
1020 name = remain = i = 0; /* shut up stupid gcc */
1021 alloc = off = 0; /* shut up stupid gcc */
1022 dflen = 0; /* shut up stupid gcc */
1024 while (state >= 0) {
1025 if (vpd_nextbyte(&vrs, &byte)) {
1030 kprintf("vpd: val: %#x, off: %d, bytesinval: %d, byte: %#hhx, " \
1031 "state: %d, remain: %d, name: %#x, i: %d\n", vrs.val,
1032 vrs.off, vrs.bytesinval, byte, state, remain, name, i);
1035 case 0: /* item name */
1037 if (vpd_nextbyte(&vrs, &byte2)) {
1042 if (vpd_nextbyte(&vrs, &byte2)) {
1046 remain |= byte2 << 8;
1047 if (remain > (0x7f*4 - vrs.off)) {
1050 "pci%d:%d:%d:%d: invalid VPD data, remain %#x\n",
1051 cfg->domain, cfg->bus, cfg->slot,
1056 remain = byte & 0x7;
1057 name = (byte >> 3) & 0xf;
1060 case 0x2: /* String */
1061 cfg->vpd.vpd_ident = kmalloc(remain + 1,
1062 M_DEVBUF, M_WAITOK);
1069 case 0x10: /* VPD-R */
1072 cfg->vpd.vpd_ros = kmalloc(alloc *
1073 sizeof(*cfg->vpd.vpd_ros), M_DEVBUF,
1077 case 0x11: /* VPD-W */
1080 cfg->vpd.vpd_w = kmalloc(alloc *
1081 sizeof(*cfg->vpd.vpd_w), M_DEVBUF,
1085 default: /* Invalid data, abort */
1091 case 1: /* Identifier String */
1092 cfg->vpd.vpd_ident[i++] = byte;
1095 cfg->vpd.vpd_ident[i] = '\0';
1100 case 2: /* VPD-R Keyword Header */
1102 cfg->vpd.vpd_ros = krealloc(cfg->vpd.vpd_ros,
1103 (alloc *= 2) * sizeof(*cfg->vpd.vpd_ros),
1104 M_DEVBUF, M_WAITOK | M_ZERO);
1106 cfg->vpd.vpd_ros[off].keyword[0] = byte;
1107 if (vpd_nextbyte(&vrs, &byte2)) {
1111 cfg->vpd.vpd_ros[off].keyword[1] = byte2;
1112 if (vpd_nextbyte(&vrs, &byte2)) {
1118 strncmp(cfg->vpd.vpd_ros[off].keyword, "RV",
1121 * if this happens, we can't trust the rest
1125 "pci%d:%d:%d:%d: bad keyword length: %d\n",
1126 cfg->domain, cfg->bus, cfg->slot,
1131 } else if (dflen == 0) {
1132 cfg->vpd.vpd_ros[off].value = kmalloc(1 *
1133 sizeof(*cfg->vpd.vpd_ros[off].value),
1134 M_DEVBUF, M_WAITOK);
1135 cfg->vpd.vpd_ros[off].value[0] = '\x00';
1137 cfg->vpd.vpd_ros[off].value = kmalloc(
1139 sizeof(*cfg->vpd.vpd_ros[off].value),
1140 M_DEVBUF, M_WAITOK);
1143 /* keep in sync w/ state 3's transistions */
1144 if (dflen == 0 && remain == 0)
1146 else if (dflen == 0)
1152 case 3: /* VPD-R Keyword Value */
1153 cfg->vpd.vpd_ros[off].value[i++] = byte;
1154 if (strncmp(cfg->vpd.vpd_ros[off].keyword,
1155 "RV", 2) == 0 && cksumvalid == -1) {
1161 "pci%d:%d:%d:%d: bad VPD cksum, remain %hhu\n",
1162 cfg->domain, cfg->bus,
1163 cfg->slot, cfg->func,
1172 /* keep in sync w/ state 2's transistions */
1174 cfg->vpd.vpd_ros[off++].value[i++] = '\0';
1175 if (dflen == 0 && remain == 0) {
1176 cfg->vpd.vpd_rocnt = off;
1177 cfg->vpd.vpd_ros = krealloc(cfg->vpd.vpd_ros,
1178 off * sizeof(*cfg->vpd.vpd_ros),
1179 M_DEVBUF, M_WAITOK | M_ZERO);
1181 } else if (dflen == 0)
1191 case 5: /* VPD-W Keyword Header */
1193 cfg->vpd.vpd_w = krealloc(cfg->vpd.vpd_w,
1194 (alloc *= 2) * sizeof(*cfg->vpd.vpd_w),
1195 M_DEVBUF, M_WAITOK | M_ZERO);
1197 cfg->vpd.vpd_w[off].keyword[0] = byte;
1198 if (vpd_nextbyte(&vrs, &byte2)) {
1202 cfg->vpd.vpd_w[off].keyword[1] = byte2;
1203 if (vpd_nextbyte(&vrs, &byte2)) {
1207 cfg->vpd.vpd_w[off].len = dflen = byte2;
1208 cfg->vpd.vpd_w[off].start = vrs.off - vrs.bytesinval;
1209 cfg->vpd.vpd_w[off].value = kmalloc((dflen + 1) *
1210 sizeof(*cfg->vpd.vpd_w[off].value),
1211 M_DEVBUF, M_WAITOK);
1214 /* keep in sync w/ state 6's transistions */
1215 if (dflen == 0 && remain == 0)
1217 else if (dflen == 0)
1223 case 6: /* VPD-W Keyword Value */
1224 cfg->vpd.vpd_w[off].value[i++] = byte;
1227 /* keep in sync w/ state 5's transistions */
1229 cfg->vpd.vpd_w[off++].value[i++] = '\0';
1230 if (dflen == 0 && remain == 0) {
1231 cfg->vpd.vpd_wcnt = off;
1232 cfg->vpd.vpd_w = krealloc(cfg->vpd.vpd_w,
1233 off * sizeof(*cfg->vpd.vpd_w),
1234 M_DEVBUF, M_WAITOK | M_ZERO);
1236 } else if (dflen == 0)
1241 kprintf("pci%d:%d:%d:%d: invalid state: %d\n",
1242 cfg->domain, cfg->bus, cfg->slot, cfg->func,
/* Error unwinding: free whatever was built before the failure. */
1249 if (cksumvalid == 0 || state < -1) {
1250 /* read-only data bad, clean up */
1251 if (cfg->vpd.vpd_ros != NULL) {
1252 for (off = 0; cfg->vpd.vpd_ros[off].value; off++)
1253 kfree(cfg->vpd.vpd_ros[off].value, M_DEVBUF);
1254 kfree(cfg->vpd.vpd_ros, M_DEVBUF);
1255 cfg->vpd.vpd_ros = NULL;
1259 /* I/O error, clean up */
1260 kprintf("pci%d:%d:%d:%d: failed to read VPD data.\n",
1261 cfg->domain, cfg->bus, cfg->slot, cfg->func);
1262 if (cfg->vpd.vpd_ident != NULL) {
1263 kfree(cfg->vpd.vpd_ident, M_DEVBUF);
1264 cfg->vpd.vpd_ident = NULL;
1266 if (cfg->vpd.vpd_w != NULL) {
1267 for (off = 0; cfg->vpd.vpd_w[off].value; off++)
1268 kfree(cfg->vpd.vpd_w[off].value, M_DEVBUF);
1269 kfree(cfg->vpd.vpd_w, M_DEVBUF);
1270 cfg->vpd.vpd_w = NULL;
1273 cfg->vpd.vpd_cached = 1;
1279 pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr)
1281 struct pci_devinfo *dinfo = device_get_ivars(child);
1282 pcicfgregs *cfg = &dinfo->cfg;
1284 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1285 pci_read_vpd(device_get_parent(dev), cfg);
1287 *identptr = cfg->vpd.vpd_ident;
1289 if (*identptr == NULL)
1296 pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw,
1299 struct pci_devinfo *dinfo = device_get_ivars(child);
1300 pcicfgregs *cfg = &dinfo->cfg;
1303 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1304 pci_read_vpd(device_get_parent(dev), cfg);
1306 for (i = 0; i < cfg->vpd.vpd_rocnt; i++)
1307 if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword,
1308 sizeof(cfg->vpd.vpd_ros[i].keyword)) == 0) {
1309 *vptr = cfg->vpd.vpd_ros[i].value;
1312 if (i != cfg->vpd.vpd_rocnt)
1320 * Return the offset in configuration space of the requested extended
1321 * capability entry or 0 if the specified capability was not found.
1324 pci_find_extcap_method(device_t dev, device_t child, int capability,
1327 struct pci_devinfo *dinfo = device_get_ivars(child);
1328 pcicfgregs *cfg = &dinfo->cfg;
1333 * Check the CAP_LIST bit of the PCI status register first.
1335 status = pci_read_config(child, PCIR_STATUS, 2);
1336 if (!(status & PCIM_STATUS_CAPPRESENT))
1340 * Determine the start pointer of the capabilities list.
1342 switch (cfg->hdrtype & PCIM_HDRTYPE) {
1348 ptr = PCIR_CAP_PTR_2;
1352 return (ENXIO); /* no extended capabilities support */
1354 ptr = pci_read_config(child, ptr, 1);
1357 * Traverse the capabilities list.
1360 if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
1365 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1372 * Support for MSI-X message interrupts.
1375 pci_setup_msix_vector(device_t dev, u_int index, uint64_t address,
1378 struct pci_devinfo *dinfo = device_get_ivars(dev);
1379 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1382 KASSERT(msix->msix_msgnum > index, ("bogus index"));
1383 offset = msix->msix_table_offset + index * 16;
1384 bus_write_4(msix->msix_table_res, offset, address & 0xffffffff);
1385 bus_write_4(msix->msix_table_res, offset + 4, address >> 32);
1386 bus_write_4(msix->msix_table_res, offset + 8, data);
1388 /* Enable MSI -> HT mapping. */
1389 pci_ht_map_msi(dev, address);
1393 pci_mask_msix_vector(device_t dev, u_int index)
1395 struct pci_devinfo *dinfo = device_get_ivars(dev);
1396 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1397 uint32_t offset, val;
1399 KASSERT(msix->msix_msgnum > index, ("bogus index"));
1400 offset = msix->msix_table_offset + index * 16 + 12;
1401 val = bus_read_4(msix->msix_table_res, offset);
1402 if (!(val & PCIM_MSIX_VCTRL_MASK)) {
1403 val |= PCIM_MSIX_VCTRL_MASK;
1404 bus_write_4(msix->msix_table_res, offset, val);
1409 pci_unmask_msix_vector(device_t dev, u_int index)
1411 struct pci_devinfo *dinfo = device_get_ivars(dev);
1412 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1413 uint32_t offset, val;
1415 KASSERT(msix->msix_msgnum > index, ("bogus index"));
1416 offset = msix->msix_table_offset + index * 16 + 12;
1417 val = bus_read_4(msix->msix_table_res, offset);
1418 if (val & PCIM_MSIX_VCTRL_MASK) {
1419 val &= ~PCIM_MSIX_VCTRL_MASK;
1420 bus_write_4(msix->msix_table_res, offset, val);
1425 pci_pending_msix_vector(device_t dev, u_int index)
1427 struct pci_devinfo *dinfo = device_get_ivars(dev);
1428 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1429 uint32_t offset, bit;
1431 KASSERT(msix->msix_table_res != NULL && msix->msix_pba_res != NULL,
1432 ("MSI-X is not setup yet\n"));
1434 KASSERT(msix->msix_msgnum > index, ("bogus index"));
1435 offset = msix->msix_pba_offset + (index / 32) * 4;
1436 bit = 1 << index % 32;
1437 return (bus_read_4(msix->msix_pba_res, offset) & bit);
1441 * Restore MSI-X registers and table during resume. If MSI-X is
1442 * enabled then walk the virtual table to restore the actual MSI-X
1446 pci_resume_msix(device_t dev)
1448 struct pci_devinfo *dinfo = device_get_ivars(dev);
1449 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1451 if (msix->msix_alloc > 0) {
1452 const struct msix_vector *mv;
1454 pci_mask_msix_allvectors(dev);
1456 TAILQ_FOREACH(mv, &msix->msix_vectors, mv_link) {
1459 if (mv->mv_address == 0)
1462 vector = PCI_MSIX_RID2VEC(mv->mv_rid);
1463 pci_setup_msix_vector(dev, vector,
1464 mv->mv_address, mv->mv_data);
1465 pci_unmask_msix_vector(dev, vector);
1468 pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
1469 msix->msix_ctrl, 2);
1473 * Attempt to allocate one MSI-X message at the specified vector on cpuid.
1475 * After this function returns, the MSI-X's rid will be saved in rid0.
1478 pci_alloc_msix_vector_method(device_t dev, device_t child, u_int vector,
1479 int *rid0, int cpuid)
1481 struct pci_devinfo *dinfo = device_get_ivars(child);
1482 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1483 struct msix_vector *mv;
1484 struct resource_list_entry *rle;
1485 int error, irq, rid;
1487 KASSERT(msix->msix_table_res != NULL &&
1488 msix->msix_pba_res != NULL, ("MSI-X is not setup yet\n"));
1489 KASSERT(cpuid >= 0 && cpuid < ncpus, ("invalid cpuid %d\n", cpuid));
1490 KASSERT(vector < msix->msix_msgnum,
1491 ("invalid MSI-X vector %u, total %d\n", vector, msix->msix_msgnum));
1494 device_printf(child,
1495 "attempting to allocate MSI-X #%u vector (%d supported)\n",
1496 vector, msix->msix_msgnum);
1499 /* Set rid according to vector number */
1500 rid = PCI_MSIX_VEC2RID(vector);
1502 /* Vector has already been allocated */
1503 mv = pci_find_msix_vector(child, rid);
1507 /* Allocate a message. */
1508 error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq, cpuid);
1511 resource_list_add(&dinfo->resources, SYS_RES_IRQ, rid,
1512 irq, irq, 1, cpuid);
1515 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
1516 device_printf(child, "using IRQ %lu for MSI-X on cpu%d\n",
1520 /* Update counts of alloc'd messages. */
1523 mv = kmalloc(sizeof(*mv), M_DEVBUF, M_WAITOK | M_ZERO);
1525 TAILQ_INSERT_TAIL(&msix->msix_vectors, mv, mv_link);
1532 pci_release_msix_vector_method(device_t dev, device_t child, int rid)
1534 struct pci_devinfo *dinfo = device_get_ivars(child);
1535 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1536 struct resource_list_entry *rle;
1537 struct msix_vector *mv;
1540 KASSERT(msix->msix_table_res != NULL &&
1541 msix->msix_pba_res != NULL, ("MSI-X is not setup yet\n"));
1542 KASSERT(msix->msix_alloc > 0, ("No MSI-X allocated\n"));
1543 KASSERT(rid > 0, ("invalid rid %d\n", rid));
1545 mv = pci_find_msix_vector(child, rid);
1546 KASSERT(mv != NULL, ("MSI-X rid %d is not allocated\n", rid));
1547 KASSERT(mv->mv_address == 0, ("MSI-X rid %d not teardown\n", rid));
1549 /* Make sure resource is no longer allocated. */
1550 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
1551 KASSERT(rle != NULL, ("missing MSI-X resource, rid %d\n", rid));
1552 KASSERT(rle->res == NULL,
1553 ("MSI-X resource is still allocated, rid %d\n", rid));
1558 /* Free the resource list entries. */
1559 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, rid);
1561 /* Release the IRQ. */
1562 PCIB_RELEASE_MSIX(device_get_parent(dev), child, irq, cpuid);
1564 TAILQ_REMOVE(&msix->msix_vectors, mv, mv_link);
1565 kfree(mv, M_DEVBUF);
1572 * Return the max supported MSI-X messages this device supports.
1573 * Basically, assuming the MD code can alloc messages, this function
1574 * should return the maximum value that pci_alloc_msix() can return.
1575 * Thus, it is subject to the tunables, etc.
1578 pci_msix_count_method(device_t dev, device_t child)
1580 struct pci_devinfo *dinfo = device_get_ivars(child);
1581 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1583 if (pci_do_msix && msix->msix_location != 0)
1584 return (msix->msix_msgnum);
1589 pci_setup_msix(device_t dev)
1591 struct pci_devinfo *dinfo = device_get_ivars(dev);
1592 pcicfgregs *cfg = &dinfo->cfg;
1593 struct resource_list_entry *rle;
1594 struct resource *table_res, *pba_res;
1596 KASSERT(cfg->msix.msix_table_res == NULL &&
1597 cfg->msix.msix_pba_res == NULL, ("MSI-X has been setup yet\n"));
1599 /* If rid 0 is allocated, then fail. */
1600 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
1601 if (rle != NULL && rle->res != NULL)
1604 /* Already have allocated MSIs? */
1605 if (cfg->msi.msi_alloc != 0)
1608 /* If MSI is blacklisted for this system, fail. */
1609 if (pci_msi_blacklisted())
1612 /* MSI-X capability present? */
1613 if (cfg->msix.msix_location == 0 || cfg->msix.msix_msgnum == 0 ||
1617 /* Make sure the appropriate BARs are mapped. */
1618 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1619 cfg->msix.msix_table_bar);
1620 if (rle == NULL || rle->res == NULL ||
1621 !(rman_get_flags(rle->res) & RF_ACTIVE))
1623 table_res = rle->res;
1624 if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
1625 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1626 cfg->msix.msix_pba_bar);
1627 if (rle == NULL || rle->res == NULL ||
1628 !(rman_get_flags(rle->res) & RF_ACTIVE))
1633 cfg->msix.msix_table_res = table_res;
1634 cfg->msix.msix_pba_res = pba_res;
1636 pci_mask_msix_allvectors(dev);
1642 pci_teardown_msix(device_t dev)
1644 struct pci_devinfo *dinfo = device_get_ivars(dev);
1645 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1647 KASSERT(msix->msix_table_res != NULL &&
1648 msix->msix_pba_res != NULL, ("MSI-X is not setup yet\n"));
1650 pci_mask_msix_allvectors(dev);
1652 msix->msix_table_res = NULL;
1653 msix->msix_pba_res = NULL;
1657 pci_enable_msix(device_t dev)
1659 struct pci_devinfo *dinfo = device_get_ivars(dev);
1660 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1662 KASSERT(msix->msix_table_res != NULL &&
1663 msix->msix_pba_res != NULL, ("MSI-X is not setup yet\n"));
1665 /* Update control register to enable MSI-X. */
1666 msix->msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1667 pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
1668 msix->msix_ctrl, 2);
1672 pci_disable_msix(device_t dev)
1674 struct pci_devinfo *dinfo = device_get_ivars(dev);
1675 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1677 KASSERT(msix->msix_table_res != NULL &&
1678 msix->msix_pba_res != NULL, ("MSI-X is not setup yet\n"));
1680 /* Disable MSI -> HT mapping. */
1681 pci_ht_map_msi(dev, 0);
1683 /* Update control register to disable MSI-X. */
1684 msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE;
1685 pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
1686 msix->msix_ctrl, 2);
1690 pci_mask_msix_allvectors(device_t dev)
1692 struct pci_devinfo *dinfo = device_get_ivars(dev);
1695 for (i = 0; i < dinfo->cfg.msix.msix_msgnum; ++i)
1696 pci_mask_msix_vector(dev, i);
1699 static struct msix_vector *
1700 pci_find_msix_vector(device_t dev, int rid)
1702 struct pci_devinfo *dinfo = device_get_ivars(dev);
1703 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1704 struct msix_vector *mv;
1706 TAILQ_FOREACH(mv, &msix->msix_vectors, mv_link) {
1707 if (mv->mv_rid == rid)
1714 * HyperTransport MSI mapping control
1717 pci_ht_map_msi(device_t dev, uint64_t addr)
1719 struct pci_devinfo *dinfo = device_get_ivars(dev);
1720 struct pcicfg_ht *ht = &dinfo->cfg.ht;
1725 if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) &&
1726 ht->ht_msiaddr >> 20 == addr >> 20) {
1727 /* Enable MSI -> HT mapping. */
1728 ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
1729 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1733 if (!addr && (ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE)) {
1734 /* Disable MSI -> HT mapping. */
1735 ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE;
1736 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1742 * Support for MSI message signalled interrupts.
1745 pci_enable_msi(device_t dev, uint64_t address, uint16_t data)
1747 struct pci_devinfo *dinfo = device_get_ivars(dev);
1748 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1750 /* Write data and address values. */
1751 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
1752 address & 0xffffffff, 4);
1753 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1754 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR_HIGH,
1756 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA_64BIT,
1759 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA, data,
1762 /* Enable MSI in the control register. */
1763 msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE;
1764 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1767 /* Enable MSI -> HT mapping. */
1768 pci_ht_map_msi(dev, address);
1772 pci_disable_msi(device_t dev)
1774 struct pci_devinfo *dinfo = device_get_ivars(dev);
1775 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1777 /* Disable MSI -> HT mapping. */
1778 pci_ht_map_msi(dev, 0);
1780 /* Disable MSI in the control register. */
1781 msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE;
1782 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1787 * Restore MSI registers during resume. If MSI is enabled then
1788 * restore the data and address registers in addition to the control
1792 pci_resume_msi(device_t dev)
1794 struct pci_devinfo *dinfo = device_get_ivars(dev);
1795 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1799 if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) {
1800 address = msi->msi_addr;
1801 data = msi->msi_data;
1802 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
1803 address & 0xffffffff, 4);
1804 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1805 pci_write_config(dev, msi->msi_location +
1806 PCIR_MSI_ADDR_HIGH, address >> 32, 4);
1807 pci_write_config(dev, msi->msi_location +
1808 PCIR_MSI_DATA_64BIT, data, 2);
1810 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA,
1813 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1818 * Returns true if the specified device is blacklisted because MSI
1822 pci_msi_device_blacklisted(device_t dev)
1824 struct pci_quirk *q;
1826 if (!pci_honor_msi_blacklist)
1829 for (q = &pci_quirks[0]; q->devid; q++) {
1830 if (q->devid == pci_get_devid(dev) &&
1831 q->type == PCI_QUIRK_DISABLE_MSI)
1838 * Determine if MSI is blacklisted globally on this sytem. Currently,
1839 * we just check for blacklisted chipsets as represented by the
1840 * host-PCI bridge at device 0:0:0. In the future, it may become
1841 * necessary to check other system attributes, such as the kenv values
1842 * that give the motherboard manufacturer and model number.
1845 pci_msi_blacklisted(void)
1849 if (!pci_honor_msi_blacklist)
1852 /* Blacklist all non-PCI-express and non-PCI-X chipsets. */
1853 if (!(pcie_chipset || pcix_chipset))
1856 dev = pci_find_bsf(0, 0, 0);
1858 return (pci_msi_device_blacklisted(dev));
1863 * Attempt to allocate count MSI messages on start_cpuid.
1865 * If start_cpuid < 0, then the MSI messages' target CPU will be
1866 * selected automaticly.
1868 * If the caller explicitly specified the MSI messages' target CPU,
1869 * i.e. start_cpuid >= 0, then we will try to allocate the count MSI
1870 * messages on the specified CPU, if the allocation fails due to MD
1871 * does not have enough vectors (EMSGSIZE), then we will try next
1872 * available CPU, until the allocation fails on all CPUs.
1874 * EMSGSIZE will be returned, if all available CPUs does not have
1875 * enough vectors for the requested amount of MSI messages. Caller
1876 * should either reduce the amount of MSI messages to be requested,
1877 * or simply giving up using MSI.
1879 * The available SYS_RES_IRQ resources' rids, which are >= 1, are
1880 * returned in 'rid' array, if the allocation succeeds.
1883 pci_alloc_msi_method(device_t dev, device_t child, int *rid, int count,
1886 struct pci_devinfo *dinfo = device_get_ivars(child);
1887 pcicfgregs *cfg = &dinfo->cfg;
1888 struct resource_list_entry *rle;
1889 int error, i, irqs[32], cpuid = 0;
1892 KASSERT(count != 0 && count <= 32 && powerof2(count),
1893 ("invalid MSI count %d\n", count));
1894 KASSERT(start_cpuid < ncpus, ("invalid cpuid %d\n", start_cpuid));
1896 /* If rid 0 is allocated, then fail. */
1897 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
1898 if (rle != NULL && rle->res != NULL)
1901 /* Already have allocated messages? */
1902 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
1905 /* If MSI is blacklisted for this system, fail. */
1906 if (pci_msi_blacklisted())
1909 /* MSI capability present? */
1910 if (cfg->msi.msi_location == 0 || cfg->msi.msi_msgnum == 0 ||
1914 KASSERT(count <= cfg->msi.msi_msgnum, ("large MSI count %d, max %d\n",
1915 count, cfg->msi.msi_msgnum));
1918 device_printf(child,
1919 "attempting to allocate %d MSI vectors (%d supported)\n",
1920 count, cfg->msi.msi_msgnum);
1923 if (start_cpuid < 0)
1924 start_cpuid = atomic_fetchadd_int(&pci_msi_cpuid, 1) % ncpus;
1927 for (i = 0; i < ncpus; ++i) {
1928 cpuid = (start_cpuid + i) % ncpus;
1930 error = PCIB_ALLOC_MSI(device_get_parent(dev), child, count,
1931 cfg->msi.msi_msgnum, irqs, cpuid);
1934 else if (error != EMSGSIZE)
1941 * We now have N messages mapped onto SYS_RES_IRQ resources in
1942 * the irqs[] array, so add new resources starting at rid 1.
1944 for (i = 0; i < count; i++) {
1946 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
1947 irqs[i], irqs[i], 1, cpuid);
1952 device_printf(child, "using IRQ %d on cpu%d for MSI\n",
1958 * Be fancy and try to print contiguous runs
1959 * of IRQ values as ranges. 'run' is true if
1960 * we are in a range.
1962 device_printf(child, "using IRQs %d", irqs[0]);
1964 for (i = 1; i < count; i++) {
1966 /* Still in a run? */
1967 if (irqs[i] == irqs[i - 1] + 1) {
1972 /* Finish previous range. */
1974 kprintf("-%d", irqs[i - 1]);
1978 /* Start new range. */
1979 kprintf(",%d", irqs[i]);
1982 /* Unfinished range? */
1984 kprintf("-%d", irqs[count - 1]);
1985 kprintf(" for MSI on cpu%d\n", cpuid);
1989 /* Update control register with count. */
1990 ctrl = cfg->msi.msi_ctrl;
1991 ctrl &= ~PCIM_MSICTRL_MME_MASK;
1992 ctrl |= (ffs(count) - 1) << 4;
1993 cfg->msi.msi_ctrl = ctrl;
1994 pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2);
1996 /* Update counts of alloc'd messages. */
1997 cfg->msi.msi_alloc = count;
1998 cfg->msi.msi_handlers = 0;
2002 /* Release the MSI messages associated with this device. */
2004 pci_release_msi_method(device_t dev, device_t child)
2006 struct pci_devinfo *dinfo = device_get_ivars(child);
2007 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2008 struct resource_list_entry *rle;
2009 int i, irqs[32], cpuid = -1;
2011 /* Do we have any messages to release? */
2012 if (msi->msi_alloc == 0)
2014 KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages"));
2016 /* Make sure none of the resources are allocated. */
2017 if (msi->msi_handlers > 0)
2019 for (i = 0; i < msi->msi_alloc; i++) {
2020 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
2021 KASSERT(rle != NULL, ("missing MSI resource"));
2022 if (rle->res != NULL)
2026 KASSERT(cpuid >= 0 && cpuid < ncpus,
2027 ("invalid MSI target cpuid %d\n", cpuid));
2029 KASSERT(rle->cpuid == cpuid,
2030 ("MSI targets different cpus, "
2031 "was cpu%d, now cpu%d", cpuid, rle->cpuid));
2033 irqs[i] = rle->start;
2036 /* Update control register with 0 count. */
2037 KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE),
2038 ("%s: MSI still enabled", __func__));
2039 msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK;
2040 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
2043 /* Release the messages. */
2044 PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs,
2046 for (i = 0; i < msi->msi_alloc; i++)
2047 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
2049 /* Update alloc count. */
2057 * Return the max supported MSI messages this device supports.
2058 * Basically, assuming the MD code can alloc messages, this function
2059 * should return the maximum value that pci_alloc_msi() can return.
2060 * Thus, it is subject to the tunables, etc.
2063 pci_msi_count_method(device_t dev, device_t child)
2065 struct pci_devinfo *dinfo = device_get_ivars(child);
2066 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2068 if (pci_do_msi && msi->msi_location != 0)
2069 return (msi->msi_msgnum);
2073 /* kfree pcicfgregs structure and all depending data structures */
2076 pci_freecfg(struct pci_devinfo *dinfo)
2078 struct devlist *devlist_head;
2081 devlist_head = &pci_devq;
2083 if (dinfo->cfg.vpd.vpd_reg) {
2084 kfree(dinfo->cfg.vpd.vpd_ident, M_DEVBUF);
2085 for (i = 0; i < dinfo->cfg.vpd.vpd_rocnt; i++)
2086 kfree(dinfo->cfg.vpd.vpd_ros[i].value, M_DEVBUF);
2087 kfree(dinfo->cfg.vpd.vpd_ros, M_DEVBUF);
2088 for (i = 0; i < dinfo->cfg.vpd.vpd_wcnt; i++)
2089 kfree(dinfo->cfg.vpd.vpd_w[i].value, M_DEVBUF);
2090 kfree(dinfo->cfg.vpd.vpd_w, M_DEVBUF);
2092 STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links);
2093 kfree(dinfo, M_DEVBUF);
2095 /* increment the generation count */
2098 /* we're losing one device */
2104 * PCI power manangement
2107 pci_set_powerstate_method(device_t dev, device_t child, int state)
2109 struct pci_devinfo *dinfo = device_get_ivars(child);
2110 pcicfgregs *cfg = &dinfo->cfg;
2112 int result, oldstate, highest, delay;
2114 if (cfg->pp.pp_cap == 0)
2115 return (EOPNOTSUPP);
2118 * Optimize a no state change request away. While it would be OK to
2119 * write to the hardware in theory, some devices have shown odd
2120 * behavior when going from D3 -> D3.
2122 oldstate = pci_get_powerstate(child);
2123 if (oldstate == state)
2127 * The PCI power management specification states that after a state
2128 * transition between PCI power states, system software must
2129 * guarantee a minimal delay before the function accesses the device.
2130 * Compute the worst case delay that we need to guarantee before we
2131 * access the device. Many devices will be responsive much more
2132 * quickly than this delay, but there are some that don't respond
2133 * instantly to state changes. Transitions to/from D3 state require
2134 * 10ms, while D2 requires 200us, and D0/1 require none. The delay
2135 * is done below with DELAY rather than a sleeper function because
2136 * this function can be called from contexts where we cannot sleep.
2138 highest = (oldstate > state) ? oldstate : state;
2139 if (highest == PCI_POWERSTATE_D3)
2141 else if (highest == PCI_POWERSTATE_D2)
2145 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2)
2146 & ~PCIM_PSTAT_DMASK;
2149 case PCI_POWERSTATE_D0:
2150 status |= PCIM_PSTAT_D0;
2152 case PCI_POWERSTATE_D1:
2153 if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0)
2154 return (EOPNOTSUPP);
2155 status |= PCIM_PSTAT_D1;
2157 case PCI_POWERSTATE_D2:
2158 if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0)
2159 return (EOPNOTSUPP);
2160 status |= PCIM_PSTAT_D2;
2162 case PCI_POWERSTATE_D3:
2163 status |= PCIM_PSTAT_D3;
2171 "pci%d:%d:%d:%d: Transition from D%d to D%d\n",
2172 dinfo->cfg.domain, dinfo->cfg.bus, dinfo->cfg.slot,
2173 dinfo->cfg.func, oldstate, state);
2175 PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2);
2182 pci_get_powerstate_method(device_t dev, device_t child)
2184 struct pci_devinfo *dinfo = device_get_ivars(child);
2185 pcicfgregs *cfg = &dinfo->cfg;
2189 if (cfg->pp.pp_cap != 0) {
2190 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2);
2191 switch (status & PCIM_PSTAT_DMASK) {
2193 result = PCI_POWERSTATE_D0;
2196 result = PCI_POWERSTATE_D1;
2199 result = PCI_POWERSTATE_D2;
2202 result = PCI_POWERSTATE_D3;
2205 result = PCI_POWERSTATE_UNKNOWN;
2209 /* No support, device is always at D0 */
2210 result = PCI_POWERSTATE_D0;
2216 * Some convenience functions for PCI device drivers.
2219 static __inline void
2220 pci_set_command_bit(device_t dev, device_t child, uint16_t bit)
2224 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2226 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
2229 static __inline void
2230 pci_clear_command_bit(device_t dev, device_t child, uint16_t bit)
2234 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2236 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
2240 pci_enable_busmaster_method(device_t dev, device_t child)
2242 pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
2247 pci_disable_busmaster_method(device_t dev, device_t child)
2249 pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
2254 pci_enable_io_method(device_t dev, device_t child, int space)
2264 case SYS_RES_IOPORT:
2265 bit = PCIM_CMD_PORTEN;
2268 case SYS_RES_MEMORY:
2269 bit = PCIM_CMD_MEMEN;
2275 pci_set_command_bit(dev, child, bit);
2276 /* Some devices seem to need a brief stall here, what do to? */
2277 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2280 device_printf(child, "failed to enable %s mapping!\n", error);
2285 pci_disable_io_method(device_t dev, device_t child, int space)
2295 case SYS_RES_IOPORT:
2296 bit = PCIM_CMD_PORTEN;
2299 case SYS_RES_MEMORY:
2300 bit = PCIM_CMD_MEMEN;
2306 pci_clear_command_bit(dev, child, bit);
2307 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2308 if (command & bit) {
2309 device_printf(child, "failed to disable %s mapping!\n", error);
2316 * New style pci driver. Parent device is either a pci-host-bridge or a
2317 * pci-pci-bridge. Both kinds are represented by instances of pcib.
2321 pci_print_verbose(struct pci_devinfo *dinfo)
2325 pcicfgregs *cfg = &dinfo->cfg;
2327 kprintf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n",
2328 cfg->vendor, cfg->device, cfg->revid);
2329 kprintf("\tdomain=%d, bus=%d, slot=%d, func=%d\n",
2330 cfg->domain, cfg->bus, cfg->slot, cfg->func);
2331 kprintf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n",
2332 cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype,
2334 kprintf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n",
2335 cfg->cmdreg, cfg->statreg, cfg->cachelnsz);
2336 kprintf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n",
2337 cfg->lattimer, cfg->lattimer * 30, cfg->mingnt,
2338 cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250);
2339 if (cfg->intpin > 0)
2340 kprintf("\tintpin=%c, irq=%d\n",
2341 cfg->intpin +'a' -1, cfg->intline);
2342 if (cfg->pp.pp_cap) {
2345 status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2);
2346 kprintf("\tpowerspec %d supports D0%s%s D3 current D%d\n",
2347 cfg->pp.pp_cap & PCIM_PCAP_SPEC,
2348 cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "",
2349 cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "",
2350 status & PCIM_PSTAT_DMASK);
2352 if (cfg->msi.msi_location) {
2355 ctrl = cfg->msi.msi_ctrl;
2356 kprintf("\tMSI supports %d message%s%s%s\n",
2357 cfg->msi.msi_msgnum,
2358 (cfg->msi.msi_msgnum == 1) ? "" : "s",
2359 (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "",
2360 (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":"");
2362 if (cfg->msix.msix_location) {
2363 kprintf("\tMSI-X supports %d message%s ",
2364 cfg->msix.msix_msgnum,
2365 (cfg->msix.msix_msgnum == 1) ? "" : "s");
2366 if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar)
2367 kprintf("in map 0x%x\n",
2368 cfg->msix.msix_table_bar);
2370 kprintf("in maps 0x%x and 0x%x\n",
2371 cfg->msix.msix_table_bar,
2372 cfg->msix.msix_pba_bar);
2374 pci_print_verbose_expr(cfg);
2379 pci_print_verbose_expr(const pcicfgregs *cfg)
2381 const struct pcicfg_expr *expr = &cfg->expr;
2382 const char *port_name;
2388 if (expr->expr_ptr == 0) /* No PCI Express capability */
2391 kprintf("\tPCI Express ver.%d cap=0x%04x",
2392 expr->expr_cap & PCIEM_CAP_VER_MASK, expr->expr_cap);
2393 if ((expr->expr_cap & PCIEM_CAP_VER_MASK) != PCIEM_CAP_VER_1)
2396 port_type = expr->expr_cap & PCIEM_CAP_PORT_TYPE;
2398 switch (port_type) {
2399 case PCIE_END_POINT:
2400 port_name = "DEVICE";
2402 case PCIE_LEG_END_POINT:
2403 port_name = "LEGDEV";
2405 case PCIE_ROOT_PORT:
2408 case PCIE_UP_STREAM_PORT:
2409 port_name = "UPSTREAM";
2411 case PCIE_DOWN_STREAM_PORT:
2412 port_name = "DOWNSTRM";
2414 case PCIE_PCIE2PCI_BRIDGE:
2415 port_name = "PCIE2PCI";
2417 case PCIE_PCI2PCIE_BRIDGE:
2418 port_name = "PCI2PCIE";
2424 if ((port_type == PCIE_ROOT_PORT ||
2425 port_type == PCIE_DOWN_STREAM_PORT) &&
2426 !(expr->expr_cap & PCIEM_CAP_SLOT_IMPL))
2428 if (port_name != NULL)
2429 kprintf("[%s]", port_name);
2431 if (pcie_slotimpl(cfg)) {
2432 kprintf(", slotcap=0x%08x", expr->expr_slotcap);
2433 if (expr->expr_slotcap & PCIEM_SLTCAP_HP_CAP)
2434 kprintf("[HOTPLUG]");
2441 pci_porten(device_t pcib, int b, int s, int f)
2443 return (PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2)
2444 & PCIM_CMD_PORTEN) != 0;
2448 pci_memen(device_t pcib, int b, int s, int f)
2450 return (PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2)
2451 & PCIM_CMD_MEMEN) != 0;
2455 * Add a resource based on a pci map register. Return 1 if the map
2456 * register is a 32bit map register or 2 if it is a 64bit register.
2459 pci_add_map(device_t pcib, device_t bus, device_t dev,
2460 int b, int s, int f, int reg, struct resource_list *rl, int force,
2465 pci_addr_t start, end, count;
2472 struct resource *res;
2474 map = PCIB_READ_CONFIG(pcib, b, s, f, reg, 4);
2475 PCIB_WRITE_CONFIG(pcib, b, s, f, reg, 0xffffffff, 4);
2476 testval = PCIB_READ_CONFIG(pcib, b, s, f, reg, 4);
2477 PCIB_WRITE_CONFIG(pcib, b, s, f, reg, map, 4);
2479 if (PCI_BAR_MEM(map)) {
2480 type = SYS_RES_MEMORY;
2481 if (map & PCIM_BAR_MEM_PREFETCH)
2484 type = SYS_RES_IOPORT;
2485 ln2size = pci_mapsize(testval);
2486 ln2range = pci_maprange(testval);
2487 base = pci_mapbase(map);
2488 barlen = ln2range == 64 ? 2 : 1;
2491 * For I/O registers, if bottom bit is set, and the next bit up
2492 * isn't clear, we know we have a BAR that doesn't conform to the
2493 * spec, so ignore it. Also, sanity check the size of the data
2494 * areas to the type of memory involved. Memory must be at least
2495 * 16 bytes in size, while I/O ranges must be at least 4.
2497 if (PCI_BAR_IO(testval) && (testval & PCIM_BAR_IO_RESERVED) != 0)
2499 if ((type == SYS_RES_MEMORY && ln2size < 4) ||
2500 (type == SYS_RES_IOPORT && ln2size < 2))
2504 /* Read the other half of a 64bit map register */
2505 base |= (uint64_t) PCIB_READ_CONFIG(pcib, b, s, f, reg + 4, 4) << 32;
2507 kprintf("\tmap[%02x]: type %s, range %2d, base %#jx, size %2d",
2508 reg, pci_maptype(map), ln2range, (uintmax_t)base, ln2size);
2509 if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f))
2510 kprintf(", port disabled\n");
2511 else if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f))
2512 kprintf(", memory disabled\n");
2514 kprintf(", enabled\n");
2518 * If base is 0, then we have problems. It is best to ignore
2519 * such entries for the moment. These will be allocated later if
2520 * the driver specifically requests them. However, some
2521 * removable busses look better when all resources are allocated,
2522 * so allow '0' to be overriden.
2524 * Similarly treat maps whose values is the same as the test value
2525 * read back. These maps have had all f's written to them by the
2526 * BIOS in an attempt to disable the resources.
2528 if (!force && (base == 0 || map == testval))
2530 if ((u_long)base != base) {
2532 "pci%d:%d:%d:%d bar %#x too many address bits",
2533 pci_get_domain(dev), b, s, f, reg);
2538 * This code theoretically does the right thing, but has
2539 * undesirable side effects in some cases where peripherals
2540 * respond oddly to having these bits enabled. Let the user
2541 * be able to turn them off (since pci_enable_io_modes is 1 by
2544 if (pci_enable_io_modes) {
2545 /* Turn on resources that have been left off by a lazy BIOS */
2546 if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f)) {
2547 cmd = PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2);
2548 cmd |= PCIM_CMD_PORTEN;
2549 PCIB_WRITE_CONFIG(pcib, b, s, f, PCIR_COMMAND, cmd, 2);
2551 if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f)) {
2552 cmd = PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2);
2553 cmd |= PCIM_CMD_MEMEN;
2554 PCIB_WRITE_CONFIG(pcib, b, s, f, PCIR_COMMAND, cmd, 2);
2557 if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f))
2559 if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f))
2563 count = 1 << ln2size;
2564 if (base == 0 || base == pci_mapbase(testval)) {
2565 start = 0; /* Let the parent decide. */
2569 end = base + (1 << ln2size) - 1;
2571 resource_list_add(rl, type, reg, start, end, count, -1);
2574 * Try to allocate the resource for this BAR from our parent
2575 * so that this resource range is already reserved. The
2576 * driver for this device will later inherit this resource in
2577 * pci_alloc_resource().
2579 res = resource_list_alloc(rl, bus, dev, type, ®, start, end, count,
2580 prefetch ? RF_PREFETCHABLE : 0, -1);
2583 * If the allocation fails, delete the resource list
2584 * entry to force pci_alloc_resource() to allocate
2585 * resources from the parent.
2587 resource_list_delete(rl, type, reg);
2588 #ifdef PCI_BAR_CLEAR
2591 #else /* !PCI_BAR_CLEAR */
2593 * Don't clear BAR here. Some BIOS lists HPET as a
2594 * PCI function, clearing the BAR causes HPET timer
2598 kprintf("pci:%d:%d:%d: resource reservation failed "
2599 "%#jx - %#jx\n", b, s, f,
2600 (intmax_t)start, (intmax_t)end);
2603 #endif /* PCI_BAR_CLEAR */
2605 start = rman_get_start(res);
2607 pci_write_config(dev, reg, start, 4);
2609 pci_write_config(dev, reg + 4, start >> 32, 4);
/*
 * pci_ata_maps() -- add BAR/port resources for an ATA (IDE) controller.
 *
 * If the controller advertises native-mode capability in its PROGIF
 * (the 0x8a bit pattern) and both BAR(0) and BAR(2) decode, native
 * PCI addressing is enabled by setting the 0x05 mode bits in PROGIF.
 * For each channel left in legacy (compatibility) mode, the fixed
 * ISA-compatible I/O ranges are added instead of trusting the BARs:
 * 0x1f0-0x1f7 + 0x3f6 for the primary, 0x170-0x177 + 0x376 for the
 * secondary.  BAR(4) and BAR(5) (BAR(4) is typically the bus-master
 * DMA registers -- confirm against the device) are always mapped.
 *
 * NOTE(review): this copy of the file has lines elided (missing
 * declarations, braces and statements); verify against upstream
 * pci.c before relying on the exact control flow.
 */
2614 * For ATA devices we need to decide early what addressing mode to use.
2615 * Legacy demands that the primary and secondary ATA ports sits on the
2616 * same addresses that old ISA hardware did. This dictates that we use
2617 * those addresses and ignore the BAR's if we cannot set PCI native
2621 pci_ata_maps(device_t pcib, device_t bus, device_t dev, int b,
2622 int s, int f, struct resource_list *rl, int force, uint32_t prefetchmask)
2624 int rid, type, progif;
2626 /* if this device supports PCI native addressing use it */
2627 progif = pci_read_config(dev, PCIR_PROGIF, 1);
2628 if ((progif & 0x8a) == 0x8a) {
2629 if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) &&
2630 pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) {
2631 kprintf("Trying ATA native PCI addressing mode\n");
2632 pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1);
/* Re-read PROGIF: the native-mode write above may or may not stick. */
2636 progif = pci_read_config(dev, PCIR_PROGIF, 1);
2637 type = SYS_RES_IOPORT;
/* Primary channel: native mode uses BAR(0)/BAR(1) ... */
2638 if (progif & PCIP_STORAGE_IDE_MODEPRIM) {
2639 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(0), rl, force,
2640 prefetchmask & (1 << 0));
2641 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(1), rl, force,
2642 prefetchmask & (1 << 1));
/* ... legacy mode uses the fixed ISA-compatible port ranges. */
2645 resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8, -1);
2646 resource_list_alloc(rl, bus, dev, type, &rid, 0x1f0, 0x1f7, 8,
2649 resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1, -1);
2650 resource_list_alloc(rl, bus, dev, type, &rid, 0x3f6, 0x3f6, 1,
/* Secondary channel: same scheme with BAR(2)/BAR(3) vs. 0x170/0x376. */
2653 if (progif & PCIP_STORAGE_IDE_MODESEC) {
2654 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(2), rl, force,
2655 prefetchmask & (1 << 2));
2656 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(3), rl, force,
2657 prefetchmask & (1 << 3));
2660 resource_list_add(rl, type, rid, 0x170, 0x177, 8, -1);
2661 resource_list_alloc(rl, bus, dev, type, &rid, 0x170, 0x177, 8,
2664 resource_list_add(rl, type, rid, 0x376, 0x376, 1, -1);
2665 resource_list_alloc(rl, bus, dev, type, &rid, 0x376, 0x376, 1,
/* BAR(4)/BAR(5) are mapped unconditionally. */
2668 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(4), rl, force,
2669 prefetchmask & (1 << 4));
2670 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(5), rl, force,
2671 prefetchmask & (1 << 5));
/*
 * pci_assign_interrupt() -- pick a legacy IRQ for a device and record it
 * as the rid-0 SYS_RES_IRQ resource.
 *
 * Selection order: (1) a user tunable "hw.pci<dom>.<bus>.<slot>.INT<pin>.irq"
 * (values outside 1..254 are rejected); (2) the intline config register,
 * unless it is invalid or force_route is set, in which case the bus is
 * asked to route one via PCI_ASSIGN_INTERRUPT().  If the chosen IRQ
 * differs from intline, the PCIR_INTLINE register is updated.
 *
 * NOTE(review): lines are elided here (the early-return bodies and the
 * declaration of 'irq' are missing); verify against upstream pci.c.
 */
2675 pci_assign_interrupt(device_t bus, device_t dev, int force_route)
2677 struct pci_devinfo *dinfo = device_get_ivars(dev);
2678 pcicfgregs *cfg = &dinfo->cfg;
2679 char tunable_name[64];
2682 /* Has to have an intpin to have an interrupt. */
2683 if (cfg->intpin == 0)
2686 /* Let the user override the IRQ with a tunable. */
2687 irq = PCI_INVALID_IRQ;
2688 ksnprintf(tunable_name, sizeof(tunable_name),
2689 "hw.pci%d.%d.%d.INT%c.irq",
2690 cfg->domain, cfg->bus, cfg->slot, cfg->intpin + 'A' - 1)
2691 if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0))
2692 irq = PCI_INVALID_IRQ;
2695 * If we didn't get an IRQ via the tunable, then we either use the
2696 * IRQ value in the intline register or we ask the bus to route an
2697 * interrupt for us. If force_route is true, then we only use the
2698 * value in the intline register if the bus was unable to assign an
2701 if (!PCI_INTERRUPT_VALID(irq)) {
2702 if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route)
2703 irq = PCI_ASSIGN_INTERRUPT(bus, dev);
2704 if (!PCI_INTERRUPT_VALID(irq))
2708 /* If after all that we don't have an IRQ, just bail. */
2709 if (!PCI_INTERRUPT_VALID(irq))
2712 /* Update the config register if it changed. */
2713 if (irq != cfg->intline) {
2715 pci_write_config(dev, PCIR_INTLINE, irq, 1);
2718 /* Add this IRQ as rid 0 interrupt resource. */
2719 resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1,
2720 machintr_legacy_intr_cpuid(irq));
/*
 * pci_add_resources() -- populate a new child's resource list.
 *
 * ATA controllers in legacy mode (or with both BAR(0) and BAR(2) empty)
 * get the special pci_ata_maps() treatment; otherwise every BAR is added
 * via pci_add_map().  Quirk-table entries of type PCI_QUIRK_MAP_REG add
 * extra maps for known devices.  Finally, if the device has an intpin
 * and a valid intline, the interrupt is (re-)routed with force_route=1
 * to correct bogus firmware-programmed values.
 *
 * NOTE(review): elided lines here include the declarations of b/s/f/i;
 * verify against upstream pci.c.
 */
2724 pci_add_resources(device_t pcib, device_t bus, device_t dev, int force, uint32_t prefetchmask)
2726 struct pci_devinfo *dinfo = device_get_ivars(dev);
2727 pcicfgregs *cfg = &dinfo->cfg;
2728 struct resource_list *rl = &dinfo->resources;
2729 struct pci_quirk *q;
2736 /* ATA devices needs special map treatment */
2737 if ((pci_get_class(dev) == PCIC_STORAGE) &&
2738 (pci_get_subclass(dev) == PCIS_STORAGE_IDE) &&
2739 ((pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV) ||
2740 (!pci_read_config(dev, PCIR_BAR(0), 4) &&
2741 !pci_read_config(dev, PCIR_BAR(2), 4))) )
2742 pci_ata_maps(pcib, bus, dev, b, s, f, rl, force, prefetchmask);
/* pci_add_map() returns the number of 32-bit words the BAR consumed
 * (64-bit BARs take two registers), hence the manual loop step. */
2744 for (i = 0; i < cfg->nummaps;)
2745 i += pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(i),
2746 rl, force, prefetchmask & (1 << i));
2749 * Add additional, quirked resources.
2751 for (q = &pci_quirks[0]; q->devid; q++) {
2752 if (q->devid == ((cfg->device << 16) | cfg->vendor)
2753 && q->type == PCI_QUIRK_MAP_REG)
2754 pci_add_map(pcib, bus, dev, b, s, f, q->arg1, rl,
2758 if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) {
2760 * Try to re-route interrupts. Sometimes the BIOS or
2761 * firmware may leave bogus values in these registers.
2762 * If the re-route fails, then just stick with what we
2765 pci_assign_interrupt(bus, dev, 1);
/*
 * pci_add_children() -- enumerate a PCI bus and add every function found.
 *
 * Walks slot 0..PCIB_MAXSLOTS(pcib); for each slot reads the header type
 * and skips unknown header types.  Multi-function devices (PCIM_MFDEV)
 * get all functions 0..PCI_FUNCMAX probed; pci_read_device() returns a
 * dinfo for present functions, which are then attached via pci_add_child().
 * dinfo_size lets subclasses embed pci_devinfo in a larger structure.
 */
2770 pci_add_children(device_t dev, int domain, int busno, size_t dinfo_size)
2772 #define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
2773 device_t pcib = device_get_parent(dev);
2774 struct pci_devinfo *dinfo;
2776 int s, f, pcifunchigh;
2779 KASSERT(dinfo_size >= sizeof(struct pci_devinfo),
2780 ("dinfo_size too small"));
2781 maxslots = PCIB_MAXSLOTS(pcib);
2782 for (s = 0; s <= maxslots; s++) {
2786 hdrtype = REG(PCIR_HDRTYPE, 1);
2787 if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
2789 if (hdrtype & PCIM_MFDEV)
2790 pcifunchigh = PCI_FUNCMAX;
2791 for (f = 0; f <= pcifunchigh; f++) {
2792 dinfo = pci_read_device(pcib, domain, busno, s, f,
2794 if (dinfo != NULL) {
2795 pci_add_child(dev, dinfo);
/*
 * pci_add_child() -- create the newbus device for one discovered PCI
 * function, wire up its ivars and resource list, save then restore its
 * config space (restore powers it to D0 and re-loads BARs/registers),
 * print verbose info, and add its BAR/IRQ resources.
 */
2803 pci_add_child(device_t bus, struct pci_devinfo *dinfo)
2807 pcib = device_get_parent(bus);
2808 dinfo->cfg.dev = device_add_child(bus, NULL, -1);
2809 device_set_ivars(dinfo->cfg.dev, dinfo);
2810 resource_list_init(&dinfo->resources);
2811 pci_cfg_save(dinfo->cfg.dev, dinfo, 0);
2812 pci_cfg_restore(dinfo->cfg.dev, dinfo);
2813 pci_print_verbose(dinfo);
2814 pci_add_resources(pcib, bus, dinfo->cfg.dev, 0, 0);
/*
 * pci_probe() -- generic PCI bus probe: set the description and return
 * a priority that lets more specific subclasses win the attachment.
 */
2818 pci_probe(device_t dev)
2820 device_set_desc(dev, "PCI bus");
2822 /* Allow other subclasses to override this driver. */
/*
 * pci_attach() -- attach a PCI bus: ask the parent bridge for our
 * domain and bus numbers, enumerate the children, then do the generic
 * bus attach.
 */
2827 pci_attach(device_t dev)
2832 * Since there can be multiple independently numbered PCI
2833 * busses on systems with multiple PCI domains, we can't use
2834 * the unit number to decide which bus we are probing. We ask
2835 * the parent pcib what our domain and bus numbers are.
2837 domain = pcib_get_domain(dev);
2838 busno = pcib_get_bus(dev);
2840 device_printf(dev, "domain=%d, physical bus=%d\n",
2843 pci_add_children(dev, domain, busno, sizeof(struct pci_devinfo));
2845 return (bus_generic_attach(dev));
/*
 * pci_suspend() -- save each child's config space, suspend the children,
 * then (when power management is enabled and ACPI is present) place each
 * attached type-0 child into the ACPI-suggested power state, defaulting
 * to D3.  Note acpi_dev is only looked up when pci_do_power_resume is
 * set; the D-state loop is skipped entirely when acpi_dev is NULL.
 */
2849 pci_suspend(device_t dev)
2851 int dstate, error, i, numdevs;
2852 device_t acpi_dev, child, *devlist;
2853 struct pci_devinfo *dinfo;
2856 * Save the PCI configuration space for each child and set the
2857 * device in the appropriate power state for this sleep state.
2860 if (pci_do_power_resume)
2861 acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
2862 device_get_children(dev, &devlist, &numdevs);
2863 for (i = 0; i < numdevs; i++) {
2865 dinfo = (struct pci_devinfo *) device_get_ivars(child);
2866 pci_cfg_save(child, dinfo, 0);
2869 /* Suspend devices before potentially powering them down. */
2870 error = bus_generic_suspend(dev);
/* On suspend failure, free the device list (error return elided here). */
2872 kfree(devlist, M_TEMP);
2877 * Always set the device to D3. If ACPI suggests a different
2878 * power state, use it instead. If ACPI is not present, the
2879 * firmware is responsible for managing device power. Skip
2880 * children who aren't attached since they are powered down
2881 * separately. Only manage type 0 devices for now.
2883 for (i = 0; acpi_dev && i < numdevs; i++) {
2885 dinfo = (struct pci_devinfo *) device_get_ivars(child);
2886 if (device_is_attached(child) && dinfo->cfg.hdrtype == 0) {
2887 dstate = PCI_POWERSTATE_D3;
2888 ACPI_PWR_FOR_SLEEP(acpi_dev, child, &dstate);
2889 pci_set_powerstate(child, dstate);
2892 kfree(devlist, M_TEMP);
/*
 * pci_resume() -- power each attached type-0 child back to D0 (letting
 * ACPI know, when present and pci_do_power_resume is set), restore its
 * saved config space, then resume the children generically.
 */
2897 pci_resume(device_t dev)
2900 device_t acpi_dev, child, *devlist;
2901 struct pci_devinfo *dinfo;
2904 * Set each child to D0 and restore its PCI configuration space.
2907 if (pci_do_power_resume)
2908 acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
2909 device_get_children(dev, &devlist, &numdevs);
2910 for (i = 0; i < numdevs; i++) {
2912 * Notify ACPI we're going to D0 but ignore the result. If
2913 * ACPI is not present, the firmware is responsible for
2914 * managing device power. Only manage type 0 devices for now.
2917 dinfo = (struct pci_devinfo *) device_get_ivars(child);
2918 if (acpi_dev && device_is_attached(child) &&
2919 dinfo->cfg.hdrtype == 0) {
2920 ACPI_PWR_FOR_SLEEP(acpi_dev, child, NULL);
2921 pci_set_powerstate(child, PCI_POWERSTATE_D0);
2924 /* Now the device is powered up, restore its config space. */
2925 pci_cfg_restore(child, dinfo);
2927 kfree(devlist, M_TEMP);
2928 return (bus_generic_resume(dev));
/*
 * pci_load_vendor_data() -- locate a preloaded "pci_vendor_data" module
 * (the flat-text vendor/device name database) and record its address and
 * size in pci_vendordata/pci_vendordata_size.  The final byte is
 * overwritten with '\n' so the parser always finds a line terminator
 * even in a corrupt database.
 *
 * NOTE(review): the preload_search_info() results are dereferenced
 * without NULL checks -- presumably guaranteed by the loader; confirm.
 */
2932 pci_load_vendor_data(void)
2934 caddr_t vendordata, info;
2936 if ((vendordata = preload_search_by_type("pci_vendor_data")) != NULL) {
2937 info = preload_search_info(vendordata, MODINFO_ADDR);
2938 pci_vendordata = *(char **)info;
2939 info = preload_search_info(vendordata, MODINFO_SIZE);
2940 pci_vendordata_size = *(size_t *)info;
2941 /* terminate the database */
2942 pci_vendordata[pci_vendordata_size] = '\n';
/*
 * pci_driver_added() -- called when a new PCI driver is loaded: give the
 * driver an identify pass, then re-probe every child that is currently
 * driverless (DS_NOTPRESENT).  Config space is restored before the probe
 * (the device may have been powered down) and saved again -- powering the
 * device back down -- if no driver attaches.
 */
2947 pci_driver_added(device_t dev, driver_t *driver)
2952 struct pci_devinfo *dinfo;
2956 device_printf(dev, "driver added\n");
2957 DEVICE_IDENTIFY(driver, dev);
2958 device_get_children(dev, &devlist, &numdevs);
2959 for (i = 0; i < numdevs; i++) {
/* Skip children that already have (or had) a driver. */
2961 if (device_get_state(child) != DS_NOTPRESENT)
2963 dinfo = device_get_ivars(child);
2964 pci_print_verbose(dinfo);
2966 kprintf("pci%d:%d:%d:%d: reprobing on driver added\n",
2967 dinfo->cfg.domain, dinfo->cfg.bus, dinfo->cfg.slot,
2969 pci_cfg_restore(child, dinfo);
2970 if (device_probe_and_attach(child) != 0)
2971 pci_cfg_save(child, dinfo, 1);
2973 kfree(devlist, M_TEMP);
/*
 * pci_child_detached() -- when a child loses its driver, save its config
 * space with setstate=1, which also powers the device down.
 */
2977 pci_child_detached(device_t parent __unused, device_t child)
2979 /* Turn child's power off */
2980 pci_cfg_save(child, device_get_ivars(child), 1);
/*
 * pci_setup_intr() -- bus method to hook up an interrupt handler.
 *
 * After the generic setup succeeds, direct children get extra PCI work:
 * legacy INTx interrupts have PCIM_CMD_INTxDIS cleared (INTx enabled);
 * MSI/MSI-X interrupts are mapped through the parent bridge
 * (PCIB_MAP_MSI) on first use, programmed into the device, and INTx is
 * disabled.  If the MSI/MSI-X mapping fails the handler is torn down
 * again.
 *
 * NOTE(review): lines are elided (declarations of error/cookie/rid/
 * addr/data/vector, several braces and error-path statements); verify
 * against upstream pci.c before reasoning about exact control flow.
 */
2984 pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags,
2985 driver_intr_t *intr, void *arg, void **cookiep, lwkt_serialize_t serializer)
2990 error = bus_generic_setup_intr(dev, child, irq, flags, intr,
2991 arg, &cookie, serializer);
2995 /* If this is not a direct child, just bail out. */
2996 if (device_get_parent(child) != dev) {
3001 rid = rman_get_rid(irq);
3003 /* Make sure that INTx is enabled */
3004 pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
3006 struct pci_devinfo *dinfo = device_get_ivars(child);
3011 * Check to see if the interrupt is MSI or MSI-X.
3012 * Ask our parent to map the MSI and give
3013 * us the address and data register values.
3014 * If we fail for some reason, teardown the
3015 * interrupt handler.
3017 if (dinfo->cfg.msi.msi_alloc > 0) {
3018 struct pcicfg_msi *msi = &dinfo->cfg.msi;
/* First handler on this device: map and enable MSI. */
3020 if (msi->msi_addr == 0) {
3021 KASSERT(msi->msi_handlers == 0,
3022 ("MSI has handlers, but vectors not mapped"));
3023 error = PCIB_MAP_MSI(device_get_parent(dev),
3024 child, rman_get_start(irq), &addr, &data,
3025 rman_get_cpuid(irq));
3028 msi->msi_addr = addr;
3029 msi->msi_data = data;
3030 pci_enable_msi(child, addr, data);
3032 msi->msi_handlers++;
3034 struct msix_vector *mv;
3037 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
3038 ("No MSI-X or MSI rid %d allocated\n", rid));
3040 mv = pci_find_msix_vector(child, rid);
3042 ("MSI-X rid %d is not allocated\n", rid));
3043 KASSERT(mv->mv_address == 0,
3044 ("MSI-X rid %d has been setup\n", rid));
3046 error = PCIB_MAP_MSI(device_get_parent(dev),
3047 child, rman_get_start(irq), &addr, &data,
3048 rman_get_cpuid(irq));
3051 mv->mv_address = addr;
/* Program the MSI-X table entry and unmask the vector. */
3054 vector = PCI_MSIX_RID2VEC(rid);
3055 pci_setup_msix_vector(child, vector,
3056 mv->mv_address, mv->mv_data);
3057 pci_unmask_msix_vector(child, vector);
3060 /* Make sure that INTx is disabled if we are using MSI/MSIX */
3061 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
/* Error path: undo the generic setup done above. */
3064 (void)bus_generic_teardown_intr(dev, child, irq,
/*
 * pci_teardown_intr() -- bus method to remove an interrupt handler.
 *
 * Non-direct children are handed straight to the generic teardown.
 * For direct children: legacy INTx is disabled (PCIM_CMD_INTxDIS set);
 * for MSI, the per-device handler count is decremented and MSI disabled
 * when it reaches zero; for MSI-X, the vector is masked.  Finally the
 * generic teardown is performed and asserted to succeed.
 *
 * NOTE(review): lines are elided (rid/error declarations, some KASSERT
 * first arguments, braces); verify against upstream pci.c.
 */
3074 pci_teardown_intr(device_t dev, device_t child, struct resource *irq,
3079 if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE))
3082 /* If this isn't a direct child, just bail out */
3083 if (device_get_parent(child) != dev)
3084 return(bus_generic_teardown_intr(dev, child, irq, cookie));
3086 rid = rman_get_rid(irq);
3089 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
3091 struct pci_devinfo *dinfo = device_get_ivars(child);
3094 * Check to see if the interrupt is MSI or MSI-X. If so,
3095 * decrement the appropriate handlers count and mask the
3096 * MSI-X message, or disable MSI messages if the count
3099 if (dinfo->cfg.msi.msi_alloc > 0) {
3100 struct pcicfg_msi *msi = &dinfo->cfg.msi;
3102 KASSERT(rid <= msi->msi_alloc,
3103 ("MSI-X index too high\n"));
3104 KASSERT(msi->msi_handlers > 0,
3105 ("MSI rid %d is not setup\n", rid));
3107 msi->msi_handlers--;
3108 if (msi->msi_handlers == 0)
3109 pci_disable_msi(child);
3111 struct msix_vector *mv;
3113 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
3114 ("No MSI or MSI-X rid %d allocated", rid));
3116 mv = pci_find_msix_vector(child, rid);
3118 ("MSI-X rid %d is not allocated\n", rid));
3119 KASSERT(mv->mv_address != 0,
3120 ("MSI-X rid %d has not been setup\n", rid));
3122 pci_mask_msix_vector(child, PCI_MSIX_RID2VEC(rid));
3127 error = bus_generic_teardown_intr(dev, child, irq, cookie);
3130 ("%s: generic teardown failed for MSI/MSI-X", __func__));
/*
 * pci_print_child() -- bus method to print one child's attachment line:
 * its reserved port/memory/irq resources, any device flags, and its
 * slot.function location.  Returns the total character count printed,
 * per the bus_print_child convention.
 */
3135 pci_print_child(device_t dev, device_t child)
3137 struct pci_devinfo *dinfo;
3138 struct resource_list *rl;
3141 dinfo = device_get_ivars(child);
3142 rl = &dinfo->resources;
3144 retval += bus_print_child_header(dev, child);
3146 retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx");
3147 retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx");
3148 retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld");
3149 if (device_get_flags(dev))
3150 retval += kprintf(" flags %#x", device_get_flags(dev));
3152 retval += kprintf(" at device %d.%d", pci_get_slot(child),
3153 pci_get_function(child));
3155 retval += bus_print_child_footer(dev, child);
3165 } pci_nomatch_tab[] = {
3166 {PCIC_OLD, -1, "old"},
3167 {PCIC_OLD, PCIS_OLD_NONVGA, "non-VGA display device"},
3168 {PCIC_OLD, PCIS_OLD_VGA, "VGA-compatible display device"},
3169 {PCIC_STORAGE, -1, "mass storage"},
3170 {PCIC_STORAGE, PCIS_STORAGE_SCSI, "SCSI"},
3171 {PCIC_STORAGE, PCIS_STORAGE_IDE, "ATA"},
3172 {PCIC_STORAGE, PCIS_STORAGE_FLOPPY, "floppy disk"},
3173 {PCIC_STORAGE, PCIS_STORAGE_IPI, "IPI"},
3174 {PCIC_STORAGE, PCIS_STORAGE_RAID, "RAID"},
3175 {PCIC_STORAGE, PCIS_STORAGE_ATA_ADMA, "ATA (ADMA)"},
3176 {PCIC_STORAGE, PCIS_STORAGE_SATA, "SATA"},
3177 {PCIC_STORAGE, PCIS_STORAGE_SAS, "SAS"},
3178 {PCIC_NETWORK, -1, "network"},
3179 {PCIC_NETWORK, PCIS_NETWORK_ETHERNET, "ethernet"},
3180 {PCIC_NETWORK, PCIS_NETWORK_TOKENRING, "token ring"},
3181 {PCIC_NETWORK, PCIS_NETWORK_FDDI, "fddi"},
3182 {PCIC_NETWORK, PCIS_NETWORK_ATM, "ATM"},
3183 {PCIC_NETWORK, PCIS_NETWORK_ISDN, "ISDN"},
3184 {PCIC_DISPLAY, -1, "display"},
3185 {PCIC_DISPLAY, PCIS_DISPLAY_VGA, "VGA"},
3186 {PCIC_DISPLAY, PCIS_DISPLAY_XGA, "XGA"},
3187 {PCIC_DISPLAY, PCIS_DISPLAY_3D, "3D"},
3188 {PCIC_MULTIMEDIA, -1, "multimedia"},
3189 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_VIDEO, "video"},
3190 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_AUDIO, "audio"},
3191 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_TELE, "telephony"},
3192 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_HDA, "HDA"},
3193 {PCIC_MEMORY, -1, "memory"},
3194 {PCIC_MEMORY, PCIS_MEMORY_RAM, "RAM"},
3195 {PCIC_MEMORY, PCIS_MEMORY_FLASH, "flash"},
3196 {PCIC_BRIDGE, -1, "bridge"},
3197 {PCIC_BRIDGE, PCIS_BRIDGE_HOST, "HOST-PCI"},
3198 {PCIC_BRIDGE, PCIS_BRIDGE_ISA, "PCI-ISA"},
3199 {PCIC_BRIDGE, PCIS_BRIDGE_EISA, "PCI-EISA"},
3200 {PCIC_BRIDGE, PCIS_BRIDGE_MCA, "PCI-MCA"},
3201 {PCIC_BRIDGE, PCIS_BRIDGE_PCI, "PCI-PCI"},
3202 {PCIC_BRIDGE, PCIS_BRIDGE_PCMCIA, "PCI-PCMCIA"},
3203 {PCIC_BRIDGE, PCIS_BRIDGE_NUBUS, "PCI-NuBus"},
3204 {PCIC_BRIDGE, PCIS_BRIDGE_CARDBUS, "PCI-CardBus"},
3205 {PCIC_BRIDGE, PCIS_BRIDGE_RACEWAY, "PCI-RACEway"},
3206 {PCIC_SIMPLECOMM, -1, "simple comms"},
3207 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_UART, "UART"}, /* could detect 16550 */
3208 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_PAR, "parallel port"},
3209 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MULSER, "multiport serial"},
3210 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MODEM, "generic modem"},
3211 {PCIC_BASEPERIPH, -1, "base peripheral"},
3212 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PIC, "interrupt controller"},
3213 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_DMA, "DMA controller"},
3214 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_TIMER, "timer"},
3215 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_RTC, "realtime clock"},
3216 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PCIHOT, "PCI hot-plug controller"},
3217 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_SDHC, "SD host controller"},
3218 {PCIC_INPUTDEV, -1, "input device"},
3219 {PCIC_INPUTDEV, PCIS_INPUTDEV_KEYBOARD, "keyboard"},
3220 {PCIC_INPUTDEV, PCIS_INPUTDEV_DIGITIZER,"digitizer"},
3221 {PCIC_INPUTDEV, PCIS_INPUTDEV_MOUSE, "mouse"},
3222 {PCIC_INPUTDEV, PCIS_INPUTDEV_SCANNER, "scanner"},
3223 {PCIC_INPUTDEV, PCIS_INPUTDEV_GAMEPORT, "gameport"},
3224 {PCIC_DOCKING, -1, "docking station"},
3225 {PCIC_PROCESSOR, -1, "processor"},
3226 {PCIC_SERIALBUS, -1, "serial bus"},
3227 {PCIC_SERIALBUS, PCIS_SERIALBUS_FW, "FireWire"},
3228 {PCIC_SERIALBUS, PCIS_SERIALBUS_ACCESS, "AccessBus"},
3229 {PCIC_SERIALBUS, PCIS_SERIALBUS_SSA, "SSA"},
3230 {PCIC_SERIALBUS, PCIS_SERIALBUS_USB, "USB"},
3231 {PCIC_SERIALBUS, PCIS_SERIALBUS_FC, "Fibre Channel"},
3232 {PCIC_SERIALBUS, PCIS_SERIALBUS_SMBUS, "SMBus"},
3233 {PCIC_WIRELESS, -1, "wireless controller"},
3234 {PCIC_WIRELESS, PCIS_WIRELESS_IRDA, "iRDA"},
3235 {PCIC_WIRELESS, PCIS_WIRELESS_IR, "IR"},
3236 {PCIC_WIRELESS, PCIS_WIRELESS_RF, "RF"},
3237 {PCIC_INTELLIIO, -1, "intelligent I/O controller"},
3238 {PCIC_INTELLIIO, PCIS_INTELLIIO_I2O, "I2O"},
3239 {PCIC_SATCOM, -1, "satellite communication"},
3240 {PCIC_SATCOM, PCIS_SATCOM_TV, "sat TV"},
3241 {PCIC_SATCOM, PCIS_SATCOM_AUDIO, "sat audio"},
3242 {PCIC_SATCOM, PCIS_SATCOM_VOICE, "sat voice"},
3243 {PCIC_SATCOM, PCIS_SATCOM_DATA, "sat data"},
3244 {PCIC_CRYPTO, -1, "encrypt/decrypt"},
3245 {PCIC_CRYPTO, PCIS_CRYPTO_NETCOMP, "network/computer crypto"},
3246 {PCIC_CRYPTO, PCIS_CRYPTO_ENTERTAIN, "entertainment crypto"},
3247 {PCIC_DASP, -1, "dasp"},
3248 {PCIC_DASP, PCIS_DASP_DPIO, "DPIO module"},
/*
 * pci_probe_nomatch() -- bus method invoked when no driver claims a
 * child.  Prints a description: preferably from the loaded vendor
 * database (pci_describe_device), otherwise a generic class/subclass
 * name from pci_nomatch_tab, plus vendor/device IDs, location and IRQ.
 * The unclaimed device is then powered down via pci_cfg_save(..., 1).
 */
3253 pci_probe_nomatch(device_t dev, device_t child)
3256 char *cp, *scp, *device;
3259 * Look for a listing for this device in a loaded device database.
3261 if ((device = pci_describe_device(child)) != NULL) {
3262 device_printf(dev, "<%s>", device);
3263 kfree(device, M_DEVBUF);
3266 * Scan the class/subclass descriptions for a general
3271 for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) {
3272 if (pci_nomatch_tab[i].class == pci_get_class(child)) {
/* subclass == -1 entries name the whole class; exact subclass
 * matches refine it. */
3273 if (pci_nomatch_tab[i].subclass == -1) {
3274 cp = pci_nomatch_tab[i].desc;
3275 } else if (pci_nomatch_tab[i].subclass ==
3276 pci_get_subclass(child)) {
3277 scp = pci_nomatch_tab[i].desc;
3281 device_printf(dev, "<%s%s%s>",
3283 ((cp != NULL) && (scp != NULL)) ? ", " : "",
3286 kprintf(" (vendor 0x%04x, dev 0x%04x) at device %d.%d",
3287 pci_get_vendor(child), pci_get_device(child),
3288 pci_get_slot(child), pci_get_function(child));
3289 if (pci_get_intpin(child) > 0) {
3292 irq = pci_get_irq(child);
3293 if (PCI_INTERRUPT_VALID(irq))
3294 kprintf(" irq %d", irq);
3298 pci_cfg_save(child, (struct pci_devinfo *)device_get_ivars(child), 1);
3302 * Parse the PCI device database, if loaded, and return a pointer to a
3303 * description of the device.
3305 * The database is flat text formatted as follows:
3307 * Any line not in a valid format is ignored.
3308 * Lines are terminated with newline '\n' characters.
3310 * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then
3313 * A DEVICE line is entered immediately below the corresponding VENDOR ID.
3314 * - devices cannot be listed without a corresponding VENDOR line.
3315 * A DEVICE line consists of a TAB, the 4 digit (hex) device code,
3316 * another TAB, then the device name.
3320 * Assuming (ptr) points to the beginning of a line in the database,
3321 * return the vendor or device and description of the next entry.
3322 * The value of (vendor) or (device) inappropriate for the entry type
3323 * is set to -1. Returns nonzero at the end of the database.
3325 * Note that this is not fully robust in the face of corrupt data;
3326 * we attempt to safeguard against this by spamming the end of the
3327 * database with a newline when we initialise.
/*
 * pci_describe_parse_line() -- parse one line of the flat-text vendor
 * database at *ptr, advancing *ptr to the next line.  A vendor line
 * ("%x\t<name>") fills *vendor, a tab-indented device line fills
 * *device; *desc receives the up-to-80-char name.  Returns nonzero at
 * end of database.
 *
 * NOTE(review): heavily elided here (the vendor/device line dispatch and
 * the *ptr update are missing); verify against upstream pci.c.
 */
3330 pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc)
3339 left = pci_vendordata_size - (cp - pci_vendordata);
3347 ksscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2)
3351 ksscanf(cp, "%x\t%80[^\n]", device, *desc) == 2)
3354 /* skip to next line */
3355 while (*cp != '\n' && left > 0) {
3364 /* skip to next line */
3365 while (*cp != '\n' && left > 0) {
3369 if (*cp == '\n' && left > 0)
/*
 * pci_describe_device() -- look up a device in the loaded vendor
 * database and return a kmalloc'd "vendor, device" string (caller frees
 * with M_DEVBUF), or NULL if no database is loaded or allocation fails.
 * If the device ID itself is not listed, the numeric ID is formatted
 * into the device slot instead.
 */
3376 pci_describe_device(device_t dev)
3379 char *desc, *vp, *dp, *line;
3381 desc = vp = dp = NULL;
3384 * If we have no vendor data, we can't do anything.
3386 if (pci_vendordata == NULL)
3390 * Scan the vendor data looking for this device
3392 line = pci_vendordata;
3393 if ((vp = kmalloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
3396 if (pci_describe_parse_line(&line, &vendor, &device, &vp))
3398 if (vendor == pci_get_vendor(dev))
3401 if ((dp = kmalloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
3404 if (pci_describe_parse_line(&line, &vendor, &device, &dp)) {
3412 if (device == pci_get_device(dev))
/* Device not in the database: fall back to the raw hex ID. */
3416 ksnprintf(dp, 80, "0x%x", pci_get_device(dev));
3417 if ((desc = kmalloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) !=
3419 ksprintf(desc, "%s, %s", vp, dp);
3422 kfree(vp, M_DEVBUF);
3424 kfree(dp, M_DEVBUF);
/*
 * pci_read_ivar() -- bus method implementing the pci_get_*() accessors:
 * copy the requested field of the child's cached pcicfgregs into
 * *result.  PCI_IVAR_ETHADDR always fails (with *result set to NULL)
 * since the generic accessor cannot report errors on its own.
 */
3429 pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
3431 struct pci_devinfo *dinfo;
3434 dinfo = device_get_ivars(child);
3438 case PCI_IVAR_ETHADDR:
3440 * The generic accessor doesn't deal with failure, so
3441 * we set the return value, then return an error.
3443 *((uint8_t **) result) = NULL;
3445 case PCI_IVAR_SUBVENDOR:
3446 *result = cfg->subvendor;
3448 case PCI_IVAR_SUBDEVICE:
3449 *result = cfg->subdevice;
3451 case PCI_IVAR_VENDOR:
3452 *result = cfg->vendor;
3454 case PCI_IVAR_DEVICE:
3455 *result = cfg->device;
3457 case PCI_IVAR_DEVID:
3458 *result = (cfg->device << 16) | cfg->vendor;
3460 case PCI_IVAR_CLASS:
3461 *result = cfg->baseclass;
3463 case PCI_IVAR_SUBCLASS:
3464 *result = cfg->subclass;
3466 case PCI_IVAR_PROGIF:
3467 *result = cfg->progif;
3469 case PCI_IVAR_REVID:
3470 *result = cfg->revid;
3472 case PCI_IVAR_INTPIN:
3473 *result = cfg->intpin;
3476 *result = cfg->intline;
3478 case PCI_IVAR_DOMAIN:
3479 *result = cfg->domain;
3485 *result = cfg->slot;
3487 case PCI_IVAR_FUNCTION:
3488 *result = cfg->func;
3490 case PCI_IVAR_CMDREG:
3491 *result = cfg->cmdreg;
3493 case PCI_IVAR_CACHELNSZ:
3494 *result = cfg->cachelnsz;
3496 case PCI_IVAR_MINGNT:
3497 *result = cfg->mingnt;
3499 case PCI_IVAR_MAXLAT:
3500 *result = cfg->maxlat;
3502 case PCI_IVAR_LATTIMER:
3503 *result = cfg->lattimer;
3505 case PCI_IVAR_PCIXCAP_PTR:
3506 *result = cfg->pcix.pcix_ptr;
3508 case PCI_IVAR_PCIECAP_PTR:
3509 *result = cfg->expr.expr_ptr;
3511 case PCI_IVAR_VPDCAP_PTR:
3512 *result = cfg->vpd.vpd_reg;
/*
 * pci_write_ivar() -- bus method for pci_set_*(): only PCI_IVAR_INTPIN
 * is writable; all identification/location ivars are read-only and
 * return EINVAL.
 */
3521 pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
3523 struct pci_devinfo *dinfo;
3525 dinfo = device_get_ivars(child);
3528 case PCI_IVAR_INTPIN:
3529 dinfo->cfg.intpin = value;
3531 case PCI_IVAR_ETHADDR:
3532 case PCI_IVAR_SUBVENDOR:
3533 case PCI_IVAR_SUBDEVICE:
3534 case PCI_IVAR_VENDOR:
3535 case PCI_IVAR_DEVICE:
3536 case PCI_IVAR_DEVID:
3537 case PCI_IVAR_CLASS:
3538 case PCI_IVAR_SUBCLASS:
3539 case PCI_IVAR_PROGIF:
3540 case PCI_IVAR_REVID:
3542 case PCI_IVAR_DOMAIN:
3545 case PCI_IVAR_FUNCTION:
3546 return (EINVAL); /* disallow for now */
3553 #include "opt_ddb.h"
3555 #include <ddb/ddb.h>
3556 #include <sys/cons.h>
/*
 * DDB "show pciregs" command: walk the global pci_devq device list and
 * print one summary line per device (driver name/unit, domain:bus:dev:
 * func, class, card/chip IDs, revision and header type).  Stops early
 * if the debugger pager is quit.
 */
3559 * List resources based on pci map registers, used for within ddb
3562 DB_SHOW_COMMAND(pciregs, db_pci_dump)
3564 struct pci_devinfo *dinfo;
3565 struct devlist *devlist_head;
3568 int i, error, none_count;
3571 /* get the head of the device queue */
3572 devlist_head = &pci_devq;
3575 * Go through the list of devices and print out devices
3577 for (error = 0, i = 0,
3578 dinfo = STAILQ_FIRST(devlist_head);
3579 (dinfo != NULL) && (error == 0) && (i < pci_numdevs) && !db_pager_quit;
3580 dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {
3582 /* Populate pd_name and pd_unit */
3585 name = device_get_name(dinfo->cfg.dev);
3588 db_kprintf("%s%d@pci%d:%d:%d:%d:\tclass=0x%06x card=0x%08x "
3589 "chip=0x%08x rev=0x%02x hdr=0x%02x\n",
3590 (name && *name) ? name : "none",
3591 (name && *name) ? (int)device_get_unit(dinfo->cfg.dev) :
3593 p->pc_sel.pc_domain, p->pc_sel.pc_bus, p->pc_sel.pc_dev,
3594 p->pc_sel.pc_func, (p->pc_class << 16) |
3595 (p->pc_subclass << 8) | p->pc_progif,
3596 (p->pc_subdevice << 16) | p->pc_subvendor,
3597 (p->pc_device << 16) | p->pc_vendor,
3598 p->pc_revid, p->pc_hdr);
/*
 * pci_alloc_map() -- lazily allocate a resource for a BAR that has no
 * resource-list entry yet (typically because the firmware never
 * programmed it).
 *
 * The BAR is sized by the standard write-all-ones probe (with the
 * original value restored immediately, so a console device stays
 * addressable), the requested type is validated against what the BAR
 * claims to be (memory vs. I/O), and the allocation size/alignment is
 * forced to the BAR's true decode size regardless of what the caller
 * asked for.  On success the new range is entered into the resource
 * list and programmed into the BAR (both halves for 64-bit BARs).
 */
3604 static struct resource *
3605 pci_alloc_map(device_t dev, device_t child, int type, int *rid,
3606 u_long start, u_long end, u_long count, u_int flags)
3608 struct pci_devinfo *dinfo = device_get_ivars(child);
3609 struct resource_list *rl = &dinfo->resources;
3610 struct resource_list_entry *rle;
3611 struct resource *res;
3612 pci_addr_t map, testval;
3616 * Weed out the bogons, and figure out how large the BAR/map
3617 * is. Bars that read back 0 here are bogus and unimplemented.
3618 * Note: atapci in legacy mode are special and handled elsewhere
3619 * in the code. If you have a atapci device in legacy mode and
3620 * it fails here, that other code is broken.
3623 map = pci_read_config(child, *rid, 4);
3624 pci_write_config(child, *rid, 0xffffffff, 4);
3625 testval = pci_read_config(child, *rid, 4);
3626 if (pci_maprange(testval) == 64)
3627 map |= (pci_addr_t)pci_read_config(child, *rid + 4, 4) << 32;
3628 if (pci_mapbase(testval) == 0)
3632 * Restore the original value of the BAR. We may have reprogrammed
3633 * the BAR of the low-level console device and when booting verbose,
3634 * we need the console device addressable.
3636 pci_write_config(child, *rid, map, 4);
3638 if (PCI_BAR_MEM(testval)) {
3639 if (type != SYS_RES_MEMORY) {
3642 "child %s requested type %d for rid %#x,"
3643 " but the BAR says it is an memio\n",
3644 device_get_nameunit(child), type, *rid);
3648 if (type != SYS_RES_IOPORT) {
3651 "child %s requested type %d for rid %#x,"
3652 " but the BAR says it is an ioport\n",
3653 device_get_nameunit(child), type, *rid);
3658 * For real BARs, we need to override the size that
3659 * the driver requests, because that's what the BAR
3660 * actually uses and we would otherwise have a
3661 * situation where we might allocate the excess to
3662 * another driver, which won't work.
3664 mapsize = pci_mapsize(testval);
3665 count = 1UL << mapsize;
3666 if (RF_ALIGNMENT(flags) < mapsize)
3667 flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize);
3668 if (PCI_BAR_MEM(testval) && (testval & PCIM_BAR_MEM_PREFETCH))
3669 flags |= RF_PREFETCHABLE;
3672 * Allocate enough resource, and then write back the
3673 * appropriate bar for that resource.
3675 res = BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid,
3676 start, end, count, flags, -1);
3678 device_printf(child,
3679 "%#lx bytes of rid %#x res %d failed (%#lx, %#lx).\n",
3680 count, *rid, type, start, end);
3683 resource_list_add(rl, type, *rid, start, end, count, -1);
3684 rle = resource_list_find(rl, type, *rid);
3686 panic("pci_alloc_map: unexpectedly can't find resource.");
/* Record where the resource actually landed. */
3688 rle->start = rman_get_start(res);
3689 rle->end = rman_get_end(res);
3692 device_printf(child,
3693 "Lazy allocation of %#lx bytes rid %#x type %d at %#lx\n",
3694 count, *rid, type, rman_get_start(res));
3695 map = rman_get_start(res);
/* Program the BAR with the allocated base (high half for 64-bit). */
3697 pci_write_config(child, *rid, map, 4);
3698 if (pci_maprange(testval) == 64)
3699 pci_write_config(child, *rid + 4, map >> 32, 4);
/*
 * pci_alloc_resource() -- bus method for child resource allocation,
 * with PCI-specific lazy behaviour for direct children:
 *
 *  - SYS_RES_IRQ rid 0: refuse if MSI/MSI-X messages are already
 *    allocated; otherwise assign a legacy interrupt on demand if the
 *    device has none routed yet.
 *  - SYS_RES_IOPORT/MEMORY on a BAR rid: enable the decode via
 *    PCI_ENABLE_IO(), and if no resource-list entry exists fall back
 *    to pci_alloc_map() to size and program the BAR.
 *  - If the resource was pre-reserved at scan time, return it (after
 *    activating it here when RF_ACTIVE is requested, since the normal
 *    nexus activation path is bypassed).
 *
 * Everything else funnels into resource_list_alloc().
 */
3705 pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
3706 u_long start, u_long end, u_long count, u_int flags, int cpuid)
3708 struct pci_devinfo *dinfo = device_get_ivars(child);
3709 struct resource_list *rl = &dinfo->resources;
3710 struct resource_list_entry *rle;
3711 pcicfgregs *cfg = &dinfo->cfg;
3714 * Perform lazy resource allocation
3716 if (device_get_parent(child) == dev) {
3720 * Can't alloc legacy interrupt once MSI messages
3721 * have been allocated.
3723 if (*rid == 0 && (cfg->msi.msi_alloc > 0 ||
3724 cfg->msix.msix_alloc > 0))
3727 * If the child device doesn't have an
3728 * interrupt routed and is deserving of an
3729 * interrupt, try to assign it one.
3731 if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) &&
3733 pci_assign_interrupt(dev, child, 0);
3735 case SYS_RES_IOPORT:
3736 case SYS_RES_MEMORY:
3737 if (*rid < PCIR_BAR(cfg->nummaps)) {
3739 * Enable the I/O mode. We should
3740 * also be assigning resources too
3741 * when none are present. The
3742 * resource_list_alloc kind of sorta does
3745 if (PCI_ENABLE_IO(dev, child, type))
3748 rle = resource_list_find(rl, type, *rid);
3750 return (pci_alloc_map(dev, child, type, rid,
3751 start, end, count, flags));
3755 * If we've already allocated the resource, then
3756 * return it now. But first we may need to activate
3757 * it, since we don't allocate the resource as active
3758 * above. Normally this would be done down in the
3759 * nexus, but since we short-circuit that path we have
3760 * to do its job here. Not sure if we should kfree the
3761 * resource if it fails to activate.
3763 rle = resource_list_find(rl, type, *rid);
3764 if (rle != NULL && rle->res != NULL) {
3766 device_printf(child,
3767 "Reserved %#lx bytes for rid %#x type %d at %#lx\n",
3768 rman_get_size(rle->res), *rid, type,
3769 rman_get_start(rle->res));
3770 if ((flags & RF_ACTIVE) &&
3771 bus_generic_activate_resource(dev, child, type,
3772 *rid, rle->res) != 0)
3777 return (resource_list_alloc(rl, dev, child, type, rid,
3778 start, end, count, flags, cpuid));
/*
 * pci_delete_resource() -- bus method to remove a resource-list entry
 * for a direct child.  Refuses (with a diagnostic) if the child still
 * owns or has activated the underlying resource; otherwise releases it,
 * drops the list entry, clears the corresponding BAR register, and
 * propagates the deletion to the parent bus.
 */
3782 pci_delete_resource(device_t dev, device_t child, int type, int rid)
3784 struct pci_devinfo *dinfo;
3785 struct resource_list *rl;
3786 struct resource_list_entry *rle;
3788 if (device_get_parent(child) != dev)
3791 dinfo = device_get_ivars(child);
3792 rl = &dinfo->resources;
3793 rle = resource_list_find(rl, type, rid);
3796 if (rman_get_device(rle->res) != dev ||
3797 rman_get_flags(rle->res) & RF_ACTIVE) {
3798 device_printf(dev, "delete_resource: "
3799 "Resource still owned by child, oops. "
3800 "(type=%d, rid=%d, addr=%lx)\n",
3801 rle->type, rle->rid,
3802 rman_get_start(rle->res));
3805 bus_release_resource(dev, type, rid, rle->res);
3807 resource_list_delete(rl, type, rid);
3810 * Why do we turn off the PCI configuration BAR when we delete a
3813 pci_write_config(child, rid, 0, 4);
3814 BUS_DELETE_RESOURCE(device_get_parent(dev), child, type, rid);
/*
 * pci_get_resource_list() -- bus method returning the child's
 * per-device resource list kept in its pci_devinfo.
 */
3817 struct resource_list *
3818 pci_get_resource_list (device_t dev, device_t child)
3820 struct pci_devinfo *dinfo = device_get_ivars(child);
3825 return (&dinfo->resources);
/*
 * pci_read_config_method() -- bus method backing pci_read_config():
 * forward the config-space read to the parent bridge with the child's
 * cached bus/slot/func address.
 */
3829 pci_read_config_method(device_t dev, device_t child, int reg, int width)
3831 struct pci_devinfo *dinfo = device_get_ivars(child);
3832 pcicfgregs *cfg = &dinfo->cfg;
3834 return (PCIB_READ_CONFIG(device_get_parent(dev),
3835 cfg->bus, cfg->slot, cfg->func, reg, width));
/*
 * pci_write_config_method() -- bus method backing pci_write_config():
 * forward the config-space write to the parent bridge with the child's
 * cached bus/slot/func address.
 */
3839 pci_write_config_method(device_t dev, device_t child, int reg,
3840 uint32_t val, int width)
3842 struct pci_devinfo *dinfo = device_get_ivars(child);
3843 pcicfgregs *cfg = &dinfo->cfg;
3845 PCIB_WRITE_CONFIG(device_get_parent(dev),
3846 cfg->bus, cfg->slot, cfg->func, reg, val, width);
/*
 * pci_child_location_str_method() -- format the child's location
 * ("slot=%d function=%d") into buf for devctl/device-location queries.
 */
3850 pci_child_location_str_method(device_t dev, device_t child, char *buf,
3854 ksnprintf(buf, buflen, "slot=%d function=%d", pci_get_slot(child),
3855 pci_get_function(child));
/*
 * pci_child_pnpinfo_str_method() -- format the child's plug-and-play
 * identity (vendor/device/subvendor/subdevice IDs and the combined
 * class/subclass/progif) into buf for driver matching.
 */
3860 pci_child_pnpinfo_str_method(device_t dev, device_t child, char *buf,
3863 struct pci_devinfo *dinfo;
3866 dinfo = device_get_ivars(child);
3868 ksnprintf(buf, buflen, "vendor=0x%04x device=0x%04x subvendor=0x%04x "
3869 "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device,
3870 cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass,
/*
 * pci_assign_interrupt_method() -- bus method delegating interrupt
 * routing for a child to the parent bridge's PCIB_ROUTE_INTERRUPT.
 */
3876 pci_assign_interrupt_method(device_t dev, device_t child)
3878 struct pci_devinfo *dinfo = device_get_ivars(child);
3879 pcicfgregs *cfg = &dinfo->cfg;
3881 return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child,
/*
 * pci_modevent() -- module event handler: on load, initialize the
 * global device queue, create the /dev control node and load the
 * vendor-name database; on unload, destroy the control node.
 */
3886 pci_modevent(module_t mod, int what, void *arg)
3888 static struct cdev *pci_cdev;
3892 STAILQ_INIT(&pci_devq);
3894 pci_cdev = make_dev(&pcic_ops, 0, UID_ROOT, GID_WHEEL, 0644,
3896 pci_load_vendor_data();
3900 destroy_dev(pci_cdev);
/*
 * pci_cfg_restore() -- restore a device's saved configuration space
 * (header type 0 only): power it to D0 first, since leaving D3 resets
 * BARs and other registers, then rewrite the BARs, BIOS ROM register,
 * command, interrupt, timing and ID-adjacent registers from the cached
 * pcicfgregs, and finally re-program MSI/MSI-X state if present.
 */
3908 pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo)
3913 * Only do header type 0 devices. Type 1 devices are bridges,
3914 * which we know need special treatment. Type 2 devices are
3915 * cardbus bridges which also require special treatment.
3916 * Other types are unknown, and we err on the side of safety
3919 if (dinfo->cfg.hdrtype != 0)
3923 * Restore the device to full power mode. We must do this
3924 * before we restore the registers because moving from D3 to
3925 * D0 will cause the chip's BARs and some other registers to
3926 * be reset to some unknown power on reset values. Cut down
3927 * the noise on boot by doing nothing if we are already in
3930 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
3931 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
3933 for (i = 0; i < dinfo->cfg.nummaps; i++)
3934 pci_write_config(dev, PCIR_BAR(i), dinfo->cfg.bar[i], 4);
3935 pci_write_config(dev, PCIR_BIOS, dinfo->cfg.bios, 4);
3936 pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2);
3937 pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1);
3938 pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1);
3939 pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1);
3940 pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1);
3941 pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1);
3942 pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1);
3943 pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1);
3944 pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1);
3946 /* Restore MSI and MSI-X configurations if they are present. */
3947 if (dinfo->cfg.msi.msi_location != 0)
3948 pci_resume_msi(dev);
3949 if (dinfo->cfg.msix.msix_location != 0)
3950 pci_resume_msix(dev);
3954 pci_cfg_save(device_t dev, struct pci_devinfo *dinfo, int setstate)
3961 * Only do header type 0 devices. Type 1 devices are bridges, which
3962 * we know need special treatment. Type 2 devices are cardbus bridges
3963 * which also require special treatment. Other types are unknown, and
3964 * we err on the side of safety by ignoring them. Powering down
3965 * bridges should not be undertaken lightly.
3967 if (dinfo->cfg.hdrtype != 0)
3969 for (i = 0; i < dinfo->cfg.nummaps; i++)
3970 dinfo->cfg.bar[i] = pci_read_config(dev, PCIR_BAR(i), 4);
3971 dinfo->cfg.bios = pci_read_config(dev, PCIR_BIOS, 4);
3974 * Some drivers apparently write to these registers w/o updating our
3975 * cached copy. No harm happens if we update the copy, so do so here
3976 * so we can restore them. The COMMAND register is modified by the
3977 * bus w/o updating the cache. This should represent the normally
3978 * writable portion of the 'defined' part of type 0 headers. In
3979 * theory we also need to save/restore the PCI capability structures
3980 * we know about, but apart from power we don't know any that are
3983 dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
3984 dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2);
3985 dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2);
3986 dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2);
3987 dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2);
3988 dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1);
3989 dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1);
3990 dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1);
3991 dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1);
3992 dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
3993 dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
3994 dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1);
3995 dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1);
3996 dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1);
3997 dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1);
4000 * don't set the state for display devices, base peripherals and
4001 * memory devices since bad things happen when they are powered down.
4002 * We should (a) have drivers that can easily detach and (b) use
4003 * generic drivers for these devices so that some device actually
4004 * attaches. We need to make sure that when we implement (a) we don't
4005 * power the device down on a reattach.
4007 cls = pci_get_class(dev);
4010 switch (pci_do_power_nodriver)
4012 case 0: /* NO powerdown at all */
4014 case 1: /* Conservative about what to power down */
4015 if (cls == PCIC_STORAGE)
4018 case 2: /* Agressive about what to power down */
4019 if (cls == PCIC_DISPLAY || cls == PCIC_MEMORY ||
4020 cls == PCIC_BASEPERIPH)
4023 case 3: /* Power down everything */
4027 * PCI spec says we can only go into D3 state from D0 state.
4028 * Transition from D[12] into D0 before going to D3 state.
4030 ps = pci_get_powerstate(dev);
4031 if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3)
4032 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
4033 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3)
4034 pci_set_powerstate(dev, PCI_POWERSTATE_D3);
#ifdef COMPAT_OLDPCI

/*
 * Locate the parent of a PCI device by scanning the PCI devlist
 * and return the entry for the parent.
 * For devices on PCI Bus 0 (the host bus), this is the PCI Host.
 * For devices on secondary PCI busses, this is that bus' PCI-PCI Bridge.
 */
pcicfgregs *
pci_devlist_get_parent(pcicfgregs *cfg)
{
	struct devlist *devlist_head;
	struct pci_devinfo *dinfo;
	pcicfgregs *bcfg;
	int n;

	dinfo = STAILQ_FIRST(devlist_head = &pci_devq);

	/* If the device is on PCI bus 0, look for the host */
	if (cfg->bus == 0) {
		for (n = 0; (dinfo != NULL) && (n < pci_numdevs);
		    dinfo = STAILQ_NEXT(dinfo, pci_links), n++) {
			bcfg = &dinfo->cfg;
			if (bcfg->baseclass == PCIC_BRIDGE &&
			    bcfg->subclass == PCIS_BRIDGE_HOST &&
			    bcfg->bus == cfg->bus)
				return (bcfg);
		}
	}

	/* If the device is not on PCI bus 0, look for the PCI-PCI bridge */
	if (cfg->bus > 0) {
		for (n = 0; (dinfo != NULL) && (n < pci_numdevs);
		    dinfo = STAILQ_NEXT(dinfo, pci_links), n++) {
			bcfg = &dinfo->cfg;
			if (bcfg->baseclass == PCIC_BRIDGE &&
			    bcfg->subclass == PCIS_BRIDGE_PCI &&
			    bcfg->secondarybus == cfg->bus)
				return (bcfg);
		}
	}

	/* No matching bridge found. */
	return (NULL);
}

#endif /* COMPAT_OLDPCI */
4087 pci_alloc_1intr(device_t dev, int msi_enable, int *rid0, u_int *flags0)
4094 type = PCI_INTR_TYPE_LEGACY;
4095 flags = RF_SHAREABLE | RF_ACTIVE;
4097 ksnprintf(env, sizeof(env), "hw.%s.msi.enable",
4098 device_get_nameunit(dev));
4099 kgetenv_int(env, &msi_enable);
4104 ksnprintf(env, sizeof(env), "hw.%s.msi.cpu",
4105 device_get_nameunit(dev));
4106 kgetenv_int(env, &cpu);
4110 if (pci_alloc_msi(dev, &rid, 1, cpu) == 0) {
4111 flags &= ~RF_SHAREABLE;
4112 type = PCI_INTR_TYPE_MSI;