2 * Copyright (c) 1997, Stefan Esser <se@kfreebsd.org>
3 * Copyright (c) 2000, Michael Smith <msmith@kfreebsd.org>
4 * Copyright (c) 2000, BSDi
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 * $FreeBSD: src/sys/dev/pci/pci.c,v 1.355.2.9.2.1 2009/04/15 03:14:26 kensmith Exp $
33 #include "opt_compat_oldpci.h"
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/malloc.h>
38 #include <sys/module.h>
39 #include <sys/linker.h>
40 #include <sys/fcntl.h>
42 #include <sys/kernel.h>
43 #include <sys/queue.h>
44 #include <sys/sysctl.h>
45 #include <sys/endian.h>
46 #include <sys/machintr.h>
48 #include <machine/msi_machdep.h>
52 #include <vm/vm_extern.h>
56 #include <sys/device.h>
58 #include <sys/pciio.h>
59 #include <bus/pci/pcireg.h>
60 #include <bus/pci/pcivar.h>
61 #include <bus/pci/pci_private.h>
67 #include <contrib/dev/acpica/acpi.h>
70 #define ACPI_PWR_FOR_SLEEP(x, y, z)
73 extern struct dev_ops pcic_ops; /* XXX */
75 typedef void (*pci_read_cap_t)(device_t, int, int, pcicfgregs *);
77 static uint32_t pci_mapbase(unsigned mapreg);
78 static const char *pci_maptype(unsigned mapreg);
79 static int pci_mapsize(unsigned testval);
80 static int pci_maprange(unsigned mapreg);
81 static void pci_fixancient(pcicfgregs *cfg);
83 static int pci_porten(device_t pcib, int b, int s, int f);
84 static int pci_memen(device_t pcib, int b, int s, int f);
85 static void pci_assign_interrupt(device_t bus, device_t dev,
87 static int pci_add_map(device_t pcib, device_t bus, device_t dev,
88 int b, int s, int f, int reg,
89 struct resource_list *rl, int force, int prefetch);
90 static int pci_probe(device_t dev);
91 static int pci_attach(device_t dev);
92 static void pci_child_detached(device_t, device_t);
93 static void pci_load_vendor_data(void);
94 static int pci_describe_parse_line(char **ptr, int *vendor,
95 int *device, char **desc);
96 static char *pci_describe_device(device_t dev);
97 static int pci_modevent(module_t mod, int what, void *arg);
98 static void pci_hdrtypedata(device_t pcib, int b, int s, int f,
100 static void pci_read_capabilities(device_t pcib, pcicfgregs *cfg);
101 static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg,
102 int reg, uint32_t *data);
104 static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg,
105 int reg, uint32_t data);
107 static void pci_read_vpd(device_t pcib, pcicfgregs *cfg);
108 static void pci_disable_msi(device_t dev);
109 static void pci_enable_msi(device_t dev, uint64_t address,
111 static void pci_setup_msix_vector(device_t dev, u_int index,
112 uint64_t address, uint32_t data);
113 static void pci_mask_msix_vector(device_t dev, u_int index);
114 static void pci_unmask_msix_vector(device_t dev, u_int index);
115 static void pci_mask_msix_allvectors(device_t dev);
116 static struct msix_vector *pci_find_msix_vector(device_t dev, int rid);
117 static int pci_msi_blacklisted(void);
118 static void pci_resume_msi(device_t dev);
119 static void pci_resume_msix(device_t dev);
120 static int pcie_slotimpl(const pcicfgregs *);
121 static void pci_print_verbose_expr(const pcicfgregs *);
123 static void pci_read_cap_pmgt(device_t, int, int, pcicfgregs *);
124 static void pci_read_cap_ht(device_t, int, int, pcicfgregs *);
125 static void pci_read_cap_msi(device_t, int, int, pcicfgregs *);
126 static void pci_read_cap_msix(device_t, int, int, pcicfgregs *);
127 static void pci_read_cap_vpd(device_t, int, int, pcicfgregs *);
128 static void pci_read_cap_subvendor(device_t, int, int,
130 static void pci_read_cap_pcix(device_t, int, int, pcicfgregs *);
131 static void pci_read_cap_express(device_t, int, int, pcicfgregs *);
133 static device_method_t pci_methods[] = {
134 /* Device interface */
135 DEVMETHOD(device_probe, pci_probe),
136 DEVMETHOD(device_attach, pci_attach),
137 DEVMETHOD(device_detach, bus_generic_detach),
138 DEVMETHOD(device_shutdown, bus_generic_shutdown),
139 DEVMETHOD(device_suspend, pci_suspend),
140 DEVMETHOD(device_resume, pci_resume),
143 DEVMETHOD(bus_print_child, pci_print_child),
144 DEVMETHOD(bus_probe_nomatch, pci_probe_nomatch),
145 DEVMETHOD(bus_read_ivar, pci_read_ivar),
146 DEVMETHOD(bus_write_ivar, pci_write_ivar),
147 DEVMETHOD(bus_driver_added, pci_driver_added),
148 DEVMETHOD(bus_child_detached, pci_child_detached),
149 DEVMETHOD(bus_setup_intr, pci_setup_intr),
150 DEVMETHOD(bus_teardown_intr, pci_teardown_intr),
152 DEVMETHOD(bus_get_resource_list,pci_get_resource_list),
153 DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
154 DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
155 DEVMETHOD(bus_delete_resource, pci_delete_resource),
156 DEVMETHOD(bus_alloc_resource, pci_alloc_resource),
157 DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource),
158 DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
159 DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
160 DEVMETHOD(bus_child_pnpinfo_str, pci_child_pnpinfo_str_method),
161 DEVMETHOD(bus_child_location_str, pci_child_location_str_method),
164 DEVMETHOD(pci_read_config, pci_read_config_method),
165 DEVMETHOD(pci_write_config, pci_write_config_method),
166 DEVMETHOD(pci_enable_busmaster, pci_enable_busmaster_method),
167 DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method),
168 DEVMETHOD(pci_enable_io, pci_enable_io_method),
169 DEVMETHOD(pci_disable_io, pci_disable_io_method),
170 DEVMETHOD(pci_get_vpd_ident, pci_get_vpd_ident_method),
171 DEVMETHOD(pci_get_vpd_readonly, pci_get_vpd_readonly_method),
172 DEVMETHOD(pci_get_powerstate, pci_get_powerstate_method),
173 DEVMETHOD(pci_set_powerstate, pci_set_powerstate_method),
174 DEVMETHOD(pci_assign_interrupt, pci_assign_interrupt_method),
175 DEVMETHOD(pci_find_extcap, pci_find_extcap_method),
176 DEVMETHOD(pci_alloc_msi, pci_alloc_msi_method),
177 DEVMETHOD(pci_release_msi, pci_release_msi_method),
178 DEVMETHOD(pci_alloc_msix_vector, pci_alloc_msix_vector_method),
179 DEVMETHOD(pci_release_msix_vector, pci_release_msix_vector_method),
180 DEVMETHOD(pci_msi_count, pci_msi_count_method),
181 DEVMETHOD(pci_msix_count, pci_msix_count_method),
186 DEFINE_CLASS_0(pci, pci_driver, pci_methods, 0);
188 static devclass_t pci_devclass;
189 DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, NULL);
190 MODULE_VERSION(pci, 1);
192 static char *pci_vendordata;
193 static size_t pci_vendordata_size;
196 static const struct pci_read_cap {
198 pci_read_cap_t read_cap;
199 } pci_read_caps[] = {
200 { PCIY_PMG, pci_read_cap_pmgt },
201 { PCIY_HT, pci_read_cap_ht },
202 { PCIY_MSI, pci_read_cap_msi },
203 { PCIY_MSIX, pci_read_cap_msix },
204 { PCIY_VPD, pci_read_cap_vpd },
205 { PCIY_SUBVENDOR, pci_read_cap_subvendor },
206 { PCIY_PCIX, pci_read_cap_pcix },
207 { PCIY_EXPRESS, pci_read_cap_express },
208 { 0, NULL } /* required last entry */
212 uint32_t devid; /* Vendor/device of the card */
214 #define PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */
215 #define PCI_QUIRK_DISABLE_MSI 2 /* MSI/MSI-X doesn't work */
220 struct pci_quirk pci_quirks[] = {
221 /* The Intel 82371AB and 82443MX has a map register at offset 0x90. */
222 { 0x71138086, PCI_QUIRK_MAP_REG, 0x90, 0 },
223 { 0x719b8086, PCI_QUIRK_MAP_REG, 0x90, 0 },
224 /* As does the Serverworks OSB4 (the SMBus mapping register) */
225 { 0x02001166, PCI_QUIRK_MAP_REG, 0x90, 0 },
228 * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge
229 * or the CMIC-SL (AKA ServerWorks GC_LE).
231 { 0x00141166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
232 { 0x00171166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
235 * MSI doesn't work on earlier Intel chipsets including
236 * E7500, E7501, E7505, 845, 865, 875/E7210, and 855.
238 { 0x25408086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
239 { 0x254c8086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
240 { 0x25508086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
241 { 0x25608086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
242 { 0x25708086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
243 { 0x25788086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
244 { 0x35808086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
247 * MSI doesn't work with devices behind the AMD 8131 HT-PCIX
250 { 0x74501022, PCI_QUIRK_DISABLE_MSI, 0, 0 },
255 /* map register information */
256 #define PCI_MAPMEM 0x01 /* memory map */
257 #define PCI_MAPMEMP 0x02 /* prefetchable memory map */
258 #define PCI_MAPPORT 0x04 /* port map */
260 #define PCI_MSIX_RID2VEC(rid) ((rid) - 1) /* rid -> MSI-X vector # */
261 #define PCI_MSIX_VEC2RID(vec) ((vec) + 1) /* MSI-X vector # -> rid */
263 struct devlist pci_devq;
264 uint32_t pci_generation;
265 uint32_t pci_numdevs = 0;
266 static int pcie_chipset, pcix_chipset;
269 SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD, 0, "PCI bus tuning parameters");
271 static int pci_enable_io_modes = 1;
272 TUNABLE_INT("hw.pci.enable_io_modes", &pci_enable_io_modes);
273 SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RW,
274 &pci_enable_io_modes, 1,
275 "Enable I/O and memory bits in the config register. Some BIOSes do not\n\
276 enable these bits correctly. We'd like to do this all the time, but there\n\
277 are some peripherals that this causes problems with.");
279 static int pci_do_power_nodriver = 0;
280 TUNABLE_INT("hw.pci.do_power_nodriver", &pci_do_power_nodriver);
281 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RW,
282 &pci_do_power_nodriver, 0,
283 "Place a function into D3 state when no driver attaches to it. 0 means\n\
284 disable. 1 means conservatively place devices into D3 state. 2 means\n\
285 aggressively place devices into D3 state. 3 means put absolutely everything\n\
288 static int pci_do_power_resume = 1;
289 TUNABLE_INT("hw.pci.do_power_resume", &pci_do_power_resume);
290 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RW,
291 &pci_do_power_resume, 1,
292 "Transition from D3 -> D0 on resume.");
294 static int pci_do_msi = 1;
295 TUNABLE_INT("hw.pci.enable_msi", &pci_do_msi);
296 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RW, &pci_do_msi, 1,
297 "Enable support for MSI interrupts");
299 static int pci_do_msix = 1;
300 TUNABLE_INT("hw.pci.enable_msix", &pci_do_msix);
301 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RW, &pci_do_msix, 1,
302 "Enable support for MSI-X interrupts");
304 static int pci_honor_msi_blacklist = 1;
305 TUNABLE_INT("hw.pci.honor_msi_blacklist", &pci_honor_msi_blacklist);
306 SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RD,
307 &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI");
309 static int pci_msi_cpuid;
311 /* Find a device_t by bus/slot/function in domain 0 */
314 pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func)
317 return (pci_find_dbsf(0, bus, slot, func));
320 /* Find a device_t by domain/bus/slot/function */
323 pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func)
325 struct pci_devinfo *dinfo;
327 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
328 if ((dinfo->cfg.domain == domain) &&
329 (dinfo->cfg.bus == bus) &&
330 (dinfo->cfg.slot == slot) &&
331 (dinfo->cfg.func == func)) {
332 return (dinfo->cfg.dev);
339 /* Find a device_t by vendor/device ID */
342 pci_find_device(uint16_t vendor, uint16_t device)
344 struct pci_devinfo *dinfo;
346 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
347 if ((dinfo->cfg.vendor == vendor) &&
348 (dinfo->cfg.device == device)) {
349 return (dinfo->cfg.dev);
356 /* return base address of memory or port map */
359 pci_mapbase(uint32_t mapreg)
362 if (PCI_BAR_MEM(mapreg))
363 return (mapreg & PCIM_BAR_MEM_BASE);
365 return (mapreg & PCIM_BAR_IO_BASE);
368 /* return map type of memory or port map */
371 pci_maptype(unsigned mapreg)
374 if (PCI_BAR_IO(mapreg))
376 if (mapreg & PCIM_BAR_MEM_PREFETCH)
377 return ("Prefetchable Memory");
381 /* return log2 of map size decoded for memory or port map */
384 pci_mapsize(uint32_t testval)
388 testval = pci_mapbase(testval);
391 while ((testval & 1) == 0)
400 /* return log2 of address range supported by map register */
403 pci_maprange(unsigned mapreg)
407 if (PCI_BAR_IO(mapreg))
410 switch (mapreg & PCIM_BAR_MEM_TYPE) {
411 case PCIM_BAR_MEM_32:
414 case PCIM_BAR_MEM_1MB:
417 case PCIM_BAR_MEM_64:
424 /* adjust some values from PCI 1.0 devices to match 2.0 standards ... */
427 pci_fixancient(pcicfgregs *cfg)
429 if (cfg->hdrtype != 0)
432 /* PCI to PCI bridges use header type 1 */
433 if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI)
437 /* extract header type specific config data */
440 pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg)
442 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
443 switch (cfg->hdrtype) {
445 cfg->subvendor = REG(PCIR_SUBVEND_0, 2);
446 cfg->subdevice = REG(PCIR_SUBDEV_0, 2);
447 cfg->nummaps = PCI_MAXMAPS_0;
450 cfg->nummaps = PCI_MAXMAPS_1;
452 cfg->secondarybus = REG(PCIR_SECBUS_1, 1);
456 cfg->subvendor = REG(PCIR_SUBVEND_2, 2);
457 cfg->subdevice = REG(PCIR_SUBDEV_2, 2);
458 cfg->nummaps = PCI_MAXMAPS_2;
460 cfg->secondarybus = REG(PCIR_SECBUS_2, 1);
467 /* read configuration header into pcicfgregs structure */
469 pci_read_device(device_t pcib, int d, int b, int s, int f, size_t size)
471 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
472 pcicfgregs *cfg = NULL;
473 struct pci_devinfo *devlist_entry;
474 struct devlist *devlist_head;
476 devlist_head = &pci_devq;
478 devlist_entry = NULL;
480 if (REG(PCIR_DEVVENDOR, 4) != -1) {
481 devlist_entry = kmalloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
483 cfg = &devlist_entry->cfg;
489 cfg->vendor = REG(PCIR_VENDOR, 2);
490 cfg->device = REG(PCIR_DEVICE, 2);
491 cfg->cmdreg = REG(PCIR_COMMAND, 2);
492 cfg->statreg = REG(PCIR_STATUS, 2);
493 cfg->baseclass = REG(PCIR_CLASS, 1);
494 cfg->subclass = REG(PCIR_SUBCLASS, 1);
495 cfg->progif = REG(PCIR_PROGIF, 1);
496 cfg->revid = REG(PCIR_REVID, 1);
497 cfg->hdrtype = REG(PCIR_HDRTYPE, 1);
498 cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1);
499 cfg->lattimer = REG(PCIR_LATTIMER, 1);
500 cfg->intpin = REG(PCIR_INTPIN, 1);
501 cfg->intline = REG(PCIR_INTLINE, 1);
503 cfg->mingnt = REG(PCIR_MINGNT, 1);
504 cfg->maxlat = REG(PCIR_MAXLAT, 1);
506 cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0;
507 cfg->hdrtype &= ~PCIM_MFDEV;
510 pci_hdrtypedata(pcib, b, s, f, cfg);
512 pci_read_capabilities(pcib, cfg);
514 STAILQ_INSERT_TAIL(devlist_head, devlist_entry, pci_links);
516 devlist_entry->conf.pc_sel.pc_domain = cfg->domain;
517 devlist_entry->conf.pc_sel.pc_bus = cfg->bus;
518 devlist_entry->conf.pc_sel.pc_dev = cfg->slot;
519 devlist_entry->conf.pc_sel.pc_func = cfg->func;
520 devlist_entry->conf.pc_hdr = cfg->hdrtype;
522 devlist_entry->conf.pc_subvendor = cfg->subvendor;
523 devlist_entry->conf.pc_subdevice = cfg->subdevice;
524 devlist_entry->conf.pc_vendor = cfg->vendor;
525 devlist_entry->conf.pc_device = cfg->device;
527 devlist_entry->conf.pc_class = cfg->baseclass;
528 devlist_entry->conf.pc_subclass = cfg->subclass;
529 devlist_entry->conf.pc_progif = cfg->progif;
530 devlist_entry->conf.pc_revid = cfg->revid;
535 return (devlist_entry);
540 pci_fixup_nextptr(int *nextptr0)
542 int nextptr = *nextptr0;
544 /* "Next pointer" is only one byte */
545 KASSERT(nextptr <= 0xff, ("Illegal next pointer %d\n", nextptr));
549 * PCI local bus spec 3.0:
551 * "... The bottom two bits of all pointers are reserved
552 * and must be implemented as 00b although software must
553 * mask them to allow for future uses of these bits ..."
556 kprintf("Illegal PCI extended capability "
557 "offset, fixup 0x%02x -> 0x%02x\n",
558 nextptr, nextptr & ~0x3);
564 if (nextptr < 0x40) {
566 kprintf("Illegal PCI extended capability "
567 "offset 0x%02x", nextptr);
575 pci_read_cap_pmgt(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
578 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
580 struct pcicfg_pp *pp = &cfg->pp;
585 pp->pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
586 pp->pp_status = ptr + PCIR_POWER_STATUS;
587 pp->pp_pmcsr = ptr + PCIR_POWER_PMCSR;
589 if ((nextptr - ptr) > PCIR_POWER_DATA) {
592 * We should write to data_select and read back from
593 * data_scale to determine whether data register is
597 pp->pp_data = ptr + PCIR_POWER_DATA;
607 pci_read_cap_ht(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
609 #if defined(__i386__) || defined(__x86_64__)
612 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
614 struct pcicfg_ht *ht = &cfg->ht;
618 /* Determine HT-specific capability type. */
619 val = REG(ptr + PCIR_HT_COMMAND, 2);
621 if ((val & 0xe000) == PCIM_HTCAP_SLAVE)
622 cfg->ht.ht_slave = ptr;
624 if ((val & PCIM_HTCMD_CAP_MASK) != PCIM_HTCAP_MSI_MAPPING)
627 if (!(val & PCIM_HTCMD_MSI_FIXED)) {
628 /* Sanity check the mapping window. */
629 addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI, 4);
631 addr |= REG(ptr + PCIR_HTMSI_ADDRESS_LO, 4);
632 if (addr != MSI_X86_ADDR_BASE) {
633 device_printf(pcib, "HT Bridge at pci%d:%d:%d:%d "
634 "has non-default MSI window 0x%llx\n",
635 cfg->domain, cfg->bus, cfg->slot, cfg->func,
639 addr = MSI_X86_ADDR_BASE;
643 ht->ht_msictrl = val;
644 ht->ht_msiaddr = addr;
648 #endif /* __i386__ || __x86_64__ */
652 pci_read_cap_msi(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
655 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
657 struct pcicfg_msi *msi = &cfg->msi;
659 msi->msi_location = ptr;
660 msi->msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2);
661 msi->msi_msgnum = 1 << ((msi->msi_ctrl & PCIM_MSICTRL_MMC_MASK) >> 1);
667 pci_read_cap_msix(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
670 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
672 struct pcicfg_msix *msix = &cfg->msix;
675 msix->msix_location = ptr;
676 msix->msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2);
677 msix->msix_msgnum = (msix->msix_ctrl & PCIM_MSIXCTRL_TABLE_SIZE) + 1;
679 val = REG(ptr + PCIR_MSIX_TABLE, 4);
680 msix->msix_table_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK);
681 msix->msix_table_offset = val & ~PCIM_MSIX_BIR_MASK;
683 val = REG(ptr + PCIR_MSIX_PBA, 4);
684 msix->msix_pba_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK);
685 msix->msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK;
687 TAILQ_INIT(&msix->msix_vectors);
693 pci_read_cap_vpd(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
695 cfg->vpd.vpd_reg = ptr;
699 pci_read_cap_subvendor(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
702 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
704 /* Should always be true. */
705 if ((cfg->hdrtype & PCIM_HDRTYPE) == 1) {
708 val = REG(ptr + PCIR_SUBVENDCAP_ID, 4);
709 cfg->subvendor = val & 0xffff;
710 cfg->subdevice = val >> 16;
717 pci_read_cap_pcix(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
720 * Assume we have a PCI-X chipset if we have
721 * at least one PCI-PCI bridge with a PCI-X
722 * capability. Note that some systems with
723 * PCI-express or HT chipsets might match on
724 * this check as well.
726 if ((cfg->hdrtype & PCIM_HDRTYPE) == 1)
729 cfg->pcix.pcix_ptr = ptr;
733 pcie_slotimpl(const pcicfgregs *cfg)
735 const struct pcicfg_expr *expr = &cfg->expr;
739 * Only version 1 can be parsed currently
741 if ((expr->expr_cap & PCIEM_CAP_VER_MASK) != PCIEM_CAP_VER_1)
745 * - Slot implemented bit is meaningful iff current port is
746 * root port or down stream port.
747 * - Testing for root port or down stream port is meanningful
748 * iff PCI configure has type 1 header.
751 if (cfg->hdrtype != 1)
754 port_type = expr->expr_cap & PCIEM_CAP_PORT_TYPE;
755 if (port_type != PCIE_ROOT_PORT && port_type != PCIE_DOWN_STREAM_PORT)
758 if (!(expr->expr_cap & PCIEM_CAP_SLOT_IMPL))
765 pci_read_cap_express(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
768 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
770 struct pcicfg_expr *expr = &cfg->expr;
773 * Assume we have a PCI-express chipset if we have
774 * at least one PCI-express device.
778 expr->expr_ptr = ptr;
779 expr->expr_cap = REG(ptr + PCIER_CAPABILITY, 2);
782 * Only version 1 can be parsed currently
784 if ((expr->expr_cap & PCIEM_CAP_VER_MASK) != PCIEM_CAP_VER_1)
788 * Read slot capabilities. Slot capabilities exists iff
789 * current port's slot is implemented
791 if (pcie_slotimpl(cfg))
792 expr->expr_slotcap = REG(ptr + PCIER_SLOTCAP, 4);
798 pci_read_capabilities(device_t pcib, pcicfgregs *cfg)
800 #define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
801 #define WREG(n, v, w) PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w)
806 if ((REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT) == 0) {
807 /* No capabilities */
811 switch (cfg->hdrtype & PCIM_HDRTYPE) {
814 ptrptr = PCIR_CAP_PTR;
817 ptrptr = PCIR_CAP_PTR_2; /* cardbus capabilities ptr */
820 return; /* no capabilities support */
822 nextptr = REG(ptrptr, 1); /* sanity check? */
825 * Read capability entries.
827 while (pci_fixup_nextptr(&nextptr)) {
828 const struct pci_read_cap *rc;
831 /* Find the next entry */
832 nextptr = REG(ptr + PCICAP_NEXTPTR, 1);
834 /* Process this entry */
835 val = REG(ptr + PCICAP_ID, 1);
836 for (rc = pci_read_caps; rc->read_cap != NULL; ++rc) {
837 if (rc->cap == val) {
838 rc->read_cap(pcib, ptr, nextptr, cfg);
844 #if defined(__i386__) || defined(__x86_64__)
846 * Enable the MSI mapping window for all HyperTransport
847 * slaves. PCI-PCI bridges have their windows enabled via
850 if (cfg->ht.ht_slave != 0 && cfg->ht.ht_msimap != 0 &&
851 !(cfg->ht.ht_msictrl & PCIM_HTCMD_MSI_ENABLE)) {
853 "Enabling MSI window for HyperTransport slave at pci%d:%d:%d:%d\n",
854 cfg->domain, cfg->bus, cfg->slot, cfg->func);
855 cfg->ht.ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
856 WREG(cfg->ht.ht_msimap + PCIR_HT_COMMAND, cfg->ht.ht_msictrl,
861 /* REG and WREG use carry through to next functions */
865 * PCI Vital Product Data
868 #define PCI_VPD_TIMEOUT 1000000
871 pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data)
873 int count = PCI_VPD_TIMEOUT;
875 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
877 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2);
879 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000) {
882 DELAY(1); /* limit looping */
884 *data = (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4));
891 pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data)
893 int count = PCI_VPD_TIMEOUT;
895 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
897 WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4);
898 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2);
899 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000) {
902 DELAY(1); /* limit looping */
909 #undef PCI_VPD_TIMEOUT
911 struct vpd_readstate {
921 vpd_nextbyte(struct vpd_readstate *vrs, uint8_t *data)
926 if (vrs->bytesinval == 0) {
927 if (pci_read_vpd_reg(vrs->pcib, vrs->cfg, vrs->off, ®))
929 vrs->val = le32toh(reg);
931 byte = vrs->val & 0xff;
934 vrs->val = vrs->val >> 8;
935 byte = vrs->val & 0xff;
945 pcie_slot_implemented(device_t dev)
947 struct pci_devinfo *dinfo = device_get_ivars(dev);
949 return pcie_slotimpl(&dinfo->cfg);
953 pcie_set_max_readrq(device_t dev, uint16_t rqsize)
958 rqsize &= PCIEM_DEVCTL_MAX_READRQ_MASK;
959 if (rqsize > PCIEM_DEVCTL_MAX_READRQ_4096) {
960 panic("%s: invalid max read request size 0x%02x\n",
961 device_get_nameunit(dev), rqsize);
964 expr_ptr = pci_get_pciecap_ptr(dev);
966 panic("%s: not PCIe device\n", device_get_nameunit(dev));
968 val = pci_read_config(dev, expr_ptr + PCIER_DEVCTRL, 2);
969 if ((val & PCIEM_DEVCTL_MAX_READRQ_MASK) != rqsize) {
971 device_printf(dev, "adjust device control 0x%04x", val);
973 val &= ~PCIEM_DEVCTL_MAX_READRQ_MASK;
975 pci_write_config(dev, expr_ptr + PCIER_DEVCTRL, val, 2);
978 kprintf(" -> 0x%04x\n", val);
983 pcie_get_max_readrq(device_t dev)
988 expr_ptr = pci_get_pciecap_ptr(dev);
990 panic("%s: not PCIe device\n", device_get_nameunit(dev));
992 val = pci_read_config(dev, expr_ptr + PCIER_DEVCTRL, 2);
993 return (val & PCIEM_DEVCTL_MAX_READRQ_MASK);
997 pci_read_vpd(device_t pcib, pcicfgregs *cfg)
999 struct vpd_readstate vrs;
1004 int alloc, off; /* alloc/off for RO/W arrays */
1010 /* init vpd reader */
1018 name = remain = i = 0; /* shut up stupid gcc */
1019 alloc = off = 0; /* shut up stupid gcc */
1020 dflen = 0; /* shut up stupid gcc */
1022 while (state >= 0) {
1023 if (vpd_nextbyte(&vrs, &byte)) {
1028 kprintf("vpd: val: %#x, off: %d, bytesinval: %d, byte: %#hhx, " \
1029 "state: %d, remain: %d, name: %#x, i: %d\n", vrs.val,
1030 vrs.off, vrs.bytesinval, byte, state, remain, name, i);
1033 case 0: /* item name */
1035 if (vpd_nextbyte(&vrs, &byte2)) {
1040 if (vpd_nextbyte(&vrs, &byte2)) {
1044 remain |= byte2 << 8;
1045 if (remain > (0x7f*4 - vrs.off)) {
1048 "pci%d:%d:%d:%d: invalid VPD data, remain %#x\n",
1049 cfg->domain, cfg->bus, cfg->slot,
1054 remain = byte & 0x7;
1055 name = (byte >> 3) & 0xf;
1058 case 0x2: /* String */
1059 cfg->vpd.vpd_ident = kmalloc(remain + 1,
1060 M_DEVBUF, M_WAITOK);
1067 case 0x10: /* VPD-R */
1070 cfg->vpd.vpd_ros = kmalloc(alloc *
1071 sizeof(*cfg->vpd.vpd_ros), M_DEVBUF,
1075 case 0x11: /* VPD-W */
1078 cfg->vpd.vpd_w = kmalloc(alloc *
1079 sizeof(*cfg->vpd.vpd_w), M_DEVBUF,
1083 default: /* Invalid data, abort */
1089 case 1: /* Identifier String */
1090 cfg->vpd.vpd_ident[i++] = byte;
1093 cfg->vpd.vpd_ident[i] = '\0';
1098 case 2: /* VPD-R Keyword Header */
1100 cfg->vpd.vpd_ros = krealloc(cfg->vpd.vpd_ros,
1101 (alloc *= 2) * sizeof(*cfg->vpd.vpd_ros),
1102 M_DEVBUF, M_WAITOK | M_ZERO);
1104 cfg->vpd.vpd_ros[off].keyword[0] = byte;
1105 if (vpd_nextbyte(&vrs, &byte2)) {
1109 cfg->vpd.vpd_ros[off].keyword[1] = byte2;
1110 if (vpd_nextbyte(&vrs, &byte2)) {
1116 strncmp(cfg->vpd.vpd_ros[off].keyword, "RV",
1119 * if this happens, we can't trust the rest
1123 "pci%d:%d:%d:%d: bad keyword length: %d\n",
1124 cfg->domain, cfg->bus, cfg->slot,
1129 } else if (dflen == 0) {
1130 cfg->vpd.vpd_ros[off].value = kmalloc(1 *
1131 sizeof(*cfg->vpd.vpd_ros[off].value),
1132 M_DEVBUF, M_WAITOK);
1133 cfg->vpd.vpd_ros[off].value[0] = '\x00';
1135 cfg->vpd.vpd_ros[off].value = kmalloc(
1137 sizeof(*cfg->vpd.vpd_ros[off].value),
1138 M_DEVBUF, M_WAITOK);
1141 /* keep in sync w/ state 3's transistions */
1142 if (dflen == 0 && remain == 0)
1144 else if (dflen == 0)
1150 case 3: /* VPD-R Keyword Value */
1151 cfg->vpd.vpd_ros[off].value[i++] = byte;
1152 if (strncmp(cfg->vpd.vpd_ros[off].keyword,
1153 "RV", 2) == 0 && cksumvalid == -1) {
1159 "pci%d:%d:%d:%d: bad VPD cksum, remain %hhu\n",
1160 cfg->domain, cfg->bus,
1161 cfg->slot, cfg->func,
1170 /* keep in sync w/ state 2's transistions */
1172 cfg->vpd.vpd_ros[off++].value[i++] = '\0';
1173 if (dflen == 0 && remain == 0) {
1174 cfg->vpd.vpd_rocnt = off;
1175 cfg->vpd.vpd_ros = krealloc(cfg->vpd.vpd_ros,
1176 off * sizeof(*cfg->vpd.vpd_ros),
1177 M_DEVBUF, M_WAITOK | M_ZERO);
1179 } else if (dflen == 0)
1189 case 5: /* VPD-W Keyword Header */
1191 cfg->vpd.vpd_w = krealloc(cfg->vpd.vpd_w,
1192 (alloc *= 2) * sizeof(*cfg->vpd.vpd_w),
1193 M_DEVBUF, M_WAITOK | M_ZERO);
1195 cfg->vpd.vpd_w[off].keyword[0] = byte;
1196 if (vpd_nextbyte(&vrs, &byte2)) {
1200 cfg->vpd.vpd_w[off].keyword[1] = byte2;
1201 if (vpd_nextbyte(&vrs, &byte2)) {
1205 cfg->vpd.vpd_w[off].len = dflen = byte2;
1206 cfg->vpd.vpd_w[off].start = vrs.off - vrs.bytesinval;
1207 cfg->vpd.vpd_w[off].value = kmalloc((dflen + 1) *
1208 sizeof(*cfg->vpd.vpd_w[off].value),
1209 M_DEVBUF, M_WAITOK);
1212 /* keep in sync w/ state 6's transistions */
1213 if (dflen == 0 && remain == 0)
1215 else if (dflen == 0)
1221 case 6: /* VPD-W Keyword Value */
1222 cfg->vpd.vpd_w[off].value[i++] = byte;
1225 /* keep in sync w/ state 5's transistions */
1227 cfg->vpd.vpd_w[off++].value[i++] = '\0';
1228 if (dflen == 0 && remain == 0) {
1229 cfg->vpd.vpd_wcnt = off;
1230 cfg->vpd.vpd_w = krealloc(cfg->vpd.vpd_w,
1231 off * sizeof(*cfg->vpd.vpd_w),
1232 M_DEVBUF, M_WAITOK | M_ZERO);
1234 } else if (dflen == 0)
1239 kprintf("pci%d:%d:%d:%d: invalid state: %d\n",
1240 cfg->domain, cfg->bus, cfg->slot, cfg->func,
1247 if (cksumvalid == 0 || state < -1) {
1248 /* read-only data bad, clean up */
1249 if (cfg->vpd.vpd_ros != NULL) {
1250 for (off = 0; cfg->vpd.vpd_ros[off].value; off++)
1251 kfree(cfg->vpd.vpd_ros[off].value, M_DEVBUF);
1252 kfree(cfg->vpd.vpd_ros, M_DEVBUF);
1253 cfg->vpd.vpd_ros = NULL;
1257 /* I/O error, clean up */
1258 kprintf("pci%d:%d:%d:%d: failed to read VPD data.\n",
1259 cfg->domain, cfg->bus, cfg->slot, cfg->func);
1260 if (cfg->vpd.vpd_ident != NULL) {
1261 kfree(cfg->vpd.vpd_ident, M_DEVBUF);
1262 cfg->vpd.vpd_ident = NULL;
1264 if (cfg->vpd.vpd_w != NULL) {
1265 for (off = 0; cfg->vpd.vpd_w[off].value; off++)
1266 kfree(cfg->vpd.vpd_w[off].value, M_DEVBUF);
1267 kfree(cfg->vpd.vpd_w, M_DEVBUF);
1268 cfg->vpd.vpd_w = NULL;
1271 cfg->vpd.vpd_cached = 1;
1277 pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr)
1279 struct pci_devinfo *dinfo = device_get_ivars(child);
1280 pcicfgregs *cfg = &dinfo->cfg;
1282 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1283 pci_read_vpd(device_get_parent(dev), cfg);
1285 *identptr = cfg->vpd.vpd_ident;
1287 if (*identptr == NULL)
1294 pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw,
1297 struct pci_devinfo *dinfo = device_get_ivars(child);
1298 pcicfgregs *cfg = &dinfo->cfg;
1301 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1302 pci_read_vpd(device_get_parent(dev), cfg);
1304 for (i = 0; i < cfg->vpd.vpd_rocnt; i++)
1305 if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword,
1306 sizeof(cfg->vpd.vpd_ros[i].keyword)) == 0) {
1307 *vptr = cfg->vpd.vpd_ros[i].value;
1310 if (i != cfg->vpd.vpd_rocnt)
1318 * Return the offset in configuration space of the requested extended
1319 * capability entry or 0 if the specified capability was not found.
1322 pci_find_extcap_method(device_t dev, device_t child, int capability,
1325 struct pci_devinfo *dinfo = device_get_ivars(child);
1326 pcicfgregs *cfg = &dinfo->cfg;
1331 * Check the CAP_LIST bit of the PCI status register first.
1333 status = pci_read_config(child, PCIR_STATUS, 2);
1334 if (!(status & PCIM_STATUS_CAPPRESENT))
1338 * Determine the start pointer of the capabilities list.
1340 switch (cfg->hdrtype & PCIM_HDRTYPE) {
1346 ptr = PCIR_CAP_PTR_2;
1350 return (ENXIO); /* no extended capabilities support */
1352 ptr = pci_read_config(child, ptr, 1);
1355 * Traverse the capabilities list.
1358 if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
1363 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1370 * Support for MSI-X message interrupts.
1373 pci_setup_msix_vector(device_t dev, u_int index, uint64_t address,
1376 struct pci_devinfo *dinfo = device_get_ivars(dev);
1377 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1380 KASSERT(msix->msix_msgnum > index, ("bogus index"));
1381 offset = msix->msix_table_offset + index * 16;
1382 bus_write_4(msix->msix_table_res, offset, address & 0xffffffff);
1383 bus_write_4(msix->msix_table_res, offset + 4, address >> 32);
1384 bus_write_4(msix->msix_table_res, offset + 8, data);
1386 /* Enable MSI -> HT mapping. */
1387 pci_ht_map_msi(dev, address);
1391 pci_mask_msix_vector(device_t dev, u_int index)
1393 struct pci_devinfo *dinfo = device_get_ivars(dev);
1394 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1395 uint32_t offset, val;
1397 KASSERT(msix->msix_msgnum > index, ("bogus index"));
1398 offset = msix->msix_table_offset + index * 16 + 12;
1399 val = bus_read_4(msix->msix_table_res, offset);
1400 if (!(val & PCIM_MSIX_VCTRL_MASK)) {
1401 val |= PCIM_MSIX_VCTRL_MASK;
1402 bus_write_4(msix->msix_table_res, offset, val);
1407 pci_unmask_msix_vector(device_t dev, u_int index)
1409 struct pci_devinfo *dinfo = device_get_ivars(dev);
1410 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1411 uint32_t offset, val;
1413 KASSERT(msix->msix_msgnum > index, ("bogus index"));
1414 offset = msix->msix_table_offset + index * 16 + 12;
1415 val = bus_read_4(msix->msix_table_res, offset);
1416 if (val & PCIM_MSIX_VCTRL_MASK) {
1417 val &= ~PCIM_MSIX_VCTRL_MASK;
1418 bus_write_4(msix->msix_table_res, offset, val);
1423 pci_pending_msix_vector(device_t dev, u_int index)
1425 struct pci_devinfo *dinfo = device_get_ivars(dev);
1426 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1427 uint32_t offset, bit;
1429 KASSERT(msix->msix_table_res != NULL && msix->msix_pba_res != NULL,
1430 ("MSI-X is not setup yet\n"));
1432 KASSERT(msix->msix_msgnum > index, ("bogus index"));
1433 offset = msix->msix_pba_offset + (index / 32) * 4;
1434 bit = 1 << index % 32;
1435 return (bus_read_4(msix->msix_pba_res, offset) & bit);
1439 * Restore MSI-X registers and table during resume. If MSI-X is
1440 * enabled then walk the virtual table to restore the actual MSI-X
1444 pci_resume_msix(device_t dev)
1446 struct pci_devinfo *dinfo = device_get_ivars(dev);
1447 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1449 if (msix->msix_table_res != NULL) {
1450 const struct msix_vector *mv;
1452 pci_mask_msix_allvectors(dev);
1454 TAILQ_FOREACH(mv, &msix->msix_vectors, mv_link) {
1457 if (mv->mv_address == 0)
1460 vector = PCI_MSIX_RID2VEC(mv->mv_rid);
1461 pci_setup_msix_vector(dev, vector,
1462 mv->mv_address, mv->mv_data);
1463 pci_unmask_msix_vector(dev, vector);
1466 pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
1467 msix->msix_ctrl, 2);
1471 * Attempt to allocate one MSI-X message at the specified vector on cpuid.
1473 * After this function returns, the MSI-X's rid will be saved in rid0.
1476 pci_alloc_msix_vector_method(device_t dev, device_t child, u_int vector,
1477 int *rid0, int cpuid)
1479 struct pci_devinfo *dinfo = device_get_ivars(child);
1480 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1481 struct msix_vector *mv;
1482 struct resource_list_entry *rle;
1483 int error, irq, rid;
1485 KASSERT(msix->msix_table_res != NULL &&
1486 msix->msix_pba_res != NULL, ("MSI-X is not setup yet\n"));
1487 KASSERT(cpuid >= 0 && cpuid < ncpus, ("invalid cpuid %d\n", cpuid));
1488 KASSERT(vector < msix->msix_msgnum,
1489 ("invalid MSI-X vector %u, total %d\n", vector, msix->msix_msgnum));
1492 device_printf(child,
1493 "attempting to allocate MSI-X #%u vector (%d supported)\n",
1494 vector, msix->msix_msgnum);
1497 /* Set rid according to vector number */
1498 rid = PCI_MSIX_VEC2RID(vector);
1500 /* Vector has already been allocated */
1501 mv = pci_find_msix_vector(child, rid);
1505 /* Allocate a message. */
1506 error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq, cpuid);
1509 resource_list_add(&dinfo->resources, SYS_RES_IRQ, rid,
1510 irq, irq, 1, cpuid);
1513 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
1514 device_printf(child, "using IRQ %lu for MSI-X on cpu%d\n",
1518 /* Update counts of alloc'd messages. */
1521 mv = kmalloc(sizeof(*mv), M_DEVBUF, M_WAITOK | M_ZERO);
1523 TAILQ_INSERT_TAIL(&msix->msix_vectors, mv, mv_link);
1530 pci_release_msix_vector_method(device_t dev, device_t child, int rid)
1532 struct pci_devinfo *dinfo = device_get_ivars(child);
1533 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1534 struct resource_list_entry *rle;
1535 struct msix_vector *mv;
1538 KASSERT(msix->msix_table_res != NULL &&
1539 msix->msix_pba_res != NULL, ("MSI-X is not setup yet\n"));
1540 KASSERT(msix->msix_alloc > 0, ("No MSI-X allocated\n"));
1541 KASSERT(rid > 0, ("invalid rid %d\n", rid));
1543 mv = pci_find_msix_vector(child, rid);
1544 KASSERT(mv != NULL, ("MSI-X rid %d is not allocated\n", rid));
1545 KASSERT(mv->mv_address == 0, ("MSI-X rid %d not teardown\n", rid));
1547 /* Make sure resource is no longer allocated. */
1548 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
1549 KASSERT(rle != NULL, ("missing MSI-X resource, rid %d\n", rid));
1550 KASSERT(rle->res == NULL,
1551 ("MSI-X resource is still allocated, rid %d\n", rid));
1556 /* Free the resource list entries. */
1557 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, rid);
1559 /* Release the IRQ. */
1560 PCIB_RELEASE_MSIX(device_get_parent(dev), child, irq, cpuid);
1562 TAILQ_REMOVE(&msix->msix_vectors, mv, mv_link);
1563 kfree(mv, M_DEVBUF);
1570 * Return the max supported MSI-X messages this device supports.
1571 * Basically, assuming the MD code can alloc messages, this function
1572 * should return the maximum value that pci_alloc_msix() can return.
1573 * Thus, it is subject to the tunables, etc.
1576 pci_msix_count_method(device_t dev, device_t child)
1578 struct pci_devinfo *dinfo = device_get_ivars(child);
1579 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1581 if (pci_do_msix && msix->msix_location != 0)
1582 return (msix->msix_msgnum);
1587 pci_setup_msix(device_t dev)
1589 struct pci_devinfo *dinfo = device_get_ivars(dev);
1590 pcicfgregs *cfg = &dinfo->cfg;
1591 struct resource_list_entry *rle;
1592 struct resource *table_res, *pba_res;
1594 KASSERT(cfg->msix.msix_table_res == NULL &&
1595 cfg->msix.msix_pba_res == NULL, ("MSI-X has been setup yet\n"));
1597 /* If rid 0 is allocated, then fail. */
1598 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
1599 if (rle != NULL && rle->res != NULL)
1602 /* Already have allocated MSIs? */
1603 if (cfg->msi.msi_alloc != 0)
1606 /* If MSI is blacklisted for this system, fail. */
1607 if (pci_msi_blacklisted())
1610 /* MSI-X capability present? */
1611 if (cfg->msix.msix_location == 0 || cfg->msix.msix_msgnum == 0 ||
1615 KASSERT(cfg->msix.msix_alloc == 0 &&
1616 TAILQ_EMPTY(&cfg->msix.msix_vectors),
1617 ("MSI-X vector has been allocated\n"));
1619 /* Make sure the appropriate BARs are mapped. */
1620 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1621 cfg->msix.msix_table_bar);
1622 if (rle == NULL || rle->res == NULL ||
1623 !(rman_get_flags(rle->res) & RF_ACTIVE))
1625 table_res = rle->res;
1626 if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
1627 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1628 cfg->msix.msix_pba_bar);
1629 if (rle == NULL || rle->res == NULL ||
1630 !(rman_get_flags(rle->res) & RF_ACTIVE))
1635 cfg->msix.msix_table_res = table_res;
1636 cfg->msix.msix_pba_res = pba_res;
1638 pci_mask_msix_allvectors(dev);
1644 pci_teardown_msix(device_t dev)
1646 struct pci_devinfo *dinfo = device_get_ivars(dev);
1647 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1649 KASSERT(msix->msix_table_res != NULL &&
1650 msix->msix_pba_res != NULL, ("MSI-X is not setup yet\n"));
1651 KASSERT(msix->msix_alloc == 0 && TAILQ_EMPTY(&msix->msix_vectors),
1652 ("MSI-X vector is still allocated\n"));
1654 pci_mask_msix_allvectors(dev);
1656 msix->msix_table_res = NULL;
1657 msix->msix_pba_res = NULL;
1661 pci_enable_msix(device_t dev)
1663 struct pci_devinfo *dinfo = device_get_ivars(dev);
1664 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1666 KASSERT(msix->msix_table_res != NULL &&
1667 msix->msix_pba_res != NULL, ("MSI-X is not setup yet\n"));
1669 /* Update control register to enable MSI-X. */
1670 msix->msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1671 pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
1672 msix->msix_ctrl, 2);
1676 pci_disable_msix(device_t dev)
1678 struct pci_devinfo *dinfo = device_get_ivars(dev);
1679 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1681 KASSERT(msix->msix_table_res != NULL &&
1682 msix->msix_pba_res != NULL, ("MSI-X is not setup yet\n"));
1684 /* Disable MSI -> HT mapping. */
1685 pci_ht_map_msi(dev, 0);
1687 /* Update control register to disable MSI-X. */
1688 msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE;
1689 pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
1690 msix->msix_ctrl, 2);
1694 pci_mask_msix_allvectors(device_t dev)
1696 struct pci_devinfo *dinfo = device_get_ivars(dev);
1699 for (i = 0; i < dinfo->cfg.msix.msix_msgnum; ++i)
1700 pci_mask_msix_vector(dev, i);
1703 static struct msix_vector *
1704 pci_find_msix_vector(device_t dev, int rid)
1706 struct pci_devinfo *dinfo = device_get_ivars(dev);
1707 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1708 struct msix_vector *mv;
1710 TAILQ_FOREACH(mv, &msix->msix_vectors, mv_link) {
1711 if (mv->mv_rid == rid)
1718 * HyperTransport MSI mapping control
1721 pci_ht_map_msi(device_t dev, uint64_t addr)
1723 struct pci_devinfo *dinfo = device_get_ivars(dev);
1724 struct pcicfg_ht *ht = &dinfo->cfg.ht;
1729 if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) &&
1730 ht->ht_msiaddr >> 20 == addr >> 20) {
1731 /* Enable MSI -> HT mapping. */
1732 ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
1733 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1737 if (!addr && (ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE)) {
1738 /* Disable MSI -> HT mapping. */
1739 ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE;
1740 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1746 * Support for MSI message signalled interrupts.
1749 pci_enable_msi(device_t dev, uint64_t address, uint16_t data)
1751 struct pci_devinfo *dinfo = device_get_ivars(dev);
1752 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1754 /* Write data and address values. */
1755 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
1756 address & 0xffffffff, 4);
1757 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1758 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR_HIGH,
1760 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA_64BIT,
1763 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA, data,
1766 /* Enable MSI in the control register. */
1767 msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE;
1768 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1771 /* Enable MSI -> HT mapping. */
1772 pci_ht_map_msi(dev, address);
1776 pci_disable_msi(device_t dev)
1778 struct pci_devinfo *dinfo = device_get_ivars(dev);
1779 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1781 /* Disable MSI -> HT mapping. */
1782 pci_ht_map_msi(dev, 0);
1784 /* Disable MSI in the control register. */
1785 msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE;
1786 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1791 * Restore MSI registers during resume. If MSI is enabled then
1792 * restore the data and address registers in addition to the control
1796 pci_resume_msi(device_t dev)
1798 struct pci_devinfo *dinfo = device_get_ivars(dev);
1799 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1803 if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) {
1804 address = msi->msi_addr;
1805 data = msi->msi_data;
1806 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
1807 address & 0xffffffff, 4);
1808 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1809 pci_write_config(dev, msi->msi_location +
1810 PCIR_MSI_ADDR_HIGH, address >> 32, 4);
1811 pci_write_config(dev, msi->msi_location +
1812 PCIR_MSI_DATA_64BIT, data, 2);
1814 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA,
1817 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1822 * Returns true if the specified device is blacklisted because MSI
1826 pci_msi_device_blacklisted(device_t dev)
1828 struct pci_quirk *q;
1830 if (!pci_honor_msi_blacklist)
1833 for (q = &pci_quirks[0]; q->devid; q++) {
1834 if (q->devid == pci_get_devid(dev) &&
1835 q->type == PCI_QUIRK_DISABLE_MSI)
1842 * Determine if MSI is blacklisted globally on this system. Currently,
1843 * we just check for blacklisted chipsets as represented by the
1844 * host-PCI bridge at device 0:0:0. In the future, it may become
1845 * necessary to check other system attributes, such as the kenv values
1846 * that give the motherboard manufacturer and model number.
1849 pci_msi_blacklisted(void)
1853 if (!pci_honor_msi_blacklist)
1856 /* Blacklist all non-PCI-express and non-PCI-X chipsets. */
1857 if (!(pcie_chipset || pcix_chipset))
1860 dev = pci_find_bsf(0, 0, 0);
1862 return (pci_msi_device_blacklisted(dev));
1867 * Attempt to allocate count MSI messages on start_cpuid.
1869 * If start_cpuid < 0, then the MSI messages' target CPU will be
1870 * selected automatically.
1872 * If the caller explicitly specified the MSI messages' target CPU,
1873 * i.e. start_cpuid >= 0, then we will try to allocate the count MSI
1874 * messages on the specified CPU, if the allocation fails due to MD
1875 * does not have enough vectors (EMSGSIZE), then we will try next
1876 * available CPU, until the allocation fails on all CPUs.
1878 * EMSGSIZE will be returned, if all available CPUs do not have
1879 * enough vectors for the requested amount of MSI messages. Caller
1880 * should either reduce the amount of MSI messages to be requested,
1881 * or simply give up using MSI.
1883 * The available SYS_RES_IRQ resources' rids, which are >= 1, are
1884 * returned in 'rid' array, if the allocation succeeds.
1887 pci_alloc_msi_method(device_t dev, device_t child, int *rid, int count,
1890 struct pci_devinfo *dinfo = device_get_ivars(child);
1891 pcicfgregs *cfg = &dinfo->cfg;
1892 struct resource_list_entry *rle;
1893 int error, i, irqs[32], cpuid = 0;
1896 KASSERT(count != 0 && count <= 32 && powerof2(count),
1897 ("invalid MSI count %d\n", count));
1898 KASSERT(start_cpuid < ncpus, ("invalid cpuid %d\n", start_cpuid));
1900 /* If rid 0 is allocated, then fail. */
1901 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
1902 if (rle != NULL && rle->res != NULL)
1905 /* Already have allocated messages? */
1906 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_table_res != NULL)
1909 /* If MSI is blacklisted for this system, fail. */
1910 if (pci_msi_blacklisted())
1913 /* MSI capability present? */
1914 if (cfg->msi.msi_location == 0 || cfg->msi.msi_msgnum == 0 ||
1918 KASSERT(count <= cfg->msi.msi_msgnum, ("large MSI count %d, max %d\n",
1919 count, cfg->msi.msi_msgnum));
1922 device_printf(child,
1923 "attempting to allocate %d MSI vectors (%d supported)\n",
1924 count, cfg->msi.msi_msgnum);
1927 if (start_cpuid < 0)
1928 start_cpuid = atomic_fetchadd_int(&pci_msi_cpuid, 1) % ncpus;
1931 for (i = 0; i < ncpus; ++i) {
1932 cpuid = (start_cpuid + i) % ncpus;
1934 error = PCIB_ALLOC_MSI(device_get_parent(dev), child, count,
1935 cfg->msi.msi_msgnum, irqs, cpuid);
1938 else if (error != EMSGSIZE)
1945 * We now have N messages mapped onto SYS_RES_IRQ resources in
1946 * the irqs[] array, so add new resources starting at rid 1.
1948 for (i = 0; i < count; i++) {
1950 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
1951 irqs[i], irqs[i], 1, cpuid);
1956 device_printf(child, "using IRQ %d on cpu%d for MSI\n",
1962 * Be fancy and try to print contiguous runs
1963 * of IRQ values as ranges. 'run' is true if
1964 * we are in a range.
1966 device_printf(child, "using IRQs %d", irqs[0]);
1968 for (i = 1; i < count; i++) {
1970 /* Still in a run? */
1971 if (irqs[i] == irqs[i - 1] + 1) {
1976 /* Finish previous range. */
1978 kprintf("-%d", irqs[i - 1]);
1982 /* Start new range. */
1983 kprintf(",%d", irqs[i]);
1986 /* Unfinished range? */
1988 kprintf("-%d", irqs[count - 1]);
1989 kprintf(" for MSI on cpu%d\n", cpuid);
1993 /* Update control register with count. */
1994 ctrl = cfg->msi.msi_ctrl;
1995 ctrl &= ~PCIM_MSICTRL_MME_MASK;
1996 ctrl |= (ffs(count) - 1) << 4;
1997 cfg->msi.msi_ctrl = ctrl;
1998 pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2);
2000 /* Update counts of alloc'd messages. */
2001 cfg->msi.msi_alloc = count;
2002 cfg->msi.msi_handlers = 0;
2006 /* Release the MSI messages associated with this device. */
2008 pci_release_msi_method(device_t dev, device_t child)
2010 struct pci_devinfo *dinfo = device_get_ivars(child);
2011 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2012 struct resource_list_entry *rle;
2013 int i, irqs[32], cpuid = -1;
2015 /* Do we have any messages to release? */
2016 if (msi->msi_alloc == 0)
2018 KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages"));
2020 /* Make sure none of the resources are allocated. */
2021 if (msi->msi_handlers > 0)
2023 for (i = 0; i < msi->msi_alloc; i++) {
2024 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
2025 KASSERT(rle != NULL, ("missing MSI resource"));
2026 if (rle->res != NULL)
2030 KASSERT(cpuid >= 0 && cpuid < ncpus,
2031 ("invalid MSI target cpuid %d\n", cpuid));
2033 KASSERT(rle->cpuid == cpuid,
2034 ("MSI targets different cpus, "
2035 "was cpu%d, now cpu%d", cpuid, rle->cpuid));
2037 irqs[i] = rle->start;
2040 /* Update control register with 0 count. */
2041 KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE),
2042 ("%s: MSI still enabled", __func__));
2043 msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK;
2044 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
2047 /* Release the messages. */
2048 PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs,
2050 for (i = 0; i < msi->msi_alloc; i++)
2051 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
2053 /* Update alloc count. */
2061 * Return the max supported MSI messages this device supports.
2062 * Basically, assuming the MD code can alloc messages, this function
2063 * should return the maximum value that pci_alloc_msi() can return.
2064 * Thus, it is subject to the tunables, etc.
2067 pci_msi_count_method(device_t dev, device_t child)
2069 struct pci_devinfo *dinfo = device_get_ivars(child);
2070 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2072 if (pci_do_msi && msi->msi_location != 0)
2073 return (msi->msi_msgnum);
2077 /* kfree pcicfgregs structure and all depending data structures */
2080 pci_freecfg(struct pci_devinfo *dinfo)
2082 struct devlist *devlist_head;
2085 devlist_head = &pci_devq;
2087 if (dinfo->cfg.vpd.vpd_reg) {
2088 kfree(dinfo->cfg.vpd.vpd_ident, M_DEVBUF);
2089 for (i = 0; i < dinfo->cfg.vpd.vpd_rocnt; i++)
2090 kfree(dinfo->cfg.vpd.vpd_ros[i].value, M_DEVBUF);
2091 kfree(dinfo->cfg.vpd.vpd_ros, M_DEVBUF);
2092 for (i = 0; i < dinfo->cfg.vpd.vpd_wcnt; i++)
2093 kfree(dinfo->cfg.vpd.vpd_w[i].value, M_DEVBUF);
2094 kfree(dinfo->cfg.vpd.vpd_w, M_DEVBUF);
2096 STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links);
2097 kfree(dinfo, M_DEVBUF);
2099 /* increment the generation count */
2102 /* we're losing one device */
2108 * PCI power management
2111 pci_set_powerstate_method(device_t dev, device_t child, int state)
2113 struct pci_devinfo *dinfo = device_get_ivars(child);
2114 pcicfgregs *cfg = &dinfo->cfg;
2116 int result, oldstate, highest, delay;
2118 if (cfg->pp.pp_cap == 0)
2119 return (EOPNOTSUPP);
2122 * Optimize a no state change request away. While it would be OK to
2123 * write to the hardware in theory, some devices have shown odd
2124 * behavior when going from D3 -> D3.
2126 oldstate = pci_get_powerstate(child);
2127 if (oldstate == state)
2131 * The PCI power management specification states that after a state
2132 * transition between PCI power states, system software must
2133 * guarantee a minimal delay before the function accesses the device.
2134 * Compute the worst case delay that we need to guarantee before we
2135 * access the device. Many devices will be responsive much more
2136 * quickly than this delay, but there are some that don't respond
2137 * instantly to state changes. Transitions to/from D3 state require
2138 * 10ms, while D2 requires 200us, and D0/1 require none. The delay
2139 * is done below with DELAY rather than a sleeper function because
2140 * this function can be called from contexts where we cannot sleep.
2142 highest = (oldstate > state) ? oldstate : state;
2143 if (highest == PCI_POWERSTATE_D3)
2145 else if (highest == PCI_POWERSTATE_D2)
2149 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2)
2150 & ~PCIM_PSTAT_DMASK;
2153 case PCI_POWERSTATE_D0:
2154 status |= PCIM_PSTAT_D0;
2156 case PCI_POWERSTATE_D1:
2157 if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0)
2158 return (EOPNOTSUPP);
2159 status |= PCIM_PSTAT_D1;
2161 case PCI_POWERSTATE_D2:
2162 if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0)
2163 return (EOPNOTSUPP);
2164 status |= PCIM_PSTAT_D2;
2166 case PCI_POWERSTATE_D3:
2167 status |= PCIM_PSTAT_D3;
2175 "pci%d:%d:%d:%d: Transition from D%d to D%d\n",
2176 dinfo->cfg.domain, dinfo->cfg.bus, dinfo->cfg.slot,
2177 dinfo->cfg.func, oldstate, state);
2179 PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2);
2186 pci_get_powerstate_method(device_t dev, device_t child)
2188 struct pci_devinfo *dinfo = device_get_ivars(child);
2189 pcicfgregs *cfg = &dinfo->cfg;
2193 if (cfg->pp.pp_cap != 0) {
2194 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2);
2195 switch (status & PCIM_PSTAT_DMASK) {
2197 result = PCI_POWERSTATE_D0;
2200 result = PCI_POWERSTATE_D1;
2203 result = PCI_POWERSTATE_D2;
2206 result = PCI_POWERSTATE_D3;
2209 result = PCI_POWERSTATE_UNKNOWN;
2213 /* No support, device is always at D0 */
2214 result = PCI_POWERSTATE_D0;
2220 * Some convenience functions for PCI device drivers.
2223 static __inline void
2224 pci_set_command_bit(device_t dev, device_t child, uint16_t bit)
2228 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2230 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
2233 static __inline void
2234 pci_clear_command_bit(device_t dev, device_t child, uint16_t bit)
2238 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2240 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
2244 pci_enable_busmaster_method(device_t dev, device_t child)
2246 pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
2251 pci_disable_busmaster_method(device_t dev, device_t child)
2253 pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
2258 pci_enable_io_method(device_t dev, device_t child, int space)
2268 case SYS_RES_IOPORT:
2269 bit = PCIM_CMD_PORTEN;
2272 case SYS_RES_MEMORY:
2273 bit = PCIM_CMD_MEMEN;
2279 pci_set_command_bit(dev, child, bit);
2280 /* Some devices seem to need a brief stall here, what do to? */
2281 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2284 device_printf(child, "failed to enable %s mapping!\n", error);
2289 pci_disable_io_method(device_t dev, device_t child, int space)
2299 case SYS_RES_IOPORT:
2300 bit = PCIM_CMD_PORTEN;
2303 case SYS_RES_MEMORY:
2304 bit = PCIM_CMD_MEMEN;
2310 pci_clear_command_bit(dev, child, bit);
2311 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2312 if (command & bit) {
2313 device_printf(child, "failed to disable %s mapping!\n", error);
2320 * New style pci driver. Parent device is either a pci-host-bridge or a
2321 * pci-pci-bridge. Both kinds are represented by instances of pcib.
2325 pci_print_verbose(struct pci_devinfo *dinfo)
2329 pcicfgregs *cfg = &dinfo->cfg;
2331 kprintf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n",
2332 cfg->vendor, cfg->device, cfg->revid);
2333 kprintf("\tdomain=%d, bus=%d, slot=%d, func=%d\n",
2334 cfg->domain, cfg->bus, cfg->slot, cfg->func);
2335 kprintf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n",
2336 cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype,
2338 kprintf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n",
2339 cfg->cmdreg, cfg->statreg, cfg->cachelnsz);
2340 kprintf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n",
2341 cfg->lattimer, cfg->lattimer * 30, cfg->mingnt,
2342 cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250);
2343 if (cfg->intpin > 0)
2344 kprintf("\tintpin=%c, irq=%d\n",
2345 cfg->intpin +'a' -1, cfg->intline);
2346 if (cfg->pp.pp_cap) {
2349 status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2);
2350 kprintf("\tpowerspec %d supports D0%s%s D3 current D%d\n",
2351 cfg->pp.pp_cap & PCIM_PCAP_SPEC,
2352 cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "",
2353 cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "",
2354 status & PCIM_PSTAT_DMASK);
2356 if (cfg->msi.msi_location) {
2359 ctrl = cfg->msi.msi_ctrl;
2360 kprintf("\tMSI supports %d message%s%s%s\n",
2361 cfg->msi.msi_msgnum,
2362 (cfg->msi.msi_msgnum == 1) ? "" : "s",
2363 (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "",
2364 (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":"");
2366 if (cfg->msix.msix_location) {
2367 kprintf("\tMSI-X supports %d message%s ",
2368 cfg->msix.msix_msgnum,
2369 (cfg->msix.msix_msgnum == 1) ? "" : "s");
2370 if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar)
2371 kprintf("in map 0x%x\n",
2372 cfg->msix.msix_table_bar);
2374 kprintf("in maps 0x%x and 0x%x\n",
2375 cfg->msix.msix_table_bar,
2376 cfg->msix.msix_pba_bar);
2378 pci_print_verbose_expr(cfg);
2383 pci_print_verbose_expr(const pcicfgregs *cfg)
2385 const struct pcicfg_expr *expr = &cfg->expr;
2386 const char *port_name;
2392 if (expr->expr_ptr == 0) /* No PCI Express capability */
2395 kprintf("\tPCI Express ver.%d cap=0x%04x",
2396 expr->expr_cap & PCIEM_CAP_VER_MASK, expr->expr_cap);
2397 if ((expr->expr_cap & PCIEM_CAP_VER_MASK) != PCIEM_CAP_VER_1)
2400 port_type = expr->expr_cap & PCIEM_CAP_PORT_TYPE;
2402 switch (port_type) {
2403 case PCIE_END_POINT:
2404 port_name = "DEVICE";
2406 case PCIE_LEG_END_POINT:
2407 port_name = "LEGDEV";
2409 case PCIE_ROOT_PORT:
2412 case PCIE_UP_STREAM_PORT:
2413 port_name = "UPSTREAM";
2415 case PCIE_DOWN_STREAM_PORT:
2416 port_name = "DOWNSTRM";
2418 case PCIE_PCIE2PCI_BRIDGE:
2419 port_name = "PCIE2PCI";
2421 case PCIE_PCI2PCIE_BRIDGE:
2422 port_name = "PCI2PCIE";
2428 if ((port_type == PCIE_ROOT_PORT ||
2429 port_type == PCIE_DOWN_STREAM_PORT) &&
2430 !(expr->expr_cap & PCIEM_CAP_SLOT_IMPL))
2432 if (port_name != NULL)
2433 kprintf("[%s]", port_name);
2435 if (pcie_slotimpl(cfg)) {
2436 kprintf(", slotcap=0x%08x", expr->expr_slotcap);
2437 if (expr->expr_slotcap & PCIEM_SLTCAP_HP_CAP)
2438 kprintf("[HOTPLUG]");
2445 pci_porten(device_t pcib, int b, int s, int f)
2447 return (PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2)
2448 & PCIM_CMD_PORTEN) != 0;
2452 pci_memen(device_t pcib, int b, int s, int f)
2454 return (PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2)
2455 & PCIM_CMD_MEMEN) != 0;
2459 * Add a resource based on a pci map register. Return 1 if the map
2460 * register is a 32bit map register or 2 if it is a 64bit register.
2463 pci_add_map(device_t pcib, device_t bus, device_t dev,
2464 int b, int s, int f, int reg, struct resource_list *rl, int force,
2469 pci_addr_t start, end, count;
2476 struct resource *res;
2478 map = PCIB_READ_CONFIG(pcib, b, s, f, reg, 4);
2479 PCIB_WRITE_CONFIG(pcib, b, s, f, reg, 0xffffffff, 4);
2480 testval = PCIB_READ_CONFIG(pcib, b, s, f, reg, 4);
2481 PCIB_WRITE_CONFIG(pcib, b, s, f, reg, map, 4);
2483 if (PCI_BAR_MEM(map)) {
2484 type = SYS_RES_MEMORY;
2485 if (map & PCIM_BAR_MEM_PREFETCH)
2488 type = SYS_RES_IOPORT;
2489 ln2size = pci_mapsize(testval);
2490 ln2range = pci_maprange(testval);
2491 base = pci_mapbase(map);
2492 barlen = ln2range == 64 ? 2 : 1;
2495 * For I/O registers, if bottom bit is set, and the next bit up
2496 * isn't clear, we know we have a BAR that doesn't conform to the
2497 * spec, so ignore it. Also, sanity check the size of the data
2498 * areas to the type of memory involved. Memory must be at least
2499 * 16 bytes in size, while I/O ranges must be at least 4.
2501 if (PCI_BAR_IO(testval) && (testval & PCIM_BAR_IO_RESERVED) != 0)
2503 if ((type == SYS_RES_MEMORY && ln2size < 4) ||
2504 (type == SYS_RES_IOPORT && ln2size < 2))
2508 /* Read the other half of a 64bit map register */
2509 base |= (uint64_t) PCIB_READ_CONFIG(pcib, b, s, f, reg + 4, 4) << 32;
2511 kprintf("\tmap[%02x]: type %s, range %2d, base %#jx, size %2d",
2512 reg, pci_maptype(map), ln2range, (uintmax_t)base, ln2size);
2513 if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f))
2514 kprintf(", port disabled\n");
2515 else if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f))
2516 kprintf(", memory disabled\n");
2518 kprintf(", enabled\n");
2522 * If base is 0, then we have problems. It is best to ignore
2523 * such entries for the moment. These will be allocated later if
2524 * the driver specifically requests them. However, some
2525 * removable busses look better when all resources are allocated,
2526 * so allow '0' to be overriden.
2528 * Similarly treat maps whose values is the same as the test value
2529 * read back. These maps have had all f's written to them by the
2530 * BIOS in an attempt to disable the resources.
2532 if (!force && (base == 0 || map == testval))
2534 if ((u_long)base != base) {
2536 "pci%d:%d:%d:%d bar %#x too many address bits",
2537 pci_get_domain(dev), b, s, f, reg);
2542 * This code theoretically does the right thing, but has
2543 * undesirable side effects in some cases where peripherals
2544 * respond oddly to having these bits enabled. Let the user
2545 * be able to turn them off (since pci_enable_io_modes is 1 by
2548 if (pci_enable_io_modes) {
2549 /* Turn on resources that have been left off by a lazy BIOS */
2550 if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f)) {
2551 cmd = PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2);
2552 cmd |= PCIM_CMD_PORTEN;
2553 PCIB_WRITE_CONFIG(pcib, b, s, f, PCIR_COMMAND, cmd, 2);
2555 if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f)) {
2556 cmd = PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2);
2557 cmd |= PCIM_CMD_MEMEN;
2558 PCIB_WRITE_CONFIG(pcib, b, s, f, PCIR_COMMAND, cmd, 2);
2561 if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f))
2563 if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f))
2567 count = 1 << ln2size;
2568 if (base == 0 || base == pci_mapbase(testval)) {
2569 start = 0; /* Let the parent decide. */
2573 end = base + (1 << ln2size) - 1;
2575 resource_list_add(rl, type, reg, start, end, count, -1);
2578 * Try to allocate the resource for this BAR from our parent
2579 * so that this resource range is already reserved. The
2580 * driver for this device will later inherit this resource in
2581 * pci_alloc_resource().
2583 res = resource_list_alloc(rl, bus, dev, type, ®, start, end, count,
2584 prefetch ? RF_PREFETCHABLE : 0, -1);
2587 * If the allocation fails, delete the resource list
2588 * entry to force pci_alloc_resource() to allocate
2589 * resources from the parent.
2591 resource_list_delete(rl, type, reg);
2592 #ifdef PCI_BAR_CLEAR
2595 #else /* !PCI_BAR_CLEAR */
2597 * Don't clear BAR here. Some BIOS lists HPET as a
2598 * PCI function, clearing the BAR causes HPET timer
2602 kprintf("pci:%d:%d:%d: resource reservation failed "
2603 "%#jx - %#jx\n", b, s, f,
2604 (intmax_t)start, (intmax_t)end);
2607 #endif /* PCI_BAR_CLEAR */
2609 start = rman_get_start(res);
2611 pci_write_config(dev, reg, start, 4);
2613 pci_write_config(dev, reg + 4, start >> 32, 4);
2618 * For ATA devices we need to decide early what addressing mode to use.
2619 * Legacy demands that the primary and secondary ATA ports sit on the
2620 * same addresses that old ISA hardware did. This dictates that we use
2621 * those addresses and ignore the BAR's if we cannot set PCI native
/* addressing mode (continuation of the comment is elided here). */
2625 pci_ata_maps(device_t pcib, device_t bus, device_t dev, int b,
2626 int s, int f, struct resource_list *rl, int force, uint32_t prefetchmask)
2628 int rid, type, progif;
2630 /* if this device supports PCI native addressing use it */
2631 progif = pci_read_config(dev, PCIR_PROGIF, 1);
/* 0x8a: both channels claim their mode is programmable (prog-if bits 1,3). */
2632 if ((progif & 0x8a) == 0x8a) {
2633 if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) &&
2634 pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) {
2635 kprintf("Trying ATA native PCI addressing mode\n");
/* Flip both channels into native mode (prog-if bits 0 and 2). */
2636 pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1);
/* Re-read prog-if: the write above may or may not have stuck. */
2640 progif = pci_read_config(dev, PCIR_PROGIF, 1);
2641 type = SYS_RES_IOPORT;
/* Primary channel: native mode uses BAR(0)/BAR(1) ... */
2642 if (progif & PCIP_STORAGE_IDE_MODEPRIM) {
2643 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(0), rl, force,
2644 prefetchmask & (1 << 0));
2645 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(1), rl, force,
2646 prefetchmask & (1 << 1));
/* ... legacy mode uses the fixed ISA-compatible ports 0x1f0-0x1f7/0x3f6. */
2649 resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8, -1);
2650 resource_list_alloc(rl, bus, dev, type, &rid, 0x1f0, 0x1f7, 8,
2653 resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1, -1);
2654 resource_list_alloc(rl, bus, dev, type, &rid, 0x3f6, 0x3f6, 1,
/* Secondary channel: same split, BAR(2)/BAR(3) vs 0x170-0x177/0x376. */
2657 if (progif & PCIP_STORAGE_IDE_MODESEC) {
2658 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(2), rl, force,
2659 prefetchmask & (1 << 2));
2660 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(3), rl, force,
2661 prefetchmask & (1 << 3));
2664 resource_list_add(rl, type, rid, 0x170, 0x177, 8, -1);
2665 resource_list_alloc(rl, bus, dev, type, &rid, 0x170, 0x177, 8,
2668 resource_list_add(rl, type, rid, 0x376, 0x376, 1, -1);
2669 resource_list_alloc(rl, bus, dev, type, &rid, 0x376, 0x376, 1,
/* Bus-master DMA (BAR 4) and BAR 5 are mapped regardless of mode. */
2672 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(4), rl, force,
2673 prefetchmask & (1 << 4));
2674 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(5), rl, force,
2675 prefetchmask & (1 << 5));
/*
 * Resolve and record a legacy INTx IRQ for 'dev': honor a user tunable
 * first, then either trust the intline register or ask the parent bus to
 * route one (always re-route when force_route is set).  The result is
 * written back to PCIR_INTLINE and added as the rid-0 IRQ resource.
 */
2679 pci_assign_interrupt(device_t bus, device_t dev, int force_route)
2681 struct pci_devinfo *dinfo = device_get_ivars(dev);
2682 pcicfgregs *cfg = &dinfo->cfg;
2683 char tunable_name[64];
2686 /* Has to have an intpin to have an interrupt. */
2687 if (cfg->intpin == 0)
2690 /* Let the user override the IRQ with a tunable. */
2691 irq = PCI_INVALID_IRQ;
/* Tunable is per-slot/per-pin, e.g. "hw.pci0.1.2.INTA.irq". */
2692 ksnprintf(tunable_name, sizeof(tunable_name),
2693 "hw.pci%d.%d.%d.INT%c.irq",
2694 cfg->domain, cfg->bus, cfg->slot, cfg->intpin + 'A' - 1);
/* Reject out-of-range user values (valid IRQs are 1..254 here). */
2695 if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0))
2696 irq = PCI_INVALID_IRQ;
2699 * If we didn't get an IRQ via the tunable, then we either use the
2700 * IRQ value in the intline register or we ask the bus to route an
2701 * interrupt for us. If force_route is true, then we only use the
2702 * value in the intline register if the bus was unable to assign an
2705 if (!PCI_INTERRUPT_VALID(irq)) {
2706 if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route)
2707 irq = PCI_ASSIGN_INTERRUPT(bus, dev);
/* Routing failed: fall back to whatever intline held (elided branch). */
2708 if (!PCI_INTERRUPT_VALID(irq))
2712 /* If after all that we don't have an IRQ, just bail. */
2713 if (!PCI_INTERRUPT_VALID(irq))
2716 /* Update the config register if it changed. */
2717 if (irq != cfg->intline) {
2719 pci_write_config(dev, PCIR_INTLINE, irq, 1);
2722 /* Add this IRQ as rid 0 interrupt resource. */
2723 resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1,
2724 machintr_legacy_intr_cpuid(irq));
/*
 * Populate dev's resource list from its BARs: special-case legacy ATA
 * controllers, walk the ordinary BARs, add quirk-listed extra registers,
 * and finally (re-)route the legacy interrupt if one is wired up.
 */
2728 pci_add_resources(device_t pcib, device_t bus, device_t dev, int force, uint32_t prefetchmask)
2730 struct pci_devinfo *dinfo = device_get_ivars(dev);
2731 pcicfgregs *cfg = &dinfo->cfg;
2732 struct resource_list *rl = &dinfo->resources;
2733 struct pci_quirk *q;
2740 /* ATA devices need special map treatment */
/*
 * Legacy-mode IDE (MASTERDEV set, or both channel BARs empty) gets the
 * fixed ISA-compatible ports via pci_ata_maps() instead of plain BARs.
 */
2741 if ((pci_get_class(dev) == PCIC_STORAGE) &&
2742 (pci_get_subclass(dev) == PCIS_STORAGE_IDE) &&
2743 ((pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV) ||
2744 (!pci_read_config(dev, PCIR_BAR(0), 4) &&
2745 !pci_read_config(dev, PCIR_BAR(2), 4))) )
2746 pci_ata_maps(pcib, bus, dev, b, s, f, rl, force, prefetchmask);
/* pci_add_map() returns the BAR width in dwords (64-bit BARs skip 2). */
2748 for (i = 0; i < cfg->nummaps;)
2749 i += pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(i),
2750 rl, force, prefetchmask & (1 << i));
2753 * Add additional, quirked resources.
2755 for (q = &pci_quirks[0]; q->devid; q++) {
2756 if (q->devid == ((cfg->device << 16) | cfg->vendor)
2757 && q->type == PCI_QUIRK_MAP_REG)
2758 pci_add_map(pcib, bus, dev, b, s, f, q->arg1, rl,
2762 if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) {
2764 * Try to re-route interrupts. Sometimes the BIOS or
2765 * firmware may leave bogus values in these registers.
2766 * If the re-route fails, then just stick with what we
2769 pci_assign_interrupt(bus, dev, 1);
/*
 * Enumerate every slot/function on bus 'busno' via the parent bridge's
 * config accessors and add a child device for each function found.
 * dinfo_size lets subclassed busses (e.g. cardbus) over-allocate devinfo.
 */
2774 pci_add_children(device_t dev, int domain, int busno, size_t dinfo_size)
2776 #define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
2777 device_t pcib = device_get_parent(dev);
2778 struct pci_devinfo *dinfo;
2780 int s, f, pcifunchigh;
2783 KASSERT(dinfo_size >= sizeof(struct pci_devinfo),
2784 ("dinfo_size too small"));
2785 maxslots = PCIB_MAXSLOTS(pcib);
2786 for (s = 0; s <= maxslots; s++) {
2790 hdrtype = REG(PCIR_HDRTYPE, 1);
/* Unknown header types are skipped entirely. */
2791 if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
/* Multi-function bit widens the function scan from 0 to PCI_FUNCMAX. */
2793 if (hdrtype & PCIM_MFDEV)
2794 pcifunchigh = PCI_FUNCMAX;
2795 for (f = 0; f <= pcifunchigh; f++) {
2796 dinfo = pci_read_device(pcib, domain, busno, s, f,
2798 if (dinfo != NULL) {
2799 pci_add_child(dev, dinfo);
/*
 * Attach one enumerated function as a child device: create the device_t,
 * hook up its devinfo, snapshot config space (power state untouched),
 * restore it, and reserve its BAR/IRQ resources (force=0, no prefetch hints).
 */
2807 pci_add_child(device_t bus, struct pci_devinfo *dinfo)
2811 pcib = device_get_parent(bus);
2812 dinfo->cfg.dev = device_add_child(bus, NULL, -1);
2813 device_set_ivars(dinfo->cfg.dev, dinfo);
2814 resource_list_init(&dinfo->resources);
/* Save first so a later restore has a pristine copy to work from. */
2815 pci_cfg_save(dinfo->cfg.dev, dinfo, 0);
2816 pci_cfg_restore(dinfo->cfg.dev, dinfo);
2817 pci_print_verbose(dinfo);
2818 pci_add_resources(pcib, bus, dinfo->cfg.dev, 0, 0);
/*
 * Generic PCI bus probe: always matches, but at a low priority so that
 * more specific bus drivers can claim the device instead (return value
 * elided here — presumably a negative priority; confirm against source).
 */
2822 pci_probe(device_t dev)
2824 device_set_desc(dev, "PCI bus");
2826 /* Allow other subclasses to override this driver. */
/*
 * Attach the PCI bus: query the parent bridge for our domain/bus numbers
 * (unit numbers are meaningless with multiple domains), enumerate the
 * children, then let the generic bus code attach them.
 */
2831 pci_attach(device_t dev)
2836 * Since there can be multiple independently numbered PCI
2837 * busses on systems with multiple PCI domains, we can't use
2838 * the unit number to decide which bus we are probing. We ask
2839 * the parent pcib what our domain and bus numbers are.
2841 domain = pcib_get_domain(dev);
2842 busno = pcib_get_bus(dev);
2844 device_printf(dev, "domain=%d, physical bus=%d\n",
2847 pci_add_children(dev, domain, busno, sizeof(struct pci_devinfo));
2849 return (bus_generic_attach(dev));
/*
 * Bus suspend: snapshot each child's config space, suspend the children
 * via the generic path, then (if ACPI is present and power management is
 * enabled) drop each attached type-0 child to its ACPI-suggested D-state.
 */
2853 pci_suspend(device_t dev)
2855 int dstate, error, i, numdevs;
2856 device_t acpi_dev, child, *devlist;
2857 struct pci_devinfo *dinfo;
2860 * Save the PCI configuration space for each child and set the
2861 * device in the appropriate power state for this sleep state.
/* acpi_dev stays NULL when pci_do_power_resume is off: skips powerdown. */
2864 if (pci_do_power_resume)
2865 acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
2866 device_get_children(dev, &devlist, &numdevs)
2867 for (i = 0; i < numdevs; i++) {
2869 dinfo = (struct pci_devinfo *) device_get_ivars(child);
2870 pci_cfg_save(child, dinfo, 0);
2873 /* Suspend devices before potentially powering them down. */
2874 error = bus_generic_suspend(dev);
/* Suspend failed: free the list and bail without touching power state. */
2876 kfree(devlist, M_TEMP);
2881 * Always set the device to D3. If ACPI suggests a different
2882 * power state, use it instead. If ACPI is not present, the
2883 * firmware is responsible for managing device power. Skip
2884 * children who aren't attached since they are powered down
2885 * separately. Only manage type 0 devices for now.
2887 for (i = 0; acpi_dev && i < numdevs; i++) {
2889 dinfo = (struct pci_devinfo *) device_get_ivars(child);
2890 if (device_is_attached(child) && dinfo->cfg.hdrtype == 0) {
2891 dstate = PCI_POWERSTATE_D3;
2892 ACPI_PWR_FOR_SLEEP(acpi_dev, child, &dstate);
2893 pci_set_powerstate(child, dstate);
2896 kfree(devlist, M_TEMP);
/*
 * Bus resume: mirror of pci_suspend().  Power each attached type-0 child
 * back to D0 (notifying ACPI when available), restore its saved config
 * space, then resume the children via the generic path.
 */
2901 pci_resume(device_t dev)
2904 device_t acpi_dev, child, *devlist;
2905 struct pci_devinfo *dinfo;
2908 * Set each child to D0 and restore its PCI configuration space.
/* As in pci_suspend(): acpi_dev stays NULL if power resume is disabled. */
2911 if (pci_do_power_resume)
2912 acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
2913 device_get_children(dev, &devlist, &numdevs);
2914 for (i = 0; i < numdevs; i++) {
2916 * Notify ACPI we're going to D0 but ignore the result. If
2917 * ACPI is not present, the firmware is responsible for
2918 * managing device power. Only manage type 0 devices for now.
2921 dinfo = (struct pci_devinfo *) device_get_ivars(child);
2922 if (acpi_dev && device_is_attached(child) &&
2923 dinfo->cfg.hdrtype == 0) {
2924 ACPI_PWR_FOR_SLEEP(acpi_dev, child, NULL);
2925 pci_set_powerstate(child, PCI_POWERSTATE_D0);
2928 /* Now the device is powered up, restore its config space. */
2929 pci_cfg_restore(child, dinfo);
2931 kfree(devlist, M_TEMP);
2932 return (bus_generic_resume(dev));
/*
 * Locate a preloaded "pci_vendor_data" module (the flat-text vendor/device
 * database) and record its address and size in the globals used by
 * pci_describe_device().  No-op when the module was not preloaded.
 */
2936 pci_load_vendor_data(void)
2938 caddr_t vendordata, info;
2940 if ((vendordata = preload_search_by_type("pci_vendor_data")) != NULL) {
2941 info = preload_search_info(vendordata, MODINFO_ADDR);
2942 pci_vendordata = *(char **)info;
2943 info = preload_search_info(vendordata, MODINFO_SIZE);
2944 pci_vendordata_size = *(size_t *)info;
2945 /* terminate the database */
/* Guards the parser against a database that lacks a final newline. */
2946 pci_vendordata[pci_vendordata_size] = '\n';
/*
 * Called when a new driver is registered with the PCI devclass: re-probe
 * every child that is still unclaimed (DS_NOTPRESENT), restoring its
 * config space first and powering it back down if the probe fails again.
 */
2951 pci_driver_added(device_t dev, driver_t *driver)
2956 struct pci_devinfo *dinfo;
2960 device_printf(dev, "driver added\n");
2961 DEVICE_IDENTIFY(driver, dev);
2962 device_get_children(dev, &devlist, &numdevs);
2963 for (i = 0; i < numdevs; i++) {
/* Skip children that already have a driver attached. */
2965 if (device_get_state(child) != DS_NOTPRESENT)
2967 dinfo = device_get_ivars(child);
2968 pci_print_verbose(dinfo);
2970 kprintf("pci%d:%d:%d:%d: reprobing on driver added\n",
2971 dinfo->cfg.domain, dinfo->cfg.bus, dinfo->cfg.slot,
2973 pci_cfg_restore(child, dinfo);
/* Still no taker: save state and power the device down (setstate=1). */
2974 if (device_probe_and_attach(child) != 0)
2975 pci_cfg_save(child, dinfo, 1);
2977 kfree(devlist, M_TEMP);
/*
 * Bus callback after a child's driver detaches: save the child's config
 * space and power it down (pci_cfg_save with setstate=1).
 */
2981 pci_child_detached(device_t parent __unused, device_t child)
2983 /* Turn child's power off */
2984 pci_cfg_save(child, device_get_ivars(child), 1);
/*
 * Bus setup_intr method: install the handler via the generic path, then
 * for direct children program the interrupt mode — enable INTx for rid 0,
 * or map/program the MSI or MSI-X vector (and disable INTx) otherwise.
 * On MSI mapping failure the just-installed handler is torn down again.
 */
2988 pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags,
2989 driver_intr_t *intr, void *arg, void **cookiep,
2990 lwkt_serialize_t serializer, const char *desc)
2995 error = bus_generic_setup_intr(dev, child, irq, flags, intr,
2996 arg, &cookie, serializer, desc);
3000 /* If this is not a direct child, just bail out. */
3001 if (device_get_parent(child) != dev) {
3006 rid = rman_get_rid(irq);
/* rid 0 is the legacy INTx interrupt (branch structure elided here). */
3008 /* Make sure that INTx is enabled */
3009 pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
3011 struct pci_devinfo *dinfo = device_get_ivars(child);
3016 * Check to see if the interrupt is MSI or MSI-X.
3017 * Ask our parent to map the MSI and give
3018 * us the address and data register values.
3019 * If we fail for some reason, teardown the
3020 * interrupt handler.
3022 if (dinfo->cfg.msi.msi_alloc > 0) {
3023 struct pcicfg_msi *msi = &dinfo->cfg.msi;
/* First handler on this device: map and program the MSI registers. */
3025 if (msi->msi_addr == 0) {
3026 KASSERT(msi->msi_handlers == 0,
3027 ("MSI has handlers, but vectors not mapped"));
3028 error = PCIB_MAP_MSI(device_get_parent(dev),
3029 child, rman_get_start(irq), &addr, &data,
3030 rman_get_cpuid(irq));
3033 msi->msi_addr = addr;
3034 msi->msi_data = data;
3035 pci_enable_msi(child, addr, data);
3037 msi->msi_handlers++;
3039 struct msix_vector *mv;
3042 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
3043 ("No MSI-X or MSI rid %d allocated\n", rid));
3045 mv = pci_find_msix_vector(child, rid);
3047 ("MSI-X rid %d is not allocated\n", rid));
3048 KASSERT(mv->mv_address == 0,
3049 ("MSI-X rid %d has been setup\n", rid));
3051 error = PCIB_MAP_MSI(device_get_parent(dev),
3052 child, rman_get_start(irq), &addr, &data,
3053 rman_get_cpuid(irq));
3056 mv->mv_address = addr;
/* Program the per-vector MSI-X table entry, then unmask it. */
3059 vector = PCI_MSIX_RID2VEC(rid);
3060 pci_setup_msix_vector(child, vector,
3061 mv->mv_address, mv->mv_data);
3062 pci_unmask_msix_vector(child, vector);
3065 /* Make sure that INTx is disabled if we are using MSI/MSIX */
3066 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
/* Error path: undo the handler installed at the top of the function. */
3069 (void)bus_generic_teardown_intr(dev, child, irq,
/*
 * Bus teardown_intr method: inverse of pci_setup_intr().  For direct
 * children, drop the MSI handler count (disabling MSI when it reaches
 * zero) or mask the MSI-X vector, then tear down via the generic path.
 */
3079 pci_teardown_intr(device_t dev, device_t child, struct resource *irq,
3084 if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE))
3087 /* If this isn't a direct child, just bail out */
3088 if (device_get_parent(child) != dev)
3089 return(bus_generic_teardown_intr(dev, child, irq, cookie));
3091 rid = rman_get_rid(irq);
/* rid 0 (legacy INTx): mask INTx via the command register. */
3094 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
3096 struct pci_devinfo *dinfo = device_get_ivars(child);
3099 * Check to see if the interrupt is MSI or MSI-X. If so,
3100 * decrement the appropriate handlers count and mask the
3101 * MSI-X message, or disable MSI messages if the count
3104 if (dinfo->cfg.msi.msi_alloc > 0) {
3105 struct pcicfg_msi *msi = &dinfo->cfg.msi;
3107 KASSERT(rid <= msi->msi_alloc,
3108 ("MSI-X index too high\n"));
3109 KASSERT(msi->msi_handlers > 0,
3110 ("MSI rid %d is not setup\n", rid));
3112 msi->msi_handlers--;
/* Last handler gone: turn MSI off entirely. */
3113 if (msi->msi_handlers == 0)
3114 pci_disable_msi(child);
3116 struct msix_vector *mv;
3118 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
3119 ("No MSI or MSI-X rid %d allocated", rid));
3121 mv = pci_find_msix_vector(child, rid);
3123 ("MSI-X rid %d is not allocated\n", rid));
3124 KASSERT(mv->mv_address != 0,
3125 ("MSI-X rid %d has not been setup\n", rid));
/* MSI-X vectors are only masked here; unmapping happens elsewhere. */
3127 pci_mask_msix_vector(child, PCI_MSIX_RID2VEC(rid));
3132 error = bus_generic_teardown_intr(dev, child, irq, cookie);
3135 ("%s: generic teardown failed for MSI/MSI-X", __func__));
/*
 * Bus print_child method: print the child's name plus its port, memory
 * and IRQ resource ranges, flags, and slot.function location.  Returns
 * the character count, as the print_child protocol expects.
 */
3140 pci_print_child(device_t dev, device_t child)
3142 struct pci_devinfo *dinfo;
3143 struct resource_list *rl;
3146 dinfo = device_get_ivars(child);
3147 rl = &dinfo->resources;
3149 retval += bus_print_child_header(dev, child);
3151 retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx");
3152 retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx");
3153 retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld");
3154 if (device_get_flags(dev))
3155 retval += kprintf(" flags %#x", device_get_flags(dev));
3157 retval += kprintf(" at device %d.%d", pci_get_slot(child),
3158 pci_get_function(child));
3160 retval += bus_print_child_footer(dev, child);
/*
 * Class/subclass -> human-readable description table used by
 * pci_probe_nomatch() to describe unclaimed devices.  A subclass of -1
 * names the whole class; more specific subclass rows follow it.
 */
3170 } pci_nomatch_tab[] = {
3171 {PCIC_OLD, -1, "old"},
3172 {PCIC_OLD, PCIS_OLD_NONVGA, "non-VGA display device"},
3173 {PCIC_OLD, PCIS_OLD_VGA, "VGA-compatible display device"},
3174 {PCIC_STORAGE, -1, "mass storage"},
3175 {PCIC_STORAGE, PCIS_STORAGE_SCSI, "SCSI"},
3176 {PCIC_STORAGE, PCIS_STORAGE_IDE, "ATA"},
3177 {PCIC_STORAGE, PCIS_STORAGE_FLOPPY, "floppy disk"},
3178 {PCIC_STORAGE, PCIS_STORAGE_IPI, "IPI"},
3179 {PCIC_STORAGE, PCIS_STORAGE_RAID, "RAID"},
3180 {PCIC_STORAGE, PCIS_STORAGE_ATA_ADMA, "ATA (ADMA)"},
3181 {PCIC_STORAGE, PCIS_STORAGE_SATA, "SATA"},
3182 {PCIC_STORAGE, PCIS_STORAGE_SAS, "SAS"},
3183 {PCIC_NETWORK, -1, "network"},
3184 {PCIC_NETWORK, PCIS_NETWORK_ETHERNET, "ethernet"},
3185 {PCIC_NETWORK, PCIS_NETWORK_TOKENRING, "token ring"},
3186 {PCIC_NETWORK, PCIS_NETWORK_FDDI, "fddi"},
3187 {PCIC_NETWORK, PCIS_NETWORK_ATM, "ATM"},
3188 {PCIC_NETWORK, PCIS_NETWORK_ISDN, "ISDN"},
3189 {PCIC_DISPLAY, -1, "display"},
3190 {PCIC_DISPLAY, PCIS_DISPLAY_VGA, "VGA"},
3191 {PCIC_DISPLAY, PCIS_DISPLAY_XGA, "XGA"},
3192 {PCIC_DISPLAY, PCIS_DISPLAY_3D, "3D"},
3193 {PCIC_MULTIMEDIA, -1, "multimedia"},
3194 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_VIDEO, "video"},
3195 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_AUDIO, "audio"},
3196 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_TELE, "telephony"},
3197 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_HDA, "HDA"},
3198 {PCIC_MEMORY, -1, "memory"},
3199 {PCIC_MEMORY, PCIS_MEMORY_RAM, "RAM"},
3200 {PCIC_MEMORY, PCIS_MEMORY_FLASH, "flash"},
3201 {PCIC_BRIDGE, -1, "bridge"},
3202 {PCIC_BRIDGE, PCIS_BRIDGE_HOST, "HOST-PCI"},
3203 {PCIC_BRIDGE, PCIS_BRIDGE_ISA, "PCI-ISA"},
3204 {PCIC_BRIDGE, PCIS_BRIDGE_EISA, "PCI-EISA"},
3205 {PCIC_BRIDGE, PCIS_BRIDGE_MCA, "PCI-MCA"},
3206 {PCIC_BRIDGE, PCIS_BRIDGE_PCI, "PCI-PCI"},
3207 {PCIC_BRIDGE, PCIS_BRIDGE_PCMCIA, "PCI-PCMCIA"},
3208 {PCIC_BRIDGE, PCIS_BRIDGE_NUBUS, "PCI-NuBus"},
3209 {PCIC_BRIDGE, PCIS_BRIDGE_CARDBUS, "PCI-CardBus"},
3210 {PCIC_BRIDGE, PCIS_BRIDGE_RACEWAY, "PCI-RACEway"},
3211 {PCIC_SIMPLECOMM, -1, "simple comms"},
3212 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_UART, "UART"}, /* could detect 16550 */
3213 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_PAR, "parallel port"},
3214 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MULSER, "multiport serial"},
3215 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MODEM, "generic modem"},
3216 {PCIC_BASEPERIPH, -1, "base peripheral"},
3217 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PIC, "interrupt controller"},
3218 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_DMA, "DMA controller"},
3219 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_TIMER, "timer"},
3220 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_RTC, "realtime clock"},
3221 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PCIHOT, "PCI hot-plug controller"},
3222 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_SDHC, "SD host controller"},
3223 {PCIC_INPUTDEV, -1, "input device"},
3224 {PCIC_INPUTDEV, PCIS_INPUTDEV_KEYBOARD, "keyboard"},
3225 {PCIC_INPUTDEV, PCIS_INPUTDEV_DIGITIZER,"digitizer"},
3226 {PCIC_INPUTDEV, PCIS_INPUTDEV_MOUSE, "mouse"},
3227 {PCIC_INPUTDEV, PCIS_INPUTDEV_SCANNER, "scanner"},
3228 {PCIC_INPUTDEV, PCIS_INPUTDEV_GAMEPORT, "gameport"},
3229 {PCIC_DOCKING, -1, "docking station"},
3230 {PCIC_PROCESSOR, -1, "processor"},
3231 {PCIC_SERIALBUS, -1, "serial bus"},
3232 {PCIC_SERIALBUS, PCIS_SERIALBUS_FW, "FireWire"},
3233 {PCIC_SERIALBUS, PCIS_SERIALBUS_ACCESS, "AccessBus"},
3234 {PCIC_SERIALBUS, PCIS_SERIALBUS_SSA, "SSA"},
3235 {PCIC_SERIALBUS, PCIS_SERIALBUS_USB, "USB"},
3236 {PCIC_SERIALBUS, PCIS_SERIALBUS_FC, "Fibre Channel"},
3237 {PCIC_SERIALBUS, PCIS_SERIALBUS_SMBUS, "SMBus"},
3238 {PCIC_WIRELESS, -1, "wireless controller"},
3239 {PCIC_WIRELESS, PCIS_WIRELESS_IRDA, "iRDA"},
3240 {PCIC_WIRELESS, PCIS_WIRELESS_IR, "IR"},
3241 {PCIC_WIRELESS, PCIS_WIRELESS_RF, "RF"},
3242 {PCIC_INTELLIIO, -1, "intelligent I/O controller"},
3243 {PCIC_INTELLIIO, PCIS_INTELLIIO_I2O, "I2O"},
3244 {PCIC_SATCOM, -1, "satellite communication"},
3245 {PCIC_SATCOM, PCIS_SATCOM_TV, "sat TV"},
3246 {PCIC_SATCOM, PCIS_SATCOM_AUDIO, "sat audio"},
3247 {PCIC_SATCOM, PCIS_SATCOM_VOICE, "sat voice"},
3248 {PCIC_SATCOM, PCIS_SATCOM_DATA, "sat data"},
3249 {PCIC_CRYPTO, -1, "encrypt/decrypt"},
3250 {PCIC_CRYPTO, PCIS_CRYPTO_NETCOMP, "network/computer crypto"},
3251 {PCIC_CRYPTO, PCIS_CRYPTO_ENTERTAIN, "entertainment crypto"},
3252 {PCIC_DASP, -1, "dasp"},
3253 {PCIC_DASP, PCIS_DASP_DPIO, "DPIO module"},
/*
 * Report a device no driver claimed: prefer a name from the loaded vendor
 * database, else fall back to the class/subclass strings from
 * pci_nomatch_tab; then print IDs, location and IRQ, and power it down.
 */
3258 pci_probe_nomatch(device_t dev, device_t child)
3261 char *cp, *scp, *device;
3264 * Look for a listing for this device in a loaded device database.
3266 if ((device = pci_describe_device(child)) != NULL) {
3267 device_printf(dev, "<%s>", device);
/* pci_describe_device() returns malloc'd storage we must free. */
3268 kfree(device, M_DEVBUF);
3271 * Scan the class/subclass descriptions for a general
3276 for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) {
3277 if (pci_nomatch_tab[i].class == pci_get_class(child)) {
/* subclass -1 rows give the class name; exact rows the subclass name. */
3278 if (pci_nomatch_tab[i].subclass == -1) {
3279 cp = pci_nomatch_tab[i].desc;
3280 } else if (pci_nomatch_tab[i].subclass ==
3281 pci_get_subclass(child)) {
3282 scp = pci_nomatch_tab[i].desc;
3286 device_printf(dev, "<%s%s%s>",
3288 ((cp != NULL) && (scp != NULL)) ? ", " : "",
3291 kprintf(" (vendor 0x%04x, dev 0x%04x) at device %d.%d",
3292 pci_get_vendor(child), pci_get_device(child),
3293 pci_get_slot(child), pci_get_function(child));
3294 if (pci_get_intpin(child) > 0) {
3297 irq = pci_get_irq(child);
3298 if (PCI_INTERRUPT_VALID(irq))
3299 kprintf(" irq %d", irq);
/* Nobody wants it: save config state and power the device down. */
3303 pci_cfg_save(child, (struct pci_devinfo *)device_get_ivars(child), 1);
3307 * Parse the PCI device database, if loaded, and return a pointer to a
3308 * description of the device.
3310 * The database is flat text formatted as follows:
3312 * Any line not in a valid format is ignored.
3313 * Lines are terminated with newline '\n' characters.
3315 * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then
3318 * A DEVICE line is entered immediately below the corresponding VENDOR ID.
3319 * - devices cannot be listed without a corresponding VENDOR line.
3320 * A DEVICE line consists of a TAB, the 4 digit (hex) device code,
3321 * another TAB, then the device name.
3325 * Assuming (ptr) points to the beginning of a line in the database,
3326 * return the vendor or device and description of the next entry.
3327 * The value of (vendor) or (device) inappropriate for the entry type
3328 * is set to -1. Returns nonzero at the end of the database.
3330 * Note that this parsing is somewhat fragile in the face of corrupt data;
3331 * we attempt to safeguard against this by spamming the end of the
3332 * database with a newline when we initialise.
/*
 * Parse one entry of the flat-text vendor database: a vendor line fills
 * *vendor and *desc, a TAB-indented device line fills *device and *desc
 * (see the format comment above).  *ptr is advanced past the consumed
 * line(s); the elided return values signal end-of-database.
 */
3335 pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc)
/* 'left' bounds every scan so we never run off the preloaded blob. */
3344 left = pci_vendordata_size - (cp - pci_vendordata);
/* %80[^\n] caps the description at 80 chars, matching the callers' kmalloc(80). */
3352 ksscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2)
3356 ksscanf(cp, "%x\t%80[^\n]", device, *desc) == 2)
3359 /* skip to next line */
3360 while (*cp != '\n' && left > 0) {
3369 /* skip to next line */
3370 while (*cp != '\n' && left > 0) {
3374 if (*cp == '\n' && left > 0)
/*
 * Look up dev's vendor/device IDs in the loaded vendor database and
 * return a kmalloc'd "vendor, device" string (caller frees with M_DEVBUF),
 * or NULL if no database is loaded or allocation fails.
 */
3381 pci_describe_device(device_t dev)
3384 char *desc, *vp, *dp, *line;
3386 desc = vp = dp = NULL;
3389 * If we have no vendor data, we can't do anything.
3391 if (pci_vendordata == NULL)
3395 * Scan the vendor data looking for this device
3397 line = pci_vendordata;
/* 80-byte buffers match the %80[^\n] limit in pci_describe_parse_line(). */
3398 if ((vp = kmalloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
3401 if (pci_describe_parse_line(&line, &vendor, &device, &vp))
3403 if (vendor == pci_get_vendor(dev))
3406 if ((dp = kmalloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
3409 if (pci_describe_parse_line(&line, &vendor, &device, &dp)) {
3417 if (device == pci_get_device(dev))
/* Device ID not listed: fall back to printing the raw hex ID. */
3421 ksnprintf(dp, 80, "0x%x", pci_get_device(dev));
3422 if ((desc = kmalloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) !=
3424 ksprintf(desc, "%s, %s", vp, dp);
3427 kfree(vp, M_DEVBUF);
3429 kfree(dp, M_DEVBUF);
/*
 * Bus read_ivar method: expose the cached config-space fields (vendor,
 * device, class, interrupt, capability pointers, ...) to children via the
 * instance-variable accessors.
 */
3434 pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
3436 struct pci_devinfo *dinfo;
3439 dinfo = device_get_ivars(child);
3443 case PCI_IVAR_ETHADDR:
3445 * The generic accessor doesn't deal with failure, so
3446 * we set the return value, then return an error.
3448 *((uint8_t **) result) = NULL;
3450 case PCI_IVAR_SUBVENDOR:
3451 *result = cfg->subvendor;
3453 case PCI_IVAR_SUBDEVICE:
3454 *result = cfg->subdevice;
3456 case PCI_IVAR_VENDOR:
3457 *result = cfg->vendor;
3459 case PCI_IVAR_DEVICE:
3460 *result = cfg->device;
3462 case PCI_IVAR_DEVID:
/* devid packs device in the high 16 bits, vendor in the low 16. */
3463 *result = (cfg->device << 16) | cfg->vendor;
3465 case PCI_IVAR_CLASS:
3466 *result = cfg->baseclass;
3468 case PCI_IVAR_SUBCLASS:
3469 *result = cfg->subclass;
3471 case PCI_IVAR_PROGIF:
3472 *result = cfg->progif;
3474 case PCI_IVAR_REVID:
3475 *result = cfg->revid;
3477 case PCI_IVAR_INTPIN:
3478 *result = cfg->intpin;
3481 *result = cfg->intline;
3483 case PCI_IVAR_DOMAIN:
3484 *result = cfg->domain;
3490 *result = cfg->slot;
3492 case PCI_IVAR_FUNCTION:
3493 *result = cfg->func;
3495 case PCI_IVAR_CMDREG:
3496 *result = cfg->cmdreg;
3498 case PCI_IVAR_CACHELNSZ:
3499 *result = cfg->cachelnsz;
3501 case PCI_IVAR_MINGNT:
3502 *result = cfg->mingnt;
3504 case PCI_IVAR_MAXLAT:
3505 *result = cfg->maxlat;
3507 case PCI_IVAR_LATTIMER:
3508 *result = cfg->lattimer;
3510 case PCI_IVAR_PCIXCAP_PTR:
3511 *result = cfg->pcix.pcix_ptr;
3513 case PCI_IVAR_PCIECAP_PTR:
3514 *result = cfg->expr.expr_ptr;
3516 case PCI_IVAR_VPDCAP_PTR:
3517 *result = cfg->vpd.vpd_reg;
/*
 * Bus write_ivar method: only the interrupt pin is writable; every other
 * config-derived ivar is read-only and rejected with EINVAL.
 */
3526 pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
3528 struct pci_devinfo *dinfo;
3530 dinfo = device_get_ivars(child);
3533 case PCI_IVAR_INTPIN:
3534 dinfo->cfg.intpin = value;
3536 case PCI_IVAR_ETHADDR:
3537 case PCI_IVAR_SUBVENDOR:
3538 case PCI_IVAR_SUBDEVICE:
3539 case PCI_IVAR_VENDOR:
3540 case PCI_IVAR_DEVICE:
3541 case PCI_IVAR_DEVID:
3542 case PCI_IVAR_CLASS:
3543 case PCI_IVAR_SUBCLASS:
3544 case PCI_IVAR_PROGIF:
3545 case PCI_IVAR_REVID:
3547 case PCI_IVAR_DOMAIN:
3550 case PCI_IVAR_FUNCTION:
3551 return (EINVAL); /* disallow for now */
3558 #include "opt_ddb.h"
3560 #include <ddb/ddb.h>
3561 #include <sys/cons.h>
3564 * List resources based on pci map registers, used for within ddb
/* DDB "show pciregs" command: walk the global device queue and print
 * each device's identity registers. */
3567 DB_SHOW_COMMAND(pciregs, db_pci_dump)
3569 struct pci_devinfo *dinfo;
3570 struct devlist *devlist_head;
3573 int i, error, none_count;
3576 /* get the head of the device queue */
3577 devlist_head = &pci_devq;
3580 * Go through the list of devices and print out devices
/* Stop on error, end of list, device-count bound, or pager quit. */
3582 for (error = 0, i = 0,
3583 dinfo = STAILQ_FIRST(devlist_head);
3584 (dinfo != NULL) && (error == 0) && (i < pci_numdevs) && !db_pager_quit;
3585 dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {
3587 /* Populate pd_name and pd_unit */
3590 name = device_get_name(dinfo->cfg.dev);
3593 db_kprintf("%s%d@pci%d:%d:%d:%d:\tclass=0x%06x card=0x%08x "
3594 "chip=0x%08x rev=0x%02x hdr=0x%02x\n",
3595 (name && *name) ? name : "none",
3596 (name && *name) ? (int)device_get_unit(dinfo->cfg.dev) :
3598 p->pc_sel.pc_domain, p->pc_sel.pc_bus, p->pc_sel.pc_dev,
3599 p->pc_sel.pc_func, (p->pc_class << 16) |
3600 (p->pc_subclass << 8) | p->pc_progif,
3601 (p->pc_subdevice << 16) | p->pc_subvendor,
3602 (p->pc_device << 16) | p->pc_vendor,
3603 p->pc_revid, p->pc_hdr);
/*
 * Lazily allocate a resource for a BAR that was never added to the
 * resource list: size the BAR by writing all-ones, sanity-check the
 * requested type against the BAR type, allocate from the parent with the
 * BAR's real size/alignment, then program the BAR with the result.
 */
3609 static struct resource *
3610 pci_alloc_map(device_t dev, device_t child, int type, int *rid,
3611 u_long start, u_long end, u_long count, u_int flags)
3613 struct pci_devinfo *dinfo = device_get_ivars(child);
3614 struct resource_list *rl = &dinfo->resources;
3615 struct resource_list_entry *rle;
3616 struct resource *res;
3617 pci_addr_t map, testval;
3621 * Weed out the bogons, and figure out how large the BAR/map
3622 * is. Bars that read back 0 here are bogus and unimplemented.
3623 * Note: atapci in legacy mode are special and handled elsewhere
3624 * in the code. If you have a atapci device in legacy mode and
3625 * it fails here, that other code is broken.
/* Standard BAR sizing probe: save, write 0xffffffff, read back the mask. */
3628 map = pci_read_config(child, *rid, 4);
3629 pci_write_config(child, *rid, 0xffffffff, 4);
3630 testval = pci_read_config(child, *rid, 4);
/* 64-bit memory BAR: fold in the upper dword of the saved base. */
3631 if (pci_maprange(testval) == 64)
3632 map |= (pci_addr_t)pci_read_config(child, *rid + 4, 4) << 32;
3633 if (pci_mapbase(testval) == 0)
3637 * Restore the original value of the BAR. We may have reprogrammed
3638 * the BAR of the low-level console device and when booting verbose,
3639 * we need the console device addressable.
3641 pci_write_config(child, *rid, map, 4);
3643 if (PCI_BAR_MEM(testval)) {
3644 if (type != SYS_RES_MEMORY) {
3647 "child %s requested type %d for rid %#x,"
3648 " but the BAR says it is an memio\n",
3649 device_get_nameunit(child), type, *rid);
3653 if (type != SYS_RES_IOPORT) {
3656 "child %s requested type %d for rid %#x,"
3657 " but the BAR says it is an ioport\n",
3658 device_get_nameunit(child), type, *rid);
3663 * For real BARs, we need to override the size that
3664 * the driver requests, because that's what the BAR
3665 * actually uses and we would otherwise have a
3666 * situation where we might allocate the excess to
3667 * another driver, which won't work.
3669 mapsize = pci_mapsize(testval);
3670 count = 1UL << mapsize;
/* BAR bases must be naturally aligned to the BAR size. */
3671 if (RF_ALIGNMENT(flags) < mapsize)
3672 flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize);
3673 if (PCI_BAR_MEM(testval) && (testval & PCIM_BAR_MEM_PREFETCH))
3674 flags |= RF_PREFETCHABLE;
3677 * Allocate enough resource, and then write back the
3678 * appropriate bar for that resource.
3680 res = BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid,
3681 start, end, count, flags, -1);
3683 device_printf(child,
3684 "%#lx bytes of rid %#x res %d failed (%#lx, %#lx).\n",
3685 count, *rid, type, start, end);
3688 resource_list_add(rl, type, *rid, start, end, count, -1);
3689 rle = resource_list_find(rl, type, *rid);
3691 panic("pci_alloc_map: unexpectedly can't find resource.");
/* Record the addresses the allocator actually chose. */
3693 rle->start = rman_get_start(res);
3694 rle->end = rman_get_end(res);
3697 device_printf(child,
3698 "Lazy allocation of %#lx bytes rid %#x type %d at %#lx\n",
3699 count, *rid, type, rman_get_start(res));
/* Program the BAR (both halves for a 64-bit BAR) with the new base. */
3700 map = rman_get_start(res);
3702 pci_write_config(child, *rid, map, 4);
3703 if (pci_maprange(testval) == 64)
3704 pci_write_config(child, *rid + 4, map >> 32, 4);
/*
 * Bus alloc_resource method.  For direct children: refuse the legacy IRQ
 * once MSI/MSI-X is allocated, lazily route an interrupt if needed,
 * enable I/O decoding and fall back to pci_alloc_map() for unlisted BARs,
 * and activate already-reserved entries; otherwise defer to the list.
 */
3710 pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
3711 u_long start, u_long end, u_long count, u_int flags, int cpuid)
3713 struct pci_devinfo *dinfo = device_get_ivars(child);
3714 struct resource_list *rl = &dinfo->resources;
3715 struct resource_list_entry *rle;
3716 pcicfgregs *cfg = &dinfo->cfg;
3719 * Perform lazy resource allocation
3721 if (device_get_parent(child) == dev) {
3725 * Can't alloc legacy interrupt once MSI messages
3726 * have been allocated.
3728 if (*rid == 0 && (cfg->msi.msi_alloc > 0 ||
3729 cfg->msix.msix_alloc > 0))
3732 * If the child device doesn't have an
3733 * interrupt routed and is deserving of an
3734 * interrupt, try to assign it one.
3736 if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) &&
3738 pci_assign_interrupt(dev, child, 0);
3740 case SYS_RES_IOPORT:
3741 case SYS_RES_MEMORY:
/* Only rids that correspond to real BARs get the lazy-map treatment. */
3742 if (*rid < PCIR_BAR(cfg->nummaps)) {
3744 * Enable the I/O mode. We should
3745 * also be assigning resources too
3746 * when none are present. The
3747 * resource_list_alloc kind of sorta does
3750 if (PCI_ENABLE_IO(dev, child, type))
/* No list entry yet: size and program the BAR ourselves. */
3753 rle = resource_list_find(rl, type, *rid);
3755 return (pci_alloc_map(dev, child, type, rid,
3756 start, end, count, flags));
3760 * If we've already allocated the resource, then
3761 * return it now. But first we may need to activate
3762 * it, since we don't allocate the resource as active
3763 * above. Normally this would be done down in the
3764 * nexus, but since we short-circuit that path we have
3765 * to do its job here. Not sure if we should kfree the
3766 * resource if it fails to activate.
3768 rle = resource_list_find(rl, type, *rid);
3769 if (rle != NULL && rle->res != NULL) {
3771 device_printf(child,
3772 "Reserved %#lx bytes for rid %#x type %d at %#lx\n",
3773 rman_get_size(rle->res), *rid, type,
3774 rman_get_start(rle->res));
3775 if ((flags & RF_ACTIVE) &&
3776 bus_generic_activate_resource(dev, child, type,
3777 *rid, rle->res) != 0)
3782 return (resource_list_alloc(rl, dev, child, type, rid,
3783 start, end, count, flags, cpuid));
/*
 * Bus delete_resource method: for a direct child, release the reserved
 * resource (refusing if the child still actively owns it), remove the
 * resource-list entry, clear the BAR, and propagate the delete upward.
 */
3787 pci_delete_resource(device_t dev, device_t child, int type, int rid)
3789 struct pci_devinfo *dinfo;
3790 struct resource_list *rl;
3791 struct resource_list_entry *rle;
3793 if (device_get_parent(child) != dev)
3796 dinfo = device_get_ivars(child);
3797 rl = &dinfo->resources;
3798 rle = resource_list_find(rl, type, rid);
/* Refuse if the resource is held by the child or still active. */
3801 if (rman_get_device(rle->res) != dev ||
3802 rman_get_flags(rle->res) & RF_ACTIVE) {
3803 device_printf(dev, "delete_resource: "
3804 "Resource still owned by child, oops. "
3805 "(type=%d, rid=%d, addr=%lx)\n",
3806 rle->type, rle->rid,
3807 rman_get_start(rle->res));
3810 bus_release_resource(dev, type, rid, rle->res);
3812 resource_list_delete(rl, type, rid);
3815 * Why do we turn off the PCI configuration BAR when we delete a
/* NOTE(review): this unconditionally zeroes config register 'rid',
 * which is only meaningful when rid actually names a BAR — the comment
 * above questions it too. */
3818 pci_write_config(child, rid, 0, 4);
3819 BUS_DELETE_RESOURCE(device_get_parent(dev), child, type, rid);
/*
 * Bus get_resource_list method: hand back the child's per-device resource
 * list kept in its pci_devinfo.
 */
3822 struct resource_list *
3823 pci_get_resource_list (device_t dev, device_t child)
3825 struct pci_devinfo *dinfo = device_get_ivars(child);
3830 return (&dinfo->resources);
/*
 * Config-space read accessor: forward to the parent bridge using the
 * child's cached bus/slot/function address.
 */
3834 pci_read_config_method(device_t dev, device_t child, int reg, int width)
3836 struct pci_devinfo *dinfo = device_get_ivars(child);
3837 pcicfgregs *cfg = &dinfo->cfg;
3839 return (PCIB_READ_CONFIG(device_get_parent(dev),
3840 cfg->bus, cfg->slot, cfg->func, reg, width));
/*
 * Config-space write accessor: forward to the parent bridge using the
 * child's cached bus/slot/function address.
 */
3844 pci_write_config_method(device_t dev, device_t child, int reg,
3845 uint32_t val, int width)
3847 struct pci_devinfo *dinfo = device_get_ivars(child);
3848 pcicfgregs *cfg = &dinfo->cfg;
3850 PCIB_WRITE_CONFIG(device_get_parent(dev),
3851 cfg->bus, cfg->slot, cfg->func, reg, val, width);
/*
 * Format the child's bus location ("slot=S function=F") into buf for
 * devctl/device-location reporting.
 */
3855 pci_child_location_str_method(device_t dev, device_t child, char *buf,
3859 ksnprintf(buf, buflen, "slot=%d function=%d", pci_get_slot(child),
3860 pci_get_function(child));
/*
 * Format the child's plug-and-play identifiers (vendor/device/subids and
 * the packed 24-bit class code) into buf for driver matching.
 */
3865 pci_child_pnpinfo_str_method(device_t dev, device_t child, char *buf,
3868 struct pci_devinfo *dinfo;
3871 dinfo = device_get_ivars(child);
3873 ksnprintf(buf, buflen, "vendor=0x%04x device=0x%04x subvendor=0x%04x "
3874 "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device,
3875 cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass,
/*
 * Bus assign_interrupt method: delegate interrupt routing for the child
 * to the parent bridge's ROUTE_INTERRUPT implementation.
 */
3881 pci_assign_interrupt_method(device_t dev, device_t child)
3883 struct pci_devinfo *dinfo = device_get_ivars(child);
3884 pcicfgregs *cfg = &dinfo->cfg;
3886 return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child,
/*
 * Module event handler: on load, initialize the global device queue,
 * create the /dev control node and pull in the vendor database; on
 * unload, destroy the control node.
 */
3891 pci_modevent(module_t mod, int what, void *arg)
3893 static struct cdev *pci_cdev;
3897 STAILQ_INIT(&pci_devq);
3899 pci_cdev = make_dev(&pcic_ops, 0, UID_ROOT, GID_WHEEL, 0644,
3901 pci_load_vendor_data();
3905 destroy_dev(pci_cdev);
3913 pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo)
3918 * Only do header type 0 devices. Type 1 devices are bridges,
3919 * which we know need special treatment. Type 2 devices are
3920 * cardbus bridges which also require special treatment.
3921 * Other types are unknown, and we err on the side of safety
3924 if (dinfo->cfg.hdrtype != 0)
3928 * Restore the device to full power mode. We must do this
3929 * before we restore the registers because moving from D3 to
3930 * D0 will cause the chip's BARs and some other registers to
3931 * be reset to some unknown power on reset values. Cut down
3932 * the noise on boot by doing nothing if we are already in
3935 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
3936 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
3938 for (i = 0; i < dinfo->cfg.nummaps; i++)
3939 pci_write_config(dev, PCIR_BAR(i), dinfo->cfg.bar[i], 4);
3940 pci_write_config(dev, PCIR_BIOS, dinfo->cfg.bios, 4);
3941 pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2);
3942 pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1);
3943 pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1);
3944 pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1);
3945 pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1);
3946 pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1);
3947 pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1);
3948 pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1);
3949 pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1);
3951 /* Restore MSI and MSI-X configurations if they are present. */
3952 if (dinfo->cfg.msi.msi_location != 0)
3953 pci_resume_msi(dev);
3954 if (dinfo->cfg.msix.msix_location != 0)
3955 pci_resume_msix(dev);
3959 pci_cfg_save(device_t dev, struct pci_devinfo *dinfo, int setstate)
3966 * Only do header type 0 devices. Type 1 devices are bridges, which
3967 * we know need special treatment. Type 2 devices are cardbus bridges
3968 * which also require special treatment. Other types are unknown, and
3969 * we err on the side of safety by ignoring them. Powering down
3970 * bridges should not be undertaken lightly.
3972 if (dinfo->cfg.hdrtype != 0)
3974 for (i = 0; i < dinfo->cfg.nummaps; i++)
3975 dinfo->cfg.bar[i] = pci_read_config(dev, PCIR_BAR(i), 4);
3976 dinfo->cfg.bios = pci_read_config(dev, PCIR_BIOS, 4);
3979 * Some drivers apparently write to these registers w/o updating our
3980 * cached copy. No harm happens if we update the copy, so do so here
3981 * so we can restore them. The COMMAND register is modified by the
3982 * bus w/o updating the cache. This should represent the normally
3983 * writable portion of the 'defined' part of type 0 headers. In
3984 * theory we also need to save/restore the PCI capability structures
3985 * we know about, but apart from power we don't know any that are
3988 dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
3989 dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2);
3990 dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2);
3991 dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2);
3992 dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2);
3993 dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1);
3994 dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1);
3995 dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1);
3996 dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1);
3997 dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
3998 dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
3999 dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1);
4000 dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1);
4001 dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1);
4002 dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1);
4005 * don't set the state for display devices, base peripherals and
4006 * memory devices since bad things happen when they are powered down.
4007 * We should (a) have drivers that can easily detach and (b) use
4008 * generic drivers for these devices so that some device actually
4009 * attaches. We need to make sure that when we implement (a) we don't
4010 * power the device down on a reattach.
4012 cls = pci_get_class(dev);
4015 switch (pci_do_power_nodriver)
4017 case 0: /* NO powerdown at all */
4019 case 1: /* Conservative about what to power down */
4020 if (cls == PCIC_STORAGE)
4023 case 2: /* Agressive about what to power down */
4024 if (cls == PCIC_DISPLAY || cls == PCIC_MEMORY ||
4025 cls == PCIC_BASEPERIPH)
4028 case 3: /* Power down everything */
4032 * PCI spec says we can only go into D3 state from D0 state.
4033 * Transition from D[12] into D0 before going to D3 state.
4035 ps = pci_get_powerstate(dev);
4036 if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3)
4037 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
4038 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3)
4039 pci_set_powerstate(dev, PCI_POWERSTATE_D3);
#ifdef COMPAT_OLDPCI

/*
 * Locate the parent of a PCI device by scanning the PCI devlist
 * and return the entry for the parent.
 * For devices on PCI Bus 0 (the host bus), this is the PCI Host.
 * For devices on secondary PCI busses, this is that bus' PCI-PCI Bridge.
 *
 * NOTE(review): the return statements, the bus > 0 guard and the final
 * NULL fallthrough were reconstructed from elided lines; confirm
 * against the repository.
 */
pcicfgregs *
pci_devlist_get_parent(pcicfgregs *cfg)
{
	struct devlist *devlist_head;
	struct pci_devinfo *dinfo;
	pcicfgregs *bridge_cfg;
	int i;

	dinfo = STAILQ_FIRST(devlist_head = &pci_devq);

	/* If the device is on PCI bus 0, look for the host */
	if (cfg->bus == 0) {
		for (i = 0; (dinfo != NULL) && (i < pci_numdevs);
		    dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {
			bridge_cfg = &dinfo->cfg;
			if (bridge_cfg->baseclass == PCIC_BRIDGE
			    && bridge_cfg->subclass == PCIS_BRIDGE_HOST
			    && bridge_cfg->bus == cfg->bus) {
				return bridge_cfg;
			}
		}
	}

	/* If the device is not on PCI bus 0, look for the PCI-PCI bridge */
	if (cfg->bus > 0) {
		for (i = 0; (dinfo != NULL) && (i < pci_numdevs);
		    dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {
			bridge_cfg = &dinfo->cfg;
			if (bridge_cfg->baseclass == PCIC_BRIDGE
			    && bridge_cfg->subclass == PCIS_BRIDGE_PCI
			    && bridge_cfg->secondarybus == cfg->bus) {
				return bridge_cfg;
			}
		}
	}

	/* No matching bridge found. */
	return NULL;
}

#endif /* COMPAT_OLDPCI */
4092 pci_alloc_1intr(device_t dev, int msi_enable, int *rid0, u_int *flags0)
4099 type = PCI_INTR_TYPE_LEGACY;
4100 flags = RF_SHAREABLE | RF_ACTIVE;
4102 ksnprintf(env, sizeof(env), "hw.%s.msi.enable",
4103 device_get_nameunit(dev));
4104 kgetenv_int(env, &msi_enable);
4109 ksnprintf(env, sizeof(env), "hw.%s.msi.cpu",
4110 device_get_nameunit(dev));
4111 kgetenv_int(env, &cpu);
4115 if (pci_alloc_msi(dev, &rid, 1, cpu) == 0) {
4116 flags &= ~RF_SHAREABLE;
4117 type = PCI_INTR_TYPE_MSI;