msix: Make sure vectors are not allocated before setup/teardown
[dragonfly.git] / sys / bus / pci / pci.c
CommitLineData
4d28e78f
SZ
1/*-
2 * Copyright (c) 1997, Stefan Esser <se@kfreebsd.org>
3 * Copyright (c) 2000, Michael Smith <msmith@kfreebsd.org>
4 * Copyright (c) 2000, BSDi
984263bc
MD
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
12 * disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
83c1faaa
SW
27 *
28 * $FreeBSD: src/sys/dev/pci/pci.c,v 1.355.2.9.2.1 2009/04/15 03:14:26 kensmith Exp $
984263bc
MD
29 */
30
4d28e78f 31#include "opt_bus.h"
92683a33 32#include "opt_acpi.h"
6951547b 33#include "opt_compat_oldpci.h"
984263bc
MD
34
35#include <sys/param.h>
36#include <sys/systm.h>
37#include <sys/malloc.h>
38#include <sys/module.h>
4d28e78f 39#include <sys/linker.h>
984263bc
MD
40#include <sys/fcntl.h>
41#include <sys/conf.h>
42#include <sys/kernel.h>
43#include <sys/queue.h>
638744c5 44#include <sys/sysctl.h>
4d28e78f 45#include <sys/endian.h>
d2f04fe0 46#include <sys/machintr.h>
984263bc 47
941460da
SZ
48#include <machine/msi_machdep.h>
49
984263bc
MD
50#include <vm/vm.h>
51#include <vm/pmap.h>
52#include <vm/vm_extern.h>
53
54#include <sys/bus.h>
984263bc 55#include <sys/rman.h>
4d28e78f 56#include <sys/device.h>
984263bc 57
dc5a7bd2 58#include <sys/pciio.h>
4d28e78f
SZ
59#include <bus/pci/pcireg.h>
60#include <bus/pci/pcivar.h>
61#include <bus/pci/pci_private.h>
984263bc 62
4a5a2d63 63#include "pcib_if.h"
4d28e78f
SZ
64#include "pci_if.h"
65
66#ifdef __HAVE_ACPI
67#include <contrib/dev/acpica/acpi.h>
68#include "acpi_if.h"
69#else
70#define ACPI_PWR_FOR_SLEEP(x, y, z)
71#endif
72
35b72619
SZ
73extern struct dev_ops pcic_ops; /* XXX */
74
3a6dc23c
SZ
75typedef void (*pci_read_cap_t)(device_t, int, int, pcicfgregs *);
76
4d28e78f
SZ
77static uint32_t pci_mapbase(unsigned mapreg);
78static const char *pci_maptype(unsigned mapreg);
79static int pci_mapsize(unsigned testval);
80static int pci_maprange(unsigned mapreg);
81static void pci_fixancient(pcicfgregs *cfg);
82
83static int pci_porten(device_t pcib, int b, int s, int f);
84static int pci_memen(device_t pcib, int b, int s, int f);
85static void pci_assign_interrupt(device_t bus, device_t dev,
86 int force_route);
87static int pci_add_map(device_t pcib, device_t bus, device_t dev,
88 int b, int s, int f, int reg,
89 struct resource_list *rl, int force, int prefetch);
90static int pci_probe(device_t dev);
91static int pci_attach(device_t dev);
11a49859 92static void pci_child_detached(device_t, device_t);
4d28e78f
SZ
93static void pci_load_vendor_data(void);
94static int pci_describe_parse_line(char **ptr, int *vendor,
95 int *device, char **desc);
96static char *pci_describe_device(device_t dev);
97static int pci_modevent(module_t mod, int what, void *arg);
98static void pci_hdrtypedata(device_t pcib, int b, int s, int f,
99 pcicfgregs *cfg);
3a6dc23c 100static void pci_read_capabilities(device_t pcib, pcicfgregs *cfg);
4d28e78f
SZ
101static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg,
102 int reg, uint32_t *data);
103#if 0
104static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg,
105 int reg, uint32_t data);
106#endif
107static void pci_read_vpd(device_t pcib, pcicfgregs *cfg);
108static void pci_disable_msi(device_t dev);
109static void pci_enable_msi(device_t dev, uint64_t address,
110 uint16_t data);
cf8f3133 111static void pci_setup_msix_vector(device_t dev, u_int index,
4d28e78f 112 uint64_t address, uint32_t data);
cf8f3133
SZ
113static void pci_mask_msix_vector(device_t dev, u_int index);
114static void pci_unmask_msix_vector(device_t dev, u_int index);
31646171 115static void pci_mask_msix_allvectors(device_t dev);
f9c942fb 116static struct msix_vector *pci_find_msix_vector(device_t dev, int rid);
4d28e78f
SZ
117static int pci_msi_blacklisted(void);
118static void pci_resume_msi(device_t dev);
119static void pci_resume_msix(device_t dev);
d85e7311
SZ
120static int pcie_slotimpl(const pcicfgregs *);
121static void pci_print_verbose_expr(const pcicfgregs *);
4d28e78f 122
3a6dc23c
SZ
123static void pci_read_cap_pmgt(device_t, int, int, pcicfgregs *);
124static void pci_read_cap_ht(device_t, int, int, pcicfgregs *);
125static void pci_read_cap_msi(device_t, int, int, pcicfgregs *);
126static void pci_read_cap_msix(device_t, int, int, pcicfgregs *);
127static void pci_read_cap_vpd(device_t, int, int, pcicfgregs *);
128static void pci_read_cap_subvendor(device_t, int, int,
129 pcicfgregs *);
130static void pci_read_cap_pcix(device_t, int, int, pcicfgregs *);
d85e7311 131static void pci_read_cap_express(device_t, int, int, pcicfgregs *);
3a6dc23c 132
4d28e78f
SZ
133static device_method_t pci_methods[] = {
134 /* Device interface */
135 DEVMETHOD(device_probe, pci_probe),
136 DEVMETHOD(device_attach, pci_attach),
137 DEVMETHOD(device_detach, bus_generic_detach),
138 DEVMETHOD(device_shutdown, bus_generic_shutdown),
139 DEVMETHOD(device_suspend, pci_suspend),
140 DEVMETHOD(device_resume, pci_resume),
141
142 /* Bus interface */
143 DEVMETHOD(bus_print_child, pci_print_child),
144 DEVMETHOD(bus_probe_nomatch, pci_probe_nomatch),
145 DEVMETHOD(bus_read_ivar, pci_read_ivar),
146 DEVMETHOD(bus_write_ivar, pci_write_ivar),
147 DEVMETHOD(bus_driver_added, pci_driver_added),
11a49859 148 DEVMETHOD(bus_child_detached, pci_child_detached),
4d28e78f
SZ
149 DEVMETHOD(bus_setup_intr, pci_setup_intr),
150 DEVMETHOD(bus_teardown_intr, pci_teardown_intr),
151
152 DEVMETHOD(bus_get_resource_list,pci_get_resource_list),
153 DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
154 DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
155 DEVMETHOD(bus_delete_resource, pci_delete_resource),
156 DEVMETHOD(bus_alloc_resource, pci_alloc_resource),
157 DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource),
158 DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
159 DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
160 DEVMETHOD(bus_child_pnpinfo_str, pci_child_pnpinfo_str_method),
161 DEVMETHOD(bus_child_location_str, pci_child_location_str_method),
162
163 /* PCI interface */
164 DEVMETHOD(pci_read_config, pci_read_config_method),
165 DEVMETHOD(pci_write_config, pci_write_config_method),
166 DEVMETHOD(pci_enable_busmaster, pci_enable_busmaster_method),
167 DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method),
168 DEVMETHOD(pci_enable_io, pci_enable_io_method),
169 DEVMETHOD(pci_disable_io, pci_disable_io_method),
170 DEVMETHOD(pci_get_vpd_ident, pci_get_vpd_ident_method),
171 DEVMETHOD(pci_get_vpd_readonly, pci_get_vpd_readonly_method),
172 DEVMETHOD(pci_get_powerstate, pci_get_powerstate_method),
173 DEVMETHOD(pci_set_powerstate, pci_set_powerstate_method),
174 DEVMETHOD(pci_assign_interrupt, pci_assign_interrupt_method),
175 DEVMETHOD(pci_find_extcap, pci_find_extcap_method),
176 DEVMETHOD(pci_alloc_msi, pci_alloc_msi_method),
4d28e78f 177 DEVMETHOD(pci_release_msi, pci_release_msi_method),
a7854dd0
SZ
178 DEVMETHOD(pci_alloc_msix_vector, pci_alloc_msix_vector_method),
179 DEVMETHOD(pci_release_msix_vector, pci_release_msix_vector_method),
4d28e78f
SZ
180 DEVMETHOD(pci_msi_count, pci_msi_count_method),
181 DEVMETHOD(pci_msix_count, pci_msix_count_method),
182
183 { 0, 0 }
184};
185
186DEFINE_CLASS_0(pci, pci_driver, pci_methods, 0);
4a5a2d63 187
4d28e78f 188static devclass_t pci_devclass;
aa2b9d05 189DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, NULL);
4d28e78f
SZ
190MODULE_VERSION(pci, 1);
191
192static char *pci_vendordata;
193static size_t pci_vendordata_size;
dc5a7bd2 194
984263bc 195
3a6dc23c
SZ
196static const struct pci_read_cap {
197 int cap;
198 pci_read_cap_t read_cap;
199} pci_read_caps[] = {
200 { PCIY_PMG, pci_read_cap_pmgt },
201 { PCIY_HT, pci_read_cap_ht },
202 { PCIY_MSI, pci_read_cap_msi },
203 { PCIY_MSIX, pci_read_cap_msix },
204 { PCIY_VPD, pci_read_cap_vpd },
205 { PCIY_SUBVENDOR, pci_read_cap_subvendor },
206 { PCIY_PCIX, pci_read_cap_pcix },
d85e7311 207 { PCIY_EXPRESS, pci_read_cap_express },
3a6dc23c
SZ
208 { 0, NULL } /* required last entry */
209};
210
984263bc 211struct pci_quirk {
4d28e78f 212 uint32_t devid; /* Vendor/device of the card */
984263bc 213 int type;
4d28e78f
SZ
214#define PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */
215#define PCI_QUIRK_DISABLE_MSI 2 /* MSI/MSI-X doesn't work */
984263bc
MD
216 int arg1;
217 int arg2;
218};
219
220struct pci_quirk pci_quirks[] = {
4d28e78f 221 /* The Intel 82371AB and 82443MX has a map register at offset 0x90. */
984263bc
MD
222 { 0x71138086, PCI_QUIRK_MAP_REG, 0x90, 0 },
223 { 0x719b8086, PCI_QUIRK_MAP_REG, 0x90, 0 },
f1f0bfb2
JS
224 /* As does the Serverworks OSB4 (the SMBus mapping register) */
225 { 0x02001166, PCI_QUIRK_MAP_REG, 0x90, 0 },
984263bc 226
4d28e78f
SZ
227 /*
228 * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge
229 * or the CMIC-SL (AKA ServerWorks GC_LE).
230 */
231 { 0x00141166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
232 { 0x00171166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
233
234 /*
235 * MSI doesn't work on earlier Intel chipsets including
236 * E7500, E7501, E7505, 845, 865, 875/E7210, and 855.
237 */
238 { 0x25408086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
239 { 0x254c8086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
240 { 0x25508086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
241 { 0x25608086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
242 { 0x25708086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
243 { 0x25788086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
244 { 0x35808086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
245
246 /*
247 * MSI doesn't work with devices behind the AMD 8131 HT-PCIX
248 * bridge.
249 */
250 { 0x74501022, PCI_QUIRK_DISABLE_MSI, 0, 0 },
251
984263bc
MD
252 { 0 }
253};
254
255/* map register information */
4d28e78f
SZ
256#define PCI_MAPMEM 0x01 /* memory map */
257#define PCI_MAPMEMP 0x02 /* prefetchable memory map */
258#define PCI_MAPPORT 0x04 /* port map */
259
f9c942fb
SZ
260#define PCI_MSIX_RID2VEC(rid) ((rid) - 1) /* rid -> MSI-X vector # */
261#define PCI_MSIX_VEC2RID(vec) ((vec) + 1) /* MSI-X vector # -> rid */
262
4d28e78f
SZ
263struct devlist pci_devq;
264uint32_t pci_generation;
265uint32_t pci_numdevs = 0;
266static int pcie_chipset, pcix_chipset;
267
268/* sysctl vars */
269SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD, 0, "PCI bus tuning parameters");
270
271static int pci_enable_io_modes = 1;
272TUNABLE_INT("hw.pci.enable_io_modes", &pci_enable_io_modes);
273SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RW,
274 &pci_enable_io_modes, 1,
275 "Enable I/O and memory bits in the config register. Some BIOSes do not\n\
276enable these bits correctly. We'd like to do this all the time, but there\n\
277are some peripherals that this causes problems with.");
984263bc 278
638744c5
HT
279static int pci_do_power_nodriver = 0;
280TUNABLE_INT("hw.pci.do_power_nodriver", &pci_do_power_nodriver);
281SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RW,
282 &pci_do_power_nodriver, 0,
283 "Place a function into D3 state when no driver attaches to it. 0 means\n\
284disable. 1 means conservatively place devices into D3 state. 2 means\n\
6699890a 285aggressively place devices into D3 state. 3 means put absolutely everything\n\
638744c5
HT
286in D3 state.");
287
4d28e78f
SZ
288static int pci_do_power_resume = 1;
289TUNABLE_INT("hw.pci.do_power_resume", &pci_do_power_resume);
290SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RW,
291 &pci_do_power_resume, 1,
292 "Transition from D3 -> D0 on resume.");
293
294static int pci_do_msi = 1;
295TUNABLE_INT("hw.pci.enable_msi", &pci_do_msi);
296SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RW, &pci_do_msi, 1,
297 "Enable support for MSI interrupts");
298
6475434e
SZ
299static int pci_do_msix = 0;
300#if 0
4d28e78f
SZ
301TUNABLE_INT("hw.pci.enable_msix", &pci_do_msix);
302SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RW, &pci_do_msix, 1,
303 "Enable support for MSI-X interrupts");
6475434e 304#endif
4d28e78f
SZ
305
306static int pci_honor_msi_blacklist = 1;
307TUNABLE_INT("hw.pci.honor_msi_blacklist", &pci_honor_msi_blacklist);
308SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RD,
309 &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI");
310
2c3d7ac8
SZ
311static int pci_msi_cpuid;
312
4d28e78f
SZ
313/* Find a device_t by bus/slot/function in domain 0 */
314
315device_t
316pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func)
317{
318
319 return (pci_find_dbsf(0, bus, slot, func));
320}
321
322/* Find a device_t by domain/bus/slot/function */
323
984263bc 324device_t
4d28e78f 325pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func)
984263bc
MD
326{
327 struct pci_devinfo *dinfo;
328
329 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
4d28e78f
SZ
330 if ((dinfo->cfg.domain == domain) &&
331 (dinfo->cfg.bus == bus) &&
984263bc
MD
332 (dinfo->cfg.slot == slot) &&
333 (dinfo->cfg.func == func)) {
334 return (dinfo->cfg.dev);
335 }
336 }
337
338 return (NULL);
339}
340
4d28e78f
SZ
341/* Find a device_t by vendor/device ID */
342
984263bc 343device_t
4d28e78f 344pci_find_device(uint16_t vendor, uint16_t device)
984263bc
MD
345{
346 struct pci_devinfo *dinfo;
347
348 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
349 if ((dinfo->cfg.vendor == vendor) &&
350 (dinfo->cfg.device == device)) {
351 return (dinfo->cfg.dev);
352 }
353 }
354
355 return (NULL);
356}
357
358/* return base address of memory or port map */
359
4d28e78f
SZ
360static uint32_t
361pci_mapbase(uint32_t mapreg)
984263bc 362{
4d28e78f
SZ
363
364 if (PCI_BAR_MEM(mapreg))
365 return (mapreg & PCIM_BAR_MEM_BASE);
366 else
367 return (mapreg & PCIM_BAR_IO_BASE);
984263bc
MD
368}
369
370/* return map type of memory or port map */
371
4d28e78f 372static const char *
984263bc
MD
373pci_maptype(unsigned mapreg)
374{
984263bc 375
4d28e78f
SZ
376 if (PCI_BAR_IO(mapreg))
377 return ("I/O Port");
378 if (mapreg & PCIM_BAR_MEM_PREFETCH)
379 return ("Prefetchable Memory");
380 return ("Memory");
984263bc
MD
381}
382
383/* return log2 of map size decoded for memory or port map */
384
385static int
4d28e78f 386pci_mapsize(uint32_t testval)
984263bc
MD
387{
388 int ln2size;
389
390 testval = pci_mapbase(testval);
391 ln2size = 0;
392 if (testval != 0) {
393 while ((testval & 1) == 0)
394 {
395 ln2size++;
396 testval >>= 1;
397 }
398 }
399 return (ln2size);
400}
401
402/* return log2 of address range supported by map register */
403
404static int
405pci_maprange(unsigned mapreg)
406{
407 int ln2range = 0;
4d28e78f
SZ
408
409 if (PCI_BAR_IO(mapreg))
984263bc 410 ln2range = 32;
4d28e78f
SZ
411 else
412 switch (mapreg & PCIM_BAR_MEM_TYPE) {
413 case PCIM_BAR_MEM_32:
414 ln2range = 32;
415 break;
416 case PCIM_BAR_MEM_1MB:
417 ln2range = 20;
418 break;
419 case PCIM_BAR_MEM_64:
420 ln2range = 64;
421 break;
422 }
984263bc
MD
423 return (ln2range);
424}
425
426/* adjust some values from PCI 1.0 devices to match 2.0 standards ... */
427
428static void
429pci_fixancient(pcicfgregs *cfg)
430{
431 if (cfg->hdrtype != 0)
432 return;
433
434 /* PCI to PCI bridges use header type 1 */
435 if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI)
436 cfg->hdrtype = 1;
437}
438
984263bc
MD
439/* extract header type specific config data */
440
441static void
4a5a2d63 442pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg)
984263bc 443{
4d28e78f 444#define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
984263bc
MD
445 switch (cfg->hdrtype) {
446 case 0:
4a5a2d63
JS
447 cfg->subvendor = REG(PCIR_SUBVEND_0, 2);
448 cfg->subdevice = REG(PCIR_SUBDEV_0, 2);
984263bc
MD
449 cfg->nummaps = PCI_MAXMAPS_0;
450 break;
451 case 1:
984263bc 452 cfg->nummaps = PCI_MAXMAPS_1;
6951547b
SZ
453#ifdef COMPAT_OLDPCI
454 cfg->secondarybus = REG(PCIR_SECBUS_1, 1);
455#endif
984263bc
MD
456 break;
457 case 2:
4a5a2d63
JS
458 cfg->subvendor = REG(PCIR_SUBVEND_2, 2);
459 cfg->subdevice = REG(PCIR_SUBDEV_2, 2);
984263bc 460 cfg->nummaps = PCI_MAXMAPS_2;
6951547b
SZ
461#ifdef COMPAT_OLDPCI
462 cfg->secondarybus = REG(PCIR_SECBUS_2, 1);
463#endif
984263bc
MD
464 break;
465 }
4a5a2d63 466#undef REG
984263bc
MD
467}
468
4d28e78f 469/* read configuration header into pcicfgregs structure */
22457186 470struct pci_devinfo *
4d28e78f 471pci_read_device(device_t pcib, int d, int b, int s, int f, size_t size)
984263bc 472{
4d28e78f 473#define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
984263bc
MD
474 pcicfgregs *cfg = NULL;
475 struct pci_devinfo *devlist_entry;
476 struct devlist *devlist_head;
477
478 devlist_head = &pci_devq;
479
480 devlist_entry = NULL;
481
4d28e78f 482 if (REG(PCIR_DEVVENDOR, 4) != -1) {
efda3bd0 483 devlist_entry = kmalloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
984263bc
MD
484
485 cfg = &devlist_entry->cfg;
4d28e78f
SZ
486
487 cfg->domain = d;
4a5a2d63
JS
488 cfg->bus = b;
489 cfg->slot = s;
490 cfg->func = f;
491 cfg->vendor = REG(PCIR_VENDOR, 2);
492 cfg->device = REG(PCIR_DEVICE, 2);
493 cfg->cmdreg = REG(PCIR_COMMAND, 2);
494 cfg->statreg = REG(PCIR_STATUS, 2);
495 cfg->baseclass = REG(PCIR_CLASS, 1);
496 cfg->subclass = REG(PCIR_SUBCLASS, 1);
497 cfg->progif = REG(PCIR_PROGIF, 1);
498 cfg->revid = REG(PCIR_REVID, 1);
e126caf1 499 cfg->hdrtype = REG(PCIR_HDRTYPE, 1);
4a5a2d63
JS
500 cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1);
501 cfg->lattimer = REG(PCIR_LATTIMER, 1);
502 cfg->intpin = REG(PCIR_INTPIN, 1);
503 cfg->intline = REG(PCIR_INTLINE, 1);
984263bc 504
4a5a2d63
JS
505 cfg->mingnt = REG(PCIR_MINGNT, 1);
506 cfg->maxlat = REG(PCIR_MAXLAT, 1);
984263bc
MD
507
508 cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0;
509 cfg->hdrtype &= ~PCIM_MFDEV;
510
511 pci_fixancient(cfg);
4a5a2d63 512 pci_hdrtypedata(pcib, b, s, f, cfg);
4d28e78f 513
3a6dc23c 514 pci_read_capabilities(pcib, cfg);
984263bc
MD
515
516 STAILQ_INSERT_TAIL(devlist_head, devlist_entry, pci_links);
517
4d28e78f 518 devlist_entry->conf.pc_sel.pc_domain = cfg->domain;
984263bc
MD
519 devlist_entry->conf.pc_sel.pc_bus = cfg->bus;
520 devlist_entry->conf.pc_sel.pc_dev = cfg->slot;
521 devlist_entry->conf.pc_sel.pc_func = cfg->func;
522 devlist_entry->conf.pc_hdr = cfg->hdrtype;
523
524 devlist_entry->conf.pc_subvendor = cfg->subvendor;
525 devlist_entry->conf.pc_subdevice = cfg->subdevice;
526 devlist_entry->conf.pc_vendor = cfg->vendor;
527 devlist_entry->conf.pc_device = cfg->device;
528
529 devlist_entry->conf.pc_class = cfg->baseclass;
530 devlist_entry->conf.pc_subclass = cfg->subclass;
531 devlist_entry->conf.pc_progif = cfg->progif;
532 devlist_entry->conf.pc_revid = cfg->revid;
533
534 pci_numdevs++;
535 pci_generation++;
536 }
537 return (devlist_entry);
538#undef REG
539}
540
3a6dc23c
SZ
541static int
542pci_fixup_nextptr(int *nextptr0)
543{
544 int nextptr = *nextptr0;
545
546 /* "Next pointer" is only one byte */
547 KASSERT(nextptr <= 0xff, ("Illegal next pointer %d\n", nextptr));
548
549 if (nextptr & 0x3) {
550 /*
551 * PCI local bus spec 3.0:
552 *
553 * "... The bottom two bits of all pointers are reserved
554 * and must be implemented as 00b although software must
555 * mask them to allow for future uses of these bits ..."
556 */
557 if (bootverbose) {
558 kprintf("Illegal PCI extended capability "
559 "offset, fixup 0x%02x -> 0x%02x\n",
560 nextptr, nextptr & ~0x3);
561 }
562 nextptr &= ~0x3;
563 }
564 *nextptr0 = nextptr;
565
566 if (nextptr < 0x40) {
567 if (nextptr != 0) {
568 kprintf("Illegal PCI extended capability "
569 "offset 0x%02x", nextptr);
570 }
571 return 0;
572 }
573 return 1;
574}
575
984263bc 576static void
3a6dc23c 577pci_read_cap_pmgt(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
984263bc 578{
3a6dc23c
SZ
579#define REG(n, w) \
580 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
581
582 struct pcicfg_pp *pp = &cfg->pp;
583
584 if (pp->pp_cap)
585 return;
586
587 pp->pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
588 pp->pp_status = ptr + PCIR_POWER_STATUS;
589 pp->pp_pmcsr = ptr + PCIR_POWER_PMCSR;
590
591 if ((nextptr - ptr) > PCIR_POWER_DATA) {
592 /*
593 * XXX
594 * We should write to data_select and read back from
595 * data_scale to determine whether data register is
596 * implemented.
597 */
598#ifdef foo
599 pp->pp_data = ptr + PCIR_POWER_DATA;
600#else
601 pp->pp_data = 0;
602#endif
603 }
604
605#undef REG
606}
607
608static void
609pci_read_cap_ht(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
610{
b2b3ffcd 611#if defined(__i386__) || defined(__x86_64__)
3a6dc23c
SZ
612
613#define REG(n, w) \
614 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
615
616 struct pcicfg_ht *ht = &cfg->ht;
4d28e78f 617 uint64_t addr;
4d28e78f 618 uint32_t val;
3a6dc23c
SZ
619
620 /* Determine HT-specific capability type. */
621 val = REG(ptr + PCIR_HT_COMMAND, 2);
622
941460da
SZ
623 if ((val & 0xe000) == PCIM_HTCAP_SLAVE)
624 cfg->ht.ht_slave = ptr;
625
3a6dc23c
SZ
626 if ((val & PCIM_HTCMD_CAP_MASK) != PCIM_HTCAP_MSI_MAPPING)
627 return;
628
629 if (!(val & PCIM_HTCMD_MSI_FIXED)) {
630 /* Sanity check the mapping window. */
631 addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI, 4);
632 addr <<= 32;
633 addr |= REG(ptr + PCIR_HTMSI_ADDRESS_LO, 4);
941460da 634 if (addr != MSI_X86_ADDR_BASE) {
3a6dc23c
SZ
635 device_printf(pcib, "HT Bridge at pci%d:%d:%d:%d "
636 "has non-default MSI window 0x%llx\n",
637 cfg->domain, cfg->bus, cfg->slot, cfg->func,
638 (long long)addr);
639 }
640 } else {
941460da 641 addr = MSI_X86_ADDR_BASE;
3a6dc23c
SZ
642 }
643
644 ht->ht_msimap = ptr;
645 ht->ht_msictrl = val;
646 ht->ht_msiaddr = addr;
647
648#undef REG
649
b2b3ffcd 650#endif /* __i386__ || __x86_64__ */
3a6dc23c
SZ
651}
652
653static void
654pci_read_cap_msi(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
655{
656#define REG(n, w) \
657 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
658
659 struct pcicfg_msi *msi = &cfg->msi;
660
661 msi->msi_location = ptr;
662 msi->msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2);
663 msi->msi_msgnum = 1 << ((msi->msi_ctrl & PCIM_MSICTRL_MMC_MASK) >> 1);
664
665#undef REG
666}
667
668static void
669pci_read_cap_msix(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
670{
671#define REG(n, w) \
672 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
673
674 struct pcicfg_msix *msix = &cfg->msix;
675 uint32_t val;
676
677 msix->msix_location = ptr;
678 msix->msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2);
679 msix->msix_msgnum = (msix->msix_ctrl & PCIM_MSIXCTRL_TABLE_SIZE) + 1;
680
681 val = REG(ptr + PCIR_MSIX_TABLE, 4);
682 msix->msix_table_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK);
683 msix->msix_table_offset = val & ~PCIM_MSIX_BIR_MASK;
684
685 val = REG(ptr + PCIR_MSIX_PBA, 4);
686 msix->msix_pba_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK);
687 msix->msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK;
688
f9c942fb
SZ
689 TAILQ_INIT(&msix->msix_vectors);
690
3a6dc23c
SZ
691#undef REG
692}
693
694static void
695pci_read_cap_vpd(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
696{
697 cfg->vpd.vpd_reg = ptr;
698}
699
700static void
701pci_read_cap_subvendor(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
702{
703#define REG(n, w) \
704 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
705
706 /* Should always be true. */
707 if ((cfg->hdrtype & PCIM_HDRTYPE) == 1) {
708 uint32_t val;
709
710 val = REG(ptr + PCIR_SUBVENDCAP_ID, 4);
711 cfg->subvendor = val & 0xffff;
712 cfg->subdevice = val >> 16;
713 }
714
715#undef REG
716}
717
718static void
719pci_read_cap_pcix(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
720{
721 /*
722 * Assume we have a PCI-X chipset if we have
723 * at least one PCI-PCI bridge with a PCI-X
724 * capability. Note that some systems with
725 * PCI-express or HT chipsets might match on
726 * this check as well.
727 */
728 if ((cfg->hdrtype & PCIM_HDRTYPE) == 1)
729 pcix_chipset = 1;
d85e7311
SZ
730
731 cfg->pcix.pcix_ptr = ptr;
732}
733
734static int
735pcie_slotimpl(const pcicfgregs *cfg)
736{
737 const struct pcicfg_expr *expr = &cfg->expr;
738 uint16_t port_type;
739
740 /*
741 * Only version 1 can be parsed currently
742 */
743 if ((expr->expr_cap & PCIEM_CAP_VER_MASK) != PCIEM_CAP_VER_1)
744 return 0;
745
746 /*
747 * - Slot implemented bit is meaningful iff current port is
748 * root port or down stream port.
749 * - Testing for root port or down stream port is meanningful
750 * iff PCI configure has type 1 header.
751 */
752
753 if (cfg->hdrtype != 1)
754 return 0;
755
756 port_type = expr->expr_cap & PCIEM_CAP_PORT_TYPE;
757 if (port_type != PCIE_ROOT_PORT && port_type != PCIE_DOWN_STREAM_PORT)
758 return 0;
759
760 if (!(expr->expr_cap & PCIEM_CAP_SLOT_IMPL))
761 return 0;
762
763 return 1;
3a6dc23c
SZ
764}
765
766static void
d85e7311 767pci_read_cap_express(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
3a6dc23c 768{
d85e7311
SZ
769#define REG(n, w) \
770 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
771
772 struct pcicfg_expr *expr = &cfg->expr;
773
3a6dc23c
SZ
774 /*
775 * Assume we have a PCI-express chipset if we have
776 * at least one PCI-express device.
777 */
778 pcie_chipset = 1;
d85e7311
SZ
779
780 expr->expr_ptr = ptr;
781 expr->expr_cap = REG(ptr + PCIER_CAPABILITY, 2);
782
783 /*
784 * Only version 1 can be parsed currently
785 */
786 if ((expr->expr_cap & PCIEM_CAP_VER_MASK) != PCIEM_CAP_VER_1)
787 return;
788
789 /*
790 * Read slot capabilities. Slot capabilities exists iff
791 * current port's slot is implemented
792 */
793 if (pcie_slotimpl(cfg))
794 expr->expr_slotcap = REG(ptr + PCIER_SLOTCAP, 4);
795
796#undef REG
3a6dc23c
SZ
797}
798
799static void
800pci_read_capabilities(device_t pcib, pcicfgregs *cfg)
801{
802#define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
803#define WREG(n, v, w) PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w)
804
805 uint32_t val;
806 int nextptr, ptrptr;
807
808 if ((REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT) == 0) {
809 /* No capabilities */
810 return;
811 }
0c78fe3f 812
4d28e78f 813 switch (cfg->hdrtype & PCIM_HDRTYPE) {
984263bc 814 case 0:
81c29ce4
SZ
815 case 1:
816 ptrptr = PCIR_CAP_PTR;
984263bc
MD
817 break;
818 case 2:
4d28e78f 819 ptrptr = PCIR_CAP_PTR_2; /* cardbus capabilities ptr */
984263bc
MD
820 break;
821 default:
3a6dc23c 822 return; /* no capabilities support */
984263bc 823 }
4d28e78f 824 nextptr = REG(ptrptr, 1); /* sanity check? */
984263bc
MD
825
826 /*
827 * Read capability entries.
828 */
3a6dc23c
SZ
829 while (pci_fixup_nextptr(&nextptr)) {
830 const struct pci_read_cap *rc;
831 int ptr = nextptr;
832
4d28e78f 833 /* Find the next entry */
4d28e78f 834 nextptr = REG(ptr + PCICAP_NEXTPTR, 1);
984263bc
MD
835
836 /* Process this entry */
3a6dc23c
SZ
837 val = REG(ptr + PCICAP_ID, 1);
838 for (rc = pci_read_caps; rc->read_cap != NULL; ++rc) {
839 if (rc->cap == val) {
840 rc->read_cap(pcib, ptr, nextptr, cfg);
4d28e78f
SZ
841 break;
842 }
984263bc
MD
843 }
844 }
941460da
SZ
845
846#if defined(__i386__) || defined(__x86_64__)
847 /*
848 * Enable the MSI mapping window for all HyperTransport
849 * slaves. PCI-PCI bridges have their windows enabled via
850 * PCIB_MAP_MSI().
851 */
852 if (cfg->ht.ht_slave != 0 && cfg->ht.ht_msimap != 0 &&
853 !(cfg->ht.ht_msictrl & PCIM_HTCMD_MSI_ENABLE)) {
854 device_printf(pcib,
855 "Enabling MSI window for HyperTransport slave at pci%d:%d:%d:%d\n",
856 cfg->domain, cfg->bus, cfg->slot, cfg->func);
857 cfg->ht.ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
858 WREG(cfg->ht.ht_msimap + PCIR_HT_COMMAND, cfg->ht.ht_msictrl,
859 2);
860 }
861#endif
862
4d28e78f 863/* REG and WREG use carry through to next functions */
984263bc
MD
864}
865
4d28e78f
SZ
866/*
867 * PCI Vital Product Data
868 */
869
870#define PCI_VPD_TIMEOUT 1000000
984263bc 871
4d28e78f
SZ
872static int
873pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data)
984263bc 874{
4d28e78f 875 int count = PCI_VPD_TIMEOUT;
984263bc 876
4d28e78f 877 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
984263bc 878
4d28e78f 879 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2);
984263bc 880
4d28e78f
SZ
881 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000) {
882 if (--count < 0)
883 return (ENXIO);
884 DELAY(1); /* limit looping */
885 }
886 *data = (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4));
984263bc 887
984263bc
MD
888 return (0);
889}
984263bc 890
4d28e78f
SZ
#if 0
/*
 * Write one 32-bit word of VPD at `reg' and poll for completion.
 * Currently unused; kept for reference.
 */
static int
pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data)
{
	int count = PCI_VPD_TIMEOUT;

	KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));

	WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4);
	WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2);
	/* Bit 15 clears when the write has been consumed. */
	while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000) {
		if (--count < 0)
			return (ENXIO);
		DELAY(1);	/* limit looping */
	}

	return (0);
}
#endif

#undef PCI_VPD_TIMEOUT
912
913struct vpd_readstate {
914 device_t pcib;
915 pcicfgregs *cfg;
916 uint32_t val;
917 int bytesinval;
918 int off;
919 uint8_t cksum;
920};
921
922static int
923vpd_nextbyte(struct vpd_readstate *vrs, uint8_t *data)
924{
925 uint32_t reg;
926 uint8_t byte;
927
928 if (vrs->bytesinval == 0) {
929 if (pci_read_vpd_reg(vrs->pcib, vrs->cfg, vrs->off, &reg))
930 return (ENXIO);
931 vrs->val = le32toh(reg);
932 vrs->off += 4;
933 byte = vrs->val & 0xff;
934 vrs->bytesinval = 3;
935 } else {
936 vrs->val = vrs->val >> 8;
937 byte = vrs->val & 0xff;
938 vrs->bytesinval--;
939 }
940
941 vrs->cksum += byte;
942 *data = byte;
943 return (0);
944}
945
d85e7311
SZ
946int
947pcie_slot_implemented(device_t dev)
948{
949 struct pci_devinfo *dinfo = device_get_ivars(dev);
950
951 return pcie_slotimpl(&dinfo->cfg);
952}
953
4d28e78f
SZ
954void
955pcie_set_max_readrq(device_t dev, uint16_t rqsize)
956{
d85e7311
SZ
957 uint8_t expr_ptr;
958 uint16_t val;
959
960 rqsize &= PCIEM_DEVCTL_MAX_READRQ_MASK;
961 if (rqsize > PCIEM_DEVCTL_MAX_READRQ_4096) {
962 panic("%s: invalid max read request size 0x%02x\n",
963 device_get_nameunit(dev), rqsize);
964 }
965
966 expr_ptr = pci_get_pciecap_ptr(dev);
967 if (!expr_ptr)
968 panic("%s: not PCIe device\n", device_get_nameunit(dev));
969
970 val = pci_read_config(dev, expr_ptr + PCIER_DEVCTRL, 2);
971 if ((val & PCIEM_DEVCTL_MAX_READRQ_MASK) != rqsize) {
972 if (bootverbose)
973 device_printf(dev, "adjust device control 0x%04x", val);
974
975 val &= ~PCIEM_DEVCTL_MAX_READRQ_MASK;
976 val |= rqsize;
977 pci_write_config(dev, expr_ptr + PCIER_DEVCTRL, val, 2);
978
979 if (bootverbose)
980 kprintf(" -> 0x%04x\n", val);
981 }
4d28e78f
SZ
982}
983
441580ca
SZ
984uint16_t
985pcie_get_max_readrq(device_t dev)
986{
987 uint8_t expr_ptr;
988 uint16_t val;
989
990 expr_ptr = pci_get_pciecap_ptr(dev);
991 if (!expr_ptr)
992 panic("%s: not PCIe device\n", device_get_nameunit(dev));
993
994 val = pci_read_config(dev, expr_ptr + PCIER_DEVCTRL, 2);
995 return (val & PCIEM_DEVCTL_MAX_READRQ_MASK);
996}
997
/*
 * Parse the device's Vital Product Data (VPD) into cfg->vpd.
 *
 * Runs a small state machine over the byte stream delivered by
 * vpd_nextbyte():
 *   state 0 - item header; 1 - identifier string; 2/3 - VPD-R keyword
 *   header/value; 4 - skip; 5/6 - VPD-W keyword header/value.
 * Negative states terminate the loop: -1 = normal end or bad data,
 * -2 = I/O error reading the VPD registers.  On failure the partially
 * built arrays are torn down.  Always marks vpd_cached so the parse
 * is attempted at most once.
 */
static void
pci_read_vpd(device_t pcib, pcicfgregs *cfg)
{
	struct vpd_readstate vrs;
	int state;
	int name;
	int remain;
	int i;
	int alloc, off;		/* alloc/off for RO/W arrays */
	int cksumvalid;
	int dflen;
	uint8_t byte;
	uint8_t byte2;

	/* init vpd reader */
	vrs.bytesinval = 0;
	vrs.off = 0;
	vrs.pcib = pcib;
	vrs.cfg = cfg;
	vrs.cksum = 0;

	state = 0;
	name = remain = i = 0;	/* shut up stupid gcc */
	alloc = off = 0;	/* shut up stupid gcc */
	dflen = 0;		/* shut up stupid gcc */
	cksumvalid = -1;	/* -1 = RV keyword not yet seen */
	while (state >= 0) {
		if (vpd_nextbyte(&vrs, &byte)) {
			state = -2;
			break;
		}
#if 0
		kprintf("vpd: val: %#x, off: %d, bytesinval: %d, byte: %#hhx, " \
		    "state: %d, remain: %d, name: %#x, i: %d\n", vrs.val,
		    vrs.off, vrs.bytesinval, byte, state, remain, name, i);
#endif
		switch (state) {
		case 0:		/* item name */
			if (byte & 0x80) {
				/* Large resource: 16-bit little-endian length. */
				if (vpd_nextbyte(&vrs, &byte2)) {
					state = -2;
					break;
				}
				remain = byte2;
				if (vpd_nextbyte(&vrs, &byte2)) {
					state = -2;
					break;
				}
				remain |= byte2 << 8;
				/* VPD is capped at 0x7f 32-bit registers. */
				if (remain > (0x7f*4 - vrs.off)) {
					state = -1;
					kprintf(
			    "pci%d:%d:%d:%d: invalid VPD data, remain %#x\n",
					    cfg->domain, cfg->bus, cfg->slot,
					    cfg->func, remain);
				}
				name = byte & 0x7f;
			} else {
				/* Small resource: 3-bit length in tag byte. */
				remain = byte & 0x7;
				name = (byte >> 3) & 0xf;
			}
			switch (name) {
			case 0x2:	/* String */
				cfg->vpd.vpd_ident = kmalloc(remain + 1,
				    M_DEVBUF, M_WAITOK);
				i = 0;
				state = 1;
				break;
			case 0xf:	/* End */
				state = -1;
				break;
			case 0x10:	/* VPD-R */
				alloc = 8;
				off = 0;
				cfg->vpd.vpd_ros = kmalloc(alloc *
				    sizeof(*cfg->vpd.vpd_ros), M_DEVBUF,
				    M_WAITOK | M_ZERO);
				state = 2;
				break;
			case 0x11:	/* VPD-W */
				alloc = 8;
				off = 0;
				cfg->vpd.vpd_w = kmalloc(alloc *
				    sizeof(*cfg->vpd.vpd_w), M_DEVBUF,
				    M_WAITOK | M_ZERO);
				state = 5;
				break;
			default:	/* Invalid data, abort */
				state = -1;
				break;
			}
			break;

		case 1:	/* Identifier String */
			cfg->vpd.vpd_ident[i++] = byte;
			remain--;
			if (remain == 0)  {
				cfg->vpd.vpd_ident[i] = '\0';
				state = 0;
			}
			break;

		case 2:	/* VPD-R Keyword Header */
			/* Grow the RO array geometrically as needed. */
			if (off == alloc) {
				cfg->vpd.vpd_ros = krealloc(cfg->vpd.vpd_ros,
				    (alloc *= 2) * sizeof(*cfg->vpd.vpd_ros),
				    M_DEVBUF, M_WAITOK | M_ZERO);
			}
			cfg->vpd.vpd_ros[off].keyword[0] = byte;
			if (vpd_nextbyte(&vrs, &byte2)) {
				state = -2;
				break;
			}
			cfg->vpd.vpd_ros[off].keyword[1] = byte2;
			if (vpd_nextbyte(&vrs, &byte2)) {
				state = -2;
				break;
			}
			dflen = byte2;
			if (dflen == 0 &&
			    strncmp(cfg->vpd.vpd_ros[off].keyword, "RV",
			    2) == 0) {
				/*
				 * if this happens, we can't trust the rest
				 * of the VPD.
				 */
				kprintf(
				    "pci%d:%d:%d:%d: bad keyword length: %d\n",
				    cfg->domain, cfg->bus, cfg->slot,
				    cfg->func, dflen);
				cksumvalid = 0;
				state = -1;
				break;
			} else if (dflen == 0) {
				/* Zero-length value: keep an empty string. */
				cfg->vpd.vpd_ros[off].value = kmalloc(1 *
				    sizeof(*cfg->vpd.vpd_ros[off].value),
				    M_DEVBUF, M_WAITOK);
				cfg->vpd.vpd_ros[off].value[0] = '\x00';
			} else
				cfg->vpd.vpd_ros[off].value = kmalloc(
				    (dflen + 1) *
				    sizeof(*cfg->vpd.vpd_ros[off].value),
				    M_DEVBUF, M_WAITOK);
			remain -= 3;
			i = 0;
			/* keep in sync w/ state 3's transistions */
			if (dflen == 0 && remain == 0)
				state = 0;
			else if (dflen == 0)
				state = 2;
			else
				state = 3;
			break;

		case 3:	/* VPD-R Keyword Value */
			cfg->vpd.vpd_ros[off].value[i++] = byte;
			/*
			 * The RV keyword's first byte makes the checksum of
			 * everything up to and including it equal zero.
			 */
			if (strncmp(cfg->vpd.vpd_ros[off].keyword,
			    "RV", 2) == 0 && cksumvalid == -1) {
				if (vrs.cksum == 0)
					cksumvalid = 1;
				else {
					if (bootverbose)
						kprintf(
				"pci%d:%d:%d:%d: bad VPD cksum, remain %hhu\n",
						    cfg->domain, cfg->bus,
						    cfg->slot, cfg->func,
						    vrs.cksum);
					cksumvalid = 0;
					state = -1;
					break;
				}
			}
			dflen--;
			remain--;
			/* keep in sync w/ state 2's transistions */
			if (dflen == 0)
				cfg->vpd.vpd_ros[off++].value[i++] = '\0';
			if (dflen == 0 && remain == 0) {
				/* Shrink the array to its final size. */
				cfg->vpd.vpd_rocnt = off;
				cfg->vpd.vpd_ros = krealloc(cfg->vpd.vpd_ros,
				    off * sizeof(*cfg->vpd.vpd_ros),
				    M_DEVBUF, M_WAITOK | M_ZERO);
				state = 0;
			} else if (dflen == 0)
				state = 2;
			break;

		case 4:	/* skip remaining bytes of the current item */
			remain--;
			if (remain == 0)
				state = 0;
			break;

		case 5:	/* VPD-W Keyword Header */
			/* Grow the W array geometrically as needed. */
			if (off == alloc) {
				cfg->vpd.vpd_w = krealloc(cfg->vpd.vpd_w,
				    (alloc *= 2) * sizeof(*cfg->vpd.vpd_w),
				    M_DEVBUF, M_WAITOK | M_ZERO);
			}
			cfg->vpd.vpd_w[off].keyword[0] = byte;
			if (vpd_nextbyte(&vrs, &byte2)) {
				state = -2;
				break;
			}
			cfg->vpd.vpd_w[off].keyword[1] = byte2;
			if (vpd_nextbyte(&vrs, &byte2)) {
				state = -2;
				break;
			}
			cfg->vpd.vpd_w[off].len = dflen = byte2;
			/* Remember where this value lives for write-back. */
			cfg->vpd.vpd_w[off].start = vrs.off - vrs.bytesinval;
			cfg->vpd.vpd_w[off].value = kmalloc((dflen + 1) *
			    sizeof(*cfg->vpd.vpd_w[off].value),
			    M_DEVBUF, M_WAITOK);
			remain -= 3;
			i = 0;
			/* keep in sync w/ state 6's transistions */
			if (dflen == 0 && remain == 0)
				state = 0;
			else if (dflen == 0)
				state = 5;
			else
				state = 6;
			break;

		case 6:	/* VPD-W Keyword Value */
			cfg->vpd.vpd_w[off].value[i++] = byte;
			dflen--;
			remain--;
			/* keep in sync w/ state 5's transistions */
			if (dflen == 0)
				cfg->vpd.vpd_w[off++].value[i++] = '\0';
			if (dflen == 0 && remain == 0) {
				/* Shrink the array to its final size. */
				cfg->vpd.vpd_wcnt = off;
				cfg->vpd.vpd_w = krealloc(cfg->vpd.vpd_w,
				    off * sizeof(*cfg->vpd.vpd_w),
				    M_DEVBUF, M_WAITOK | M_ZERO);
				state = 0;
			} else if (dflen == 0)
				state = 5;
			break;

		default:
			kprintf("pci%d:%d:%d:%d: invalid state: %d\n",
			    cfg->domain, cfg->bus, cfg->slot, cfg->func,
			    state);
			state = -1;
			break;
		}
	}

	if (cksumvalid == 0 || state < -1) {
		/* read-only data bad, clean up */
		if (cfg->vpd.vpd_ros != NULL) {
			for (off = 0; cfg->vpd.vpd_ros[off].value; off++)
				kfree(cfg->vpd.vpd_ros[off].value, M_DEVBUF);
			kfree(cfg->vpd.vpd_ros, M_DEVBUF);
			cfg->vpd.vpd_ros = NULL;
		}
	}
	if (state < -1) {
		/* I/O error, clean up */
		kprintf("pci%d:%d:%d:%d: failed to read VPD data.\n",
		    cfg->domain, cfg->bus, cfg->slot, cfg->func);
		if (cfg->vpd.vpd_ident != NULL) {
			kfree(cfg->vpd.vpd_ident, M_DEVBUF);
			cfg->vpd.vpd_ident = NULL;
		}
		if (cfg->vpd.vpd_w != NULL) {
			for (off = 0; cfg->vpd.vpd_w[off].value; off++)
				kfree(cfg->vpd.vpd_w[off].value, M_DEVBUF);
			kfree(cfg->vpd.vpd_w, M_DEVBUF);
			cfg->vpd.vpd_w = NULL;
		}
	}
	cfg->vpd.vpd_cached = 1;
#undef REG
#undef WREG
}
1277
1278int
1279pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr)
1280{
1281 struct pci_devinfo *dinfo = device_get_ivars(child);
1282 pcicfgregs *cfg = &dinfo->cfg;
1283
1284 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1285 pci_read_vpd(device_get_parent(dev), cfg);
1286
1287 *identptr = cfg->vpd.vpd_ident;
1288
1289 if (*identptr == NULL)
1290 return (ENXIO);
1291
1292 return (0);
1293}
1294
1295int
1296pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw,
1297 const char **vptr)
1298{
1299 struct pci_devinfo *dinfo = device_get_ivars(child);
1300 pcicfgregs *cfg = &dinfo->cfg;
1301 int i;
1302
1303 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1304 pci_read_vpd(device_get_parent(dev), cfg);
1305
1306 for (i = 0; i < cfg->vpd.vpd_rocnt; i++)
1307 if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword,
1308 sizeof(cfg->vpd.vpd_ros[i].keyword)) == 0) {
1309 *vptr = cfg->vpd.vpd_ros[i].value;
1310 }
1311
1312 if (i != cfg->vpd.vpd_rocnt)
1313 return (0);
1314
1315 *vptr = NULL;
1316 return (ENXIO);
1317}
1318
1319/*
1320 * Return the offset in configuration space of the requested extended
1321 * capability entry or 0 if the specified capability was not found.
1322 */
1323int
1324pci_find_extcap_method(device_t dev, device_t child, int capability,
1325 int *capreg)
1326{
1327 struct pci_devinfo *dinfo = device_get_ivars(child);
1328 pcicfgregs *cfg = &dinfo->cfg;
1329 u_int32_t status;
1330 u_int8_t ptr;
1331
1332 /*
1333 * Check the CAP_LIST bit of the PCI status register first.
1334 */
1335 status = pci_read_config(child, PCIR_STATUS, 2);
1336 if (!(status & PCIM_STATUS_CAPPRESENT))
1337 return (ENXIO);
1338
1339 /*
1340 * Determine the start pointer of the capabilities list.
1341 */
1342 switch (cfg->hdrtype & PCIM_HDRTYPE) {
1343 case 0:
1344 case 1:
1345 ptr = PCIR_CAP_PTR;
1346 break;
1347 case 2:
1348 ptr = PCIR_CAP_PTR_2;
1349 break;
1350 default:
1351 /* XXX: panic? */
1352 return (ENXIO); /* no extended capabilities support */
1353 }
1354 ptr = pci_read_config(child, ptr, 1);
1355
1356 /*
1357 * Traverse the capabilities list.
1358 */
1359 while (ptr != 0) {
1360 if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
1361 if (capreg != NULL)
1362 *capreg = ptr;
1363 return (0);
1364 }
1365 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1366 }
1367
1368 return (ENOENT);
1369}
1370
1371/*
1372 * Support for MSI-X message interrupts.
1373 */
cf8f3133
SZ
1374static void
1375pci_setup_msix_vector(device_t dev, u_int index, uint64_t address,
1376 uint32_t data)
4d28e78f
SZ
1377{
1378 struct pci_devinfo *dinfo = device_get_ivars(dev);
1379 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1380 uint32_t offset;
1381
f9c942fb 1382 KASSERT(msix->msix_msgnum > index, ("bogus index"));
4d28e78f
SZ
1383 offset = msix->msix_table_offset + index * 16;
1384 bus_write_4(msix->msix_table_res, offset, address & 0xffffffff);
1385 bus_write_4(msix->msix_table_res, offset + 4, address >> 32);
1386 bus_write_4(msix->msix_table_res, offset + 8, data);
1387
1388 /* Enable MSI -> HT mapping. */
1389 pci_ht_map_msi(dev, address);
1390}
1391
cf8f3133
SZ
1392static void
1393pci_mask_msix_vector(device_t dev, u_int index)
4d28e78f
SZ
1394{
1395 struct pci_devinfo *dinfo = device_get_ivars(dev);
1396 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1397 uint32_t offset, val;
1398
1399 KASSERT(msix->msix_msgnum > index, ("bogus index"));
1400 offset = msix->msix_table_offset + index * 16 + 12;
1401 val = bus_read_4(msix->msix_table_res, offset);
1402 if (!(val & PCIM_MSIX_VCTRL_MASK)) {
1403 val |= PCIM_MSIX_VCTRL_MASK;
1404 bus_write_4(msix->msix_table_res, offset, val);
1405 }
1406}
1407
cf8f3133
SZ
1408static void
1409pci_unmask_msix_vector(device_t dev, u_int index)
4d28e78f
SZ
1410{
1411 struct pci_devinfo *dinfo = device_get_ivars(dev);
1412 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1413 uint32_t offset, val;
1414
f9c942fb 1415 KASSERT(msix->msix_msgnum > index, ("bogus index"));
4d28e78f
SZ
1416 offset = msix->msix_table_offset + index * 16 + 12;
1417 val = bus_read_4(msix->msix_table_res, offset);
1418 if (val & PCIM_MSIX_VCTRL_MASK) {
1419 val &= ~PCIM_MSIX_VCTRL_MASK;
1420 bus_write_4(msix->msix_table_res, offset, val);
1421 }
1422}
1423
1424int
cf8f3133 1425pci_pending_msix_vector(device_t dev, u_int index)
4d28e78f
SZ
1426{
1427 struct pci_devinfo *dinfo = device_get_ivars(dev);
1428 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1429 uint32_t offset, bit;
1430
31646171
SZ
1431 KASSERT(msix->msix_table_res != NULL && msix->msix_pba_res != NULL,
1432 ("MSI-X is not setup yet\n"));
1433
f9c942fb 1434 KASSERT(msix->msix_msgnum > index, ("bogus index"));
4d28e78f
SZ
1435 offset = msix->msix_pba_offset + (index / 32) * 4;
1436 bit = 1 << index % 32;
1437 return (bus_read_4(msix->msix_pba_res, offset) & bit);
1438}
1439
1440/*
1441 * Restore MSI-X registers and table during resume. If MSI-X is
1442 * enabled then walk the virtual table to restore the actual MSI-X
1443 * table.
1444 */
1445static void
1446pci_resume_msix(device_t dev)
1447{
1448 struct pci_devinfo *dinfo = device_get_ivars(dev);
1449 struct pcicfg_msix *msix = &dinfo->cfg.msix;
4d28e78f
SZ
1450
1451 if (msix->msix_alloc > 0) {
f9c942fb
SZ
1452 const struct msix_vector *mv;
1453
31646171 1454 pci_mask_msix_allvectors(dev);
4d28e78f 1455
f9c942fb
SZ
1456 TAILQ_FOREACH(mv, &msix->msix_vectors, mv_link) {
1457 u_int vector;
1458
1459 if (mv->mv_address == 0)
4d28e78f 1460 continue;
f9c942fb
SZ
1461
1462 vector = PCI_MSIX_RID2VEC(mv->mv_rid);
1463 pci_setup_msix_vector(dev, vector,
1464 mv->mv_address, mv->mv_data);
1465 pci_unmask_msix_vector(dev, vector);
4d28e78f
SZ
1466 }
1467 }
1468 pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
1469 msix->msix_ctrl, 2);
1470}
1471
1472/*
84ee3613
SZ
1473 * Attempt to allocate one MSI-X message at the specified vector on cpuid.
1474 *
1475 * After this function returns, the MSI-X's rid will be saved in rid0.
4d28e78f
SZ
1476 */
1477int
84ee3613
SZ
1478pci_alloc_msix_vector_method(device_t dev, device_t child, u_int vector,
1479 int *rid0, int cpuid)
4d28e78f
SZ
1480{
1481 struct pci_devinfo *dinfo = device_get_ivars(child);
84ee3613 1482 struct pcicfg_msix *msix = &dinfo->cfg.msix;
f9c942fb 1483 struct msix_vector *mv;
4d28e78f 1484 struct resource_list_entry *rle;
84ee3613 1485 int error, irq, rid;
4d28e78f 1486
84ee3613
SZ
1487 KASSERT(msix->msix_table_res != NULL &&
1488 msix->msix_pba_res != NULL, ("MSI-X is not setup yet\n"));
1489 KASSERT(cpuid >= 0 && cpuid < ncpus, ("invalid cpuid %d\n", cpuid));
1490 KASSERT(vector < msix->msix_msgnum,
1491 ("invalid MSI-X vector %u, total %d\n", vector, msix->msix_msgnum));
4d28e78f 1492
84ee3613 1493 if (bootverbose) {
4d28e78f 1494 device_printf(child,
84ee3613
SZ
1495 "attempting to allocate MSI-X #%u vector (%d supported)\n",
1496 vector, msix->msix_msgnum);
4d28e78f 1497 }
4d28e78f 1498
84ee3613 1499 /* Set rid according to vector number */
f9c942fb
SZ
1500 rid = PCI_MSIX_VEC2RID(vector);
1501
1502 /* Vector has already been allocated */
1503 mv = pci_find_msix_vector(child, rid);
1504 if (mv != NULL)
1505 return EBUSY;
14ae4dce 1506
84ee3613
SZ
1507 /* Allocate a message. */
1508 error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq, cpuid);
1509 if (error)
1510 return error;
1511 resource_list_add(&dinfo->resources, SYS_RES_IRQ, rid,
1512 irq, irq, 1, cpuid);
4d28e78f 1513
84ee3613
SZ
1514 if (bootverbose) {
1515 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
1516 device_printf(child, "using IRQ %lu for MSI-X on cpu%d\n",
1517 rle->start, cpuid);
4d28e78f
SZ
1518 }
1519
4d28e78f 1520 /* Update counts of alloc'd messages. */
84ee3613
SZ
1521 msix->msix_alloc++;
1522
f9c942fb
SZ
1523 mv = kmalloc(sizeof(*mv), M_DEVBUF, M_WAITOK | M_ZERO);
1524 mv->mv_rid = rid;
1525 TAILQ_INSERT_TAIL(&msix->msix_vectors, mv, mv_link);
1526
84ee3613
SZ
1527 *rid0 = rid;
1528 return 0;
4d28e78f
SZ
1529}
1530
/*
 * Release the MSI-X vector identified by 'rid': delete its resource
 * list entry, hand the IRQ back to the parent bridge and drop the
 * tracking structure.  The vector must already be torn down
 * (mv_address == 0) and its IRQ resource released by the caller.
 */
int
pci_release_msix_vector_method(device_t dev, device_t child, int rid)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	struct resource_list_entry *rle;
	struct msix_vector *mv;
	int irq, cpuid;

	KASSERT(msix->msix_table_res != NULL &&
	    msix->msix_pba_res != NULL, ("MSI-X is not setup yet\n"));
	KASSERT(msix->msix_alloc > 0, ("No MSI-X allocated\n"));
	KASSERT(rid > 0, ("invalid rid %d\n", rid));

	mv = pci_find_msix_vector(child, rid);
	KASSERT(mv != NULL, ("MSI-X rid %d is not allocated\n", rid));
	KASSERT(mv->mv_address == 0, ("MSI-X rid %d not teardown\n", rid));

	/* Make sure resource is no longer allocated. */
	rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
	KASSERT(rle != NULL, ("missing MSI-X resource, rid %d\n", rid));
	KASSERT(rle->res == NULL,
	    ("MSI-X resource is still allocated, rid %d\n", rid));

	/* Capture IRQ/cpu before the entry is deleted. */
	irq = rle->start;
	cpuid = rle->cpuid;

	/* Free the resource list entries. */
	resource_list_delete(&dinfo->resources, SYS_RES_IRQ, rid);

	/* Release the IRQ. */
	PCIB_RELEASE_MSIX(device_get_parent(dev), child, irq, cpuid);

	TAILQ_REMOVE(&msix->msix_vectors, mv, mv_link);
	kfree(mv, M_DEVBUF);

	msix->msix_alloc--;
	return (0);
}
1570
1571/*
1572 * Return the max supported MSI-X messages this device supports.
1573 * Basically, assuming the MD code can alloc messages, this function
1574 * should return the maximum value that pci_alloc_msix() can return.
1575 * Thus, it is subject to the tunables, etc.
1576 */
1577int
1578pci_msix_count_method(device_t dev, device_t child)
1579{
1580 struct pci_devinfo *dinfo = device_get_ivars(child);
1581 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1582
1583 if (pci_do_msix && msix->msix_location != 0)
1584 return (msix->msix_msgnum);
1585 return (0);
1586}
1587
/*
 * Map the MSI-X message table and pending-bit array (PBA) for 'dev'
 * and mask every vector.  Must be called (once) before any MSI-X
 * vector is allocated.  Returns ENXIO when a precondition fails and
 * ENODEV when the device/system does not support MSI-X.
 */
int
pci_setup_msix(device_t dev)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	pcicfgregs *cfg = &dinfo->cfg;
	struct resource_list_entry *rle;
	struct resource *table_res, *pba_res;

	KASSERT(cfg->msix.msix_table_res == NULL &&
	    cfg->msix.msix_pba_res == NULL, ("MSI-X has been setup yet\n"));

	/* If rid 0 is allocated, then fail. */
	rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
	if (rle != NULL && rle->res != NULL)
		return (ENXIO);

	/* Already have allocated MSIs? */
	if (cfg->msi.msi_alloc != 0)
		return (ENXIO);

	/* If MSI is blacklisted for this system, fail. */
	if (pci_msi_blacklisted())
		return (ENXIO);

	/* MSI-X capability present? */
	if (cfg->msix.msix_location == 0 || cfg->msix.msix_msgnum == 0 ||
	    !pci_do_msix)
		return (ENODEV);

	KASSERT(cfg->msix.msix_alloc == 0 &&
	    TAILQ_EMPTY(&cfg->msix.msix_vectors),
	    ("MSI-X vector has been allocated\n"));

	/* Make sure the appropriate BARs are mapped. */
	rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
	    cfg->msix.msix_table_bar);
	if (rle == NULL || rle->res == NULL ||
	    !(rman_get_flags(rle->res) & RF_ACTIVE))
		return (ENXIO);
	table_res = rle->res;
	if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
		/* PBA lives in its own BAR; it must be mapped as well. */
		rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
		    cfg->msix.msix_pba_bar);
		if (rle == NULL || rle->res == NULL ||
		    !(rman_get_flags(rle->res) & RF_ACTIVE))
			return (ENXIO);
	}
	/* When table and PBA share a BAR, 'rle' still holds the table's. */
	pba_res = rle->res;

	cfg->msix.msix_table_res = table_res;
	cfg->msix.msix_pba_res = pba_res;

	/* Start with every vector masked. */
	pci_mask_msix_allvectors(dev);

	return 0;
}
1644
1645void
1646pci_teardown_msix(device_t dev)
1647{
1648 struct pci_devinfo *dinfo = device_get_ivars(dev);
1649 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1650
1651 KASSERT(msix->msix_table_res != NULL &&
1652 msix->msix_pba_res != NULL, ("MSI-X is not setup yet\n"));
ac0a3f31
SZ
1653 KASSERT(msix->msix_alloc == 0 && TAILQ_EMPTY(&msix->msix_vectors),
1654 ("MSI-X vector is still allocated\n"));
31646171
SZ
1655
1656 pci_mask_msix_allvectors(dev);
1657
1658 msix->msix_table_res = NULL;
1659 msix->msix_pba_res = NULL;
1660}
1661
a39a2984
SZ
1662void
1663pci_enable_msix(device_t dev)
1664{
1665 struct pci_devinfo *dinfo = device_get_ivars(dev);
1666 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1667
1668 KASSERT(msix->msix_table_res != NULL &&
1669 msix->msix_pba_res != NULL, ("MSI-X is not setup yet\n"));
1670
1671 /* Update control register to enable MSI-X. */
1672 msix->msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1673 pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
1674 msix->msix_ctrl, 2);
1675}
1676
1677void
1678pci_disable_msix(device_t dev)
1679{
1680 struct pci_devinfo *dinfo = device_get_ivars(dev);
1681 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1682
1683 KASSERT(msix->msix_table_res != NULL &&
1684 msix->msix_pba_res != NULL, ("MSI-X is not setup yet\n"));
1685
1686 /* Disable MSI -> HT mapping. */
1687 pci_ht_map_msi(dev, 0);
1688
1689 /* Update control register to disable MSI-X. */
1690 msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE;
1691 pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
1692 msix->msix_ctrl, 2);
1693}
1694
31646171
SZ
1695static void
1696pci_mask_msix_allvectors(device_t dev)
1697{
1698 struct pci_devinfo *dinfo = device_get_ivars(dev);
1699 u_int i;
1700
1701 for (i = 0; i < dinfo->cfg.msix.msix_msgnum; ++i)
1702 pci_mask_msix_vector(dev, i);
1703}
1704
f9c942fb
SZ
1705static struct msix_vector *
1706pci_find_msix_vector(device_t dev, int rid)
1707{
1708 struct pci_devinfo *dinfo = device_get_ivars(dev);
1709 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1710 struct msix_vector *mv;
1711
1712 TAILQ_FOREACH(mv, &msix->msix_vectors, mv_link) {
1713 if (mv->mv_rid == rid)
1714 return mv;
1715 }
1716 return NULL;
1717}
1718
4d28e78f
SZ
1719/*
1720 * HyperTransport MSI mapping control
1721 */
1722void
1723pci_ht_map_msi(device_t dev, uint64_t addr)
1724{
1725 struct pci_devinfo *dinfo = device_get_ivars(dev);
1726 struct pcicfg_ht *ht = &dinfo->cfg.ht;
1727
1728 if (!ht->ht_msimap)
1729 return;
1730
1731 if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) &&
1732 ht->ht_msiaddr >> 20 == addr >> 20) {
1733 /* Enable MSI -> HT mapping. */
1734 ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
1735 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1736 ht->ht_msictrl, 2);
1737 }
1738
a39a2984 1739 if (!addr && (ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE)) {
4d28e78f
SZ
1740 /* Disable MSI -> HT mapping. */
1741 ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE;
1742 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1743 ht->ht_msictrl, 2);
1744 }
1745}
1746
1747/*
1748 * Support for MSI message signalled interrupts.
1749 */
1750void
1751pci_enable_msi(device_t dev, uint64_t address, uint16_t data)
1752{
1753 struct pci_devinfo *dinfo = device_get_ivars(dev);
1754 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1755
1756 /* Write data and address values. */
1757 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
1758 address & 0xffffffff, 4);
1759 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1760 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR_HIGH,
1761 address >> 32, 4);
1762 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA_64BIT,
1763 data, 2);
1764 } else
1765 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA, data,
1766 2);
1767
1768 /* Enable MSI in the control register. */
1769 msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE;
1770 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1771 2);
1772
1773 /* Enable MSI -> HT mapping. */
1774 pci_ht_map_msi(dev, address);
1775}
1776
1777void
1778pci_disable_msi(device_t dev)
1779{
1780 struct pci_devinfo *dinfo = device_get_ivars(dev);
1781 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1782
1783 /* Disable MSI -> HT mapping. */
1784 pci_ht_map_msi(dev, 0);
1785
1786 /* Disable MSI in the control register. */
1787 msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE;
1788 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1789 2);
1790}
1791
1792/*
1793 * Restore MSI registers during resume. If MSI is enabled then
1794 * restore the data and address registers in addition to the control
1795 * register.
1796 */
1797static void
1798pci_resume_msi(device_t dev)
1799{
1800 struct pci_devinfo *dinfo = device_get_ivars(dev);
1801 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1802 uint64_t address;
1803 uint16_t data;
1804
1805 if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) {
1806 address = msi->msi_addr;
1807 data = msi->msi_data;
1808 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
1809 address & 0xffffffff, 4);
1810 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1811 pci_write_config(dev, msi->msi_location +
1812 PCIR_MSI_ADDR_HIGH, address >> 32, 4);
1813 pci_write_config(dev, msi->msi_location +
1814 PCIR_MSI_DATA_64BIT, data, 2);
1815 } else
1816 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA,
1817 data, 2);
1818 }
1819 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1820 2);
1821}
1822
4d28e78f
SZ
1823/*
1824 * Returns true if the specified device is blacklisted because MSI
1825 * doesn't work.
1826 */
1827int
1828pci_msi_device_blacklisted(device_t dev)
1829{
1830 struct pci_quirk *q;
1831
1832 if (!pci_honor_msi_blacklist)
1833 return (0);
1834
1835 for (q = &pci_quirks[0]; q->devid; q++) {
1836 if (q->devid == pci_get_devid(dev) &&
1837 q->type == PCI_QUIRK_DISABLE_MSI)
1838 return (1);
1839 }
1840 return (0);
1841}
1842
1843/*
1844 * Determine if MSI is blacklisted globally on this sytem. Currently,
1845 * we just check for blacklisted chipsets as represented by the
1846 * host-PCI bridge at device 0:0:0. In the future, it may become
1847 * necessary to check other system attributes, such as the kenv values
1848 * that give the motherboard manufacturer and model number.
1849 */
1850static int
1851pci_msi_blacklisted(void)
1852{
1853 device_t dev;
1854
1855 if (!pci_honor_msi_blacklist)
1856 return (0);
1857
1858 /* Blacklist all non-PCI-express and non-PCI-X chipsets. */
1859 if (!(pcie_chipset || pcix_chipset))
1860 return (1);
1861
1862 dev = pci_find_bsf(0, 0, 0);
1863 if (dev != NULL)
1864 return (pci_msi_device_blacklisted(dev));
1865 return (0);
1866}
1867
1868/*
2c3d7ac8
SZ
1869 * Attempt to allocate count MSI messages on start_cpuid.
1870 *
1871 * If start_cpuid < 0, then the MSI messages' target CPU will be
1872 * selected automaticly.
1873 *
1874 * If the caller explicitly specified the MSI messages' target CPU,
1875 * i.e. start_cpuid >= 0, then we will try to allocate the count MSI
1876 * messages on the specified CPU, if the allocation fails due to MD
1877 * does not have enough vectors (EMSGSIZE), then we will try next
1878 * available CPU, until the allocation fails on all CPUs.
1879 *
1880 * EMSGSIZE will be returned, if all available CPUs does not have
1881 * enough vectors for the requested amount of MSI messages. Caller
1882 * should either reduce the amount of MSI messages to be requested,
1883 * or simply giving up using MSI.
1884 *
1885 * The available SYS_RES_IRQ resources' rids, which are >= 1, are
1886 * returned in 'rid' array, if the allocation succeeds.
4d28e78f
SZ
1887 */
1888int
2c3d7ac8
SZ
1889pci_alloc_msi_method(device_t dev, device_t child, int *rid, int count,
1890 int start_cpuid)
4d28e78f
SZ
1891{
1892 struct pci_devinfo *dinfo = device_get_ivars(child);
1893 pcicfgregs *cfg = &dinfo->cfg;
1894 struct resource_list_entry *rle;
2c3d7ac8 1895 int error, i, irqs[32], cpuid = 0;
4d28e78f
SZ
1896 uint16_t ctrl;
1897
2c3d7ac8
SZ
1898 KASSERT(count != 0 && count <= 32 && powerof2(count),
1899 ("invalid MSI count %d\n", count));
1900 KASSERT(start_cpuid < ncpus, ("invalid cpuid %d\n", start_cpuid));
4d28e78f
SZ
1901
1902 /* If rid 0 is allocated, then fail. */
1903 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
1904 if (rle != NULL && rle->res != NULL)
1905 return (ENXIO);
1906
1907 /* Already have allocated messages? */
1908 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
1909 return (ENXIO);
1910
1911 /* If MSI is blacklisted for this system, fail. */
1912 if (pci_msi_blacklisted())
1913 return (ENXIO);
1914
1915 /* MSI capability present? */
ce92281b
SZ
1916 if (cfg->msi.msi_location == 0 || cfg->msi.msi_msgnum == 0 ||
1917 !pci_do_msi)
4d28e78f
SZ
1918 return (ENODEV);
1919
2c3d7ac8
SZ
1920 KASSERT(count <= cfg->msi.msi_msgnum, ("large MSI count %d, max %d\n",
1921 count, cfg->msi.msi_msgnum));
1922
1923 if (bootverbose) {
4d28e78f
SZ
1924 device_printf(child,
1925 "attempting to allocate %d MSI vectors (%d supported)\n",
2c3d7ac8
SZ
1926 count, cfg->msi.msi_msgnum);
1927 }
4d28e78f 1928
2c3d7ac8
SZ
1929 if (start_cpuid < 0)
1930 start_cpuid = atomic_fetchadd_int(&pci_msi_cpuid, 1) % ncpus;
4d28e78f 1931
2c3d7ac8
SZ
1932 error = EINVAL;
1933 for (i = 0; i < ncpus; ++i) {
1934 cpuid = (start_cpuid + i) % ncpus;
4d28e78f 1935
2c3d7ac8 1936 error = PCIB_ALLOC_MSI(device_get_parent(dev), child, count,
803a9933 1937 cfg->msi.msi_msgnum, irqs, cpuid);
4d28e78f
SZ
1938 if (error == 0)
1939 break;
2c3d7ac8
SZ
1940 else if (error != EMSGSIZE)
1941 return error;
4d28e78f 1942 }
2c3d7ac8
SZ
1943 if (error)
1944 return error;
4d28e78f
SZ
1945
1946 /*
2c3d7ac8
SZ
1947 * We now have N messages mapped onto SYS_RES_IRQ resources in
1948 * the irqs[] array, so add new resources starting at rid 1.
4d28e78f 1949 */
2c3d7ac8
SZ
1950 for (i = 0; i < count; i++) {
1951 rid[i] = i + 1;
4d28e78f 1952 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
803a9933 1953 irqs[i], irqs[i], 1, cpuid);
2c3d7ac8 1954 }
4d28e78f
SZ
1955
1956 if (bootverbose) {
2c3d7ac8
SZ
1957 if (count == 1) {
1958 device_printf(child, "using IRQ %d on cpu%d for MSI\n",
1959 irqs[0], cpuid);
1960 } else {
4d28e78f
SZ
1961 int run;
1962
1963 /*
1964 * Be fancy and try to print contiguous runs
1965 * of IRQ values as ranges. 'run' is true if
1966 * we are in a range.
1967 */
1968 device_printf(child, "using IRQs %d", irqs[0]);
1969 run = 0;
2c3d7ac8 1970 for (i = 1; i < count; i++) {
4d28e78f
SZ
1971
1972 /* Still in a run? */
1973 if (irqs[i] == irqs[i - 1] + 1) {
1974 run = 1;
1975 continue;
1976 }
1977
1978 /* Finish previous range. */
1979 if (run) {
1980 kprintf("-%d", irqs[i - 1]);
1981 run = 0;
1982 }
1983
1984 /* Start new range. */
1985 kprintf(",%d", irqs[i]);
1986 }
1987
1988 /* Unfinished range? */
1989 if (run)
2c3d7ac8
SZ
1990 kprintf("-%d", irqs[count - 1]);
1991 kprintf(" for MSI on cpu%d\n", cpuid);
4d28e78f
SZ
1992 }
1993 }
1994
2c3d7ac8 1995 /* Update control register with count. */
4d28e78f
SZ
1996 ctrl = cfg->msi.msi_ctrl;
1997 ctrl &= ~PCIM_MSICTRL_MME_MASK;
2c3d7ac8 1998 ctrl |= (ffs(count) - 1) << 4;
4d28e78f
SZ
1999 cfg->msi.msi_ctrl = ctrl;
2000 pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2);
2001
2002 /* Update counts of alloc'd messages. */
2c3d7ac8 2003 cfg->msi.msi_alloc = count;
4d28e78f 2004 cfg->msi.msi_handlers = 0;
4d28e78f
SZ
2005 return (0);
2006}
2007
/* Release the MSI messages associated with this device. */
int
pci_release_msi_method(device_t dev, device_t child)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct pcicfg_msi *msi = &dinfo->cfg.msi;
	struct resource_list_entry *rle;
	int i, irqs[32], cpuid = -1;

	/* Do we have any messages to release? */
	if (msi->msi_alloc == 0)
		return (ENODEV);
	KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages"));

	/* Make sure none of the resources are allocated. */
	if (msi->msi_handlers > 0)
		return (EBUSY);
	for (i = 0; i < msi->msi_alloc; i++) {
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
		KASSERT(rle != NULL, ("missing MSI resource"));
		if (rle->res != NULL)
			return (EBUSY);
		/* All messages were allocated on a single CPU; verify. */
		if (i == 0) {
			cpuid = rle->cpuid;
			KASSERT(cpuid >= 0 && cpuid < ncpus,
			    ("invalid MSI target cpuid %d\n", cpuid));
		} else {
			KASSERT(rle->cpuid == cpuid,
			    ("MSI targets different cpus, "
			     "was cpu%d, now cpu%d", cpuid, rle->cpuid));
		}
		irqs[i] = rle->start;
	}

	/* Update control register with 0 count. */
	KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE),
	    ("%s: MSI still enabled", __func__));
	msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK;
	pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
	    msi->msi_ctrl, 2);

	/* Release the messages. */
	PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs,
	    cpuid);
	for (i = 0; i < msi->msi_alloc; i++)
		resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);

	/* Update alloc count. */
	msi->msi_alloc = 0;
	msi->msi_addr = 0;
	msi->msi_data = 0;
	return (0);
}
2061
2062/*
2063 * Return the max supported MSI messages this device supports.
2064 * Basically, assuming the MD code can alloc messages, this function
2065 * should return the maximum value that pci_alloc_msi() can return.
2066 * Thus, it is subject to the tunables, etc.
2067 */
2068int
2069pci_msi_count_method(device_t dev, device_t child)
2070{
2071 struct pci_devinfo *dinfo = device_get_ivars(child);
2072 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2073
2074 if (pci_do_msi && msi->msi_location != 0)
2075 return (msi->msi_msgnum);
2076 return (0);
2077}
2078
/* kfree pcicfgregs structure and all depending data structures */

int
pci_freecfg(struct pci_devinfo *dinfo)
{
	struct devlist *devlist_head;
	int i;

	devlist_head = &pci_devq;

	/*
	 * Free VPD data only if a VPD capability was actually read
	 * (vpd_reg non-zero): identity string, read-only entries and
	 * their values, then the writable entries and their values.
	 */
	if (dinfo->cfg.vpd.vpd_reg) {
		kfree(dinfo->cfg.vpd.vpd_ident, M_DEVBUF);
		for (i = 0; i < dinfo->cfg.vpd.vpd_rocnt; i++)
			kfree(dinfo->cfg.vpd.vpd_ros[i].value, M_DEVBUF);
		kfree(dinfo->cfg.vpd.vpd_ros, M_DEVBUF);
		for (i = 0; i < dinfo->cfg.vpd.vpd_wcnt; i++)
			kfree(dinfo->cfg.vpd.vpd_w[i].value, M_DEVBUF);
		kfree(dinfo->cfg.vpd.vpd_w, M_DEVBUF);
	}
	STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links);
	kfree(dinfo, M_DEVBUF);

	/* increment the generation count */
	pci_generation++;

	/* we're losing one device */
	pci_numdevs--;
	return (0);
}
2108
2109/*
2110 * PCI power manangement
2111 */
2112int
2113pci_set_powerstate_method(device_t dev, device_t child, int state)
2114{
2115 struct pci_devinfo *dinfo = device_get_ivars(child);
2116 pcicfgregs *cfg = &dinfo->cfg;
f4754a59
HT
2117 uint16_t status;
2118 int result, oldstate, highest, delay;
984263bc 2119
4d28e78f 2120 if (cfg->pp.pp_cap == 0)
f4754a59
HT
2121 return (EOPNOTSUPP);
2122
2123 /*
2124 * Optimize a no state change request away. While it would be OK to
2125 * write to the hardware in theory, some devices have shown odd
2126 * behavior when going from D3 -> D3.
2127 */
2128 oldstate = pci_get_powerstate(child);
2129 if (oldstate == state)
2130 return (0);
2131
2132 /*
2133 * The PCI power management specification states that after a state
2134 * transition between PCI power states, system software must
2135 * guarantee a minimal delay before the function accesses the device.
2136 * Compute the worst case delay that we need to guarantee before we
2137 * access the device. Many devices will be responsive much more
2138 * quickly than this delay, but there are some that don't respond
2139 * instantly to state changes. Transitions to/from D3 state require
2140 * 10ms, while D2 requires 200us, and D0/1 require none. The delay
2141 * is done below with DELAY rather than a sleeper function because
2142 * this function can be called from contexts where we cannot sleep.
2143 */
2144 highest = (oldstate > state) ? oldstate : state;
2145 if (highest == PCI_POWERSTATE_D3)
2146 delay = 10000;
2147 else if (highest == PCI_POWERSTATE_D2)
2148 delay = 200;
2149 else
2150 delay = 0;
4d28e78f 2151 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2)
f4754a59
HT
2152 & ~PCIM_PSTAT_DMASK;
2153 result = 0;
2154 switch (state) {
2155 case PCI_POWERSTATE_D0:
2156 status |= PCIM_PSTAT_D0;
2157 break;
2158 case PCI_POWERSTATE_D1:
4d28e78f 2159 if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0)
f4754a59
HT
2160 return (EOPNOTSUPP);
2161 status |= PCIM_PSTAT_D1;
2162 break;
2163 case PCI_POWERSTATE_D2:
4d28e78f 2164 if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0)
f4754a59
HT
2165 return (EOPNOTSUPP);
2166 status |= PCIM_PSTAT_D2;
2167 break;
2168 case PCI_POWERSTATE_D3:
2169 status |= PCIM_PSTAT_D3;
2170 break;
2171 default:
2172 return (EINVAL);
984263bc 2173 }
f4754a59
HT
2174
2175 if (bootverbose)
2176 kprintf(
4d28e78f
SZ
2177 "pci%d:%d:%d:%d: Transition from D%d to D%d\n",
2178 dinfo->cfg.domain, dinfo->cfg.bus, dinfo->cfg.slot,
2179 dinfo->cfg.func, oldstate, state);
f4754a59 2180
4d28e78f 2181 PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2);
f4754a59
HT
2182 if (delay)
2183 DELAY(delay);
2184 return (0);
984263bc
MD
2185}
2186
e126caf1 2187int
984263bc
MD
2188pci_get_powerstate_method(device_t dev, device_t child)
2189{
2190 struct pci_devinfo *dinfo = device_get_ivars(child);
2191 pcicfgregs *cfg = &dinfo->cfg;
f4754a59 2192 uint16_t status;
984263bc
MD
2193 int result;
2194
4d28e78f
SZ
2195 if (cfg->pp.pp_cap != 0) {
2196 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2);
984263bc
MD
2197 switch (status & PCIM_PSTAT_DMASK) {
2198 case PCIM_PSTAT_D0:
2199 result = PCI_POWERSTATE_D0;
2200 break;
2201 case PCIM_PSTAT_D1:
2202 result = PCI_POWERSTATE_D1;
2203 break;
2204 case PCIM_PSTAT_D2:
2205 result = PCI_POWERSTATE_D2;
2206 break;
2207 case PCIM_PSTAT_D3:
2208 result = PCI_POWERSTATE_D3;
2209 break;
2210 default:
2211 result = PCI_POWERSTATE_UNKNOWN;
2212 break;
2213 }
2214 } else {
2215 /* No support, device is always at D0 */
2216 result = PCI_POWERSTATE_D0;
2217 }
f4754a59 2218 return (result);
984263bc
MD
2219}
2220
2221/*
2222 * Some convenience functions for PCI device drivers.
2223 */
2224
2225static __inline void
4d28e78f 2226pci_set_command_bit(device_t dev, device_t child, uint16_t bit)
984263bc 2227{
4d28e78f 2228 uint16_t command;
984263bc 2229
4d28e78f
SZ
2230 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2231 command |= bit;
2232 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
984263bc
MD
2233}
2234
2235static __inline void
4d28e78f
SZ
2236pci_clear_command_bit(device_t dev, device_t child, uint16_t bit)
2237{
2238 uint16_t command;
984263bc 2239
4d28e78f
SZ
2240 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2241 command &= ~bit;
2242 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
984263bc
MD
2243}
2244
/* Enable DMA bus mastering for 'child'; always succeeds. */
int
pci_enable_busmaster_method(device_t dev, device_t child)
{
	pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
	return (0);
}
984263bc 2251
/* Disable DMA bus mastering for 'child'; always succeeds. */
int
pci_disable_busmaster_method(device_t dev, device_t child)
{
	pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
	return (0);
}
984263bc 2258
4d28e78f
SZ
2259int
2260pci_enable_io_method(device_t dev, device_t child, int space)
ed1bd994 2261{
4d28e78f
SZ
2262 uint16_t command;
2263 uint16_t bit;
2264 char *error;
ed1bd994 2265
4d28e78f
SZ
2266 bit = 0;
2267 error = NULL;
2268
2269 switch(space) {
2270 case SYS_RES_IOPORT:
2271 bit = PCIM_CMD_PORTEN;
2272 error = "port";
ed1bd994 2273 break;
4d28e78f
SZ
2274 case SYS_RES_MEMORY:
2275 bit = PCIM_CMD_MEMEN;
2276 error = "memory";
ed1bd994
MD
2277 break;
2278 default:
4d28e78f 2279 return (EINVAL);
ed1bd994 2280 }
4d28e78f
SZ
2281 pci_set_command_bit(dev, child, bit);
2282 /* Some devices seem to need a brief stall here, what do to? */
2283 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2284 if (command & bit)
2285 return (0);
2286 device_printf(child, "failed to enable %s mapping!\n", error);
2287 return (ENXIO);
ed1bd994 2288}
984263bc 2289
4d28e78f
SZ
2290int
2291pci_disable_io_method(device_t dev, device_t child, int space)
b4c0a845 2292{
4d28e78f
SZ
2293 uint16_t command;
2294 uint16_t bit;
2295 char *error;
b4c0a845 2296
4d28e78f
SZ
2297 bit = 0;
2298 error = NULL;
b4c0a845 2299
4d28e78f
SZ
2300 switch(space) {
2301 case SYS_RES_IOPORT:
2302 bit = PCIM_CMD_PORTEN;
2303 error = "port";
b4c0a845 2304 break;
4d28e78f
SZ
2305 case SYS_RES_MEMORY:
2306 bit = PCIM_CMD_MEMEN;
2307 error = "memory";
b4c0a845
SZ
2308 break;
2309 default:
4d28e78f 2310 return (EINVAL);
b4c0a845 2311 }
4d28e78f
SZ
2312 pci_clear_command_bit(dev, child, bit);
2313 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2314 if (command & bit) {
2315 device_printf(child, "failed to disable %s mapping!\n", error);
2316 return (ENXIO);
b4c0a845 2317 }
4d28e78f 2318 return (0);
b4c0a845
SZ
2319}
2320
/*
 * New style pci driver. Parent device is either a pci-host-bridge or a
 * pci-pci-bridge. Both kinds are represented by instances of pcib.
 */

/*
 * Under bootverbose, dump the interesting config-space state of a newly
 * found device: IDs, location, class, command/status, timers, interrupt
 * routing, and the power-management / MSI / MSI-X / PCI Express
 * capabilities that were parsed into 'dinfo->cfg'.
 */
void
pci_print_verbose(struct pci_devinfo *dinfo)
{

	if (bootverbose) {
		pcicfgregs *cfg = &dinfo->cfg;

		kprintf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n",
		    cfg->vendor, cfg->device, cfg->revid);
		kprintf("\tdomain=%d, bus=%d, slot=%d, func=%d\n",
		    cfg->domain, cfg->bus, cfg->slot, cfg->func);
		kprintf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n",
		    cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype,
		    cfg->mfdev);
		kprintf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n",
		    cfg->cmdreg, cfg->statreg, cfg->cachelnsz);
		/* lattimer is in units of 30ns, mingnt/maxlat in 250ns. */
		kprintf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n",
		    cfg->lattimer, cfg->lattimer * 30, cfg->mingnt,
		    cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250);
		if (cfg->intpin > 0)
			kprintf("\tintpin=%c, irq=%d\n",
			    cfg->intpin +'a' -1, cfg->intline);
		if (cfg->pp.pp_cap) {
			uint16_t status;

			status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2);
			kprintf("\tpowerspec %d supports D0%s%s D3 current D%d\n",
			    cfg->pp.pp_cap & PCIM_PCAP_SPEC,
			    cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "",
			    cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "",
			    status & PCIM_PSTAT_DMASK);
		}
		if (cfg->msi.msi_location) {
			int ctrl;

			ctrl = cfg->msi.msi_ctrl;
			kprintf("\tMSI supports %d message%s%s%s\n",
			    cfg->msi.msi_msgnum,
			    (cfg->msi.msi_msgnum == 1) ? "" : "s",
			    (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "",
			    (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":"");
		}
		if (cfg->msix.msix_location) {
			kprintf("\tMSI-X supports %d message%s ",
			    cfg->msix.msix_msgnum,
			    (cfg->msix.msix_msgnum == 1) ? "" : "s");
			if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar)
				kprintf("in map 0x%x\n",
				    cfg->msix.msix_table_bar);
			else
				kprintf("in maps 0x%x and 0x%x\n",
				    cfg->msix.msix_table_bar,
				    cfg->msix.msix_pba_bar);
		}
		pci_print_verbose_expr(cfg);
	}
}
2383
/*
 * Verbose-boot helper: describe the PCI Express capability (version,
 * raw capability word, port type, and slot capabilities when a slot is
 * implemented).  Silent when not bootverbose or no PCIe capability.
 */
static void
pci_print_verbose_expr(const pcicfgregs *cfg)
{
	const struct pcicfg_expr *expr = &cfg->expr;
	const char *port_name;
	uint16_t port_type;

	if (!bootverbose)
		return;

	if (expr->expr_ptr == 0) /* No PCI Express capability */
		return;

	kprintf("\tPCI Express ver.%d cap=0x%04x",
	    expr->expr_cap & PCIEM_CAP_VER_MASK, expr->expr_cap);
	/* Port-type decoding below only applies to capability version 1. */
	if ((expr->expr_cap & PCIEM_CAP_VER_MASK) != PCIEM_CAP_VER_1)
		goto back;

	port_type = expr->expr_cap & PCIEM_CAP_PORT_TYPE;

	switch (port_type) {
	case PCIE_END_POINT:
		port_name = "DEVICE";
		break;
	case PCIE_LEG_END_POINT:
		port_name = "LEGDEV";
		break;
	case PCIE_ROOT_PORT:
		port_name = "ROOT";
		break;
	case PCIE_UP_STREAM_PORT:
		port_name = "UPSTREAM";
		break;
	case PCIE_DOWN_STREAM_PORT:
		port_name = "DOWNSTRM";
		break;
	case PCIE_PCIE2PCI_BRIDGE:
		port_name = "PCIE2PCI";
		break;
	case PCIE_PCI2PCIE_BRIDGE:
		port_name = "PCI2PCIE";
		break;
	default:
		port_name = NULL;
		break;
	}
	/* Root/downstream ports without an implemented slot stay unnamed. */
	if ((port_type == PCIE_ROOT_PORT ||
	     port_type == PCIE_DOWN_STREAM_PORT) &&
	    !(expr->expr_cap & PCIEM_CAP_SLOT_IMPL))
		port_name = NULL;
	if (port_name != NULL)
		kprintf("[%s]", port_name);

	if (pcie_slotimpl(cfg)) {
		kprintf(", slotcap=0x%08x", expr->expr_slotcap);
		if (expr->expr_slotcap & PCIEM_SLTCAP_HP_CAP)
			kprintf("[HOTPLUG]");
	}
back:
	kprintf("\n");
}
2445
984263bc 2446static int
4a5a2d63 2447pci_porten(device_t pcib, int b, int s, int f)
984263bc 2448{
4a5a2d63
JS
2449 return (PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2)
2450 & PCIM_CMD_PORTEN) != 0;
984263bc
MD
2451}
2452
2453static int
4a5a2d63 2454pci_memen(device_t pcib, int b, int s, int f)
984263bc 2455{
4a5a2d63
JS
2456 return (PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2)
2457 & PCIM_CMD_MEMEN) != 0;
984263bc
MD
2458}
2459
/*
 * Add a resource based on a pci map register. Return 1 if the map
 * register is a 32bit map register or 2 if it is a 64bit register.
 */
static int
pci_add_map(device_t pcib, device_t bus, device_t dev,
    int b, int s, int f, int reg, struct resource_list *rl, int force,
    int prefetch)
{
	uint32_t map;
	pci_addr_t base;
	pci_addr_t start, end, count;
	uint8_t ln2size;
	uint8_t ln2range;
	uint32_t testval;
	uint16_t cmd;
	int type;
	int barlen;
	struct resource *res;

	/*
	 * Size the BAR the standard way: save it, write all 1s, read
	 * back the size mask, then restore the original value.
	 */
	map = PCIB_READ_CONFIG(pcib, b, s, f, reg, 4);
	PCIB_WRITE_CONFIG(pcib, b, s, f, reg, 0xffffffff, 4);
	testval = PCIB_READ_CONFIG(pcib, b, s, f, reg, 4);
	PCIB_WRITE_CONFIG(pcib, b, s, f, reg, map, 4);

	if (PCI_BAR_MEM(map)) {
		type = SYS_RES_MEMORY;
		if (map & PCIM_BAR_MEM_PREFETCH)
			prefetch = 1;
	} else
		type = SYS_RES_IOPORT;
	ln2size = pci_mapsize(testval);
	ln2range = pci_maprange(testval);
	base = pci_mapbase(map);
	barlen = ln2range == 64 ? 2 : 1;

	/*
	 * For I/O registers, if bottom bit is set, and the next bit up
	 * isn't clear, we know we have a BAR that doesn't conform to the
	 * spec, so ignore it. Also, sanity check the size of the data
	 * areas to the type of memory involved. Memory must be at least
	 * 16 bytes in size, while I/O ranges must be at least 4.
	 */
	if (PCI_BAR_IO(testval) && (testval & PCIM_BAR_IO_RESERVED) != 0)
		return (barlen);
	if ((type == SYS_RES_MEMORY && ln2size < 4) ||
	    (type == SYS_RES_IOPORT && ln2size < 2))
		return (barlen);

	if (ln2range == 64)
		/* Read the other half of a 64bit map register */
		base |= (uint64_t) PCIB_READ_CONFIG(pcib, b, s, f, reg + 4, 4) << 32;
	if (bootverbose) {
		kprintf("\tmap[%02x]: type %s, range %2d, base %#jx, size %2d",
		    reg, pci_maptype(map), ln2range, (uintmax_t)base, ln2size);
		if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f))
			kprintf(", port disabled\n");
		else if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f))
			kprintf(", memory disabled\n");
		else
			kprintf(", enabled\n");
	}

	/*
	 * If base is 0, then we have problems. It is best to ignore
	 * such entries for the moment. These will be allocated later if
	 * the driver specifically requests them. However, some
	 * removable busses look better when all resources are allocated,
	 * so allow '0' to be overriden.
	 *
	 * Similarly treat maps whose values is the same as the test value
	 * read back. These maps have had all f's written to them by the
	 * BIOS in an attempt to disable the resources.
	 */
	if (!force && (base == 0 || map == testval))
		return (barlen);
	if ((u_long)base != base) {
		device_printf(bus,
		    "pci%d:%d:%d:%d bar %#x too many address bits",
		    pci_get_domain(dev), b, s, f, reg);
		return (barlen);
	}

	/*
	 * This code theoretically does the right thing, but has
	 * undesirable side effects in some cases where peripherals
	 * respond oddly to having these bits enabled. Let the user
	 * be able to turn them off (since pci_enable_io_modes is 1 by
	 * default).
	 */
	if (pci_enable_io_modes) {
		/* Turn on resources that have been left off by a lazy BIOS */
		if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f)) {
			cmd = PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2);
			cmd |= PCIM_CMD_PORTEN;
			PCIB_WRITE_CONFIG(pcib, b, s, f, PCIR_COMMAND, cmd, 2);
		}
		if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f)) {
			cmd = PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2);
			cmd |= PCIM_CMD_MEMEN;
			PCIB_WRITE_CONFIG(pcib, b, s, f, PCIR_COMMAND, cmd, 2);
		}
	} else {
		if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f))
			return (barlen);
		if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f))
			return (barlen);
	}

	count = 1 << ln2size;
	if (base == 0 || base == pci_mapbase(testval)) {
		start = 0;	/* Let the parent decide. */
		end = ~0ULL;
	} else {
		start = base;
		end = base + (1 << ln2size) - 1;
	}
	resource_list_add(rl, type, reg, start, end, count, -1);

	/*
	 * Try to allocate the resource for this BAR from our parent
	 * so that this resource range is already reserved. The
	 * driver for this device will later inherit this resource in
	 * pci_alloc_resource().
	 */
	res = resource_list_alloc(rl, bus, dev, type, &reg, start, end, count,
	    prefetch ? RF_PREFETCHABLE : 0, -1);
	if (res == NULL) {
		/*
		 * If the allocation fails, delete the resource list
		 * entry to force pci_alloc_resource() to allocate
		 * resources from the parent.
		 */
		resource_list_delete(rl, type, reg);
#ifdef PCI_BAR_CLEAR
		/* Clear the BAR */
		start = 0;
#else	/* !PCI_BAR_CLEAR */
		/*
		 * Don't clear BAR here. Some BIOS lists HPET as a
		 * PCI function, clearing the BAR causes HPET timer
		 * stop ticking.
		 */
		if (bootverbose) {
			kprintf("pci:%d:%d:%d: resource reservation failed "
			    "%#jx - %#jx\n", b, s, f,
			    (intmax_t)start, (intmax_t)end);
		}
		return (barlen);
#endif	/* PCI_BAR_CLEAR */
	} else {
		start = rman_get_start(res);
	}
	/* Write the (possibly parent-chosen) address back into the BAR. */
	pci_write_config(dev, reg, start, 4);
	if (ln2range == 64)
		pci_write_config(dev, reg + 4, start >> 32, 4);
	return (barlen);
}
2618
/*
 * For ATA devices we need to decide early what addressing mode to use.
 * Legacy demands that the primary and secondary ATA ports sits on the
 * same addresses that old ISA hardware did. This dictates that we use
 * those addresses and ignore the BAR's if we cannot set PCI native
 * addressing mode.
 */
static void
pci_ata_maps(device_t pcib, device_t bus, device_t dev, int b,
    int s, int f, struct resource_list *rl, int force, uint32_t prefetchmask)
{
	int rid, type, progif;
#if 0
	/* if this device supports PCI native addressing use it */
	progif = pci_read_config(dev, PCIR_PROGIF, 1);
	if ((progif & 0x8a) == 0x8a) {
		if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) &&
		    pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) {
			kprintf("Trying ATA native PCI addressing mode\n");
			pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1);
		}
	}
#endif
	progif = pci_read_config(dev, PCIR_PROGIF, 1);
	type = SYS_RES_IOPORT;
	/*
	 * Primary channel: use BARs 0/1 in native mode, otherwise claim
	 * the legacy ISA-compatible ports 0x1f0-0x1f7 and 0x3f6.
	 */
	if (progif & PCIP_STORAGE_IDE_MODEPRIM) {
		pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(0), rl, force,
		    prefetchmask & (1 << 0));
		pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(1), rl, force,
		    prefetchmask & (1 << 1));
	} else {
		rid = PCIR_BAR(0);
		resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8, -1);
		resource_list_alloc(rl, bus, dev, type, &rid, 0x1f0, 0x1f7, 8,
		    0, -1);
		rid = PCIR_BAR(1);
		resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1, -1);
		resource_list_alloc(rl, bus, dev, type, &rid, 0x3f6, 0x3f6, 1,
		    0, -1);
	}
	/*
	 * Secondary channel: BARs 2/3 in native mode, else legacy ports
	 * 0x170-0x177 and 0x376.
	 */
	if (progif & PCIP_STORAGE_IDE_MODESEC) {
		pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(2), rl, force,
		    prefetchmask & (1 << 2));
		pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(3), rl, force,
		    prefetchmask & (1 << 3));
	} else {
		rid = PCIR_BAR(2);
		resource_list_add(rl, type, rid, 0x170, 0x177, 8, -1);
		resource_list_alloc(rl, bus, dev, type, &rid, 0x170, 0x177, 8,
		    0, -1);
		rid = PCIR_BAR(3);
		resource_list_add(rl, type, rid, 0x376, 0x376, 1, -1);
		resource_list_alloc(rl, bus, dev, type, &rid, 0x376, 0x376, 1,
		    0, -1);
	}
	/* BARs 4/5 (bus-master DMA etc.) are always mapped normally. */
	pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(4), rl, force,
	    prefetchmask & (1 << 4));
	pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(5), rl, force,
	    prefetchmask & (1 << 5));
}
201eb0a7 2679
/*
 * Work out the legacy INTx IRQ for 'dev' (tunable override, intline
 * register, or bus routing), write it back to PCIR_INTLINE if changed,
 * and register it as the rid-0 SYS_RES_IRQ resource.
 */
static void
pci_assign_interrupt(device_t bus, device_t dev, int force_route)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	pcicfgregs *cfg = &dinfo->cfg;
	char tunable_name[64];
	int irq;

	/* Has to have an intpin to have an interrupt. */
	if (cfg->intpin == 0)
		return;

	/* Let the user override the IRQ with a tunable. */
	irq = PCI_INVALID_IRQ;
	ksnprintf(tunable_name, sizeof(tunable_name),
	    "hw.pci%d.%d.%d.INT%c.irq",
	    cfg->domain, cfg->bus, cfg->slot, cfg->intpin + 'A' - 1);
	if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0))
		irq = PCI_INVALID_IRQ;

	/*
	 * If we didn't get an IRQ via the tunable, then we either use the
	 * IRQ value in the intline register or we ask the bus to route an
	 * interrupt for us. If force_route is true, then we only use the
	 * value in the intline register if the bus was unable to assign an
	 * IRQ.
	 */
	if (!PCI_INTERRUPT_VALID(irq)) {
		if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route)
			irq = PCI_ASSIGN_INTERRUPT(bus, dev);
		if (!PCI_INTERRUPT_VALID(irq))
			irq = cfg->intline;
	}

	/* If after all that we don't have an IRQ, just bail. */
	if (!PCI_INTERRUPT_VALID(irq))
		return;

	/* Update the config register if it changed. */
	if (irq != cfg->intline) {
		cfg->intline = irq;
		pci_write_config(dev, PCIR_INTLINE, irq, 1);
	}

	/* Add this IRQ as rid 0 interrupt resource. */
	resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1,
	    machintr_legacy_intr_cpuid(irq));
}
2728
/*
 * Populate the device's resource list: map its BARs (with special
 * handling for legacy-mode ATA controllers and quirked extra map
 * registers) and assign its legacy interrupt.
 */
void
pci_add_resources(device_t pcib, device_t bus, device_t dev, int force, uint32_t prefetchmask)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	pcicfgregs *cfg = &dinfo->cfg;
	struct resource_list *rl = &dinfo->resources;
	struct pci_quirk *q;
	int b, i, f, s;

	b = cfg->bus;
	s = cfg->slot;
	f = cfg->func;

	/* ATA devices needs special map treatment */
	if ((pci_get_class(dev) == PCIC_STORAGE) &&
	    (pci_get_subclass(dev) == PCIS_STORAGE_IDE) &&
	    ((pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV) ||
	     (!pci_read_config(dev, PCIR_BAR(0), 4) &&
	      !pci_read_config(dev, PCIR_BAR(2), 4))) )
		pci_ata_maps(pcib, bus, dev, b, s, f, rl, force, prefetchmask);
	else
		/* pci_add_map returns 1 or 2 depending on BAR width. */
		for (i = 0; i < cfg->nummaps;)
			i += pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(i),
			    rl, force, prefetchmask & (1 << i));

	/*
	 * Add additional, quirked resources.
	 */
	for (q = &pci_quirks[0]; q->devid; q++) {
		if (q->devid == ((cfg->device << 16) | cfg->vendor)
		    && q->type == PCI_QUIRK_MAP_REG)
			pci_add_map(pcib, bus, dev, b, s, f, q->arg1, rl,
			    force, 0);
	}

	if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) {
		/*
		 * Try to re-route interrupts. Sometimes the BIOS or
		 * firmware may leave bogus values in these registers.
		 * If the re-route fails, then just stick with what we
		 * have.
		 */
		pci_assign_interrupt(bus, dev, 1);
	}
}
2774
/*
 * Enumerate every slot/function on bus 'busno' in domain 'domain' and
 * add a child device for each function that responds to config reads.
 * 'dinfo_size' lets subclassed busses allocate a larger devinfo.
 */
void
pci_add_children(device_t dev, int domain, int busno, size_t dinfo_size)
{
#define	REG(n, w)	PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
	device_t pcib = device_get_parent(dev);
	struct pci_devinfo *dinfo;
	int maxslots;
	int s, f, pcifunchigh;
	uint8_t hdrtype;

	KASSERT(dinfo_size >= sizeof(struct pci_devinfo),
	    ("dinfo_size too small"));
	maxslots = PCIB_MAXSLOTS(pcib);
	for (s = 0; s <= maxslots; s++) {
		pcifunchigh = 0;
		f = 0;
		DELAY(1);
		hdrtype = REG(PCIR_HDRTYPE, 1);
		if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
			continue;
		/* Multi-function device: probe all 8 functions. */
		if (hdrtype & PCIM_MFDEV)
			pcifunchigh = PCI_FUNCMAX;
		for (f = 0; f <= pcifunchigh; f++) {
			dinfo = pci_read_device(pcib, domain, busno, s, f,
			    dinfo_size);
			if (dinfo != NULL) {
				pci_add_child(dev, dinfo);
			}
		}
	}
#undef REG
}
2807
/*
 * Attach one discovered function to the bus: create the child device,
 * snapshot then restore its config space (save-before-restore order is
 * deliberate), print it under bootverbose, and add its resources.
 */
void
pci_add_child(device_t bus, struct pci_devinfo *dinfo)
{
	device_t pcib;

	pcib = device_get_parent(bus);
	dinfo->cfg.dev = device_add_child(bus, NULL, -1);
	device_set_ivars(dinfo->cfg.dev, dinfo);
	resource_list_init(&dinfo->resources);
	pci_cfg_save(dinfo->cfg.dev, dinfo, 0);
	pci_cfg_restore(dinfo->cfg.dev, dinfo);
	pci_print_verbose(dinfo);
	pci_add_resources(pcib, bus, dinfo->cfg.dev, 0, 0);
}
2822
/* Generic PCI bus probe; low priority so subclasses win. */
static int
pci_probe(device_t dev)
{
	device_set_desc(dev, "PCI bus");

	/* Allow other subclasses to override this driver. */
	return (-1000);
}
2831
/* Attach the PCI bus: find our domain/bus numbers and enumerate children. */
static int
pci_attach(device_t dev)
{
	int busno, domain;

	/*
	 * Since there can be multiple independantly numbered PCI
	 * busses on systems with multiple PCI domains, we can't use
	 * the unit number to decide which bus we are probing. We ask
	 * the parent pcib what our domain and bus numbers are.
	 */
	domain = pcib_get_domain(dev);
	busno = pcib_get_bus(dev);
	if (bootverbose)
		device_printf(dev, "domain=%d, physical bus=%d\n",
		    domain, busno);

	pci_add_children(dev, domain, busno, sizeof(struct pci_devinfo));

	return (bus_generic_attach(dev));
}
2853
/*
 * Bus suspend: save each child's config space, suspend the children,
 * then power down attached type-0 children (to D3 unless ACPI suggests
 * a different state).
 */
int
pci_suspend(device_t dev)
{
	int dstate, error, i, numdevs;
	device_t acpi_dev, child, *devlist;
	struct pci_devinfo *dinfo;

	/*
	 * Save the PCI configuration space for each child and set the
	 * device in the appropriate power state for this sleep state.
	 */
	acpi_dev = NULL;
	if (pci_do_power_resume)
		acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
	device_get_children(dev, &devlist, &numdevs);
	for (i = 0; i < numdevs; i++) {
		child = devlist[i];
		dinfo = (struct pci_devinfo *) device_get_ivars(child);
		pci_cfg_save(child, dinfo, 0);
	}

	/* Suspend devices before potentially powering them down. */
	error = bus_generic_suspend(dev);
	if (error) {
		kfree(devlist, M_TEMP);
		return (error);
	}

	/*
	 * Always set the device to D3. If ACPI suggests a different
	 * power state, use it instead. If ACPI is not present, the
	 * firmware is responsible for managing device power. Skip
	 * children who aren't attached since they are powered down
	 * separately. Only manage type 0 devices for now.
	 */
	for (i = 0; acpi_dev && i < numdevs; i++) {
		child = devlist[i];
		dinfo = (struct pci_devinfo *) device_get_ivars(child);
		if (device_is_attached(child) && dinfo->cfg.hdrtype == 0) {
			dstate = PCI_POWERSTATE_D3;
			ACPI_PWR_FOR_SLEEP(acpi_dev, child, &dstate);
			pci_set_powerstate(child, dstate);
		}
	}
	kfree(devlist, M_TEMP);
	return (0);
}
2901
/*
 * Bus resume: power each child back to D0 (via ACPI when available)
 * and restore its saved config space, then resume the children.
 */
int
pci_resume(device_t dev)
{
	int i, numdevs;
	device_t acpi_dev, child, *devlist;
	struct pci_devinfo *dinfo;

	/*
	 * Set each child to D0 and restore its PCI configuration space.
	 */
	acpi_dev = NULL;
	if (pci_do_power_resume)
		acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
	device_get_children(dev, &devlist, &numdevs);
	for (i = 0; i < numdevs; i++) {
		/*
		 * Notify ACPI we're going to D0 but ignore the result. If
		 * ACPI is not present, the firmware is responsible for
		 * managing device power. Only manage type 0 devices for now.
		 */
		child = devlist[i];
		dinfo = (struct pci_devinfo *) device_get_ivars(child);
		if (acpi_dev && device_is_attached(child) &&
		    dinfo->cfg.hdrtype == 0) {
			ACPI_PWR_FOR_SLEEP(acpi_dev, child, NULL);
			pci_set_powerstate(child, PCI_POWERSTATE_D0);
		}

		/* Now the device is powered up, restore its config space. */
		pci_cfg_restore(child, dinfo);
	}
	kfree(devlist, M_TEMP);
	return (bus_generic_resume(dev));
}
2936
/*
 * Locate the preloaded "pci_vendor_data" module (if the loader provided
 * one) and publish its address/size; newline-terminate the database.
 */
static void
pci_load_vendor_data(void)
{
	caddr_t vendordata, info;

	if ((vendordata = preload_search_by_type("pci_vendor_data")) != NULL) {
		info = preload_search_info(vendordata, MODINFO_ADDR);
		pci_vendordata = *(char **)info;
		info = preload_search_info(vendordata, MODINFO_SIZE);
		pci_vendordata_size = *(size_t *)info;
		/* terminate the database */
		pci_vendordata[pci_vendordata_size] = '\n';
	}
}
2951
/*
 * A new driver was registered on this bus: re-probe every child that is
 * still unclaimed (DS_NOTPRESENT), restoring its config space first and
 * powering it back down if the probe/attach fails.
 */
void
pci_driver_added(device_t dev, driver_t *driver)
{
	int numdevs;
	device_t *devlist;
	device_t child;
	struct pci_devinfo *dinfo;
	int i;

	if (bootverbose)
		device_printf(dev, "driver added\n");
	DEVICE_IDENTIFY(driver, dev);
	device_get_children(dev, &devlist, &numdevs);
	for (i = 0; i < numdevs; i++) {
		child = devlist[i];
		if (device_get_state(child) != DS_NOTPRESENT)
			continue;
		dinfo = device_get_ivars(child);
		pci_print_verbose(dinfo);
		if (bootverbose)
			kprintf("pci%d:%d:%d:%d: reprobing on driver added\n",
			    dinfo->cfg.domain, dinfo->cfg.bus, dinfo->cfg.slot,
			    dinfo->cfg.func);
		pci_cfg_restore(child, dinfo);
		if (device_probe_and_attach(child) != 0)
			pci_cfg_save(child, dinfo, 1);
	}
	kfree(devlist, M_TEMP);
}
2981
static void
pci_child_detached(device_t parent __unused, device_t child)
{
	/* Turn child's power off */
	pci_cfg_save(child, device_get_ivars(child), 1);
}
2988
4d28e78f
SZ
2989int
2990pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags,
2991 driver_intr_t *intr, void *arg, void **cookiep, lwkt_serialize_t serializer)
2992{
fb9077ae 2993 int rid, error;
4d28e78f 2994 void *cookie;
fb9077ae 2995
4d28e78f
SZ
2996 error = bus_generic_setup_intr(dev, child, irq, flags, intr,
2997 arg, &cookie, serializer);
2998 if (error)
2999 return (error);
3000
3001 /* If this is not a direct child, just bail out. */
3002 if (device_get_parent(child) != dev) {
3003 *cookiep = cookie;
3004 return(0);
3005 }
3006
4d28e78f
SZ
3007 rid = rman_get_rid(irq);
3008 if (rid == 0) {
3009 /* Make sure that INTx is enabled */
3010 pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
3011 } else {
f9c942fb
SZ
3012 struct pci_devinfo *dinfo = device_get_ivars(child);
3013 uint64_t addr;
3014 uint32_t data;
3015
4d28e78f
SZ
3016 /*
3017 * Check to see if the interrupt is MSI or MSI-X.
3018 * Ask our parent to map the MSI and give
3019 * us the address and data register values.
3020 * If we fail for some reason, teardown the
3021 * interrupt handler.
3022 */
4d28e78f 3023 if (dinfo->cfg.msi.msi_alloc > 0) {
f9c942fb
SZ
3024 struct pcicfg_msi *msi = &dinfo->cfg.msi;
3025
3026 if (msi->msi_addr == 0) {
3027 KASSERT(msi->msi_handlers == 0,
4d28e78f
SZ
3028 ("MSI has handlers, but vectors not mapped"));
3029 error = PCIB_MAP_MSI(device_get_parent(dev),
0af900e1
SZ
3030 child, rman_get_start(irq), &addr, &data,
3031 rman_get_cpuid(irq));
4d28e78f
SZ
3032 if (error)
3033 goto bad;
f9c942fb
SZ
3034 msi->msi_addr = addr;
3035 msi->msi_data = data;
4d28e78f 3036 pci_enable_msi(child, addr, data);
984263bc 3037 }
f9c942fb 3038 msi->msi_handlers++;
4d28e78f 3039 } else {
f9c942fb
SZ
3040 struct msix_vector *mv;
3041 u_int vector;
3042
4d28e78f 3043 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
f9c942fb
SZ
3044 ("No MSI-X or MSI rid %d allocated\n", rid));
3045
3046 mv = pci_find_msix_vector(child, rid);
3047 KASSERT(mv != NULL,
3048 ("MSI-X rid %d is not allocated\n", rid));
3049 KASSERT(mv->mv_address == 0,
3050 ("MSI-X rid %d has been setup\n", rid));
3051
3052 error = PCIB_MAP_MSI(device_get_parent(dev),
3053 child, rman_get_start(irq), &addr, &data,
3054 rman_get_cpuid(irq));
3055 if (error)
3056 goto bad;
3057 mv->mv_address = addr;
3058 mv->mv_data = data;
3059
3060 vector = PCI_MSIX_RID2VEC(rid);
3061 pci_setup_msix_vector(child, vector,
3062 mv->mv_address, mv->mv_data);
3063 pci_unmask_msix_vector(child, vector);
4d28e78f
SZ
3064 }
3065
3066 /* Make sure that INTx is disabled if we are using MSI/MSIX */
3067 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
3068 bad:
3069 if (error) {
3070 (void)bus_generic_teardown_intr(dev, child, irq,
3071 cookie);
3072 return (error);
3073 }
3074 }
4d28e78f
SZ
3075 *cookiep = cookie;
3076 return (0);
3077}
3078
3079int
3080pci_teardown_intr(device_t dev, device_t child, struct resource *irq,
3081 void *cookie)
3082{
fb9077ae 3083 int rid, error;
4d28e78f
SZ
3084
3085 if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE))
3086 return (EINVAL);
3087
3088 /* If this isn't a direct child, just bail out */
3089 if (device_get_parent(child) != dev)
3090 return(bus_generic_teardown_intr(dev, child, irq, cookie));
3091
4d28e78f
SZ
3092 rid = rman_get_rid(irq);
3093 if (rid == 0) {
3094 /* Mask INTx */
3095 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
3096 } else {
f9c942fb
SZ
3097 struct pci_devinfo *dinfo = device_get_ivars(child);
3098
4d28e78f
SZ
3099 /*
3100 * Check to see if the interrupt is MSI or MSI-X. If so,
3101 * decrement the appropriate handlers count and mask the
3102 * MSI-X message, or disable MSI messages if the count
3103 * drops to 0.
3104 */
4d28e78f 3105 if (dinfo->cfg.msi.msi_alloc > 0) {
f9c942fb
SZ
3106 struct pcicfg_msi *msi = &dinfo->cfg.msi;
3107
3108 KASSERT(rid <= msi->msi_alloc,
3109 ("MSI-X index too high\n"));
3110 KASSERT(msi->msi_handlers > 0,
3111 ("MSI rid %d is not setup\n", rid));
3112
3113 msi->msi_handlers--;
3114 if (msi->msi_handlers == 0)
4d28e78f
SZ
3115 pci_disable_msi(child);
3116 } else {
f9c942fb
SZ
3117 struct msix_vector *mv;
3118
4d28e78f 3119 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
f9c942fb
SZ
3120 ("No MSI or MSI-X rid %d allocated", rid));
3121
3122 mv = pci_find_msix_vector(child, rid);
3123 KASSERT(mv != NULL,
3124 ("MSI-X rid %d is not allocated\n", rid));
3125 KASSERT(mv->mv_address != 0,
3126 ("MSI-X rid %d has not been setup\n", rid));
3127
3128 pci_mask_msix_vector(child, PCI_MSIX_RID2VEC(rid));
3129 mv->mv_address = 0;
3130 mv->mv_data = 0;
984263bc
MD
3131 }
3132 }
4d28e78f
SZ
3133 error = bus_generic_teardown_intr(dev, child, irq, cookie);
3134 if (rid > 0)
3135 KASSERT(error == 0,
3136 ("%s: generic teardown failed for MSI/MSI-X", __func__));
4d28e78f 3137 return (error);
984263bc
MD
3138}
3139
e126caf1 3140int
984263bc
MD
3141pci_print_child(device_t dev, device_t child)
3142{
3143 struct pci_devinfo *dinfo;
3144 struct resource_list *rl;
984263bc
MD
3145 int retval = 0;
3146
3147 dinfo = device_get_ivars(child);
984263bc
MD
3148 rl = &dinfo->resources;
3149
3150 retval += bus_print_child_header(dev, child);
3151
4d28e78f
SZ
3152 retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx");
3153 retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx");
3154 retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld");
984263bc 3155 if (device_get_flags(dev))
85f8e2ea 3156 retval += kprintf(" flags %#x", device_get_flags(dev));
984263bc 3157
85f8e2ea 3158 retval += kprintf(" at device %d.%d", pci_get_slot(child),
4d28e78f 3159 pci_get_function(child));
984263bc
MD
3160
3161 retval += bus_print_child_footer(dev, child);
3162
3163 return (retval);
3164}
3165
4d28e78f
SZ
3166static struct
3167{
3168 int class;
3169 int subclass;
3170 char *desc;
3171} pci_nomatch_tab[] = {
3172 {PCIC_OLD, -1, "old"},
3173 {PCIC_OLD, PCIS_OLD_NONVGA, "non-VGA display device"},
3174 {PCIC_OLD, PCIS_OLD_VGA, "VGA-compatible display device"},
3175 {PCIC_STORAGE, -1, "mass storage"},
3176 {PCIC_STORAGE, PCIS_STORAGE_SCSI, "SCSI"},
3177 {PCIC_STORAGE, PCIS_STORAGE_IDE, "ATA"},
3178 {PCIC_STORAGE, PCIS_STORAGE_FLOPPY, "floppy disk"},
3179 {PCIC_STORAGE, PCIS_STORAGE_IPI, "IPI"},
3180 {PCIC_STORAGE, PCIS_STORAGE_RAID, "RAID"},
3181 {PCIC_STORAGE, PCIS_STORAGE_ATA_ADMA, "ATA (ADMA)"},
3182 {PCIC_STORAGE, PCIS_STORAGE_SATA, "SATA"},
3183 {PCIC_STORAGE, PCIS_STORAGE_SAS, "SAS"},
3184 {PCIC_NETWORK, -1, "network"},
3185 {PCIC_NETWORK, PCIS_NETWORK_ETHERNET, "ethernet"},
3186 {PCIC_NETWORK, PCIS_NETWORK_TOKENRING, "token ring"},
3187 {PCIC_NETWORK, PCIS_NETWORK_FDDI, "fddi"},
3188 {PCIC_NETWORK, PCIS_NETWORK_ATM, "ATM"},
3189 {PCIC_NETWORK, PCIS_NETWORK_ISDN, "ISDN"},
3190 {PCIC_DISPLAY, -1, "display"},
3191 {PCIC_DISPLAY, PCIS_DISPLAY_VGA, "VGA"},
3192 {PCIC_DISPLAY, PCIS_DISPLAY_XGA, "XGA"},
3193 {PCIC_DISPLAY, PCIS_DISPLAY_3D, "3D"},
3194 {PCIC_MULTIMEDIA, -1, "multimedia"},
3195 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_VIDEO, "video"},
3196 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_AUDIO, "audio"},
3197 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_TELE, "telephony"},
3198 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_HDA, "HDA"},
3199 {PCIC_MEMORY, -1, "memory"},
3200 {PCIC_MEMORY, PCIS_MEMORY_RAM, "RAM"},
3201 {PCIC_MEMORY, PCIS_MEMORY_FLASH, "flash"},
3202 {PCIC_BRIDGE, -1, "bridge"},
3203 {PCIC_BRIDGE, PCIS_BRIDGE_HOST, "HOST-PCI"},
3204 {PCIC_BRIDGE, PCIS_BRIDGE_ISA, "PCI-ISA"},
3205 {PCIC_BRIDGE, PCIS_BRIDGE_EISA, "PCI-EISA"},
3206 {PCIC_BRIDGE, PCIS_BRIDGE_MCA, "PCI-MCA"},
3207 {PCIC_BRIDGE, PCIS_BRIDGE_PCI, "PCI-PCI"},
3208 {PCIC_BRIDGE, PCIS_BRIDGE_PCMCIA, "PCI-PCMCIA"},
3209 {PCIC_BRIDGE, PCIS_BRIDGE_NUBUS, "PCI-NuBus"},
3210 {PCIC_BRIDGE, PCIS_BRIDGE_CARDBUS, "PCI-CardBus"},
3211 {PCIC_BRIDGE, PCIS_BRIDGE_RACEWAY, "PCI-RACEway"},
3212 {PCIC_SIMPLECOMM, -1, "simple comms"},
3213 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_UART, "UART"}, /* could detect 16550 */
3214 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_PAR, "parallel port"},
3215 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MULSER, "multiport serial"},
3216 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MODEM, "generic modem"},
3217 {PCIC_BASEPERIPH, -1, "base peripheral"},
3218 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PIC, "interrupt controller"},
3219 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_DMA, "DMA controller"},
3220 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_TIMER, "timer"},
3221 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_RTC, "realtime clock"},
3222 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PCIHOT, "PCI hot-plug controller"},
3223 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_SDHC, "SD host controller"},
3224 {PCIC_INPUTDEV, -1, "input device"},
3225 {PCIC_INPUTDEV, PCIS_INPUTDEV_KEYBOARD, "keyboard"},
3226 {PCIC_INPUTDEV, PCIS_INPUTDEV_DIGITIZER,"digitizer"},
3227 {PCIC_INPUTDEV, PCIS_INPUTDEV_MOUSE, "mouse"},
3228 {PCIC_INPUTDEV, PCIS_INPUTDEV_SCANNER, "scanner"},
3229 {PCIC_INPUTDEV, PCIS_INPUTDEV_GAMEPORT, "gameport"},
3230 {PCIC_DOCKING, -1, "docking station"},
3231 {PCIC_PROCESSOR, -1, "processor"},
3232 {PCIC_SERIALBUS, -1, "serial bus"},
3233 {PCIC_SERIALBUS, PCIS_SERIALBUS_FW, "FireWire"},
3234 {PCIC_SERIALBUS, PCIS_SERIALBUS_ACCESS, "AccessBus"},
3235 {PCIC_SERIALBUS, PCIS_SERIALBUS_SSA, "SSA"},
3236 {PCIC_SERIALBUS, PCIS_SERIALBUS_USB, "USB"},
3237 {PCIC_SERIALBUS, PCIS_SERIALBUS_FC, "Fibre Channel"},
3238 {PCIC_SERIALBUS, PCIS_SERIALBUS_SMBUS, "SMBus"},
3239 {PCIC_WIRELESS, -1, "wireless controller"},
3240 {PCIC_WIRELESS, PCIS_WIRELESS_IRDA, "iRDA"},
3241 {PCIC_WIRELESS, PCIS_WIRELESS_IR, "IR"},
3242 {PCIC_WIRELESS, PCIS_WIRELESS_RF, "RF"},
3243 {PCIC_INTELLIIO, -1, "intelligent I/O controller"},
3244 {PCIC_INTELLIIO, PCIS_INTELLIIO_I2O, "I2O"},
3245 {PCIC_SATCOM, -1, "satellite communication"},
3246 {PCIC_SATCOM, PCIS_SATCOM_TV, "sat TV"},
3247 {PCIC_SATCOM, PCIS_SATCOM_AUDIO, "sat audio"},
3248 {PCIC_SATCOM, PCIS_SATCOM_VOICE, "sat voice"},
3249 {PCIC_SATCOM, PCIS_SATCOM_DATA, "sat data"},
3250 {PCIC_CRYPTO, -1, "encrypt/decrypt"},
3251 {PCIC_CRYPTO, PCIS_CRYPTO_NETCOMP, "network/computer crypto"},
3252 {PCIC_CRYPTO, PCIS_CRYPTO_ENTERTAIN, "entertainment crypto"},
3253 {PCIC_DASP, -1, "dasp"},
3254 {PCIC_DASP, PCIS_DASP_DPIO, "DPIO module"},
3255 {0, 0, NULL}
3256};
3257
e126caf1 3258void
984263bc
MD
3259pci_probe_nomatch(device_t dev, device_t child)
3260{
4d28e78f
SZ
3261 int i;
3262 char *cp, *scp, *device;
984263bc 3263
4d28e78f
SZ
3264 /*
3265 * Look for a listing for this device in a loaded device database.
3266 */
3267 if ((device = pci_describe_device(child)) != NULL) {
3268 device_printf(dev, "<%s>", device);
3269 kfree(device, M_DEVBUF);
3270 } else {
3271 /*
3272 * Scan the class/subclass descriptions for a general
3273 * description.
3274 */
3275 cp = "unknown";
3276 scp = NULL;
3277 for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) {
3278 if (pci_nomatch_tab[i].class == pci_get_class(child)) {
3279 if (pci_nomatch_tab[i].subclass == -1) {
3280 cp = pci_nomatch_tab[i].desc;
3281 } else if (pci_nomatch_tab[i].subclass ==
3282 pci_get_subclass(child)) {
3283 scp = pci_nomatch_tab[i].desc;
3284 }
3285 }
3286 }
3287 device_printf(dev, "<%s%s%s>",
3288 cp ? cp : "",
3289 ((cp != NULL) && (scp != NULL)) ? ", " : "",
3290 scp ? scp : "");
3291 }
6a45dbfa
SZ
3292 kprintf(" (vendor 0x%04x, dev 0x%04x) at device %d.%d",
3293 pci_get_vendor(child), pci_get_device(child),
3294 pci_get_slot(child), pci_get_function(child));
3295 if (pci_get_intpin(child) > 0) {
3296 int irq;
3297
3298 irq = pci_get_irq(child);
3299 if (PCI_INTERRUPT_VALID(irq))
3300 kprintf(" irq %d", irq);
3301 }
3302 kprintf("\n");
3303
638744c5 3304 pci_cfg_save(child, (struct pci_devinfo *)device_get_ivars(child), 1);
984263bc
MD
3305}
3306
4d28e78f
SZ
3307/*
3308 * Parse the PCI device database, if loaded, and return a pointer to a
3309 * description of the device.
3310 *
3311 * The database is flat text formatted as follows:
3312 *
3313 * Any line not in a valid format is ignored.
3314 * Lines are terminated with newline '\n' characters.
3315 *
3316 * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then
3317 * the vendor name.
3318 *
3319 * A DEVICE line is entered immediately below the corresponding VENDOR ID.
3320 * - devices cannot be listed without a corresponding VENDOR line.
3321 * A DEVICE line consists of a TAB, the 4 digit (hex) device code,
3322 * another TAB, then the device name.
3323 */
3324
3325/*
3326 * Assuming (ptr) points to the beginning of a line in the database,
3327 * return the vendor or device and description of the next entry.
3328 * The value of (vendor) or (device) inappropriate for the entry type
3329 * is set to -1. Returns nonzero at the end of the database.
3330 *
3331 * Note that this is slightly unrobust in the face of corrupt data;
3332 * we attempt to safeguard against this by spamming the end of the
3333 * database with a newline when we initialise.
3334 */
3335static int
3336pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc)
3337{
3338 char *cp = *ptr;
3339 int left;
3340
3341 *device = -1;
3342 *vendor = -1;
3343 **desc = '\0';
3344 for (;;) {
3345 left = pci_vendordata_size - (cp - pci_vendordata);
3346 if (left <= 0) {
3347 *ptr = cp;
3348 return(1);
3349 }
3350
3351 /* vendor entry? */
3352 if (*cp != '\t' &&
3353 ksscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2)
3354 break;
3355 /* device entry? */
3356 if (*cp == '\t' &&
3357 ksscanf(cp, "%x\t%80[^\n]", device, *desc) == 2)
3358 break;
3359
3360 /* skip to next line */
3361 while (*cp != '\n' && left > 0) {
3362 cp++;
3363 left--;
3364 }
3365 if (*cp == '\n') {
3366 cp++;
3367 left--;
3368 }
3369 }
3370 /* skip to next line */
3371 while (*cp != '\n' && left > 0) {
3372 cp++;
3373 left--;
3374 }
3375 if (*cp == '\n' && left > 0)
3376 cp++;
3377 *ptr = cp;
3378 return(0);
3379}
3380
3381static char *
3382pci_describe_device(device_t dev)
3383{
3384 int vendor, device;
3385 char *desc, *vp, *dp, *line;
3386
3387 desc = vp = dp = NULL;
3388
3389 /*
3390 * If we have no vendor data, we can't do anything.
3391 */
3392 if (pci_vendordata == NULL)
3393 goto out;
3394
3395 /*
3396 * Scan the vendor data looking for this device
3397 */
3398 line = pci_vendordata;
3399 if ((vp = kmalloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
3400 goto out;
3401 for (;;) {
3402 if (pci_describe_parse_line(&line, &vendor, &device, &vp))
3403 goto out;
3404 if (vendor == pci_get_vendor(dev))
3405 break;
3406 }
3407 if ((dp = kmalloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
3408 goto out;
3409 for (;;) {
3410 if (pci_describe_parse_line(&line, &vendor, &device, &dp)) {
3411 *dp = 0;
3412 break;
3413 }
3414 if (vendor != -1) {
3415 *dp = 0;
3416 break;
3417 }
3418 if (device == pci_get_device(dev))
3419 break;
3420 }
3421 if (dp[0] == '\0')
3422 ksnprintf(dp, 80, "0x%x", pci_get_device(dev));
3423 if ((desc = kmalloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) !=
3424 NULL)
3425 ksprintf(desc, "%s, %s", vp, dp);
3426 out:
3427 if (vp != NULL)
3428 kfree(vp, M_DEVBUF);
3429 if (dp != NULL)
3430 kfree(dp, M_DEVBUF);
3431 return(desc);
3432}
3433
22457186 3434int
4a5a2d63 3435pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
984263bc
MD
3436{
3437 struct pci_devinfo *dinfo;
3438 pcicfgregs *cfg;
3439
3440 dinfo = device_get_ivars(child);
3441 cfg = &dinfo->cfg;
3442
3443 switch (which) {
4d28e78f
SZ
3444 case PCI_IVAR_ETHADDR:
3445 /*
3446 * The generic accessor doesn't deal with failure, so
3447 * we set the return value, then return an error.
3448 */
3449 *((uint8_t **) result) = NULL;
3450 return (EINVAL);
984263bc
MD
3451 case PCI_IVAR_SUBVENDOR:
3452 *result = cfg->subvendor;
3453 break;
3454 case PCI_IVAR_SUBDEVICE:
3455 *result = cfg->subdevice;
3456 break;
3457 case PCI_IVAR_VENDOR:
3458 *result = cfg->vendor;
3459 break;
3460 case PCI_IVAR_DEVICE:
3461 *result = cfg->device;
3462 break;
3463 case PCI_IVAR_DEVID:
3464 *result = (cfg->device << 16) | cfg->vendor;
3465 break;
3466 case PCI_IVAR_CLASS:
3467 *result = cfg->baseclass;
3468 break;
3469 case PCI_IVAR_SUBCLASS:
3470 *result = cfg->subclass;
3471 break;
3472 case PCI_IVAR_PROGIF:
3473 *result = cfg->progif;
3474 break;
3475 case PCI_IVAR_REVID:
3476 *result = cfg->revid;
3477 break;
3478 case PCI_IVAR_INTPIN:
3479 *result = cfg->intpin;
3480 break;
3481 case PCI_IVAR_IRQ:
3482 *result = cfg->intline;
3483 break;
4d28e78f
SZ
3484 case PCI_IVAR_DOMAIN:
3485 *result = cfg->domain;
3486 break;
984263bc
MD
3487 case PCI_IVAR_BUS:
3488 *result = cfg->bus;
3489 break;
3490 case PCI_IVAR_SLOT:
3491 *result = cfg->slot;
3492 break;
3493 case PCI_IVAR_FUNCTION:
3494 *result = cfg->func;
3495 break;
4d28e78f
SZ
3496 case PCI_IVAR_CMDREG:
3497 *result = cfg->cmdreg;
984263bc 3498 break;
4d28e78f
SZ
3499 case PCI_IVAR_CACHELNSZ:
3500 *result = cfg->cachelnsz;
984263bc 3501 break;
4d28e78f
SZ
3502 case PCI_IVAR_MINGNT:
3503 *result = cfg->mingnt;
c7e4e7eb 3504 break;
4d28e78f
SZ
3505 case PCI_IVAR_MAXLAT:
3506 *result = cfg->maxlat;
c7e4e7eb 3507 break;
4d28e78f
SZ
3508 case PCI_IVAR_LATTIMER:
3509 *result = cfg->lattimer;
0254566f 3510 break;
d85e7311
SZ
3511 case PCI_IVAR_PCIXCAP_PTR:
3512 *result = cfg->pcix.pcix_ptr;
3513 break;
3514 case PCI_IVAR_PCIECAP_PTR:
3515 *result = cfg->expr.expr_ptr;
3516 break;
3517 case PCI_IVAR_VPDCAP_PTR:
3518 *result = cfg->vpd.vpd_reg;
3519 break;
984263bc 3520 default:
4d28e78f 3521 return (ENOENT);
984263bc 3522 }
4d28e78f 3523 return (0);
984263bc
MD
3524}
3525
22457186 3526int
984263bc
MD
3527pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
3528{
3529 struct pci_devinfo *dinfo;
984263bc
MD
3530
3531 dinfo = device_get_ivars(child);
984263bc
MD
3532
3533 switch (which) {
4d28e78f
SZ
3534 case PCI_IVAR_INTPIN:
3535 dinfo->cfg.intpin = value;
3536 return (0);
3537 case PCI_IVAR_ETHADDR:
984263bc
MD
3538 case PCI_IVAR_SUBVENDOR:
3539 case PCI_IVAR_SUBDEVICE:
3540 case PCI_IVAR_VENDOR:
3541 case PCI_IVAR_DEVICE:
3542 case PCI_IVAR_DEVID:
3543 case PCI_IVAR_CLASS:
3544 case PCI_IVAR_SUBCLASS:
3545 case PCI_IVAR_PROGIF:
3546 case PCI_IVAR_REVID:
984263bc 3547 case PCI_IVAR_IRQ:
4d28e78f 3548 case PCI_IVAR_DOMAIN:
984263bc
MD
3549 case PCI_IVAR_BUS:
3550 case PCI_IVAR_SLOT:
3551 case PCI_IVAR_FUNCTION:
4d28e78f 3552 return (EINVAL); /* disallow for now */
984263bc 3553
984263bc 3554 default:
4d28e78f
SZ
3555 return (ENOENT);
3556 }
3557}
#ifdef notyet
#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>
#include <sys/cons.h>

/*
 * DDB command: walk the global PCI device queue and print one line per
 * device (name/unit, location, class, subsystem, chip id, revision and
 * header type), based on the cached config data.
 */
DB_SHOW_COMMAND(pciregs, db_pci_dump)
{
	struct pci_devinfo *dinfo;
	struct devlist *devlist_head;
	struct pci_conf *p;
	const char *name;
	int i, error, none_count;

	none_count = 0;
	/* get the head of the device queue */
	devlist_head = &pci_devq;

	/* Walk the device list, honoring the pager quit flag. */
	for (error = 0, i = 0, dinfo = STAILQ_FIRST(devlist_head);
	    (dinfo != NULL) && (error == 0) && (i < pci_numdevs) &&
	    !db_pager_quit;
	    dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {
		/* Populate pd_name and pd_unit */
		name = NULL;
		if (dinfo->cfg.dev)
			name = device_get_name(dinfo->cfg.dev);

		p = &dinfo->conf;
		db_kprintf("%s%d@pci%d:%d:%d:%d:\tclass=0x%06x card=0x%08x "
		    "chip=0x%08x rev=0x%02x hdr=0x%02x\n",
		    (name && *name) ? name : "none",
		    (name && *name) ? (int)device_get_unit(dinfo->cfg.dev) :
		    none_count++,
		    p->pc_sel.pc_domain, p->pc_sel.pc_bus, p->pc_sel.pc_dev,
		    p->pc_sel.pc_func, (p->pc_class << 16) |
		    (p->pc_subclass << 8) | p->pc_progif,
		    (p->pc_subdevice << 16) | p->pc_subvendor,
		    (p->pc_device << 16) | p->pc_vendor,
		    p->pc_revid, p->pc_hdr);
	}
}
#endif /* DDB */
#endif
984263bc 3609
201eb0a7 3610static struct resource *
4d28e78f
SZ
3611pci_alloc_map(device_t dev, device_t child, int type, int *rid,
3612 u_long start, u_long end, u_long count, u_int flags)
201eb0a7
TS
3613{
3614 struct pci_devinfo *dinfo = device_get_ivars(child);
3615 struct resource_list *rl = &dinfo->resources;
3616 struct resource_list_entry *rle;
3617 struct resource *res;
4d28e78f 3618 pci_addr_t map, testval;
201eb0a7
TS
3619 int mapsize;
3620
3621 /*
3622 * Weed out the bogons, and figure out how large the BAR/map
4d28e78f 3623 * is. Bars that read back 0 here are bogus and unimplemented.
201eb0a7 3624 * Note: atapci in legacy mode are special and handled elsewhere
4d28e78f 3625 * in the code. If you have a atapci device in legacy mode and
201eb0a7
TS
3626 * it fails here, that other code is broken.
3627 */
3628 res = NULL;
3629 map = pci_read_config(child, *rid, 4);
3630 pci_write_config(child, *rid, 0xffffffff, 4);
3631 testval = pci_read_config(child, *rid, 4);
4d28e78f
SZ
3632 if (pci_maprange(testval) == 64)
3633 map |= (pci_addr_t)pci_read_config(child, *rid + 4, 4) << 32;
201eb0a7
TS
3634 if (pci_mapbase(testval) == 0)
3635 goto out;
4d28e78f
SZ
3636
3637 /*
3638 * Restore the original value of the BAR. We may have reprogrammed
3639 * the BAR of the low-level console device and when booting verbose,
3640 * we need the console device addressable.
3641 */
3642 pci_write_config(child, *rid, map, 4);
3643
3644 if (PCI_BAR_MEM(testval)) {
201eb0a7
TS
3645 if (type != SYS_RES_MEMORY) {
3646 if (bootverbose)
4d28e78f
SZ
3647 device_printf(dev,
3648 "child %s requested type %d for rid %#x,"
3649 " but the BAR says it is an memio\n",
3650 device_get_nameunit(child), type, *rid);
201eb0a7
TS
3651 goto out;
3652 }
3653 } else {
3654 if (type != SYS_RES_IOPORT) {
3655 if (bootverbose)
4d28e78f
SZ
3656 device_printf(dev,
3657 "child %s requested type %d for rid %#x,"
3658 " but the BAR says it is an ioport\n",
3659 device_get_nameunit(child), type, *rid);
201eb0a7
TS
3660 goto out;
3661 }
3662 }
3663 /*
3664 * For real BARs, we need to override the size that
3665 * the driver requests, because that's what the BAR
3666 * actually uses and we would otherwise have a
3667 * situation where we might allocate the excess to
3668 * another driver, which won't work.
3669 */
3670 mapsize = pci_mapsize(testval);
4d28e78f 3671 count = 1UL << mapsize;
201eb0a7 3672 if (RF_ALIGNMENT(flags) < mapsize)
4d28e78f
SZ
3673 flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize);
3674 if (PCI_BAR_MEM(testval) && (testval & PCIM_BAR_MEM_PREFETCH))
3675 flags |= RF_PREFETCHABLE;
3676
201eb0a7
TS
3677 /*
3678 * Allocate enough resource, and then write back the
4d28e78f 3679 * appropriate bar for that resource.
201eb0a7
TS
3680 */
3681 res = BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid,
4f7fe8c7 3682 start, end, count, flags, -1);
201eb0a7 3683 if (res == NULL) {
4d28e78f
SZ
3684 device_printf(child,
3685 "%#lx bytes of rid %#x res %d failed (%#lx, %#lx).\n",
3686 count, *rid, type, start, end);
201eb0a7
TS
3687 goto out;
3688 }
1b000e91 3689 resource_list_add(rl, type, *rid, start, end, count, -1);
201eb0a7
TS
3690 rle = resource_list_find(rl, type, *rid);
3691 if (rle == NULL)
3692 panic("pci_alloc_map: unexpectedly can't find resource.");
3693 rle->res = res;
3694 rle->start = rman_get_start(res);
3695 rle->end = rman_get_end(res);
3696 rle->count = count;
3697 if (bootverbose)
4d28e78f
SZ
3698 device_printf(child,
3699 "Lazy allocation of %#lx bytes rid %#x type %d at %#lx\n",
3700 count, *rid, type, rman_get_start(res));
201eb0a7
TS
3701 map = rman_get_start(res);
3702out:;
3703 pci_write_config(child, *rid, map, 4);
4d28e78f
SZ
3704 if (pci_maprange(testval) == 64)
3705 pci_write_config(child, *rid + 4, map >> 32, 4);
3706 return (res);
201eb0a7 3707}
4d28e78f 3708
201eb0a7 3709
261fa16d 3710struct resource *
984263bc 3711pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
4f7fe8c7 3712 u_long start, u_long end, u_long count, u_int flags, int cpuid)
984263bc
MD
3713{
3714 struct pci_devinfo *dinfo = device_get_ivars(child);
3715 struct resource_list *rl = &dinfo->resources;
201eb0a7 3716 struct resource_list_entry *rle;
984263bc 3717 pcicfgregs *cfg = &dinfo->cfg;
09e7d9f3 3718
984263bc
MD
3719 /*
3720 * Perform lazy resource allocation
984263bc
MD
3721 */
3722 if (device_get_parent(child) == dev) {
de67e43b
JS
3723 switch (type) {
3724 case SYS_RES_IRQ:
4d28e78f
SZ
3725 /*
3726 * Can't alloc legacy interrupt once MSI messages
3727 * have been allocated.
3728 */
4d28e78f
SZ
3729 if (*rid == 0 && (cfg->msi.msi_alloc > 0 ||
3730 cfg->msix.msix_alloc > 0))
3731 return (NULL);
4d28e78f
SZ
3732 /*
3733 * If the child device doesn't have an
3734 * interrupt routed and is deserving of an
3735 * interrupt, try to assign it one.
3736 */
3737 if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) &&
3738 (cfg->intpin != 0))
3739 pci_assign_interrupt(dev, child, 0);
3740 break;
de67e43b
JS
3741 case SYS_RES_IOPORT:
3742 case SYS_RES_MEMORY:
3743 if (*rid < PCIR_BAR(cfg->nummaps)) {
3744 /*
3745 * Enable the I/O mode. We should
3746 * also be assigning resources too
3747 * when none are present. The
3748 * resource_list_alloc kind of sorta does
3749 * this...
3750 */
3751 if (PCI_ENABLE_IO(dev, child, type))
3752 return (NULL);
984263bc 3753 }
201eb0a7
TS
3754 rle = resource_list_find(rl, type, *rid);
3755 if (rle == NULL)
4d28e78f
SZ
3756 return (pci_alloc_map(dev, child, type, rid,
3757 start, end, count, flags));
820c1612 3758 break;
984263bc 3759 }
201eb0a7
TS
3760 /*
3761 * If we've already allocated the resource, then
4d28e78f 3762 * return it now. But first we may need to activate
201eb0a7 3763 * it, since we don't allocate the resource as active
4d28e78f 3764 * above. Normally this would be done down in the
201eb0a7 3765 * nexus, but since we short-circuit that path we have
4d28e78f 3766 * to do its job here. Not sure if we should kfree the
201eb0a7 3767 * resource if it fails to activate.
201eb0a7
TS
3768 */
3769 rle = resource_list_find(rl, type, *rid);
3770 if (rle != NULL && rle->res != NULL) {
3771 if (bootverbose)
4d28e78f
SZ
3772 device_printf(child,
3773 "Reserved %#lx bytes for rid %#x type %d at %#lx\n",
3774 rman_get_size(rle->res), *rid, type,
3775 rman_get_start(rle->res));
201eb0a7
TS
3776 if ((flags & RF_ACTIVE) &&
3777 bus_generic_activate_resource(dev, child, type,
4d28e78f
SZ
3778 *rid, rle->res) != 0)
3779 return (NULL);
3780 return (rle->res);
201eb0a7 3781 }
984263bc 3782 }
4d28e78f 3783 return (resource_list_alloc(rl, dev, child, type, rid,
4f7fe8c7 3784 start, end, count, flags, cpuid));
984263bc
MD
3785}
3786
4d28e78f
SZ
3787void
3788pci_delete_resource(device_t dev, device_t child, int type, int rid)
984263bc 3789{
4d28e78f
SZ
3790 struct pci_devinfo *dinfo;
3791 struct resource_list *rl;
984263bc
MD
3792 struct resource_list_entry *rle;
3793
4d28e78f
SZ
3794 if (device_get_parent(child) != dev)
3795 return;
984263bc 3796
4d28e78f
SZ
3797 dinfo = device_get_ivars(child);
3798 rl = &dinfo->resources;
3799 rle = resource_list_find(rl, type, rid);
3800 if (rle) {
3801 if (rle->res) {
3802 if (rman_get_device(rle->res) != dev ||
3803 rman_get_flags(rle->res) & RF_ACTIVE) {
3804 device_printf(dev, "delete_resource: "
3805 "Resource still owned by child, oops. "
3806 "(type=%d, rid=%d, addr=%lx)\n",
3807 rle->type, rle->rid,
3808 rman_get_start(rle->res));
3809 return;
3810 }
3811 bus_release_resource(dev, type, rid, rle->res);
3812 }
3813 resource_list_delete(rl, type, rid);
3814 }
3815 /*
3816 * Why do we turn off the PCI configuration BAR when we delete a
3817 * resource? -- imp
3818 */
3819 pci_write_config(child, rid, 0, 4);
3820 BUS_DELETE_RESOURCE(device_get_parent(dev), child, type, rid);
984263bc
MD
3821}
3822
e126caf1
MD
3823struct resource_list *
3824pci_get_resource_list (device_t dev, device_t child)
3825{
4d28e78f 3826 struct pci_devinfo *dinfo = device_get_ivars(child);
e126caf1 3827
bcc66dfa
SZ
3828 if (dinfo == NULL)
3829 return (NULL);
3830
b0486c83 3831 return (&dinfo->resources);
e126caf1
MD
3832}
3833
4d28e78f 3834uint32_t
984263bc
MD
3835pci_read_config_method(device_t dev, device_t child, int reg, int width)
3836{
3837 struct pci_devinfo *dinfo = device_get_ivars(child);
3838 pcicfgregs *cfg = &dinfo->cfg;
4a5a2d63 3839
4d28e78f
SZ
3840 return (PCIB_READ_CONFIG(device_get_parent(dev),
3841 cfg->bus, cfg->slot, cfg->func, reg, width));
984263bc
MD
3842}
3843
e126caf1 3844void
984263bc 3845pci_write_config_method(device_t dev, device_t child, int reg,
4d28e78f 3846 uint32_t val, int width)
984263bc
MD
3847{
3848 struct pci_devinfo *dinfo = device_get_ivars(child);
3849 pcicfgregs *cfg = &dinfo->cfg;
4a5a2d63
JS
3850
3851 PCIB_WRITE_CONFIG(device_get_parent(dev),
4d28e78f 3852 cfg->bus, cfg->slot, cfg->func, reg, val, width);
984263bc
MD
3853}
3854
e126caf1 3855int
4d28e78f 3856pci_child_location_str_method(device_t dev, device_t child, char *buf,
e126caf1
MD
3857 size_t buflen)
3858{
e126caf1 3859
f8c7a42d 3860 ksnprintf(buf, buflen, "slot=%d function=%d", pci_get_slot(child),
e126caf1
MD
3861 pci_get_function(child));
3862 return (0);
3863}