msix: Rework MSI-X allocation, step 1/3
[dragonfly.git] / sys / bus / pci / pci.c
CommitLineData
4d28e78f
SZ
1/*-
2 * Copyright (c) 1997, Stefan Esser <se@kfreebsd.org>
3 * Copyright (c) 2000, Michael Smith <msmith@kfreebsd.org>
4 * Copyright (c) 2000, BSDi
984263bc
MD
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
12 * disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
83c1faaa
SW
27 *
28 * $FreeBSD: src/sys/dev/pci/pci.c,v 1.355.2.9.2.1 2009/04/15 03:14:26 kensmith Exp $
984263bc
MD
29 */
30
4d28e78f 31#include "opt_bus.h"
92683a33 32#include "opt_acpi.h"
6951547b 33#include "opt_compat_oldpci.h"
984263bc
MD
34
35#include <sys/param.h>
36#include <sys/systm.h>
37#include <sys/malloc.h>
38#include <sys/module.h>
4d28e78f 39#include <sys/linker.h>
984263bc
MD
40#include <sys/fcntl.h>
41#include <sys/conf.h>
42#include <sys/kernel.h>
43#include <sys/queue.h>
638744c5 44#include <sys/sysctl.h>
4d28e78f 45#include <sys/endian.h>
d2f04fe0 46#include <sys/machintr.h>
984263bc 47
941460da
SZ
48#include <machine/msi_machdep.h>
49
984263bc
MD
50#include <vm/vm.h>
51#include <vm/pmap.h>
52#include <vm/vm_extern.h>
53
54#include <sys/bus.h>
984263bc 55#include <sys/rman.h>
4d28e78f 56#include <sys/device.h>
984263bc 57
dc5a7bd2 58#include <sys/pciio.h>
4d28e78f
SZ
59#include <bus/pci/pcireg.h>
60#include <bus/pci/pcivar.h>
61#include <bus/pci/pci_private.h>
984263bc 62
4a5a2d63 63#include "pcib_if.h"
4d28e78f
SZ
64#include "pci_if.h"
65
66#ifdef __HAVE_ACPI
67#include <contrib/dev/acpica/acpi.h>
68#include "acpi_if.h"
69#else
70#define ACPI_PWR_FOR_SLEEP(x, y, z)
71#endif
72
35b72619
SZ
73extern struct dev_ops pcic_ops; /* XXX */
74
3a6dc23c
SZ
75typedef void (*pci_read_cap_t)(device_t, int, int, pcicfgregs *);
76
4d28e78f
SZ
77static uint32_t pci_mapbase(unsigned mapreg);
78static const char *pci_maptype(unsigned mapreg);
79static int pci_mapsize(unsigned testval);
80static int pci_maprange(unsigned mapreg);
81static void pci_fixancient(pcicfgregs *cfg);
82
83static int pci_porten(device_t pcib, int b, int s, int f);
84static int pci_memen(device_t pcib, int b, int s, int f);
85static void pci_assign_interrupt(device_t bus, device_t dev,
86 int force_route);
87static int pci_add_map(device_t pcib, device_t bus, device_t dev,
88 int b, int s, int f, int reg,
89 struct resource_list *rl, int force, int prefetch);
90static int pci_probe(device_t dev);
91static int pci_attach(device_t dev);
11a49859 92static void pci_child_detached(device_t, device_t);
4d28e78f
SZ
93static void pci_load_vendor_data(void);
94static int pci_describe_parse_line(char **ptr, int *vendor,
95 int *device, char **desc);
96static char *pci_describe_device(device_t dev);
97static int pci_modevent(module_t mod, int what, void *arg);
98static void pci_hdrtypedata(device_t pcib, int b, int s, int f,
99 pcicfgregs *cfg);
3a6dc23c 100static void pci_read_capabilities(device_t pcib, pcicfgregs *cfg);
4d28e78f
SZ
101static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg,
102 int reg, uint32_t *data);
103#if 0
104static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg,
105 int reg, uint32_t data);
106#endif
107static void pci_read_vpd(device_t pcib, pcicfgregs *cfg);
108static void pci_disable_msi(device_t dev);
109static void pci_enable_msi(device_t dev, uint64_t address,
110 uint16_t data);
cf8f3133 111static void pci_setup_msix_vector(device_t dev, u_int index,
4d28e78f 112 uint64_t address, uint32_t data);
cf8f3133
SZ
113static void pci_mask_msix_vector(device_t dev, u_int index);
114static void pci_unmask_msix_vector(device_t dev, u_int index);
31646171 115static void pci_mask_msix_allvectors(device_t dev);
4d28e78f
SZ
116static int pci_msi_blacklisted(void);
117static void pci_resume_msi(device_t dev);
118static void pci_resume_msix(device_t dev);
d85e7311
SZ
119static int pcie_slotimpl(const pcicfgregs *);
120static void pci_print_verbose_expr(const pcicfgregs *);
4d28e78f 121
3a6dc23c
SZ
122static void pci_read_cap_pmgt(device_t, int, int, pcicfgregs *);
123static void pci_read_cap_ht(device_t, int, int, pcicfgregs *);
124static void pci_read_cap_msi(device_t, int, int, pcicfgregs *);
125static void pci_read_cap_msix(device_t, int, int, pcicfgregs *);
126static void pci_read_cap_vpd(device_t, int, int, pcicfgregs *);
127static void pci_read_cap_subvendor(device_t, int, int,
128 pcicfgregs *);
129static void pci_read_cap_pcix(device_t, int, int, pcicfgregs *);
d85e7311 130static void pci_read_cap_express(device_t, int, int, pcicfgregs *);
3a6dc23c 131
4d28e78f
SZ
132static device_method_t pci_methods[] = {
133 /* Device interface */
134 DEVMETHOD(device_probe, pci_probe),
135 DEVMETHOD(device_attach, pci_attach),
136 DEVMETHOD(device_detach, bus_generic_detach),
137 DEVMETHOD(device_shutdown, bus_generic_shutdown),
138 DEVMETHOD(device_suspend, pci_suspend),
139 DEVMETHOD(device_resume, pci_resume),
140
141 /* Bus interface */
142 DEVMETHOD(bus_print_child, pci_print_child),
143 DEVMETHOD(bus_probe_nomatch, pci_probe_nomatch),
144 DEVMETHOD(bus_read_ivar, pci_read_ivar),
145 DEVMETHOD(bus_write_ivar, pci_write_ivar),
146 DEVMETHOD(bus_driver_added, pci_driver_added),
11a49859 147 DEVMETHOD(bus_child_detached, pci_child_detached),
4d28e78f
SZ
148 DEVMETHOD(bus_setup_intr, pci_setup_intr),
149 DEVMETHOD(bus_teardown_intr, pci_teardown_intr),
150
151 DEVMETHOD(bus_get_resource_list,pci_get_resource_list),
152 DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
153 DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
154 DEVMETHOD(bus_delete_resource, pci_delete_resource),
155 DEVMETHOD(bus_alloc_resource, pci_alloc_resource),
156 DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource),
157 DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
158 DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
159 DEVMETHOD(bus_child_pnpinfo_str, pci_child_pnpinfo_str_method),
160 DEVMETHOD(bus_child_location_str, pci_child_location_str_method),
161
162 /* PCI interface */
163 DEVMETHOD(pci_read_config, pci_read_config_method),
164 DEVMETHOD(pci_write_config, pci_write_config_method),
165 DEVMETHOD(pci_enable_busmaster, pci_enable_busmaster_method),
166 DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method),
167 DEVMETHOD(pci_enable_io, pci_enable_io_method),
168 DEVMETHOD(pci_disable_io, pci_disable_io_method),
169 DEVMETHOD(pci_get_vpd_ident, pci_get_vpd_ident_method),
170 DEVMETHOD(pci_get_vpd_readonly, pci_get_vpd_readonly_method),
171 DEVMETHOD(pci_get_powerstate, pci_get_powerstate_method),
172 DEVMETHOD(pci_set_powerstate, pci_set_powerstate_method),
173 DEVMETHOD(pci_assign_interrupt, pci_assign_interrupt_method),
174 DEVMETHOD(pci_find_extcap, pci_find_extcap_method),
175 DEVMETHOD(pci_alloc_msi, pci_alloc_msi_method),
176 DEVMETHOD(pci_alloc_msix, pci_alloc_msix_method),
4d28e78f
SZ
177 DEVMETHOD(pci_release_msi, pci_release_msi_method),
178 DEVMETHOD(pci_msi_count, pci_msi_count_method),
179 DEVMETHOD(pci_msix_count, pci_msix_count_method),
180
181 { 0, 0 }
182};
183
184DEFINE_CLASS_0(pci, pci_driver, pci_methods, 0);
4a5a2d63 185
4d28e78f 186static devclass_t pci_devclass;
aa2b9d05 187DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, NULL);
4d28e78f
SZ
188MODULE_VERSION(pci, 1);
189
190static char *pci_vendordata;
191static size_t pci_vendordata_size;
dc5a7bd2 192
984263bc 193
3a6dc23c
SZ
/*
 * Dispatch table mapping PCI capability IDs (PCIY_*) to their parser.
 * Walked by pci_read_capabilities() for every capability entry found
 * while traversing a device's capability list.
 */
static const struct pci_read_cap {
	int		cap;		/* PCIY_* capability ID */
	pci_read_cap_t	read_cap;	/* parser invoked for this ID */
} pci_read_caps[] = {
	{ PCIY_PMG,		pci_read_cap_pmgt },
	{ PCIY_HT,		pci_read_cap_ht },
	{ PCIY_MSI,		pci_read_cap_msi },
	{ PCIY_MSIX,		pci_read_cap_msix },
	{ PCIY_VPD,		pci_read_cap_vpd },
	{ PCIY_SUBVENDOR,	pci_read_cap_subvendor },
	{ PCIY_PCIX,		pci_read_cap_pcix },
	{ PCIY_EXPRESS,		pci_read_cap_express },
	{ 0, NULL } /* required last entry */
};
208
984263bc 209struct pci_quirk {
4d28e78f 210 uint32_t devid; /* Vendor/device of the card */
984263bc 211 int type;
4d28e78f
SZ
212#define PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */
213#define PCI_QUIRK_DISABLE_MSI 2 /* MSI/MSI-X doesn't work */
984263bc
MD
214 int arg1;
215 int arg2;
216};
217
218struct pci_quirk pci_quirks[] = {
4d28e78f 219 /* The Intel 82371AB and 82443MX has a map register at offset 0x90. */
984263bc
MD
220 { 0x71138086, PCI_QUIRK_MAP_REG, 0x90, 0 },
221 { 0x719b8086, PCI_QUIRK_MAP_REG, 0x90, 0 },
f1f0bfb2
JS
222 /* As does the Serverworks OSB4 (the SMBus mapping register) */
223 { 0x02001166, PCI_QUIRK_MAP_REG, 0x90, 0 },
984263bc 224
4d28e78f
SZ
225 /*
226 * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge
227 * or the CMIC-SL (AKA ServerWorks GC_LE).
228 */
229 { 0x00141166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
230 { 0x00171166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
231
232 /*
233 * MSI doesn't work on earlier Intel chipsets including
234 * E7500, E7501, E7505, 845, 865, 875/E7210, and 855.
235 */
236 { 0x25408086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
237 { 0x254c8086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
238 { 0x25508086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
239 { 0x25608086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
240 { 0x25708086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
241 { 0x25788086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
242 { 0x35808086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
243
244 /*
245 * MSI doesn't work with devices behind the AMD 8131 HT-PCIX
246 * bridge.
247 */
248 { 0x74501022, PCI_QUIRK_DISABLE_MSI, 0, 0 },
249
984263bc
MD
250 { 0 }
251};
252
253/* map register information */
4d28e78f
SZ
254#define PCI_MAPMEM 0x01 /* memory map */
255#define PCI_MAPMEMP 0x02 /* prefetchable memory map */
256#define PCI_MAPPORT 0x04 /* port map */
257
258struct devlist pci_devq;
259uint32_t pci_generation;
260uint32_t pci_numdevs = 0;
261static int pcie_chipset, pcix_chipset;
262
263/* sysctl vars */
264SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD, 0, "PCI bus tuning parameters");
265
266static int pci_enable_io_modes = 1;
267TUNABLE_INT("hw.pci.enable_io_modes", &pci_enable_io_modes);
268SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RW,
269 &pci_enable_io_modes, 1,
270 "Enable I/O and memory bits in the config register. Some BIOSes do not\n\
271enable these bits correctly. We'd like to do this all the time, but there\n\
272are some peripherals that this causes problems with.");
984263bc 273
638744c5
HT
274static int pci_do_power_nodriver = 0;
275TUNABLE_INT("hw.pci.do_power_nodriver", &pci_do_power_nodriver);
276SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RW,
277 &pci_do_power_nodriver, 0,
278 "Place a function into D3 state when no driver attaches to it. 0 means\n\
279disable. 1 means conservatively place devices into D3 state. 2 means\n\
6699890a 280aggressively place devices into D3 state. 3 means put absolutely everything\n\
638744c5
HT
281in D3 state.");
282
4d28e78f
SZ
283static int pci_do_power_resume = 1;
284TUNABLE_INT("hw.pci.do_power_resume", &pci_do_power_resume);
285SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RW,
286 &pci_do_power_resume, 1,
287 "Transition from D3 -> D0 on resume.");
288
289static int pci_do_msi = 1;
290TUNABLE_INT("hw.pci.enable_msi", &pci_do_msi);
291SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RW, &pci_do_msi, 1,
292 "Enable support for MSI interrupts");
293
6475434e
SZ
294static int pci_do_msix = 0;
295#if 0
4d28e78f
SZ
296TUNABLE_INT("hw.pci.enable_msix", &pci_do_msix);
297SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RW, &pci_do_msix, 1,
298 "Enable support for MSI-X interrupts");
6475434e 299#endif
4d28e78f
SZ
300
301static int pci_honor_msi_blacklist = 1;
302TUNABLE_INT("hw.pci.honor_msi_blacklist", &pci_honor_msi_blacklist);
303SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RD,
304 &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI");
305
2c3d7ac8
SZ
306static int pci_msi_cpuid;
307
4d28e78f
SZ
308/* Find a device_t by bus/slot/function in domain 0 */
309
310device_t
311pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func)
312{
313
314 return (pci_find_dbsf(0, bus, slot, func));
315}
316
317/* Find a device_t by domain/bus/slot/function */
318
984263bc 319device_t
4d28e78f 320pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func)
984263bc
MD
321{
322 struct pci_devinfo *dinfo;
323
324 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
4d28e78f
SZ
325 if ((dinfo->cfg.domain == domain) &&
326 (dinfo->cfg.bus == bus) &&
984263bc
MD
327 (dinfo->cfg.slot == slot) &&
328 (dinfo->cfg.func == func)) {
329 return (dinfo->cfg.dev);
330 }
331 }
332
333 return (NULL);
334}
335
4d28e78f
SZ
336/* Find a device_t by vendor/device ID */
337
984263bc 338device_t
4d28e78f 339pci_find_device(uint16_t vendor, uint16_t device)
984263bc
MD
340{
341 struct pci_devinfo *dinfo;
342
343 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
344 if ((dinfo->cfg.vendor == vendor) &&
345 (dinfo->cfg.device == device)) {
346 return (dinfo->cfg.dev);
347 }
348 }
349
350 return (NULL);
351}
352
353/* return base address of memory or port map */
354
4d28e78f
SZ
355static uint32_t
356pci_mapbase(uint32_t mapreg)
984263bc 357{
4d28e78f
SZ
358
359 if (PCI_BAR_MEM(mapreg))
360 return (mapreg & PCIM_BAR_MEM_BASE);
361 else
362 return (mapreg & PCIM_BAR_IO_BASE);
984263bc
MD
363}
364
365/* return map type of memory or port map */
366
4d28e78f 367static const char *
984263bc
MD
368pci_maptype(unsigned mapreg)
369{
984263bc 370
4d28e78f
SZ
371 if (PCI_BAR_IO(mapreg))
372 return ("I/O Port");
373 if (mapreg & PCIM_BAR_MEM_PREFETCH)
374 return ("Prefetchable Memory");
375 return ("Memory");
984263bc
MD
376}
377
378/* return log2 of map size decoded for memory or port map */
379
380static int
4d28e78f 381pci_mapsize(uint32_t testval)
984263bc
MD
382{
383 int ln2size;
384
385 testval = pci_mapbase(testval);
386 ln2size = 0;
387 if (testval != 0) {
388 while ((testval & 1) == 0)
389 {
390 ln2size++;
391 testval >>= 1;
392 }
393 }
394 return (ln2size);
395}
396
397/* return log2 of address range supported by map register */
398
399static int
400pci_maprange(unsigned mapreg)
401{
402 int ln2range = 0;
4d28e78f
SZ
403
404 if (PCI_BAR_IO(mapreg))
984263bc 405 ln2range = 32;
4d28e78f
SZ
406 else
407 switch (mapreg & PCIM_BAR_MEM_TYPE) {
408 case PCIM_BAR_MEM_32:
409 ln2range = 32;
410 break;
411 case PCIM_BAR_MEM_1MB:
412 ln2range = 20;
413 break;
414 case PCIM_BAR_MEM_64:
415 ln2range = 64;
416 break;
417 }
984263bc
MD
418 return (ln2range);
419}
420
421/* adjust some values from PCI 1.0 devices to match 2.0 standards ... */
422
423static void
424pci_fixancient(pcicfgregs *cfg)
425{
426 if (cfg->hdrtype != 0)
427 return;
428
429 /* PCI to PCI bridges use header type 1 */
430 if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI)
431 cfg->hdrtype = 1;
432}
433
984263bc
MD
434/* extract header type specific config data */
435
/*
 * Extract the header-type specific configuration data: subvendor and
 * subdevice IDs (where the header defines them), the number of BARs,
 * and (with COMPAT_OLDPCI) the secondary bus number for bridges.
 */
static void
pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg)
{
#define REG(n, w)	PCIB_READ_CONFIG(pcib, b, s, f, n, w)
	switch (cfg->hdrtype) {
	case 0:		/* normal device */
		cfg->subvendor = REG(PCIR_SUBVEND_0, 2);
		cfg->subdevice = REG(PCIR_SUBDEV_0, 2);
		cfg->nummaps = PCI_MAXMAPS_0;
		break;
	case 1:		/* PCI-PCI bridge: no subvendor registers */
		cfg->nummaps = PCI_MAXMAPS_1;
#ifdef COMPAT_OLDPCI
		cfg->secondarybus = REG(PCIR_SECBUS_1, 1);
#endif
		break;
	case 2:		/* PCI-cardbus bridge */
		cfg->subvendor = REG(PCIR_SUBVEND_2, 2);
		cfg->subdevice = REG(PCIR_SUBDEV_2, 2);
		cfg->nummaps = PCI_MAXMAPS_2;
#ifdef COMPAT_OLDPCI
		cfg->secondarybus = REG(PCIR_SECBUS_2, 1);
#endif
		break;
	}
#undef REG
}
463
/*
 * Read the configuration header of the function at domain d, bus b,
 * slot s, function f into a freshly allocated pci_devinfo of the
 * caller-specified 'size' (which may be larger than the base struct
 * for bus-specific softc extensions).  The new entry is linked onto
 * the global device list and its pciio(4) conf record is populated.
 * Returns NULL when no function responds at that address.
 */
struct pci_devinfo *
pci_read_device(device_t pcib, int d, int b, int s, int f, size_t size)
{
#define REG(n, w)	PCIB_READ_CONFIG(pcib, b, s, f, n, w)
	pcicfgregs *cfg = NULL;
	struct pci_devinfo *devlist_entry;
	struct devlist *devlist_head;

	devlist_head = &pci_devq;

	devlist_entry = NULL;

	/* A config read of all ones means no function is present. */
	if (REG(PCIR_DEVVENDOR, 4) != -1) {
		devlist_entry = kmalloc(size, M_DEVBUF, M_WAITOK | M_ZERO);

		cfg = &devlist_entry->cfg;

		cfg->domain = d;
		cfg->bus = b;
		cfg->slot = s;
		cfg->func = f;
		cfg->vendor = REG(PCIR_VENDOR, 2);
		cfg->device = REG(PCIR_DEVICE, 2);
		cfg->cmdreg = REG(PCIR_COMMAND, 2);
		cfg->statreg = REG(PCIR_STATUS, 2);
		cfg->baseclass = REG(PCIR_CLASS, 1);
		cfg->subclass = REG(PCIR_SUBCLASS, 1);
		cfg->progif = REG(PCIR_PROGIF, 1);
		cfg->revid = REG(PCIR_REVID, 1);
		cfg->hdrtype = REG(PCIR_HDRTYPE, 1);
		cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1);
		cfg->lattimer = REG(PCIR_LATTIMER, 1);
		cfg->intpin = REG(PCIR_INTPIN, 1);
		cfg->intline = REG(PCIR_INTLINE, 1);

		cfg->mingnt = REG(PCIR_MINGNT, 1);
		cfg->maxlat = REG(PCIR_MAXLAT, 1);

		/* The multi-function bit lives in the header-type byte. */
		cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0;
		cfg->hdrtype &= ~PCIM_MFDEV;

		pci_fixancient(cfg);
		pci_hdrtypedata(pcib, b, s, f, cfg);

		pci_read_capabilities(pcib, cfg);

		STAILQ_INSERT_TAIL(devlist_head, devlist_entry, pci_links);

		/* Mirror the config data into the pciio(4) conf record. */
		devlist_entry->conf.pc_sel.pc_domain = cfg->domain;
		devlist_entry->conf.pc_sel.pc_bus = cfg->bus;
		devlist_entry->conf.pc_sel.pc_dev = cfg->slot;
		devlist_entry->conf.pc_sel.pc_func = cfg->func;
		devlist_entry->conf.pc_hdr = cfg->hdrtype;

		devlist_entry->conf.pc_subvendor = cfg->subvendor;
		devlist_entry->conf.pc_subdevice = cfg->subdevice;
		devlist_entry->conf.pc_vendor = cfg->vendor;
		devlist_entry->conf.pc_device = cfg->device;

		devlist_entry->conf.pc_class = cfg->baseclass;
		devlist_entry->conf.pc_subclass = cfg->subclass;
		devlist_entry->conf.pc_progif = cfg->progif;
		devlist_entry->conf.pc_revid = cfg->revid;

		pci_numdevs++;
		pci_generation++;
	}
	return (devlist_entry);
#undef REG
}
535
3a6dc23c
SZ
536static int
537pci_fixup_nextptr(int *nextptr0)
538{
539 int nextptr = *nextptr0;
540
541 /* "Next pointer" is only one byte */
542 KASSERT(nextptr <= 0xff, ("Illegal next pointer %d\n", nextptr));
543
544 if (nextptr & 0x3) {
545 /*
546 * PCI local bus spec 3.0:
547 *
548 * "... The bottom two bits of all pointers are reserved
549 * and must be implemented as 00b although software must
550 * mask them to allow for future uses of these bits ..."
551 */
552 if (bootverbose) {
553 kprintf("Illegal PCI extended capability "
554 "offset, fixup 0x%02x -> 0x%02x\n",
555 nextptr, nextptr & ~0x3);
556 }
557 nextptr &= ~0x3;
558 }
559 *nextptr0 = nextptr;
560
561 if (nextptr < 0x40) {
562 if (nextptr != 0) {
563 kprintf("Illegal PCI extended capability "
564 "offset 0x%02x", nextptr);
565 }
566 return 0;
567 }
568 return 1;
569}
570
/*
 * Parse the power-management capability: cache the capability word and
 * the config-space offsets of the status and PMCSR registers.  Only
 * the first PM capability found on a device is recorded.
 */
static void
pci_read_cap_pmgt(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
{
#define REG(n, w)	\
	PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)

	struct pcicfg_pp *pp = &cfg->pp;

	/* Already recorded one PM capability; ignore any others. */
	if (pp->pp_cap)
		return;

	pp->pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
	pp->pp_status = ptr + PCIR_POWER_STATUS;
	pp->pp_pmcsr = ptr + PCIR_POWER_PMCSR;

	if ((nextptr - ptr) > PCIR_POWER_DATA) {
		/*
		 * XXX
		 * We should write to data_select and read back from
		 * data_scale to determine whether data register is
		 * implemented.
		 */
#ifdef foo
		pp->pp_data = ptr + PCIR_POWER_DATA;
#else
		pp->pp_data = 0;
#endif
	}

#undef REG
}
602
/*
 * Parse a HyperTransport capability (x86 only).  Records slave
 * capabilities and, for MSI-mapping capabilities, the mapping window
 * address so MSI messages can be routed through the HT bridge.
 */
static void
pci_read_cap_ht(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
{
#if defined(__i386__) || defined(__x86_64__)

#define REG(n, w)	\
	PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)

	struct pcicfg_ht *ht = &cfg->ht;
	uint64_t addr;
	uint32_t val;

	/* Determine HT-specific capability type. */
	val = REG(ptr + PCIR_HT_COMMAND, 2);

	/* Slave/primary interface capabilities use a 3-bit type field. */
	if ((val & 0xe000) == PCIM_HTCAP_SLAVE)
		cfg->ht.ht_slave = ptr;

	if ((val & PCIM_HTCMD_CAP_MASK) != PCIM_HTCAP_MSI_MAPPING)
		return;

	if (!(val & PCIM_HTCMD_MSI_FIXED)) {
		/* Sanity check the mapping window. */
		addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI, 4);
		addr <<= 32;
		addr |= REG(ptr + PCIR_HTMSI_ADDRESS_LO, 4);
		if (addr != MSI_X86_ADDR_BASE) {
			device_printf(pcib, "HT Bridge at pci%d:%d:%d:%d "
			    "has non-default MSI window 0x%llx\n",
			    cfg->domain, cfg->bus, cfg->slot, cfg->func,
			    (long long)addr);
		}
	} else {
		addr = MSI_X86_ADDR_BASE;
	}

	ht->ht_msimap = ptr;
	ht->ht_msictrl = val;
	ht->ht_msiaddr = addr;

#undef REG

#endif /* __i386__ || __x86_64__ */
}
647
/*
 * Parse the MSI capability: record its config-space location, the
 * control word, and the number of messages the device can generate.
 */
static void
pci_read_cap_msi(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
{
#define REG(n, w)	\
	PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)

	struct pcicfg_msi *msi = &cfg->msi;

	msi->msi_location = ptr;
	msi->msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2);
	/* Multiple-message-capable field encodes a power-of-two count. */
	msi->msi_msgnum = 1 << ((msi->msi_ctrl & PCIM_MSICTRL_MMC_MASK) >> 1);

#undef REG
}
662
/*
 * Parse the MSI-X capability: record its location, the control word,
 * the vector count, and which BARs/offsets hold the vector table and
 * the pending bit array (PBA).
 */
static void
pci_read_cap_msix(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
{
#define REG(n, w)	\
	PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)

	struct pcicfg_msix *msix = &cfg->msix;
	uint32_t val;

	msix->msix_location = ptr;
	msix->msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2);
	/* The table-size field is encoded as N-1. */
	msix->msix_msgnum = (msix->msix_ctrl & PCIM_MSIXCTRL_TABLE_SIZE) + 1;

	/* Vector table: BAR indicator in the low bits, offset above. */
	val = REG(ptr + PCIR_MSIX_TABLE, 4);
	msix->msix_table_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK);
	msix->msix_table_offset = val & ~PCIM_MSIX_BIR_MASK;

	/* Pending bit array: same BIR/offset encoding. */
	val = REG(ptr + PCIR_MSIX_PBA, 4);
	msix->msix_pba_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK);
	msix->msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK;

#undef REG
}
686
/* Record the config-space offset of the VPD capability registers. */
static void
pci_read_cap_vpd(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
{
	cfg->vpd.vpd_reg = ptr;
}
692
/*
 * Parse the bridge subvendor capability: type 1 headers have no
 * subvendor/subdevice registers of their own, so bridges expose them
 * through this capability instead.
 */
static void
pci_read_cap_subvendor(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
{
#define REG(n, w)	\
	PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)

	/* Should always be true. */
	if ((cfg->hdrtype & PCIM_HDRTYPE) == 1) {
		uint32_t val;

		val = REG(ptr + PCIR_SUBVENDCAP_ID, 4);
		cfg->subvendor = val & 0xffff;
		cfg->subdevice = val >> 16;
	}

#undef REG
}
710
/* Note the presence of a PCI-X capability and record its offset. */
static void
pci_read_cap_pcix(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
{
	/*
	 * Assume we have a PCI-X chipset if we have
	 * at least one PCI-PCI bridge with a PCI-X
	 * capability.  Note that some systems with
	 * PCI-express or HT chipsets might match on
	 * this check as well.
	 */
	if ((cfg->hdrtype & PCIM_HDRTYPE) == 1)
		pcix_chipset = 1;

	cfg->pcix.pcix_ptr = ptr;
}
726
727static int
728pcie_slotimpl(const pcicfgregs *cfg)
729{
730 const struct pcicfg_expr *expr = &cfg->expr;
731 uint16_t port_type;
732
733 /*
734 * Only version 1 can be parsed currently
735 */
736 if ((expr->expr_cap & PCIEM_CAP_VER_MASK) != PCIEM_CAP_VER_1)
737 return 0;
738
739 /*
740 * - Slot implemented bit is meaningful iff current port is
741 * root port or down stream port.
742 * - Testing for root port or down stream port is meanningful
743 * iff PCI configure has type 1 header.
744 */
745
746 if (cfg->hdrtype != 1)
747 return 0;
748
749 port_type = expr->expr_cap & PCIEM_CAP_PORT_TYPE;
750 if (port_type != PCIE_ROOT_PORT && port_type != PCIE_DOWN_STREAM_PORT)
751 return 0;
752
753 if (!(expr->expr_cap & PCIEM_CAP_SLOT_IMPL))
754 return 0;
755
756 return 1;
3a6dc23c
SZ
757}
758
/*
 * Parse the PCI Express capability: record its offset and capability
 * word, and read the slot capabilities when a slot is implemented.
 */
static void
pci_read_cap_express(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
{
#define REG(n, w)	\
	PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)

	struct pcicfg_expr *expr = &cfg->expr;

	/*
	 * Assume we have a PCI-express chipset if we have
	 * at least one PCI-express device.
	 */
	pcie_chipset = 1;

	expr->expr_ptr = ptr;
	expr->expr_cap = REG(ptr + PCIER_CAPABILITY, 2);

	/*
	 * Only version 1 can be parsed currently
	 */
	if ((expr->expr_cap & PCIEM_CAP_VER_MASK) != PCIEM_CAP_VER_1)
		return;

	/*
	 * Read slot capabilities.  Slot capabilities exists iff
	 * current port's slot is implemented
	 */
	if (pcie_slotimpl(cfg))
		expr->expr_slotcap = REG(ptr + PCIER_SLOTCAP, 4);

#undef REG
}
791
/*
 * Walk the device's capability list (if present) and invoke the
 * registered parser from pci_read_caps[] for each known capability.
 * On x86 this also enables the MSI mapping window for HyperTransport
 * slaves found during the walk.
 */
static void
pci_read_capabilities(device_t pcib, pcicfgregs *cfg)
{
#define REG(n, w)	PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
#define WREG(n, v, w)	PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w)

	uint32_t val;
	int nextptr, ptrptr;

	if ((REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT) == 0) {
		/* No capabilities */
		return;
	}

	/* The location of the capability pointer depends on header type. */
	switch (cfg->hdrtype & PCIM_HDRTYPE) {
	case 0:
	case 1:
		ptrptr = PCIR_CAP_PTR;
		break;
	case 2:
		ptrptr = PCIR_CAP_PTR_2;	/* cardbus capabilities ptr */
		break;
	default:
		return;				/* no capabilities support */
	}
	nextptr = REG(ptrptr, 1);	/* sanity check? */

	/*
	 * Read capability entries.
	 */
	while (pci_fixup_nextptr(&nextptr)) {
		const struct pci_read_cap *rc;
		int ptr = nextptr;

		/* Find the next entry */
		nextptr = REG(ptr + PCICAP_NEXTPTR, 1);

		/* Process this entry */
		val = REG(ptr + PCICAP_ID, 1);
		for (rc = pci_read_caps; rc->read_cap != NULL; ++rc) {
			if (rc->cap == val) {
				rc->read_cap(pcib, ptr, nextptr, cfg);
				break;
			}
		}
	}

#if defined(__i386__) || defined(__x86_64__)
	/*
	 * Enable the MSI mapping window for all HyperTransport
	 * slaves.  PCI-PCI bridges have their windows enabled via
	 * PCIB_MAP_MSI().
	 */
	if (cfg->ht.ht_slave != 0 && cfg->ht.ht_msimap != 0 &&
	    !(cfg->ht.ht_msictrl & PCIM_HTCMD_MSI_ENABLE)) {
		device_printf(pcib,
	    "Enabling MSI window for HyperTransport slave at pci%d:%d:%d:%d\n",
		    cfg->domain, cfg->bus, cfg->slot, cfg->func);
		cfg->ht.ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
		WREG(cfg->ht.ht_msimap + PCIR_HT_COMMAND, cfg->ht.ht_msictrl,
		    2);
	}
#endif

/* REG and WREG use carry through to next functions */
}
858
4d28e78f
SZ
859/*
860 * PCI Vital Product Data
861 */
862
863#define PCI_VPD_TIMEOUT 1000000
984263bc 864
4d28e78f
SZ
865static int
866pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data)
984263bc 867{
4d28e78f 868 int count = PCI_VPD_TIMEOUT;
984263bc 869
4d28e78f 870 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
984263bc 871
4d28e78f 872 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2);
984263bc 873
4d28e78f
SZ
874 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000) {
875 if (--count < 0)
876 return (ENXIO);
877 DELAY(1); /* limit looping */
878 }
879 *data = (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4));
984263bc 880
984263bc
MD
881 return (0);
882}
984263bc 883
4d28e78f
SZ
#if 0
/*
 * Write one 32-bit word of Vital Product Data at byte offset 'reg',
 * spinning until the hardware clears the flag bit.  Returns 0 on
 * success, ENXIO on timeout.  (Currently compiled out.)
 *
 * Fix: corrected the "must by" typo in the KASSERT message.
 */
static int
pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data)
{
	int count = PCI_VPD_TIMEOUT;

	KASSERT((reg & 3) == 0, ("VPD register must be 4 byte aligned"));

	WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4);
	WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2);
	/* Hardware clears flag bit 15 when the write has completed. */
	while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000) {
		if (--count < 0)
			return (ENXIO);
		DELAY(1);	/* limit looping */
	}

	return (0);
}
#endif
903
904#undef PCI_VPD_TIMEOUT
905
/* Cursor state for the byte-at-a-time VPD reader (vpd_nextbyte()). */
struct vpd_readstate {
	device_t pcib;		/* bridge used for config-space access */
	pcicfgregs *cfg;	/* device whose VPD is being read */
	uint32_t val;		/* last 32-bit word fetched */
	int bytesinval;		/* unconsumed bytes remaining in 'val' */
	int off;		/* next VPD byte offset to fetch */
	uint8_t cksum;		/* running checksum of bytes delivered */
};
914
915static int
916vpd_nextbyte(struct vpd_readstate *vrs, uint8_t *data)
917{
918 uint32_t reg;
919 uint8_t byte;
920
921 if (vrs->bytesinval == 0) {
922 if (pci_read_vpd_reg(vrs->pcib, vrs->cfg, vrs->off, &reg))
923 return (ENXIO);
924 vrs->val = le32toh(reg);
925 vrs->off += 4;
926 byte = vrs->val & 0xff;
927 vrs->bytesinval = 3;
928 } else {
929 vrs->val = vrs->val >> 8;
930 byte = vrs->val & 0xff;
931 vrs->bytesinval--;
932 }
933
934 vrs->cksum += byte;
935 *data = byte;
936 return (0);
937}
938
d85e7311
SZ
/*
 * Exported wrapper around pcie_slotimpl(): report whether the PCIe
 * port 'dev' implements a slot.
 */
int
pcie_slot_implemented(device_t dev)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);

	return pcie_slotimpl(&dinfo->cfg);
}
946
4d28e78f
SZ
/*
 * Program the PCIe max read request size of 'dev'.  'rqsize' must be
 * one of the PCIEM_DEVCTL_MAX_READRQ_* encodings; panics on an invalid
 * size or when 'dev' is not a PCIe device.  The device control
 * register is only rewritten when the programmed value differs.
 */
void
pcie_set_max_readrq(device_t dev, uint16_t rqsize)
{
	uint8_t expr_ptr;
	uint16_t val;

	rqsize &= PCIEM_DEVCTL_MAX_READRQ_MASK;
	if (rqsize > PCIEM_DEVCTL_MAX_READRQ_4096) {
		panic("%s: invalid max read request size 0x%02x\n",
		      device_get_nameunit(dev), rqsize);
	}

	expr_ptr = pci_get_pciecap_ptr(dev);
	if (!expr_ptr)
		panic("%s: not PCIe device\n", device_get_nameunit(dev));

	val = pci_read_config(dev, expr_ptr + PCIER_DEVCTRL, 2);
	if ((val & PCIEM_DEVCTL_MAX_READRQ_MASK) != rqsize) {
		/* Message intentionally continued by the kprintf below. */
		if (bootverbose)
			device_printf(dev, "adjust device control 0x%04x", val);

		val &= ~PCIEM_DEVCTL_MAX_READRQ_MASK;
		val |= rqsize;
		pci_write_config(dev, expr_ptr + PCIER_DEVCTRL, val, 2);

		if (bootverbose)
			kprintf(" -> 0x%04x\n", val);
	}
}
976
441580ca
SZ
977uint16_t
978pcie_get_max_readrq(device_t dev)
979{
980 uint8_t expr_ptr;
981 uint16_t val;
982
983 expr_ptr = pci_get_pciecap_ptr(dev);
984 if (!expr_ptr)
985 panic("%s: not PCIe device\n", device_get_nameunit(dev));
986
987 val = pci_read_config(dev, expr_ptr + PCIER_DEVCTRL, 2);
988 return (val & PCIEM_DEVCTL_MAX_READRQ_MASK);
989}
990
4d28e78f
SZ
/*
 * Read and parse the device's Vital Product Data (VPD) via the VPD
 * capability, one byte at a time through vpd_nextbyte().  The parser is
 * a small state machine:
 *
 *	state 0 - resource item header (small or large form)
 *	state 1 - identifier string body     -> cfg->vpd.vpd_ident
 *	state 2 - VPD-R keyword header       -> cfg->vpd.vpd_ros[]
 *	state 3 - VPD-R keyword value bytes
 *	state 4 - skip bytes of an unhandled item
 *	state 5 - VPD-W keyword header       -> cfg->vpd.vpd_w[]
 *	state 6 - VPD-W keyword value bytes
 *	state -1 - normal termination, state -2 - read (I/O) error
 *
 * The running checksum carried in vrs.cksum is validated against the
 * "RV" keyword; on checksum failure the read-only data is discarded,
 * and on I/O error everything parsed so far is discarded.  In all cases
 * cfg->vpd.vpd_cached is set so the device is not re-read.
 */
static void
pci_read_vpd(device_t pcib, pcicfgregs *cfg)
{
	struct vpd_readstate vrs;
	int state;
	int name;
	int remain;
	int i;
	int alloc, off;		/* alloc/off for RO/W arrays */
	int cksumvalid;
	int dflen;
	uint8_t byte;
	uint8_t byte2;

	/* init vpd reader */
	vrs.bytesinval = 0;
	vrs.off = 0;
	vrs.pcib = pcib;
	vrs.cfg = cfg;
	vrs.cksum = 0;

	state = 0;
	name = remain = i = 0;	/* shut up stupid gcc */
	alloc = off = 0;	/* shut up stupid gcc */
	dflen = 0;		/* shut up stupid gcc */
	cksumvalid = -1;	/* -1 = not yet checked, 0 = bad, 1 = good */
	while (state >= 0) {
		if (vpd_nextbyte(&vrs, &byte)) {
			state = -2;
			break;
		}
#if 0
		kprintf("vpd: val: %#x, off: %d, bytesinval: %d, byte: %#hhx, " \
		    "state: %d, remain: %d, name: %#x, i: %d\n", vrs.val,
		    vrs.off, vrs.bytesinval, byte, state, remain, name, i);
#endif
		switch (state) {
		case 0:		/* item name */
			if (byte & 0x80) {
				/* Large resource: 16-bit little-endian length. */
				if (vpd_nextbyte(&vrs, &byte2)) {
					state = -2;
					break;
				}
				remain = byte2;
				if (vpd_nextbyte(&vrs, &byte2)) {
					state = -2;
					break;
				}
				remain |= byte2 << 8;
				/* Length may not run past the 512-byte VPD space. */
				if (remain > (0x7f*4 - vrs.off)) {
					state = -1;
					kprintf(
			    "pci%d:%d:%d:%d: invalid VPD data, remain %#x\n",
					    cfg->domain, cfg->bus, cfg->slot,
					    cfg->func, remain);
				}
				name = byte & 0x7f;
			} else {
				/* Small resource: 3-bit length, 4-bit name. */
				remain = byte & 0x7;
				name = (byte >> 3) & 0xf;
			}
			switch (name) {
			case 0x2:	/* String */
				cfg->vpd.vpd_ident = kmalloc(remain + 1,
				    M_DEVBUF, M_WAITOK);
				i = 0;
				state = 1;
				break;
			case 0xf:	/* End */
				state = -1;
				break;
			case 0x10:	/* VPD-R */
				alloc = 8;
				off = 0;
				cfg->vpd.vpd_ros = kmalloc(alloc *
				    sizeof(*cfg->vpd.vpd_ros), M_DEVBUF,
				    M_WAITOK | M_ZERO);
				state = 2;
				break;
			case 0x11:	/* VPD-W */
				alloc = 8;
				off = 0;
				cfg->vpd.vpd_w = kmalloc(alloc *
				    sizeof(*cfg->vpd.vpd_w), M_DEVBUF,
				    M_WAITOK | M_ZERO);
				state = 5;
				break;
			default:	/* Invalid data, abort */
				state = -1;
				break;
			}
			break;

		case 1:	/* Identifier String */
			cfg->vpd.vpd_ident[i++] = byte;
			remain--;
			if (remain == 0)  {
				cfg->vpd.vpd_ident[i] = '\0';
				state = 0;
			}
			break;

		case 2:	/* VPD-R Keyword Header */
			/* Grow the array geometrically as keywords arrive. */
			if (off == alloc) {
				cfg->vpd.vpd_ros = krealloc(cfg->vpd.vpd_ros,
				    (alloc *= 2) * sizeof(*cfg->vpd.vpd_ros),
				    M_DEVBUF, M_WAITOK | M_ZERO);
			}
			cfg->vpd.vpd_ros[off].keyword[0] = byte;
			if (vpd_nextbyte(&vrs, &byte2)) {
				state = -2;
				break;
			}
			cfg->vpd.vpd_ros[off].keyword[1] = byte2;
			if (vpd_nextbyte(&vrs, &byte2)) {
				state = -2;
				break;
			}
			dflen = byte2;
			if (dflen == 0 &&
			    strncmp(cfg->vpd.vpd_ros[off].keyword, "RV",
			    2) == 0) {
				/*
				 * if this happens, we can't trust the rest
				 * of the VPD.
				 */
				kprintf(
				    "pci%d:%d:%d:%d: bad keyword length: %d\n",
				    cfg->domain, cfg->bus, cfg->slot,
				    cfg->func, dflen);
				cksumvalid = 0;
				state = -1;
				break;
			} else if (dflen == 0) {
				cfg->vpd.vpd_ros[off].value = kmalloc(1 *
				    sizeof(*cfg->vpd.vpd_ros[off].value),
				    M_DEVBUF, M_WAITOK);
				cfg->vpd.vpd_ros[off].value[0] = '\x00';
			} else
				cfg->vpd.vpd_ros[off].value = kmalloc(
				    (dflen + 1) *
				    sizeof(*cfg->vpd.vpd_ros[off].value),
				    M_DEVBUF, M_WAITOK);
			remain -= 3;	/* keyword (2) + length byte (1) */
			i = 0;
			/* keep in sync w/ state 3's transitions */
			if (dflen == 0 && remain == 0)
				state = 0;
			else if (dflen == 0)
				state = 2;
			else
				state = 3;
			break;

		case 3:	/* VPD-R Keyword Value */
			cfg->vpd.vpd_ros[off].value[i++] = byte;
			/* "RV" holds the checksum byte; validate once. */
			if (strncmp(cfg->vpd.vpd_ros[off].keyword,
			    "RV", 2) == 0 && cksumvalid == -1) {
				if (vrs.cksum == 0)
					cksumvalid = 1;
				else {
					if (bootverbose)
						kprintf(
				"pci%d:%d:%d:%d: bad VPD cksum, remain %hhu\n",
						    cfg->domain, cfg->bus,
						    cfg->slot, cfg->func,
						    vrs.cksum);
					cksumvalid = 0;
					state = -1;
					break;
				}
			}
			dflen--;
			remain--;
			/* keep in sync w/ state 2's transitions */
			if (dflen == 0)
				cfg->vpd.vpd_ros[off++].value[i++] = '\0';
			if (dflen == 0 && remain == 0) {
				/* Item complete: trim the array to size. */
				cfg->vpd.vpd_rocnt = off;
				cfg->vpd.vpd_ros = krealloc(cfg->vpd.vpd_ros,
				    off * sizeof(*cfg->vpd.vpd_ros),
				    M_DEVBUF, M_WAITOK | M_ZERO);
				state = 0;
			} else if (dflen == 0)
				state = 2;
			break;

		case 4:
			/* Skip the remainder of an unhandled item. */
			remain--;
			if (remain == 0)
				state = 0;
			break;

		case 5:	/* VPD-W Keyword Header */
			if (off == alloc) {
				cfg->vpd.vpd_w = krealloc(cfg->vpd.vpd_w,
				    (alloc *= 2) * sizeof(*cfg->vpd.vpd_w),
				    M_DEVBUF, M_WAITOK | M_ZERO);
			}
			cfg->vpd.vpd_w[off].keyword[0] = byte;
			if (vpd_nextbyte(&vrs, &byte2)) {
				state = -2;
				break;
			}
			cfg->vpd.vpd_w[off].keyword[1] = byte2;
			if (vpd_nextbyte(&vrs, &byte2)) {
				state = -2;
				break;
			}
			cfg->vpd.vpd_w[off].len = dflen = byte2;
			/* Record where in VPD space this value starts. */
			cfg->vpd.vpd_w[off].start = vrs.off - vrs.bytesinval;
			cfg->vpd.vpd_w[off].value = kmalloc((dflen + 1) *
			    sizeof(*cfg->vpd.vpd_w[off].value),
			    M_DEVBUF, M_WAITOK);
			remain -= 3;	/* keyword (2) + length byte (1) */
			i = 0;
			/* keep in sync w/ state 6's transitions */
			if (dflen == 0 && remain == 0)
				state = 0;
			else if (dflen == 0)
				state = 5;
			else
				state = 6;
			break;

		case 6:	/* VPD-W Keyword Value */
			cfg->vpd.vpd_w[off].value[i++] = byte;
			dflen--;
			remain--;
			/* keep in sync w/ state 5's transitions */
			if (dflen == 0)
				cfg->vpd.vpd_w[off++].value[i++] = '\0';
			if (dflen == 0 && remain == 0) {
				cfg->vpd.vpd_wcnt = off;
				cfg->vpd.vpd_w = krealloc(cfg->vpd.vpd_w,
				    off * sizeof(*cfg->vpd.vpd_w),
				    M_DEVBUF, M_WAITOK | M_ZERO);
				state = 0;
			} else if (dflen == 0)
				state = 5;
			break;

		default:
			kprintf("pci%d:%d:%d:%d: invalid state: %d\n",
			    cfg->domain, cfg->bus, cfg->slot, cfg->func,
			    state);
			state = -1;
			break;
		}
	}

	if (cksumvalid == 0 || state < -1) {
		/* read-only data bad, clean up */
		if (cfg->vpd.vpd_ros != NULL) {
			for (off = 0; cfg->vpd.vpd_ros[off].value; off++)
				kfree(cfg->vpd.vpd_ros[off].value, M_DEVBUF);
			kfree(cfg->vpd.vpd_ros, M_DEVBUF);
			cfg->vpd.vpd_ros = NULL;
		}
	}
	if (state < -1) {
		/* I/O error, clean up */
		kprintf("pci%d:%d:%d:%d: failed to read VPD data.\n",
		    cfg->domain, cfg->bus, cfg->slot, cfg->func);
		if (cfg->vpd.vpd_ident != NULL) {
			kfree(cfg->vpd.vpd_ident, M_DEVBUF);
			cfg->vpd.vpd_ident = NULL;
		}
		if (cfg->vpd.vpd_w != NULL) {
			for (off = 0; cfg->vpd.vpd_w[off].value; off++)
				kfree(cfg->vpd.vpd_w[off].value, M_DEVBUF);
			kfree(cfg->vpd.vpd_w, M_DEVBUF);
			cfg->vpd.vpd_w = NULL;
		}
	}
	cfg->vpd.vpd_cached = 1;
#undef REG
#undef WREG
}
1270
1271int
1272pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr)
1273{
1274 struct pci_devinfo *dinfo = device_get_ivars(child);
1275 pcicfgregs *cfg = &dinfo->cfg;
1276
1277 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1278 pci_read_vpd(device_get_parent(dev), cfg);
1279
1280 *identptr = cfg->vpd.vpd_ident;
1281
1282 if (*identptr == NULL)
1283 return (ENXIO);
1284
1285 return (0);
1286}
1287
1288int
1289pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw,
1290 const char **vptr)
1291{
1292 struct pci_devinfo *dinfo = device_get_ivars(child);
1293 pcicfgregs *cfg = &dinfo->cfg;
1294 int i;
1295
1296 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1297 pci_read_vpd(device_get_parent(dev), cfg);
1298
1299 for (i = 0; i < cfg->vpd.vpd_rocnt; i++)
1300 if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword,
1301 sizeof(cfg->vpd.vpd_ros[i].keyword)) == 0) {
1302 *vptr = cfg->vpd.vpd_ros[i].value;
1303 }
1304
1305 if (i != cfg->vpd.vpd_rocnt)
1306 return (0);
1307
1308 *vptr = NULL;
1309 return (ENXIO);
1310}
1311
1312/*
1313 * Return the offset in configuration space of the requested extended
1314 * capability entry or 0 if the specified capability was not found.
1315 */
1316int
1317pci_find_extcap_method(device_t dev, device_t child, int capability,
1318 int *capreg)
1319{
1320 struct pci_devinfo *dinfo = device_get_ivars(child);
1321 pcicfgregs *cfg = &dinfo->cfg;
1322 u_int32_t status;
1323 u_int8_t ptr;
1324
1325 /*
1326 * Check the CAP_LIST bit of the PCI status register first.
1327 */
1328 status = pci_read_config(child, PCIR_STATUS, 2);
1329 if (!(status & PCIM_STATUS_CAPPRESENT))
1330 return (ENXIO);
1331
1332 /*
1333 * Determine the start pointer of the capabilities list.
1334 */
1335 switch (cfg->hdrtype & PCIM_HDRTYPE) {
1336 case 0:
1337 case 1:
1338 ptr = PCIR_CAP_PTR;
1339 break;
1340 case 2:
1341 ptr = PCIR_CAP_PTR_2;
1342 break;
1343 default:
1344 /* XXX: panic? */
1345 return (ENXIO); /* no extended capabilities support */
1346 }
1347 ptr = pci_read_config(child, ptr, 1);
1348
1349 /*
1350 * Traverse the capabilities list.
1351 */
1352 while (ptr != 0) {
1353 if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
1354 if (capreg != NULL)
1355 *capreg = ptr;
1356 return (0);
1357 }
1358 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1359 }
1360
1361 return (ENOENT);
1362}
1363
1364/*
1365 * Support for MSI-X message interrupts.
1366 */
cf8f3133
SZ
1367static void
1368pci_setup_msix_vector(device_t dev, u_int index, uint64_t address,
1369 uint32_t data)
4d28e78f
SZ
1370{
1371 struct pci_devinfo *dinfo = device_get_ivars(dev);
1372 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1373 uint32_t offset;
1374
1375 KASSERT(msix->msix_table_len > index, ("bogus index"));
1376 offset = msix->msix_table_offset + index * 16;
1377 bus_write_4(msix->msix_table_res, offset, address & 0xffffffff);
1378 bus_write_4(msix->msix_table_res, offset + 4, address >> 32);
1379 bus_write_4(msix->msix_table_res, offset + 8, data);
1380
1381 /* Enable MSI -> HT mapping. */
1382 pci_ht_map_msi(dev, address);
1383}
1384
cf8f3133
SZ
1385static void
1386pci_mask_msix_vector(device_t dev, u_int index)
4d28e78f
SZ
1387{
1388 struct pci_devinfo *dinfo = device_get_ivars(dev);
1389 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1390 uint32_t offset, val;
1391
1392 KASSERT(msix->msix_msgnum > index, ("bogus index"));
1393 offset = msix->msix_table_offset + index * 16 + 12;
1394 val = bus_read_4(msix->msix_table_res, offset);
1395 if (!(val & PCIM_MSIX_VCTRL_MASK)) {
1396 val |= PCIM_MSIX_VCTRL_MASK;
1397 bus_write_4(msix->msix_table_res, offset, val);
1398 }
1399}
1400
cf8f3133
SZ
1401static void
1402pci_unmask_msix_vector(device_t dev, u_int index)
4d28e78f
SZ
1403{
1404 struct pci_devinfo *dinfo = device_get_ivars(dev);
1405 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1406 uint32_t offset, val;
1407
1408 KASSERT(msix->msix_table_len > index, ("bogus index"));
1409 offset = msix->msix_table_offset + index * 16 + 12;
1410 val = bus_read_4(msix->msix_table_res, offset);
1411 if (val & PCIM_MSIX_VCTRL_MASK) {
1412 val &= ~PCIM_MSIX_VCTRL_MASK;
1413 bus_write_4(msix->msix_table_res, offset, val);
1414 }
1415}
1416
1417int
cf8f3133 1418pci_pending_msix_vector(device_t dev, u_int index)
4d28e78f
SZ
1419{
1420 struct pci_devinfo *dinfo = device_get_ivars(dev);
1421 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1422 uint32_t offset, bit;
1423
31646171
SZ
1424 KASSERT(msix->msix_table_res != NULL && msix->msix_pba_res != NULL,
1425 ("MSI-X is not setup yet\n"));
1426
4d28e78f
SZ
1427 KASSERT(msix->msix_table_len > index, ("bogus index"));
1428 offset = msix->msix_pba_offset + (index / 32) * 4;
1429 bit = 1 << index % 32;
1430 return (bus_read_4(msix->msix_pba_res, offset) & bit);
1431}
1432
/*
 * Restore MSI-X registers and table during resume.  If MSI-X is
 * enabled then walk the virtual table to restore the actual MSI-X
 * table.
 */
static void
pci_resume_msix(device_t dev)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	struct msix_table_entry *mte;
	struct msix_vector *mv;
	int i;

	if (msix->msix_alloc > 0) {
		/* Start from a fully-masked table. */
		pci_mask_msix_allvectors(dev);

		/* Program any messages with at least one handler. */
		for (i = 0; i < msix->msix_table_len; i++) {
			mte = &msix->msix_table[i];
			if (mte->mte_vector == 0 || mte->mte_handlers == 0)
				continue;
			/* mte_vector is a 1-based index into msix_vectors[]. */
			mv = &msix->msix_vectors[mte->mte_vector - 1];
			pci_setup_msix_vector(dev, i, mv->mv_address,
			    mv->mv_data);
			pci_unmask_msix_vector(dev, i);
		}
	}
	/* Restore the saved control word (re-enables MSI-X if it was on). */
	pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
	    msix->msix_ctrl, 2);
}
1464
/*
 * Attempt to allocate *count MSI-X messages.  The actual number allocated is
 * returned in *count.  After this function returns, each message will be
 * available to the driver as SYS_RES_IRQ resources starting at rid 1.
 */
int
pci_alloc_msix_method(device_t dev, device_t child, int *count)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	pcicfgregs *cfg = &dinfo->cfg;
	struct resource_list_entry *rle;
	int actual, error, i, irq, max;

	/* Don't let count == 0 get us into trouble. */
	if (*count == 0)
		return (EINVAL);

	/* If rid 0 is allocated, then fail. */
	rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
	if (rle != NULL && rle->res != NULL)
		return (ENXIO);

	/* Already have allocated messages? */
	if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
		return (ENXIO);

	/* If MSI is blacklisted for this system, fail. */
	if (pci_msi_blacklisted())
		return (ENXIO);

	/* MSI-X capability present? */
	if (cfg->msix.msix_location == 0 || !pci_do_msix)
		return (ENODEV);

	/* Make sure the appropriate BARs are mapped. */
	rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
	    cfg->msix.msix_table_bar);
	if (rle == NULL || rle->res == NULL ||
	    !(rman_get_flags(rle->res) & RF_ACTIVE))
		return (ENXIO);
	cfg->msix.msix_table_res = rle->res;
	if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
		rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
		    cfg->msix.msix_pba_bar);
		if (rle == NULL || rle->res == NULL ||
		    !(rman_get_flags(rle->res) & RF_ACTIVE))
			return (ENXIO);
	}
	/* NOTE: shares the table's rle when the PBA lives in the same BAR. */
	cfg->msix.msix_pba_res = rle->res;

	if (bootverbose)
		device_printf(child,
		    "attempting to allocate %d MSI-X vectors (%d supported)\n",
		    *count, cfg->msix.msix_msgnum);
	max = min(*count, cfg->msix.msix_msgnum);
	for (i = 0; i < max; i++) {
		/* Allocate a message. */
		error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq,
		    -1 /* XXX */);
		if (error)
			break;
		resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
		    irq, 1, -1);
	}
	/* Partial allocation is accepted; 'actual' is what we really got. */
	actual = i;

	if (actual == 0) {
		if (bootverbose) {
			device_printf(child,
			    "could not allocate any MSI-X vectors\n");
		}
		return (ENXIO);
	}

	if (bootverbose) {
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1);
		if (actual == 1)
			device_printf(child, "using IRQ %lu for MSI-X\n",
			    rle->start);
		else {
			int run;

			/*
			 * Be fancy and try to print contiguous runs of
			 * IRQ values as ranges.  'irq' is the previous IRQ.
			 * 'run' is true if we are in a range.
			 */
			device_printf(child, "using IRQs %lu", rle->start);
			irq = rle->start;
			run = 0;
			for (i = 1; i < actual; i++) {
				rle = resource_list_find(&dinfo->resources,
				    SYS_RES_IRQ, i + 1);

				/* Still in a run? */
				if (rle->start == irq + 1) {
					run = 1;
					irq++;
					continue;
				}

				/* Finish previous range. */
				if (run) {
					kprintf("-%d", irq);
					run = 0;
				}

				/* Start new range. */
				kprintf(",%lu", rle->start);
				irq = rle->start;
			}

			/* Unfinished range? */
			if (run)
				kprintf("-%d", irq);
			kprintf(" for MSI-X\n");
		}
	}

	/* Mask all vectors. */
	for (i = 0; i < cfg->msix.msix_msgnum; i++)
		pci_mask_msix_vector(child, i);

	/* Allocate and initialize vector data and virtual table. */
	cfg->msix.msix_vectors = kmalloc(sizeof(struct msix_vector) * actual,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	cfg->msix.msix_table = kmalloc(sizeof(struct msix_table_entry) * actual,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < actual; i++) {
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
		cfg->msix.msix_vectors[i].mv_irq = rle->start;
		cfg->msix.msix_table[i].mte_vector = i + 1;
	}

	/* Update control register to enable MSI-X. */
	cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
	pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
	    cfg->msix.msix_ctrl, 2);

	/* Update counts of alloc'd messages. */
	cfg->msix.msix_alloc = actual;
	cfg->msix.msix_table_len = actual;
	*count = actual;
	return (0);
}
1610
#ifdef notyet
/*
 * Release all allocated MSI-X vectors: disable MSI-X in the control
 * register, delete the SYS_RES_IRQ resource list entries and hand the
 * IRQs back to the parent bridge.  Fails with EBUSY while any vector
 * still has a handler installed or an IRQ resource allocated.
 */
static int
pci_release_msix(device_t dev, device_t child)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	struct resource_list_entry *rle;
	int i;

	/* Do we have any messages to release? */
	if (msix->msix_alloc == 0)
		return (ENODEV);

	/* Make sure none of the resources are allocated. */
	for (i = 0; i < msix->msix_table_len; i++) {
		if (msix->msix_table[i].mte_vector == 0)
			continue;
		if (msix->msix_table[i].mte_handlers > 0)
			return (EBUSY);
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
		KASSERT(rle != NULL, ("missing resource"));
		if (rle->res != NULL)
			return (EBUSY);
	}

	/* Update control register to disable MSI-X. */
	msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE;
	pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL,
	    msix->msix_ctrl, 2);

	/* Free the resource list entries. */
	for (i = 0; i < msix->msix_table_len; i++) {
		if (msix->msix_table[i].mte_vector == 0)
			continue;
		resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
	}
	kfree(msix->msix_table, M_DEVBUF);
	msix->msix_table_len = 0;

	/* Release the IRQs. */
	for (i = 0; i < msix->msix_alloc; i++)
		PCIB_RELEASE_MSIX(device_get_parent(dev), child,
		    msix->msix_vectors[i].mv_irq, -1 /* XXX */);
	kfree(msix->msix_vectors, M_DEVBUF);
	msix->msix_alloc = 0;
	return (0);
}
#endif
4d28e78f
SZ
1659
1660/*
1661 * Return the max supported MSI-X messages this device supports.
1662 * Basically, assuming the MD code can alloc messages, this function
1663 * should return the maximum value that pci_alloc_msix() can return.
1664 * Thus, it is subject to the tunables, etc.
1665 */
1666int
1667pci_msix_count_method(device_t dev, device_t child)
1668{
1669 struct pci_devinfo *dinfo = device_get_ivars(child);
1670 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1671
1672 if (pci_do_msix && msix->msix_location != 0)
1673 return (msix->msix_msgnum);
1674 return (0);
1675}
1676
31646171
SZ
1677int
1678pci_setup_msix(device_t dev)
1679{
1680 struct pci_devinfo *dinfo = device_get_ivars(dev);
1681 pcicfgregs *cfg = &dinfo->cfg;
1682 struct resource_list_entry *rle;
1683 struct resource *table_res, *pba_res;
1684
1685 KASSERT(cfg->msix.msix_table_res == NULL &&
1686 cfg->msix.msix_pba_res == NULL, ("MSI-X has been setup yet\n"));
1687
1688 /* If rid 0 is allocated, then fail. */
1689 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
1690 if (rle != NULL && rle->res != NULL)
1691 return (ENXIO);
1692
1693 /* Already have allocated MSIs? */
1694 if (cfg->msi.msi_alloc != 0)
1695 return (ENXIO);
1696
1697 /* If MSI is blacklisted for this system, fail. */
1698 if (pci_msi_blacklisted())
1699 return (ENXIO);
1700
1701 /* MSI-X capability present? */
1702 if (cfg->msix.msix_location == 0 || !pci_do_msix)
1703 return (ENODEV);
1704
1705 /* Make sure the appropriate BARs are mapped. */
1706 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1707 cfg->msix.msix_table_bar);
1708 if (rle == NULL || rle->res == NULL ||
1709 !(rman_get_flags(rle->res) & RF_ACTIVE))
1710 return (ENXIO);
1711 table_res = rle->res;
1712 if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
1713 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1714 cfg->msix.msix_pba_bar);
1715 if (rle == NULL || rle->res == NULL ||
1716 !(rman_get_flags(rle->res) & RF_ACTIVE))
1717 return (ENXIO);
1718 }
1719 pba_res = rle->res;
1720
1721 cfg->msix.msix_table_res = table_res;
1722 cfg->msix.msix_pba_res = pba_res;
1723
1724 pci_mask_msix_allvectors(dev);
1725
1726 return 0;
1727}
1728
1729void
1730pci_teardown_msix(device_t dev)
1731{
1732 struct pci_devinfo *dinfo = device_get_ivars(dev);
1733 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1734
1735 KASSERT(msix->msix_table_res != NULL &&
1736 msix->msix_pba_res != NULL, ("MSI-X is not setup yet\n"));
1737
1738 pci_mask_msix_allvectors(dev);
1739
1740 msix->msix_table_res = NULL;
1741 msix->msix_pba_res = NULL;
1742}
1743
1744static void
1745pci_mask_msix_allvectors(device_t dev)
1746{
1747 struct pci_devinfo *dinfo = device_get_ivars(dev);
1748 u_int i;
1749
1750 for (i = 0; i < dinfo->cfg.msix.msix_msgnum; ++i)
1751 pci_mask_msix_vector(dev, i);
1752}
1753
4d28e78f
SZ
1754/*
1755 * HyperTransport MSI mapping control
1756 */
1757void
1758pci_ht_map_msi(device_t dev, uint64_t addr)
1759{
1760 struct pci_devinfo *dinfo = device_get_ivars(dev);
1761 struct pcicfg_ht *ht = &dinfo->cfg.ht;
1762
1763 if (!ht->ht_msimap)
1764 return;
1765
1766 if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) &&
1767 ht->ht_msiaddr >> 20 == addr >> 20) {
1768 /* Enable MSI -> HT mapping. */
1769 ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
1770 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1771 ht->ht_msictrl, 2);
1772 }
1773
1774 if (!addr && ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) {
1775 /* Disable MSI -> HT mapping. */
1776 ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE;
1777 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1778 ht->ht_msictrl, 2);
1779 }
1780}
1781
1782/*
1783 * Support for MSI message signalled interrupts.
1784 */
1785void
1786pci_enable_msi(device_t dev, uint64_t address, uint16_t data)
1787{
1788 struct pci_devinfo *dinfo = device_get_ivars(dev);
1789 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1790
1791 /* Write data and address values. */
1792 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
1793 address & 0xffffffff, 4);
1794 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1795 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR_HIGH,
1796 address >> 32, 4);
1797 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA_64BIT,
1798 data, 2);
1799 } else
1800 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA, data,
1801 2);
1802
1803 /* Enable MSI in the control register. */
1804 msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE;
1805 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1806 2);
1807
1808 /* Enable MSI -> HT mapping. */
1809 pci_ht_map_msi(dev, address);
1810}
1811
1812void
1813pci_disable_msi(device_t dev)
1814{
1815 struct pci_devinfo *dinfo = device_get_ivars(dev);
1816 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1817
1818 /* Disable MSI -> HT mapping. */
1819 pci_ht_map_msi(dev, 0);
1820
1821 /* Disable MSI in the control register. */
1822 msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE;
1823 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1824 2);
1825}
1826
1827/*
1828 * Restore MSI registers during resume. If MSI is enabled then
1829 * restore the data and address registers in addition to the control
1830 * register.
1831 */
1832static void
1833pci_resume_msi(device_t dev)
1834{
1835 struct pci_devinfo *dinfo = device_get_ivars(dev);
1836 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1837 uint64_t address;
1838 uint16_t data;
1839
1840 if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) {
1841 address = msi->msi_addr;
1842 data = msi->msi_data;
1843 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
1844 address & 0xffffffff, 4);
1845 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1846 pci_write_config(dev, msi->msi_location +
1847 PCIR_MSI_ADDR_HIGH, address >> 32, 4);
1848 pci_write_config(dev, msi->msi_location +
1849 PCIR_MSI_DATA_64BIT, data, 2);
1850 } else
1851 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA,
1852 data, 2);
1853 }
1854 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1855 2);
1856}
1857
4d28e78f
SZ
1858/*
1859 * Returns true if the specified device is blacklisted because MSI
1860 * doesn't work.
1861 */
1862int
1863pci_msi_device_blacklisted(device_t dev)
1864{
1865 struct pci_quirk *q;
1866
1867 if (!pci_honor_msi_blacklist)
1868 return (0);
1869
1870 for (q = &pci_quirks[0]; q->devid; q++) {
1871 if (q->devid == pci_get_devid(dev) &&
1872 q->type == PCI_QUIRK_DISABLE_MSI)
1873 return (1);
1874 }
1875 return (0);
1876}
1877
1878/*
1879 * Determine if MSI is blacklisted globally on this sytem. Currently,
1880 * we just check for blacklisted chipsets as represented by the
1881 * host-PCI bridge at device 0:0:0. In the future, it may become
1882 * necessary to check other system attributes, such as the kenv values
1883 * that give the motherboard manufacturer and model number.
1884 */
1885static int
1886pci_msi_blacklisted(void)
1887{
1888 device_t dev;
1889
1890 if (!pci_honor_msi_blacklist)
1891 return (0);
1892
1893 /* Blacklist all non-PCI-express and non-PCI-X chipsets. */
1894 if (!(pcie_chipset || pcix_chipset))
1895 return (1);
1896
1897 dev = pci_find_bsf(0, 0, 0);
1898 if (dev != NULL)
1899 return (pci_msi_device_blacklisted(dev));
1900 return (0);
1901}
1902
/*
 * Attempt to allocate count MSI messages on start_cpuid.
 *
 * If start_cpuid < 0, then the MSI messages' target CPU will be
 * selected automatically.
 *
 * If the caller explicitly specified the MSI messages' target CPU,
 * i.e. start_cpuid >= 0, then we will try to allocate the count MSI
 * messages on the specified CPU, if the allocation fails due to MD
 * does not have enough vectors (EMSGSIZE), then we will try next
 * available CPU, until the allocation fails on all CPUs.
 *
 * EMSGSIZE will be returned, if all available CPUs does not have
 * enough vectors for the requested amount of MSI messages.  Caller
 * should either reduce the amount of MSI messages to be requested,
 * or simply giving up using MSI.
 *
 * The available SYS_RES_IRQ resources' rids, which are >= 1, are
 * returned in 'rid' array, if the allocation succeeds.
 */
int
pci_alloc_msi_method(device_t dev, device_t child, int *rid, int count,
    int start_cpuid)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	pcicfgregs *cfg = &dinfo->cfg;
	struct resource_list_entry *rle;
	int error, i, irqs[32], cpuid = 0;
	uint16_t ctrl;

	/* MSI supports at most 32 messages and only power-of-2 counts. */
	KASSERT(count != 0 && count <= 32 && powerof2(count),
	    ("invalid MSI count %d\n", count));
	KASSERT(start_cpuid < ncpus, ("invalid cpuid %d\n", start_cpuid));

	/* If rid 0 is allocated, then fail. */
	rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
	if (rle != NULL && rle->res != NULL)
		return (ENXIO);

	/* Already have allocated messages? */
	if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
		return (ENXIO);

	/* If MSI is blacklisted for this system, fail. */
	if (pci_msi_blacklisted())
		return (ENXIO);

	/* MSI capability present? */
	if (cfg->msi.msi_location == 0 || !pci_do_msi)
		return (ENODEV);

	KASSERT(count <= cfg->msi.msi_msgnum, ("large MSI count %d, max %d\n",
	    count, cfg->msi.msi_msgnum));

	if (bootverbose) {
		device_printf(child,
		    "attempting to allocate %d MSI vectors (%d supported)\n",
		    count, cfg->msi.msi_msgnum);
	}

	/* Auto-select: round-robin the starting CPU across devices. */
	if (start_cpuid < 0)
		start_cpuid = atomic_fetchadd_int(&pci_msi_cpuid, 1) % ncpus;

	/*
	 * Try each CPU in turn starting at start_cpuid; only an
	 * out-of-vectors failure (EMSGSIZE) moves on to the next CPU.
	 */
	error = EINVAL;
	for (i = 0; i < ncpus; ++i) {
		cpuid = (start_cpuid + i) % ncpus;

		error = PCIB_ALLOC_MSI(device_get_parent(dev), child, count,
		    cfg->msi.msi_msgnum, irqs, cpuid);
		if (error == 0)
			break;
		else if (error != EMSGSIZE)
			return error;
	}
	if (error)
		return error;

	/*
	 * We now have N messages mapped onto SYS_RES_IRQ resources in
	 * the irqs[] array, so add new resources starting at rid 1.
	 */
	for (i = 0; i < count; i++) {
		rid[i] = i + 1;
		resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
		    irqs[i], irqs[i], 1, cpuid);
	}

	if (bootverbose) {
		if (count == 1) {
			device_printf(child, "using IRQ %d on cpu%d for MSI\n",
			    irqs[0], cpuid);
		} else {
			int run;

			/*
			 * Be fancy and try to print contiguous runs
			 * of IRQ values as ranges.  'run' is true if
			 * we are in a range.
			 */
			device_printf(child, "using IRQs %d", irqs[0]);
			run = 0;
			for (i = 1; i < count; i++) {

				/* Still in a run? */
				if (irqs[i] == irqs[i - 1] + 1) {
					run = 1;
					continue;
				}

				/* Finish previous range. */
				if (run) {
					kprintf("-%d", irqs[i - 1]);
					run = 0;
				}

				/* Start new range. */
				kprintf(",%d", irqs[i]);
			}

			/* Unfinished range? */
			if (run)
				kprintf("-%d", irqs[count - 1]);
			kprintf(" for MSI on cpu%d\n", cpuid);
		}
	}

	/* Update control register with count. */
	ctrl = cfg->msi.msi_ctrl;
	ctrl &= ~PCIM_MSICTRL_MME_MASK;
	ctrl |= (ffs(count) - 1) << 4;	/* MME field encodes log2(count) */
	cfg->msi.msi_ctrl = ctrl;
	pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2);

	/* Update counts of alloc'd messages. */
	cfg->msi.msi_alloc = count;
	cfg->msi.msi_handlers = 0;
	return (0);
}
2041
/* Release the MSI messages associated with this device. */
int
pci_release_msi_method(device_t dev, device_t child)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct pcicfg_msi *msi = &dinfo->cfg.msi;
	struct resource_list_entry *rle;
	int i, irqs[32], cpuid = -1;

	/* Do we have any messages to release? */
	if (msi->msi_alloc == 0)
		return (ENODEV);
	KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages"));

	/* Make sure none of the resources are allocated. */
	if (msi->msi_handlers > 0)
		return (EBUSY);
	for (i = 0; i < msi->msi_alloc; i++) {
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
		KASSERT(rle != NULL, ("missing MSI resource"));
		if (rle->res != NULL)
			return (EBUSY);
		/* All allocated messages must target the same CPU. */
		if (i == 0) {
			cpuid = rle->cpuid;
			KASSERT(cpuid >= 0 && cpuid < ncpus,
			    ("invalid MSI target cpuid %d\n", cpuid));
		} else {
			KASSERT(rle->cpuid == cpuid,
			    ("MSI targets different cpus, "
			     "was cpu%d, now cpu%d", cpuid, rle->cpuid));
		}
		irqs[i] = rle->start;
	}

	/* Update control register with 0 count. */
	KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE),
	    ("%s: MSI still enabled", __func__));
	msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK;
	pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
	    msi->msi_ctrl, 2);

	/* Release the messages. */
	PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs,
	    cpuid);
	for (i = 0; i < msi->msi_alloc; i++)
		resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);

	/* Update alloc count. */
	msi->msi_alloc = 0;
	msi->msi_addr = 0;
	msi->msi_data = 0;
	return (0);
}
2095
2096/*
2097 * Return the max supported MSI messages this device supports.
2098 * Basically, assuming the MD code can alloc messages, this function
2099 * should return the maximum value that pci_alloc_msi() can return.
2100 * Thus, it is subject to the tunables, etc.
2101 */
2102int
2103pci_msi_count_method(device_t dev, device_t child)
2104{
2105 struct pci_devinfo *dinfo = device_get_ivars(child);
2106 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2107
2108 if (pci_do_msi && msi->msi_location != 0)
2109 return (msi->msi_msgnum);
2110 return (0);
2111}
2112
2113/* kfree pcicfgregs structure and all depending data structures */
2114
2115int
2116pci_freecfg(struct pci_devinfo *dinfo)
2117{
2118 struct devlist *devlist_head;
2119 int i;
2120
2121 devlist_head = &pci_devq;
2122
2123 if (dinfo->cfg.vpd.vpd_reg) {
2124 kfree(dinfo->cfg.vpd.vpd_ident, M_DEVBUF);
2125 for (i = 0; i < dinfo->cfg.vpd.vpd_rocnt; i++)
2126 kfree(dinfo->cfg.vpd.vpd_ros[i].value, M_DEVBUF);
2127 kfree(dinfo->cfg.vpd.vpd_ros, M_DEVBUF);
2128 for (i = 0; i < dinfo->cfg.vpd.vpd_wcnt; i++)
2129 kfree(dinfo->cfg.vpd.vpd_w[i].value, M_DEVBUF);
2130 kfree(dinfo->cfg.vpd.vpd_w, M_DEVBUF);
2131 }
2132 STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links);
2133 kfree(dinfo, M_DEVBUF);
2134
2135 /* increment the generation count */
2136 pci_generation++;
2137
2138 /* we're losing one device */
2139 pci_numdevs--;
2140 return (0);
2141}
2142
2143/*
2144 * PCI power manangement
2145 */
2146int
2147pci_set_powerstate_method(device_t dev, device_t child, int state)
2148{
2149 struct pci_devinfo *dinfo = device_get_ivars(child);
2150 pcicfgregs *cfg = &dinfo->cfg;
f4754a59
HT
2151 uint16_t status;
2152 int result, oldstate, highest, delay;
984263bc 2153
4d28e78f 2154 if (cfg->pp.pp_cap == 0)
f4754a59
HT
2155 return (EOPNOTSUPP);
2156
2157 /*
2158 * Optimize a no state change request away. While it would be OK to
2159 * write to the hardware in theory, some devices have shown odd
2160 * behavior when going from D3 -> D3.
2161 */
2162 oldstate = pci_get_powerstate(child);
2163 if (oldstate == state)
2164 return (0);
2165
2166 /*
2167 * The PCI power management specification states that after a state
2168 * transition between PCI power states, system software must
2169 * guarantee a minimal delay before the function accesses the device.
2170 * Compute the worst case delay that we need to guarantee before we
2171 * access the device. Many devices will be responsive much more
2172 * quickly than this delay, but there are some that don't respond
2173 * instantly to state changes. Transitions to/from D3 state require
2174 * 10ms, while D2 requires 200us, and D0/1 require none. The delay
2175 * is done below with DELAY rather than a sleeper function because
2176 * this function can be called from contexts where we cannot sleep.
2177 */
2178 highest = (oldstate > state) ? oldstate : state;
2179 if (highest == PCI_POWERSTATE_D3)
2180 delay = 10000;
2181 else if (highest == PCI_POWERSTATE_D2)
2182 delay = 200;
2183 else
2184 delay = 0;
4d28e78f 2185 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2)
f4754a59
HT
2186 & ~PCIM_PSTAT_DMASK;
2187 result = 0;
2188 switch (state) {
2189 case PCI_POWERSTATE_D0:
2190 status |= PCIM_PSTAT_D0;
2191 break;
2192 case PCI_POWERSTATE_D1:
4d28e78f 2193 if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0)
f4754a59
HT
2194 return (EOPNOTSUPP);
2195 status |= PCIM_PSTAT_D1;
2196 break;
2197 case PCI_POWERSTATE_D2:
4d28e78f 2198 if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0)
f4754a59
HT
2199 return (EOPNOTSUPP);
2200 status |= PCIM_PSTAT_D2;
2201 break;
2202 case PCI_POWERSTATE_D3:
2203 status |= PCIM_PSTAT_D3;
2204 break;
2205 default:
2206 return (EINVAL);
984263bc 2207 }
f4754a59
HT
2208
2209 if (bootverbose)
2210 kprintf(
4d28e78f
SZ
2211 "pci%d:%d:%d:%d: Transition from D%d to D%d\n",
2212 dinfo->cfg.domain, dinfo->cfg.bus, dinfo->cfg.slot,
2213 dinfo->cfg.func, oldstate, state);
f4754a59 2214
4d28e78f 2215 PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2);
f4754a59
HT
2216 if (delay)
2217 DELAY(delay);
2218 return (0);
984263bc
MD
2219}
2220
e126caf1 2221int
984263bc
MD
2222pci_get_powerstate_method(device_t dev, device_t child)
2223{
2224 struct pci_devinfo *dinfo = device_get_ivars(child);
2225 pcicfgregs *cfg = &dinfo->cfg;
f4754a59 2226 uint16_t status;
984263bc
MD
2227 int result;
2228
4d28e78f
SZ
2229 if (cfg->pp.pp_cap != 0) {
2230 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2);
984263bc
MD
2231 switch (status & PCIM_PSTAT_DMASK) {
2232 case PCIM_PSTAT_D0:
2233 result = PCI_POWERSTATE_D0;
2234 break;
2235 case PCIM_PSTAT_D1:
2236 result = PCI_POWERSTATE_D1;
2237 break;
2238 case PCIM_PSTAT_D2:
2239 result = PCI_POWERSTATE_D2;
2240 break;
2241 case PCIM_PSTAT_D3:
2242 result = PCI_POWERSTATE_D3;
2243 break;
2244 default:
2245 result = PCI_POWERSTATE_UNKNOWN;
2246 break;
2247 }
2248 } else {
2249 /* No support, device is always at D0 */
2250 result = PCI_POWERSTATE_D0;
2251 }
f4754a59 2252 return (result);
984263bc
MD
2253}
2254
2255/*
2256 * Some convenience functions for PCI device drivers.
2257 */
2258
2259static __inline void
4d28e78f 2260pci_set_command_bit(device_t dev, device_t child, uint16_t bit)
984263bc 2261{
4d28e78f 2262 uint16_t command;
984263bc 2263
4d28e78f
SZ
2264 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2265 command |= bit;
2266 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
984263bc
MD
2267}
2268
2269static __inline void
4d28e78f
SZ
2270pci_clear_command_bit(device_t dev, device_t child, uint16_t bit)
2271{
2272 uint16_t command;
984263bc 2273
4d28e78f
SZ
2274 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2275 command &= ~bit;
2276 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
984263bc
MD
2277}
2278
4d28e78f
SZ
/* Enable bus-mastering (DMA) for 'child' via the PCI command register. */
int
pci_enable_busmaster_method(device_t dev, device_t child)
{
	pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
	return (0);
}
984263bc 2285
4d28e78f
SZ
/* Disable bus-mastering (DMA) for 'child' via the PCI command register. */
int
pci_disable_busmaster_method(device_t dev, device_t child)
{
	pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
	return (0);
}
984263bc 2292
4d28e78f
SZ
2293int
2294pci_enable_io_method(device_t dev, device_t child, int space)
ed1bd994 2295{
4d28e78f
SZ
2296 uint16_t command;
2297 uint16_t bit;
2298 char *error;
ed1bd994 2299
4d28e78f
SZ
2300 bit = 0;
2301 error = NULL;
2302
2303 switch(space) {
2304 case SYS_RES_IOPORT:
2305 bit = PCIM_CMD_PORTEN;
2306 error = "port";
ed1bd994 2307 break;
4d28e78f
SZ
2308 case SYS_RES_MEMORY:
2309 bit = PCIM_CMD_MEMEN;
2310 error = "memory";
ed1bd994
MD
2311 break;
2312 default:
4d28e78f 2313 return (EINVAL);
ed1bd994 2314 }
4d28e78f
SZ
2315 pci_set_command_bit(dev, child, bit);
2316 /* Some devices seem to need a brief stall here, what do to? */
2317 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2318 if (command & bit)
2319 return (0);
2320 device_printf(child, "failed to enable %s mapping!\n", error);
2321 return (ENXIO);
ed1bd994 2322}
984263bc 2323
4d28e78f
SZ
2324int
2325pci_disable_io_method(device_t dev, device_t child, int space)
b4c0a845 2326{
4d28e78f
SZ
2327 uint16_t command;
2328 uint16_t bit;
2329 char *error;
b4c0a845 2330
4d28e78f
SZ
2331 bit = 0;
2332 error = NULL;
b4c0a845 2333
4d28e78f
SZ
2334 switch(space) {
2335 case SYS_RES_IOPORT:
2336 bit = PCIM_CMD_PORTEN;
2337 error = "port";
b4c0a845 2338 break;
4d28e78f
SZ
2339 case SYS_RES_MEMORY:
2340 bit = PCIM_CMD_MEMEN;
2341 error = "memory";
b4c0a845
SZ
2342 break;
2343 default:
4d28e78f 2344 return (EINVAL);
b4c0a845 2345 }
4d28e78f
SZ
2346 pci_clear_command_bit(dev, child, bit);
2347 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2348 if (command & bit) {
2349 device_printf(child, "failed to disable %s mapping!\n", error);
2350 return (ENXIO);
b4c0a845 2351 }
4d28e78f 2352 return (0);
b4c0a845
SZ
2353}
2354
4d28e78f
SZ
2355/*
2356 * New style pci driver. Parent device is either a pci-host-bridge or a
2357 * pci-pci-bridge. Both kinds are represented by instances of pcib.
2358 */
2359
/*
 * Dump a human-readable summary of a newly found device's config
 * space: IDs, location, class, command/status, latency values and
 * the MSI/MSI-X/power-management/PCI Express capabilities.  Only
 * produces output when booting verbosely.
 */
void
pci_print_verbose(struct pci_devinfo *dinfo)
{

	if (bootverbose) {
		pcicfgregs *cfg = &dinfo->cfg;

		kprintf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n",
		    cfg->vendor, cfg->device, cfg->revid);
		kprintf("\tdomain=%d, bus=%d, slot=%d, func=%d\n",
		    cfg->domain, cfg->bus, cfg->slot, cfg->func);
		kprintf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n",
		    cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype,
		    cfg->mfdev);
		kprintf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n",
		    cfg->cmdreg, cfg->statreg, cfg->cachelnsz);
		kprintf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n",
		    cfg->lattimer, cfg->lattimer * 30, cfg->mingnt,
		    cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250);
		if (cfg->intpin > 0)
			kprintf("\tintpin=%c, irq=%d\n",
			    cfg->intpin +'a' -1, cfg->intline);
		/* Power management capability, if present. */
		if (cfg->pp.pp_cap) {
			uint16_t status;

			status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2);
			kprintf("\tpowerspec %d supports D0%s%s D3 current D%d\n",
			    cfg->pp.pp_cap & PCIM_PCAP_SPEC,
			    cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "",
			    cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "",
			    status & PCIM_PSTAT_DMASK);
		}
		/* MSI capability, if present. */
		if (cfg->msi.msi_location) {
			int ctrl;

			ctrl = cfg->msi.msi_ctrl;
			kprintf("\tMSI supports %d message%s%s%s\n",
			    cfg->msi.msi_msgnum,
			    (cfg->msi.msi_msgnum == 1) ? "" : "s",
			    (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "",
			    (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":"");
		}
		/* MSI-X capability: report which BAR(s) hold table and PBA. */
		if (cfg->msix.msix_location) {
			kprintf("\tMSI-X supports %d message%s ",
			    cfg->msix.msix_msgnum,
			    (cfg->msix.msix_msgnum == 1) ? "" : "s");
			if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar)
				kprintf("in map 0x%x\n",
				    cfg->msix.msix_table_bar);
			else
				kprintf("in maps 0x%x and 0x%x\n",
				    cfg->msix.msix_table_bar,
				    cfg->msix.msix_pba_bar);
		}
		pci_print_verbose_expr(cfg);
	}
}
2417
d85e7311
SZ
/*
 * Verbose-boot helper: print the PCI Express capability summary for a
 * device (capability version, port type and, where a slot is
 * implemented, the slot capabilities).  Silent when not booting
 * verbosely or when the device has no PCI Express capability.
 */
static void
pci_print_verbose_expr(const pcicfgregs *cfg)
{
	const struct pcicfg_expr *expr = &cfg->expr;
	const char *port_name;
	uint16_t port_type;

	if (!bootverbose)
		return;

	if (expr->expr_ptr == 0) /* No PCI Express capability */
		return;

	kprintf("\tPCI Express ver.%d cap=0x%04x",
	    expr->expr_cap & PCIEM_CAP_VER_MASK, expr->expr_cap);
	/* Only version-1 capability layouts are decoded further. */
	if ((expr->expr_cap & PCIEM_CAP_VER_MASK) != PCIEM_CAP_VER_1)
		goto back;

	port_type = expr->expr_cap & PCIEM_CAP_PORT_TYPE;

	/* Translate the port type field into a short tag for the log. */
	switch (port_type) {
	case PCIE_END_POINT:
		port_name = "DEVICE";
		break;
	case PCIE_LEG_END_POINT:
		port_name = "LEGDEV";
		break;
	case PCIE_ROOT_PORT:
		port_name = "ROOT";
		break;
	case PCIE_UP_STREAM_PORT:
		port_name = "UPSTREAM";
		break;
	case PCIE_DOWN_STREAM_PORT:
		port_name = "DOWNSTRM";
		break;
	case PCIE_PCIE2PCI_BRIDGE:
		port_name = "PCIE2PCI";
		break;
	case PCIE_PCI2PCIE_BRIDGE:
		port_name = "PCI2PCIE";
		break;
	default:
		port_name = NULL;
		break;
	}
	/* Root/downstream ports without an implemented slot get no tag. */
	if ((port_type == PCIE_ROOT_PORT ||
	     port_type == PCIE_DOWN_STREAM_PORT) &&
	    !(expr->expr_cap & PCIEM_CAP_SLOT_IMPL))
		port_name = NULL;
	if (port_name != NULL)
		kprintf("[%s]", port_name);

	if (pcie_slotimpl(cfg)) {
		kprintf(", slotcap=0x%08x", expr->expr_slotcap);
		if (expr->expr_slotcap & PCIEM_SLTCAP_HP_CAP)
			kprintf("[HOTPLUG]");
	}
back:
	kprintf("\n");
}
2479
984263bc 2480static int
4a5a2d63 2481pci_porten(device_t pcib, int b, int s, int f)
984263bc 2482{
4a5a2d63
JS
2483 return (PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2)
2484 & PCIM_CMD_PORTEN) != 0;
984263bc
MD
2485}
2486
2487static int
4a5a2d63 2488pci_memen(device_t pcib, int b, int s, int f)
984263bc 2489{
4a5a2d63
JS
2490 return (PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2)
2491 & PCIM_CMD_MEMEN) != 0;
984263bc
MD
2492}
2493
/*
 * Add a resource based on a pci map register. Return 1 if the map
 * register is a 32bit map register or 2 if it is a 64bit register.
 *
 * The BAR is sized with the classic probe: save the current value,
 * write all-ones, read back the size mask, then restore the original
 * value.  The decoded range is added to 'rl' and, when possible,
 * pre-reserved from the parent bus so pci_alloc_resource() can later
 * hand it out.  'force' adds even zero/disabled BARs; 'prefetch'
 * requests RF_PREFETCHABLE (also set automatically for prefetchable
 * memory BARs).
 */
static int
pci_add_map(device_t pcib, device_t bus, device_t dev,
    int b, int s, int f, int reg, struct resource_list *rl, int force,
    int prefetch)
{
	uint32_t map;
	pci_addr_t base;
	pci_addr_t start, end, count;
	uint8_t ln2size;
	uint8_t ln2range;
	uint32_t testval;
	uint16_t cmd;
	int type;
	int barlen;
	struct resource *res;

	/* Size the BAR: save, write ~0, read the size mask, restore. */
	map = PCIB_READ_CONFIG(pcib, b, s, f, reg, 4);
	PCIB_WRITE_CONFIG(pcib, b, s, f, reg, 0xffffffff, 4);
	testval = PCIB_READ_CONFIG(pcib, b, s, f, reg, 4);
	PCIB_WRITE_CONFIG(pcib, b, s, f, reg, map, 4);

	if (PCI_BAR_MEM(map)) {
		type = SYS_RES_MEMORY;
		if (map & PCIM_BAR_MEM_PREFETCH)
			prefetch = 1;
	} else
		type = SYS_RES_IOPORT;
	ln2size = pci_mapsize(testval);
	ln2range = pci_maprange(testval);
	base = pci_mapbase(map);
	barlen = ln2range == 64 ? 2 : 1;

	/*
	 * For I/O registers, if bottom bit is set, and the next bit up
	 * isn't clear, we know we have a BAR that doesn't conform to the
	 * spec, so ignore it.  Also, sanity check the size of the data
	 * areas to the type of memory involved.  Memory must be at least
	 * 16 bytes in size, while I/O ranges must be at least 4.
	 */
	if (PCI_BAR_IO(testval) && (testval & PCIM_BAR_IO_RESERVED) != 0)
		return (barlen);
	if ((type == SYS_RES_MEMORY && ln2size < 4) ||
	    (type == SYS_RES_IOPORT && ln2size < 2))
		return (barlen);

	if (ln2range == 64)
		/* Read the other half of a 64bit map register */
		base |= (uint64_t) PCIB_READ_CONFIG(pcib, b, s, f, reg + 4, 4) << 32;
	if (bootverbose) {
		kprintf("\tmap[%02x]: type %s, range %2d, base %#jx, size %2d",
		    reg, pci_maptype(map), ln2range, (uintmax_t)base, ln2size);
		if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f))
			kprintf(", port disabled\n");
		else if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f))
			kprintf(", memory disabled\n");
		else
			kprintf(", enabled\n");
	}

	/*
	 * If base is 0, then we have problems.  It is best to ignore
	 * such entries for the moment.  These will be allocated later if
	 * the driver specifically requests them.  However, some
	 * removable busses look better when all resources are allocated,
	 * so allow '0' to be overriden.
	 *
	 * Similarly treat maps whose values is the same as the test value
	 * read back.  These maps have had all f's written to them by the
	 * BIOS in an attempt to disable the resources.
	 */
	if (!force && (base == 0 || map == testval))
		return (barlen);
	if ((u_long)base != base) {
		/* The BAR address does not fit in this platform's u_long. */
		device_printf(bus,
		    "pci%d:%d:%d:%d bar %#x too many address bits",
		    pci_get_domain(dev), b, s, f, reg);
		return (barlen);
	}

	/*
	 * This code theoretically does the right thing, but has
	 * undesirable side effects in some cases where peripherals
	 * respond oddly to having these bits enabled.  Let the user
	 * be able to turn them off (since pci_enable_io_modes is 1 by
	 * default).
	 */
	if (pci_enable_io_modes) {
		/* Turn on resources that have been left off by a lazy BIOS */
		if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f)) {
			cmd = PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2);
			cmd |= PCIM_CMD_PORTEN;
			PCIB_WRITE_CONFIG(pcib, b, s, f, PCIR_COMMAND, cmd, 2);
		}
		if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f)) {
			cmd = PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2);
			cmd |= PCIM_CMD_MEMEN;
			PCIB_WRITE_CONFIG(pcib, b, s, f, PCIR_COMMAND, cmd, 2);
		}
	} else {
		if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f))
			return (barlen);
		if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f))
			return (barlen);
	}

	count = 1 << ln2size;
	if (base == 0 || base == pci_mapbase(testval)) {
		start = 0;	/* Let the parent decide. */
		end = ~0ULL;
	} else {
		start = base;
		end = base + (1 << ln2size) - 1;
	}
	resource_list_add(rl, type, reg, start, end, count, -1);

	/*
	 * Try to allocate the resource for this BAR from our parent
	 * so that this resource range is already reserved.  The
	 * driver for this device will later inherit this resource in
	 * pci_alloc_resource().
	 */
	res = resource_list_alloc(rl, bus, dev, type, &reg, start, end, count,
	    prefetch ? RF_PREFETCHABLE : 0, -1);
	if (res == NULL) {
		/*
		 * If the allocation fails, delete the resource list
		 * entry to force pci_alloc_resource() to allocate
		 * resources from the parent.
		 */
		resource_list_delete(rl, type, reg);
#ifdef PCI_BAR_CLEAR
		/* Clear the BAR */
		start = 0;
#else	/* !PCI_BAR_CLEAR */
		/*
		 * Don't clear BAR here.  Some BIOS lists HPET as a
		 * PCI function, clearing the BAR causes HPET timer
		 * stop ticking.
		 */
		if (bootverbose) {
			kprintf("pci:%d:%d:%d: resource reservation failed "
			    "%#jx - %#jx\n", b, s, f,
			    (intmax_t)start, (intmax_t)end);
		}
		return (barlen);
#endif	/* PCI_BAR_CLEAR */
	} else {
		start = rman_get_start(res);
	}
	/* Write the (possibly parent-assigned) address back to the BAR. */
	pci_write_config(dev, reg, start, 4);
	if (ln2range == 64)
		pci_write_config(dev, reg + 4, start >> 32, 4);
	return (barlen);
}
2652
/*
 * For ATA devices we need to decide early what addressing mode to use.
 * Legacy demands that the primary and secondary ATA ports sits on the
 * same addresses that old ISA hardware did. This dictates that we use
 * those addresses and ignore the BAR's if we cannot set PCI native
 * addressing mode.
 */
static void
pci_ata_maps(device_t pcib, device_t bus, device_t dev, int b,
    int s, int f, struct resource_list *rl, int force, uint32_t prefetchmask)
{
	int rid, type, progif;
#if 0
	/* if this device supports PCI native addressing use it */
	progif = pci_read_config(dev, PCIR_PROGIF, 1);
	if ((progif & 0x8a) == 0x8a) {
		if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) &&
		    pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) {
			kprintf("Trying ATA native PCI addressing mode\n");
			pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1);
		}
	}
#endif
	progif = pci_read_config(dev, PCIR_PROGIF, 1);
	type = SYS_RES_IOPORT;
	/*
	 * Primary channel: use BAR 0/1 in native mode, otherwise the
	 * fixed legacy ISA addresses (0x1f0-0x1f7 / 0x3f6).
	 */
	if (progif & PCIP_STORAGE_IDE_MODEPRIM) {
		pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(0), rl, force,
		    prefetchmask & (1 << 0));
		pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(1), rl, force,
		    prefetchmask & (1 << 1));
	} else {
		rid = PCIR_BAR(0);
		resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8, -1);
		resource_list_alloc(rl, bus, dev, type, &rid, 0x1f0, 0x1f7, 8,
		    0, -1);
		rid = PCIR_BAR(1);
		resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1, -1);
		resource_list_alloc(rl, bus, dev, type, &rid, 0x3f6, 0x3f6, 1,
		    0, -1);
	}
	/*
	 * Secondary channel: use BAR 2/3 in native mode, otherwise the
	 * fixed legacy ISA addresses (0x170-0x177 / 0x376).
	 */
	if (progif & PCIP_STORAGE_IDE_MODESEC) {
		pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(2), rl, force,
		    prefetchmask & (1 << 2));
		pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(3), rl, force,
		    prefetchmask & (1 << 3));
	} else {
		rid = PCIR_BAR(2);
		resource_list_add(rl, type, rid, 0x170, 0x177, 8, -1);
		resource_list_alloc(rl, bus, dev, type, &rid, 0x170, 0x177, 8,
		    0, -1);
		rid = PCIR_BAR(3);
		resource_list_add(rl, type, rid, 0x376, 0x376, 1, -1);
		resource_list_alloc(rl, bus, dev, type, &rid, 0x376, 0x376, 1,
		    0, -1);
	}
	/* BARs 4/5 (bus-master DMA etc.) are always probed normally. */
	pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(4), rl, force,
	    prefetchmask & (1 << 4));
	pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(5), rl, force,
	    prefetchmask & (1 << 5));
}
201eb0a7 2713
/*
 * Determine the IRQ for a device's INTx pin and record it as the rid 0
 * SYS_RES_IRQ resource.  The IRQ is taken, in order of preference,
 * from a user tunable, from bus interrupt routing (forced when
 * 'force_route' is set), or from the device's intline register.
 */
static void
pci_assign_interrupt(device_t bus, device_t dev, int force_route)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	pcicfgregs *cfg = &dinfo->cfg;
	char tunable_name[64];
	int irq;

	/* Has to have an intpin to have an interrupt. */
	if (cfg->intpin == 0)
		return;

	/* Let the user override the IRQ with a tunable. */
	irq = PCI_INVALID_IRQ;
	ksnprintf(tunable_name, sizeof(tunable_name),
	    "hw.pci%d.%d.%d.INT%c.irq",
	    cfg->domain, cfg->bus, cfg->slot, cfg->intpin + 'A' - 1);
	if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0))
		irq = PCI_INVALID_IRQ;

	/*
	 * If we didn't get an IRQ via the tunable, then we either use the
	 * IRQ value in the intline register or we ask the bus to route an
	 * interrupt for us.  If force_route is true, then we only use the
	 * value in the intline register if the bus was unable to assign an
	 * IRQ.
	 */
	if (!PCI_INTERRUPT_VALID(irq)) {
		if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route)
			irq = PCI_ASSIGN_INTERRUPT(bus, dev);
		if (!PCI_INTERRUPT_VALID(irq))
			irq = cfg->intline;
	}

	/* If after all that we don't have an IRQ, just bail. */
	if (!PCI_INTERRUPT_VALID(irq))
		return;

	/* Update the config register if it changed. */
	if (irq != cfg->intline) {
		cfg->intline = irq;
		pci_write_config(dev, PCIR_INTLINE, irq, 1);
	}

	/* Add this IRQ as rid 0 interrupt resource. */
	resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1,
	    machintr_legacy_intr_cpuid(irq));
}
2762
/*
 * Populate the resource list for 'dev' from its BARs, apply quirk
 * resources, and assign its INTx interrupt.  ATA/IDE devices in
 * legacy mode get the fixed ISA-compatible addresses instead of
 * their BAR contents.
 */
void
pci_add_resources(device_t pcib, device_t bus, device_t dev, int force, uint32_t prefetchmask)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	pcicfgregs *cfg = &dinfo->cfg;
	struct resource_list *rl = &dinfo->resources;
	struct pci_quirk *q;
	int b, i, f, s;

	b = cfg->bus;
	s = cfg->slot;
	f = cfg->func;

	/* ATA devices needs special map treatment */
	if ((pci_get_class(dev) == PCIC_STORAGE) &&
	    (pci_get_subclass(dev) == PCIS_STORAGE_IDE) &&
	    ((pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV) ||
	     (!pci_read_config(dev, PCIR_BAR(0), 4) &&
	      !pci_read_config(dev, PCIR_BAR(2), 4))) )
		pci_ata_maps(pcib, bus, dev, b, s, f, rl, force, prefetchmask);
	else
		/* pci_add_map() returns 1 or 2 depending on BAR width. */
		for (i = 0; i < cfg->nummaps;)
			i += pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(i),
			    rl, force, prefetchmask & (1 << i));

	/*
	 * Add additional, quirked resources.
	 */
	for (q = &pci_quirks[0]; q->devid; q++) {
		if (q->devid == ((cfg->device << 16) | cfg->vendor)
		    && q->type == PCI_QUIRK_MAP_REG)
			pci_add_map(pcib, bus, dev, b, s, f, q->arg1, rl,
			    force, 0);
	}

	if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) {
		/*
		 * Try to re-route interrupts. Sometimes the BIOS or
		 * firmware may leave bogus values in these registers.
		 * If the re-route fails, then just stick with what we
		 * have.
		 */
		pci_assign_interrupt(bus, dev, 1);
	}
}
2808
/*
 * Enumerate all slots/functions on PCI bus 'busno' in 'domain' and add
 * a child device for every function that responds.  'dinfo_size'
 * allows subclassed busses to allocate a larger per-device info
 * structure; it must be at least sizeof(struct pci_devinfo).
 */
void
pci_add_children(device_t dev, int domain, int busno, size_t dinfo_size)
{
#define	REG(n, w)	PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
	device_t pcib = device_get_parent(dev);
	struct pci_devinfo *dinfo;
	int maxslots;
	int s, f, pcifunchigh;
	uint8_t hdrtype;

	KASSERT(dinfo_size >= sizeof(struct pci_devinfo),
	    ("dinfo_size too small"));
	maxslots = PCIB_MAXSLOTS(pcib);
	for (s = 0; s <= maxslots; s++) {
		pcifunchigh = 0;
		f = 0;
		DELAY(1);
		hdrtype = REG(PCIR_HDRTYPE, 1);
		/* Skip slots that return an invalid header type. */
		if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
			continue;
		/* Multi-function devices expose up to PCI_FUNCMAX functions. */
		if (hdrtype & PCIM_MFDEV)
			pcifunchigh = PCI_FUNCMAX;
		for (f = 0; f <= pcifunchigh; f++) {
			dinfo = pci_read_device(pcib, domain, busno, s, f,
			    dinfo_size);
			if (dinfo != NULL) {
				pci_add_child(dev, dinfo);
			}
		}
	}
#undef REG
}
2841
/*
 * Attach one enumerated PCI function to the bus: create the newbus
 * child, hook up its devinfo, snapshot its config space, then add its
 * BAR/IRQ resources.  The save/restore pair records the boot-time
 * register state before the resource scan touches the device.
 */
void
pci_add_child(device_t bus, struct pci_devinfo *dinfo)
{
	device_t pcib;

	pcib = device_get_parent(bus);
	dinfo->cfg.dev = device_add_child(bus, NULL, -1);
	device_set_ivars(dinfo->cfg.dev, dinfo);
	resource_list_init(&dinfo->resources);
	pci_cfg_save(dinfo->cfg.dev, dinfo, 0);
	pci_cfg_restore(dinfo->cfg.dev, dinfo);
	pci_print_verbose(dinfo);
	pci_add_resources(pcib, bus, dinfo->cfg.dev, 0, 0);
}
2856
/* Generic PCI bus probe; always matches but at very low priority. */
static int
pci_probe(device_t dev)
{
	device_set_desc(dev, "PCI bus");

	/* Allow other subclasses to override this driver. */
	return (-1000);
}
2865
/*
 * Attach the PCI bus: query the parent bridge for our domain/bus
 * numbers, enumerate all children, then let the generic bus code
 * probe and attach them.
 */
static int
pci_attach(device_t dev)
{
	int busno, domain;

	/*
	 * Since there can be multiple independently numbered PCI
	 * busses on systems with multiple PCI domains, we can't use
	 * the unit number to decide which bus we are probing.  We ask
	 * the parent pcib what our domain and bus numbers are.
	 */
	domain = pcib_get_domain(dev);
	busno = pcib_get_bus(dev);
	if (bootverbose)
		device_printf(dev, "domain=%d, physical bus=%d\n",
		    domain, busno);

	pci_add_children(dev, domain, busno, sizeof(struct pci_devinfo));

	return (bus_generic_attach(dev));
}
2887
/*
 * Bus suspend method: snapshot every child's config space, suspend the
 * children, and then (when ACPI-driven power resume is enabled) drop
 * attached type 0 children into the sleep state ACPI recommends
 * (defaulting to D3).
 */
int
pci_suspend(device_t dev)
{
	int dstate, error, i, numdevs;
	device_t acpi_dev, child, *devlist;
	struct pci_devinfo *dinfo;

	/*
	 * Save the PCI configuration space for each child and set the
	 * device in the appropriate power state for this sleep state.
	 */
	acpi_dev = NULL;
	if (pci_do_power_resume)
		acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
	device_get_children(dev, &devlist, &numdevs);
	for (i = 0; i < numdevs; i++) {
		child = devlist[i];
		dinfo = (struct pci_devinfo *) device_get_ivars(child);
		pci_cfg_save(child, dinfo, 0);
	}

	/* Suspend devices before potentially powering them down. */
	error = bus_generic_suspend(dev);
	if (error) {
		kfree(devlist, M_TEMP);
		return (error);
	}

	/*
	 * Always set the device to D3.  If ACPI suggests a different
	 * power state, use it instead.  If ACPI is not present, the
	 * firmware is responsible for managing device power.  Skip
	 * children who aren't attached since they are powered down
	 * separately.  Only manage type 0 devices for now.
	 */
	for (i = 0; acpi_dev && i < numdevs; i++) {
		child = devlist[i];
		dinfo = (struct pci_devinfo *) device_get_ivars(child);
		if (device_is_attached(child) && dinfo->cfg.hdrtype == 0) {
			dstate = PCI_POWERSTATE_D3;
			ACPI_PWR_FOR_SLEEP(acpi_dev, child, &dstate);
			pci_set_powerstate(child, dstate);
		}
	}
	kfree(devlist, M_TEMP);
	return (0);
}
2935
4d28e78f
SZ
/*
 * Bus resume method: power each child back to D0 (via ACPI when
 * available), restore its saved config space, then resume the
 * children through the generic bus code.
 */
int
pci_resume(device_t dev)
{
	int i, numdevs;
	device_t acpi_dev, child, *devlist;
	struct pci_devinfo *dinfo;

	/*
	 * Set each child to D0 and restore its PCI configuration space.
	 */
	acpi_dev = NULL;
	if (pci_do_power_resume)
		acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
	device_get_children(dev, &devlist, &numdevs);
	for (i = 0; i < numdevs; i++) {
		/*
		 * Notify ACPI we're going to D0 but ignore the result.  If
		 * ACPI is not present, the firmware is responsible for
		 * managing device power.  Only manage type 0 devices for now.
		 */
		child = devlist[i];
		dinfo = (struct pci_devinfo *) device_get_ivars(child);
		if (acpi_dev && device_is_attached(child) &&
		    dinfo->cfg.hdrtype == 0) {
			ACPI_PWR_FOR_SLEEP(acpi_dev, child, NULL);
			pci_set_powerstate(child, PCI_POWERSTATE_D0);
		}

		/* Now the device is powered up, restore its config space. */
		pci_cfg_restore(child, dinfo);
	}
	kfree(devlist, M_TEMP);
	return (bus_generic_resume(dev));
}
2970
2971static void
2972pci_load_vendor_data(void)
2973{
2974 caddr_t vendordata, info;
2975
2976 if ((vendordata = preload_search_by_type("pci_vendor_data")) != NULL) {
2977 info = preload_search_info(vendordata, MODINFO_ADDR);
2978 pci_vendordata = *(char **)info;
2979 info = preload_search_info(vendordata, MODINFO_SIZE);
2980 pci_vendordata_size = *(size_t *)info;
2981 /* terminate the database */
2982 pci_vendordata[pci_vendordata_size] = '\n';
2983 }
2984}
2985
/*
 * Bus callback invoked when a new PCI driver is loaded: re-probe every
 * child that currently has no driver.  Config space is restored before
 * the probe and saved again (powering the device down) if the probe
 * still finds no driver.
 */
void
pci_driver_added(device_t dev, driver_t *driver)
{
	int numdevs;
	device_t *devlist;
	device_t child;
	struct pci_devinfo *dinfo;
	int i;

	if (bootverbose)
		device_printf(dev, "driver added\n");
	DEVICE_IDENTIFY(driver, dev);
	device_get_children(dev, &devlist, &numdevs);
	for (i = 0; i < numdevs; i++) {
		child = devlist[i];
		/* Only re-probe children without an attached driver. */
		if (device_get_state(child) != DS_NOTPRESENT)
			continue;
		dinfo = device_get_ivars(child);
		pci_print_verbose(dinfo);
		if (bootverbose)
			kprintf("pci%d:%d:%d:%d: reprobing on driver added\n",
			    dinfo->cfg.domain, dinfo->cfg.bus, dinfo->cfg.slot,
			    dinfo->cfg.func);
		pci_cfg_restore(child, dinfo);
		if (device_probe_and_attach(child) != 0)
			pci_cfg_save(child, dinfo, 1);
	}
	kfree(devlist, M_TEMP);
}
3015
11a49859
SZ
/* Bus callback when a child's driver detaches. */
static void
pci_child_detached(device_t parent __unused, device_t child)
{
	/* Turn child's power off */
	pci_cfg_save(child, device_get_ivars(child), 1);
}
3022
4d28e78f
SZ
/*
 * Install an interrupt handler for CHILD on resource IRQ.
 *
 * The generic bus method installs the handler first.  For direct
 * children the MSI/MSI-X bookkeeping is then updated:
 *   - rid 0 is the legacy INTx interrupt, so INTx decoding is enabled;
 *   - any other rid identifies an MSI or MSI-X message.  The first time
 *     a handler is attached to a message, the parent bridge is asked to
 *     map it to an address/data pair, the message is programmed into
 *     the device, and INTx is masked.  Per-message handler counts are
 *     kept so teardown knows when to disable the message again.
 *
 * On a mapping failure the already-installed handler is torn down and
 * the error is returned.
 */
int
pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags,
    driver_intr_t *intr, void *arg, void **cookiep, lwkt_serialize_t serializer)
{
        struct pci_devinfo *dinfo;
        struct msix_table_entry *mte;
        struct msix_vector *mv;
        uint64_t addr;
        uint32_t data;
        int rid, error;
        void *cookie;

        error = bus_generic_setup_intr(dev, child, irq, flags, intr,
            arg, &cookie, serializer);
        if (error)
                return (error);

        /* If this is not a direct child, just bail out. */
        if (device_get_parent(child) != dev) {
                *cookiep = cookie;
                return(0);
        }

        rid = rman_get_rid(irq);
        if (rid == 0) {
                /* Make sure that INTx is enabled */
                pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
        } else {
                /*
                 * Check to see if the interrupt is MSI or MSI-X.
                 * Ask our parent to map the MSI and give
                 * us the address and data register values.
                 * If we fail for some reason, teardown the
                 * interrupt handler.
                 */
                dinfo = device_get_ivars(child);
                if (dinfo->cfg.msi.msi_alloc > 0) {
                        /* First handler: map and program the MSI message. */
                        if (dinfo->cfg.msi.msi_addr == 0) {
                                KASSERT(dinfo->cfg.msi.msi_handlers == 0,
                            ("MSI has handlers, but vectors not mapped"));
                                error = PCIB_MAP_MSI(device_get_parent(dev),
                                    child, rman_get_start(irq), &addr, &data,
                                    rman_get_cpuid(irq));
                                if (error)
                                        goto bad;
                                dinfo->cfg.msi.msi_addr = addr;
                                dinfo->cfg.msi.msi_data = data;
                                pci_enable_msi(child, addr, data);
                        }
                        dinfo->cfg.msi.msi_handlers++;
                } else {
                        /* Not MSI, so it must be an allocated MSI-X rid. */
                        KASSERT(dinfo->cfg.msix.msix_alloc > 0,
                            ("No MSI or MSI-X interrupts allocated"));
                        KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
                            ("MSI-X index too high"));
                        /* rid N maps to MSI-X table entry N-1. */
                        mte = &dinfo->cfg.msix.msix_table[rid - 1];
                        KASSERT(mte->mte_vector != 0, ("no message vector"));
                        mv = &dinfo->cfg.msix.msix_vectors[mte->mte_vector - 1];
                        KASSERT(mv->mv_irq == rman_get_start(irq),
                            ("IRQ mismatch"));
                        if (mv->mv_address == 0) {
                                /* First use of this vector: map it. */
                                KASSERT(mte->mte_handlers == 0,
                    ("MSI-X table entry has handlers, but vector not mapped"));
                                error = PCIB_MAP_MSI(device_get_parent(dev),
                                    child, rman_get_start(irq), &addr, &data,
                                    rman_get_cpuid(irq));
                                if (error)
                                        goto bad;
                                mv->mv_address = addr;
                                mv->mv_data = data;
                        }
                        if (mte->mte_handlers == 0) {
                                /* Program and unmask the table entry. */
                                pci_setup_msix_vector(child, rid - 1,
                                    mv->mv_address, mv->mv_data);
                                pci_unmask_msix_vector(child, rid - 1);
                        }
                        mte->mte_handlers++;
                }

                /* Make sure that INTx is disabled if we are using MSI/MSIX */
                pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
        bad:
                if (error) {
                        /* Undo the generic setup done above. */
                        (void)bus_generic_teardown_intr(dev, child, irq,
                            cookie);
                        return (error);
                }
        }
        *cookiep = cookie;
        return (0);
}
3114
3115int
3116pci_teardown_intr(device_t dev, device_t child, struct resource *irq,
3117 void *cookie)
3118{
4d28e78f
SZ
3119 struct msix_table_entry *mte;
3120 struct resource_list_entry *rle;
3121 struct pci_devinfo *dinfo;
fb9077ae 3122 int rid, error;
4d28e78f
SZ
3123
3124 if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE))
3125 return (EINVAL);
3126
3127 /* If this isn't a direct child, just bail out */
3128 if (device_get_parent(child) != dev)
3129 return(bus_generic_teardown_intr(dev, child, irq, cookie));
3130
4d28e78f
SZ
3131 rid = rman_get_rid(irq);
3132 if (rid == 0) {
3133 /* Mask INTx */
3134 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
3135 } else {
3136 /*
3137 * Check to see if the interrupt is MSI or MSI-X. If so,
3138 * decrement the appropriate handlers count and mask the
3139 * MSI-X message, or disable MSI messages if the count
3140 * drops to 0.
3141 */
3142 dinfo = device_get_ivars(child);
3143 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
3144 if (rle->res != irq)
3145 return (EINVAL);
3146 if (dinfo->cfg.msi.msi_alloc > 0) {
3147 KASSERT(rid <= dinfo->cfg.msi.msi_alloc,
3148 ("MSI-X index too high"));
3149 if (dinfo->cfg.msi.msi_handlers == 0)
3150 return (EINVAL);
3151 dinfo->cfg.msi.msi_handlers--;
3152 if (dinfo->cfg.msi.msi_handlers == 0)
3153 pci_disable_msi(child);
3154 } else {
3155 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
3156 ("No MSI or MSI-X interrupts allocated"));
3157 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
3158 ("MSI-X index too high"));
3159 mte = &dinfo->cfg.msix.msix_table[rid - 1];
3160 if (mte->mte_handlers == 0)
3161 return (EINVAL);
3162 mte->mte_handlers--;
3163 if (mte->mte_handlers == 0)
cf8f3133 3164 pci_mask_msix_vector(child, rid - 1);
984263bc
MD
3165 }
3166 }
4d28e78f
SZ
3167 error = bus_generic_teardown_intr(dev, child, irq, cookie);
3168 if (rid > 0)
3169 KASSERT(error == 0,
3170 ("%s: generic teardown failed for MSI/MSI-X", __func__));
4d28e78f 3171 return (error);
984263bc
MD
3172}
3173
e126caf1 3174int
984263bc
MD
3175pci_print_child(device_t dev, device_t child)
3176{
3177 struct pci_devinfo *dinfo;
3178 struct resource_list *rl;
984263bc
MD
3179 int retval = 0;
3180
3181 dinfo = device_get_ivars(child);
984263bc
MD
3182 rl = &dinfo->resources;
3183
3184 retval += bus_print_child_header(dev, child);
3185
4d28e78f
SZ
3186 retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx");
3187 retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx");
3188 retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld");
984263bc 3189 if (device_get_flags(dev))
85f8e2ea 3190 retval += kprintf(" flags %#x", device_get_flags(dev));
984263bc 3191
85f8e2ea 3192 retval += kprintf(" at device %d.%d", pci_get_slot(child),
4d28e78f 3193 pci_get_function(child));
984263bc
MD
3194
3195 retval += bus_print_child_footer(dev, child);
3196
3197 return (retval);
3198}
3199
4d28e78f
SZ
/*
 * Class/subclass -> human-readable description table, used by
 * pci_probe_nomatch() to describe devices no driver attached to.
 * A subclass of -1 is the generic fallback entry for its class; the
 * table is terminated by a NULL desc.
 */
static struct
{
        int     class;
        int     subclass;
        char    *desc;
} pci_nomatch_tab[] = {
        {PCIC_OLD,              -1,                     "old"},
        {PCIC_OLD,              PCIS_OLD_NONVGA,        "non-VGA display device"},
        {PCIC_OLD,              PCIS_OLD_VGA,           "VGA-compatible display device"},
        {PCIC_STORAGE,          -1,                     "mass storage"},
        {PCIC_STORAGE,          PCIS_STORAGE_SCSI,      "SCSI"},
        {PCIC_STORAGE,          PCIS_STORAGE_IDE,       "ATA"},
        {PCIC_STORAGE,          PCIS_STORAGE_FLOPPY,    "floppy disk"},
        {PCIC_STORAGE,          PCIS_STORAGE_IPI,       "IPI"},
        {PCIC_STORAGE,          PCIS_STORAGE_RAID,      "RAID"},
        {PCIC_STORAGE,          PCIS_STORAGE_ATA_ADMA,  "ATA (ADMA)"},
        {PCIC_STORAGE,          PCIS_STORAGE_SATA,      "SATA"},
        {PCIC_STORAGE,          PCIS_STORAGE_SAS,       "SAS"},
        {PCIC_NETWORK,          -1,                     "network"},
        {PCIC_NETWORK,          PCIS_NETWORK_ETHERNET,  "ethernet"},
        {PCIC_NETWORK,          PCIS_NETWORK_TOKENRING, "token ring"},
        {PCIC_NETWORK,          PCIS_NETWORK_FDDI,      "fddi"},
        {PCIC_NETWORK,          PCIS_NETWORK_ATM,       "ATM"},
        {PCIC_NETWORK,          PCIS_NETWORK_ISDN,      "ISDN"},
        {PCIC_DISPLAY,          -1,                     "display"},
        {PCIC_DISPLAY,          PCIS_DISPLAY_VGA,       "VGA"},
        {PCIC_DISPLAY,          PCIS_DISPLAY_XGA,       "XGA"},
        {PCIC_DISPLAY,          PCIS_DISPLAY_3D,        "3D"},
        {PCIC_MULTIMEDIA,       -1,                     "multimedia"},
        {PCIC_MULTIMEDIA,       PCIS_MULTIMEDIA_VIDEO,  "video"},
        {PCIC_MULTIMEDIA,       PCIS_MULTIMEDIA_AUDIO,  "audio"},
        {PCIC_MULTIMEDIA,       PCIS_MULTIMEDIA_TELE,   "telephony"},
        {PCIC_MULTIMEDIA,       PCIS_MULTIMEDIA_HDA,    "HDA"},
        {PCIC_MEMORY,           -1,                     "memory"},
        {PCIC_MEMORY,           PCIS_MEMORY_RAM,        "RAM"},
        {PCIC_MEMORY,           PCIS_MEMORY_FLASH,      "flash"},
        {PCIC_BRIDGE,           -1,                     "bridge"},
        {PCIC_BRIDGE,           PCIS_BRIDGE_HOST,       "HOST-PCI"},
        {PCIC_BRIDGE,           PCIS_BRIDGE_ISA,        "PCI-ISA"},
        {PCIC_BRIDGE,           PCIS_BRIDGE_EISA,       "PCI-EISA"},
        {PCIC_BRIDGE,           PCIS_BRIDGE_MCA,        "PCI-MCA"},
        {PCIC_BRIDGE,           PCIS_BRIDGE_PCI,        "PCI-PCI"},
        {PCIC_BRIDGE,           PCIS_BRIDGE_PCMCIA,     "PCI-PCMCIA"},
        {PCIC_BRIDGE,           PCIS_BRIDGE_NUBUS,      "PCI-NuBus"},
        {PCIC_BRIDGE,           PCIS_BRIDGE_CARDBUS,    "PCI-CardBus"},
        {PCIC_BRIDGE,           PCIS_BRIDGE_RACEWAY,    "PCI-RACEway"},
        {PCIC_SIMPLECOMM,       -1,                     "simple comms"},
        {PCIC_SIMPLECOMM,       PCIS_SIMPLECOMM_UART,   "UART"},        /* could detect 16550 */
        {PCIC_SIMPLECOMM,       PCIS_SIMPLECOMM_PAR,    "parallel port"},
        {PCIC_SIMPLECOMM,       PCIS_SIMPLECOMM_MULSER, "multiport serial"},
        {PCIC_SIMPLECOMM,       PCIS_SIMPLECOMM_MODEM,  "generic modem"},
        {PCIC_BASEPERIPH,       -1,                     "base peripheral"},
        {PCIC_BASEPERIPH,       PCIS_BASEPERIPH_PIC,    "interrupt controller"},
        {PCIC_BASEPERIPH,       PCIS_BASEPERIPH_DMA,    "DMA controller"},
        {PCIC_BASEPERIPH,       PCIS_BASEPERIPH_TIMER,  "timer"},
        {PCIC_BASEPERIPH,       PCIS_BASEPERIPH_RTC,    "realtime clock"},
        {PCIC_BASEPERIPH,       PCIS_BASEPERIPH_PCIHOT, "PCI hot-plug controller"},
        {PCIC_BASEPERIPH,       PCIS_BASEPERIPH_SDHC,   "SD host controller"},
        {PCIC_INPUTDEV,         -1,                     "input device"},
        {PCIC_INPUTDEV,         PCIS_INPUTDEV_KEYBOARD, "keyboard"},
        {PCIC_INPUTDEV,         PCIS_INPUTDEV_DIGITIZER,"digitizer"},
        {PCIC_INPUTDEV,         PCIS_INPUTDEV_MOUSE,    "mouse"},
        {PCIC_INPUTDEV,         PCIS_INPUTDEV_SCANNER,  "scanner"},
        {PCIC_INPUTDEV,         PCIS_INPUTDEV_GAMEPORT, "gameport"},
        {PCIC_DOCKING,          -1,                     "docking station"},
        {PCIC_PROCESSOR,        -1,                     "processor"},
        {PCIC_SERIALBUS,        -1,                     "serial bus"},
        {PCIC_SERIALBUS,        PCIS_SERIALBUS_FW,      "FireWire"},
        {PCIC_SERIALBUS,        PCIS_SERIALBUS_ACCESS,  "AccessBus"},
        {PCIC_SERIALBUS,        PCIS_SERIALBUS_SSA,     "SSA"},
        {PCIC_SERIALBUS,        PCIS_SERIALBUS_USB,     "USB"},
        {PCIC_SERIALBUS,        PCIS_SERIALBUS_FC,      "Fibre Channel"},
        {PCIC_SERIALBUS,        PCIS_SERIALBUS_SMBUS,   "SMBus"},
        {PCIC_WIRELESS,         -1,                     "wireless controller"},
        {PCIC_WIRELESS,         PCIS_WIRELESS_IRDA,     "iRDA"},
        {PCIC_WIRELESS,         PCIS_WIRELESS_IR,       "IR"},
        {PCIC_WIRELESS,         PCIS_WIRELESS_RF,       "RF"},
        {PCIC_INTELLIIO,        -1,                     "intelligent I/O controller"},
        {PCIC_INTELLIIO,        PCIS_INTELLIIO_I2O,     "I2O"},
        {PCIC_SATCOM,           -1,                     "satellite communication"},
        {PCIC_SATCOM,           PCIS_SATCOM_TV,         "sat TV"},
        {PCIC_SATCOM,           PCIS_SATCOM_AUDIO,      "sat audio"},
        {PCIC_SATCOM,           PCIS_SATCOM_VOICE,      "sat voice"},
        {PCIC_SATCOM,           PCIS_SATCOM_DATA,       "sat data"},
        {PCIC_CRYPTO,           -1,                     "encrypt/decrypt"},
        {PCIC_CRYPTO,           PCIS_CRYPTO_NETCOMP,    "network/computer crypto"},
        {PCIC_CRYPTO,           PCIS_CRYPTO_ENTERTAIN,  "entertainment crypto"},
        {PCIC_DASP,             -1,                     "dasp"},
        {PCIC_DASP,             PCIS_DASP_DPIO,         "DPIO module"},
        {0, 0, NULL}
};
3291
e126caf1 3292void
984263bc
MD
3293pci_probe_nomatch(device_t dev, device_t child)
3294{
4d28e78f
SZ
3295 int i;
3296 char *cp, *scp, *device;
984263bc 3297
4d28e78f
SZ
3298 /*
3299 * Look for a listing for this device in a loaded device database.
3300 */
3301 if ((device = pci_describe_device(child)) != NULL) {
3302 device_printf(dev, "<%s>", device);
3303 kfree(device, M_DEVBUF);
3304 } else {
3305 /*
3306 * Scan the class/subclass descriptions for a general
3307 * description.
3308 */
3309 cp = "unknown";
3310 scp = NULL;
3311 for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) {
3312 if (pci_nomatch_tab[i].class == pci_get_class(child)) {
3313 if (pci_nomatch_tab[i].subclass == -1) {
3314 cp = pci_nomatch_tab[i].desc;
3315 } else if (pci_nomatch_tab[i].subclass ==
3316 pci_get_subclass(child)) {
3317 scp = pci_nomatch_tab[i].desc;
3318 }
3319 }
3320 }
3321 device_printf(dev, "<%s%s%s>",
3322 cp ? cp : "",
3323 ((cp != NULL) && (scp != NULL)) ? ", " : "",
3324 scp ? scp : "");
3325 }
6a45dbfa
SZ
3326 kprintf(" (vendor 0x%04x, dev 0x%04x) at device %d.%d",
3327 pci_get_vendor(child), pci_get_device(child),
3328 pci_get_slot(child), pci_get_function(child));
3329 if (pci_get_intpin(child) > 0) {
3330 int irq;
3331
3332 irq = pci_get_irq(child);
3333 if (PCI_INTERRUPT_VALID(irq))
3334 kprintf(" irq %d", irq);
3335 }
3336 kprintf("\n");
3337
638744c5 3338 pci_cfg_save(child, (struct pci_devinfo *)device_get_ivars(child), 1);
984263bc
MD
3339}
3340
4d28e78f
SZ
3341/*
3342 * Parse the PCI device database, if loaded, and return a pointer to a
3343 * description of the device.
3344 *
3345 * The database is flat text formatted as follows:
3346 *
3347 * Any line not in a valid format is ignored.
3348 * Lines are terminated with newline '\n' characters.
3349 *
3350 * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then
3351 * the vendor name.
3352 *
3353 * A DEVICE line is entered immediately below the corresponding VENDOR ID.
3354 * - devices cannot be listed without a corresponding VENDOR line.
3355 * A DEVICE line consists of a TAB, the 4 digit (hex) device code,
3356 * another TAB, then the device name.
3357 */
3358
3359/*
3360 * Assuming (ptr) points to the beginning of a line in the database,
3361 * return the vendor or device and description of the next entry.
3362 * The value of (vendor) or (device) inappropriate for the entry type
3363 * is set to -1. Returns nonzero at the end of the database.
3364 *
3365 * Note that this is slightly unrobust in the face of corrupt data;
3366 * we attempt to safeguard against this by spamming the end of the
3367 * database with a newline when we initialise.
3368 */
3369static int
3370pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc)
3371{
3372 char *cp = *ptr;
3373 int left;
3374
3375 *device = -1;
3376 *vendor = -1;
3377 **desc = '\0';
3378 for (;;) {
3379 left = pci_vendordata_size - (cp - pci_vendordata);
3380 if (left <= 0) {
3381 *ptr = cp;
3382 return(1);
3383 }
3384
3385 /* vendor entry? */
3386 if (*cp != '\t' &&
3387 ksscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2)
3388 break;
3389 /* device entry? */
3390 if (*cp == '\t' &&
3391 ksscanf(cp, "%x\t%80[^\n]", device, *desc) == 2)
3392 break;
3393
3394 /* skip to next line */
3395 while (*cp != '\n' && left > 0) {
3396 cp++;
3397 left--;
3398 }
3399 if (*cp == '\n') {
3400 cp++;
3401 left--;
3402 }
3403 }
3404 /* skip to next line */
3405 while (*cp != '\n' && left > 0) {
3406 cp++;
3407 left--;
3408 }
3409 if (*cp == '\n' && left > 0)
3410 cp++;
3411 *ptr = cp;
3412 return(0);
3413}
3414
3415static char *
3416pci_describe_device(device_t dev)
3417{
3418 int vendor, device;
3419 char *desc, *vp, *dp, *line;
3420
3421 desc = vp = dp = NULL;
3422
3423 /*
3424 * If we have no vendor data, we can't do anything.
3425 */
3426 if (pci_vendordata == NULL)
3427 goto out;
3428
3429 /*
3430 * Scan the vendor data looking for this device
3431 */
3432 line = pci_vendordata;
3433 if ((vp = kmalloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
3434 goto out;
3435 for (;;) {
3436 if (pci_describe_parse_line(&line, &vendor, &device, &vp))
3437 goto out;
3438 if (vendor == pci_get_vendor(dev))
3439 break;
3440 }
3441 if ((dp = kmalloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
3442 goto out;
3443 for (;;) {
3444 if (pci_describe_parse_line(&line, &vendor, &device, &dp)) {
3445 *dp = 0;
3446 break;
3447 }
3448 if (vendor != -1) {
3449 *dp = 0;
3450 break;
3451 }
3452 if (device == pci_get_device(dev))
3453 break;
3454 }
3455 if (dp[0] == '\0')
3456 ksnprintf(dp, 80, "0x%x", pci_get_device(dev));
3457 if ((desc = kmalloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) !=
3458 NULL)
3459 ksprintf(desc, "%s, %s", vp, dp);
3460 out:
3461 if (vp != NULL)
3462 kfree(vp, M_DEVBUF);
3463 if (dp != NULL)
3464 kfree(dp, M_DEVBUF);
3465 return(desc);
3466}
3467
/*
 * Bus method: read an instance variable of CHILD into *result.
 * Values are served from the cached config-space snapshot in the
 * child's pci_devinfo.  Returns ENOENT for unknown ivars; ETHADDR is
 * not supported here and returns EINVAL (with *result set to NULL
 * since the generic accessor does not propagate failure).
 */
int
pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
{
        struct pci_devinfo *dinfo;
        pcicfgregs *cfg;

        dinfo = device_get_ivars(child);
        cfg = &dinfo->cfg;

        switch (which) {
        case PCI_IVAR_ETHADDR:
                /*
                 * The generic accessor doesn't deal with failure, so
                 * we set the return value, then return an error.
                 */
                *((uint8_t **) result) = NULL;
                return (EINVAL);
        case PCI_IVAR_SUBVENDOR:
                *result = cfg->subvendor;
                break;
        case PCI_IVAR_SUBDEVICE:
                *result = cfg->subdevice;
                break;
        case PCI_IVAR_VENDOR:
                *result = cfg->vendor;
                break;
        case PCI_IVAR_DEVICE:
                *result = cfg->device;
                break;
        case PCI_IVAR_DEVID:
                /* Combined device/vendor id, device in the high word. */
                *result = (cfg->device << 16) | cfg->vendor;
                break;
        case PCI_IVAR_CLASS:
                *result = cfg->baseclass;
                break;
        case PCI_IVAR_SUBCLASS:
                *result = cfg->subclass;
                break;
        case PCI_IVAR_PROGIF:
                *result = cfg->progif;
                break;
        case PCI_IVAR_REVID:
                *result = cfg->revid;
                break;
        case PCI_IVAR_INTPIN:
                *result = cfg->intpin;
                break;
        case PCI_IVAR_IRQ:
                *result = cfg->intline;
                break;
        case PCI_IVAR_DOMAIN:
                *result = cfg->domain;
                break;
        case PCI_IVAR_BUS:
                *result = cfg->bus;
                break;
        case PCI_IVAR_SLOT:
                *result = cfg->slot;
                break;
        case PCI_IVAR_FUNCTION:
                *result = cfg->func;
                break;
        case PCI_IVAR_CMDREG:
                *result = cfg->cmdreg;
                break;
        case PCI_IVAR_CACHELNSZ:
                *result = cfg->cachelnsz;
                break;
        case PCI_IVAR_MINGNT:
                *result = cfg->mingnt;
                break;
        case PCI_IVAR_MAXLAT:
                *result = cfg->maxlat;
                break;
        case PCI_IVAR_LATTIMER:
                *result = cfg->lattimer;
                break;
        case PCI_IVAR_PCIXCAP_PTR:
                *result = cfg->pcix.pcix_ptr;
                break;
        case PCI_IVAR_PCIECAP_PTR:
                *result = cfg->expr.expr_ptr;
                break;
        case PCI_IVAR_VPDCAP_PTR:
                *result = cfg->vpd.vpd_reg;
                break;
        default:
                return (ENOENT);
        }
        return (0);
}
3559
22457186 3560int
984263bc
MD
3561pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
3562{
3563 struct pci_devinfo *dinfo;
984263bc
MD
3564
3565 dinfo = device_get_ivars(child);
984263bc
MD
3566
3567 switch (which) {
4d28e78f
SZ
3568 case PCI_IVAR_INTPIN:
3569 dinfo->cfg.intpin = value;
3570 return (0);
3571 case PCI_IVAR_ETHADDR:
984263bc
MD
3572 case PCI_IVAR_SUBVENDOR:
3573 case PCI_IVAR_SUBDEVICE:
3574 case PCI_IVAR_VENDOR:
3575 case PCI_IVAR_DEVICE:
3576 case PCI_IVAR_DEVID:
3577 case PCI_IVAR_CLASS:
3578 case PCI_IVAR_SUBCLASS:
3579 case PCI_IVAR_PROGIF:
3580 case PCI_IVAR_REVID:
984263bc 3581 case PCI_IVAR_IRQ:
4d28e78f 3582 case PCI_IVAR_DOMAIN:
984263bc
MD
3583 case PCI_IVAR_BUS:
3584 case PCI_IVAR_SLOT:
3585 case PCI_IVAR_FUNCTION:
4d28e78f 3586 return (EINVAL); /* disallow for now */
984263bc 3587
984263bc 3588 default:
4d28e78f
SZ
3589 return (ENOENT);
3590 }
3591}
/*
 * NOTE: this whole DDB command is compiled out ("#ifdef notyet"); it
 * has not been hooked up on this platform yet.
 */
#ifdef notyet
#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>
#include <sys/cons.h>

/*
 * List resources based on pci map registers, used for within ddb
 */

DB_SHOW_COMMAND(pciregs, db_pci_dump)
{
        struct pci_devinfo *dinfo;
        struct devlist *devlist_head;
        struct pci_conf *p;
        const char *name;
        int i, error, none_count;

        none_count = 0;
        /* get the head of the device queue */
        devlist_head = &pci_devq;

        /*
         * Go through the list of devices and print out devices
         */
        for (error = 0, i = 0,
             dinfo = STAILQ_FIRST(devlist_head);
             (dinfo != NULL) && (error == 0) && (i < pci_numdevs) && !db_pager_quit;
             dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {

                /* Populate pd_name and pd_unit */
                name = NULL;
                if (dinfo->cfg.dev)
                        name = device_get_name(dinfo->cfg.dev);

                p = &dinfo->conf;
                db_kprintf("%s%d@pci%d:%d:%d:%d:\tclass=0x%06x card=0x%08x "
                    "chip=0x%08x rev=0x%02x hdr=0x%02x\n",
                    (name && *name) ? name : "none",
                    (name && *name) ? (int)device_get_unit(dinfo->cfg.dev) :
                    none_count++,
                    p->pc_sel.pc_domain, p->pc_sel.pc_bus, p->pc_sel.pc_dev,
                    p->pc_sel.pc_func, (p->pc_class << 16) |
                    (p->pc_subclass << 8) | p->pc_progif,
                    (p->pc_subdevice << 16) | p->pc_subvendor,
                    (p->pc_device << 16) | p->pc_vendor,
                    p->pc_revid, p->pc_hdr);
        }
}
#endif /* DDB */
#endif
984263bc 3643
201eb0a7 3644static struct resource *
4d28e78f
SZ
3645pci_alloc_map(device_t dev, device_t child, int type, int *rid,
3646 u_long start, u_long end, u_long count, u_int flags)
201eb0a7
TS
3647{
3648 struct pci_devinfo *dinfo = device_get_ivars(child);
3649 struct resource_list *rl = &dinfo->resources;
3650 struct resource_list_entry *rle;
3651 struct resource *res;
4d28e78f 3652 pci_addr_t map, testval;
201eb0a7
TS
3653 int mapsize;
3654
3655 /*
3656 * Weed out the bogons, and figure out how large the BAR/map
4d28e78f 3657 * is. Bars that read back 0 here are bogus and unimplemented.
201eb0a7 3658 * Note: atapci in legacy mode are special and handled elsewhere
4d28e78f 3659 * in the code. If you have a atapci device in legacy mode and
201eb0a7
TS
3660 * it fails here, that other code is broken.
3661 */
3662 res = NULL;
3663 map = pci_read_config(child, *rid, 4);
3664 pci_write_config(child, *rid, 0xffffffff, 4);
3665 testval = pci_read_config(child, *rid, 4);
4d28e78f
SZ
3666 if (pci_maprange(testval) == 64)
3667 map |= (pci_addr_t)pci_read_config(child, *rid + 4, 4) << 32;
201eb0a7
TS
3668 if (pci_mapbase(testval) == 0)
3669 goto out;
4d28e78f
SZ
3670
3671 /*
3672 * Restore the original value of the BAR. We may have reprogrammed
3673 * the BAR of the low-level console device and when booting verbose,
3674 * we need the console device addressable.
3675 */
3676 pci_write_config(child, *rid, map, 4);
3677
3678 if (PCI_BAR_MEM(testval)) {
201eb0a7
TS
3679 if (type != SYS_RES_MEMORY) {
3680 if (bootverbose)
4d28e78f
SZ
3681 device_printf(dev,
3682 "child %s requested type %d for rid %#x,"
3683 " but the BAR says it is an memio\n",
3684 device_get_nameunit(child), type, *rid);
201eb0a7
TS
3685 goto out;
3686 }
3687 } else {
3688 if (type != SYS_RES_IOPORT) {
3689 if (bootverbose)
4d28e78f
SZ
3690 device_printf(dev,
3691 "child %s requested type %d for rid %#x,"
3692 " but the BAR says it is an ioport\n",
3693 device_get_nameunit(child), type, *rid);
201eb0a7
TS
3694 goto out;
3695 }
3696 }
3697 /*
3698 * For real BARs, we need to override the size that
3699 * the driver requests, because that's what the BAR
3700 * actually uses and we would otherwise have a
3701 * situation where we might allocate the excess to
3702 * another driver, which won't work.
3703 */
3704 mapsize = pci_mapsize(testval);
4d28e78f 3705 count = 1UL << mapsize;
201eb0a7 3706 if (RF_ALIGNMENT(flags) < mapsize)
4d28e78f
SZ
3707 flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize);
3708 if (PCI_BAR_MEM(testval) && (testval & PCIM_BAR_MEM_PREFETCH))
3709 flags |= RF_PREFETCHABLE;
3710
201eb0a7
TS
3711 /*
3712 * Allocate enough resource, and then write back the
4d28e78f 3713 * appropriate bar for that resource.
201eb0a7
TS
3714 */
3715 res = BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid,
4f7fe8c7 3716 start, end, count, flags, -1);
201eb0a7 3717 if (res == NULL) {
4d28e78f
SZ
3718 device_printf(child,
3719 "%#lx bytes of rid %#x res %d failed (%#lx, %#lx).\n",
3720 count, *rid, type, start, end);
201eb0a7
TS
3721 goto out;
3722 }
1b000e91 3723 resource_list_add(rl, type, *rid, start, end, count, -1);
201eb0a7
TS
3724 rle = resource_list_find(rl, type, *rid);
3725 if (rle == NULL)
3726 panic("pci_alloc_map: unexpectedly can't find resource.");
3727 rle->res = res;
3728 rle->start = rman_get_start(res);
3729 rle->end = rman_get_end(res);
3730 rle->count = count;
3731 if (bootverbose)
4d28e78f
SZ
3732 device_printf(child,
3733 "Lazy allocation of %#lx bytes rid %#x type %d at %#lx\n",
3734 count, *rid, type, rman_get_start(res));
201eb0a7
TS
3735 map = rman_get_start(res);
3736out:;
3737 pci_write_config(child, *rid, map, 4);
4d28e78f
SZ
3738 if (pci_maprange(testval) == 64)
3739 pci_write_config(child, *rid + 4, map >> 32, 4);
3740 return (res);
201eb0a7 3741}
4d28e78f 3742
201eb0a7 3743
261fa16d 3744struct resource *
984263bc 3745pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
4f7fe8c7 3746 u_long start, u_long end, u_long count, u_int flags, int cpuid)
984263bc
MD
3747{
3748 struct pci_devinfo *dinfo = device_get_ivars(child);
3749 struct resource_list *rl = &dinfo->resources;
201eb0a7 3750 struct resource_list_entry *rle;
984263bc 3751 pcicfgregs *cfg = &dinfo->cfg;
09e7d9f3 3752
984263bc
MD
3753 /*
3754 * Perform lazy resource allocation
984263bc
MD
3755 */
3756 if (device_get_parent(child) == dev) {
de67e43b
JS
3757 switch (type) {
3758 case SYS_RES_IRQ:
4d28e78f
SZ
3759 /*
3760 * Can't alloc legacy interrupt once MSI messages
3761 * have been allocated.
3762 */
4d28e78f
SZ
3763 if (*rid == 0 && (cfg->msi.msi_alloc > 0 ||
3764 cfg->msix.msix_alloc > 0))
3765 return (NULL);
4d28e78f
SZ
3766 /*
3767 * If the child device doesn't have an
3768 * interrupt routed and is deserving of an
3769 * interrupt, try to assign it one.
3770 */
3771 if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) &&
3772 (cfg->intpin != 0))
3773 pci_assign_interrupt(dev, child, 0);
3774 break;
de67e43b
JS
3775 case SYS_RES_IOPORT:
3776 case SYS_RES_MEMORY:
3777 if (*rid < PCIR_BAR(cfg->nummaps)) {
3778 /*
3779 * Enable the I/O mode. We should
3780 * also be assigning resources too
3781 * when none are present. The
3782 * resource_list_alloc kind of sorta does
3783 * this...
3784 */
3785 if (PCI_ENABLE_IO(dev, child, type))
3786 return (NULL);
984263bc 3787 }
201eb0a7
TS
3788 rle = resource_list_find(rl, type, *rid);
3789 if (rle == NULL)
4d28e78f
SZ
3790 return (pci_alloc_map(dev, child, type, rid,
3791 start, end, count, flags));
820c1612 3792 break;
984263bc 3793 }
201eb0a7
TS
3794 /*
3795 * If we've already allocated the resource, then
4d28e78f 3796 * return it now. But first we may need to activate
201eb0a7 3797 * it, since we don't allocate the resource as active
4d28e78f 3798 * above. Normally this would be done down in the
201eb0a7 3799 * nexus, but since we short-circuit that path we have
4d28e78f 3800 * to do its job here. Not sure if we should kfree the
201eb0a7 3801 * resource if it fails to activate.
201eb0a7
TS
3802 */
3803 rle = resource_list_find(rl, type, *rid);
3804 if (rle != NULL && rle->res != NULL) {
3805 if (bootverbose)
4d28e78f
SZ
3806 device_printf(child,
3807 "Reserved %#lx bytes for rid %#x type %d at %#lx\n",
3808 rman_get_size(rle->res), *rid, type,
3809 rman_get_start(rle->res));
201eb0a7
TS
3810 if ((flags & RF_ACTIVE) &&
3811 bus_generic_activate_resource(dev, child, type,
4d28e78f
SZ
3812 *rid, rle->res) != 0)
3813 return (NULL);
3814 return (rle->res);
201eb0a7 3815 }
984263bc 3816 }
4d28e78f 3817 return (resource_list_alloc(rl, dev, child, type, rid,
4f7fe8c7 3818 start, end, count, flags, cpuid));
984263bc
MD
3819}
3820
4d28e78f
SZ
/*
 * Bus method: delete a resource from a direct child's resource list.
 * Refuses (with a diagnostic) if the resource is still active or owned
 * elsewhere.  The corresponding BAR is also cleared — a questionable
 * inherited behavior (see the imp comment below).
 */
void
pci_delete_resource(device_t dev, device_t child, int type, int rid)
{
        struct pci_devinfo *dinfo;
        struct resource_list *rl;
        struct resource_list_entry *rle;

        /* Only manage resources of our own direct children. */
        if (device_get_parent(child) != dev)
                return;

        dinfo = device_get_ivars(child);
        rl = &dinfo->resources;
        rle = resource_list_find(rl, type, rid);
        if (rle) {
                if (rle->res) {
                        if (rman_get_device(rle->res) != dev ||
                            rman_get_flags(rle->res) & RF_ACTIVE) {
                                device_printf(dev, "delete_resource: "
                                    "Resource still owned by child, oops. "
                                    "(type=%d, rid=%d, addr=%lx)\n",
                                    rle->type, rle->rid,
                                    rman_get_start(rle->res));
                                return;
                        }
                        bus_release_resource(dev, type, rid, rle->res);
                }
                resource_list_delete(rl, type, rid);
        }
        /*
         * Why do we turn off the PCI configuration BAR when we delete a
         * resource? -- imp
         */
        pci_write_config(child, rid, 0, 4);
        BUS_DELETE_RESOURCE(device_get_parent(dev), child, type, rid);
}
3856
e126caf1
MD
3857struct resource_list *
3858pci_get_resource_list (device_t dev, device_t child)
3859{
4d28e78f 3860 struct pci_devinfo *dinfo = device_get_ivars(child);
e126caf1 3861
bcc66dfa
SZ
3862 if (dinfo == NULL)
3863 return (NULL);
3864
b0486c83 3865 return (&dinfo->resources);
e126caf1
MD
3866}
3867
4d28e78f 3868uint32_t
984263bc
MD
3869pci_read_config_method(device_t dev, device_t child, int reg, int width)
3870{
3871 struct pci_devinfo *dinfo = device_get_ivars(child);
3872 pcicfgregs *cfg = &dinfo->cfg;
4a5a2d63 3873
4d28e78f
SZ
3874 return (PCIB_READ_CONFIG(device_get_parent(dev),
3875 cfg->bus, cfg->slot, cfg->func, reg, width));
984263bc
MD
3876}
3877
e126caf1 3878void
984263bc 3879pci_write_config_method(device_t dev, device_t child, int reg,
4d28e78f 3880 uint32_t val, int width)
984263bc
MD
3881{
3882 struct pci_devinfo *dinfo = device_get_ivars(child);
3883 pcicfgregs *cfg = &dinfo->cfg;
4a5a2d63
JS
3884
3885 PCIB_WRITE_CONFIG(device_get_parent(dev),
4d28e78f 3886 cfg->bus, cfg->slot, cfg->func, reg, val, width);
984263bc
MD
3887}
3888
e126caf1 3889int
4d28e78f 3890pci_child_location_str_method(device_t dev, device_t child, char *buf,
e126caf1
MD
3891 size_t buflen)
3892{
e126caf1 3893
f8c7a42d 3894 ksnprintf(buf, buflen, "slot=%d function=%d", pci_get_slot(child),
e126caf1
MD
3895 pci_get_function(child));
3896 return (0);
3897}
3898
3899int
4d28e78f 3900pci_child_pnpinfo_str_method(device_t dev, device_t child, char *buf,
e126caf1
MD
3901 size_t buflen)
3902{
3903 struct pci_devinfo *dinfo;
3904 pcicfgregs *cfg;
3905
3906 dinfo = device_get_ivars(child);
3907 cfg = &dinfo->cfg;
f8c7a42d 3908 ksnprintf(buf, buflen, "vendor=0x%04x device=0x%04x subvendor=0x%04x "
e126caf1
MD
3909 "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device,
3910 cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass,
3911 cfg->progif);
3912 return (0);
3913}
3914
3915int
3916pci_assign_interrupt_method(device_t dev, device_t child)