pci: Fix up bunch of warnings
[dragonfly.git] / sys / bus / pci / pci.c
CommitLineData
4d28e78f
SZ
1/*-
2 * Copyright (c) 1997, Stefan Esser <se@kfreebsd.org>
3 * Copyright (c) 2000, Michael Smith <msmith@kfreebsd.org>
4 * Copyright (c) 2000, BSDi
984263bc
MD
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
12 * disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
4d28e78f 27 * __FBSDID("$FreeBSD: src/sys/dev/pci/pci.c,v 1.355.2.9.2.1 2009/04/15 03:14:26 kensmith Exp $");
984263bc
MD
28 */
29
4d28e78f 30#include <sys/cdefs.h>
984263bc 31
4d28e78f 32#include "opt_bus.h"
984263bc
MD
33
34#include <sys/param.h>
35#include <sys/systm.h>
36#include <sys/malloc.h>
37#include <sys/module.h>
4d28e78f 38#include <sys/linker.h>
984263bc
MD
39#include <sys/fcntl.h>
40#include <sys/conf.h>
41#include <sys/kernel.h>
42#include <sys/queue.h>
638744c5 43#include <sys/sysctl.h>
4d28e78f 44#include <sys/endian.h>
984263bc 45
53f3a428
SZ
46#ifdef APIC_IO
47#include <machine/smp.h>
48#endif
49
984263bc
MD
50#include <vm/vm.h>
51#include <vm/pmap.h>
52#include <vm/vm_extern.h>
53
54#include <sys/bus.h>
984263bc 55#include <sys/rman.h>
4d28e78f 56#include <sys/device.h>
984263bc 57
dc5a7bd2 58#include <sys/pciio.h>
4d28e78f
SZ
59#include <bus/pci/pcireg.h>
60#include <bus/pci/pcivar.h>
61#include <bus/pci/pci_private.h>
984263bc 62
4a5a2d63 63#include "pcib_if.h"
4d28e78f
SZ
64#include "pci_if.h"
65
66#ifdef __HAVE_ACPI
67#include <contrib/dev/acpica/acpi.h>
68#include "acpi_if.h"
69#else
70#define ACPI_PWR_FOR_SLEEP(x, y, z)
71#endif
72
35b72619
SZ
73extern struct dev_ops pcic_ops; /* XXX */
74
3a6dc23c
SZ
75typedef void (*pci_read_cap_t)(device_t, int, int, pcicfgregs *);
76
4d28e78f
SZ
77static uint32_t pci_mapbase(unsigned mapreg);
78static const char *pci_maptype(unsigned mapreg);
79static int pci_mapsize(unsigned testval);
80static int pci_maprange(unsigned mapreg);
81static void pci_fixancient(pcicfgregs *cfg);
82
83static int pci_porten(device_t pcib, int b, int s, int f);
84static int pci_memen(device_t pcib, int b, int s, int f);
85static void pci_assign_interrupt(device_t bus, device_t dev,
86 int force_route);
87static int pci_add_map(device_t pcib, device_t bus, device_t dev,
88 int b, int s, int f, int reg,
89 struct resource_list *rl, int force, int prefetch);
90static int pci_probe(device_t dev);
91static int pci_attach(device_t dev);
11a49859 92static void pci_child_detached(device_t, device_t);
4d28e78f
SZ
93static void pci_load_vendor_data(void);
94static int pci_describe_parse_line(char **ptr, int *vendor,
95 int *device, char **desc);
96static char *pci_describe_device(device_t dev);
97static int pci_modevent(module_t mod, int what, void *arg);
98static void pci_hdrtypedata(device_t pcib, int b, int s, int f,
99 pcicfgregs *cfg);
3a6dc23c 100static void pci_read_capabilities(device_t pcib, pcicfgregs *cfg);
4d28e78f
SZ
101static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg,
102 int reg, uint32_t *data);
103#if 0
104static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg,
105 int reg, uint32_t data);
106#endif
107static void pci_read_vpd(device_t pcib, pcicfgregs *cfg);
108static void pci_disable_msi(device_t dev);
109static void pci_enable_msi(device_t dev, uint64_t address,
110 uint16_t data);
111static void pci_enable_msix(device_t dev, u_int index,
112 uint64_t address, uint32_t data);
113static void pci_mask_msix(device_t dev, u_int index);
114static void pci_unmask_msix(device_t dev, u_int index);
115static int pci_msi_blacklisted(void);
116static void pci_resume_msi(device_t dev);
117static void pci_resume_msix(device_t dev);
d85e7311
SZ
118static int pcie_slotimpl(const pcicfgregs *);
119static void pci_print_verbose_expr(const pcicfgregs *);
4d28e78f 120
3a6dc23c
SZ
121static void pci_read_cap_pmgt(device_t, int, int, pcicfgregs *);
122static void pci_read_cap_ht(device_t, int, int, pcicfgregs *);
123static void pci_read_cap_msi(device_t, int, int, pcicfgregs *);
124static void pci_read_cap_msix(device_t, int, int, pcicfgregs *);
125static void pci_read_cap_vpd(device_t, int, int, pcicfgregs *);
126static void pci_read_cap_subvendor(device_t, int, int,
127 pcicfgregs *);
128static void pci_read_cap_pcix(device_t, int, int, pcicfgregs *);
d85e7311 129static void pci_read_cap_express(device_t, int, int, pcicfgregs *);
3a6dc23c 130
4d28e78f
SZ
131static device_method_t pci_methods[] = {
132 /* Device interface */
133 DEVMETHOD(device_probe, pci_probe),
134 DEVMETHOD(device_attach, pci_attach),
135 DEVMETHOD(device_detach, bus_generic_detach),
136 DEVMETHOD(device_shutdown, bus_generic_shutdown),
137 DEVMETHOD(device_suspend, pci_suspend),
138 DEVMETHOD(device_resume, pci_resume),
139
140 /* Bus interface */
141 DEVMETHOD(bus_print_child, pci_print_child),
142 DEVMETHOD(bus_probe_nomatch, pci_probe_nomatch),
143 DEVMETHOD(bus_read_ivar, pci_read_ivar),
144 DEVMETHOD(bus_write_ivar, pci_write_ivar),
145 DEVMETHOD(bus_driver_added, pci_driver_added),
11a49859 146 DEVMETHOD(bus_child_detached, pci_child_detached),
4d28e78f
SZ
147 DEVMETHOD(bus_setup_intr, pci_setup_intr),
148 DEVMETHOD(bus_teardown_intr, pci_teardown_intr),
149
150 DEVMETHOD(bus_get_resource_list,pci_get_resource_list),
151 DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
152 DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
153 DEVMETHOD(bus_delete_resource, pci_delete_resource),
154 DEVMETHOD(bus_alloc_resource, pci_alloc_resource),
155 DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource),
156 DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
157 DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
158 DEVMETHOD(bus_child_pnpinfo_str, pci_child_pnpinfo_str_method),
159 DEVMETHOD(bus_child_location_str, pci_child_location_str_method),
160
161 /* PCI interface */
162 DEVMETHOD(pci_read_config, pci_read_config_method),
163 DEVMETHOD(pci_write_config, pci_write_config_method),
164 DEVMETHOD(pci_enable_busmaster, pci_enable_busmaster_method),
165 DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method),
166 DEVMETHOD(pci_enable_io, pci_enable_io_method),
167 DEVMETHOD(pci_disable_io, pci_disable_io_method),
168 DEVMETHOD(pci_get_vpd_ident, pci_get_vpd_ident_method),
169 DEVMETHOD(pci_get_vpd_readonly, pci_get_vpd_readonly_method),
170 DEVMETHOD(pci_get_powerstate, pci_get_powerstate_method),
171 DEVMETHOD(pci_set_powerstate, pci_set_powerstate_method),
172 DEVMETHOD(pci_assign_interrupt, pci_assign_interrupt_method),
173 DEVMETHOD(pci_find_extcap, pci_find_extcap_method),
174 DEVMETHOD(pci_alloc_msi, pci_alloc_msi_method),
175 DEVMETHOD(pci_alloc_msix, pci_alloc_msix_method),
176 DEVMETHOD(pci_remap_msix, pci_remap_msix_method),
177 DEVMETHOD(pci_release_msi, pci_release_msi_method),
178 DEVMETHOD(pci_msi_count, pci_msi_count_method),
179 DEVMETHOD(pci_msix_count, pci_msix_count_method),
180
181 { 0, 0 }
182};
183
184DEFINE_CLASS_0(pci, pci_driver, pci_methods, 0);
4a5a2d63 185
4d28e78f
SZ
186static devclass_t pci_devclass;
187DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, 0);
188MODULE_VERSION(pci, 1);
189
190static char *pci_vendordata;
191static size_t pci_vendordata_size;
dc5a7bd2 192
984263bc 193
3a6dc23c
SZ
194static const struct pci_read_cap {
195 int cap;
196 pci_read_cap_t read_cap;
197} pci_read_caps[] = {
198 { PCIY_PMG, pci_read_cap_pmgt },
199 { PCIY_HT, pci_read_cap_ht },
200 { PCIY_MSI, pci_read_cap_msi },
201 { PCIY_MSIX, pci_read_cap_msix },
202 { PCIY_VPD, pci_read_cap_vpd },
203 { PCIY_SUBVENDOR, pci_read_cap_subvendor },
204 { PCIY_PCIX, pci_read_cap_pcix },
d85e7311 205 { PCIY_EXPRESS, pci_read_cap_express },
3a6dc23c
SZ
206 { 0, NULL } /* required last entry */
207};
208
984263bc 209struct pci_quirk {
4d28e78f 210 uint32_t devid; /* Vendor/device of the card */
984263bc 211 int type;
4d28e78f
SZ
212#define PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */
213#define PCI_QUIRK_DISABLE_MSI 2 /* MSI/MSI-X doesn't work */
984263bc
MD
214 int arg1;
215 int arg2;
216};
217
218struct pci_quirk pci_quirks[] = {
4d28e78f 219 /* The Intel 82371AB and 82443MX has a map register at offset 0x90. */
984263bc
MD
220 { 0x71138086, PCI_QUIRK_MAP_REG, 0x90, 0 },
221 { 0x719b8086, PCI_QUIRK_MAP_REG, 0x90, 0 },
f1f0bfb2
JS
222 /* As does the Serverworks OSB4 (the SMBus mapping register) */
223 { 0x02001166, PCI_QUIRK_MAP_REG, 0x90, 0 },
984263bc 224
4d28e78f
SZ
225 /*
226 * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge
227 * or the CMIC-SL (AKA ServerWorks GC_LE).
228 */
229 { 0x00141166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
230 { 0x00171166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
231
232 /*
233 * MSI doesn't work on earlier Intel chipsets including
234 * E7500, E7501, E7505, 845, 865, 875/E7210, and 855.
235 */
236 { 0x25408086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
237 { 0x254c8086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
238 { 0x25508086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
239 { 0x25608086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
240 { 0x25708086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
241 { 0x25788086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
242 { 0x35808086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
243
244 /*
245 * MSI doesn't work with devices behind the AMD 8131 HT-PCIX
246 * bridge.
247 */
248 { 0x74501022, PCI_QUIRK_DISABLE_MSI, 0, 0 },
249
984263bc
MD
250 { 0 }
251};
252
253/* map register information */
4d28e78f
SZ
254#define PCI_MAPMEM 0x01 /* memory map */
255#define PCI_MAPMEMP 0x02 /* prefetchable memory map */
256#define PCI_MAPPORT 0x04 /* port map */
257
258struct devlist pci_devq;
259uint32_t pci_generation;
260uint32_t pci_numdevs = 0;
261static int pcie_chipset, pcix_chipset;
262
263/* sysctl vars */
264SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD, 0, "PCI bus tuning parameters");
265
266static int pci_enable_io_modes = 1;
267TUNABLE_INT("hw.pci.enable_io_modes", &pci_enable_io_modes);
268SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RW,
269 &pci_enable_io_modes, 1,
270 "Enable I/O and memory bits in the config register. Some BIOSes do not\n\
271enable these bits correctly. We'd like to do this all the time, but there\n\
272are some peripherals that this causes problems with.");
984263bc 273
638744c5
HT
274static int pci_do_power_nodriver = 0;
275TUNABLE_INT("hw.pci.do_power_nodriver", &pci_do_power_nodriver);
276SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RW,
277 &pci_do_power_nodriver, 0,
278 "Place a function into D3 state when no driver attaches to it. 0 means\n\
279disable. 1 means conservatively place devices into D3 state. 2 means\n\
6699890a 280aggressively place devices into D3 state. 3 means put absolutely everything\n\
638744c5
HT
281in D3 state.");
282
4d28e78f
SZ
283static int pci_do_power_resume = 1;
284TUNABLE_INT("hw.pci.do_power_resume", &pci_do_power_resume);
285SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RW,
286 &pci_do_power_resume, 1,
287 "Transition from D3 -> D0 on resume.");
288
289static int pci_do_msi = 1;
290TUNABLE_INT("hw.pci.enable_msi", &pci_do_msi);
291SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RW, &pci_do_msi, 1,
292 "Enable support for MSI interrupts");
293
294static int pci_do_msix = 1;
295TUNABLE_INT("hw.pci.enable_msix", &pci_do_msix);
296SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RW, &pci_do_msix, 1,
297 "Enable support for MSI-X interrupts");
298
299static int pci_honor_msi_blacklist = 1;
300TUNABLE_INT("hw.pci.honor_msi_blacklist", &pci_honor_msi_blacklist);
301SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RD,
302 &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI");
303
304/* Find a device_t by bus/slot/function in domain 0 */
305
306device_t
307pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func)
308{
309
310 return (pci_find_dbsf(0, bus, slot, func));
311}
312
313/* Find a device_t by domain/bus/slot/function */
314
984263bc 315device_t
4d28e78f 316pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func)
984263bc
MD
317{
318 struct pci_devinfo *dinfo;
319
320 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
4d28e78f
SZ
321 if ((dinfo->cfg.domain == domain) &&
322 (dinfo->cfg.bus == bus) &&
984263bc
MD
323 (dinfo->cfg.slot == slot) &&
324 (dinfo->cfg.func == func)) {
325 return (dinfo->cfg.dev);
326 }
327 }
328
329 return (NULL);
330}
331
4d28e78f
SZ
332/* Find a device_t by vendor/device ID */
333
984263bc 334device_t
4d28e78f 335pci_find_device(uint16_t vendor, uint16_t device)
984263bc
MD
336{
337 struct pci_devinfo *dinfo;
338
339 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
340 if ((dinfo->cfg.vendor == vendor) &&
341 (dinfo->cfg.device == device)) {
342 return (dinfo->cfg.dev);
343 }
344 }
345
346 return (NULL);
347}
348
349/* return base address of memory or port map */
350
4d28e78f
SZ
351static uint32_t
352pci_mapbase(uint32_t mapreg)
984263bc 353{
4d28e78f
SZ
354
355 if (PCI_BAR_MEM(mapreg))
356 return (mapreg & PCIM_BAR_MEM_BASE);
357 else
358 return (mapreg & PCIM_BAR_IO_BASE);
984263bc
MD
359}
360
361/* return map type of memory or port map */
362
4d28e78f 363static const char *
984263bc
MD
364pci_maptype(unsigned mapreg)
365{
984263bc 366
4d28e78f
SZ
367 if (PCI_BAR_IO(mapreg))
368 return ("I/O Port");
369 if (mapreg & PCIM_BAR_MEM_PREFETCH)
370 return ("Prefetchable Memory");
371 return ("Memory");
984263bc
MD
372}
373
374/* return log2 of map size decoded for memory or port map */
375
376static int
4d28e78f 377pci_mapsize(uint32_t testval)
984263bc
MD
378{
379 int ln2size;
380
381 testval = pci_mapbase(testval);
382 ln2size = 0;
383 if (testval != 0) {
384 while ((testval & 1) == 0)
385 {
386 ln2size++;
387 testval >>= 1;
388 }
389 }
390 return (ln2size);
391}
392
393/* return log2 of address range supported by map register */
394
395static int
396pci_maprange(unsigned mapreg)
397{
398 int ln2range = 0;
4d28e78f
SZ
399
400 if (PCI_BAR_IO(mapreg))
984263bc 401 ln2range = 32;
4d28e78f
SZ
402 else
403 switch (mapreg & PCIM_BAR_MEM_TYPE) {
404 case PCIM_BAR_MEM_32:
405 ln2range = 32;
406 break;
407 case PCIM_BAR_MEM_1MB:
408 ln2range = 20;
409 break;
410 case PCIM_BAR_MEM_64:
411 ln2range = 64;
412 break;
413 }
984263bc
MD
414 return (ln2range);
415}
416
417/* adjust some values from PCI 1.0 devices to match 2.0 standards ... */
418
419static void
420pci_fixancient(pcicfgregs *cfg)
421{
422 if (cfg->hdrtype != 0)
423 return;
424
425 /* PCI to PCI bridges use header type 1 */
426 if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI)
427 cfg->hdrtype = 1;
428}
429
984263bc
MD
430/* extract header type specific config data */
431
432static void
4a5a2d63 433pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg)
984263bc 434{
4d28e78f 435#define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
984263bc
MD
436 switch (cfg->hdrtype) {
437 case 0:
4a5a2d63
JS
438 cfg->subvendor = REG(PCIR_SUBVEND_0, 2);
439 cfg->subdevice = REG(PCIR_SUBDEV_0, 2);
984263bc
MD
440 cfg->nummaps = PCI_MAXMAPS_0;
441 break;
442 case 1:
984263bc 443 cfg->nummaps = PCI_MAXMAPS_1;
984263bc
MD
444 break;
445 case 2:
4a5a2d63
JS
446 cfg->subvendor = REG(PCIR_SUBVEND_2, 2);
447 cfg->subdevice = REG(PCIR_SUBDEV_2, 2);
984263bc 448 cfg->nummaps = PCI_MAXMAPS_2;
984263bc
MD
449 break;
450 }
4a5a2d63 451#undef REG
984263bc
MD
452}
453
4d28e78f 454/* read configuration header into pcicfgregs structure */
22457186 455struct pci_devinfo *
4d28e78f 456pci_read_device(device_t pcib, int d, int b, int s, int f, size_t size)
984263bc 457{
4d28e78f 458#define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
984263bc
MD
459 pcicfgregs *cfg = NULL;
460 struct pci_devinfo *devlist_entry;
461 struct devlist *devlist_head;
462
463 devlist_head = &pci_devq;
464
465 devlist_entry = NULL;
466
4d28e78f 467 if (REG(PCIR_DEVVENDOR, 4) != -1) {
efda3bd0 468 devlist_entry = kmalloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
984263bc
MD
469
470 cfg = &devlist_entry->cfg;
4d28e78f
SZ
471
472 cfg->domain = d;
4a5a2d63
JS
473 cfg->bus = b;
474 cfg->slot = s;
475 cfg->func = f;
476 cfg->vendor = REG(PCIR_VENDOR, 2);
477 cfg->device = REG(PCIR_DEVICE, 2);
478 cfg->cmdreg = REG(PCIR_COMMAND, 2);
479 cfg->statreg = REG(PCIR_STATUS, 2);
480 cfg->baseclass = REG(PCIR_CLASS, 1);
481 cfg->subclass = REG(PCIR_SUBCLASS, 1);
482 cfg->progif = REG(PCIR_PROGIF, 1);
483 cfg->revid = REG(PCIR_REVID, 1);
e126caf1 484 cfg->hdrtype = REG(PCIR_HDRTYPE, 1);
4a5a2d63
JS
485 cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1);
486 cfg->lattimer = REG(PCIR_LATTIMER, 1);
487 cfg->intpin = REG(PCIR_INTPIN, 1);
488 cfg->intline = REG(PCIR_INTLINE, 1);
984263bc 489
53f3a428
SZ
490#ifdef APIC_IO
491 /*
492 * If using the APIC the intpin is probably wrong, since it
493 * is often setup by the BIOS with the PIC in mind.
494 */
495 if (cfg->intpin != 0) {
496 int airq;
497
498 airq = pci_apic_irq(cfg->bus, cfg->slot, cfg->intpin);
499 if (airq >= 0) {
500 /* PCI specific entry found in MP table */
501 if (airq != cfg->intline) {
502 undirect_pci_irq(cfg->intline);
503 cfg->intline = airq;
504 }
505 } else {
506 /*
507 * PCI interrupts might be redirected to the
508 * ISA bus according to some MP tables. Use the
509 * same methods as used by the ISA devices
510 * devices to find the proper IOAPIC int pin.
511 */
512 airq = isa_apic_irq(cfg->intline);
513 if ((airq >= 0) && (airq != cfg->intline)) {
514 /* XXX: undirect_pci_irq() ? */
515 undirect_isa_irq(cfg->intline);
516 cfg->intline = airq;
517 }
518 }
519 }
520#endif /* APIC_IO */
521
4a5a2d63
JS
522 cfg->mingnt = REG(PCIR_MINGNT, 1);
523 cfg->maxlat = REG(PCIR_MAXLAT, 1);
984263bc
MD
524
525 cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0;
526 cfg->hdrtype &= ~PCIM_MFDEV;
527
528 pci_fixancient(cfg);
4a5a2d63 529 pci_hdrtypedata(pcib, b, s, f, cfg);
4d28e78f 530
3a6dc23c 531 pci_read_capabilities(pcib, cfg);
984263bc
MD
532
533 STAILQ_INSERT_TAIL(devlist_head, devlist_entry, pci_links);
534
4d28e78f 535 devlist_entry->conf.pc_sel.pc_domain = cfg->domain;
984263bc
MD
536 devlist_entry->conf.pc_sel.pc_bus = cfg->bus;
537 devlist_entry->conf.pc_sel.pc_dev = cfg->slot;
538 devlist_entry->conf.pc_sel.pc_func = cfg->func;
539 devlist_entry->conf.pc_hdr = cfg->hdrtype;
540
541 devlist_entry->conf.pc_subvendor = cfg->subvendor;
542 devlist_entry->conf.pc_subdevice = cfg->subdevice;
543 devlist_entry->conf.pc_vendor = cfg->vendor;
544 devlist_entry->conf.pc_device = cfg->device;
545
546 devlist_entry->conf.pc_class = cfg->baseclass;
547 devlist_entry->conf.pc_subclass = cfg->subclass;
548 devlist_entry->conf.pc_progif = cfg->progif;
549 devlist_entry->conf.pc_revid = cfg->revid;
550
551 pci_numdevs++;
552 pci_generation++;
553 }
554 return (devlist_entry);
555#undef REG
556}
557
3a6dc23c
SZ
558static int
559pci_fixup_nextptr(int *nextptr0)
560{
561 int nextptr = *nextptr0;
562
563 /* "Next pointer" is only one byte */
564 KASSERT(nextptr <= 0xff, ("Illegal next pointer %d\n", nextptr));
565
566 if (nextptr & 0x3) {
567 /*
568 * PCI local bus spec 3.0:
569 *
570 * "... The bottom two bits of all pointers are reserved
571 * and must be implemented as 00b although software must
572 * mask them to allow for future uses of these bits ..."
573 */
574 if (bootverbose) {
575 kprintf("Illegal PCI extended capability "
576 "offset, fixup 0x%02x -> 0x%02x\n",
577 nextptr, nextptr & ~0x3);
578 }
579 nextptr &= ~0x3;
580 }
581 *nextptr0 = nextptr;
582
583 if (nextptr < 0x40) {
584 if (nextptr != 0) {
585 kprintf("Illegal PCI extended capability "
586 "offset 0x%02x", nextptr);
587 }
588 return 0;
589 }
590 return 1;
591}
592
984263bc 593static void
3a6dc23c 594pci_read_cap_pmgt(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
984263bc 595{
3a6dc23c
SZ
596#define REG(n, w) \
597 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
598
599 struct pcicfg_pp *pp = &cfg->pp;
600
601 if (pp->pp_cap)
602 return;
603
604 pp->pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
605 pp->pp_status = ptr + PCIR_POWER_STATUS;
606 pp->pp_pmcsr = ptr + PCIR_POWER_PMCSR;
607
608 if ((nextptr - ptr) > PCIR_POWER_DATA) {
609 /*
610 * XXX
611 * We should write to data_select and read back from
612 * data_scale to determine whether data register is
613 * implemented.
614 */
615#ifdef foo
616 pp->pp_data = ptr + PCIR_POWER_DATA;
617#else
618 pp->pp_data = 0;
619#endif
620 }
621
622#undef REG
623}
624
625static void
626pci_read_cap_ht(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
627{
628#ifdef notyet
4d28e78f 629#if defined(__i386__) || defined(__amd64__)
3a6dc23c
SZ
630
631#define REG(n, w) \
632 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
633
634 struct pcicfg_ht *ht = &cfg->ht;
4d28e78f 635 uint64_t addr;
4d28e78f 636 uint32_t val;
3a6dc23c
SZ
637
638 /* Determine HT-specific capability type. */
639 val = REG(ptr + PCIR_HT_COMMAND, 2);
640
641 if ((val & PCIM_HTCMD_CAP_MASK) != PCIM_HTCAP_MSI_MAPPING)
642 return;
643
644 if (!(val & PCIM_HTCMD_MSI_FIXED)) {
645 /* Sanity check the mapping window. */
646 addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI, 4);
647 addr <<= 32;
648 addr |= REG(ptr + PCIR_HTMSI_ADDRESS_LO, 4);
649 if (addr != MSI_INTEL_ADDR_BASE) {
650 device_printf(pcib, "HT Bridge at pci%d:%d:%d:%d "
651 "has non-default MSI window 0x%llx\n",
652 cfg->domain, cfg->bus, cfg->slot, cfg->func,
653 (long long)addr);
654 }
655 } else {
656 addr = MSI_INTEL_ADDR_BASE;
657 }
658
659 ht->ht_msimap = ptr;
660 ht->ht_msictrl = val;
661 ht->ht_msiaddr = addr;
662
663#undef REG
664
665#endif /* __i386__ || __amd64__ */
666#endif /* notyet */
667}
668
669static void
670pci_read_cap_msi(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
671{
672#define REG(n, w) \
673 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
674
675 struct pcicfg_msi *msi = &cfg->msi;
676
677 msi->msi_location = ptr;
678 msi->msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2);
679 msi->msi_msgnum = 1 << ((msi->msi_ctrl & PCIM_MSICTRL_MMC_MASK) >> 1);
680
681#undef REG
682}
683
684static void
685pci_read_cap_msix(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
686{
687#define REG(n, w) \
688 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
689
690 struct pcicfg_msix *msix = &cfg->msix;
691 uint32_t val;
692
693 msix->msix_location = ptr;
694 msix->msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2);
695 msix->msix_msgnum = (msix->msix_ctrl & PCIM_MSIXCTRL_TABLE_SIZE) + 1;
696
697 val = REG(ptr + PCIR_MSIX_TABLE, 4);
698 msix->msix_table_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK);
699 msix->msix_table_offset = val & ~PCIM_MSIX_BIR_MASK;
700
701 val = REG(ptr + PCIR_MSIX_PBA, 4);
702 msix->msix_pba_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK);
703 msix->msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK;
704
705#undef REG
706}
707
708static void
709pci_read_cap_vpd(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
710{
711 cfg->vpd.vpd_reg = ptr;
712}
713
714static void
715pci_read_cap_subvendor(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
716{
717#define REG(n, w) \
718 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
719
720 /* Should always be true. */
721 if ((cfg->hdrtype & PCIM_HDRTYPE) == 1) {
722 uint32_t val;
723
724 val = REG(ptr + PCIR_SUBVENDCAP_ID, 4);
725 cfg->subvendor = val & 0xffff;
726 cfg->subdevice = val >> 16;
727 }
728
729#undef REG
730}
731
732static void
733pci_read_cap_pcix(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
734{
735 /*
736 * Assume we have a PCI-X chipset if we have
737 * at least one PCI-PCI bridge with a PCI-X
738 * capability. Note that some systems with
739 * PCI-express or HT chipsets might match on
740 * this check as well.
741 */
742 if ((cfg->hdrtype & PCIM_HDRTYPE) == 1)
743 pcix_chipset = 1;
d85e7311
SZ
744
745 cfg->pcix.pcix_ptr = ptr;
746}
747
748static int
749pcie_slotimpl(const pcicfgregs *cfg)
750{
751 const struct pcicfg_expr *expr = &cfg->expr;
752 uint16_t port_type;
753
754 /*
755 * Only version 1 can be parsed currently
756 */
757 if ((expr->expr_cap & PCIEM_CAP_VER_MASK) != PCIEM_CAP_VER_1)
758 return 0;
759
760 /*
761 * - Slot implemented bit is meaningful iff current port is
762 * root port or down stream port.
763 * - Testing for root port or down stream port is meanningful
764 * iff PCI configure has type 1 header.
765 */
766
767 if (cfg->hdrtype != 1)
768 return 0;
769
770 port_type = expr->expr_cap & PCIEM_CAP_PORT_TYPE;
771 if (port_type != PCIE_ROOT_PORT && port_type != PCIE_DOWN_STREAM_PORT)
772 return 0;
773
774 if (!(expr->expr_cap & PCIEM_CAP_SLOT_IMPL))
775 return 0;
776
777 return 1;
3a6dc23c
SZ
778}
779
780static void
d85e7311 781pci_read_cap_express(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
3a6dc23c 782{
d85e7311
SZ
783#define REG(n, w) \
784 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
785
786 struct pcicfg_expr *expr = &cfg->expr;
787
3a6dc23c
SZ
788 /*
789 * Assume we have a PCI-express chipset if we have
790 * at least one PCI-express device.
791 */
792 pcie_chipset = 1;
d85e7311
SZ
793
794 expr->expr_ptr = ptr;
795 expr->expr_cap = REG(ptr + PCIER_CAPABILITY, 2);
796
797 /*
798 * Only version 1 can be parsed currently
799 */
800 if ((expr->expr_cap & PCIEM_CAP_VER_MASK) != PCIEM_CAP_VER_1)
801 return;
802
803 /*
804 * Read slot capabilities. Slot capabilities exists iff
805 * current port's slot is implemented
806 */
807 if (pcie_slotimpl(cfg))
808 expr->expr_slotcap = REG(ptr + PCIER_SLOTCAP, 4);
809
810#undef REG
3a6dc23c
SZ
811}
812
813static void
814pci_read_capabilities(device_t pcib, pcicfgregs *cfg)
815{
816#define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
817#define WREG(n, v, w) PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w)
818
819 uint32_t val;
820 int nextptr, ptrptr;
821
822 if ((REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT) == 0) {
823 /* No capabilities */
824 return;
825 }
0c78fe3f 826
4d28e78f 827 switch (cfg->hdrtype & PCIM_HDRTYPE) {
984263bc 828 case 0:
81c29ce4
SZ
829 case 1:
830 ptrptr = PCIR_CAP_PTR;
984263bc
MD
831 break;
832 case 2:
4d28e78f 833 ptrptr = PCIR_CAP_PTR_2; /* cardbus capabilities ptr */
984263bc
MD
834 break;
835 default:
3a6dc23c 836 return; /* no capabilities support */
984263bc 837 }
4d28e78f 838 nextptr = REG(ptrptr, 1); /* sanity check? */
984263bc
MD
839
840 /*
841 * Read capability entries.
842 */
3a6dc23c
SZ
843 while (pci_fixup_nextptr(&nextptr)) {
844 const struct pci_read_cap *rc;
845 int ptr = nextptr;
846
4d28e78f 847 /* Find the next entry */
4d28e78f 848 nextptr = REG(ptr + PCICAP_NEXTPTR, 1);
984263bc
MD
849
850 /* Process this entry */
3a6dc23c
SZ
851 val = REG(ptr + PCICAP_ID, 1);
852 for (rc = pci_read_caps; rc->read_cap != NULL; ++rc) {
853 if (rc->cap == val) {
854 rc->read_cap(pcib, ptr, nextptr, cfg);
4d28e78f
SZ
855 break;
856 }
984263bc
MD
857 }
858 }
4d28e78f 859/* REG and WREG use carry through to next functions */
984263bc
MD
860}
861
4d28e78f
SZ
862/*
863 * PCI Vital Product Data
864 */
865
866#define PCI_VPD_TIMEOUT 1000000
984263bc 867
4d28e78f
SZ
868static int
869pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data)
984263bc 870{
4d28e78f 871 int count = PCI_VPD_TIMEOUT;
984263bc 872
4d28e78f 873 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
984263bc 874
4d28e78f 875 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2);
984263bc 876
4d28e78f
SZ
877 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000) {
878 if (--count < 0)
879 return (ENXIO);
880 DELAY(1); /* limit looping */
881 }
882 *data = (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4));
984263bc 883
984263bc
MD
884 return (0);
885}
984263bc 886
4d28e78f
SZ
#if 0
/*
 * Write one 32-bit word of Vital Product Data at 'reg'.  Currently
 * unused, hence compiled out.  Returns 0 on success or ENXIO when the
 * device does not clear the completion flag within PCI_VPD_TIMEOUT polls.
 */
static int
pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data)
{
	int count = PCI_VPD_TIMEOUT;

	KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));

	/* Writing the address with bit 15 set requests a write. */
	WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4);
	WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2);

	/* Poll until the device clears the flag (bit 15). */
	while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000) {
		if (--count < 0)
			return (ENXIO);
		DELAY(1);	/* limit looping */
	}

	return (0);
}
#endif
906
907#undef PCI_VPD_TIMEOUT
908
/*
 * State for the VPD byte reader: buffers one 32-bit VPD register read
 * and hands bytes out one at a time while maintaining a running
 * checksum of everything read so far.
 */
909struct vpd_readstate {
910	device_t pcib;		/* bridge used to issue the config cycles */
911	pcicfgregs *cfg;	/* config regs of the device owning the VPD */
912	uint32_t val;		/* current 32-bit word read from VPD */
913	int bytesinval;		/* unread bytes remaining in 'val' */
914	int off;		/* byte offset of the next VPD word to read */
915	uint8_t cksum;		/* running sum of all bytes handed out */
916};
917
918static int
919vpd_nextbyte(struct vpd_readstate *vrs, uint8_t *data)
920{
921 uint32_t reg;
922 uint8_t byte;
923
924 if (vrs->bytesinval == 0) {
925 if (pci_read_vpd_reg(vrs->pcib, vrs->cfg, vrs->off, &reg))
926 return (ENXIO);
927 vrs->val = le32toh(reg);
928 vrs->off += 4;
929 byte = vrs->val & 0xff;
930 vrs->bytesinval = 3;
931 } else {
932 vrs->val = vrs->val >> 8;
933 byte = vrs->val & 0xff;
934 vrs->bytesinval--;
935 }
936
937 vrs->cksum += byte;
938 *data = byte;
939 return (0);
940}
941
d85e7311
SZ
942int
943pcie_slot_implemented(device_t dev)
944{
945 struct pci_devinfo *dinfo = device_get_ivars(dev);
946
947 return pcie_slotimpl(&dinfo->cfg);
948}
949
4d28e78f
SZ
950void
951pcie_set_max_readrq(device_t dev, uint16_t rqsize)
952{
d85e7311
SZ
953 uint8_t expr_ptr;
954 uint16_t val;
955
956 rqsize &= PCIEM_DEVCTL_MAX_READRQ_MASK;
957 if (rqsize > PCIEM_DEVCTL_MAX_READRQ_4096) {
958 panic("%s: invalid max read request size 0x%02x\n",
959 device_get_nameunit(dev), rqsize);
960 }
961
962 expr_ptr = pci_get_pciecap_ptr(dev);
963 if (!expr_ptr)
964 panic("%s: not PCIe device\n", device_get_nameunit(dev));
965
966 val = pci_read_config(dev, expr_ptr + PCIER_DEVCTRL, 2);
967 if ((val & PCIEM_DEVCTL_MAX_READRQ_MASK) != rqsize) {
968 if (bootverbose)
969 device_printf(dev, "adjust device control 0x%04x", val);
970
971 val &= ~PCIEM_DEVCTL_MAX_READRQ_MASK;
972 val |= rqsize;
973 pci_write_config(dev, expr_ptr + PCIER_DEVCTRL, val, 2);
974
975 if (bootverbose)
976 kprintf(" -> 0x%04x\n", val);
977 }
4d28e78f
SZ
978}
979
/*
 * Read and parse the device's PCI Vital Product Data (VPD) into
 * cfg->vpd.  The VPD is a byte stream of resource tags delivered one
 * byte at a time by vpd_nextbyte(); this routine runs a state machine
 * over it:
 *
 *	state 0  - resource tag (item name) header
 *	state 1  - identifier-string body (-> cfg->vpd.vpd_ident)
 *	state 2  - VPD-R keyword header (-> cfg->vpd.vpd_ros[])
 *	state 3  - VPD-R keyword value
 *	state 4  - skip bytes of an unhandled item
 *	state 5  - VPD-W keyword header (-> cfg->vpd.vpd_w[])
 *	state 6  - VPD-W keyword value
 *	state -1 - normal termination
 *	state -2 - read (I/O) error
 *
 * The "RV" keyword carries a checksum byte; if it does not validate
 * the read-only data is discarded.  On an I/O error everything parsed
 * so far is freed again.  cfg->vpd.vpd_cached is always set on return
 * so the (slow) parse is attempted at most once per device.
 */
static void
pci_read_vpd(device_t pcib, pcicfgregs *cfg)
{
	struct vpd_readstate vrs;
	int state;
	int name;
	int remain;
	int i;
	int alloc, off;		/* alloc/off for RO/W arrays */
	int cksumvalid;
	int dflen;
	uint8_t byte;
	uint8_t byte2;

	/* init vpd reader */
	vrs.bytesinval = 0;
	vrs.off = 0;
	vrs.pcib = pcib;
	vrs.cfg = cfg;
	vrs.cksum = 0;

	state = 0;
	name = remain = i = 0;	/* shut up stupid gcc */
	alloc = off = 0;	/* shut up stupid gcc */
	dflen = 0;		/* shut up stupid gcc */
	cksumvalid = -1;	/* -1 = not yet seen, 0 = bad, 1 = good */
	while (state >= 0) {
		if (vpd_nextbyte(&vrs, &byte)) {
			state = -2;
			break;
		}
#if 0
		kprintf("vpd: val: %#x, off: %d, bytesinval: %d, byte: %#hhx, " \
		    "state: %d, remain: %d, name: %#x, i: %d\n", vrs.val,
		    vrs.off, vrs.bytesinval, byte, state, remain, name, i);
#endif
		switch (state) {
		case 0:		/* item name */
			if (byte & 0x80) {
				/* large resource tag: 16-bit length follows */
				if (vpd_nextbyte(&vrs, &byte2)) {
					state = -2;
					break;
				}
				remain = byte2;
				if (vpd_nextbyte(&vrs, &byte2)) {
					state = -2;
					break;
				}
				remain |= byte2 << 8;
				/* VPD is limited to 0x7f 32-bit words total */
				if (remain > (0x7f*4 - vrs.off)) {
					state = -1;
					kprintf(
					    "pci%d:%d:%d:%d: invalid VPD data, remain %#x\n",
					    cfg->domain, cfg->bus, cfg->slot,
					    cfg->func, remain);
				}
				name = byte & 0x7f;
			} else {
				/* small resource tag: 3-bit length in tag */
				remain = byte & 0x7;
				name = (byte >> 3) & 0xf;
			}
			switch (name) {
			case 0x2:	/* String */
				cfg->vpd.vpd_ident = kmalloc(remain + 1,
				    M_DEVBUF, M_WAITOK);
				i = 0;
				state = 1;
				break;
			case 0xf:	/* End */
				state = -1;
				break;
			case 0x10:	/* VPD-R */
				alloc = 8;
				off = 0;
				cfg->vpd.vpd_ros = kmalloc(alloc *
				    sizeof(*cfg->vpd.vpd_ros), M_DEVBUF,
				    M_WAITOK | M_ZERO);
				state = 2;
				break;
			case 0x11:	/* VPD-W */
				alloc = 8;
				off = 0;
				cfg->vpd.vpd_w = kmalloc(alloc *
				    sizeof(*cfg->vpd.vpd_w), M_DEVBUF,
				    M_WAITOK | M_ZERO);
				state = 5;
				break;
			default:	/* Invalid data, abort */
				state = -1;
				break;
			}
			break;

		case 1:	/* Identifier String */
			cfg->vpd.vpd_ident[i++] = byte;
			remain--;
			if (remain == 0)  {
				cfg->vpd.vpd_ident[i] = '\0';
				state = 0;
			}
			break;

		case 2:	/* VPD-R Keyword Header */
			/* grow the array by doubling when it fills up */
			if (off == alloc) {
				cfg->vpd.vpd_ros = krealloc(cfg->vpd.vpd_ros,
				    (alloc *= 2) * sizeof(*cfg->vpd.vpd_ros),
				    M_DEVBUF, M_WAITOK | M_ZERO);
			}
			cfg->vpd.vpd_ros[off].keyword[0] = byte;
			if (vpd_nextbyte(&vrs, &byte2)) {
				state = -2;
				break;
			}
			cfg->vpd.vpd_ros[off].keyword[1] = byte2;
			if (vpd_nextbyte(&vrs, &byte2)) {
				state = -2;
				break;
			}
			dflen = byte2;
			if (dflen == 0 &&
			    strncmp(cfg->vpd.vpd_ros[off].keyword, "RV",
			    2) == 0) {
				/*
				 * if this happens, we can't trust the rest
				 * of the VPD.
				 */
				kprintf(
				    "pci%d:%d:%d:%d: bad keyword length: %d\n",
				    cfg->domain, cfg->bus, cfg->slot,
				    cfg->func, dflen);
				cksumvalid = 0;
				state = -1;
				break;
			} else if (dflen == 0) {
				cfg->vpd.vpd_ros[off].value = kmalloc(1 *
				    sizeof(*cfg->vpd.vpd_ros[off].value),
				    M_DEVBUF, M_WAITOK);
				cfg->vpd.vpd_ros[off].value[0] = '\x00';
			} else
				cfg->vpd.vpd_ros[off].value = kmalloc(
				    (dflen + 1) *
				    sizeof(*cfg->vpd.vpd_ros[off].value),
				    M_DEVBUF, M_WAITOK);
			remain -= 3;	/* keyword (2) + length byte (1) */
			i = 0;
			/* keep in sync w/ state 3's transitions */
			if (dflen == 0 && remain == 0)
				state = 0;
			else if (dflen == 0)
				state = 2;
			else
				state = 3;
			break;

		case 3:	/* VPD-R Keyword Value */
			cfg->vpd.vpd_ros[off].value[i++] = byte;
			/* "RV" value's first byte completes the checksum */
			if (strncmp(cfg->vpd.vpd_ros[off].keyword,
			    "RV", 2) == 0 && cksumvalid == -1) {
				if (vrs.cksum == 0)
					cksumvalid = 1;
				else {
					if (bootverbose)
						kprintf(
						    "pci%d:%d:%d:%d: bad VPD cksum, remain %hhu\n",
						    cfg->domain, cfg->bus,
						    cfg->slot, cfg->func,
						    vrs.cksum);
					cksumvalid = 0;
					state = -1;
					break;
				}
			}
			dflen--;
			remain--;
			/* keep in sync w/ state 2's transitions */
			if (dflen == 0)
				cfg->vpd.vpd_ros[off++].value[i++] = '\0';
			if (dflen == 0 && remain == 0) {
				/* end of VPD-R: shrink array to final size */
				cfg->vpd.vpd_rocnt = off;
				cfg->vpd.vpd_ros = krealloc(cfg->vpd.vpd_ros,
				    off * sizeof(*cfg->vpd.vpd_ros),
				    M_DEVBUF, M_WAITOK | M_ZERO);
				state = 0;
			} else if (dflen == 0)
				state = 2;
			break;

		case 4:
			/* skip remaining bytes of an unhandled item */
			remain--;
			if (remain == 0)
				state = 0;
			break;

		case 5:	/* VPD-W Keyword Header */
			if (off == alloc) {
				cfg->vpd.vpd_w = krealloc(cfg->vpd.vpd_w,
				    (alloc *= 2) * sizeof(*cfg->vpd.vpd_w),
				    M_DEVBUF, M_WAITOK | M_ZERO);
			}
			cfg->vpd.vpd_w[off].keyword[0] = byte;
			if (vpd_nextbyte(&vrs, &byte2)) {
				state = -2;
				break;
			}
			cfg->vpd.vpd_w[off].keyword[1] = byte2;
			if (vpd_nextbyte(&vrs, &byte2)) {
				state = -2;
				break;
			}
			cfg->vpd.vpd_w[off].len = dflen = byte2;
			/* record config-space offset of the writable value */
			cfg->vpd.vpd_w[off].start = vrs.off - vrs.bytesinval;
			cfg->vpd.vpd_w[off].value = kmalloc((dflen + 1) *
			    sizeof(*cfg->vpd.vpd_w[off].value),
			    M_DEVBUF, M_WAITOK);
			remain -= 3;
			i = 0;
			/* keep in sync w/ state 6's transitions */
			if (dflen == 0 && remain == 0)
				state = 0;
			else if (dflen == 0)
				state = 5;
			else
				state = 6;
			break;

		case 6:	/* VPD-W Keyword Value */
			cfg->vpd.vpd_w[off].value[i++] = byte;
			dflen--;
			remain--;
			/* keep in sync w/ state 5's transitions */
			if (dflen == 0)
				cfg->vpd.vpd_w[off++].value[i++] = '\0';
			if (dflen == 0 && remain == 0) {
				cfg->vpd.vpd_wcnt = off;
				cfg->vpd.vpd_w = krealloc(cfg->vpd.vpd_w,
				    off * sizeof(*cfg->vpd.vpd_w),
				    M_DEVBUF, M_WAITOK | M_ZERO);
				state = 0;
			} else if (dflen == 0)
				state = 5;
			break;

		default:
			kprintf("pci%d:%d:%d:%d: invalid state: %d\n",
			    cfg->domain, cfg->bus, cfg->slot, cfg->func,
			    state);
			state = -1;
			break;
		}
	}

	if (cksumvalid == 0 || state < -1) {
		/* read-only data bad, clean up */
		if (cfg->vpd.vpd_ros != NULL) {
			for (off = 0; cfg->vpd.vpd_ros[off].value; off++)
				kfree(cfg->vpd.vpd_ros[off].value, M_DEVBUF);
			kfree(cfg->vpd.vpd_ros, M_DEVBUF);
			cfg->vpd.vpd_ros = NULL;
		}
	}
	if (state < -1) {
		/* I/O error, clean up */
		kprintf("pci%d:%d:%d:%d: failed to read VPD data.\n",
		    cfg->domain, cfg->bus, cfg->slot, cfg->func);
		if (cfg->vpd.vpd_ident != NULL) {
			kfree(cfg->vpd.vpd_ident, M_DEVBUF);
			cfg->vpd.vpd_ident = NULL;
		}
		if (cfg->vpd.vpd_w != NULL) {
			for (off = 0; cfg->vpd.vpd_w[off].value; off++)
				kfree(cfg->vpd.vpd_w[off].value, M_DEVBUF);
			kfree(cfg->vpd.vpd_w, M_DEVBUF);
			cfg->vpd.vpd_w = NULL;
		}
	}
	cfg->vpd.vpd_cached = 1;
#undef REG
#undef WREG
}
1259
1260int
1261pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr)
1262{
1263 struct pci_devinfo *dinfo = device_get_ivars(child);
1264 pcicfgregs *cfg = &dinfo->cfg;
1265
1266 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1267 pci_read_vpd(device_get_parent(dev), cfg);
1268
1269 *identptr = cfg->vpd.vpd_ident;
1270
1271 if (*identptr == NULL)
1272 return (ENXIO);
1273
1274 return (0);
1275}
1276
1277int
1278pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw,
1279 const char **vptr)
1280{
1281 struct pci_devinfo *dinfo = device_get_ivars(child);
1282 pcicfgregs *cfg = &dinfo->cfg;
1283 int i;
1284
1285 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1286 pci_read_vpd(device_get_parent(dev), cfg);
1287
1288 for (i = 0; i < cfg->vpd.vpd_rocnt; i++)
1289 if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword,
1290 sizeof(cfg->vpd.vpd_ros[i].keyword)) == 0) {
1291 *vptr = cfg->vpd.vpd_ros[i].value;
1292 }
1293
1294 if (i != cfg->vpd.vpd_rocnt)
1295 return (0);
1296
1297 *vptr = NULL;
1298 return (ENXIO);
1299}
1300
1301/*
1302 * Return the offset in configuration space of the requested extended
1303 * capability entry or 0 if the specified capability was not found.
1304 */
1305int
1306pci_find_extcap_method(device_t dev, device_t child, int capability,
1307 int *capreg)
1308{
1309 struct pci_devinfo *dinfo = device_get_ivars(child);
1310 pcicfgregs *cfg = &dinfo->cfg;
1311 u_int32_t status;
1312 u_int8_t ptr;
1313
1314 /*
1315 * Check the CAP_LIST bit of the PCI status register first.
1316 */
1317 status = pci_read_config(child, PCIR_STATUS, 2);
1318 if (!(status & PCIM_STATUS_CAPPRESENT))
1319 return (ENXIO);
1320
1321 /*
1322 * Determine the start pointer of the capabilities list.
1323 */
1324 switch (cfg->hdrtype & PCIM_HDRTYPE) {
1325 case 0:
1326 case 1:
1327 ptr = PCIR_CAP_PTR;
1328 break;
1329 case 2:
1330 ptr = PCIR_CAP_PTR_2;
1331 break;
1332 default:
1333 /* XXX: panic? */
1334 return (ENXIO); /* no extended capabilities support */
1335 }
1336 ptr = pci_read_config(child, ptr, 1);
1337
1338 /*
1339 * Traverse the capabilities list.
1340 */
1341 while (ptr != 0) {
1342 if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
1343 if (capreg != NULL)
1344 *capreg = ptr;
1345 return (0);
1346 }
1347 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1348 }
1349
1350 return (ENOENT);
1351}
1352
1353/*
1354 * Support for MSI-X message interrupts.
1355 */
/*
 * Program the MSI-X table entry at `index' with the given message
 * address and data, then set up any required MSI -> HyperTransport
 * mapping.  Each table entry is 16 bytes: address low, address high,
 * message data, vector control.
 */
void
pci_enable_msix(device_t dev, u_int index, uint64_t address, uint32_t data)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	uint32_t offset;

	KASSERT(msix->msix_table_len > index, ("bogus index"));
	offset = msix->msix_table_offset + index * 16;
	/* address low/high dwords, then the message data */
	bus_write_4(msix->msix_table_res, offset, address & 0xffffffff);
	bus_write_4(msix->msix_table_res, offset + 4, address >> 32);
	bus_write_4(msix->msix_table_res, offset + 8, data);

	/* Enable MSI -> HT mapping. */
	pci_ht_map_msi(dev, address);
}
1372
/*
 * Mask the MSI-X message at `index' by setting the Mask bit in the
 * entry's vector control dword (offset 12 within the 16-byte entry).
 * The register is written only if the bit is not already set.
 */
void
pci_mask_msix(device_t dev, u_int index)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	uint32_t offset, val;

	/*
	 * NOTE: bounds-checked against msix_msgnum rather than
	 * msix_table_len because pci_alloc_msix_method() masks all
	 * supported messages before the virtual table is sized.
	 */
	KASSERT(msix->msix_msgnum > index, ("bogus index"));
	offset = msix->msix_table_offset + index * 16 + 12;
	val = bus_read_4(msix->msix_table_res, offset);
	if (!(val & PCIM_MSIX_VCTRL_MASK)) {
		val |= PCIM_MSIX_VCTRL_MASK;
		bus_write_4(msix->msix_table_res, offset, val);
	}
}
1388
/*
 * Unmask the MSI-X message at `index' by clearing the Mask bit in
 * the entry's vector control dword.  The register is written only
 * if the bit is currently set.
 */
void
pci_unmask_msix(device_t dev, u_int index)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	uint32_t offset, val;

	KASSERT(msix->msix_table_len > index, ("bogus index"));
	offset = msix->msix_table_offset + index * 16 + 12;
	val = bus_read_4(msix->msix_table_res, offset);
	if (val & PCIM_MSIX_VCTRL_MASK) {
		val &= ~PCIM_MSIX_VCTRL_MASK;
		bus_write_4(msix->msix_table_res, offset, val);
	}
}
1404
1405int
1406pci_pending_msix(device_t dev, u_int index)
1407{
1408 struct pci_devinfo *dinfo = device_get_ivars(dev);
1409 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1410 uint32_t offset, bit;
1411
1412 KASSERT(msix->msix_table_len > index, ("bogus index"));
1413 offset = msix->msix_pba_offset + (index / 32) * 4;
1414 bit = 1 << index % 32;
1415 return (bus_read_4(msix->msix_pba_res, offset) & bit);
1416}
1417
1418/*
1419 * Restore MSI-X registers and table during resume. If MSI-X is
1420 * enabled then walk the virtual table to restore the actual MSI-X
1421 * table.
1422 */
/*
 * Restore MSI-X registers and table during resume.  If MSI-X is
 * enabled then walk the virtual table to restore the actual MSI-X
 * table: first mask every vector, then reprogram and unmask only the
 * messages that have at least one handler attached.  Finally restore
 * the saved MSI-X control register.
 */
static void
pci_resume_msix(device_t dev)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	struct msix_table_entry *mte;
	struct msix_vector *mv;
	int i;

	if (msix->msix_alloc > 0) {
		/* First, mask all vectors. */
		for (i = 0; i < msix->msix_msgnum; i++)
			pci_mask_msix(dev, i);

		/* Second, program any messages with at least one handler. */
		for (i = 0; i < msix->msix_table_len; i++) {
			mte = &msix->msix_table[i];
			if (mte->mte_vector == 0 || mte->mte_handlers == 0)
				continue;
			/* mte_vector is 1-based; msix_vectors[] is 0-based */
			mv = &msix->msix_vectors[mte->mte_vector - 1];
			pci_enable_msix(dev, i, mv->mv_address, mv->mv_data);
			pci_unmask_msix(dev, i);
		}
	}
	pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
	    msix->msix_ctrl, 2);
}
1450
1451/*
1452 * Attempt to allocate *count MSI-X messages. The actual number allocated is
1453 * returned in *count. After this function returns, each message will be
1454 * available to the driver as SYS_RES_IRQ resources starting at rid 1.
1455 */
/*
 * Attempt to allocate *count MSI-X messages for `child'.  The actual
 * number allocated is returned in *count; each message then becomes
 * available to the driver as a SYS_RES_IRQ resource starting at rid 1.
 *
 * Returns 0 on success, EINVAL for a zero request, ENODEV when the
 * device lacks a usable MSI-X capability, and ENXIO when rid 0 is in
 * use, messages are already allocated, MSI is blacklisted, or the
 * MSI-X table/PBA BARs are not mapped and active.
 */
int
pci_alloc_msix_method(device_t dev, device_t child, int *count)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	pcicfgregs *cfg = &dinfo->cfg;
	struct resource_list_entry *rle;
	int actual, error, i, irq, max;

	/* Don't let count == 0 get us into trouble. */
	if (*count == 0)
		return (EINVAL);

	/* If rid 0 is allocated, then fail. */
	rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
	if (rle != NULL && rle->res != NULL)
		return (ENXIO);

	/* Already have allocated messages? */
	if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
		return (ENXIO);

	/* If MSI is blacklisted for this system, fail. */
	if (pci_msi_blacklisted())
		return (ENXIO);

	/* MSI-X capability present? */
	if (cfg->msix.msix_location == 0 || !pci_do_msix)
		return (ENODEV);

	/*
	 * Make sure the appropriate BARs are mapped.  The driver must
	 * already have allocated and activated the memory resources
	 * that back the MSI-X table and the pending bit array (PBA).
	 */
	rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
	    cfg->msix.msix_table_bar);
	if (rle == NULL || rle->res == NULL ||
	    !(rman_get_flags(rle->res) & RF_ACTIVE))
		return (ENXIO);
	cfg->msix.msix_table_res = rle->res;
	if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
		rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
		    cfg->msix.msix_pba_bar);
		if (rle == NULL || rle->res == NULL ||
		    !(rman_get_flags(rle->res) & RF_ACTIVE))
			return (ENXIO);
	}
	/* rle still refers to the table BAR when table and PBA share it */
	cfg->msix.msix_pba_res = rle->res;

	if (bootverbose)
		device_printf(child,
		    "attempting to allocate %d MSI-X vectors (%d supported)\n",
		    *count, cfg->msix.msix_msgnum);
	/* Never ask the bridge for more messages than the device supports. */
	max = min(*count, cfg->msix.msix_msgnum);
	for (i = 0; i < max; i++) {
		/* Allocate a message. */
		error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq);
		if (error)
			break;
		resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
		    irq, 1);
	}
	actual = i;

	if (bootverbose) {
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1);
		if (actual == 1)
			device_printf(child, "using IRQ %lu for MSI-X\n",
			    rle->start);
		else {
			int run;

			/*
			 * Be fancy and try to print contiguous runs of
			 * IRQ values as ranges.  'irq' is the previous IRQ.
			 * 'run' is true if we are in a range.
			 */
			device_printf(child, "using IRQs %lu", rle->start);
			irq = rle->start;
			run = 0;
			for (i = 1; i < actual; i++) {
				rle = resource_list_find(&dinfo->resources,
				    SYS_RES_IRQ, i + 1);

				/* Still in a run? */
				if (rle->start == irq + 1) {
					run = 1;
					irq++;
					continue;
				}

				/* Finish previous range. */
				if (run) {
					kprintf("-%d", irq);
					run = 0;
				}

				/* Start new range. */
				kprintf(",%lu", rle->start);
				irq = rle->start;
			}

			/* Unfinished range? */
			if (run)
				kprintf("-%d", irq);
			kprintf(" for MSI-X\n");
		}
	}

	/* Mask all vectors. */
	for (i = 0; i < cfg->msix.msix_msgnum; i++)
		pci_mask_msix(child, i);

	/* Allocate and initialize vector data and virtual table. */
	cfg->msix.msix_vectors = kmalloc(sizeof(struct msix_vector) * actual,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	cfg->msix.msix_table = kmalloc(sizeof(struct msix_table_entry) * actual,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < actual; i++) {
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
		cfg->msix.msix_vectors[i].mv_irq = rle->start;
		cfg->msix.msix_table[i].mte_vector = i + 1;
	}

	/* Update control register to enable MSI-X. */
	cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
	pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
	    cfg->msix.msix_ctrl, 2);

	/* Update counts of alloc'd messages. */
	cfg->msix.msix_alloc = actual;
	cfg->msix.msix_table_len = actual;
	*count = actual;
	return (0);
}
1587
1588/*
1589 * By default, pci_alloc_msix() will assign the allocated IRQ
1590 * resources consecutively to the first N messages in the MSI-X table.
1591 * However, device drivers may want to use different layouts if they
1592 * either receive fewer messages than they asked for, or they wish to
1593 * populate the MSI-X table sparsely. This method allows the driver
1594 * to specify what layout it wants. It must be called after a
1595 * successful pci_alloc_msix() but before any of the associated
1596 * SYS_RES_IRQ resources are allocated via bus_alloc_resource().
1597 *
1598 * The 'vectors' array contains 'count' message vectors. The array
1599 * maps directly to the MSI-X table in that index 0 in the array
1600 * specifies the vector for the first message in the MSI-X table, etc.
1601 * The vector value in each array index can either be 0 to indicate
1602 * that no vector should be assigned to a message slot, or it can be a
1603 * number from 1 to N (where N is the count returned from a
 * successful call to pci_alloc_msix()) to indicate which message
1605 * vector (IRQ) to be used for the corresponding message.
1606 *
1607 * On successful return, each message with a non-zero vector will have
1608 * an associated SYS_RES_IRQ whose rid is equal to the array index +
1609 * 1. Additionally, if any of the IRQs allocated via the previous
1610 * call to pci_alloc_msix() are not used in the mapping, those IRQs
1611 * will be kfreed back to the system automatically.
1612 *
1613 * For example, suppose a driver has a MSI-X table with 6 messages and
1614 * asks for 6 messages, but pci_alloc_msix() only returns a count of
1615 * 3. Call the three vectors allocated by pci_alloc_msix() A, B, and
1616 * C. After the call to pci_alloc_msix(), the device will be setup to
1617 * have an MSI-X table of ABC--- (where - means no vector assigned).
 * If the driver then passes a vector array of { 1, 0, 1, 2, 0, 2 },
1619 * then the MSI-X table will look like A-AB-B, and the 'C' vector will
1620 * be kfreed back to the system. This device will also have valid
1621 * SYS_RES_IRQ rids of 1, 3, 4, and 6.
1622 *
1623 * In any case, the SYS_RES_IRQ rid X will always map to the message
1624 * at MSI-X table index X - 1 and will only be valid if a vector is
1625 * assigned to that table entry.
1626 */
1627int
1628pci_remap_msix_method(device_t dev, device_t child, int count,
1629 const u_int *vectors)
1630{
1631 struct pci_devinfo *dinfo = device_get_ivars(child);
1632 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1633 struct resource_list_entry *rle;
1634 int i, irq, j, *used;
1635
1636 /*
1637 * Have to have at least one message in the table but the
1638 * table can't be bigger than the actual MSI-X table in the
1639 * device.
1640 */
1641 if (count == 0 || count > msix->msix_msgnum)
1642 return (EINVAL);
1643
1644 /* Sanity check the vectors. */
1645 for (i = 0; i < count; i++)
1646 if (vectors[i] > msix->msix_alloc)
1647 return (EINVAL);
1648
1649 /*
1650 * Make sure there aren't any holes in the vectors to be used.
1651 * It's a big pain to support it, and it doesn't really make
1652 * sense anyway. Also, at least one vector must be used.
1653 */
1654 used = kmalloc(sizeof(int) * msix->msix_alloc, M_DEVBUF, M_WAITOK |
1655 M_ZERO);
1656 for (i = 0; i < count; i++)
1657 if (vectors[i] != 0)
1658 used[vectors[i] - 1] = 1;
1659 for (i = 0; i < msix->msix_alloc - 1; i++)
1660 if (used[i] == 0 && used[i + 1] == 1) {
1661 kfree(used, M_DEVBUF);
1662 return (EINVAL);
1663 }
1664 if (used[0] != 1) {
1665 kfree(used, M_DEVBUF);
1666 return (EINVAL);
1667 }
1668
1669 /* Make sure none of the resources are allocated. */
1670 for (i = 0; i < msix->msix_table_len; i++) {
1671 if (msix->msix_table[i].mte_vector == 0)
1672 continue;
1673 if (msix->msix_table[i].mte_handlers > 0)
1674 return (EBUSY);
1675 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1676 KASSERT(rle != NULL, ("missing resource"));
1677 if (rle->res != NULL)
1678 return (EBUSY);
1679 }
1680
1681 /* Free the existing resource list entries. */
1682 for (i = 0; i < msix->msix_table_len; i++) {
1683 if (msix->msix_table[i].mte_vector == 0)
1684 continue;
1685 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1686 }
1687
1688 /*
1689 * Build the new virtual table keeping track of which vectors are
1690 * used.
1691 */
1692 kfree(msix->msix_table, M_DEVBUF);
1693 msix->msix_table = kmalloc(sizeof(struct msix_table_entry) * count,
1694 M_DEVBUF, M_WAITOK | M_ZERO);
1695 for (i = 0; i < count; i++)
1696 msix->msix_table[i].mte_vector = vectors[i];
1697 msix->msix_table_len = count;
1698
1699 /* Free any unused IRQs and resize the vectors array if necessary. */
1700 j = msix->msix_alloc - 1;
1701 if (used[j] == 0) {
1702 struct msix_vector *vec;
1703
1704 while (used[j] == 0) {
1705 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1706 msix->msix_vectors[j].mv_irq);
1707 j--;
1708 }
1709 vec = kmalloc(sizeof(struct msix_vector) * (j + 1), M_DEVBUF,
1710 M_WAITOK);
1711 bcopy(msix->msix_vectors, vec, sizeof(struct msix_vector) *
1712 (j + 1));
1713 kfree(msix->msix_vectors, M_DEVBUF);
1714 msix->msix_vectors = vec;
1715 msix->msix_alloc = j + 1;
1716 }
1717 kfree(used, M_DEVBUF);
1718
1719 /* Map the IRQs onto the rids. */
1720 for (i = 0; i < count; i++) {
1721 if (vectors[i] == 0)
1722 continue;
1723 irq = msix->msix_vectors[vectors[i]].mv_irq;
1724 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
1725 irq, 1);
1726 }
1727
1728 if (bootverbose) {
1729 device_printf(child, "Remapped MSI-X IRQs as: ");
1730 for (i = 0; i < count; i++) {
1731 if (i != 0)
1732 kprintf(", ");
1733 if (vectors[i] == 0)
1734 kprintf("---");
1735 else
1736 kprintf("%d",
1737 msix->msix_vectors[vectors[i]].mv_irq);
1738 }
1739 kprintf("\n");
1740 }
1741
1742 return (0);
1743}
1744
1745static int
1746pci_release_msix(device_t dev, device_t child)
1747{
1748 struct pci_devinfo *dinfo = device_get_ivars(child);
1749 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1750 struct resource_list_entry *rle;
1751 int i;
1752
1753 /* Do we have any messages to release? */
1754 if (msix->msix_alloc == 0)
1755 return (ENODEV);
1756
1757 /* Make sure none of the resources are allocated. */
1758 for (i = 0; i < msix->msix_table_len; i++) {
1759 if (msix->msix_table[i].mte_vector == 0)
1760 continue;
1761 if (msix->msix_table[i].mte_handlers > 0)
1762 return (EBUSY);
1763 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1764 KASSERT(rle != NULL, ("missing resource"));
1765 if (rle->res != NULL)
1766 return (EBUSY);
1767 }
1768
1769 /* Update control register to disable MSI-X. */
1770 msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE;
1771 pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL,
1772 msix->msix_ctrl, 2);
1773
1774 /* Free the resource list entries. */
1775 for (i = 0; i < msix->msix_table_len; i++) {
1776 if (msix->msix_table[i].mte_vector == 0)
1777 continue;
1778 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1779 }
1780 kfree(msix->msix_table, M_DEVBUF);
1781 msix->msix_table_len = 0;
1782
1783 /* Release the IRQs. */
1784 for (i = 0; i < msix->msix_alloc; i++)
1785 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1786 msix->msix_vectors[i].mv_irq);
1787 kfree(msix->msix_vectors, M_DEVBUF);
1788 msix->msix_alloc = 0;
1789 return (0);
1790}
1791
1792/*
1793 * Return the max supported MSI-X messages this device supports.
1794 * Basically, assuming the MD code can alloc messages, this function
1795 * should return the maximum value that pci_alloc_msix() can return.
1796 * Thus, it is subject to the tunables, etc.
1797 */
1798int
1799pci_msix_count_method(device_t dev, device_t child)
1800{
1801 struct pci_devinfo *dinfo = device_get_ivars(child);
1802 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1803
1804 if (pci_do_msix && msix->msix_location != 0)
1805 return (msix->msix_msgnum);
1806 return (0);
1807}
1808
1809/*
1810 * HyperTransport MSI mapping control
1811 */
1812void
1813pci_ht_map_msi(device_t dev, uint64_t addr)
1814{
1815 struct pci_devinfo *dinfo = device_get_ivars(dev);
1816 struct pcicfg_ht *ht = &dinfo->cfg.ht;
1817
1818 if (!ht->ht_msimap)
1819 return;
1820
1821 if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) &&
1822 ht->ht_msiaddr >> 20 == addr >> 20) {
1823 /* Enable MSI -> HT mapping. */
1824 ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
1825 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1826 ht->ht_msictrl, 2);
1827 }
1828
1829 if (!addr && ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) {
1830 /* Disable MSI -> HT mapping. */
1831 ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE;
1832 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1833 ht->ht_msictrl, 2);
1834 }
1835}
1836
1837/*
1838 * Support for MSI message signalled interrupts.
1839 */
/*
 * Program the device's MSI capability with the given message address
 * and data, then set the MSI enable bit and any required MSI -> HT
 * mapping.  The data register's offset depends on whether the
 * capability implements 64-bit addressing.
 */
void
pci_enable_msi(device_t dev, uint64_t address, uint16_t data)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msi *msi = &dinfo->cfg.msi;

	/* Write data and address values. */
	pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
	    address & 0xffffffff, 4);
	if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
		pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR_HIGH,
		    address >> 32, 4);
		pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA_64BIT,
		    data, 2);
	} else
		pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA, data,
		    2);

	/* Enable MSI in the control register. */
	msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE;
	pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
	    2);

	/* Enable MSI -> HT mapping. */
	pci_ht_map_msi(dev, address);
}
1866
/*
 * Disable MSI for the device: tear down the MSI -> HT mapping first
 * (by passing a zero address), then clear the enable bit in the MSI
 * control register.
 */
void
pci_disable_msi(device_t dev)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msi *msi = &dinfo->cfg.msi;

	/* Disable MSI -> HT mapping. */
	pci_ht_map_msi(dev, 0);

	/* Disable MSI in the control register. */
	msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE;
	pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
	    2);
}
1881
1882/*
1883 * Restore MSI registers during resume. If MSI is enabled then
1884 * restore the data and address registers in addition to the control
1885 * register.
1886 */
/*
 * Restore MSI registers during resume.  If MSI is enabled then
 * restore the data and address registers in addition to the control
 * register; the control register itself is always rewritten from the
 * cached copy.
 */
static void
pci_resume_msi(device_t dev)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msi *msi = &dinfo->cfg.msi;
	uint64_t address;
	uint16_t data;

	if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) {
		/* replay the cached address/data programmed before suspend */
		address = msi->msi_addr;
		data = msi->msi_data;
		pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
		    address & 0xffffffff, 4);
		if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
			pci_write_config(dev, msi->msi_location +
			    PCIR_MSI_ADDR_HIGH, address >> 32, 4);
			pci_write_config(dev, msi->msi_location +
			    PCIR_MSI_DATA_64BIT, data, 2);
		} else
			pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA,
			    data, 2);
	}
	pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
	    2);
}
1912
/*
 * Rebind the MSI or MSI-X message currently routed to `irq' after the
 * interrupt has been moved (e.g. to another CPU): ask the parent
 * bridge for fresh address/data values and reprogram the device.
 * Returns 0 on success, ENOENT when `irq' is not one of the device's
 * messages, or the error from PCIB_MAP_MSI().
 */
int
pci_remap_msi_irq(device_t dev, u_int irq)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	pcicfgregs *cfg = &dinfo->cfg;
	struct resource_list_entry *rle;
	struct msix_table_entry *mte;
	struct msix_vector *mv;
	device_t bus;
	uint64_t addr;
	uint32_t data;
	int error, i, j;

	bus = device_get_parent(dev);

	/*
	 * Handle MSI first.  We try to find this IRQ among our list
	 * of MSI IRQs.  If we find it, we request updated address and
	 * data registers and apply the results.
	 */
	if (cfg->msi.msi_alloc > 0) {

		/* If we don't have any active handlers, nothing to do. */
		if (cfg->msi.msi_handlers == 0)
			return (0);
		for (i = 0; i < cfg->msi.msi_alloc; i++) {
			/* MSI rids start at 1 */
			rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ,
			    i + 1);
			if (rle->start == irq) {
				error = PCIB_MAP_MSI(device_get_parent(bus),
				    dev, irq, &addr, &data);
				if (error)
					return (error);
				/* disable, cache new values, re-enable */
				pci_disable_msi(dev);
				dinfo->cfg.msi.msi_addr = addr;
				dinfo->cfg.msi.msi_data = data;
				pci_enable_msi(dev, addr, data);
				return (0);
			}
		}
		return (ENOENT);
	}

	/*
	 * For MSI-X, we check to see if we have this IRQ.  If we do,
	 * we request the updated mapping info.  If that works, we go
	 * through all the slots that use this IRQ and update them.
	 */
	if (cfg->msix.msix_alloc > 0) {
		for (i = 0; i < cfg->msix.msix_alloc; i++) {
			mv = &cfg->msix.msix_vectors[i];
			if (mv->mv_irq == irq) {
				error = PCIB_MAP_MSI(device_get_parent(bus),
				    dev, irq, &addr, &data);
				if (error)
					return (error);
				mv->mv_address = addr;
				mv->mv_data = data;
				/* reprogram every active slot on this IRQ */
				for (j = 0; j < cfg->msix.msix_table_len; j++) {
					mte = &cfg->msix.msix_table[j];
					if (mte->mte_vector != i + 1)
						continue;
					if (mte->mte_handlers == 0)
						continue;
					pci_mask_msix(dev, j);
					pci_enable_msix(dev, j, addr, data);
					pci_unmask_msix(dev, j);
				}
			}
		}
		return (ENOENT);
	}

	return (ENOENT);
}
1988
1989/*
1990 * Returns true if the specified device is blacklisted because MSI
1991 * doesn't work.
1992 */
1993int
1994pci_msi_device_blacklisted(device_t dev)
1995{
1996 struct pci_quirk *q;
1997
1998 if (!pci_honor_msi_blacklist)
1999 return (0);
2000
2001 for (q = &pci_quirks[0]; q->devid; q++) {
2002 if (q->devid == pci_get_devid(dev) &&
2003 q->type == PCI_QUIRK_DISABLE_MSI)
2004 return (1);
2005 }
2006 return (0);
2007}
2008
2009/*
 * Determine if MSI is blacklisted globally on this system.  Currently,
2011 * we just check for blacklisted chipsets as represented by the
2012 * host-PCI bridge at device 0:0:0. In the future, it may become
2013 * necessary to check other system attributes, such as the kenv values
2014 * that give the motherboard manufacturer and model number.
2015 */
2016static int
2017pci_msi_blacklisted(void)
2018{
2019 device_t dev;
2020
2021 if (!pci_honor_msi_blacklist)
2022 return (0);
2023
2024 /* Blacklist all non-PCI-express and non-PCI-X chipsets. */
2025 if (!(pcie_chipset || pcix_chipset))
2026 return (1);
2027
2028 dev = pci_find_bsf(0, 0, 0);
2029 if (dev != NULL)
2030 return (pci_msi_device_blacklisted(dev));
2031 return (0);
2032}
2033
2034/*
2035 * Attempt to allocate *count MSI messages. The actual number allocated is
2036 * returned in *count. After this function returns, each message will be
2037 * available to the driver as SYS_RES_IRQ resources starting at a rid 1.
2038 */
/*
 * Attempt to allocate *count MSI messages for `child'; the actual
 * number allocated (a power of two, at most 32) is returned in
 * *count and exposed as SYS_RES_IRQ resources starting at rid 1.
 * On failure the parent bridge is retried with successively halved
 * message counts down to 1.
 *
 * Returns 0 on success, EINVAL for a zero or non-power-of-two
 * request, ENODEV when MSI is unsupported or disabled, ENXIO when
 * rid 0 is in use, messages are already allocated, or MSI is
 * blacklisted, or the error from the final PCIB_ALLOC_MSI() attempt.
 */
int
pci_alloc_msi_method(device_t dev, device_t child, int *count)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	pcicfgregs *cfg = &dinfo->cfg;
	struct resource_list_entry *rle;
	int actual, error, i, irqs[32];
	uint16_t ctrl;

	/* Don't let count == 0 get us into trouble. */
	if (*count == 0)
		return (EINVAL);

	/* If rid 0 is allocated, then fail. */
	rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
	if (rle != NULL && rle->res != NULL)
		return (ENXIO);

	/* Already have allocated messages? */
	if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
		return (ENXIO);

	/* If MSI is blacklisted for this system, fail. */
	if (pci_msi_blacklisted())
		return (ENXIO);

	/* MSI capability present? */
	if (cfg->msi.msi_location == 0 || !pci_do_msi)
		return (ENODEV);

	if (bootverbose)
		device_printf(child,
		    "attempting to allocate %d MSI vectors (%d supported)\n",
		    *count, cfg->msi.msi_msgnum);

	/* Don't ask for more than the device supports. */
	actual = min(*count, cfg->msi.msi_msgnum);

	/* Don't ask for more than 32 messages. */
	actual = min(actual, 32);

	/* MSI requires power of 2 number of messages. */
	if (!powerof2(actual))
		return (EINVAL);

	for (;;) {
		/* Try to allocate N messages. */
		error = PCIB_ALLOC_MSI(device_get_parent(dev), child, actual,
		    cfg->msi.msi_msgnum, irqs);
		if (error == 0)
			break;
		if (actual == 1)
			return (error);

		/* Try N / 2. */
		actual >>= 1;
	}

	/*
	 * We now have N actual messages mapped onto SYS_RES_IRQ
	 * resources in the irqs[] array, so add new resources
	 * starting at rid 1.
	 */
	for (i = 0; i < actual; i++)
		resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
		    irqs[i], irqs[i], 1);

	if (bootverbose) {
		if (actual == 1)
			device_printf(child, "using IRQ %d for MSI\n", irqs[0]);
		else {
			int run;

			/*
			 * Be fancy and try to print contiguous runs
			 * of IRQ values as ranges.  'run' is true if
			 * we are in a range.
			 */
			device_printf(child, "using IRQs %d", irqs[0]);
			run = 0;
			for (i = 1; i < actual; i++) {

				/* Still in a run? */
				if (irqs[i] == irqs[i - 1] + 1) {
					run = 1;
					continue;
				}

				/* Finish previous range. */
				if (run) {
					kprintf("-%d", irqs[i - 1]);
					run = 0;
				}

				/* Start new range. */
				kprintf(",%d", irqs[i]);
			}

			/* Unfinished range? */
			if (run)
				kprintf("-%d", irqs[actual - 1]);
			kprintf(" for MSI\n");
		}
	}

	/* Update control register with actual count. */
	ctrl = cfg->msi.msi_ctrl;
	ctrl &= ~PCIM_MSICTRL_MME_MASK;
	/* encode log2(actual) into the Multiple Message Enable field */
	ctrl |= (ffs(actual) - 1) << 4;
	cfg->msi.msi_ctrl = ctrl;
	pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2);

	/* Update counts of alloc'd messages. */
	cfg->msi.msi_alloc = actual;
	cfg->msi.msi_handlers = 0;
	*count = actual;
	return (0);
}
2157
/*
 * Release the MSI messages associated with this device.
 *
 * MSI-X is tried first so this method covers both flavors.  Fails with
 * EBUSY if any message still has a handler or an allocated SYS_RES_IRQ
 * resource, ENODEV if no messages are currently allocated.
 */
int
pci_release_msi_method(device_t dev, device_t child)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct pcicfg_msi *msi = &dinfo->cfg.msi;
	struct resource_list_entry *rle;
	int error, i, irqs[32];

	/* Try MSI-X first. */
	error = pci_release_msix(dev, child);
	if (error != ENODEV)
		return (error);

	/* Do we have any messages to release? */
	if (msi->msi_alloc == 0)
		return (ENODEV);
	KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages"));

	/* Make sure none of the resources are allocated. */
	if (msi->msi_handlers > 0)
		return (EBUSY);
	for (i = 0; i < msi->msi_alloc; i++) {
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
		KASSERT(rle != NULL, ("missing MSI resource"));
		if (rle->res != NULL)
			return (EBUSY);
		/* Remember each IRQ so the bridge can reclaim them below. */
		irqs[i] = rle->start;
	}

	/* Update control register with 0 count. */
	KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE),
	    ("%s: MSI still enabled", __func__));
	msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK;
	pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
	    msi->msi_ctrl, 2);

	/* Release the messages. */
	PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs);
	for (i = 0; i < msi->msi_alloc; i++)
		resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);

	/* Update alloc count. */
	msi->msi_alloc = 0;
	msi->msi_addr = 0;
	msi->msi_data = 0;
	return (0);
}
2206
2207/*
2208 * Return the max supported MSI messages this device supports.
2209 * Basically, assuming the MD code can alloc messages, this function
2210 * should return the maximum value that pci_alloc_msi() can return.
2211 * Thus, it is subject to the tunables, etc.
2212 */
2213int
2214pci_msi_count_method(device_t dev, device_t child)
2215{
2216 struct pci_devinfo *dinfo = device_get_ivars(child);
2217 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2218
2219 if (pci_do_msi && msi->msi_location != 0)
2220 return (msi->msi_msgnum);
2221 return (0);
2222}
2223
/* kfree pcicfgregs structure and all depending data structures */

int
pci_freecfg(struct pci_devinfo *dinfo)
{
	struct devlist *devlist_head;
	int i;

	devlist_head = &pci_devq;

	/* Free any cached VPD identifier and read-only/writable keywords. */
	if (dinfo->cfg.vpd.vpd_reg) {
		kfree(dinfo->cfg.vpd.vpd_ident, M_DEVBUF);
		for (i = 0; i < dinfo->cfg.vpd.vpd_rocnt; i++)
			kfree(dinfo->cfg.vpd.vpd_ros[i].value, M_DEVBUF);
		kfree(dinfo->cfg.vpd.vpd_ros, M_DEVBUF);
		for (i = 0; i < dinfo->cfg.vpd.vpd_wcnt; i++)
			kfree(dinfo->cfg.vpd.vpd_w[i].value, M_DEVBUF);
		kfree(dinfo->cfg.vpd.vpd_w, M_DEVBUF);
	}
	/* Unlink from the global device queue before freeing. */
	STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links);
	kfree(dinfo, M_DEVBUF);

	/* increment the generation count */
	pci_generation++;

	/* we're losing one device */
	pci_numdevs--;
	return (0);
}
2253
2254/*
2255 * PCI power manangement
2256 */
2257int
2258pci_set_powerstate_method(device_t dev, device_t child, int state)
2259{
2260 struct pci_devinfo *dinfo = device_get_ivars(child);
2261 pcicfgregs *cfg = &dinfo->cfg;
f4754a59
HT
2262 uint16_t status;
2263 int result, oldstate, highest, delay;
984263bc 2264
4d28e78f 2265 if (cfg->pp.pp_cap == 0)
f4754a59
HT
2266 return (EOPNOTSUPP);
2267
2268 /*
2269 * Optimize a no state change request away. While it would be OK to
2270 * write to the hardware in theory, some devices have shown odd
2271 * behavior when going from D3 -> D3.
2272 */
2273 oldstate = pci_get_powerstate(child);
2274 if (oldstate == state)
2275 return (0);
2276
2277 /*
2278 * The PCI power management specification states that after a state
2279 * transition between PCI power states, system software must
2280 * guarantee a minimal delay before the function accesses the device.
2281 * Compute the worst case delay that we need to guarantee before we
2282 * access the device. Many devices will be responsive much more
2283 * quickly than this delay, but there are some that don't respond
2284 * instantly to state changes. Transitions to/from D3 state require
2285 * 10ms, while D2 requires 200us, and D0/1 require none. The delay
2286 * is done below with DELAY rather than a sleeper function because
2287 * this function can be called from contexts where we cannot sleep.
2288 */
2289 highest = (oldstate > state) ? oldstate : state;
2290 if (highest == PCI_POWERSTATE_D3)
2291 delay = 10000;
2292 else if (highest == PCI_POWERSTATE_D2)
2293 delay = 200;
2294 else
2295 delay = 0;
4d28e78f 2296 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2)
f4754a59
HT
2297 & ~PCIM_PSTAT_DMASK;
2298 result = 0;
2299 switch (state) {
2300 case PCI_POWERSTATE_D0:
2301 status |= PCIM_PSTAT_D0;
2302 break;
2303 case PCI_POWERSTATE_D1:
4d28e78f 2304 if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0)
f4754a59
HT
2305 return (EOPNOTSUPP);
2306 status |= PCIM_PSTAT_D1;
2307 break;
2308 case PCI_POWERSTATE_D2:
4d28e78f 2309 if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0)
f4754a59
HT
2310 return (EOPNOTSUPP);
2311 status |= PCIM_PSTAT_D2;
2312 break;
2313 case PCI_POWERSTATE_D3:
2314 status |= PCIM_PSTAT_D3;
2315 break;
2316 default:
2317 return (EINVAL);
984263bc 2318 }
f4754a59
HT
2319
2320 if (bootverbose)
2321 kprintf(
4d28e78f
SZ
2322 "pci%d:%d:%d:%d: Transition from D%d to D%d\n",
2323 dinfo->cfg.domain, dinfo->cfg.bus, dinfo->cfg.slot,
2324 dinfo->cfg.func, oldstate, state);
f4754a59 2325
4d28e78f 2326 PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2);
f4754a59
HT
2327 if (delay)
2328 DELAY(delay);
2329 return (0);
984263bc
MD
2330}
2331
e126caf1 2332int
984263bc
MD
2333pci_get_powerstate_method(device_t dev, device_t child)
2334{
2335 struct pci_devinfo *dinfo = device_get_ivars(child);
2336 pcicfgregs *cfg = &dinfo->cfg;
f4754a59 2337 uint16_t status;
984263bc
MD
2338 int result;
2339
4d28e78f
SZ
2340 if (cfg->pp.pp_cap != 0) {
2341 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2);
984263bc
MD
2342 switch (status & PCIM_PSTAT_DMASK) {
2343 case PCIM_PSTAT_D0:
2344 result = PCI_POWERSTATE_D0;
2345 break;
2346 case PCIM_PSTAT_D1:
2347 result = PCI_POWERSTATE_D1;
2348 break;
2349 case PCIM_PSTAT_D2:
2350 result = PCI_POWERSTATE_D2;
2351 break;
2352 case PCIM_PSTAT_D3:
2353 result = PCI_POWERSTATE_D3;
2354 break;
2355 default:
2356 result = PCI_POWERSTATE_UNKNOWN;
2357 break;
2358 }
2359 } else {
2360 /* No support, device is always at D0 */
2361 result = PCI_POWERSTATE_D0;
2362 }
f4754a59 2363 return (result);
984263bc
MD
2364}
2365
2366/*
2367 * Some convenience functions for PCI device drivers.
2368 */
2369
2370static __inline void
4d28e78f 2371pci_set_command_bit(device_t dev, device_t child, uint16_t bit)
984263bc 2372{
4d28e78f 2373 uint16_t command;
984263bc 2374
4d28e78f
SZ
2375 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2376 command |= bit;
2377 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
984263bc
MD
2378}
2379
2380static __inline void
4d28e78f
SZ
2381pci_clear_command_bit(device_t dev, device_t child, uint16_t bit)
2382{
2383 uint16_t command;
984263bc 2384
4d28e78f
SZ
2385 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2386 command &= ~bit;
2387 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
984263bc
MD
2388}
2389
/*
 * Enable bus mastering in the child's PCI command register.
 * Always succeeds; returns 0.
 */
int
pci_enable_busmaster_method(device_t dev, device_t child)
{
	pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
	return (0);
}
984263bc 2396
/*
 * Disable bus mastering in the child's PCI command register.
 * Always succeeds; returns 0.
 */
int
pci_disable_busmaster_method(device_t dev, device_t child)
{
	pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
	return (0);
}
984263bc 2403
4d28e78f
SZ
2404int
2405pci_enable_io_method(device_t dev, device_t child, int space)
ed1bd994 2406{
4d28e78f
SZ
2407 uint16_t command;
2408 uint16_t bit;
2409 char *error;
ed1bd994 2410
4d28e78f
SZ
2411 bit = 0;
2412 error = NULL;
2413
2414 switch(space) {
2415 case SYS_RES_IOPORT:
2416 bit = PCIM_CMD_PORTEN;
2417 error = "port";
ed1bd994 2418 break;
4d28e78f
SZ
2419 case SYS_RES_MEMORY:
2420 bit = PCIM_CMD_MEMEN;
2421 error = "memory";
ed1bd994
MD
2422 break;
2423 default:
4d28e78f 2424 return (EINVAL);
ed1bd994 2425 }
4d28e78f
SZ
2426 pci_set_command_bit(dev, child, bit);
2427 /* Some devices seem to need a brief stall here, what do to? */
2428 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2429 if (command & bit)
2430 return (0);
2431 device_printf(child, "failed to enable %s mapping!\n", error);
2432 return (ENXIO);
ed1bd994 2433}
984263bc 2434
4d28e78f
SZ
2435int
2436pci_disable_io_method(device_t dev, device_t child, int space)
b4c0a845 2437{
4d28e78f
SZ
2438 uint16_t command;
2439 uint16_t bit;
2440 char *error;
b4c0a845 2441
4d28e78f
SZ
2442 bit = 0;
2443 error = NULL;
b4c0a845 2444
4d28e78f
SZ
2445 switch(space) {
2446 case SYS_RES_IOPORT:
2447 bit = PCIM_CMD_PORTEN;
2448 error = "port";
b4c0a845 2449 break;
4d28e78f
SZ
2450 case SYS_RES_MEMORY:
2451 bit = PCIM_CMD_MEMEN;
2452 error = "memory";
b4c0a845
SZ
2453 break;
2454 default:
4d28e78f 2455 return (EINVAL);
b4c0a845 2456 }
4d28e78f
SZ
2457 pci_clear_command_bit(dev, child, bit);
2458 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2459 if (command & bit) {
2460 device_printf(child, "failed to disable %s mapping!\n", error);
2461 return (ENXIO);
b4c0a845 2462 }
4d28e78f 2463 return (0);
b4c0a845
SZ
2464}
2465
4d28e78f
SZ
2466/*
2467 * New style pci driver. Parent device is either a pci-host-bridge or a
2468 * pci-pci-bridge. Both kinds are represented by instances of pcib.
2469 */
2470
/*
 * When booting verbosely, dump the interesting parts of a device's
 * config space: IDs, location, class, command/status, latency timers,
 * interrupt routing and the PM/MSI/MSI-X/PCI-Express capabilities.
 */
void
pci_print_verbose(struct pci_devinfo *dinfo)
{

	if (bootverbose) {
		pcicfgregs *cfg = &dinfo->cfg;

		kprintf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n",
		    cfg->vendor, cfg->device, cfg->revid);
		kprintf("\tdomain=%d, bus=%d, slot=%d, func=%d\n",
		    cfg->domain, cfg->bus, cfg->slot, cfg->func);
		kprintf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n",
		    cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype,
		    cfg->mfdev);
		kprintf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n",
		    cfg->cmdreg, cfg->statreg, cfg->cachelnsz);
		kprintf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n",
		    cfg->lattimer, cfg->lattimer * 30, cfg->mingnt,
		    cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250);
		if (cfg->intpin > 0)
			kprintf("\tintpin=%c, irq=%d\n",
			    cfg->intpin +'a' -1, cfg->intline);
		if (cfg->pp.pp_cap) {
			uint16_t status;

			/* Report supported D-states and the current one. */
			status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2);
			kprintf("\tpowerspec %d supports D0%s%s D3 current D%d\n",
			    cfg->pp.pp_cap & PCIM_PCAP_SPEC,
			    cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "",
			    cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "",
			    status & PCIM_PSTAT_DMASK);
		}
		if (cfg->msi.msi_location) {
			int ctrl;

			ctrl = cfg->msi.msi_ctrl;
			kprintf("\tMSI supports %d message%s%s%s\n",
			    cfg->msi.msi_msgnum,
			    (cfg->msi.msi_msgnum == 1) ? "" : "s",
			    (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "",
			    (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":"");
		}
		if (cfg->msix.msix_location) {
			kprintf("\tMSI-X supports %d message%s ",
			    cfg->msix.msix_msgnum,
			    (cfg->msix.msix_msgnum == 1) ? "" : "s");
			/* Table and PBA may live in the same or separate BARs. */
			if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar)
				kprintf("in map 0x%x\n",
				    cfg->msix.msix_table_bar);
			else
				kprintf("in maps 0x%x and 0x%x\n",
				    cfg->msix.msix_table_bar,
				    cfg->msix.msix_pba_bar);
		}
		pci_print_verbose_expr(cfg);
	}
}
2528
/*
 * Verbose-boot helper: print the PCI Express capability — version,
 * port type and, when a slot is implemented, the slot capabilities.
 * Silent when not booting verbosely or the capability is absent.
 */
static void
pci_print_verbose_expr(const pcicfgregs *cfg)
{
	const struct pcicfg_expr *expr = &cfg->expr;
	const char *port_name;
	uint16_t port_type;

	if (!bootverbose)
		return;

	if (expr->expr_ptr == 0) /* No PCI Express capability */
		return;

	kprintf("\tPCI Express ver.%d cap=0x%04x",
	    expr->expr_cap & PCIEM_CAP_VER_MASK, expr->expr_cap);
	/* Only version 1 port-type encodings are decoded below. */
	if ((expr->expr_cap & PCIEM_CAP_VER_MASK) != PCIEM_CAP_VER_1)
		goto back;

	port_type = expr->expr_cap & PCIEM_CAP_PORT_TYPE;

	switch (port_type) {
	case PCIE_END_POINT:
		port_name = "DEVICE";
		break;
	case PCIE_LEG_END_POINT:
		port_name = "LEGDEV";
		break;
	case PCIE_ROOT_PORT:
		port_name = "ROOT";
		break;
	case PCIE_UP_STREAM_PORT:
		port_name = "UPSTREAM";
		break;
	case PCIE_DOWN_STREAM_PORT:
		port_name = "DOWNSTRM";
		break;
	case PCIE_PCIE2PCI_BRIDGE:
		port_name = "PCIE2PCI";
		break;
	case PCIE_PCI2PCIE_BRIDGE:
		port_name = "PCI2PCIE";
		break;
	default:
		port_name = NULL;
		break;
	}
	/* Root/downstream ports are only labelled if a slot is implemented. */
	if ((port_type == PCIE_ROOT_PORT ||
	     port_type == PCIE_DOWN_STREAM_PORT) &&
	    !(expr->expr_cap & PCIEM_CAP_SLOT_IMPL))
		port_name = NULL;
	if (port_name != NULL)
		kprintf("[%s]", port_name);

	if (pcie_slotimpl(cfg)) {
		kprintf(", slotcap=0x%08x", expr->expr_slotcap);
		if (expr->expr_slotcap & PCIEM_SLTCAP_HP_CAP)
			kprintf("[HOTPLUG]");
	}
back:
	kprintf("\n");
}
2590
984263bc 2591static int
4a5a2d63 2592pci_porten(device_t pcib, int b, int s, int f)
984263bc 2593{
4a5a2d63
JS
2594 return (PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2)
2595 & PCIM_CMD_PORTEN) != 0;
984263bc
MD
2596}
2597
2598static int
4a5a2d63 2599pci_memen(device_t pcib, int b, int s, int f)
984263bc 2600{
4a5a2d63
JS
2601 return (PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2)
2602 & PCIM_CMD_MEMEN) != 0;
984263bc
MD
2603}
2604
/*
 * Add a resource based on a pci map register. Return 1 if the map
 * register is a 32bit map register or 2 if it is a 64bit register.
 */
static int
pci_add_map(device_t pcib, device_t bus, device_t dev,
    int b, int s, int f, int reg, struct resource_list *rl, int force,
    int prefetch)
{
	uint32_t map;
	pci_addr_t base;
	pci_addr_t start, end, count;
	uint8_t ln2size;
	uint8_t ln2range;
	uint32_t testval;
	uint16_t cmd;
	int type;
	int barlen;
	struct resource *res;

	/*
	 * Size the BAR: write all-ones, read back the probe value, then
	 * restore the original contents.
	 */
	map = PCIB_READ_CONFIG(pcib, b, s, f, reg, 4);
	PCIB_WRITE_CONFIG(pcib, b, s, f, reg, 0xffffffff, 4);
	testval = PCIB_READ_CONFIG(pcib, b, s, f, reg, 4);
	PCIB_WRITE_CONFIG(pcib, b, s, f, reg, map, 4);

	if (PCI_BAR_MEM(map)) {
		type = SYS_RES_MEMORY;
		if (map & PCIM_BAR_MEM_PREFETCH)
			prefetch = 1;
	} else
		type = SYS_RES_IOPORT;
	ln2size = pci_mapsize(testval);
	ln2range = pci_maprange(testval);
	base = pci_mapbase(map);
	barlen = ln2range == 64 ? 2 : 1;

	/*
	 * For I/O registers, if bottom bit is set, and the next bit up
	 * isn't clear, we know we have a BAR that doesn't conform to the
	 * spec, so ignore it.  Also, sanity check the size of the data
	 * areas to the type of memory involved.  Memory must be at least
	 * 16 bytes in size, while I/O ranges must be at least 4.
	 */
	if (PCI_BAR_IO(testval) && (testval & PCIM_BAR_IO_RESERVED) != 0)
		return (barlen);
	if ((type == SYS_RES_MEMORY && ln2size < 4) ||
	    (type == SYS_RES_IOPORT && ln2size < 2))
		return (barlen);

	if (ln2range == 64)
		/* Read the other half of a 64bit map register */
		base |= (uint64_t) PCIB_READ_CONFIG(pcib, b, s, f, reg + 4, 4) << 32;
	if (bootverbose) {
		kprintf("\tmap[%02x]: type %s, range %2d, base %#jx, size %2d",
		    reg, pci_maptype(map), ln2range, (uintmax_t)base, ln2size);
		if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f))
			kprintf(", port disabled\n");
		else if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f))
			kprintf(", memory disabled\n");
		else
			kprintf(", enabled\n");
	}

	/*
	 * If base is 0, then we have problems.  It is best to ignore
	 * such entries for the moment.  These will be allocated later if
	 * the driver specifically requests them.  However, some
	 * removable busses look better when all resources are allocated,
	 * so allow '0' to be overriden.
	 *
	 * Similarly treat maps whose values is the same as the test value
	 * read back.  These maps have had all f's written to them by the
	 * BIOS in an attempt to disable the resources.
	 */
	if (!force && (base == 0 || map == testval))
		return (barlen);
	/* BAR address must fit in a u_long or we cannot manage it. */
	if ((u_long)base != base) {
		device_printf(bus,
		    "pci%d:%d:%d:%d bar %#x too many address bits",
		    pci_get_domain(dev), b, s, f, reg);
		return (barlen);
	}

	/*
	 * This code theoretically does the right thing, but has
	 * undesirable side effects in some cases where peripherals
	 * respond oddly to having these bits enabled.  Let the user
	 * be able to turn them off (since pci_enable_io_modes is 1 by
	 * default).
	 */
	if (pci_enable_io_modes) {
		/* Turn on resources that have been left off by a lazy BIOS */
		if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f)) {
			cmd = PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2);
			cmd |= PCIM_CMD_PORTEN;
			PCIB_WRITE_CONFIG(pcib, b, s, f, PCIR_COMMAND, cmd, 2);
		}
		if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f)) {
			cmd = PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2);
			cmd |= PCIM_CMD_MEMEN;
			PCIB_WRITE_CONFIG(pcib, b, s, f, PCIR_COMMAND, cmd, 2);
		}
	} else {
		if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f))
			return (barlen);
		if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f))
			return (barlen);
	}

	count = 1 << ln2size;
	if (base == 0 || base == pci_mapbase(testval)) {
		start = 0;	/* Let the parent decide. */
		end = ~0ULL;
	} else {
		start = base;
		end = base + (1 << ln2size) - 1;
	}
	resource_list_add(rl, type, reg, start, end, count);

	/*
	 * Try to allocate the resource for this BAR from our parent
	 * so that this resource range is already reserved.  The
	 * driver for this device will later inherit this resource in
	 * pci_alloc_resource().
	 */
	res = resource_list_alloc(rl, bus, dev, type, &reg, start, end, count,
	    prefetch ? RF_PREFETCHABLE : 0);
	if (res == NULL) {
		/*
		 * If the allocation fails, clear the BAR and delete
		 * the resource list entry to force
		 * pci_alloc_resource() to allocate resources from the
		 * parent.
		 */
		resource_list_delete(rl, type, reg);
		start = 0;
	} else
		start = rman_get_start(res);
	/* Program the BAR with whatever address was finally chosen. */
	pci_write_config(dev, reg, start, 4);
	if (ln2range == 64)
		pci_write_config(dev, reg + 4, start >> 32, 4);
	return (barlen);
}
2748
/*
 * For ATA devices we need to decide early what addressing mode to use.
 * Legacy demands that the primary and secondary ATA ports sits on the
 * same addresses that old ISA hardware did. This dictates that we use
 * those addresses and ignore the BAR's if we cannot set PCI native
 * addressing mode.
 */
static void
pci_ata_maps(device_t pcib, device_t bus, device_t dev, int b,
    int s, int f, struct resource_list *rl, int force, uint32_t prefetchmask)
{
	int rid, type, progif;
#if 0
	/* if this device supports PCI native addressing use it */
	progif = pci_read_config(dev, PCIR_PROGIF, 1);
	if ((progif & 0x8a) == 0x8a) {
		if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) &&
		    pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) {
			kprintf("Trying ATA native PCI addressing mode\n");
			pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1);
		}
	}
#endif
	progif = pci_read_config(dev, PCIR_PROGIF, 1);
	type = SYS_RES_IOPORT;
	/* Primary channel: native BARs 0/1 or the legacy ISA ranges. */
	if (progif & PCIP_STORAGE_IDE_MODEPRIM) {
		pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(0), rl, force,
		    prefetchmask & (1 << 0));
		pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(1), rl, force,
		    prefetchmask & (1 << 1));
	} else {
		rid = PCIR_BAR(0);
		resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8);
		resource_list_alloc(rl, bus, dev, type, &rid, 0x1f0, 0x1f7, 8,
		    0);
		rid = PCIR_BAR(1);
		resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1);
		resource_list_alloc(rl, bus, dev, type, &rid, 0x3f6, 0x3f6, 1,
		    0);
	}
	/* Secondary channel: native BARs 2/3 or the legacy ISA ranges. */
	if (progif & PCIP_STORAGE_IDE_MODESEC) {
		pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(2), rl, force,
		    prefetchmask & (1 << 2));
		pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(3), rl, force,
		    prefetchmask & (1 << 3));
	} else {
		rid = PCIR_BAR(2);
		resource_list_add(rl, type, rid, 0x170, 0x177, 8);
		resource_list_alloc(rl, bus, dev, type, &rid, 0x170, 0x177, 8,
		    0);
		rid = PCIR_BAR(3);
		resource_list_add(rl, type, rid, 0x376, 0x376, 1);
		resource_list_alloc(rl, bus, dev, type, &rid, 0x376, 0x376, 1,
		    0);
	}
	/* Remaining BARs (bus-master DMA registers etc.) map normally. */
	pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(4), rl, force,
	    prefetchmask & (1 << 4));
	pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(5), rl, force,
	    prefetchmask & (1 << 5));
}
201eb0a7 2809
/*
 * Determine the legacy INTx IRQ for a device and record it as the
 * rid 0 SYS_RES_IRQ resource.  The IRQ can come from a user tunable,
 * from the bus' interrupt routing, or from the intline register.
 */
static void
pci_assign_interrupt(device_t bus, device_t dev, int force_route)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	pcicfgregs *cfg = &dinfo->cfg;
	char tunable_name[64];
	int irq;

	/* Has to have an intpin to have an interrupt. */
	if (cfg->intpin == 0)
		return;

	/* Let the user override the IRQ with a tunable. */
	irq = PCI_INVALID_IRQ;
	ksnprintf(tunable_name, sizeof(tunable_name),
	    "hw.pci%d.%d.%d.INT%c.irq",
	    cfg->domain, cfg->bus, cfg->slot, cfg->intpin + 'A' - 1);
	if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0))
		irq = PCI_INVALID_IRQ;

	/*
	 * If we didn't get an IRQ via the tunable, then we either use the
	 * IRQ value in the intline register or we ask the bus to route an
	 * interrupt for us.  If force_route is true, then we only use the
	 * value in the intline register if the bus was unable to assign an
	 * IRQ.
	 */
	if (!PCI_INTERRUPT_VALID(irq)) {
		if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route)
			irq = PCI_ASSIGN_INTERRUPT(bus, dev);
		if (!PCI_INTERRUPT_VALID(irq))
			irq = cfg->intline;
	}

	/* If after all that we don't have an IRQ, just bail. */
	if (!PCI_INTERRUPT_VALID(irq))
		return;

	/* Update the config register if it changed. */
	if (irq != cfg->intline) {
		cfg->intline = irq;
		pci_write_config(dev, PCIR_INTLINE, irq, 1);
	}

	/* Add this IRQ as rid 0 interrupt resource. */
	resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1);
}
2857
/*
 * Populate a device's resource list from its BARs (with special-case
 * handling for legacy ATA controllers and quirked map registers) and
 * assign its legacy interrupt, if any.
 */
void
pci_add_resources(device_t pcib, device_t bus, device_t dev, int force, uint32_t prefetchmask)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	pcicfgregs *cfg = &dinfo->cfg;
	struct resource_list *rl = &dinfo->resources;
	struct pci_quirk *q;
	int b, i, f, s;

	b = cfg->bus;
	s = cfg->slot;
	f = cfg->func;

	/* ATA devices needs special map treatment */
	if ((pci_get_class(dev) == PCIC_STORAGE) &&
	    (pci_get_subclass(dev) == PCIS_STORAGE_IDE) &&
	    ((pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV) ||
	     (!pci_read_config(dev, PCIR_BAR(0), 4) &&
	      !pci_read_config(dev, PCIR_BAR(2), 4))) )
		pci_ata_maps(pcib, bus, dev, b, s, f, rl, force, prefetchmask);
	else
		/* pci_add_map() returns 1 or 2 BAR slots consumed. */
		for (i = 0; i < cfg->nummaps;)
			i += pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(i),
			    rl, force, prefetchmask & (1 << i));

	/*
	 * Add additional, quirked resources.
	 */
	for (q = &pci_quirks[0]; q->devid; q++) {
		if (q->devid == ((cfg->device << 16) | cfg->vendor)
		    && q->type == PCI_QUIRK_MAP_REG)
			pci_add_map(pcib, bus, dev, b, s, f, q->arg1, rl,
			    force, 0);
	}

	if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) {
#ifdef __PCI_REROUTE_INTERRUPT
		/*
		 * Try to re-route interrupts. Sometimes the BIOS or
		 * firmware may leave bogus values in these registers.
		 * If the re-route fails, then just stick with what we
		 * have.
		 */
		pci_assign_interrupt(bus, dev, 1);
#else
		pci_assign_interrupt(bus, dev, 0);
#endif
	}
}
2907
/*
 * Scan one PCI bus (domain/busno) and add a child device for every
 * function found.  dinfo_size lets subclassed busses embed struct
 * pci_devinfo inside a larger per-device structure.
 */
void
pci_add_children(device_t dev, int domain, int busno, size_t dinfo_size)
{
#define	REG(n, w)	PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
	device_t pcib = device_get_parent(dev);
	struct pci_devinfo *dinfo;
	int maxslots;
	int s, f, pcifunchigh;
	uint8_t hdrtype;

	KASSERT(dinfo_size >= sizeof(struct pci_devinfo),
	    ("dinfo_size too small"));
	maxslots = PCIB_MAXSLOTS(pcib);
	for (s = 0; s <= maxslots; s++) {
		pcifunchigh = 0;
		f = 0;
		DELAY(1);
		hdrtype = REG(PCIR_HDRTYPE, 1);
		if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
			continue;
		/* Multi-function devices expose up to PCI_FUNCMAX functions. */
		if (hdrtype & PCIM_MFDEV)
			pcifunchigh = PCI_FUNCMAX;
		for (f = 0; f <= pcifunchigh; f++) {
			dinfo = pci_read_device(pcib, domain, busno, s, f,
			    dinfo_size);
			if (dinfo != NULL) {
				pci_add_child(dev, dinfo);
			}
		}
	}
#undef REG
}
2940
/*
 * Create a device_t for a discovered PCI function, record its config
 * state and reserve its BAR resources.
 */
void
pci_add_child(device_t bus, struct pci_devinfo *dinfo)
{
	device_t pcib;

	pcib = device_get_parent(bus);
	dinfo->cfg.dev = device_add_child(bus, NULL, -1);
	device_set_ivars(dinfo->cfg.dev, dinfo);
	resource_list_init(&dinfo->resources);
	/* Save the pristine config space, then restore it to the device. */
	pci_cfg_save(dinfo->cfg.dev, dinfo, 0);
	pci_cfg_restore(dinfo->cfg.dev, dinfo);
	pci_print_verbose(dinfo);
	pci_add_resources(pcib, bus, dinfo->cfg.dev, 0, 0);
}
2955
/* Generic PCI bus probe: always matches, but at low priority. */
static int
pci_probe(device_t dev)
{
	device_set_desc(dev, "PCI bus");

	/* Allow other subclasses to override this driver. */
	return (-1000);
}
2964
/* Attach the PCI bus: enumerate children and attach their drivers. */
static int
pci_attach(device_t dev)
{
	int busno, domain;

	/*
	 * Since there can be multiple independently numbered PCI
	 * busses on systems with multiple PCI domains, we can't use
	 * the unit number to decide which bus we are probing. We ask
	 * the parent pcib what our domain and bus numbers are.
	 */
	domain = pcib_get_domain(dev);
	busno = pcib_get_bus(dev);
	if (bootverbose)
		device_printf(dev, "domain=%d, physical bus=%d\n",
		    domain, busno);

	pci_add_children(dev, domain, busno, sizeof(struct pci_devinfo));

	return (bus_generic_attach(dev));
}
2986
/*
 * Bus suspend method: save each child's config space, suspend the
 * children, then (when ACPI-driven power management is enabled) drop
 * attached type 0 devices into a sleep state.
 */
int
pci_suspend(device_t dev)
{
	int dstate, error, i, numdevs;
	device_t acpi_dev, child, *devlist;
	struct pci_devinfo *dinfo;

	/*
	 * Save the PCI configuration space for each child and set the
	 * device in the appropriate power state for this sleep state.
	 */
	acpi_dev = NULL;
	if (pci_do_power_resume)
		acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
	device_get_children(dev, &devlist, &numdevs);
	for (i = 0; i < numdevs; i++) {
		child = devlist[i];
		dinfo = (struct pci_devinfo *) device_get_ivars(child);
		pci_cfg_save(child, dinfo, 0);
	}

	/* Suspend devices before potentially powering them down. */
	error = bus_generic_suspend(dev);
	if (error) {
		kfree(devlist, M_TEMP);
		return (error);
	}

	/*
	 * Always set the device to D3.  If ACPI suggests a different
	 * power state, use it instead.  If ACPI is not present, the
	 * firmware is responsible for managing device power.  Skip
	 * children who aren't attached since they are powered down
	 * separately.  Only manage type 0 devices for now.
	 */
	for (i = 0; acpi_dev && i < numdevs; i++) {
		child = devlist[i];
		dinfo = (struct pci_devinfo *) device_get_ivars(child);
		if (device_is_attached(child) && dinfo->cfg.hdrtype == 0) {
			dstate = PCI_POWERSTATE_D3;
			ACPI_PWR_FOR_SLEEP(acpi_dev, child, &dstate);
			pci_set_powerstate(child, dstate);
		}
	}
	kfree(devlist, M_TEMP);
	return (0);
}
3034
4d28e78f
SZ
/*
 * Bus resume method: return each child to D0 (via ACPI when enabled),
 * restore its saved config space, then resume the children.
 */
int
pci_resume(device_t dev)
{
	int i, numdevs;
	device_t acpi_dev, child, *devlist;
	struct pci_devinfo *dinfo;

	/*
	 * Set each child to D0 and restore its PCI configuration space.
	 */
	acpi_dev = NULL;
	if (pci_do_power_resume)
		acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
	device_get_children(dev, &devlist, &numdevs);
	for (i = 0; i < numdevs; i++) {
		/*
		 * Notify ACPI we're going to D0 but ignore the result.  If
		 * ACPI is not present, the firmware is responsible for
		 * managing device power.  Only manage type 0 devices for now.
		 */
		child = devlist[i];
		dinfo = (struct pci_devinfo *) device_get_ivars(child);
		if (acpi_dev && device_is_attached(child) &&
		    dinfo->cfg.hdrtype == 0) {
			ACPI_PWR_FOR_SLEEP(acpi_dev, child, NULL);
			pci_set_powerstate(child, PCI_POWERSTATE_D0);
		}

		/* Now the device is powered up, restore its config space. */
		pci_cfg_restore(child, dinfo);
	}
	kfree(devlist, M_TEMP);
	return (bus_generic_resume(dev));
}
3069
3070static void
3071pci_load_vendor_data(void)
3072{
3073 caddr_t vendordata, info;
3074
3075 if ((vendordata = preload_search_by_type("pci_vendor_data")) != NULL) {
3076 info = preload_search_info(vendordata, MODINFO_ADDR);
3077 pci_vendordata = *(char **)info;
3078 info = preload_search_info(vendordata, MODINFO_SIZE);
3079 pci_vendordata_size = *(size_t *)info;
3080 /* terminate the database */
3081 pci_vendordata[pci_vendordata_size] = '\n';
3082 }
3083}
3084
3085void
3086pci_driver_added(device_t dev, driver_t *driver)
3087{
3088 int numdevs;
3089 device_t *devlist;
3090 device_t child;
3091 struct pci_devinfo *dinfo;
3092 int i;
3093
3094 if (bootverbose)
3095 device_printf(dev, "driver added\n");
3096 DEVICE_IDENTIFY(driver, dev);
3097 device_get_children(dev, &devlist, &numdevs);
3098 for (i = 0; i < numdevs; i++) {
3099 child = devlist[i];
3100 if (device_get_state(child) != DS_NOTPRESENT)
3101 continue;
3102 dinfo = device_get_ivars(child);
3103 pci_print_verbose(dinfo);
3104 if (bootverbose)
3105 kprintf("pci%d:%d:%d:%d: reprobing on driver added\n",
3106 dinfo->cfg.domain, dinfo->cfg.bus, dinfo->cfg.slot,
3107 dinfo->cfg.func);
3108 pci_cfg_restore(child, dinfo);
3109 if (device_probe_and_attach(child) != 0)
3110 pci_cfg_save(child, dinfo, 1);
3111 }
3112 kfree(devlist, M_TEMP);
3113}
3114
11a49859
SZ
3115static void
3116pci_child_detached(device_t parent __unused, device_t child)
3117{
3118 /* Turn child's power off */
3119 pci_cfg_save(child, device_get_ivars(child), 1);
3120}
3121
4d28e78f
SZ
/*
 * Install an interrupt handler for a child device.
 *
 * The handler is first installed via bus_generic_setup_intr().  For
 * devices that are not direct children we stop there.  For direct
 * children the INTx-disable bit in the command register is cleared,
 * and — when compiled with the MSI option — rid 0 is treated as legacy
 * INTx while any other rid is mapped as an MSI or MSI-X message through
 * the parent bridge (PCIB_MAP_MSI), with INTx disabled once a message
 * interrupt is active.  On a mapping failure the generic handler is
 * torn down again and the error returned.
 */
int
pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags,
    driver_intr_t *intr, void *arg, void **cookiep, lwkt_serialize_t serializer)
{
#ifdef MSI
	struct pci_devinfo *dinfo;
	struct msix_table_entry *mte;
	struct msix_vector *mv;
	uint64_t addr;
	uint32_t data;
	int rid;
#endif
	int error;
	void *cookie;
	error = bus_generic_setup_intr(dev, child, irq, flags, intr,
	    arg, &cookie, serializer);
	if (error)
		return (error);

	/* If this is not a direct child, just bail out. */
	if (device_get_parent(child) != dev) {
		*cookiep = cookie;
		return(0);
	}

	/* Direct child: enable INTx delivery (may be masked again below). */
	pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
#ifdef MSI
	rid = rman_get_rid(irq);
	if (rid == 0) {
		/* Make sure that INTx is enabled */
		pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
	} else {
		/*
		 * Check to see if the interrupt is MSI or MSI-X.
		 * Ask our parent to map the MSI and give
		 * us the address and data register values.
		 * If we fail for some reason, teardown the
		 * interrupt handler.
		 */
		dinfo = device_get_ivars(child);
		if (dinfo->cfg.msi.msi_alloc > 0) {
			/* Map MSI vectors lazily, on first handler setup. */
			if (dinfo->cfg.msi.msi_addr == 0) {
				KASSERT(dinfo->cfg.msi.msi_handlers == 0,
				    ("MSI has handlers, but vectors not mapped"));
				error = PCIB_MAP_MSI(device_get_parent(dev),
				    child, rman_get_start(irq), &addr, &data);
				if (error)
					goto bad;
				dinfo->cfg.msi.msi_addr = addr;
				dinfo->cfg.msi.msi_data = data;
				pci_enable_msi(child, addr, data);
			}
			dinfo->cfg.msi.msi_handlers++;
		} else {
			/* Not MSI, so it must be an allocated MSI-X vector. */
			KASSERT(dinfo->cfg.msix.msix_alloc > 0,
			    ("No MSI or MSI-X interrupts allocated"));
			KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
			    ("MSI-X index too high"));
			mte = &dinfo->cfg.msix.msix_table[rid - 1];
			KASSERT(mte->mte_vector != 0, ("no message vector"));
			mv = &dinfo->cfg.msix.msix_vectors[mte->mte_vector - 1];
			KASSERT(mv->mv_irq == rman_get_start(irq),
			    ("IRQ mismatch"));
			if (mv->mv_address == 0) {
				KASSERT(mte->mte_handlers == 0,
				    ("MSI-X table entry has handlers, but vector not mapped"));
				error = PCIB_MAP_MSI(device_get_parent(dev),
				    child, rman_get_start(irq), &addr, &data);
				if (error)
					goto bad;
				mv->mv_address = addr;
				mv->mv_data = data;
			}
			/* Program and unmask the entry on first handler. */
			if (mte->mte_handlers == 0) {
				pci_enable_msix(child, rid - 1, mv->mv_address,
				    mv->mv_data);
				pci_unmask_msix(child, rid - 1);
			}
			mte->mte_handlers++;
		}

		/* Make sure that INTx is disabled if we are using MSI/MSIX */
		pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
	bad:
		if (error) {
			(void)bus_generic_teardown_intr(dev, child, irq,
			    cookie);
			return (error);
		}
	}
#endif
	*cookiep = cookie;
	return (0);
}
3216
3217int
3218pci_teardown_intr(device_t dev, device_t child, struct resource *irq,
3219 void *cookie)
3220{
3221#ifdef MSI
3222 struct msix_table_entry *mte;
3223 struct resource_list_entry *rle;
3224 struct pci_devinfo *dinfo;
35b72619 3225 int rid;
4d28e78f 3226#endif
35b72619 3227 int error;
4d28e78f
SZ
3228
3229 if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE))
3230 return (EINVAL);
3231
3232 /* If this isn't a direct child, just bail out */
3233 if (device_get_parent(child) != dev)
3234 return(bus_generic_teardown_intr(dev, child, irq, cookie));
3235
3236 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
3237#ifdef MSI
3238 rid = rman_get_rid(irq);
3239 if (rid == 0) {
3240 /* Mask INTx */
3241 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
3242 } else {
3243 /*
3244 * Check to see if the interrupt is MSI or MSI-X. If so,
3245 * decrement the appropriate handlers count and mask the
3246 * MSI-X message, or disable MSI messages if the count
3247 * drops to 0.
3248 */
3249 dinfo = device_get_ivars(child);
3250 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
3251 if (rle->res != irq)
3252 return (EINVAL);
3253 if (dinfo->cfg.msi.msi_alloc > 0) {
3254 KASSERT(rid <= dinfo->cfg.msi.msi_alloc,
3255 ("MSI-X index too high"));
3256 if (dinfo->cfg.msi.msi_handlers == 0)
3257 return (EINVAL);
3258 dinfo->cfg.msi.msi_handlers--;
3259 if (dinfo->cfg.msi.msi_handlers == 0)
3260 pci_disable_msi(child);
3261 } else {
3262 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
3263 ("No MSI or MSI-X interrupts allocated"));
3264 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
3265 ("MSI-X index too high"));
3266 mte = &dinfo->cfg.msix.msix_table[rid - 1];
3267 if (mte->mte_handlers == 0)
3268 return (EINVAL);
3269 mte->mte_handlers--;
3270 if (mte->mte_handlers == 0)
3271 pci_mask_msix(child, rid - 1);
984263bc
MD
3272 }
3273 }
4d28e78f
SZ
3274 error = bus_generic_teardown_intr(dev, child, irq, cookie);
3275 if (rid > 0)
3276 KASSERT(error == 0,
3277 ("%s: generic teardown failed for MSI/MSI-X", __func__));
3278#endif
3279 error = bus_generic_teardown_intr(dev, child, irq, cookie);
3280 return (error);
984263bc
MD
3281}
3282
e126caf1 3283int
984263bc
MD
3284pci_print_child(device_t dev, device_t child)
3285{
3286 struct pci_devinfo *dinfo;
3287 struct resource_list *rl;
984263bc
MD
3288 int retval = 0;
3289
3290 dinfo = device_get_ivars(child);
984263bc
MD
3291 rl = &dinfo->resources;
3292
3293 retval += bus_print_child_header(dev, child);
3294
4d28e78f
SZ
3295 retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx");
3296 retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx");
3297 retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld");
984263bc 3298 if (device_get_flags(dev))
85f8e2ea 3299 retval += kprintf(" flags %#x", device_get_flags(dev));
984263bc 3300
85f8e2ea 3301 retval += kprintf(" at device %d.%d", pci_get_slot(child),
4d28e78f 3302 pci_get_function(child));
984263bc
MD
3303
3304 retval += bus_print_child_footer(dev, child);
3305
3306 return (retval);
3307}
3308
4d28e78f
SZ
/*
 * Class/subclass description table used by pci_probe_nomatch() when no
 * driver attaches and the vendor database has no entry.  A subclass of
 * -1 names the whole class; the list is terminated by a NULL desc.
 */
static struct
{
	int	class;		/* PCI base class (PCIC_*) */
	int	subclass;	/* PCI subclass (PCIS_*), or -1 for the class */
	char	*desc;		/* human-readable description */
} pci_nomatch_tab[] = {
	{PCIC_OLD,		-1,			"old"},
	{PCIC_OLD,		PCIS_OLD_NONVGA,	"non-VGA display device"},
	{PCIC_OLD,		PCIS_OLD_VGA,		"VGA-compatible display device"},
	{PCIC_STORAGE,		-1,			"mass storage"},
	{PCIC_STORAGE,		PCIS_STORAGE_SCSI,	"SCSI"},
	{PCIC_STORAGE,		PCIS_STORAGE_IDE,	"ATA"},
	{PCIC_STORAGE,		PCIS_STORAGE_FLOPPY,	"floppy disk"},
	{PCIC_STORAGE,		PCIS_STORAGE_IPI,	"IPI"},
	{PCIC_STORAGE,		PCIS_STORAGE_RAID,	"RAID"},
	{PCIC_STORAGE,		PCIS_STORAGE_ATA_ADMA,	"ATA (ADMA)"},
	{PCIC_STORAGE,		PCIS_STORAGE_SATA,	"SATA"},
	{PCIC_STORAGE,		PCIS_STORAGE_SAS,	"SAS"},
	{PCIC_NETWORK,		-1,			"network"},
	{PCIC_NETWORK,		PCIS_NETWORK_ETHERNET,	"ethernet"},
	{PCIC_NETWORK,		PCIS_NETWORK_TOKENRING,	"token ring"},
	{PCIC_NETWORK,		PCIS_NETWORK_FDDI,	"fddi"},
	{PCIC_NETWORK,		PCIS_NETWORK_ATM,	"ATM"},
	{PCIC_NETWORK,		PCIS_NETWORK_ISDN,	"ISDN"},
	{PCIC_DISPLAY,		-1,			"display"},
	{PCIC_DISPLAY,		PCIS_DISPLAY_VGA,	"VGA"},
	{PCIC_DISPLAY,		PCIS_DISPLAY_XGA,	"XGA"},
	{PCIC_DISPLAY,		PCIS_DISPLAY_3D,	"3D"},
	{PCIC_MULTIMEDIA,	-1,			"multimedia"},
	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_VIDEO,	"video"},
	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_AUDIO,	"audio"},
	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_TELE,	"telephony"},
	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_HDA,	"HDA"},
	{PCIC_MEMORY,		-1,			"memory"},
	{PCIC_MEMORY,		PCIS_MEMORY_RAM,	"RAM"},
	{PCIC_MEMORY,		PCIS_MEMORY_FLASH,	"flash"},
	{PCIC_BRIDGE,		-1,			"bridge"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_HOST,	"HOST-PCI"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_ISA,	"PCI-ISA"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_EISA,	"PCI-EISA"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_MCA,	"PCI-MCA"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_PCI,	"PCI-PCI"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_PCMCIA,	"PCI-PCMCIA"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_NUBUS,	"PCI-NuBus"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_CARDBUS,	"PCI-CardBus"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_RACEWAY,	"PCI-RACEway"},
	{PCIC_SIMPLECOMM,	-1,			"simple comms"},
	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_UART,	"UART"},	/* could detect 16550 */
	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_PAR,	"parallel port"},
	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_MULSER,	"multiport serial"},
	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_MODEM,	"generic modem"},
	{PCIC_BASEPERIPH,	-1,			"base peripheral"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_PIC,	"interrupt controller"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_DMA,	"DMA controller"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_TIMER,	"timer"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_RTC,	"realtime clock"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_PCIHOT,	"PCI hot-plug controller"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_SDHC,	"SD host controller"},
	{PCIC_INPUTDEV,		-1,			"input device"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_KEYBOARD,	"keyboard"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_DIGITIZER,"digitizer"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_MOUSE,	"mouse"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_SCANNER,	"scanner"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_GAMEPORT,	"gameport"},
	{PCIC_DOCKING,		-1,			"docking station"},
	{PCIC_PROCESSOR,	-1,			"processor"},
	{PCIC_SERIALBUS,	-1,			"serial bus"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_FW,	"FireWire"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_ACCESS,	"AccessBus"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_SSA,	"SSA"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_USB,	"USB"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_FC,	"Fibre Channel"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_SMBUS,	"SMBus"},
	{PCIC_WIRELESS,		-1,			"wireless controller"},
	{PCIC_WIRELESS,		PCIS_WIRELESS_IRDA,	"iRDA"},
	{PCIC_WIRELESS,		PCIS_WIRELESS_IR,	"IR"},
	{PCIC_WIRELESS,		PCIS_WIRELESS_RF,	"RF"},
	{PCIC_INTELLIIO,	-1,			"intelligent I/O controller"},
	{PCIC_INTELLIIO,	PCIS_INTELLIIO_I2O,	"I2O"},
	{PCIC_SATCOM,		-1,			"satellite communication"},
	{PCIC_SATCOM,		PCIS_SATCOM_TV,		"sat TV"},
	{PCIC_SATCOM,		PCIS_SATCOM_AUDIO,	"sat audio"},
	{PCIC_SATCOM,		PCIS_SATCOM_VOICE,	"sat voice"},
	{PCIC_SATCOM,		PCIS_SATCOM_DATA,	"sat data"},
	{PCIC_CRYPTO,		-1,			"encrypt/decrypt"},
	{PCIC_CRYPTO,		PCIS_CRYPTO_NETCOMP,	"network/computer crypto"},
	{PCIC_CRYPTO,		PCIS_CRYPTO_ENTERTAIN,	"entertainment crypto"},
	{PCIC_DASP,		-1,			"dasp"},
	{PCIC_DASP,		PCIS_DASP_DPIO,		"DPIO module"},
	{0, 0,		NULL}
};
3400
e126caf1 3401void
984263bc
MD
3402pci_probe_nomatch(device_t dev, device_t child)
3403{
4d28e78f
SZ
3404 int i;
3405 char *cp, *scp, *device;
984263bc 3406
4d28e78f
SZ
3407 /*
3408 * Look for a listing for this device in a loaded device database.
3409 */
3410 if ((device = pci_describe_device(child)) != NULL) {
3411 device_printf(dev, "<%s>", device);
3412 kfree(device, M_DEVBUF);
3413 } else {
3414 /*
3415 * Scan the class/subclass descriptions for a general
3416 * description.
3417 */
3418 cp = "unknown";
3419 scp = NULL;
3420 for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) {
3421 if (pci_nomatch_tab[i].class == pci_get_class(child)) {
3422 if (pci_nomatch_tab[i].subclass == -1) {
3423 cp = pci_nomatch_tab[i].desc;
3424 } else if (pci_nomatch_tab[i].subclass ==
3425 pci_get_subclass(child)) {
3426 scp = pci_nomatch_tab[i].desc;
3427 }
3428 }
3429 }
3430 device_printf(dev, "<%s%s%s>",
3431 cp ? cp : "",
3432 ((cp != NULL) && (scp != NULL)) ? ", " : "",
3433 scp ? scp : "");
3434 }
3435 kprintf(" at device %d.%d (no driver attached)\n",
3436 pci_get_slot(child), pci_get_function(child));
638744c5 3437 pci_cfg_save(child, (struct pci_devinfo *)device_get_ivars(child), 1);
984263bc
MD
3438 return;
3439}
3440
4d28e78f
SZ
3441/*
3442 * Parse the PCI device database, if loaded, and return a pointer to a
3443 * description of the device.
3444 *
3445 * The database is flat text formatted as follows:
3446 *
3447 * Any line not in a valid format is ignored.
3448 * Lines are terminated with newline '\n' characters.
3449 *
3450 * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then
3451 * the vendor name.
3452 *
3453 * A DEVICE line is entered immediately below the corresponding VENDOR ID.
3454 * - devices cannot be listed without a corresponding VENDOR line.
3455 * A DEVICE line consists of a TAB, the 4 digit (hex) device code,
3456 * another TAB, then the device name.
3457 */
3458
3459/*
3460 * Assuming (ptr) points to the beginning of a line in the database,
3461 * return the vendor or device and description of the next entry.
3462 * The value of (vendor) or (device) inappropriate for the entry type
3463 * is set to -1. Returns nonzero at the end of the database.
3464 *
3465 * Note that this is slightly unrobust in the face of corrupt data;
3466 * we attempt to safeguard against this by spamming the end of the
3467 * database with a newline when we initialise.
3468 */
/*
 * Parse the next vendor or device entry from the flat-text database at
 * *ptr.  On return, exactly one of *vendor / *device holds the parsed
 * hex code (the other stays -1) and *desc holds the description text
 * (caller supplies an 80+1 byte buffer).  *ptr is advanced past the
 * consumed line.  Returns 1 at end of database, 0 otherwise.
 */
static int
pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc)
{
	char *cp = *ptr;
	int left;

	*device = -1;
	*vendor = -1;
	**desc = '\0';
	for (;;) {
		/* Bytes remaining before the end of the database image. */
		left = pci_vendordata_size - (cp - pci_vendordata);
		if (left <= 0) {
			*ptr = cp;
			return(1);
		}

		/* vendor entry? */
		if (*cp != '\t' &&
		    ksscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2)
			break;
		/* device entry? */
		if (*cp == '\t' &&
		    ksscanf(cp, "%x\t%80[^\n]", device, *desc) == 2)
			break;

		/* skip to next line */
		while (*cp != '\n' && left > 0) {
			cp++;
			left--;
		}
		if (*cp == '\n') {
			cp++;
			left--;
		}
	}
	/* skip to next line */
	while (*cp != '\n' && left > 0) {
		cp++;
		left--;
	}
	if (*cp == '\n' && left > 0)
		cp++;
	*ptr = cp;
	return(0);
}
3514
/*
 * Look the device up in the loaded vendor database and return a
 * kmalloc'ed "vendor, device" description string, or NULL if there is
 * no database, no vendor entry, or allocation fails.  The caller owns
 * the returned string and must kfree() it (M_DEVBUF).
 */
static char *
pci_describe_device(device_t dev)
{
	int vendor, device;
	char *desc, *vp, *dp, *line;

	desc = vp = dp = NULL;

	/*
	 * If we have no vendor data, we can't do anything.
	 */
	if (pci_vendordata == NULL)
		goto out;

	/*
	 * Scan the vendor data looking for this device
	 */
	line = pci_vendordata;
	/* vp/dp are 80-byte scratch buffers filled by the line parser. */
	if ((vp = kmalloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
		goto out;
	for (;;) {
		if (pci_describe_parse_line(&line, &vendor, &device, &vp))
			goto out;
		if (vendor == pci_get_vendor(dev))
			break;
	}
	if ((dp = kmalloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
		goto out;
	for (;;) {
		/* Stop at end of database or at the next vendor entry. */
		if (pci_describe_parse_line(&line, &vendor, &device, &dp)) {
			*dp = 0;
			break;
		}
		if (vendor != -1) {
			*dp = 0;
			break;
		}
		if (device == pci_get_device(dev))
			break;
	}
	/* No device entry found: fall back to the numeric device id. */
	if (dp[0] == '\0')
		ksnprintf(dp, 80, "0x%x", pci_get_device(dev));
	if ((desc = kmalloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) !=
	    NULL)
		ksprintf(desc, "%s, %s", vp, dp);
 out:
	if (vp != NULL)
		kfree(vp, M_DEVBUF);
	if (dp != NULL)
		kfree(dp, M_DEVBUF);
	return(desc);
}
3567
/*
 * Bus method: return the requested instance variable of a PCI child,
 * read out of its cached config registers (dinfo->cfg).  Returns 0 on
 * success, EINVAL for PCI_IVAR_ETHADDR (unsupported here; *result is
 * set to NULL first since the generic accessor ignores failure), and
 * ENOENT for unknown ivars.
 */
int
pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
{
	struct pci_devinfo *dinfo;
	pcicfgregs *cfg;

	dinfo = device_get_ivars(child);
	cfg = &dinfo->cfg;

	switch (which) {
	case PCI_IVAR_ETHADDR:
		/*
		 * The generic accessor doesn't deal with failure, so
		 * we set the return value, then return an error.
		 */
		*((uint8_t **) result) = NULL;
		return (EINVAL);
	case PCI_IVAR_SUBVENDOR:
		*result = cfg->subvendor;
		break;
	case PCI_IVAR_SUBDEVICE:
		*result = cfg->subdevice;
		break;
	case PCI_IVAR_VENDOR:
		*result = cfg->vendor;
		break;
	case PCI_IVAR_DEVICE:
		*result = cfg->device;
		break;
	case PCI_IVAR_DEVID:
		/* Combined device/vendor id, as in a PCI id match table. */
		*result = (cfg->device << 16) | cfg->vendor;
		break;
	case PCI_IVAR_CLASS:
		*result = cfg->baseclass;
		break;
	case PCI_IVAR_SUBCLASS:
		*result = cfg->subclass;
		break;
	case PCI_IVAR_PROGIF:
		*result = cfg->progif;
		break;
	case PCI_IVAR_REVID:
		*result = cfg->revid;
		break;
	case PCI_IVAR_INTPIN:
		*result = cfg->intpin;
		break;
	case PCI_IVAR_IRQ:
		*result = cfg->intline;
		break;
	case PCI_IVAR_DOMAIN:
		*result = cfg->domain;
		break;
	case PCI_IVAR_BUS:
		*result = cfg->bus;
		break;
	case PCI_IVAR_SLOT:
		*result = cfg->slot;
		break;
	case PCI_IVAR_FUNCTION:
		*result = cfg->func;
		break;
	case PCI_IVAR_CMDREG:
		*result = cfg->cmdreg;
		break;
	case PCI_IVAR_CACHELNSZ:
		*result = cfg->cachelnsz;
		break;
	case PCI_IVAR_MINGNT:
		*result = cfg->mingnt;
		break;
	case PCI_IVAR_MAXLAT:
		*result = cfg->maxlat;
		break;
	case PCI_IVAR_LATTIMER:
		*result = cfg->lattimer;
		break;
	case PCI_IVAR_PCIXCAP_PTR:
		*result = cfg->pcix.pcix_ptr;
		break;
	case PCI_IVAR_PCIECAP_PTR:
		*result = cfg->expr.expr_ptr;
		break;
	case PCI_IVAR_VPDCAP_PTR:
		*result = cfg->vpd.vpd_reg;
		break;
	default:
		return (ENOENT);
	}
	return (0);
}
3659
/*
 * Bus method: set an instance variable of a PCI child.  Only
 * PCI_IVAR_INTPIN is writable; the identification/location ivars are
 * read-only and yield EINVAL, and unknown ivars yield ENOENT.
 */
int
pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
{
	struct pci_devinfo *dinfo;

	dinfo = device_get_ivars(child);

	switch (which) {
	case PCI_IVAR_INTPIN:
		dinfo->cfg.intpin = value;
		return (0);
	case PCI_IVAR_ETHADDR:
	case PCI_IVAR_SUBVENDOR:
	case PCI_IVAR_SUBDEVICE:
	case PCI_IVAR_VENDOR:
	case PCI_IVAR_DEVICE:
	case PCI_IVAR_DEVID:
	case PCI_IVAR_CLASS:
	case PCI_IVAR_SUBCLASS:
	case PCI_IVAR_PROGIF:
	case PCI_IVAR_REVID:
	case PCI_IVAR_IRQ:
	case PCI_IVAR_DOMAIN:
	case PCI_IVAR_BUS:
	case PCI_IVAR_SLOT:
	case PCI_IVAR_FUNCTION:
		return (EINVAL);	/* disallow for now */

	default:
		return (ENOENT);
	}
}
/*
 * NOTE(review): this whole region is compiled out by "#ifdef notyet";
 * it is a DDB command carried over from FreeBSD that has not been
 * hooked up here yet.
 */
#ifdef notyet
#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>
#include <sys/cons.h>

/*
 * List resources based on pci map registers, used for within ddb
 */

DB_SHOW_COMMAND(pciregs, db_pci_dump)
{
	struct pci_devinfo *dinfo;
	struct devlist *devlist_head;
	struct pci_conf *p;
	const char *name;
	int i, error, none_count;

	none_count = 0;
	/* get the head of the device queue */
	devlist_head = &pci_devq;

	/*
	 * Go through the list of devices and print out devices
	 */
	for (error = 0, i = 0,
	     dinfo = STAILQ_FIRST(devlist_head);
	     (dinfo != NULL) && (error == 0) && (i < pci_numdevs) && !db_pager_quit;
	     dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {

		/* Populate pd_name and pd_unit */
		name = NULL;
		if (dinfo->cfg.dev)
			name = device_get_name(dinfo->cfg.dev);

		p = &dinfo->conf;
		db_kprintf("%s%d@pci%d:%d:%d:%d:\tclass=0x%06x card=0x%08x "
		    "chip=0x%08x rev=0x%02x hdr=0x%02x\n",
		    (name && *name) ? name : "none",
		    (name && *name) ? (int)device_get_unit(dinfo->cfg.dev) :
		    none_count++,
		    p->pc_sel.pc_domain, p->pc_sel.pc_bus, p->pc_sel.pc_dev,
		    p->pc_sel.pc_func, (p->pc_class << 16) |
		    (p->pc_subclass << 8) | p->pc_progif,
		    (p->pc_subdevice << 16) | p->pc_subvendor,
		    (p->pc_device << 16) | p->pc_vendor,
		    p->pc_revid, p->pc_hdr);
	}
}
#endif /* DDB */
#endif
984263bc 3743
/*
 * Lazily size and allocate the resource backing a BAR that has no
 * resource-list entry yet.  The BAR is probed by writing all-ones and
 * reading back the size mask, the resource is allocated from the parent
 * with the BAR's true size/alignment (overriding the caller's count),
 * recorded on the child's resource list, and the BAR is programmed with
 * the allocated base address.  Returns NULL on any failure.
 */
static struct resource *
pci_alloc_map(device_t dev, device_t child, int type, int *rid,
    u_long start, u_long end, u_long count, u_int flags)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct resource_list *rl = &dinfo->resources;
	struct resource_list_entry *rle;
	struct resource *res;
	pci_addr_t map, testval;
	int mapsize;

	/*
	 * Weed out the bogons, and figure out how large the BAR/map
	 * is.  Bars that read back 0 here are bogus and unimplemented.
	 * Note: atapci in legacy mode are special and handled elsewhere
	 * in the code.  If you have a atapci device in legacy mode and
	 * it fails here, that other code is broken.
	 */
	res = NULL;
	map = pci_read_config(child, *rid, 4);
	pci_write_config(child, *rid, 0xffffffff, 4);
	testval = pci_read_config(child, *rid, 4);
	if (pci_maprange(testval) == 64)
		map |= (pci_addr_t)pci_read_config(child, *rid + 4, 4) << 32;
	if (pci_mapbase(testval) == 0)
		goto out;

	/*
	 * Restore the original value of the BAR.  We may have reprogrammed
	 * the BAR of the low-level console device and when booting verbose,
	 * we need the console device addressable.
	 */
	pci_write_config(child, *rid, map, 4);

	/* The BAR's memory/IO type must agree with the requested type. */
	if (PCI_BAR_MEM(testval)) {
		if (type != SYS_RES_MEMORY) {
			if (bootverbose)
				device_printf(dev,
				    "child %s requested type %d for rid %#x,"
				    " but the BAR says it is an memio\n",
				    device_get_nameunit(child), type, *rid);
			goto out;
		}
	} else {
		if (type != SYS_RES_IOPORT) {
			if (bootverbose)
				device_printf(dev,
				    "child %s requested type %d for rid %#x,"
				    " but the BAR says it is an ioport\n",
				    device_get_nameunit(child), type, *rid);
			goto out;
		}
	}
	/*
	 * For real BARs, we need to override the size that
	 * the driver requests, because that's what the BAR
	 * actually uses and we would otherwise have a
	 * situation where we might allocate the excess to
	 * another driver, which won't work.
	 */
	mapsize = pci_mapsize(testval);
	count = 1UL << mapsize;
	if (RF_ALIGNMENT(flags) < mapsize)
		flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize);
	if (PCI_BAR_MEM(testval) && (testval & PCIM_BAR_MEM_PREFETCH))
		flags |= RF_PREFETCHABLE;

	/*
	 * Allocate enough resource, and then write back the
	 * appropriate bar for that resource.
	 */
	res = BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid,
	    start, end, count, flags);
	if (res == NULL) {
		device_printf(child,
		    "%#lx bytes of rid %#x res %d failed (%#lx, %#lx).\n",
		    count, *rid, type, start, end);
		goto out;
	}
	resource_list_add(rl, type, *rid, start, end, count);
	rle = resource_list_find(rl, type, *rid);
	if (rle == NULL)
		panic("pci_alloc_map: unexpectedly can't find resource.");
	rle->res = res;
	rle->start = rman_get_start(res);
	rle->end = rman_get_end(res);
	rle->count = count;
	if (bootverbose)
		device_printf(child,
		    "Lazy allocation of %#lx bytes rid %#x type %d at %#lx\n",
		    count, *rid, type, rman_get_start(res));
	map = rman_get_start(res);
out:;
	/* Program the BAR (and its upper half for 64-bit BARs). */
	pci_write_config(child, *rid, map, 4);
	if (pci_maprange(testval) == 64)
		pci_write_config(child, *rid + 4, map >> 32, 4);
	return (res);
}
4d28e78f 3842
201eb0a7 3843
/*
 * Bus method: allocate a resource for a child.  For direct children
 * this performs the lazy PCI work first — routing an INTx interrupt if
 * one is deserved but not yet valid, enabling I/O decoding for BAR
 * rids, and sizing/allocating a BAR via pci_alloc_map() when it has no
 * resource-list entry yet.  Already-reserved entries are (optionally
 * activated and) returned directly; everything else falls through to
 * resource_list_alloc().
 */
struct resource *
pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
    u_long start, u_long end, u_long count, u_int flags)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct resource_list *rl = &dinfo->resources;
	struct resource_list_entry *rle;
	pcicfgregs *cfg = &dinfo->cfg;

	/*
	 * Perform lazy resource allocation
	 */
	if (device_get_parent(child) == dev) {
		switch (type) {
		case SYS_RES_IRQ:
			/*
			 * Can't alloc legacy interrupt once MSI messages
			 * have been allocated.
			 */
#ifdef MSI
			if (*rid == 0 && (cfg->msi.msi_alloc > 0 ||
			    cfg->msix.msix_alloc > 0))
				return (NULL);
#endif
			/*
			 * If the child device doesn't have an
			 * interrupt routed and is deserving of an
			 * interrupt, try to assign it one.
			 */
			if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) &&
			    (cfg->intpin != 0))
				pci_assign_interrupt(dev, child, 0);
			break;
		case SYS_RES_IOPORT:
		case SYS_RES_MEMORY:
			if (*rid < PCIR_BAR(cfg->nummaps)) {
				/*
				 * Enable the I/O mode.  We should
				 * also be assigning resources too
				 * when none are present.  The
				 * resource_list_alloc kind of sorta does
				 * this...
				 */
				if (PCI_ENABLE_IO(dev, child, type))
					return (NULL);
			}
			rle = resource_list_find(rl, type, *rid);
			if (rle == NULL)
				return (pci_alloc_map(dev, child, type, rid,
				    start, end, count, flags));
			break;
		}
		/*
		 * If we've already allocated the resource, then
		 * return it now.  But first we may need to activate
		 * it, since we don't allocate the resource as active
		 * above.  Normally this would be done down in the
		 * nexus, but since we short-circuit that path we have
		 * to do its job here.  Not sure if we should kfree the
		 * resource if it fails to activate.
		 */
		rle = resource_list_find(rl, type, *rid);
		if (rle != NULL && rle->res != NULL) {
			if (bootverbose)
				device_printf(child,
				    "Reserved %#lx bytes for rid %#x type %d at %#lx\n",
				    rman_get_size(rle->res), *rid, type,
				    rman_get_start(rle->res));
			if ((flags & RF_ACTIVE) &&
			    bus_generic_activate_resource(dev, child, type,
			    *rid, rle->res) != 0)
				return (NULL);
			return (rle->res);
		}
	}
	return (resource_list_alloc(rl, dev, child, type, rid,
	    start, end, count, flags));
}
3922
4d28e78f
SZ
/*
 * Bus method: delete a resource-list entry for a direct child.  If the
 * entry still holds an allocated resource it is released first — unless
 * the child still owns or has activated it, in which case we complain
 * and bail.  The corresponding config register is then zeroed and the
 * deletion is propagated to the parent bus.
 */
void
pci_delete_resource(device_t dev, device_t child, int type, int rid)
{
	struct pci_devinfo *dinfo;
	struct resource_list *rl;
	struct resource_list_entry *rle;

	/* Only direct children have their resources tracked here. */
	if (device_get_parent(child) != dev)
		return;

	dinfo = device_get_ivars(child);
	rl = &dinfo->resources;
	rle = resource_list_find(rl, type, rid);
	if (rle) {
		if (rle->res) {
			if (rman_get_device(rle->res) != dev ||
			    rman_get_flags(rle->res) & RF_ACTIVE) {
				device_printf(dev, "delete_resource: "
				    "Resource still owned by child, oops. "
				    "(type=%d, rid=%d, addr=%lx)\n",
				    rle->type, rle->rid,
				    rman_get_start(rle->res));
				return;
			}
			bus_release_resource(dev, type, rid, rle->res);
		}
		resource_list_delete(rl, type, rid);
	}
	/*
	 * Why do we turn off the PCI configuration BAR when we delete a
	 * resource? -- imp
	 */
	pci_write_config(child, rid, 0, 4);
	BUS_DELETE_RESOURCE(device_get_parent(dev), child, type, rid);
}
3958
e126caf1
MD
3959struct resource_list *
3960pci_get_resource_list (device_t dev, device_t child)
3961{
4d28e78f 3962 struct pci_devinfo *dinfo = device_get_ivars(child);
e126caf1 3963
bcc66dfa
SZ
3964 if (dinfo == NULL)
3965 return (NULL);
3966
b0486c83 3967 return (&dinfo->resources);
e126caf1
MD
3968}
3969
4d28e78f 3970uint32_t
984263bc
MD
3971pci_read_config_method(device_t dev, device_t child, int reg, int width)
3972{
3973 struct pci_devinfo *dinfo = device_get_ivars(child);
3974 pcicfgregs *cfg = &dinfo->cfg;
4a5a2d63 3975
4d28e78f
SZ
3976 return (PCIB_READ_CONFIG(device_get_parent(dev),
3977 cfg->bus, cfg->slot, cfg->func, reg, width));
984263bc
MD
3978}
3979
e126caf1 3980void
984263bc 3981pci_write_config_method(device_t dev, device_t child, int reg,
4d28e78f 3982 uint32_t val, int width)
984263bc
MD
3983{
3984 struct pci_devinfo *dinfo = device_get_ivars(child);
3985 pcicfgregs *cfg = &dinfo->cfg;
4a5a2d63
JS
3986
3987 PCIB_WRITE_CONFIG(device_get_parent(dev),
4d28e78f 3988 cfg->bus, cfg->slot, cfg->func, reg, val, width);
984263bc
MD
3989}
3990
e126caf1 3991int
4d28e78f 3992pci_child_location_str_method(device_t dev, device_t child, char *buf,
e126caf1
MD
3993 size_t buflen)
3994{
e126caf1 3995
f8c7a42d 3996 ksnprintf(buf, buflen, "slot=%d function=%d", pci_get_slot(child),
e126caf1
MD
3997 pci_get_function(child));
3998 return (0);
3999}
4000
4001int
4d28e78f 4002pci_child_pnpinfo_str_method(device_t dev, device_t child, char *buf,
e126caf1
MD
4003 size_t buflen)
4004{
4005 struct pci_devinfo *dinfo;
4006 pcicfgregs *cfg;
4007
4008 dinfo = device_get_ivars(child);
4009 cfg = &dinfo->cfg;
f8c7a42d 4010 ksnprintf(buf, buflen, "vendor=0x%04x device=0x%04x subvendor=0x%04x "
e126caf1
MD
4011 "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device,
4012 cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass,
4013 cfg->progif);
4014 return (0);
4015}
4016
4017int
4018pci_assign_interrupt_method(device_t dev, device_t child)
4d28e78f
SZ
4019{
4020 struct pci_devinfo *dinfo = device_get_ivars(child);
4021 pcicfgregs *cfg = &dinfo->cfg;
4022
4023 return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child,
4024 cfg->intpin));
e126caf1
MD
4025}
4026
984263bc
MD
4027static int
4028pci_modevent(module_t mod, int what, void *arg)
4029{
4d28e78f 4030 static struct cdev *pci_cdev;
4d28e78f 4031