pci: Put back PCI Express related bits
[dragonfly.git] / sys / bus / pci / pci.c
CommitLineData
4d28e78f
SZ
1/*-
2 * Copyright (c) 1997, Stefan Esser <se@kfreebsd.org>
3 * Copyright (c) 2000, Michael Smith <msmith@kfreebsd.org>
4 * Copyright (c) 2000, BSDi
984263bc
MD
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
12 * disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
4d28e78f 27 * __FBSDID("$FreeBSD: src/sys/dev/pci/pci.c,v 1.355.2.9.2.1 2009/04/15 03:14:26 kensmith Exp $");
984263bc
MD
28 */
29
4d28e78f 30#include <sys/cdefs.h>
984263bc 31
4d28e78f 32#include "opt_bus.h"
984263bc
MD
33
34#include <sys/param.h>
35#include <sys/systm.h>
36#include <sys/malloc.h>
37#include <sys/module.h>
4d28e78f 38#include <sys/linker.h>
984263bc
MD
39#include <sys/fcntl.h>
40#include <sys/conf.h>
41#include <sys/kernel.h>
42#include <sys/queue.h>
638744c5 43#include <sys/sysctl.h>
4d28e78f 44#include <sys/endian.h>
984263bc 45
53f3a428
SZ
46#ifdef APIC_IO
47#include <machine/smp.h>
48#endif
49
984263bc
MD
50#include <vm/vm.h>
51#include <vm/pmap.h>
52#include <vm/vm_extern.h>
53
54#include <sys/bus.h>
984263bc 55#include <sys/rman.h>
4d28e78f 56#include <sys/device.h>
984263bc 57
dc5a7bd2 58#include <sys/pciio.h>
4d28e78f
SZ
59#include <bus/pci/pcireg.h>
60#include <bus/pci/pcivar.h>
61#include <bus/pci/pci_private.h>
984263bc 62
4a5a2d63 63#include "pcib_if.h"
4d28e78f
SZ
64#include "pci_if.h"
65
66#ifdef __HAVE_ACPI
67#include <contrib/dev/acpica/acpi.h>
68#include "acpi_if.h"
69#else
70#define ACPI_PWR_FOR_SLEEP(x, y, z)
71#endif
72
3a6dc23c
SZ
73typedef void (*pci_read_cap_t)(device_t, int, int, pcicfgregs *);
74
4d28e78f
SZ
75static uint32_t pci_mapbase(unsigned mapreg);
76static const char *pci_maptype(unsigned mapreg);
77static int pci_mapsize(unsigned testval);
78static int pci_maprange(unsigned mapreg);
79static void pci_fixancient(pcicfgregs *cfg);
80
81static int pci_porten(device_t pcib, int b, int s, int f);
82static int pci_memen(device_t pcib, int b, int s, int f);
83static void pci_assign_interrupt(device_t bus, device_t dev,
84 int force_route);
85static int pci_add_map(device_t pcib, device_t bus, device_t dev,
86 int b, int s, int f, int reg,
87 struct resource_list *rl, int force, int prefetch);
88static int pci_probe(device_t dev);
89static int pci_attach(device_t dev);
11a49859 90static void pci_child_detached(device_t, device_t);
4d28e78f
SZ
91static void pci_load_vendor_data(void);
92static int pci_describe_parse_line(char **ptr, int *vendor,
93 int *device, char **desc);
94static char *pci_describe_device(device_t dev);
95static int pci_modevent(module_t mod, int what, void *arg);
96static void pci_hdrtypedata(device_t pcib, int b, int s, int f,
97 pcicfgregs *cfg);
3a6dc23c 98static void pci_read_capabilities(device_t pcib, pcicfgregs *cfg);
4d28e78f
SZ
99static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg,
100 int reg, uint32_t *data);
101#if 0
102static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg,
103 int reg, uint32_t data);
104#endif
105static void pci_read_vpd(device_t pcib, pcicfgregs *cfg);
106static void pci_disable_msi(device_t dev);
107static void pci_enable_msi(device_t dev, uint64_t address,
108 uint16_t data);
109static void pci_enable_msix(device_t dev, u_int index,
110 uint64_t address, uint32_t data);
111static void pci_mask_msix(device_t dev, u_int index);
112static void pci_unmask_msix(device_t dev, u_int index);
113static int pci_msi_blacklisted(void);
114static void pci_resume_msi(device_t dev);
115static void pci_resume_msix(device_t dev);
d85e7311
SZ
116static int pcie_slotimpl(const pcicfgregs *);
117static void pci_print_verbose_expr(const pcicfgregs *);
4d28e78f 118
3a6dc23c
SZ
119static void pci_read_cap_pmgt(device_t, int, int, pcicfgregs *);
120static void pci_read_cap_ht(device_t, int, int, pcicfgregs *);
121static void pci_read_cap_msi(device_t, int, int, pcicfgregs *);
122static void pci_read_cap_msix(device_t, int, int, pcicfgregs *);
123static void pci_read_cap_vpd(device_t, int, int, pcicfgregs *);
124static void pci_read_cap_subvendor(device_t, int, int,
125 pcicfgregs *);
126static void pci_read_cap_pcix(device_t, int, int, pcicfgregs *);
d85e7311 127static void pci_read_cap_express(device_t, int, int, pcicfgregs *);
3a6dc23c 128
4d28e78f
SZ
/*
 * Method dispatch table for the "pci" bus driver: generic device
 * lifecycle hooks, the bus_* resource/interrupt interface used by
 * children, and the pci_* config-space accessors declared in pci_if.
 */
static device_method_t pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		pci_probe),
	DEVMETHOD(device_attach,	pci_attach),
	DEVMETHOD(device_detach,	bus_generic_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	pci_suspend),
	DEVMETHOD(device_resume,	pci_resume),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	pci_print_child),
	DEVMETHOD(bus_probe_nomatch,	pci_probe_nomatch),
	DEVMETHOD(bus_read_ivar,	pci_read_ivar),
	DEVMETHOD(bus_write_ivar,	pci_write_ivar),
	DEVMETHOD(bus_driver_added,	pci_driver_added),
	DEVMETHOD(bus_child_detached,	pci_child_detached),
	DEVMETHOD(bus_setup_intr,	pci_setup_intr),
	DEVMETHOD(bus_teardown_intr,	pci_teardown_intr),

	DEVMETHOD(bus_get_resource_list,pci_get_resource_list),
	DEVMETHOD(bus_set_resource,	bus_generic_rl_set_resource),
	DEVMETHOD(bus_get_resource,	bus_generic_rl_get_resource),
	DEVMETHOD(bus_delete_resource,	pci_delete_resource),
	DEVMETHOD(bus_alloc_resource,	pci_alloc_resource),
	DEVMETHOD(bus_release_resource,	bus_generic_rl_release_resource),
	DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
	DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
	DEVMETHOD(bus_child_pnpinfo_str, pci_child_pnpinfo_str_method),
	DEVMETHOD(bus_child_location_str, pci_child_location_str_method),

	/* PCI interface */
	DEVMETHOD(pci_read_config,	pci_read_config_method),
	DEVMETHOD(pci_write_config,	pci_write_config_method),
	DEVMETHOD(pci_enable_busmaster,	pci_enable_busmaster_method),
	DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method),
	DEVMETHOD(pci_enable_io,	pci_enable_io_method),
	DEVMETHOD(pci_disable_io,	pci_disable_io_method),
	DEVMETHOD(pci_get_vpd_ident,	pci_get_vpd_ident_method),
	DEVMETHOD(pci_get_vpd_readonly,	pci_get_vpd_readonly_method),
	DEVMETHOD(pci_get_powerstate,	pci_get_powerstate_method),
	DEVMETHOD(pci_set_powerstate,	pci_set_powerstate_method),
	DEVMETHOD(pci_assign_interrupt,	pci_assign_interrupt_method),
	DEVMETHOD(pci_find_extcap,	pci_find_extcap_method),
	DEVMETHOD(pci_alloc_msi,	pci_alloc_msi_method),
	DEVMETHOD(pci_alloc_msix,	pci_alloc_msix_method),
	DEVMETHOD(pci_remap_msix,	pci_remap_msix_method),
	DEVMETHOD(pci_release_msi,	pci_release_msi_method),
	DEVMETHOD(pci_msi_count,	pci_msi_count_method),
	DEVMETHOD(pci_msix_count,	pci_msix_count_method),

	{ 0, 0 }	/* required terminator */
};

DEFINE_CLASS_0(pci, pci_driver, pci_methods, 0);
4a5a2d63 183
4d28e78f
SZ
184static devclass_t pci_devclass;
185DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, 0);
186MODULE_VERSION(pci, 1);
187
188static char *pci_vendordata;
189static size_t pci_vendordata_size;
dc5a7bd2 190
984263bc 191
3a6dc23c
SZ
/*
 * Dispatch table mapping a PCI capability ID (PCIY_*) to the routine
 * that parses that capability into pcicfgregs.  Walked linearly by
 * pci_read_capabilities() for each capability-list entry found.
 */
static const struct pci_read_cap {
	int cap;			/* PCIY_* capability ID to match */
	pci_read_cap_t read_cap;	/* parser for that capability */
} pci_read_caps[] = {
	{ PCIY_PMG,		pci_read_cap_pmgt },
	{ PCIY_HT,		pci_read_cap_ht },
	{ PCIY_MSI,		pci_read_cap_msi },
	{ PCIY_MSIX,		pci_read_cap_msix },
	{ PCIY_VPD,		pci_read_cap_vpd },
	{ PCIY_SUBVENDOR,	pci_read_cap_subvendor },
	{ PCIY_PCIX,		pci_read_cap_pcix },
	{ PCIY_EXPRESS,		pci_read_cap_express },
	{ 0, NULL } /* required last entry */
};
206
/*
 * Table of known-broken devices and the workaround each needs.
 * 'devid' packs device ID in the high 16 bits and vendor ID in the
 * low 16 bits (e.g. 0x71138086 = device 0x7113, vendor 0x8086/Intel).
 */
struct pci_quirk {
	uint32_t devid;	/* Vendor/device of the card */
	int	type;
#define	PCI_QUIRK_MAP_REG	1 /* PCI map register in weird place */
#define	PCI_QUIRK_DISABLE_MSI	2 /* MSI/MSI-X doesn't work */
	int	arg1;
	int	arg2;
};

struct pci_quirk pci_quirks[] = {
	/* The Intel 82371AB and 82443MX has a map register at offset 0x90. */
	{ 0x71138086, PCI_QUIRK_MAP_REG,	0x90,	 0 },
	{ 0x719b8086, PCI_QUIRK_MAP_REG,	0x90,	 0 },
	/* As does the Serverworks OSB4 (the SMBus mapping register) */
	{ 0x02001166, PCI_QUIRK_MAP_REG,	0x90,	 0 },

	/*
	 * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge
	 * or the CMIC-SL (AKA ServerWorks GC_LE).
	 */
	{ 0x00141166, PCI_QUIRK_DISABLE_MSI,	0,	0 },
	{ 0x00171166, PCI_QUIRK_DISABLE_MSI,	0,	0 },

	/*
	 * MSI doesn't work on earlier Intel chipsets including
	 * E7500, E7501, E7505, 845, 865, 875/E7210, and 855.
	 */
	{ 0x25408086, PCI_QUIRK_DISABLE_MSI,	0,	0 },
	{ 0x254c8086, PCI_QUIRK_DISABLE_MSI,	0,	0 },
	{ 0x25508086, PCI_QUIRK_DISABLE_MSI,	0,	0 },
	{ 0x25608086, PCI_QUIRK_DISABLE_MSI,	0,	0 },
	{ 0x25708086, PCI_QUIRK_DISABLE_MSI,	0,	0 },
	{ 0x25788086, PCI_QUIRK_DISABLE_MSI,	0,	0 },
	{ 0x35808086, PCI_QUIRK_DISABLE_MSI,	0,	0 },

	/*
	 * MSI doesn't work with devices behind the AMD 8131 HT-PCIX
	 * bridge.
	 */
	{ 0x74501022, PCI_QUIRK_DISABLE_MSI,	0,	0 },

	{ 0 }	/* terminator */
};
250
251/* map register information */
4d28e78f
SZ
252#define PCI_MAPMEM 0x01 /* memory map */
253#define PCI_MAPMEMP 0x02 /* prefetchable memory map */
254#define PCI_MAPPORT 0x04 /* port map */
255
256struct devlist pci_devq;
257uint32_t pci_generation;
258uint32_t pci_numdevs = 0;
259static int pcie_chipset, pcix_chipset;
260
261/* sysctl vars */
262SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD, 0, "PCI bus tuning parameters");
263
264static int pci_enable_io_modes = 1;
265TUNABLE_INT("hw.pci.enable_io_modes", &pci_enable_io_modes);
266SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RW,
267 &pci_enable_io_modes, 1,
268 "Enable I/O and memory bits in the config register. Some BIOSes do not\n\
269enable these bits correctly. We'd like to do this all the time, but there\n\
270are some peripherals that this causes problems with.");
984263bc 271
638744c5
HT
272static int pci_do_power_nodriver = 0;
273TUNABLE_INT("hw.pci.do_power_nodriver", &pci_do_power_nodriver);
274SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RW,
275 &pci_do_power_nodriver, 0,
276 "Place a function into D3 state when no driver attaches to it. 0 means\n\
277disable. 1 means conservatively place devices into D3 state. 2 means\n\
6699890a 278aggressively place devices into D3 state. 3 means put absolutely everything\n\
638744c5
HT
279in D3 state.");
280
4d28e78f
SZ
281static int pci_do_power_resume = 1;
282TUNABLE_INT("hw.pci.do_power_resume", &pci_do_power_resume);
283SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RW,
284 &pci_do_power_resume, 1,
285 "Transition from D3 -> D0 on resume.");
286
287static int pci_do_msi = 1;
288TUNABLE_INT("hw.pci.enable_msi", &pci_do_msi);
289SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RW, &pci_do_msi, 1,
290 "Enable support for MSI interrupts");
291
292static int pci_do_msix = 1;
293TUNABLE_INT("hw.pci.enable_msix", &pci_do_msix);
294SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RW, &pci_do_msix, 1,
295 "Enable support for MSI-X interrupts");
296
297static int pci_honor_msi_blacklist = 1;
298TUNABLE_INT("hw.pci.honor_msi_blacklist", &pci_honor_msi_blacklist);
299SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RD,
300 &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI");
301
302/* Find a device_t by bus/slot/function in domain 0 */
303
304device_t
305pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func)
306{
307
308 return (pci_find_dbsf(0, bus, slot, func));
309}
310
311/* Find a device_t by domain/bus/slot/function */
312
984263bc 313device_t
4d28e78f 314pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func)
984263bc
MD
315{
316 struct pci_devinfo *dinfo;
317
318 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
4d28e78f
SZ
319 if ((dinfo->cfg.domain == domain) &&
320 (dinfo->cfg.bus == bus) &&
984263bc
MD
321 (dinfo->cfg.slot == slot) &&
322 (dinfo->cfg.func == func)) {
323 return (dinfo->cfg.dev);
324 }
325 }
326
327 return (NULL);
328}
329
4d28e78f
SZ
330/* Find a device_t by vendor/device ID */
331
984263bc 332device_t
4d28e78f 333pci_find_device(uint16_t vendor, uint16_t device)
984263bc
MD
334{
335 struct pci_devinfo *dinfo;
336
337 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
338 if ((dinfo->cfg.vendor == vendor) &&
339 (dinfo->cfg.device == device)) {
340 return (dinfo->cfg.dev);
341 }
342 }
343
344 return (NULL);
345}
346
347/* return base address of memory or port map */
348
4d28e78f
SZ
349static uint32_t
350pci_mapbase(uint32_t mapreg)
984263bc 351{
4d28e78f
SZ
352
353 if (PCI_BAR_MEM(mapreg))
354 return (mapreg & PCIM_BAR_MEM_BASE);
355 else
356 return (mapreg & PCIM_BAR_IO_BASE);
984263bc
MD
357}
358
359/* return map type of memory or port map */
360
4d28e78f 361static const char *
984263bc
MD
362pci_maptype(unsigned mapreg)
363{
984263bc 364
4d28e78f
SZ
365 if (PCI_BAR_IO(mapreg))
366 return ("I/O Port");
367 if (mapreg & PCIM_BAR_MEM_PREFETCH)
368 return ("Prefetchable Memory");
369 return ("Memory");
984263bc
MD
370}
371
372/* return log2 of map size decoded for memory or port map */
373
374static int
4d28e78f 375pci_mapsize(uint32_t testval)
984263bc
MD
376{
377 int ln2size;
378
379 testval = pci_mapbase(testval);
380 ln2size = 0;
381 if (testval != 0) {
382 while ((testval & 1) == 0)
383 {
384 ln2size++;
385 testval >>= 1;
386 }
387 }
388 return (ln2size);
389}
390
391/* return log2 of address range supported by map register */
392
393static int
394pci_maprange(unsigned mapreg)
395{
396 int ln2range = 0;
4d28e78f
SZ
397
398 if (PCI_BAR_IO(mapreg))
984263bc 399 ln2range = 32;
4d28e78f
SZ
400 else
401 switch (mapreg & PCIM_BAR_MEM_TYPE) {
402 case PCIM_BAR_MEM_32:
403 ln2range = 32;
404 break;
405 case PCIM_BAR_MEM_1MB:
406 ln2range = 20;
407 break;
408 case PCIM_BAR_MEM_64:
409 ln2range = 64;
410 break;
411 }
984263bc
MD
412 return (ln2range);
413}
414
415/* adjust some values from PCI 1.0 devices to match 2.0 standards ... */
416
417static void
418pci_fixancient(pcicfgregs *cfg)
419{
420 if (cfg->hdrtype != 0)
421 return;
422
423 /* PCI to PCI bridges use header type 1 */
424 if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI)
425 cfg->hdrtype = 1;
426}
427
984263bc
MD
428/* extract header type specific config data */
429
/*
 * Extract header-type specific config data: subsystem IDs (where the
 * header type defines them) and the number of BARs this header carries.
 */
static void
pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg)
{
#define	REG(n, w)	PCIB_READ_CONFIG(pcib, b, s, f, n, w)
	switch (cfg->hdrtype) {
	case 0:
		/* Plain device: subvendor/subdevice plus 6 BARs. */
		cfg->subvendor      = REG(PCIR_SUBVEND_0, 2);
		cfg->subdevice      = REG(PCIR_SUBDEV_0, 2);
		cfg->nummaps	    = PCI_MAXMAPS_0;
		break;
	case 1:
		/* PCI-PCI bridge: no subsystem registers at this offset. */
		cfg->nummaps	    = PCI_MAXMAPS_1;
		break;
	case 2:
		/* Cardbus bridge: subsystem IDs live at different offsets. */
		cfg->subvendor      = REG(PCIR_SUBVEND_2, 2);
		cfg->subdevice      = REG(PCIR_SUBDEV_2, 2);
		cfg->nummaps	    = PCI_MAXMAPS_2;
		break;
	}
#undef REG
}
451
4d28e78f 452/* read configuration header into pcicfgregs structure */
/*
 * Read the configuration header of the function at domain d, bus b,
 * slot s, function f (accessed through bridge 'pcib') into a freshly
 * kmalloc'ed pci_devinfo of 'size' bytes, link it onto the global
 * pci_devq list, and mirror the identity registers into the pciio
 * 'conf' structure.  Returns NULL when the function does not respond
 * (PCIR_DEVVENDOR reads back as all-ones).
 */
struct pci_devinfo *
pci_read_device(device_t pcib, int d, int b, int s, int f, size_t size)
{
#define	REG(n, w)	PCIB_READ_CONFIG(pcib, b, s, f, n, w)
	pcicfgregs *cfg = NULL;
	struct pci_devinfo *devlist_entry;
	struct devlist *devlist_head;

	devlist_head = &pci_devq;

	devlist_entry = NULL;

	if (REG(PCIR_DEVVENDOR, 4) != -1) {
		devlist_entry = kmalloc(size, M_DEVBUF, M_WAITOK | M_ZERO);

		cfg = &devlist_entry->cfg;

		cfg->domain		= d;
		cfg->bus		= b;
		cfg->slot		= s;
		cfg->func		= f;
		cfg->vendor		= REG(PCIR_VENDOR, 2);
		cfg->device		= REG(PCIR_DEVICE, 2);
		cfg->cmdreg		= REG(PCIR_COMMAND, 2);
		cfg->statreg		= REG(PCIR_STATUS, 2);
		cfg->baseclass		= REG(PCIR_CLASS, 1);
		cfg->subclass		= REG(PCIR_SUBCLASS, 1);
		cfg->progif		= REG(PCIR_PROGIF, 1);
		cfg->revid		= REG(PCIR_REVID, 1);
		cfg->hdrtype		= REG(PCIR_HDRTYPE, 1);
		cfg->cachelnsz		= REG(PCIR_CACHELNSZ, 1);
		cfg->lattimer		= REG(PCIR_LATTIMER, 1);
		cfg->intpin		= REG(PCIR_INTPIN, 1);
		cfg->intline		= REG(PCIR_INTLINE, 1);

#ifdef APIC_IO
		/*
		 * If using the APIC the intpin is probably wrong, since it
		 * is often setup by the BIOS with the PIC in mind.
		 */
		if (cfg->intpin != 0) {
			int airq;

			airq = pci_apic_irq(cfg->bus, cfg->slot, cfg->intpin);
			if (airq >= 0) {
				/* PCI specific entry found in MP table */
				if (airq != cfg->intline) {
					undirect_pci_irq(cfg->intline);
					cfg->intline = airq;
				}
			} else {
				/*
				 * PCI interrupts might be redirected to the
				 * ISA bus according to some MP tables. Use the
				 * same methods as used by the ISA devices
				 * devices to find the proper IOAPIC int pin.
				 */
				airq = isa_apic_irq(cfg->intline);
				if ((airq >= 0) && (airq != cfg->intline)) {
					/* XXX: undirect_pci_irq() ? */
					undirect_isa_irq(cfg->intline);
					cfg->intline = airq;
				}
			}
		}
#endif /* APIC_IO */

		cfg->mingnt		= REG(PCIR_MINGNT, 1);
		cfg->maxlat		= REG(PCIR_MAXLAT, 1);

		/* Split the multi-function flag out of the header type. */
		cfg->mfdev		= (cfg->hdrtype & PCIM_MFDEV) != 0;
		cfg->hdrtype		&= ~PCIM_MFDEV;

		pci_fixancient(cfg);
		pci_hdrtypedata(pcib, b, s, f, cfg);

		pci_read_capabilities(pcib, cfg);

		STAILQ_INSERT_TAIL(devlist_head, devlist_entry, pci_links);

		/* Mirror identity into the pciio-visible conf structure. */
		devlist_entry->conf.pc_sel.pc_domain = cfg->domain;
		devlist_entry->conf.pc_sel.pc_bus = cfg->bus;
		devlist_entry->conf.pc_sel.pc_dev = cfg->slot;
		devlist_entry->conf.pc_sel.pc_func = cfg->func;
		devlist_entry->conf.pc_hdr = cfg->hdrtype;

		devlist_entry->conf.pc_subvendor = cfg->subvendor;
		devlist_entry->conf.pc_subdevice = cfg->subdevice;
		devlist_entry->conf.pc_vendor = cfg->vendor;
		devlist_entry->conf.pc_device = cfg->device;

		devlist_entry->conf.pc_class = cfg->baseclass;
		devlist_entry->conf.pc_subclass = cfg->subclass;
		devlist_entry->conf.pc_progif = cfg->progif;
		devlist_entry->conf.pc_revid = cfg->revid;

		pci_numdevs++;
		pci_generation++;
	}
	return (devlist_entry);
#undef REG
}
555
3a6dc23c
SZ
556static int
557pci_fixup_nextptr(int *nextptr0)
558{
559 int nextptr = *nextptr0;
560
561 /* "Next pointer" is only one byte */
562 KASSERT(nextptr <= 0xff, ("Illegal next pointer %d\n", nextptr));
563
564 if (nextptr & 0x3) {
565 /*
566 * PCI local bus spec 3.0:
567 *
568 * "... The bottom two bits of all pointers are reserved
569 * and must be implemented as 00b although software must
570 * mask them to allow for future uses of these bits ..."
571 */
572 if (bootverbose) {
573 kprintf("Illegal PCI extended capability "
574 "offset, fixup 0x%02x -> 0x%02x\n",
575 nextptr, nextptr & ~0x3);
576 }
577 nextptr &= ~0x3;
578 }
579 *nextptr0 = nextptr;
580
581 if (nextptr < 0x40) {
582 if (nextptr != 0) {
583 kprintf("Illegal PCI extended capability "
584 "offset 0x%02x", nextptr);
585 }
586 return 0;
587 }
588 return 1;
589}
590
/*
 * Parse the power-management capability at 'ptr' into cfg->pp.
 * Bails out if a PM capability was already recorded for this device.
 */
static void
pci_read_cap_pmgt(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
{
#define	REG(n, w)	\
	PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)

	struct pcicfg_pp *pp = &cfg->pp;

	/* Honor only the first PM capability found. */
	if (pp->pp_cap)
		return;

	pp->pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
	pp->pp_status = ptr + PCIR_POWER_STATUS;
	pp->pp_pmcsr = ptr + PCIR_POWER_PMCSR;

	if ((nextptr - ptr) > PCIR_POWER_DATA) {
		/*
		 * XXX
		 * We should write to data_select and read back from
		 * data_scale to determine whether data register is
		 * implemented.
		 */
#ifdef foo
		pp->pp_data = ptr + PCIR_POWER_DATA;
#else
		pp->pp_data = 0;
#endif
	}

#undef REG
}
622
/*
 * Parse the HyperTransport capability at 'ptr', recording the MSI
 * mapping window into cfg->ht.  The whole body is compiled out under
 * "notyet" — currently this function is a no-op stub.
 */
static void
pci_read_cap_ht(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
{
#ifdef notyet
#if defined(__i386__) || defined(__amd64__)

#define	REG(n, w)	\
	PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)

	struct pcicfg_ht *ht = &cfg->ht;
	uint64_t addr;
	uint32_t val;

	/* Determine HT-specific capability type. */
	val = REG(ptr + PCIR_HT_COMMAND, 2);

	/* Only the MSI-mapping HT capability is of interest here. */
	if ((val & PCIM_HTCMD_CAP_MASK) != PCIM_HTCAP_MSI_MAPPING)
		return;

	if (!(val & PCIM_HTCMD_MSI_FIXED)) {
		/* Sanity check the mapping window. */
		addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI, 4);
		addr <<= 32;
		addr |= REG(ptr + PCIR_HTMSI_ADDRESS_LO, 4);
		if (addr != MSI_INTEL_ADDR_BASE) {
			device_printf(pcib, "HT Bridge at pci%d:%d:%d:%d "
			    "has non-default MSI window 0x%llx\n",
			    cfg->domain, cfg->bus, cfg->slot, cfg->func,
			    (long long)addr);
		}
	} else {
		addr = MSI_INTEL_ADDR_BASE;
	}

	ht->ht_msimap = ptr;
	ht->ht_msictrl = val;
	ht->ht_msiaddr = addr;

#undef REG

#endif /* __i386__ || __amd64__ */
#endif /* notyet */
}
666
667static void
668pci_read_cap_msi(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
669{
670#define REG(n, w) \
671 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
672
673 struct pcicfg_msi *msi = &cfg->msi;
674
675 msi->msi_location = ptr;
676 msi->msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2);
677 msi->msi_msgnum = 1 << ((msi->msi_ctrl & PCIM_MSICTRL_MMC_MASK) >> 1);
678
679#undef REG
680}
681
/*
 * Parse the MSI-X capability at 'ptr': message count from the control
 * word, plus which BAR and offset hold the vector table and the
 * pending-bit array (BIR field in the low bits of each register).
 */
static void
pci_read_cap_msix(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
{
#define	REG(n, w)	\
	PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)

	struct pcicfg_msix *msix = &cfg->msix;
	uint32_t val;

	msix->msix_location = ptr;
	msix->msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2);
	/* Table size field is N-1 encoded. */
	msix->msix_msgnum = (msix->msix_ctrl & PCIM_MSIXCTRL_TABLE_SIZE) + 1;

	val = REG(ptr + PCIR_MSIX_TABLE, 4);
	msix->msix_table_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK);
	msix->msix_table_offset = val & ~PCIM_MSIX_BIR_MASK;

	val = REG(ptr + PCIR_MSIX_PBA, 4);
	msix->msix_pba_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK);
	msix->msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK;

#undef REG
}
705
706static void
707pci_read_cap_vpd(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
708{
709 cfg->vpd.vpd_reg = ptr;
710}
711
/*
 * Parse the bridge subvendor capability: PCI-PCI bridges (header
 * type 1) expose their subsystem vendor/device IDs here rather than
 * in the type 0 header.
 */
static void
pci_read_cap_subvendor(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
{
#define	REG(n, w)	\
	PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)

	/* Should always be true. */
	if ((cfg->hdrtype & PCIM_HDRTYPE) == 1) {
		uint32_t val;

		/* Low word is subvendor, high word is subdevice. */
		val = REG(ptr + PCIR_SUBVENDCAP_ID, 4);
		cfg->subvendor = val & 0xffff;
		cfg->subdevice = val >> 16;
	}

#undef REG
}
729
/*
 * Note the PCI-X capability location and flag the system as having a
 * PCI-X chipset when found on a bridge.
 */
static void
pci_read_cap_pcix(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
{
	/*
	 * Assume we have a PCI-X chipset if we have
	 * at least one PCI-PCI bridge with a PCI-X
	 * capability. Note that some systems with
	 * PCI-express or HT chipsets might match on
	 * this check as well.
	 */
	if ((cfg->hdrtype & PCIM_HDRTYPE) == 1)
		pcix_chipset = 1;

	cfg->pcix.pcix_ptr = ptr;
}
745
/*
 * Return 1 if this PCI Express port has its "slot implemented" bit
 * validly set, 0 otherwise.  Only PCIe capability version 1 is
 * understood; the bit is only meaningful on root/downstream ports,
 * which in turn only exist behind a type 1 header.
 */
static int
pcie_slotimpl(const pcicfgregs *cfg)
{
	const struct pcicfg_expr *expr = &cfg->expr;
	uint16_t port_type;

	/*
	 * Only version 1 can be parsed currently
	 */
	if ((expr->expr_cap & PCIEM_CAP_VER_MASK) != PCIEM_CAP_VER_1)
		return 0;

	/*
	 * - Slot implemented bit is meaningful iff current port is
	 *   root port or down stream port.
	 * - Testing for root port or down stream port is meaningful
	 *   iff PCI configure has type 1 header.
	 */

	if (cfg->hdrtype != 1)
		return 0;

	port_type = expr->expr_cap & PCIEM_CAP_PORT_TYPE;
	if (port_type != PCIE_ROOT_PORT && port_type != PCIE_DOWN_STREAM_PORT)
		return 0;

	if (!(expr->expr_cap & PCIEM_CAP_SLOT_IMPL))
		return 0;

	return 1;
}
777
/*
 * Parse the PCI Express capability at 'ptr' into cfg->expr and mark
 * the system as PCIe-capable.  Slot capabilities are read only for
 * version 1 capability structures on ports with an implemented slot.
 */
static void
pci_read_cap_express(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
{
#define	REG(n, w)	\
	PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)

	struct pcicfg_expr *expr = &cfg->expr;

	/*
	 * Assume we have a PCI-express chipset if we have
	 * at least one PCI-express device.
	 */
	pcie_chipset = 1;

	expr->expr_ptr = ptr;
	expr->expr_cap = REG(ptr + PCIER_CAPABILITY, 2);

	/*
	 * Only version 1 can be parsed currently
	 */
	if ((expr->expr_cap & PCIEM_CAP_VER_MASK) != PCIEM_CAP_VER_1)
		return;

	/*
	 * Read slot capabilities.  Slot capabilities exists iff
	 * current port's slot is implemented
	 */
	if (pcie_slotimpl(cfg))
		expr->expr_slotcap = REG(ptr + PCIER_SLOTCAP, 4);

#undef REG
}
810
/*
 * Walk the device's capability list (if the status register advertises
 * one) and dispatch each entry to its parser via the pci_read_caps
 * table.  The list head offset depends on the header type; pointers
 * are sanitized by pci_fixup_nextptr() before being followed.
 */
static void
pci_read_capabilities(device_t pcib, pcicfgregs *cfg)
{
#define	REG(n, w)	PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
#define	WREG(n, v, w)	PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w)

	uint32_t val;
	int nextptr, ptrptr;

	if ((REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT) == 0) {
		/* No capabilities */
		return;
	}

	switch (cfg->hdrtype & PCIM_HDRTYPE) {
	case 0:
	case 1:
		ptrptr = PCIR_CAP_PTR;
		break;
	case 2:
		ptrptr = PCIR_CAP_PTR_2;	/* cardbus capabilities ptr */
		break;
	default:
		return;				/* no capabilities support */
	}
	nextptr = REG(ptrptr, 1);	/* sanity check? */

	/*
	 * Read capability entries.
	 */
	while (pci_fixup_nextptr(&nextptr)) {
		const struct pci_read_cap *rc;
		int ptr = nextptr;

		/* Find the next entry */
		nextptr = REG(ptr + PCICAP_NEXTPTR, 1);

		/* Process this entry */
		val = REG(ptr + PCICAP_ID, 1);
		for (rc = pci_read_caps; rc->read_cap != NULL; ++rc) {
			if (rc->cap == val) {
				rc->read_cap(pcib, ptr, nextptr, cfg);
				break;
			}
		}
	}
/* REG and WREG use carry through to next functions */
}
859
4d28e78f
SZ
860/*
861 * PCI Vital Product Data
862 */
863
864#define PCI_VPD_TIMEOUT 1000000
984263bc 865
4d28e78f
SZ
/*
 * Read one 32-bit word of Vital Product Data at byte offset 'reg'
 * (must be 4-byte aligned) through the VPD capability mailbox.
 * Busy-waits (bounded by PCI_VPD_TIMEOUT iterations) for the flag bit
 * to signal completion; returns 0 on success or ENXIO on timeout.
 * Uses the REG/WREG macros left defined by pci_read_capabilities().
 */
static int
pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data)
{
	int count = PCI_VPD_TIMEOUT;

	KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));

	/* Writing the address with flag clear starts a read cycle. */
	WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2);

	/* Bit 15 of the address register is set when data is ready. */
	while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000) {
		if (--count < 0)
			return (ENXIO);
		DELAY(1);	/* limit looping */
	}
	*data = (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4));

	return (0);
}
984263bc 884
4d28e78f
SZ
885#if 0
886static int
887pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data)
984263bc 888{
4d28e78f
SZ
889 int count = PCI_VPD_TIMEOUT;
890
891 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
892
893 WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4);
894 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2);
895 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000) {
896 if (--count < 0)
897 return (ENXIO);
898 DELAY(1); /* limit looping */
899 }
900
901 return (0);
902}
903#endif
904
905#undef PCI_VPD_TIMEOUT
906
/* Cursor state for the byte-at-a-time VPD reader (vpd_nextbyte). */
struct vpd_readstate {
	device_t	pcib;		/* bridge used for config access */
	pcicfgregs	*cfg;		/* device whose VPD is being read */
	uint32_t	val;		/* most recent 32-bit VPD word */
	int		bytesinval;	/* unconsumed bytes left in 'val' */
	int		off;		/* next VPD byte offset to fetch */
	uint8_t		cksum;		/* running sum of all bytes read */
};
915
/*
 * Produce the next VPD byte into *data, refilling the 32-bit buffer
 * from config space every fourth call.  Bytes are delivered LSB-first
 * from each little-endian word and folded into the running checksum.
 * Returns 0 on success, ENXIO if the underlying VPD read times out.
 */
static int
vpd_nextbyte(struct vpd_readstate *vrs, uint8_t *data)
{
	uint32_t reg;
	uint8_t byte;

	if (vrs->bytesinval == 0) {
		/* Buffer exhausted: fetch the next 32-bit word. */
		if (pci_read_vpd_reg(vrs->pcib, vrs->cfg, vrs->off, &reg))
			return (ENXIO);
		vrs->val = le32toh(reg);
		vrs->off += 4;
		byte = vrs->val & 0xff;
		vrs->bytesinval = 3;
	} else {
		/* Shift out the next buffered byte. */
		vrs->val = vrs->val >> 8;
		byte = vrs->val & 0xff;
		vrs->bytesinval--;
	}

	vrs->cksum += byte;
	*data = byte;
	return (0);
}
939
d85e7311
SZ
940int
941pcie_slot_implemented(device_t dev)
942{
943 struct pci_devinfo *dinfo = device_get_ivars(dev);
944
945 return pcie_slotimpl(&dinfo->cfg);
946}
947
4d28e78f
SZ
/*
 * Program the PCIe Device Control "max read request size" field to
 * 'rqsize' (a PCIEM_DEVCTL_MAX_READRQ_* encoding).  Panics if the
 * encoding is out of range or the device has no PCIe capability.
 * The register is only rewritten when the current value differs.
 */
void
pcie_set_max_readrq(device_t dev, uint16_t rqsize)
{
	uint8_t expr_ptr;
	uint16_t val;

	rqsize &= PCIEM_DEVCTL_MAX_READRQ_MASK;
	/* Encodings above the 4096-byte value are reserved. */
	if (rqsize > PCIEM_DEVCTL_MAX_READRQ_4096) {
		panic("%s: invalid max read request size 0x%02x\n",
		      device_get_nameunit(dev), rqsize);
	}

	expr_ptr = pci_get_pciecap_ptr(dev);
	if (!expr_ptr)
		panic("%s: not PCIe device\n", device_get_nameunit(dev));

	val = pci_read_config(dev, expr_ptr + PCIER_DEVCTRL, 2);
	if ((val & PCIEM_DEVCTL_MAX_READRQ_MASK) != rqsize) {
		if (bootverbose)
			device_printf(dev, "adjust device control 0x%04x", val);

		/* Replace only the read-request-size field. */
		val &= ~PCIEM_DEVCTL_MAX_READRQ_MASK;
		val |= rqsize;
		pci_write_config(dev, expr_ptr + PCIER_DEVCTRL, val, 2);

		if (bootverbose)
			kprintf(" -> 0x%04x\n", val);
	}
}
977
/*
 * Read and parse a device's Vital Product Data (VPD) into cfg->vpd.
 *
 * The VPD image is consumed a byte at a time via vpd_nextbyte() and
 * decoded with a small state machine (state numbers annotated below).
 * On success cfg->vpd holds the identifier string plus the read-only
 * (VPD-R) and read/write (VPD-W) keyword arrays.  A bad checksum
 * discards only the read-only data; an I/O error (state -2) discards
 * everything.  vpd_cached is set in all cases so the hardware is
 * never re-read.
 */
static void
pci_read_vpd(device_t pcib, pcicfgregs *cfg)
{
	struct vpd_readstate vrs;
	int state;		/* parser state; < 0 terminates the loop */
	int name;		/* resource item name from the tag byte */
	int remain;		/* bytes left in the current resource */
	int i;			/* write index into the current value */
	int alloc, off;		/* alloc/off for RO/W arrays */
	int cksumvalid;		/* -1 = unknown, 0 = bad, 1 = good */
	int dflen;		/* data length of the current keyword */
	uint8_t byte;
	uint8_t byte2;

	/* init vpd reader */
	vrs.bytesinval = 0;
	vrs.off = 0;
	vrs.pcib = pcib;
	vrs.cfg = cfg;
	vrs.cksum = 0;

	state = 0;
	name = remain = i = 0;	/* shut up stupid gcc */
	alloc = off = 0;	/* shut up stupid gcc */
	dflen = 0;		/* shut up stupid gcc */
	cksumvalid = -1;
	while (state >= 0) {
		if (vpd_nextbyte(&vrs, &byte)) {
			/* -2 marks an I/O error, as opposed to -1 = done */
			state = -2;
			break;
		}
#if 0
		kprintf("vpd: val: %#x, off: %d, bytesinval: %d, byte: %#hhx, " \
		    "state: %d, remain: %d, name: %#x, i: %d\n", vrs.val,
		    vrs.off, vrs.bytesinval, byte, state, remain, name, i);
#endif
		switch (state) {
		case 0:		/* item name */
			if (byte & 0x80) {
				/* large resource: 16-bit LE length follows */
				if (vpd_nextbyte(&vrs, &byte2)) {
					state = -2;
					break;
				}
				remain = byte2;
				if (vpd_nextbyte(&vrs, &byte2)) {
					state = -2;
					break;
				}
				remain |= byte2 << 8;
				if (remain > (0x7f*4 - vrs.off)) {
					state = -1;
					kprintf(
					    "pci%d:%d:%d:%d: invalid VPD data, remain %#x\n",
					    cfg->domain, cfg->bus, cfg->slot,
					    cfg->func, remain);
				}
				name = byte & 0x7f;
			} else {
				/* small resource: 3-bit length, 4-bit name */
				remain = byte & 0x7;
				name = (byte >> 3) & 0xf;
			}
			switch (name) {
			case 0x2:	/* String */
				cfg->vpd.vpd_ident = kmalloc(remain + 1,
				    M_DEVBUF, M_WAITOK);
				i = 0;
				state = 1;
				break;
			case 0xf:	/* End */
				state = -1;
				break;
			case 0x10:	/* VPD-R */
				alloc = 8;
				off = 0;
				cfg->vpd.vpd_ros = kmalloc(alloc *
				    sizeof(*cfg->vpd.vpd_ros), M_DEVBUF,
				    M_WAITOK | M_ZERO);
				state = 2;
				break;
			case 0x11:	/* VPD-W */
				alloc = 8;
				off = 0;
				cfg->vpd.vpd_w = kmalloc(alloc *
				    sizeof(*cfg->vpd.vpd_w), M_DEVBUF,
				    M_WAITOK | M_ZERO);
				state = 5;
				break;
			default:	/* Invalid data, abort */
				state = -1;
				break;
			}
			break;

		case 1:	/* Identifier String */
			cfg->vpd.vpd_ident[i++] = byte;
			remain--;
			if (remain == 0)  {
				cfg->vpd.vpd_ident[i] = '\0';
				state = 0;
			}
			break;

		case 2:	/* VPD-R Keyword Header */
			/* grow the RO array geometrically as needed */
			if (off == alloc) {
				cfg->vpd.vpd_ros = krealloc(cfg->vpd.vpd_ros,
				    (alloc *= 2) * sizeof(*cfg->vpd.vpd_ros),
				    M_DEVBUF, M_WAITOK | M_ZERO);
			}
			cfg->vpd.vpd_ros[off].keyword[0] = byte;
			if (vpd_nextbyte(&vrs, &byte2)) {
				state = -2;
				break;
			}
			cfg->vpd.vpd_ros[off].keyword[1] = byte2;
			if (vpd_nextbyte(&vrs, &byte2)) {
				state = -2;
				break;
			}
			dflen = byte2;
			if (dflen == 0 &&
			    strncmp(cfg->vpd.vpd_ros[off].keyword, "RV",
			    2) == 0) {
				/*
				 * if this happens, we can't trust the rest
				 * of the VPD.
				 */
				kprintf(
				    "pci%d:%d:%d:%d: bad keyword length: %d\n",
				    cfg->domain, cfg->bus, cfg->slot,
				    cfg->func, dflen);
				cksumvalid = 0;
				state = -1;
				break;
			} else if (dflen == 0) {
				cfg->vpd.vpd_ros[off].value = kmalloc(1 *
				    sizeof(*cfg->vpd.vpd_ros[off].value),
				    M_DEVBUF, M_WAITOK);
				cfg->vpd.vpd_ros[off].value[0] = '\x00';
			} else
				cfg->vpd.vpd_ros[off].value = kmalloc(
				    (dflen + 1) *
				    sizeof(*cfg->vpd.vpd_ros[off].value),
				    M_DEVBUF, M_WAITOK);
			remain -= 3;
			i = 0;
			/* keep in sync w/ state 3's transitions */
			if (dflen == 0 && remain == 0)
				state = 0;
			else if (dflen == 0)
				state = 2;
			else
				state = 3;
			break;

		case 3:	/* VPD-R Keyword Value */
			cfg->vpd.vpd_ros[off].value[i++] = byte;
			/* the RV keyword validates the running checksum */
			if (strncmp(cfg->vpd.vpd_ros[off].keyword,
			    "RV", 2) == 0 && cksumvalid == -1) {
				if (vrs.cksum == 0)
					cksumvalid = 1;
				else {
					if (bootverbose)
						kprintf(
				"pci%d:%d:%d:%d: bad VPD cksum, remain %hhu\n",
						    cfg->domain, cfg->bus,
						    cfg->slot, cfg->func,
						    vrs.cksum);
					cksumvalid = 0;
					state = -1;
					break;
				}
			}
			dflen--;
			remain--;
			/* keep in sync w/ state 2's transitions */
			if (dflen == 0)
				cfg->vpd.vpd_ros[off++].value[i++] = '\0';
			if (dflen == 0 && remain == 0) {
				/* trim the array down to what was used */
				cfg->vpd.vpd_rocnt = off;
				cfg->vpd.vpd_ros = krealloc(cfg->vpd.vpd_ros,
				    off * sizeof(*cfg->vpd.vpd_ros),
				    M_DEVBUF, M_WAITOK | M_ZERO);
				state = 0;
			} else if (dflen == 0)
				state = 2;
			break;

		case 4:
			/* consume the remainder of an unhandled resource */
			remain--;
			if (remain == 0)
				state = 0;
			break;

		case 5:	/* VPD-W Keyword Header */
			/* grow the W array geometrically as needed */
			if (off == alloc) {
				cfg->vpd.vpd_w = krealloc(cfg->vpd.vpd_w,
				    (alloc *= 2) * sizeof(*cfg->vpd.vpd_w),
				    M_DEVBUF, M_WAITOK | M_ZERO);
			}
			cfg->vpd.vpd_w[off].keyword[0] = byte;
			if (vpd_nextbyte(&vrs, &byte2)) {
				state = -2;
				break;
			}
			cfg->vpd.vpd_w[off].keyword[1] = byte2;
			if (vpd_nextbyte(&vrs, &byte2)) {
				state = -2;
				break;
			}
			cfg->vpd.vpd_w[off].len = dflen = byte2;
			cfg->vpd.vpd_w[off].start = vrs.off - vrs.bytesinval;
			cfg->vpd.vpd_w[off].value = kmalloc((dflen + 1) *
			    sizeof(*cfg->vpd.vpd_w[off].value),
			    M_DEVBUF, M_WAITOK);
			remain -= 3;
			i = 0;
			/* keep in sync w/ state 6's transitions */
			if (dflen == 0 && remain == 0)
				state = 0;
			else if (dflen == 0)
				state = 5;
			else
				state = 6;
			break;

		case 6:	/* VPD-W Keyword Value */
			cfg->vpd.vpd_w[off].value[i++] = byte;
			dflen--;
			remain--;
			/* keep in sync w/ state 5's transitions */
			if (dflen == 0)
				cfg->vpd.vpd_w[off++].value[i++] = '\0';
			if (dflen == 0 && remain == 0) {
				/* trim the array down to what was used */
				cfg->vpd.vpd_wcnt = off;
				cfg->vpd.vpd_w = krealloc(cfg->vpd.vpd_w,
				    off * sizeof(*cfg->vpd.vpd_w),
				    M_DEVBUF, M_WAITOK | M_ZERO);
				state = 0;
			} else if (dflen == 0)
				state = 5;
			break;

		default:
			kprintf("pci%d:%d:%d:%d: invalid state: %d\n",
			    cfg->domain, cfg->bus, cfg->slot, cfg->func,
			    state);
			state = -1;
			break;
		}
	}

	if (cksumvalid == 0 || state < -1) {
		/* read-only data bad, clean up */
		if (cfg->vpd.vpd_ros != NULL) {
			for (off = 0; cfg->vpd.vpd_ros[off].value; off++)
				kfree(cfg->vpd.vpd_ros[off].value, M_DEVBUF);
			kfree(cfg->vpd.vpd_ros, M_DEVBUF);
			cfg->vpd.vpd_ros = NULL;
		}
	}
	if (state < -1) {
		/* I/O error, clean up */
		kprintf("pci%d:%d:%d:%d: failed to read VPD data.\n",
		    cfg->domain, cfg->bus, cfg->slot, cfg->func);
		if (cfg->vpd.vpd_ident != NULL) {
			kfree(cfg->vpd.vpd_ident, M_DEVBUF);
			cfg->vpd.vpd_ident = NULL;
		}
		if (cfg->vpd.vpd_w != NULL) {
			for (off = 0; cfg->vpd.vpd_w[off].value; off++)
				kfree(cfg->vpd.vpd_w[off].value, M_DEVBUF);
			kfree(cfg->vpd.vpd_w, M_DEVBUF);
			cfg->vpd.vpd_w = NULL;
		}
	}
	cfg->vpd.vpd_cached = 1;
#undef REG
#undef WREG
}
1257
1258int
1259pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr)
1260{
1261 struct pci_devinfo *dinfo = device_get_ivars(child);
1262 pcicfgregs *cfg = &dinfo->cfg;
1263
1264 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1265 pci_read_vpd(device_get_parent(dev), cfg);
1266
1267 *identptr = cfg->vpd.vpd_ident;
1268
1269 if (*identptr == NULL)
1270 return (ENXIO);
1271
1272 return (0);
1273}
1274
1275int
1276pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw,
1277 const char **vptr)
1278{
1279 struct pci_devinfo *dinfo = device_get_ivars(child);
1280 pcicfgregs *cfg = &dinfo->cfg;
1281 int i;
1282
1283 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1284 pci_read_vpd(device_get_parent(dev), cfg);
1285
1286 for (i = 0; i < cfg->vpd.vpd_rocnt; i++)
1287 if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword,
1288 sizeof(cfg->vpd.vpd_ros[i].keyword)) == 0) {
1289 *vptr = cfg->vpd.vpd_ros[i].value;
1290 }
1291
1292 if (i != cfg->vpd.vpd_rocnt)
1293 return (0);
1294
1295 *vptr = NULL;
1296 return (ENXIO);
1297}
1298
1299/*
1300 * Return the offset in configuration space of the requested extended
1301 * capability entry or 0 if the specified capability was not found.
1302 */
1303int
1304pci_find_extcap_method(device_t dev, device_t child, int capability,
1305 int *capreg)
1306{
1307 struct pci_devinfo *dinfo = device_get_ivars(child);
1308 pcicfgregs *cfg = &dinfo->cfg;
1309 u_int32_t status;
1310 u_int8_t ptr;
1311
1312 /*
1313 * Check the CAP_LIST bit of the PCI status register first.
1314 */
1315 status = pci_read_config(child, PCIR_STATUS, 2);
1316 if (!(status & PCIM_STATUS_CAPPRESENT))
1317 return (ENXIO);
1318
1319 /*
1320 * Determine the start pointer of the capabilities list.
1321 */
1322 switch (cfg->hdrtype & PCIM_HDRTYPE) {
1323 case 0:
1324 case 1:
1325 ptr = PCIR_CAP_PTR;
1326 break;
1327 case 2:
1328 ptr = PCIR_CAP_PTR_2;
1329 break;
1330 default:
1331 /* XXX: panic? */
1332 return (ENXIO); /* no extended capabilities support */
1333 }
1334 ptr = pci_read_config(child, ptr, 1);
1335
1336 /*
1337 * Traverse the capabilities list.
1338 */
1339 while (ptr != 0) {
1340 if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
1341 if (capreg != NULL)
1342 *capreg = ptr;
1343 return (0);
1344 }
1345 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1346 }
1347
1348 return (ENOENT);
1349}
1350
1351/*
1352 * Support for MSI-X message interrupts.
1353 */
1354void
1355pci_enable_msix(device_t dev, u_int index, uint64_t address, uint32_t data)
1356{
1357 struct pci_devinfo *dinfo = device_get_ivars(dev);
1358 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1359 uint32_t offset;
1360
1361 KASSERT(msix->msix_table_len > index, ("bogus index"));
1362 offset = msix->msix_table_offset + index * 16;
1363 bus_write_4(msix->msix_table_res, offset, address & 0xffffffff);
1364 bus_write_4(msix->msix_table_res, offset + 4, address >> 32);
1365 bus_write_4(msix->msix_table_res, offset + 8, data);
1366
1367 /* Enable MSI -> HT mapping. */
1368 pci_ht_map_msi(dev, address);
1369}
1370
1371void
1372pci_mask_msix(device_t dev, u_int index)
1373{
1374 struct pci_devinfo *dinfo = device_get_ivars(dev);
1375 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1376 uint32_t offset, val;
1377
1378 KASSERT(msix->msix_msgnum > index, ("bogus index"));
1379 offset = msix->msix_table_offset + index * 16 + 12;
1380 val = bus_read_4(msix->msix_table_res, offset);
1381 if (!(val & PCIM_MSIX_VCTRL_MASK)) {
1382 val |= PCIM_MSIX_VCTRL_MASK;
1383 bus_write_4(msix->msix_table_res, offset, val);
1384 }
1385}
1386
1387void
1388pci_unmask_msix(device_t dev, u_int index)
1389{
1390 struct pci_devinfo *dinfo = device_get_ivars(dev);
1391 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1392 uint32_t offset, val;
1393
1394 KASSERT(msix->msix_table_len > index, ("bogus index"));
1395 offset = msix->msix_table_offset + index * 16 + 12;
1396 val = bus_read_4(msix->msix_table_res, offset);
1397 if (val & PCIM_MSIX_VCTRL_MASK) {
1398 val &= ~PCIM_MSIX_VCTRL_MASK;
1399 bus_write_4(msix->msix_table_res, offset, val);
1400 }
1401}
1402
1403int
1404pci_pending_msix(device_t dev, u_int index)
1405{
1406 struct pci_devinfo *dinfo = device_get_ivars(dev);
1407 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1408 uint32_t offset, bit;
1409
1410 KASSERT(msix->msix_table_len > index, ("bogus index"));
1411 offset = msix->msix_pba_offset + (index / 32) * 4;
1412 bit = 1 << index % 32;
1413 return (bus_read_4(msix->msix_pba_res, offset) & bit);
1414}
1415
1416/*
1417 * Restore MSI-X registers and table during resume. If MSI-X is
1418 * enabled then walk the virtual table to restore the actual MSI-X
1419 * table.
1420 */
1421static void
1422pci_resume_msix(device_t dev)
1423{
1424 struct pci_devinfo *dinfo = device_get_ivars(dev);
1425 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1426 struct msix_table_entry *mte;
1427 struct msix_vector *mv;
1428 int i;
1429
1430 if (msix->msix_alloc > 0) {
1431 /* First, mask all vectors. */
1432 for (i = 0; i < msix->msix_msgnum; i++)
1433 pci_mask_msix(dev, i);
1434
1435 /* Second, program any messages with at least one handler. */
1436 for (i = 0; i < msix->msix_table_len; i++) {
1437 mte = &msix->msix_table[i];
1438 if (mte->mte_vector == 0 || mte->mte_handlers == 0)
1439 continue;
1440 mv = &msix->msix_vectors[mte->mte_vector - 1];
1441 pci_enable_msix(dev, i, mv->mv_address, mv->mv_data);
1442 pci_unmask_msix(dev, i);
1443 }
1444 }
1445 pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
1446 msix->msix_ctrl, 2);
1447}
1448
1449/*
1450 * Attempt to allocate *count MSI-X messages. The actual number allocated is
1451 * returned in *count. After this function returns, each message will be
1452 * available to the driver as SYS_RES_IRQ resources starting at rid 1.
1453 */
int
pci_alloc_msix_method(device_t dev, device_t child, int *count)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	pcicfgregs *cfg = &dinfo->cfg;
	struct resource_list_entry *rle;
	int actual, error, i, irq, max;

	/* Don't let count == 0 get us into trouble. */
	if (*count == 0)
		return (EINVAL);

	/* If rid 0 is allocated, then fail. */
	rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
	if (rle != NULL && rle->res != NULL)
		return (ENXIO);

	/* Already have allocated messages? */
	if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
		return (ENXIO);

	/* If MSI is blacklisted for this system, fail. */
	if (pci_msi_blacklisted())
		return (ENXIO);

	/* MSI-X capability present? */
	if (cfg->msix.msix_location == 0 || !pci_do_msix)
		return (ENODEV);

	/* Make sure the appropriate BARs are mapped. */
	rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
	    cfg->msix.msix_table_bar);
	if (rle == NULL || rle->res == NULL ||
	    !(rman_get_flags(rle->res) & RF_ACTIVE))
		return (ENXIO);
	cfg->msix.msix_table_res = rle->res;
	if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
		rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
		    cfg->msix.msix_pba_bar);
		if (rle == NULL || rle->res == NULL ||
		    !(rman_get_flags(rle->res) & RF_ACTIVE))
			return (ENXIO);
	}
	/* 'rle' is the PBA BAR entry here when the two BARs differ. */
	cfg->msix.msix_pba_res = rle->res;

	if (bootverbose)
		device_printf(child,
		    "attempting to allocate %d MSI-X vectors (%d supported)\n",
		    *count, cfg->msix.msix_msgnum);
	/* Allocate up to min(requested, supported), stopping at the
	 * first failure; 'actual' is how many we really got. */
	max = min(*count, cfg->msix.msix_msgnum);
	for (i = 0; i < max; i++) {
		/* Allocate a message. */
		error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq);
		if (error)
			break;
		resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
		    irq, 1);
	}
	actual = i;

	if (bootverbose) {
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1);
		if (actual == 1)
			device_printf(child, "using IRQ %lu for MSI-X\n",
			    rle->start);
		else {
			int run;

			/*
			 * Be fancy and try to print contiguous runs of
			 * IRQ values as ranges.  'irq' is the previous IRQ.
			 * 'run' is true if we are in a range.
			 */
			device_printf(child, "using IRQs %lu", rle->start);
			irq = rle->start;
			run = 0;
			for (i = 1; i < actual; i++) {
				rle = resource_list_find(&dinfo->resources,
				    SYS_RES_IRQ, i + 1);

				/* Still in a run? */
				if (rle->start == irq + 1) {
					run = 1;
					irq++;
					continue;
				}

				/* Finish previous range. */
				if (run) {
					kprintf("-%d", irq);
					run = 0;
				}

				/* Start new range. */
				kprintf(",%lu", rle->start);
				irq = rle->start;
			}

			/* Unfinished range? */
			if (run)
				kprintf("-%d", irq);
			kprintf(" for MSI-X\n");
		}
	}

	/* Mask all vectors. */
	for (i = 0; i < cfg->msix.msix_msgnum; i++)
		pci_mask_msix(child, i);

	/* Allocate and initialize vector data and virtual table. */
	cfg->msix.msix_vectors = kmalloc(sizeof(struct msix_vector) * actual,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	cfg->msix.msix_table = kmalloc(sizeof(struct msix_table_entry) * actual,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	/* Identity-map table slot i to vector i+1 (vectors are 1-based). */
	for (i = 0; i < actual; i++) {
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
		cfg->msix.msix_vectors[i].mv_irq = rle->start;
		cfg->msix.msix_table[i].mte_vector = i + 1;
	}

	/* Update control register to enable MSI-X. */
	cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
	pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
	    cfg->msix.msix_ctrl, 2);

	/* Update counts of alloc'd messages. */
	cfg->msix.msix_alloc = actual;
	cfg->msix.msix_table_len = actual;
	*count = actual;
	return (0);
}
1585
1586/*
1587 * By default, pci_alloc_msix() will assign the allocated IRQ
1588 * resources consecutively to the first N messages in the MSI-X table.
1589 * However, device drivers may want to use different layouts if they
1590 * either receive fewer messages than they asked for, or they wish to
1591 * populate the MSI-X table sparsely. This method allows the driver
1592 * to specify what layout it wants. It must be called after a
1593 * successful pci_alloc_msix() but before any of the associated
1594 * SYS_RES_IRQ resources are allocated via bus_alloc_resource().
1595 *
1596 * The 'vectors' array contains 'count' message vectors. The array
1597 * maps directly to the MSI-X table in that index 0 in the array
1598 * specifies the vector for the first message in the MSI-X table, etc.
1599 * The vector value in each array index can either be 0 to indicate
1600 * that no vector should be assigned to a message slot, or it can be a
1601 * number from 1 to N (where N is the count returned from a
 * successful call to pci_alloc_msix()) to indicate which message
1603 * vector (IRQ) to be used for the corresponding message.
1604 *
1605 * On successful return, each message with a non-zero vector will have
1606 * an associated SYS_RES_IRQ whose rid is equal to the array index +
1607 * 1. Additionally, if any of the IRQs allocated via the previous
1608 * call to pci_alloc_msix() are not used in the mapping, those IRQs
1609 * will be kfreed back to the system automatically.
1610 *
1611 * For example, suppose a driver has a MSI-X table with 6 messages and
1612 * asks for 6 messages, but pci_alloc_msix() only returns a count of
1613 * 3. Call the three vectors allocated by pci_alloc_msix() A, B, and
1614 * C. After the call to pci_alloc_msix(), the device will be setup to
1615 * have an MSI-X table of ABC--- (where - means no vector assigned).
 * If the driver then passes a vector array of { 1, 0, 1, 2, 0, 2 },
1617 * then the MSI-X table will look like A-AB-B, and the 'C' vector will
1618 * be kfreed back to the system. This device will also have valid
1619 * SYS_RES_IRQ rids of 1, 3, 4, and 6.
1620 *
1621 * In any case, the SYS_RES_IRQ rid X will always map to the message
1622 * at MSI-X table index X - 1 and will only be valid if a vector is
1623 * assigned to that table entry.
1624 */
1625int
1626pci_remap_msix_method(device_t dev, device_t child, int count,
1627 const u_int *vectors)
1628{
1629 struct pci_devinfo *dinfo = device_get_ivars(child);
1630 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1631 struct resource_list_entry *rle;
1632 int i, irq, j, *used;
1633
1634 /*
1635 * Have to have at least one message in the table but the
1636 * table can't be bigger than the actual MSI-X table in the
1637 * device.
1638 */
1639 if (count == 0 || count > msix->msix_msgnum)
1640 return (EINVAL);
1641
1642 /* Sanity check the vectors. */
1643 for (i = 0; i < count; i++)
1644 if (vectors[i] > msix->msix_alloc)
1645 return (EINVAL);
1646
1647 /*
1648 * Make sure there aren't any holes in the vectors to be used.
1649 * It's a big pain to support it, and it doesn't really make
1650 * sense anyway. Also, at least one vector must be used.
1651 */
1652 used = kmalloc(sizeof(int) * msix->msix_alloc, M_DEVBUF, M_WAITOK |
1653 M_ZERO);
1654 for (i = 0; i < count; i++)
1655 if (vectors[i] != 0)
1656 used[vectors[i] - 1] = 1;
1657 for (i = 0; i < msix->msix_alloc - 1; i++)
1658 if (used[i] == 0 && used[i + 1] == 1) {
1659 kfree(used, M_DEVBUF);
1660 return (EINVAL);
1661 }
1662 if (used[0] != 1) {
1663 kfree(used, M_DEVBUF);
1664 return (EINVAL);
1665 }
1666
1667 /* Make sure none of the resources are allocated. */
1668 for (i = 0; i < msix->msix_table_len; i++) {
1669 if (msix->msix_table[i].mte_vector == 0)
1670 continue;
1671 if (msix->msix_table[i].mte_handlers > 0)
1672 return (EBUSY);
1673 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1674 KASSERT(rle != NULL, ("missing resource"));
1675 if (rle->res != NULL)
1676 return (EBUSY);
1677 }
1678
1679 /* Free the existing resource list entries. */
1680 for (i = 0; i < msix->msix_table_len; i++) {
1681 if (msix->msix_table[i].mte_vector == 0)
1682 continue;
1683 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1684 }
1685
1686 /*
1687 * Build the new virtual table keeping track of which vectors are
1688 * used.
1689 */
1690 kfree(msix->msix_table, M_DEVBUF);
1691 msix->msix_table = kmalloc(sizeof(struct msix_table_entry) * count,
1692 M_DEVBUF, M_WAITOK | M_ZERO);
1693 for (i = 0; i < count; i++)
1694 msix->msix_table[i].mte_vector = vectors[i];
1695 msix->msix_table_len = count;
1696
1697 /* Free any unused IRQs and resize the vectors array if necessary. */
1698 j = msix->msix_alloc - 1;
1699 if (used[j] == 0) {
1700 struct msix_vector *vec;
1701
1702 while (used[j] == 0) {
1703 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1704 msix->msix_vectors[j].mv_irq);
1705 j--;
1706 }
1707 vec = kmalloc(sizeof(struct msix_vector) * (j + 1), M_DEVBUF,
1708 M_WAITOK);
1709 bcopy(msix->msix_vectors, vec, sizeof(struct msix_vector) *
1710 (j + 1));
1711 kfree(msix->msix_vectors, M_DEVBUF);
1712 msix->msix_vectors = vec;
1713 msix->msix_alloc = j + 1;
1714 }
1715 kfree(used, M_DEVBUF);
1716
1717 /* Map the IRQs onto the rids. */
1718 for (i = 0; i < count; i++) {
1719 if (vectors[i] == 0)
1720 continue;
1721 irq = msix->msix_vectors[vectors[i]].mv_irq;
1722 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
1723 irq, 1);
1724 }
1725
1726 if (bootverbose) {
1727 device_printf(child, "Remapped MSI-X IRQs as: ");
1728 for (i = 0; i < count; i++) {
1729 if (i != 0)
1730 kprintf(", ");
1731 if (vectors[i] == 0)
1732 kprintf("---");
1733 else
1734 kprintf("%d",
1735 msix->msix_vectors[vectors[i]].mv_irq);
1736 }
1737 kprintf("\n");
1738 }
1739
1740 return (0);
1741}
1742
1743static int
1744pci_release_msix(device_t dev, device_t child)
1745{
1746 struct pci_devinfo *dinfo = device_get_ivars(child);
1747 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1748 struct resource_list_entry *rle;
1749 int i;
1750
1751 /* Do we have any messages to release? */
1752 if (msix->msix_alloc == 0)
1753 return (ENODEV);
1754
1755 /* Make sure none of the resources are allocated. */
1756 for (i = 0; i < msix->msix_table_len; i++) {
1757 if (msix->msix_table[i].mte_vector == 0)
1758 continue;
1759 if (msix->msix_table[i].mte_handlers > 0)
1760 return (EBUSY);
1761 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1762 KASSERT(rle != NULL, ("missing resource"));
1763 if (rle->res != NULL)
1764 return (EBUSY);
1765 }
1766
1767 /* Update control register to disable MSI-X. */
1768 msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE;
1769 pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL,
1770 msix->msix_ctrl, 2);
1771
1772 /* Free the resource list entries. */
1773 for (i = 0; i < msix->msix_table_len; i++) {
1774 if (msix->msix_table[i].mte_vector == 0)
1775 continue;
1776 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1777 }
1778 kfree(msix->msix_table, M_DEVBUF);
1779 msix->msix_table_len = 0;
1780
1781 /* Release the IRQs. */
1782 for (i = 0; i < msix->msix_alloc; i++)
1783 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1784 msix->msix_vectors[i].mv_irq);
1785 kfree(msix->msix_vectors, M_DEVBUF);
1786 msix->msix_alloc = 0;
1787 return (0);
1788}
1789
1790/*
1791 * Return the max supported MSI-X messages this device supports.
1792 * Basically, assuming the MD code can alloc messages, this function
1793 * should return the maximum value that pci_alloc_msix() can return.
1794 * Thus, it is subject to the tunables, etc.
1795 */
1796int
1797pci_msix_count_method(device_t dev, device_t child)
1798{
1799 struct pci_devinfo *dinfo = device_get_ivars(child);
1800 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1801
1802 if (pci_do_msix && msix->msix_location != 0)
1803 return (msix->msix_msgnum);
1804 return (0);
1805}
1806
1807/*
1808 * HyperTransport MSI mapping control
1809 */
1810void
1811pci_ht_map_msi(device_t dev, uint64_t addr)
1812{
1813 struct pci_devinfo *dinfo = device_get_ivars(dev);
1814 struct pcicfg_ht *ht = &dinfo->cfg.ht;
1815
1816 if (!ht->ht_msimap)
1817 return;
1818
1819 if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) &&
1820 ht->ht_msiaddr >> 20 == addr >> 20) {
1821 /* Enable MSI -> HT mapping. */
1822 ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
1823 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1824 ht->ht_msictrl, 2);
1825 }
1826
1827 if (!addr && ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) {
1828 /* Disable MSI -> HT mapping. */
1829 ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE;
1830 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1831 ht->ht_msictrl, 2);
1832 }
1833}
1834
1835/*
1836 * Support for MSI message signalled interrupts.
1837 */
1838void
1839pci_enable_msi(device_t dev, uint64_t address, uint16_t data)
1840{
1841 struct pci_devinfo *dinfo = device_get_ivars(dev);
1842 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1843
1844 /* Write data and address values. */
1845 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
1846 address & 0xffffffff, 4);
1847 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1848 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR_HIGH,
1849 address >> 32, 4);
1850 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA_64BIT,
1851 data, 2);
1852 } else
1853 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA, data,
1854 2);
1855
1856 /* Enable MSI in the control register. */
1857 msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE;
1858 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1859 2);
1860
1861 /* Enable MSI -> HT mapping. */
1862 pci_ht_map_msi(dev, address);
1863}
1864
1865void
1866pci_disable_msi(device_t dev)
1867{
1868 struct pci_devinfo *dinfo = device_get_ivars(dev);
1869 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1870
1871 /* Disable MSI -> HT mapping. */
1872 pci_ht_map_msi(dev, 0);
1873
1874 /* Disable MSI in the control register. */
1875 msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE;
1876 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1877 2);
1878}
1879
1880/*
1881 * Restore MSI registers during resume. If MSI is enabled then
1882 * restore the data and address registers in addition to the control
1883 * register.
1884 */
1885static void
1886pci_resume_msi(device_t dev)
1887{
1888 struct pci_devinfo *dinfo = device_get_ivars(dev);
1889 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1890 uint64_t address;
1891 uint16_t data;
1892
1893 if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) {
1894 address = msi->msi_addr;
1895 data = msi->msi_data;
1896 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
1897 address & 0xffffffff, 4);
1898 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1899 pci_write_config(dev, msi->msi_location +
1900 PCIR_MSI_ADDR_HIGH, address >> 32, 4);
1901 pci_write_config(dev, msi->msi_location +
1902 PCIR_MSI_DATA_64BIT, data, 2);
1903 } else
1904 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA,
1905 data, 2);
1906 }
1907 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1908 2);
1909}
1910
/*
 * Re-route an already-allocated MSI or MSI-X IRQ: ask the parent
 * bridge for fresh address/data values for 'irq' and reprogram every
 * message that uses it.  Returns 0 on success, ENOENT if the IRQ is
 * not one of the device's messages, or the bridge's mapping error.
 */
int
pci_remap_msi_irq(device_t dev, u_int irq)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	pcicfgregs *cfg = &dinfo->cfg;
	struct resource_list_entry *rle;
	struct msix_table_entry *mte;
	struct msix_vector *mv;
	device_t bus;
	uint64_t addr;
	uint32_t data;
	int error, i, j;

	bus = device_get_parent(dev);

	/*
	 * Handle MSI first.  We try to find this IRQ among our list
	 * of MSI IRQs.  If we find it, we request updated address and
	 * data registers and apply the results.
	 */
	if (cfg->msi.msi_alloc > 0) {

		/* If we don't have any active handlers, nothing to do. */
		if (cfg->msi.msi_handlers == 0)
			return (0);
		/* MSI messages occupy rids 1..msi_alloc. */
		for (i = 0; i < cfg->msi.msi_alloc; i++) {
			rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ,
			    i + 1);
			if (rle->start == irq) {
				error = PCIB_MAP_MSI(device_get_parent(bus),
				    dev, irq, &addr, &data);
				if (error)
					return (error);
				/* Disable, retarget, then re-enable. */
				pci_disable_msi(dev);
				dinfo->cfg.msi.msi_addr = addr;
				dinfo->cfg.msi.msi_data = data;
				pci_enable_msi(dev, addr, data);
				return (0);
			}
		}
		return (ENOENT);
	}

	/*
	 * For MSI-X, we check to see if we have this IRQ.  If we do,
	 * we request the updated mapping info.  If that works, we go
	 * through all the slots that use this IRQ and update them.
	 */
	if (cfg->msix.msix_alloc > 0) {
		for (i = 0; i < cfg->msix.msix_alloc; i++) {
			mv = &cfg->msix.msix_vectors[i];
			if (mv->mv_irq == irq) {
				error = PCIB_MAP_MSI(device_get_parent(bus),
				    dev, irq, &addr, &data);
				if (error)
					return (error);
				mv->mv_address = addr;
				mv->mv_data = data;
				/* Rewrite every active slot on vector i+1. */
				for (j = 0; j < cfg->msix.msix_table_len; j++) {
					mte = &cfg->msix.msix_table[j];
					if (mte->mte_vector != i + 1)
						continue;
					if (mte->mte_handlers == 0)
						continue;
					/* Mask while the entry is rewritten. */
					pci_mask_msix(dev, j);
					pci_enable_msix(dev, j, addr, data);
					pci_unmask_msix(dev, j);
				}
			}
		}
		return (ENOENT);
	}

	return (ENOENT);
}
1986
1987/*
1988 * Returns true if the specified device is blacklisted because MSI
1989 * doesn't work.
1990 */
1991int
1992pci_msi_device_blacklisted(device_t dev)
1993{
1994 struct pci_quirk *q;
1995
1996 if (!pci_honor_msi_blacklist)
1997 return (0);
1998
1999 for (q = &pci_quirks[0]; q->devid; q++) {
2000 if (q->devid == pci_get_devid(dev) &&
2001 q->type == PCI_QUIRK_DISABLE_MSI)
2002 return (1);
2003 }
2004 return (0);
2005}
2006
2007/*
2008 * Determine if MSI is blacklisted globally on this sytem. Currently,
2009 * we just check for blacklisted chipsets as represented by the
2010 * host-PCI bridge at device 0:0:0. In the future, it may become
2011 * necessary to check other system attributes, such as the kenv values
2012 * that give the motherboard manufacturer and model number.
2013 */
2014static int
2015pci_msi_blacklisted(void)
2016{
2017 device_t dev;
2018
2019 if (!pci_honor_msi_blacklist)
2020 return (0);
2021
2022 /* Blacklist all non-PCI-express and non-PCI-X chipsets. */
2023 if (!(pcie_chipset || pcix_chipset))
2024 return (1);
2025
2026 dev = pci_find_bsf(0, 0, 0);
2027 if (dev != NULL)
2028 return (pci_msi_device_blacklisted(dev));
2029 return (0);
2030}
2031
2032/*
2033 * Attempt to allocate *count MSI messages. The actual number allocated is
2034 * returned in *count. After this function returns, each message will be
2035 * available to the driver as SYS_RES_IRQ resources starting at a rid 1.
2036 */
int
pci_alloc_msi_method(device_t dev, device_t child, int *count)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	pcicfgregs *cfg = &dinfo->cfg;
	struct resource_list_entry *rle;
	int actual, error, i, irqs[32];
	uint16_t ctrl;

	/* Don't let count == 0 get us into trouble. */
	if (*count == 0)
		return (EINVAL);

	/* If rid 0 is allocated, then fail. */
	rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
	if (rle != NULL && rle->res != NULL)
		return (ENXIO);

	/* Already have allocated messages? */
	if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
		return (ENXIO);

	/* If MSI is blacklisted for this system, fail. */
	if (pci_msi_blacklisted())
		return (ENXIO);

	/* MSI capability present? */
	if (cfg->msi.msi_location == 0 || !pci_do_msi)
		return (ENODEV);

	if (bootverbose)
		device_printf(child,
		    "attempting to allocate %d MSI vectors (%d supported)\n",
		    *count, cfg->msi.msi_msgnum);

	/* Don't ask for more than the device supports. */
	actual = min(*count, cfg->msi.msi_msgnum);

	/* Don't ask for more than 32 messages. */
	actual = min(actual, 32);

	/* MSI requires power of 2 number of messages. */
	if (!powerof2(actual))
		return (EINVAL);

	/* Halve the request on each failure until one message succeeds
	 * or the single-message allocation itself fails. */
	for (;;) {
		/* Try to allocate N messages. */
		error = PCIB_ALLOC_MSI(device_get_parent(dev), child, actual,
		    cfg->msi.msi_msgnum, irqs);
		if (error == 0)
			break;
		if (actual == 1)
			return (error);

		/* Try N / 2. */
		actual >>= 1;
	}

	/*
	 * We now have N actual messages mapped onto SYS_RES_IRQ
	 * resources in the irqs[] array, so add new resources
	 * starting at rid 1.
	 */
	for (i = 0; i < actual; i++)
		resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
		    irqs[i], irqs[i], 1);

	if (bootverbose) {
		if (actual == 1)
			device_printf(child, "using IRQ %d for MSI\n", irqs[0]);
		else {
			int run;

			/*
			 * Be fancy and try to print contiguous runs
			 * of IRQ values as ranges.  'run' is true if
			 * we are in a range.
			 */
			device_printf(child, "using IRQs %d", irqs[0]);
			run = 0;
			for (i = 1; i < actual; i++) {

				/* Still in a run? */
				if (irqs[i] == irqs[i - 1] + 1) {
					run = 1;
					continue;
				}

				/* Finish previous range. */
				if (run) {
					kprintf("-%d", irqs[i - 1]);
					run = 0;
				}

				/* Start new range. */
				kprintf(",%d", irqs[i]);
			}

			/* Unfinished range? */
			if (run)
				kprintf("-%d", irqs[actual - 1]);
			kprintf(" for MSI\n");
		}
	}

	/* Update control register with actual count. */
	ctrl = cfg->msi.msi_ctrl;
	ctrl &= ~PCIM_MSICTRL_MME_MASK;
	/* Multiple Message Enable encodes log2(count) in bits 6:4. */
	ctrl |= (ffs(actual) - 1) << 4;
	cfg->msi.msi_ctrl = ctrl;
	pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2);

	/* Update counts of alloc'd messages. */
	cfg->msi.msi_alloc = actual;
	cfg->msi.msi_handlers = 0;
	*count = actual;
	return (0);
}
2155
/*
 * Release the MSI (or MSI-X) messages associated with this device.
 *
 * Returns 0 on success, ENODEV if no messages are allocated, or EBUSY
 * if any message still has a handler attached or an allocated resource.
 */
int
pci_release_msi_method(device_t dev, device_t child)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct pcicfg_msi *msi = &dinfo->cfg.msi;
	struct resource_list_entry *rle;
	int error, i, irqs[32];

	/* Try MSI-X first; ENODEV means no MSI-X was allocated. */
	error = pci_release_msix(dev, child);
	if (error != ENODEV)
		return (error);

	/* Do we have any messages to release? */
	if (msi->msi_alloc == 0)
		return (ENODEV);
	KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages"));

	/* Make sure none of the resources are allocated. */
	if (msi->msi_handlers > 0)
		return (EBUSY);
	for (i = 0; i < msi->msi_alloc; i++) {
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
		KASSERT(rle != NULL, ("missing MSI resource"));
		if (rle->res != NULL)
			return (EBUSY);
		/* Remember each IRQ so we can hand them back to the bridge. */
		irqs[i] = rle->start;
	}

	/* Update control register with 0 count. */
	KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE),
	    ("%s: MSI still enabled", __func__));
	msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK;
	pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
	    msi->msi_ctrl, 2);

	/* Release the messages. */
	PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs);
	for (i = 0; i < msi->msi_alloc; i++)
		resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);

	/* Update alloc count. */
	msi->msi_alloc = 0;
	msi->msi_addr = 0;
	msi->msi_data = 0;
	return (0);
}
2204
2205/*
2206 * Return the max supported MSI messages this device supports.
2207 * Basically, assuming the MD code can alloc messages, this function
2208 * should return the maximum value that pci_alloc_msi() can return.
2209 * Thus, it is subject to the tunables, etc.
2210 */
2211int
2212pci_msi_count_method(device_t dev, device_t child)
2213{
2214 struct pci_devinfo *dinfo = device_get_ivars(child);
2215 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2216
2217 if (pci_do_msi && msi->msi_location != 0)
2218 return (msi->msi_msgnum);
2219 return (0);
2220}
2221
2222/* kfree pcicfgregs structure and all depending data structures */
2223
2224int
2225pci_freecfg(struct pci_devinfo *dinfo)
2226{
2227 struct devlist *devlist_head;
2228 int i;
2229
2230 devlist_head = &pci_devq;
2231
2232 if (dinfo->cfg.vpd.vpd_reg) {
2233 kfree(dinfo->cfg.vpd.vpd_ident, M_DEVBUF);
2234 for (i = 0; i < dinfo->cfg.vpd.vpd_rocnt; i++)
2235 kfree(dinfo->cfg.vpd.vpd_ros[i].value, M_DEVBUF);
2236 kfree(dinfo->cfg.vpd.vpd_ros, M_DEVBUF);
2237 for (i = 0; i < dinfo->cfg.vpd.vpd_wcnt; i++)
2238 kfree(dinfo->cfg.vpd.vpd_w[i].value, M_DEVBUF);
2239 kfree(dinfo->cfg.vpd.vpd_w, M_DEVBUF);
2240 }
2241 STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links);
2242 kfree(dinfo, M_DEVBUF);
2243
2244 /* increment the generation count */
2245 pci_generation++;
2246
2247 /* we're losing one device */
2248 pci_numdevs--;
2249 return (0);
2250}
2251
2252/*
2253 * PCI power manangement
2254 */
2255int
2256pci_set_powerstate_method(device_t dev, device_t child, int state)
2257{
2258 struct pci_devinfo *dinfo = device_get_ivars(child);
2259 pcicfgregs *cfg = &dinfo->cfg;
f4754a59
HT
2260 uint16_t status;
2261 int result, oldstate, highest, delay;
984263bc 2262
4d28e78f 2263 if (cfg->pp.pp_cap == 0)
f4754a59
HT
2264 return (EOPNOTSUPP);
2265
2266 /*
2267 * Optimize a no state change request away. While it would be OK to
2268 * write to the hardware in theory, some devices have shown odd
2269 * behavior when going from D3 -> D3.
2270 */
2271 oldstate = pci_get_powerstate(child);
2272 if (oldstate == state)
2273 return (0);
2274
2275 /*
2276 * The PCI power management specification states that after a state
2277 * transition between PCI power states, system software must
2278 * guarantee a minimal delay before the function accesses the device.
2279 * Compute the worst case delay that we need to guarantee before we
2280 * access the device. Many devices will be responsive much more
2281 * quickly than this delay, but there are some that don't respond
2282 * instantly to state changes. Transitions to/from D3 state require
2283 * 10ms, while D2 requires 200us, and D0/1 require none. The delay
2284 * is done below with DELAY rather than a sleeper function because
2285 * this function can be called from contexts where we cannot sleep.
2286 */
2287 highest = (oldstate > state) ? oldstate : state;
2288 if (highest == PCI_POWERSTATE_D3)
2289 delay = 10000;
2290 else if (highest == PCI_POWERSTATE_D2)
2291 delay = 200;
2292 else
2293 delay = 0;
4d28e78f 2294 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2)
f4754a59
HT
2295 & ~PCIM_PSTAT_DMASK;
2296 result = 0;
2297 switch (state) {
2298 case PCI_POWERSTATE_D0:
2299 status |= PCIM_PSTAT_D0;
2300 break;
2301 case PCI_POWERSTATE_D1:
4d28e78f 2302 if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0)
f4754a59
HT
2303 return (EOPNOTSUPP);
2304 status |= PCIM_PSTAT_D1;
2305 break;
2306 case PCI_POWERSTATE_D2:
4d28e78f 2307 if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0)
f4754a59
HT
2308 return (EOPNOTSUPP);
2309 status |= PCIM_PSTAT_D2;
2310 break;
2311 case PCI_POWERSTATE_D3:
2312 status |= PCIM_PSTAT_D3;
2313 break;
2314 default:
2315 return (EINVAL);
984263bc 2316 }
f4754a59
HT
2317
2318 if (bootverbose)
2319 kprintf(
4d28e78f
SZ
2320 "pci%d:%d:%d:%d: Transition from D%d to D%d\n",
2321 dinfo->cfg.domain, dinfo->cfg.bus, dinfo->cfg.slot,
2322 dinfo->cfg.func, oldstate, state);
f4754a59 2323
4d28e78f 2324 PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2);
f4754a59
HT
2325 if (delay)
2326 DELAY(delay);
2327 return (0);
984263bc
MD
2328}
2329
e126caf1 2330int
984263bc
MD
2331pci_get_powerstate_method(device_t dev, device_t child)
2332{
2333 struct pci_devinfo *dinfo = device_get_ivars(child);
2334 pcicfgregs *cfg = &dinfo->cfg;
f4754a59 2335 uint16_t status;
984263bc
MD
2336 int result;
2337
4d28e78f
SZ
2338 if (cfg->pp.pp_cap != 0) {
2339 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2);
984263bc
MD
2340 switch (status & PCIM_PSTAT_DMASK) {
2341 case PCIM_PSTAT_D0:
2342 result = PCI_POWERSTATE_D0;
2343 break;
2344 case PCIM_PSTAT_D1:
2345 result = PCI_POWERSTATE_D1;
2346 break;
2347 case PCIM_PSTAT_D2:
2348 result = PCI_POWERSTATE_D2;
2349 break;
2350 case PCIM_PSTAT_D3:
2351 result = PCI_POWERSTATE_D3;
2352 break;
2353 default:
2354 result = PCI_POWERSTATE_UNKNOWN;
2355 break;
2356 }
2357 } else {
2358 /* No support, device is always at D0 */
2359 result = PCI_POWERSTATE_D0;
2360 }
f4754a59 2361 return (result);
984263bc
MD
2362}
2363
2364/*
2365 * Some convenience functions for PCI device drivers.
2366 */
2367
2368static __inline void
4d28e78f 2369pci_set_command_bit(device_t dev, device_t child, uint16_t bit)
984263bc 2370{
4d28e78f 2371 uint16_t command;
984263bc 2372
4d28e78f
SZ
2373 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2374 command |= bit;
2375 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
984263bc
MD
2376}
2377
2378static __inline void
4d28e78f
SZ
2379pci_clear_command_bit(device_t dev, device_t child, uint16_t bit)
2380{
2381 uint16_t command;
984263bc 2382
4d28e78f
SZ
2383 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2384 command &= ~bit;
2385 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
984263bc
MD
2386}
2387
4d28e78f
SZ
2388int
2389pci_enable_busmaster_method(device_t dev, device_t child)
2390{
2391 pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
2392 return (0);
2393}
984263bc 2394
4d28e78f
SZ
2395int
2396pci_disable_busmaster_method(device_t dev, device_t child)
2397{
2398 pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
2399 return (0);
2400}
984263bc 2401
4d28e78f
SZ
2402int
2403pci_enable_io_method(device_t dev, device_t child, int space)
ed1bd994 2404{
4d28e78f
SZ
2405 uint16_t command;
2406 uint16_t bit;
2407 char *error;
ed1bd994 2408
4d28e78f
SZ
2409 bit = 0;
2410 error = NULL;
2411
2412 switch(space) {
2413 case SYS_RES_IOPORT:
2414 bit = PCIM_CMD_PORTEN;
2415 error = "port";
ed1bd994 2416 break;
4d28e78f
SZ
2417 case SYS_RES_MEMORY:
2418 bit = PCIM_CMD_MEMEN;
2419 error = "memory";
ed1bd994
MD
2420 break;
2421 default:
4d28e78f 2422 return (EINVAL);
ed1bd994 2423 }
4d28e78f
SZ
2424 pci_set_command_bit(dev, child, bit);
2425 /* Some devices seem to need a brief stall here, what do to? */
2426 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2427 if (command & bit)
2428 return (0);
2429 device_printf(child, "failed to enable %s mapping!\n", error);
2430 return (ENXIO);
ed1bd994 2431}
984263bc 2432
4d28e78f
SZ
2433int
2434pci_disable_io_method(device_t dev, device_t child, int space)
b4c0a845 2435{
4d28e78f
SZ
2436 uint16_t command;
2437 uint16_t bit;
2438 char *error;
b4c0a845 2439
4d28e78f
SZ
2440 bit = 0;
2441 error = NULL;
b4c0a845 2442
4d28e78f
SZ
2443 switch(space) {
2444 case SYS_RES_IOPORT:
2445 bit = PCIM_CMD_PORTEN;
2446 error = "port";
b4c0a845 2447 break;
4d28e78f
SZ
2448 case SYS_RES_MEMORY:
2449 bit = PCIM_CMD_MEMEN;
2450 error = "memory";
b4c0a845
SZ
2451 break;
2452 default:
4d28e78f 2453 return (EINVAL);
b4c0a845 2454 }
4d28e78f
SZ
2455 pci_clear_command_bit(dev, child, bit);
2456 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2457 if (command & bit) {
2458 device_printf(child, "failed to disable %s mapping!\n", error);
2459 return (ENXIO);
b4c0a845 2460 }
4d28e78f 2461 return (0);
b4c0a845
SZ
2462}
2463
4d28e78f
SZ
2464/*
2465 * New style pci driver. Parent device is either a pci-host-bridge or a
2466 * pci-pci-bridge. Both kinds are represented by instances of pcib.
2467 */
2468
/*
 * Dump the interesting parts of a device's config header to the
 * console when booting verbose: IDs, location, class, command/status,
 * timers, interrupt routing, and any power-management, MSI, MSI-X or
 * PCI Express capabilities that were parsed into 'dinfo'.
 * No-op unless bootverbose is set.
 */
void
pci_print_verbose(struct pci_devinfo *dinfo)
{

	if (bootverbose) {
		pcicfgregs *cfg = &dinfo->cfg;

		kprintf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n",
		    cfg->vendor, cfg->device, cfg->revid);
		kprintf("\tdomain=%d, bus=%d, slot=%d, func=%d\n",
		    cfg->domain, cfg->bus, cfg->slot, cfg->func);
		kprintf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n",
		    cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype,
		    cfg->mfdev);
		kprintf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n",
		    cfg->cmdreg, cfg->statreg, cfg->cachelnsz);
		/* lattimer unit is 30 ns; mingnt/maxlat units are 250 ns. */
		kprintf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n",
		    cfg->lattimer, cfg->lattimer * 30, cfg->mingnt,
		    cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250);
		if (cfg->intpin > 0)
			kprintf("\tintpin=%c, irq=%d\n",
			    cfg->intpin +'a' -1, cfg->intline);
		if (cfg->pp.pp_cap) {
			uint16_t status;

			/* Current power state lives in the PMCSR register. */
			status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2);
			kprintf("\tpowerspec %d supports D0%s%s D3 current D%d\n",
			    cfg->pp.pp_cap & PCIM_PCAP_SPEC,
			    cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "",
			    cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "",
			    status & PCIM_PSTAT_DMASK);
		}
		if (cfg->msi.msi_location) {
			int ctrl;

			ctrl = cfg->msi.msi_ctrl;
			kprintf("\tMSI supports %d message%s%s%s\n",
			    cfg->msi.msi_msgnum,
			    (cfg->msi.msi_msgnum == 1) ? "" : "s",
			    (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "",
			    (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":"");
		}
		if (cfg->msix.msix_location) {
			kprintf("\tMSI-X supports %d message%s ",
			    cfg->msix.msix_msgnum,
			    (cfg->msix.msix_msgnum == 1) ? "" : "s");
			/* Table and PBA may share one BAR or use two. */
			if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar)
				kprintf("in map 0x%x\n",
				    cfg->msix.msix_table_bar);
			else
				kprintf("in maps 0x%x and 0x%x\n",
				    cfg->msix.msix_table_bar,
				    cfg->msix.msix_pba_bar);
		}
		pci_print_verbose_expr(cfg);
	}
}
2526
d85e7311
SZ
/*
 * Verbose-boot helper: print the PCI Express capability summary for a
 * device (capability version, port type, and slot capabilities when a
 * slot is implemented).  No-op unless bootverbose is set and the
 * device has a PCI Express capability.
 */
static void
pci_print_verbose_expr(const pcicfgregs *cfg)
{
	const struct pcicfg_expr *expr = &cfg->expr;
	const char *port_name;
	uint16_t port_type;

	if (!bootverbose)
		return;

	if (expr->expr_ptr == 0) /* No PCI Express capability */
		return;

	kprintf("\tPCI Express ver.%d cap=0x%04x",
	    expr->expr_cap & PCIEM_CAP_VER_MASK, expr->expr_cap);
	/* Only version-1 capability layouts are decoded below. */
	if ((expr->expr_cap & PCIEM_CAP_VER_MASK) != PCIEM_CAP_VER_1)
		goto back;

	port_type = expr->expr_cap & PCIEM_CAP_PORT_TYPE;

	switch (port_type) {
	case PCIE_END_POINT:
		port_name = "DEVICE";
		break;
	case PCIE_LEG_END_POINT:
		port_name = "LEGDEV";
		break;
	case PCIE_ROOT_PORT:
		port_name = "ROOT";
		break;
	case PCIE_UP_STREAM_PORT:
		port_name = "UPSTREAM";
		break;
	case PCIE_DOWN_STREAM_PORT:
		port_name = "DOWNSTRM";
		break;
	case PCIE_PCIE2PCI_BRIDGE:
		port_name = "PCIE2PCI";
		break;
	case PCIE_PCI2PCIE_BRIDGE:
		port_name = "PCI2PCIE";
		break;
	default:
		port_name = NULL;
		break;
	}
	/*
	 * Root/downstream ports only label themselves when they actually
	 * implement a slot; suppress the tag otherwise.
	 */
	if ((port_type == PCIE_ROOT_PORT ||
	     port_type == PCIE_DOWN_STREAM_PORT) &&
	    !(expr->expr_cap & PCIEM_CAP_SLOT_IMPL))
		port_name = NULL;
	if (port_name != NULL)
		kprintf("[%s]", port_name);

	if (pcie_slotimpl(cfg)) {
		kprintf(", slotcap=0x%08x", expr->expr_slotcap);
		if (expr->expr_slotcap & PCIEM_SLTCAP_HP_CAP)
			kprintf("[HOTPLUG]");
	}
back:
	kprintf("\n");
}
2588
984263bc 2589static int
4a5a2d63 2590pci_porten(device_t pcib, int b, int s, int f)
984263bc 2591{
4a5a2d63
JS
2592 return (PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2)
2593 & PCIM_CMD_PORTEN) != 0;
984263bc
MD
2594}
2595
2596static int
4a5a2d63 2597pci_memen(device_t pcib, int b, int s, int f)
984263bc 2598{
4a5a2d63
JS
2599 return (PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2)
2600 & PCIM_CMD_MEMEN) != 0;
984263bc
MD
2601}
2602
/*
 * Add a resource based on a pci map register.  Return 1 if the map
 * register is a 32bit map register or 2 if it is a 64bit register.
 *
 * Sizes the BAR with the classic write-all-ones probe, adds the range
 * to 'rl', and tries to pre-reserve it from the parent so that
 * pci_alloc_resource() can later hand it out.  'force' adds even
 * disabled/zero BARs; 'prefetch' requests RF_PREFETCHABLE.
 */
static int
pci_add_map(device_t pcib, device_t bus, device_t dev,
    int b, int s, int f, int reg, struct resource_list *rl, int force,
    int prefetch)
{
	uint32_t map;
	pci_addr_t base;
	pci_addr_t start, end, count;
	uint8_t ln2size;
	uint8_t ln2range;
	uint32_t testval;
	uint16_t cmd;
	int type;
	int barlen;
	struct resource *res;

	/*
	 * Size the BAR: save it, write all-ones, read back the mask,
	 * then restore the original value.  Order matters here.
	 */
	map = PCIB_READ_CONFIG(pcib, b, s, f, reg, 4);
	PCIB_WRITE_CONFIG(pcib, b, s, f, reg, 0xffffffff, 4);
	testval = PCIB_READ_CONFIG(pcib, b, s, f, reg, 4);
	PCIB_WRITE_CONFIG(pcib, b, s, f, reg, map, 4);

	if (PCI_BAR_MEM(map)) {
		type = SYS_RES_MEMORY;
		if (map & PCIM_BAR_MEM_PREFETCH)
			prefetch = 1;
	} else
		type = SYS_RES_IOPORT;
	ln2size = pci_mapsize(testval);
	ln2range = pci_maprange(testval);
	base = pci_mapbase(map);
	barlen = ln2range == 64 ? 2 : 1;

	/*
	 * For I/O registers, if bottom bit is set, and the next bit up
	 * isn't clear, we know we have a BAR that doesn't conform to the
	 * spec, so ignore it.  Also, sanity check the size of the data
	 * areas to the type of memory involved.  Memory must be at least
	 * 16 bytes in size, while I/O ranges must be at least 4.
	 */
	if (PCI_BAR_IO(testval) && (testval & PCIM_BAR_IO_RESERVED) != 0)
		return (barlen);
	if ((type == SYS_RES_MEMORY && ln2size < 4) ||
	    (type == SYS_RES_IOPORT && ln2size < 2))
		return (barlen);

	if (ln2range == 64)
		/* Read the other half of a 64bit map register */
		base |= (uint64_t) PCIB_READ_CONFIG(pcib, b, s, f, reg + 4, 4) << 32;
	if (bootverbose) {
		kprintf("\tmap[%02x]: type %s, range %2d, base %#jx, size %2d",
		    reg, pci_maptype(map), ln2range, (uintmax_t)base, ln2size);
		if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f))
			kprintf(", port disabled\n");
		else if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f))
			kprintf(", memory disabled\n");
		else
			kprintf(", enabled\n");
	}

	/*
	 * If base is 0, then we have problems.  It is best to ignore
	 * such entries for the moment.  These will be allocated later if
	 * the driver specifically requests them.  However, some
	 * removable busses look better when all resources are allocated,
	 * so allow '0' to be overriden.
	 *
	 * Similarly treat maps whose values is the same as the test value
	 * read back.  These maps have had all f's written to them by the
	 * BIOS in an attempt to disable the resources.
	 */
	if (!force && (base == 0 || map == testval))
		return (barlen);
	if ((u_long)base != base) {
		/* 64-bit BAR doesn't fit this platform's address space. */
		device_printf(bus,
		    "pci%d:%d:%d:%d bar %#x too many address bits",
		    pci_get_domain(dev), b, s, f, reg);
		return (barlen);
	}

	/*
	 * This code theoretically does the right thing, but has
	 * undesirable side effects in some cases where peripherals
	 * respond oddly to having these bits enabled.  Let the user
	 * be able to turn them off (since pci_enable_io_modes is 1 by
	 * default).
	 */
	if (pci_enable_io_modes) {
		/* Turn on resources that have been left off by a lazy BIOS */
		if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f)) {
			cmd = PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2);
			cmd |= PCIM_CMD_PORTEN;
			PCIB_WRITE_CONFIG(pcib, b, s, f, PCIR_COMMAND, cmd, 2);
		}
		if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f)) {
			cmd = PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2);
			cmd |= PCIM_CMD_MEMEN;
			PCIB_WRITE_CONFIG(pcib, b, s, f, PCIR_COMMAND, cmd, 2);
		}
	} else {
		if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f))
			return (barlen);
		if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f))
			return (barlen);
	}

	count = 1 << ln2size;
	if (base == 0 || base == pci_mapbase(testval)) {
		start = 0;	/* Let the parent decide. */
		end = ~0ULL;
	} else {
		start = base;
		end = base + (1 << ln2size) - 1;
	}
	resource_list_add(rl, type, reg, start, end, count);

	/*
	 * Try to allocate the resource for this BAR from our parent
	 * so that this resource range is already reserved.  The
	 * driver for this device will later inherit this resource in
	 * pci_alloc_resource().
	 */
	res = resource_list_alloc(rl, bus, dev, type, &reg, start, end, count,
	    prefetch ? RF_PREFETCHABLE : 0);
	if (res == NULL) {
		/*
		 * If the allocation fails, clear the BAR and delete
		 * the resource list entry to force
		 * pci_alloc_resource() to allocate resources from the
		 * parent.
		 */
		resource_list_delete(rl, type, reg);
		start = 0;
	} else
		start = rman_get_start(res);
	/* Program the (possibly relocated) address back into the BAR. */
	pci_write_config(dev, reg, start, 4);
	if (ln2range == 64)
		pci_write_config(dev, reg + 4, start >> 32, 4);
	return (barlen);
}
2746
/*
 * For ATA devices we need to decide early what addressing mode to use.
 * Legacy demands that the primary and secondary ATA ports sits on the
 * same addresses that old ISA hardware did.  This dictates that we use
 * those addresses and ignore the BAR's if we cannot set PCI native
 * addressing mode.
 *
 * Per channel: when the progif says the channel is in native mode, its
 * BARs are probed normally; otherwise the fixed ISA-compatible ranges
 * (0x1f0-0x1f7/0x3f6 primary, 0x170-0x177/0x376 secondary) are
 * registered and pre-reserved.  BARs 4/5 (bus-master DMA) are always
 * probed.
 */
static void
pci_ata_maps(device_t pcib, device_t bus, device_t dev, int b,
    int s, int f, struct resource_list *rl, int force, uint32_t prefetchmask)
{
	int rid, type, progif;
#if 0
	/* if this device supports PCI native addressing use it */
	progif = pci_read_config(dev, PCIR_PROGIF, 1);
	if ((progif & 0x8a) == 0x8a) {
		if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) &&
		    pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) {
			kprintf("Trying ATA native PCI addressing mode\n");
			pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1);
		}
	}
#endif
	progif = pci_read_config(dev, PCIR_PROGIF, 1);
	type = SYS_RES_IOPORT;
	if (progif & PCIP_STORAGE_IDE_MODEPRIM) {
		/* Primary channel in native mode: trust its BARs. */
		pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(0), rl, force,
		    prefetchmask & (1 << 0));
		pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(1), rl, force,
		    prefetchmask & (1 << 1));
	} else {
		/* Legacy mode: hardwired ISA-compatible port ranges. */
		rid = PCIR_BAR(0);
		resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8);
		resource_list_alloc(rl, bus, dev, type, &rid, 0x1f0, 0x1f7, 8,
		    0);
		rid = PCIR_BAR(1);
		resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1);
		resource_list_alloc(rl, bus, dev, type, &rid, 0x3f6, 0x3f6, 1,
		    0);
	}
	if (progif & PCIP_STORAGE_IDE_MODESEC) {
		/* Secondary channel in native mode: trust its BARs. */
		pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(2), rl, force,
		    prefetchmask & (1 << 2));
		pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(3), rl, force,
		    prefetchmask & (1 << 3));
	} else {
		/* Legacy mode: hardwired ISA-compatible port ranges. */
		rid = PCIR_BAR(2);
		resource_list_add(rl, type, rid, 0x170, 0x177, 8);
		resource_list_alloc(rl, bus, dev, type, &rid, 0x170, 0x177, 8,
		    0);
		rid = PCIR_BAR(3);
		resource_list_add(rl, type, rid, 0x376, 0x376, 1);
		resource_list_alloc(rl, bus, dev, type, &rid, 0x376, 0x376, 1,
		    0);
	}
	/* Bus-master DMA registers are always BAR-described. */
	pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(4), rl, force,
	    prefetchmask & (1 << 4));
	pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(5), rl, force,
	    prefetchmask & (1 << 5));
}
201eb0a7 2807
/*
 * Determine the IRQ for a device's INTx pin and record it as the rid 0
 * SYS_RES_IRQ resource.  Priority: user tunable, then (depending on
 * 'force_route') either the bus's interrupt routing or the intline
 * register left by the firmware.  Silently does nothing when the
 * device has no interrupt pin or no valid IRQ can be found.
 */
static void
pci_assign_interrupt(device_t bus, device_t dev, int force_route)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	pcicfgregs *cfg = &dinfo->cfg;
	char tunable_name[64];
	int irq;

	/* Has to have an intpin to have an interrupt. */
	if (cfg->intpin == 0)
		return;

	/* Let the user override the IRQ with a tunable. */
	irq = PCI_INVALID_IRQ;
	ksnprintf(tunable_name, sizeof(tunable_name),
	    "hw.pci%d.%d.%d.INT%c.irq",
	    cfg->domain, cfg->bus, cfg->slot, cfg->intpin + 'A' - 1);
	/* Reject out-of-range tunable values (valid IRQs are 1..254). */
	if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0))
		irq = PCI_INVALID_IRQ;

	/*
	 * If we didn't get an IRQ via the tunable, then we either use the
	 * IRQ value in the intline register or we ask the bus to route an
	 * interrupt for us.  If force_route is true, then we only use the
	 * value in the intline register if the bus was unable to assign an
	 * IRQ.
	 */
	if (!PCI_INTERRUPT_VALID(irq)) {
		if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route)
			irq = PCI_ASSIGN_INTERRUPT(bus, dev);
		if (!PCI_INTERRUPT_VALID(irq))
			irq = cfg->intline;
	}

	/* If after all that we don't have an IRQ, just bail. */
	if (!PCI_INTERRUPT_VALID(irq))
		return;

	/* Update the config register if it changed. */
	if (irq != cfg->intline) {
		cfg->intline = irq;
		pci_write_config(dev, PCIR_INTLINE, irq, 1);
	}

	/* Add this IRQ as rid 0 interrupt resource. */
	resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1);
}
2855
/*
 * Populate the device's resource list: probe all BARs (with special
 * handling for legacy ATA controllers), add any quirked extra map
 * registers, and assign the INTx interrupt resource.
 */
void
pci_add_resources(device_t pcib, device_t bus, device_t dev, int force, uint32_t prefetchmask)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	pcicfgregs *cfg = &dinfo->cfg;
	struct resource_list *rl = &dinfo->resources;
	struct pci_quirk *q;
	int b, i, f, s;

	b = cfg->bus;
	s = cfg->slot;
	f = cfg->func;

	/* ATA devices needs special map treatment */
	if ((pci_get_class(dev) == PCIC_STORAGE) &&
	    (pci_get_subclass(dev) == PCIS_STORAGE_IDE) &&
	    ((pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV) ||
	     (!pci_read_config(dev, PCIR_BAR(0), 4) &&
	      !pci_read_config(dev, PCIR_BAR(2), 4))) )
		pci_ata_maps(pcib, bus, dev, b, s, f, rl, force, prefetchmask);
	else
		/* pci_add_map returns 1 or 2 (64-bit BARs occupy two slots). */
		for (i = 0; i < cfg->nummaps;)
			i += pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(i),
			    rl, force, prefetchmask & (1 << i));

	/*
	 * Add additional, quirked resources.
	 */
	for (q = &pci_quirks[0]; q->devid; q++) {
		if (q->devid == ((cfg->device << 16) | cfg->vendor)
		    && q->type == PCI_QUIRK_MAP_REG)
			pci_add_map(pcib, bus, dev, b, s, f, q->arg1, rl,
			    force, 0);
	}

	if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) {
#ifdef __PCI_REROUTE_INTERRUPT
		/*
		 * Try to re-route interrupts.  Sometimes the BIOS or
		 * firmware may leave bogus values in these registers.
		 * If the re-route fails, then just stick with what we
		 * have.
		 */
		pci_assign_interrupt(bus, dev, 1);
#else
		pci_assign_interrupt(bus, dev, 0);
#endif
	}
}
2905
/*
 * Scan every slot/function on bus 'busno' in 'domain' and add a child
 * device for each function that responds.  'dinfo_size' lets
 * subclassed busses allocate a larger per-device info structure.
 */
void
pci_add_children(device_t dev, int domain, int busno, size_t dinfo_size)
{
#define	REG(n, w)	PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
	device_t pcib = device_get_parent(dev);
	struct pci_devinfo *dinfo;
	int maxslots;
	int s, f, pcifunchigh;
	uint8_t hdrtype;

	KASSERT(dinfo_size >= sizeof(struct pci_devinfo),
	    ("dinfo_size too small"));
	maxslots = PCIB_MAXSLOTS(pcib);
	for (s = 0; s <= maxslots; s++) {
		pcifunchigh = 0;
		f = 0;
		/* Brief settle time before touching config space. */
		DELAY(1);
		hdrtype = REG(PCIR_HDRTYPE, 1);
		/* Unknown header type: nothing usable in this slot. */
		if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
			continue;
		/* Multi-function devices expose functions 0..7. */
		if (hdrtype & PCIM_MFDEV)
			pcifunchigh = PCI_FUNCMAX;
		for (f = 0; f <= pcifunchigh; f++) {
			dinfo = pci_read_device(pcib, domain, busno, s, f,
			    dinfo_size);
			if (dinfo != NULL) {
				pci_add_child(dev, dinfo);
			}
		}
	}
#undef REG
}
2938
/*
 * Create the newbus child for a probed PCI function, snapshot then
 * restore its config space (powering it up if needed), and populate
 * its resource list.  The save-before-restore order is deliberate:
 * the restore path relies on the saved state.
 */
void
pci_add_child(device_t bus, struct pci_devinfo *dinfo)
{
	device_t pcib;

	pcib = device_get_parent(bus);
	dinfo->cfg.dev = device_add_child(bus, NULL, -1);
	device_set_ivars(dinfo->cfg.dev, dinfo);
	resource_list_init(&dinfo->resources);
	pci_cfg_save(dinfo->cfg.dev, dinfo, 0);
	pci_cfg_restore(dinfo->cfg.dev, dinfo);
	pci_print_verbose(dinfo);
	pci_add_resources(pcib, bus, dinfo->cfg.dev, 0, 0);
}
2953
2954static int
4a5a2d63 2955pci_probe(device_t dev)
984263bc 2956{
984263bc 2957 device_set_desc(dev, "PCI bus");
4a5a2d63 2958
4d28e78f
SZ
2959 /* Allow other subclasses to override this driver. */
2960 return (-1000);
984263bc
MD
2961}
2962
2963static int
e126caf1
MD
2964pci_attach(device_t dev)
2965{
4d28e78f
SZ
2966 int busno, domain;
2967
2968 /*
2969 * Since there can be multiple independantly numbered PCI
2970 * busses on systems with multiple PCI domains, we can't use
2971 * the unit number to decide which bus we are probing. We ask
2972 * the parent pcib what our domain and bus numbers are.
2973 */
2974 domain = pcib_get_domain(dev);
2975 busno = pcib_get_bus(dev);
2976 if (bootverbose)
2977 device_printf(dev, "domain=%d, physical bus=%d\n",
2978 domain, busno);
e4c9c0c8 2979
4d28e78f 2980 pci_add_children(dev, domain, busno, sizeof(struct pci_devinfo));
e126caf1 2981
4d28e78f
SZ
2982 return (bus_generic_attach(dev));
2983}
2984
/*
 * Bus suspend method: snapshot each child's config space, suspend the
 * children, and then (only if ACPI-driven power management is enabled)
 * drop attached type-0 devices into D3 or whatever state ACPI suggests.
 * Devices must be suspended BEFORE they are powered down.
 */
int
pci_suspend(device_t dev)
{
	int dstate, error, i, numdevs;
	device_t acpi_dev, child, *devlist;
	struct pci_devinfo *dinfo;

	/*
	 * Save the PCI configuration space for each child and set the
	 * device in the appropriate power state for this sleep state.
	 */
	acpi_dev = NULL;
	if (pci_do_power_resume)
		acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
	device_get_children(dev, &devlist, &numdevs);
	for (i = 0; i < numdevs; i++) {
		child = devlist[i];
		dinfo = (struct pci_devinfo *) device_get_ivars(child);
		pci_cfg_save(child, dinfo, 0);
	}

	/* Suspend devices before potentially powering them down. */
	error = bus_generic_suspend(dev);
	if (error) {
		kfree(devlist, M_TEMP);
		return (error);
	}

	/*
	 * Always set the device to D3.  If ACPI suggests a different
	 * power state, use it instead.  If ACPI is not present, the
	 * firmware is responsible for managing device power.  Skip
	 * children who aren't attached since they are powered down
	 * separately.  Only manage type 0 devices for now.
	 */
	for (i = 0; acpi_dev && i < numdevs; i++) {
		child = devlist[i];
		dinfo = (struct pci_devinfo *) device_get_ivars(child);
		if (device_is_attached(child) && dinfo->cfg.hdrtype == 0) {
			dstate = PCI_POWERSTATE_D3;
			ACPI_PWR_FOR_SLEEP(acpi_dev, child, &dstate);
			pci_set_powerstate(child, dstate);
		}
	}
	kfree(devlist, M_TEMP);
	return (0);
}
3032
4d28e78f
SZ
/*
 * Bus resume method: bring each child back to D0 (via ACPI when
 * available), restore its saved config space, then resume the
 * children generically.  Power must be restored BEFORE config space
 * is rewritten.
 */
int
pci_resume(device_t dev)
{
	int i, numdevs;
	device_t acpi_dev, child, *devlist;
	struct pci_devinfo *dinfo;

	/*
	 * Set each child to D0 and restore its PCI configuration space.
	 */
	acpi_dev = NULL;
	if (pci_do_power_resume)
		acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
	device_get_children(dev, &devlist, &numdevs);
	for (i = 0; i < numdevs; i++) {
		/*
		 * Notify ACPI we're going to D0 but ignore the result.  If
		 * ACPI is not present, the firmware is responsible for
		 * managing device power.  Only manage type 0 devices for now.
		 */
		child = devlist[i];
		dinfo = (struct pci_devinfo *) device_get_ivars(child);
		if (acpi_dev && device_is_attached(child) &&
		    dinfo->cfg.hdrtype == 0) {
			ACPI_PWR_FOR_SLEEP(acpi_dev, child, NULL);
			pci_set_powerstate(child, PCI_POWERSTATE_D0);
		}

		/* Now the device is powered up, restore its config space. */
		pci_cfg_restore(child, dinfo);
	}
	kfree(devlist, M_TEMP);
	return (bus_generic_resume(dev));
}
3067
3068static void
3069pci_load_vendor_data(void)
3070{
3071 caddr_t vendordata, info;
3072
3073 if ((vendordata = preload_search_by_type("pci_vendor_data")) != NULL) {
3074 info = preload_search_info(vendordata, MODINFO_ADDR);
3075 pci_vendordata = *(char **)info;
3076 info = preload_search_info(vendordata, MODINFO_SIZE);
3077 pci_vendordata_size = *(size_t *)info;
3078 /* terminate the database */
3079 pci_vendordata[pci_vendordata_size] = '\n';
3080 }
3081}
3082
3083void
3084pci_driver_added(device_t dev, driver_t *driver)
3085{
3086 int numdevs;
3087 device_t *devlist;
3088 device_t child;
3089 struct pci_devinfo *dinfo;
3090 int i;
3091
3092 if (bootverbose)
3093 device_printf(dev, "driver added\n");
3094 DEVICE_IDENTIFY(driver, dev);
3095 device_get_children(dev, &devlist, &numdevs);
3096 for (i = 0; i < numdevs; i++) {
3097 child = devlist[i];
3098 if (device_get_state(child) != DS_NOTPRESENT)
3099 continue;
3100 dinfo = device_get_ivars(child);
3101 pci_print_verbose(dinfo);
3102 if (bootverbose)
3103 kprintf("pci%d:%d:%d:%d: reprobing on driver added\n",
3104 dinfo->cfg.domain, dinfo->cfg.bus, dinfo->cfg.slot,
3105 dinfo->cfg.func);
3106 pci_cfg_restore(child, dinfo);
3107 if (device_probe_and_attach(child) != 0)
3108 pci_cfg_save(child, dinfo, 1);
3109 }
3110 kfree(devlist, M_TEMP);
3111}
3112
11a49859
SZ
/*
 * Bus method: called after a child's driver detaches.
 *
 * Saves the child's configuration space; the non-zero final argument
 * additionally asks pci_cfg_save() to power the device down (per the
 * comment below) so a driverless device stops drawing full power.
 */
static void
pci_child_detached(device_t parent __unused, device_t child)
{
	/* Turn child's power off */
	pci_cfg_save(child, device_get_ivars(child), 1);
}
3119
4d28e78f
SZ
/*
 * Bus method: install an interrupt handler for a child device.
 *
 * The handler is first hooked up through the generic bus code.  For
 * direct children, the PCI-specific bookkeeping below then enables the
 * appropriate delivery mechanism: legacy INTx for rid 0, or the MSI /
 * MSI-X message matching the resource's rid.  Returns 0 or an errno.
 */
int
pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags,
    driver_intr_t *intr, void *arg, void **cookiep, lwkt_serialize_t serializer)
{
#ifdef MSI
	struct pci_devinfo *dinfo;
	struct msix_table_entry *mte;
	struct msix_vector *mv;
	uint64_t addr;
	uint32_t data;
#endif
	int error, rid;
	void *cookie;
	error = bus_generic_setup_intr(dev, child, irq, flags, intr,
	    arg, &cookie, serializer);
	if (error)
		return (error);

	/* If this is not a direct child, just bail out. */
	if (device_get_parent(child) != dev) {
		*cookiep = cookie;
		return(0);
	}

	/* Non-MSI builds always run with classic INTx delivery enabled. */
	pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
#ifdef MSI
	rid = rman_get_rid(irq);
	if (rid == 0) {
		/* Make sure that INTx is enabled */
		pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
	} else {
		/*
		 * Check to see if the interrupt is MSI or MSI-X.
		 * Ask our parent to map the MSI and give
		 * us the address and data register values.
		 * If we fail for some reason, teardown the
		 * interrupt handler.
		 */
		dinfo = device_get_ivars(child);
		if (dinfo->cfg.msi.msi_alloc > 0) {
			/*
			 * MSI: the single address/data pair is programmed
			 * into the device once, on first handler setup.
			 */
			if (dinfo->cfg.msi.msi_addr == 0) {
				KASSERT(dinfo->cfg.msi.msi_handlers == 0,
				    ("MSI has handlers, but vectors not mapped"));
				error = PCIB_MAP_MSI(device_get_parent(dev),
				    child, rman_get_start(irq), &addr, &data);
				if (error)
					goto bad;
				dinfo->cfg.msi.msi_addr = addr;
				dinfo->cfg.msi.msi_data = data;
				pci_enable_msi(child, addr, data);
			}
			dinfo->cfg.msi.msi_handlers++;
		} else {
			/*
			 * MSI-X: rid N corresponds to table entry N-1; each
			 * entry's vector is mapped lazily and unmasked when
			 * its first handler is installed.
			 */
			KASSERT(dinfo->cfg.msix.msix_alloc > 0,
			    ("No MSI or MSI-X interrupts allocated"));
			KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
			    ("MSI-X index too high"));
			mte = &dinfo->cfg.msix.msix_table[rid - 1];
			KASSERT(mte->mte_vector != 0, ("no message vector"));
			mv = &dinfo->cfg.msix.msix_vectors[mte->mte_vector - 1];
			KASSERT(mv->mv_irq == rman_get_start(irq),
			    ("IRQ mismatch"));
			if (mv->mv_address == 0) {
				KASSERT(mte->mte_handlers == 0,
				    ("MSI-X table entry has handlers, but vector not mapped"));
				error = PCIB_MAP_MSI(device_get_parent(dev),
				    child, rman_get_start(irq), &addr, &data);
				if (error)
					goto bad;
				mv->mv_address = addr;
				mv->mv_data = data;
			}
			if (mte->mte_handlers == 0) {
				pci_enable_msix(child, rid - 1, mv->mv_address,
				    mv->mv_data);
				pci_unmask_msix(child, rid - 1);
			}
			mte->mte_handlers++;
		}

		/* Make sure that INTx is disabled if we are using MSI/MSIX */
		pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
	bad:
		if (error) {
			(void)bus_generic_teardown_intr(dev, child, irq,
			    cookie);
			return (error);
		}
	}
#endif
	*cookiep = cookie;
	return (0);
}
3213
3214int
3215pci_teardown_intr(device_t dev, device_t child, struct resource *irq,
3216 void *cookie)
3217{
3218#ifdef MSI
3219 struct msix_table_entry *mte;
3220 struct resource_list_entry *rle;
3221 struct pci_devinfo *dinfo;
3222#endif
3223 int error, rid;
3224
3225 if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE))
3226 return (EINVAL);
3227
3228 /* If this isn't a direct child, just bail out */
3229 if (device_get_parent(child) != dev)
3230 return(bus_generic_teardown_intr(dev, child, irq, cookie));
3231
3232 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
3233#ifdef MSI
3234 rid = rman_get_rid(irq);
3235 if (rid == 0) {
3236 /* Mask INTx */
3237 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
3238 } else {
3239 /*
3240 * Check to see if the interrupt is MSI or MSI-X. If so,
3241 * decrement the appropriate handlers count and mask the
3242 * MSI-X message, or disable MSI messages if the count
3243 * drops to 0.
3244 */
3245 dinfo = device_get_ivars(child);
3246 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
3247 if (rle->res != irq)
3248 return (EINVAL);
3249 if (dinfo->cfg.msi.msi_alloc > 0) {
3250 KASSERT(rid <= dinfo->cfg.msi.msi_alloc,
3251 ("MSI-X index too high"));
3252 if (dinfo->cfg.msi.msi_handlers == 0)
3253 return (EINVAL);
3254 dinfo->cfg.msi.msi_handlers--;
3255 if (dinfo->cfg.msi.msi_handlers == 0)
3256 pci_disable_msi(child);
3257 } else {
3258 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
3259 ("No MSI or MSI-X interrupts allocated"));
3260 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
3261 ("MSI-X index too high"));
3262 mte = &dinfo->cfg.msix.msix_table[rid - 1];
3263 if (mte->mte_handlers == 0)
3264 return (EINVAL);
3265 mte->mte_handlers--;
3266 if (mte->mte_handlers == 0)
3267 pci_mask_msix(child, rid - 1);
984263bc
MD
3268 }
3269 }
4d28e78f
SZ
3270 error = bus_generic_teardown_intr(dev, child, irq, cookie);
3271 if (rid > 0)
3272 KASSERT(error == 0,
3273 ("%s: generic teardown failed for MSI/MSI-X", __func__));
3274#endif
3275 error = bus_generic_teardown_intr(dev, child, irq, cookie);
3276 return (error);
984263bc
MD
3277}
3278
e126caf1 3279int
984263bc
MD
3280pci_print_child(device_t dev, device_t child)
3281{
3282 struct pci_devinfo *dinfo;
3283 struct resource_list *rl;
984263bc
MD
3284 int retval = 0;
3285
3286 dinfo = device_get_ivars(child);
984263bc
MD
3287 rl = &dinfo->resources;
3288
3289 retval += bus_print_child_header(dev, child);
3290
4d28e78f
SZ
3291 retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx");
3292 retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx");
3293 retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld");
984263bc 3294 if (device_get_flags(dev))
85f8e2ea 3295 retval += kprintf(" flags %#x", device_get_flags(dev));
984263bc 3296
85f8e2ea 3297 retval += kprintf(" at device %d.%d", pci_get_slot(child),
4d28e78f 3298 pci_get_function(child));
984263bc
MD
3299
3300 retval += bus_print_child_footer(dev, child);
3301
3302 return (retval);
3303}
3304
4d28e78f
SZ
/*
 * Class/subclass description table consulted by pci_probe_nomatch()
 * when a device has no driver and no entry in the loaded vendor
 * database.  A subclass of -1 is the catch-all description for the
 * whole class; the list is terminated by a NULL desc.
 */
static struct
{
	int	class;
	int	subclass;
	char	*desc;
} pci_nomatch_tab[] = {
	{PCIC_OLD,		-1,			"old"},
	{PCIC_OLD,		PCIS_OLD_NONVGA,	"non-VGA display device"},
	{PCIC_OLD,		PCIS_OLD_VGA,		"VGA-compatible display device"},
	{PCIC_STORAGE,		-1,			"mass storage"},
	{PCIC_STORAGE,		PCIS_STORAGE_SCSI,	"SCSI"},
	{PCIC_STORAGE,		PCIS_STORAGE_IDE,	"ATA"},
	{PCIC_STORAGE,		PCIS_STORAGE_FLOPPY,	"floppy disk"},
	{PCIC_STORAGE,		PCIS_STORAGE_IPI,	"IPI"},
	{PCIC_STORAGE,		PCIS_STORAGE_RAID,	"RAID"},
	{PCIC_STORAGE,		PCIS_STORAGE_ATA_ADMA,	"ATA (ADMA)"},
	{PCIC_STORAGE,		PCIS_STORAGE_SATA,	"SATA"},
	{PCIC_STORAGE,		PCIS_STORAGE_SAS,	"SAS"},
	{PCIC_NETWORK,		-1,			"network"},
	{PCIC_NETWORK,		PCIS_NETWORK_ETHERNET,	"ethernet"},
	{PCIC_NETWORK,		PCIS_NETWORK_TOKENRING,	"token ring"},
	{PCIC_NETWORK,		PCIS_NETWORK_FDDI,	"fddi"},
	{PCIC_NETWORK,		PCIS_NETWORK_ATM,	"ATM"},
	{PCIC_NETWORK,		PCIS_NETWORK_ISDN,	"ISDN"},
	{PCIC_DISPLAY,		-1,			"display"},
	{PCIC_DISPLAY,		PCIS_DISPLAY_VGA,	"VGA"},
	{PCIC_DISPLAY,		PCIS_DISPLAY_XGA,	"XGA"},
	{PCIC_DISPLAY,		PCIS_DISPLAY_3D,	"3D"},
	{PCIC_MULTIMEDIA,	-1,			"multimedia"},
	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_VIDEO,	"video"},
	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_AUDIO,	"audio"},
	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_TELE,	"telephony"},
	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_HDA,	"HDA"},
	{PCIC_MEMORY,		-1,			"memory"},
	{PCIC_MEMORY,		PCIS_MEMORY_RAM,	"RAM"},
	{PCIC_MEMORY,		PCIS_MEMORY_FLASH,	"flash"},
	{PCIC_BRIDGE,		-1,			"bridge"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_HOST,	"HOST-PCI"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_ISA,	"PCI-ISA"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_EISA,	"PCI-EISA"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_MCA,	"PCI-MCA"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_PCI,	"PCI-PCI"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_PCMCIA,	"PCI-PCMCIA"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_NUBUS,	"PCI-NuBus"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_CARDBUS,	"PCI-CardBus"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_RACEWAY,	"PCI-RACEway"},
	{PCIC_SIMPLECOMM,	-1,			"simple comms"},
	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_UART,	"UART"},	/* could detect 16550 */
	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_PAR,	"parallel port"},
	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_MULSER,	"multiport serial"},
	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_MODEM,	"generic modem"},
	{PCIC_BASEPERIPH,	-1,			"base peripheral"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_PIC,	"interrupt controller"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_DMA,	"DMA controller"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_TIMER,	"timer"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_RTC,	"realtime clock"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_PCIHOT,	"PCI hot-plug controller"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_SDHC,	"SD host controller"},
	{PCIC_INPUTDEV,		-1,			"input device"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_KEYBOARD,	"keyboard"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_DIGITIZER,"digitizer"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_MOUSE,	"mouse"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_SCANNER,	"scanner"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_GAMEPORT,	"gameport"},
	{PCIC_DOCKING,		-1,			"docking station"},
	{PCIC_PROCESSOR,	-1,			"processor"},
	{PCIC_SERIALBUS,	-1,			"serial bus"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_FW,	"FireWire"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_ACCESS,	"AccessBus"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_SSA,	"SSA"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_USB,	"USB"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_FC,	"Fibre Channel"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_SMBUS,	"SMBus"},
	{PCIC_WIRELESS,		-1,			"wireless controller"},
	{PCIC_WIRELESS,		PCIS_WIRELESS_IRDA,	"iRDA"},
	{PCIC_WIRELESS,		PCIS_WIRELESS_IR,	"IR"},
	{PCIC_WIRELESS,		PCIS_WIRELESS_RF,	"RF"},
	{PCIC_INTELLIIO,	-1,			"intelligent I/O controller"},
	{PCIC_INTELLIIO,	PCIS_INTELLIIO_I2O,	"I2O"},
	{PCIC_SATCOM,		-1,			"satellite communication"},
	{PCIC_SATCOM,		PCIS_SATCOM_TV,		"sat TV"},
	{PCIC_SATCOM,		PCIS_SATCOM_AUDIO,	"sat audio"},
	{PCIC_SATCOM,		PCIS_SATCOM_VOICE,	"sat voice"},
	{PCIC_SATCOM,		PCIS_SATCOM_DATA,	"sat data"},
	{PCIC_CRYPTO,		-1,			"encrypt/decrypt"},
	{PCIC_CRYPTO,		PCIS_CRYPTO_NETCOMP,	"network/computer crypto"},
	{PCIC_CRYPTO,		PCIS_CRYPTO_ENTERTAIN,	"entertainment crypto"},
	{PCIC_DASP,		-1,			"dasp"},
	{PCIC_DASP,		PCIS_DASP_DPIO,		"DPIO module"},
	{0, 0, NULL}
};
3396
e126caf1 3397void
984263bc
MD
3398pci_probe_nomatch(device_t dev, device_t child)
3399{
4d28e78f
SZ
3400 int i;
3401 char *cp, *scp, *device;
984263bc 3402
4d28e78f
SZ
3403 /*
3404 * Look for a listing for this device in a loaded device database.
3405 */
3406 if ((device = pci_describe_device(child)) != NULL) {
3407 device_printf(dev, "<%s>", device);
3408 kfree(device, M_DEVBUF);
3409 } else {
3410 /*
3411 * Scan the class/subclass descriptions for a general
3412 * description.
3413 */
3414 cp = "unknown";
3415 scp = NULL;
3416 for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) {
3417 if (pci_nomatch_tab[i].class == pci_get_class(child)) {
3418 if (pci_nomatch_tab[i].subclass == -1) {
3419 cp = pci_nomatch_tab[i].desc;
3420 } else if (pci_nomatch_tab[i].subclass ==
3421 pci_get_subclass(child)) {
3422 scp = pci_nomatch_tab[i].desc;
3423 }
3424 }
3425 }
3426 device_printf(dev, "<%s%s%s>",
3427 cp ? cp : "",
3428 ((cp != NULL) && (scp != NULL)) ? ", " : "",
3429 scp ? scp : "");
3430 }
3431 kprintf(" at device %d.%d (no driver attached)\n",
3432 pci_get_slot(child), pci_get_function(child));
638744c5 3433 pci_cfg_save(child, (struct pci_devinfo *)device_get_ivars(child), 1);
984263bc
MD
3434 return;
3435}
3436
4d28e78f
SZ
3437/*
3438 * Parse the PCI device database, if loaded, and return a pointer to a
3439 * description of the device.
3440 *
3441 * The database is flat text formatted as follows:
3442 *
3443 * Any line not in a valid format is ignored.
3444 * Lines are terminated with newline '\n' characters.
3445 *
3446 * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then
3447 * the vendor name.
3448 *
3449 * A DEVICE line is entered immediately below the corresponding VENDOR ID.
3450 * - devices cannot be listed without a corresponding VENDOR line.
3451 * A DEVICE line consists of a TAB, the 4 digit (hex) device code,
3452 * another TAB, then the device name.
3453 */
3454
3455/*
3456 * Assuming (ptr) points to the beginning of a line in the database,
3457 * return the vendor or device and description of the next entry.
3458 * The value of (vendor) or (device) inappropriate for the entry type
3459 * is set to -1. Returns nonzero at the end of the database.
3460 *
3461 * Note that this is slightly unrobust in the face of corrupt data;
3462 * we attempt to safeguard against this by spamming the end of the
3463 * database with a newline when we initialise.
3464 */
3465static int
3466pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc)
3467{
3468 char *cp = *ptr;
3469 int left;
3470
3471 *device = -1;
3472 *vendor = -1;
3473 **desc = '\0';
3474 for (;;) {
3475 left = pci_vendordata_size - (cp - pci_vendordata);
3476 if (left <= 0) {
3477 *ptr = cp;
3478 return(1);
3479 }
3480
3481 /* vendor entry? */
3482 if (*cp != '\t' &&
3483 ksscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2)
3484 break;
3485 /* device entry? */
3486 if (*cp == '\t' &&
3487 ksscanf(cp, "%x\t%80[^\n]", device, *desc) == 2)
3488 break;
3489
3490 /* skip to next line */
3491 while (*cp != '\n' && left > 0) {
3492 cp++;
3493 left--;
3494 }
3495 if (*cp == '\n') {
3496 cp++;
3497 left--;
3498 }
3499 }
3500 /* skip to next line */
3501 while (*cp != '\n' && left > 0) {
3502 cp++;
3503 left--;
3504 }
3505 if (*cp == '\n' && left > 0)
3506 cp++;
3507 *ptr = cp;
3508 return(0);
3509}
3510
3511static char *
3512pci_describe_device(device_t dev)
3513{
3514 int vendor, device;
3515 char *desc, *vp, *dp, *line;
3516
3517 desc = vp = dp = NULL;
3518
3519 /*
3520 * If we have no vendor data, we can't do anything.
3521 */
3522 if (pci_vendordata == NULL)
3523 goto out;
3524
3525 /*
3526 * Scan the vendor data looking for this device
3527 */
3528 line = pci_vendordata;
3529 if ((vp = kmalloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
3530 goto out;
3531 for (;;) {
3532 if (pci_describe_parse_line(&line, &vendor, &device, &vp))
3533 goto out;
3534 if (vendor == pci_get_vendor(dev))
3535 break;
3536 }
3537 if ((dp = kmalloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
3538 goto out;
3539 for (;;) {
3540 if (pci_describe_parse_line(&line, &vendor, &device, &dp)) {
3541 *dp = 0;
3542 break;
3543 }
3544 if (vendor != -1) {
3545 *dp = 0;
3546 break;
3547 }
3548 if (device == pci_get_device(dev))
3549 break;
3550 }
3551 if (dp[0] == '\0')
3552 ksnprintf(dp, 80, "0x%x", pci_get_device(dev));
3553 if ((desc = kmalloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) !=
3554 NULL)
3555 ksprintf(desc, "%s, %s", vp, dp);
3556 out:
3557 if (vp != NULL)
3558 kfree(vp, M_DEVBUF);
3559 if (dp != NULL)
3560 kfree(dp, M_DEVBUF);
3561 return(desc);
3562}
3563
/*
 * Bus method: read a PCI instance variable of a child device.
 *
 * All values are served from the configuration registers cached in
 * the child's pci_devinfo; the hardware is not touched.  Returns 0,
 * EINVAL for PCI_IVAR_ETHADDR (unsupported here), or ENOENT for an
 * unknown ivar.
 */
int
pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
{
	struct pci_devinfo *dinfo;
	pcicfgregs *cfg;

	dinfo = device_get_ivars(child);
	cfg = &dinfo->cfg;

	switch (which) {
	case PCI_IVAR_ETHADDR:
		/*
		 * The generic accessor doesn't deal with failure, so
		 * we set the return value, then return an error.
		 */
		*((uint8_t **) result) = NULL;
		return (EINVAL);
	case PCI_IVAR_SUBVENDOR:
		*result = cfg->subvendor;
		break;
	case PCI_IVAR_SUBDEVICE:
		*result = cfg->subdevice;
		break;
	case PCI_IVAR_VENDOR:
		*result = cfg->vendor;
		break;
	case PCI_IVAR_DEVICE:
		*result = cfg->device;
		break;
	case PCI_IVAR_DEVID:
		/* Combined device(high)/vendor(low) identifier. */
		*result = (cfg->device << 16) | cfg->vendor;
		break;
	case PCI_IVAR_CLASS:
		*result = cfg->baseclass;
		break;
	case PCI_IVAR_SUBCLASS:
		*result = cfg->subclass;
		break;
	case PCI_IVAR_PROGIF:
		*result = cfg->progif;
		break;
	case PCI_IVAR_REVID:
		*result = cfg->revid;
		break;
	case PCI_IVAR_INTPIN:
		*result = cfg->intpin;
		break;
	case PCI_IVAR_IRQ:
		*result = cfg->intline;
		break;
	case PCI_IVAR_DOMAIN:
		*result = cfg->domain;
		break;
	case PCI_IVAR_BUS:
		*result = cfg->bus;
		break;
	case PCI_IVAR_SLOT:
		*result = cfg->slot;
		break;
	case PCI_IVAR_FUNCTION:
		*result = cfg->func;
		break;
	case PCI_IVAR_CMDREG:
		*result = cfg->cmdreg;
		break;
	case PCI_IVAR_CACHELNSZ:
		*result = cfg->cachelnsz;
		break;
	case PCI_IVAR_MINGNT:
		*result = cfg->mingnt;
		break;
	case PCI_IVAR_MAXLAT:
		*result = cfg->maxlat;
		break;
	case PCI_IVAR_LATTIMER:
		*result = cfg->lattimer;
		break;
	/* Cached capability pointers (0 when the capability is absent). */
	case PCI_IVAR_PCIXCAP_PTR:
		*result = cfg->pcix.pcix_ptr;
		break;
	case PCI_IVAR_PCIECAP_PTR:
		*result = cfg->expr.expr_ptr;
		break;
	case PCI_IVAR_VPDCAP_PTR:
		*result = cfg->vpd.vpd_reg;
		break;
	default:
		return (ENOENT);
	}
	return (0);
}
3655
22457186 3656int
984263bc
MD
3657pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
3658{
3659 struct pci_devinfo *dinfo;
984263bc
MD
3660
3661 dinfo = device_get_ivars(child);
984263bc
MD
3662
3663 switch (which) {
4d28e78f
SZ
3664 case PCI_IVAR_INTPIN:
3665 dinfo->cfg.intpin = value;
3666 return (0);
3667 case PCI_IVAR_ETHADDR:
984263bc
MD
3668 case PCI_IVAR_SUBVENDOR:
3669 case PCI_IVAR_SUBDEVICE:
3670 case PCI_IVAR_VENDOR:
3671 case PCI_IVAR_DEVICE:
3672 case PCI_IVAR_DEVID:
3673 case PCI_IVAR_CLASS:
3674 case PCI_IVAR_SUBCLASS:
3675 case PCI_IVAR_PROGIF:
3676 case PCI_IVAR_REVID:
984263bc 3677 case PCI_IVAR_IRQ:
4d28e78f 3678 case PCI_IVAR_DOMAIN:
984263bc
MD
3679 case PCI_IVAR_BUS:
3680 case PCI_IVAR_SLOT:
3681 case PCI_IVAR_FUNCTION:
4d28e78f 3682 return (EINVAL); /* disallow for now */
984263bc 3683
984263bc 3684 default:
4d28e78f
SZ
3685 return (ENOENT);
3686 }
3687}
3688#ifdef notyet
3689#include "opt_ddb.h"
3690#ifdef DDB
3691#include <ddb/ddb.h>
3692#include <sys/cons.h>
3693
3694/*
3695 * List resources based on pci map registers, used for within ddb
3696 */
3697
/*
 * DDB "show pciregs" command: walk the global pci_devq list and print
 * one identification line per device (driver name/unit, selector,
 * class triple, subsystem and vendor/device ids, revision, header
 * type).  NOTE: this whole region is compiled out under
 * "#ifdef notyet".
 */
DB_SHOW_COMMAND(pciregs, db_pci_dump)
{
	struct pci_devinfo *dinfo;
	struct devlist *devlist_head;
	struct pci_conf *p;
	const char *name;
	int i, error, none_count;

	none_count = 0;
	/* get the head of the device queue */
	devlist_head = &pci_devq;

	/*
	 * Go through the list of devices and print out devices
	 */
	for (error = 0, i = 0,
	     dinfo = STAILQ_FIRST(devlist_head);
	     (dinfo != NULL) && (error == 0) && (i < pci_numdevs) && !db_pager_quit;
	     dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {

		/* Populate pd_name and pd_unit */
		name = NULL;
		if (dinfo->cfg.dev)
			name = device_get_name(dinfo->cfg.dev);

		p = &dinfo->conf;
		/* Driverless devices print as "none<n>" with a running count. */
		db_kprintf("%s%d@pci%d:%d:%d:%d:\tclass=0x%06x card=0x%08x "
		    "chip=0x%08x rev=0x%02x hdr=0x%02x\n",
		    (name && *name) ? name : "none",
		    (name && *name) ? (int)device_get_unit(dinfo->cfg.dev) :
		    none_count++,
		    p->pc_sel.pc_domain, p->pc_sel.pc_bus, p->pc_sel.pc_dev,
		    p->pc_sel.pc_func, (p->pc_class << 16) |
		    (p->pc_subclass << 8) | p->pc_progif,
		    (p->pc_subdevice << 16) | p->pc_subvendor,
		    (p->pc_device << 16) | p->pc_vendor,
		    p->pc_revid, p->pc_hdr);
	}
}
4d28e78f
SZ
3737#endif /* DDB */
3738#endif
984263bc 3739
201eb0a7 3740static struct resource *
4d28e78f
SZ
3741pci_alloc_map(device_t dev, device_t child, int type, int *rid,
3742 u_long start, u_long end, u_long count, u_int flags)
201eb0a7
TS
3743{
3744 struct pci_devinfo *dinfo = device_get_ivars(child);
3745 struct resource_list *rl = &dinfo->resources;
3746 struct resource_list_entry *rle;
3747 struct resource *res;
4d28e78f 3748 pci_addr_t map, testval;
201eb0a7
TS
3749 int mapsize;
3750
3751 /*
3752 * Weed out the bogons, and figure out how large the BAR/map
4d28e78f 3753 * is. Bars that read back 0 here are bogus and unimplemented.
201eb0a7 3754 * Note: atapci in legacy mode are special and handled elsewhere
4d28e78f 3755 * in the code. If you have a atapci device in legacy mode and
201eb0a7
TS
3756 * it fails here, that other code is broken.
3757 */
3758 res = NULL;
3759 map = pci_read_config(child, *rid, 4);
3760 pci_write_config(child, *rid, 0xffffffff, 4);
3761 testval = pci_read_config(child, *rid, 4);
4d28e78f
SZ
3762 if (pci_maprange(testval) == 64)
3763 map |= (pci_addr_t)pci_read_config(child, *rid + 4, 4) << 32;
201eb0a7
TS
3764 if (pci_mapbase(testval) == 0)
3765 goto out;
4d28e78f
SZ
3766
3767 /*
3768 * Restore the original value of the BAR. We may have reprogrammed
3769 * the BAR of the low-level console device and when booting verbose,
3770 * we need the console device addressable.
3771 */
3772 pci_write_config(child, *rid, map, 4);
3773
3774 if (PCI_BAR_MEM(testval)) {
201eb0a7
TS
3775 if (type != SYS_RES_MEMORY) {
3776 if (bootverbose)
4d28e78f
SZ
3777 device_printf(dev,
3778 "child %s requested type %d for rid %#x,"
3779 " but the BAR says it is an memio\n",
3780 device_get_nameunit(child), type, *rid);
201eb0a7
TS
3781 goto out;
3782 }
3783 } else {
3784 if (type != SYS_RES_IOPORT) {
3785 if (bootverbose)
4d28e78f
SZ
3786 device_printf(dev,
3787 "child %s requested type %d for rid %#x,"
3788 " but the BAR says it is an ioport\n",
3789 device_get_nameunit(child), type, *rid);
201eb0a7
TS
3790 goto out;
3791 }
3792 }
3793 /*
3794 * For real BARs, we need to override the size that
3795 * the driver requests, because that's what the BAR
3796 * actually uses and we would otherwise have a
3797 * situation where we might allocate the excess to
3798 * another driver, which won't work.
3799 */
3800 mapsize = pci_mapsize(testval);
4d28e78f 3801 count = 1UL << mapsize;
201eb0a7 3802 if (RF_ALIGNMENT(flags) < mapsize)
4d28e78f
SZ
3803 flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize);
3804 if (PCI_BAR_MEM(testval) && (testval & PCIM_BAR_MEM_PREFETCH))
3805 flags |= RF_PREFETCHABLE;
3806
201eb0a7
TS
3807 /*
3808 * Allocate enough resource, and then write back the
4d28e78f 3809 * appropriate bar for that resource.
201eb0a7
TS
3810 */
3811 res = BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid,
4d28e78f 3812 start, end, count, flags);
201eb0a7 3813 if (res == NULL) {
4d28e78f
SZ
3814 device_printf(child,
3815 "%#lx bytes of rid %#x res %d failed (%#lx, %#lx).\n",
3816 count, *rid, type, start, end);
201eb0a7
TS
3817 goto out;
3818 }
3819 resource_list_add(rl, type, *rid, start, end, count);
3820 rle = resource_list_find(rl, type, *rid);
3821 if (rle == NULL)
3822 panic("pci_alloc_map: unexpectedly can't find resource.");
3823 rle->res = res;
3824 rle->start = rman_get_start(res);
3825 rle->end = rman_get_end(res);
3826 rle->count = count;
3827 if (bootverbose)
4d28e78f
SZ
3828 device_printf(child,
3829 "Lazy allocation of %#lx bytes rid %#x type %d at %#lx\n",
3830 count, *rid, type, rman_get_start(res));
201eb0a7
TS
3831 map = rman_get_start(res);
3832out:;
3833 pci_write_config(child, *rid, map, 4);
4d28e78f
SZ
3834 if (pci_maprange(testval) == 64)
3835 pci_write_config(child, *rid + 4, map >> 32, 4);
3836 return (res);
201eb0a7 3837}
4d28e78f 3838
201eb0a7 3839
261fa16d 3840struct resource *
984263bc
MD
3841pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
3842 u_long start, u_long end, u_long count, u_int flags)
3843{
3844 struct pci_devinfo *dinfo = device_get_ivars(child);
3845 struct resource_list *rl = &dinfo->resources;
201eb0a7 3846 struct resource_list_entry *rle;
984263bc 3847 pcicfgregs *cfg = &dinfo->cfg;
4d28e78f 3848//kprintf("%s on %s: requesting resource\n", device_get_desc(child), device_get_desc(dev));
984263bc
MD
3849 /*
3850 * Perform lazy resource allocation
984263bc
MD
3851 */
3852 if (device_get_parent(child) == dev) {
de67e43b
JS
3853 switch (type) {
3854 case SYS_RES_IRQ:
4d28e78f
SZ
3855 /*
3856 * Can't alloc legacy interrupt once MSI messages
3857 * have been allocated.
3858 */
3859#ifdef MSI
3860 if (*rid == 0 && (cfg->msi.msi_alloc > 0 ||
3861 cfg->msix.msix_alloc > 0))
3862 return (NULL);
de67e43b 3863#endif
4d28e78f
SZ
3864 /*
3865 * If the child device doesn't have an
3866 * interrupt routed and is deserving of an
3867 * interrupt, try to assign it one.
3868 */
3869 if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) &&
3870 (cfg->intpin != 0))
3871 pci_assign_interrupt(dev, child, 0);
3872 break;
de67e43b
JS
3873 case SYS_RES_IOPORT:
3874 case SYS_RES_MEMORY:
3875 if (*rid < PCIR_BAR(cfg->nummaps)) {
3876 /*
3877 * Enable the I/O mode. We should
3878 * also be assigning resources too
3879 * when none are present. The
3880 * resource_list_alloc kind of sorta does
3881 * this...
3882 */
3883 if (PCI_ENABLE_IO(dev, child, type))
3884 return (NULL);
984263bc 3885 }
201eb0a7
TS
3886 rle = resource_list_find(rl, type, *rid);
3887 if (rle == NULL)
4d28e78f
SZ
3888 return (pci_alloc_map(dev, child, type, rid,
3889 start, end, count, flags));
820c1612 3890 break;
984263bc 3891 }
201eb0a7
TS
3892 /*
3893 * If we've already allocated the resource, then
4d28e78f 3894 * return it now. But first we may need to activate
201eb0a7 3895 * it, since we don't allocate the resource as active
4d28e78f 3896 * above. Normally this would be done down in the
201eb0a7 3897 * nexus, but since we short-circuit that path we have
4d28e78f 3898 * to do its job here. Not sure if we should kfree the
201eb0a7 3899 * resource if it fails to activate.
201eb0a7
TS
3900 */
3901 rle = resource_list_find(rl, type, *rid);
3902 if (rle != NULL && rle->res != NULL) {
3903 if (bootverbose)
4d28e78f
SZ
3904 device_printf(child,
3905 "Reserved %#lx bytes for rid %#x type %d at %#lx\n",
3906 rman_get_size(rle->res), *rid, type,
3907 rman_get_start(rle->res));
201eb0a7
TS
3908 if ((flags & RF_ACTIVE) &&
3909 bus_generic_activate_resource(dev, child, type,
4d28e78f
SZ
3910 *rid, rle->res) != 0)
3911 return (NULL);
3912 return (rle->res);
201eb0a7 3913 }
984263bc 3914 }
4d28e78f
SZ
3915 return (resource_list_alloc(rl, dev, child, type, rid,
3916 start, end, count, flags));
984263bc
MD
3917}
3918
4d28e78f
SZ
3919void
3920pci_delete_resource(device_t dev, device_t child, int type, int rid)
984263bc 3921{
4d28e78f
SZ
3922 struct pci_devinfo *dinfo;
3923 struct resource_list *rl;
984263bc
MD
3924 struct resource_list_entry *rle;
3925
4d28e78f
SZ
3926 if (device_get_parent(child) != dev)
3927 return;
984263bc 3928
4d28e78f
SZ
3929 dinfo = device_get_ivars(child);
3930 rl = &dinfo->resources;
3931 rle = resource_list_find(rl, type, rid);
3932 if (rle) {
3933 if (rle->res) {
3934 if (rman_get_device(rle->res) != dev ||
3935 rman_get_flags(rle->res) & RF_ACTIVE) {
3936 device_printf(dev, "delete_resource: "
3937 "Resource still owned by child, oops. "
3938 "(type=%d, rid=%d, addr=%lx)\n",
3939 rle->type, rle->rid,
3940 rman_get_start(rle->res));
3941 return;
3942 }
3943 bus_release_resource(dev, type, rid, rle->res);
3944 }
3945 resource_list_delete(rl, type, rid);
3946 }
3947 /*
3948 * Why do we turn off the PCI configuration BAR when we delete a
3949 * resource? -- imp
3950 */
3951 pci_write_config(child, rid, 0, 4);
3952 BUS_DELETE_RESOURCE(device_get_parent(dev), child, type, rid);
984263bc
MD
3953}
3954
e126caf1
MD
3955struct resource_list *
3956pci_get_resource_list (device_t dev, device_t child)
3957{
4d28e78f 3958 struct pci_devinfo *dinfo = device_get_ivars(child);
e126caf1 3959
bcc66dfa
SZ
3960 if (dinfo == NULL)
3961 return (NULL);
3962
b0486c83 3963 return (&dinfo->resources);
e126caf1
MD
3964}
3965
4d28e78f 3966uint32_t
984263bc
MD
3967pci_read_config_method(device_t dev, device_t child, int reg, int width)
3968{
3969 struct pci_devinfo *dinfo = device_get_ivars(child);
3970 pcicfgregs *cfg = &dinfo->cfg;
4a5a2d63 3971
4d28e78f
SZ
3972 return (PCIB_READ_CONFIG(device_get_parent(dev),
3973 cfg->bus, cfg->slot, cfg->func, reg, width));
984263bc
MD
3974}
3975
e126caf1 3976void
984263bc 3977pci_write_config_method(device_t dev, device_t child, int reg,
4d28e78f 3978 uint32_t val, int width)
984263bc
MD
3979{
3980 struct pci_devinfo *dinfo = device_get_ivars(child);
3981 pcicfgregs *cfg = &dinfo->cfg;
4a5a2d63
JS
3982
3983 PCIB_WRITE_CONFIG(device_get_parent(dev),
4d28e78f 3984 cfg->bus, cfg->slot, cfg->func, reg, val, width);
984263bc
MD
3985}
3986
e126caf1 3987int
4d28e78f 3988pci_child_location_str_method(device_t dev, device_t child, char *buf,
e126caf1
MD
3989 size_t buflen)
3990{
e126caf1 3991
f8c7a42d 3992 ksnprintf(buf, buflen, "slot=%d function=%d", pci_get_slot(child),
e126caf1
MD
3993 pci_get_function(child));
3994 return (0);
3995}
3996
3997int
4d28e78f 3998pci_child_pnpinfo_str_method(device_t dev, device_t child, char *buf,
e126caf1
MD
3999 size_t buflen)
4000{
4001 struct pci_devinfo *dinfo;
4002 pcicfgregs *cfg;
4003
4004 dinfo = device_get_ivars(child);
4005 cfg = &dinfo->cfg;
f8c7a42d 4006 ksnprintf(buf, buflen, "vendor=0x%04x device=0x%04x subvendor=0x%04x "
e126caf1
MD
4007 "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device,
4008 cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass,
4009 cfg->progif);
4010 return (0);
4011}
4012
4013int
4014pci_assign_interrupt_method(device_t dev, device_t child)
4d28e78f
SZ
4015{
4016 struct pci_devinfo *dinfo = device_get_ivars(child);
4017 pcicfgregs *cfg = &dinfo->cfg;
4018
4019 return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child,
4020 cfg->intpin));
e126caf1
MD
4021}
4022
984263bc
MD
4023static int
4024pci_modevent(module_t mod, int what, void *arg)
4025{
4d28e78f
SZ
4026 static struct cdev *pci_cdev;
4027 extern struct dev_ops pcic_ops;
4028
984263bc
MD
4029 switch (what) {
4030 case MOD_LOAD:
4031 STAILQ_INIT(&pci_devq);
4d28e78f
SZ
4032 pci_generation = 0;
4033 dev_ops_add(&pcic_ops, -1, 0);
4034 pci_cdev = make_dev(&pcic_ops, 0, UID_ROOT, GID_WHEEL, 0644,
4035 "pci%d", 0);
4036 pci_load_vendor_data();
984263bc 4037 break;
4d28e78f 4038
984263bc 4039 case MOD_UNLOAD:
4d28e78f 4040 destroy_dev(pci_cdev);
984263bc
MD