DEVFS - remove dev_ops_add(), dev_ops_get(), and get_dev()
[dragonfly.git] / sys / bus / pci / pci.c
CommitLineData
4d28e78f
SZ
1/*-
2 * Copyright (c) 1997, Stefan Esser <se@kfreebsd.org>
3 * Copyright (c) 2000, Michael Smith <msmith@kfreebsd.org>
4 * Copyright (c) 2000, BSDi
984263bc
MD
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
12 * disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
83c1faaa
SW
27 *
28 * $FreeBSD: src/sys/dev/pci/pci.c,v 1.355.2.9.2.1 2009/04/15 03:14:26 kensmith Exp $
984263bc
MD
29 */
30
4d28e78f 31#include "opt_bus.h"
6951547b 32#include "opt_compat_oldpci.h"
984263bc
MD
33
34#include <sys/param.h>
35#include <sys/systm.h>
36#include <sys/malloc.h>
37#include <sys/module.h>
4d28e78f 38#include <sys/linker.h>
984263bc
MD
39#include <sys/fcntl.h>
40#include <sys/conf.h>
41#include <sys/kernel.h>
42#include <sys/queue.h>
638744c5 43#include <sys/sysctl.h>
4d28e78f 44#include <sys/endian.h>
984263bc
MD
45
46#include <vm/vm.h>
47#include <vm/pmap.h>
48#include <vm/vm_extern.h>
49
50#include <sys/bus.h>
984263bc 51#include <sys/rman.h>
4d28e78f 52#include <sys/device.h>
984263bc 53
dc5a7bd2 54#include <sys/pciio.h>
4d28e78f
SZ
55#include <bus/pci/pcireg.h>
56#include <bus/pci/pcivar.h>
57#include <bus/pci/pci_private.h>
984263bc 58
4a5a2d63 59#include "pcib_if.h"
4d28e78f
SZ
60#include "pci_if.h"
61
62#ifdef __HAVE_ACPI
63#include <contrib/dev/acpica/acpi.h>
64#include "acpi_if.h"
65#else
66#define ACPI_PWR_FOR_SLEEP(x, y, z)
67#endif
68
35b72619
SZ
69extern struct dev_ops pcic_ops; /* XXX */
70
3a6dc23c
SZ
71typedef void (*pci_read_cap_t)(device_t, int, int, pcicfgregs *);
72
4d28e78f
SZ
73static uint32_t pci_mapbase(unsigned mapreg);
74static const char *pci_maptype(unsigned mapreg);
75static int pci_mapsize(unsigned testval);
76static int pci_maprange(unsigned mapreg);
77static void pci_fixancient(pcicfgregs *cfg);
78
79static int pci_porten(device_t pcib, int b, int s, int f);
80static int pci_memen(device_t pcib, int b, int s, int f);
81static void pci_assign_interrupt(device_t bus, device_t dev,
82 int force_route);
83static int pci_add_map(device_t pcib, device_t bus, device_t dev,
84 int b, int s, int f, int reg,
85 struct resource_list *rl, int force, int prefetch);
86static int pci_probe(device_t dev);
87static int pci_attach(device_t dev);
11a49859 88static void pci_child_detached(device_t, device_t);
4d28e78f
SZ
89static void pci_load_vendor_data(void);
90static int pci_describe_parse_line(char **ptr, int *vendor,
91 int *device, char **desc);
92static char *pci_describe_device(device_t dev);
93static int pci_modevent(module_t mod, int what, void *arg);
94static void pci_hdrtypedata(device_t pcib, int b, int s, int f,
95 pcicfgregs *cfg);
3a6dc23c 96static void pci_read_capabilities(device_t pcib, pcicfgregs *cfg);
4d28e78f
SZ
97static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg,
98 int reg, uint32_t *data);
99#if 0
100static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg,
101 int reg, uint32_t data);
102#endif
103static void pci_read_vpd(device_t pcib, pcicfgregs *cfg);
104static void pci_disable_msi(device_t dev);
105static void pci_enable_msi(device_t dev, uint64_t address,
106 uint16_t data);
107static void pci_enable_msix(device_t dev, u_int index,
108 uint64_t address, uint32_t data);
109static void pci_mask_msix(device_t dev, u_int index);
110static void pci_unmask_msix(device_t dev, u_int index);
111static int pci_msi_blacklisted(void);
112static void pci_resume_msi(device_t dev);
113static void pci_resume_msix(device_t dev);
d85e7311
SZ
114static int pcie_slotimpl(const pcicfgregs *);
115static void pci_print_verbose_expr(const pcicfgregs *);
4d28e78f 116
3a6dc23c
SZ
117static void pci_read_cap_pmgt(device_t, int, int, pcicfgregs *);
118static void pci_read_cap_ht(device_t, int, int, pcicfgregs *);
119static void pci_read_cap_msi(device_t, int, int, pcicfgregs *);
120static void pci_read_cap_msix(device_t, int, int, pcicfgregs *);
121static void pci_read_cap_vpd(device_t, int, int, pcicfgregs *);
122static void pci_read_cap_subvendor(device_t, int, int,
123 pcicfgregs *);
124static void pci_read_cap_pcix(device_t, int, int, pcicfgregs *);
d85e7311 125static void pci_read_cap_express(device_t, int, int, pcicfgregs *);
3a6dc23c 126
4d28e78f
SZ
127static device_method_t pci_methods[] = {
128 /* Device interface */
129 DEVMETHOD(device_probe, pci_probe),
130 DEVMETHOD(device_attach, pci_attach),
131 DEVMETHOD(device_detach, bus_generic_detach),
132 DEVMETHOD(device_shutdown, bus_generic_shutdown),
133 DEVMETHOD(device_suspend, pci_suspend),
134 DEVMETHOD(device_resume, pci_resume),
135
136 /* Bus interface */
137 DEVMETHOD(bus_print_child, pci_print_child),
138 DEVMETHOD(bus_probe_nomatch, pci_probe_nomatch),
139 DEVMETHOD(bus_read_ivar, pci_read_ivar),
140 DEVMETHOD(bus_write_ivar, pci_write_ivar),
141 DEVMETHOD(bus_driver_added, pci_driver_added),
11a49859 142 DEVMETHOD(bus_child_detached, pci_child_detached),
4d28e78f
SZ
143 DEVMETHOD(bus_setup_intr, pci_setup_intr),
144 DEVMETHOD(bus_teardown_intr, pci_teardown_intr),
145
146 DEVMETHOD(bus_get_resource_list,pci_get_resource_list),
147 DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
148 DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
149 DEVMETHOD(bus_delete_resource, pci_delete_resource),
150 DEVMETHOD(bus_alloc_resource, pci_alloc_resource),
151 DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource),
152 DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
153 DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
154 DEVMETHOD(bus_child_pnpinfo_str, pci_child_pnpinfo_str_method),
155 DEVMETHOD(bus_child_location_str, pci_child_location_str_method),
156
157 /* PCI interface */
158 DEVMETHOD(pci_read_config, pci_read_config_method),
159 DEVMETHOD(pci_write_config, pci_write_config_method),
160 DEVMETHOD(pci_enable_busmaster, pci_enable_busmaster_method),
161 DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method),
162 DEVMETHOD(pci_enable_io, pci_enable_io_method),
163 DEVMETHOD(pci_disable_io, pci_disable_io_method),
164 DEVMETHOD(pci_get_vpd_ident, pci_get_vpd_ident_method),
165 DEVMETHOD(pci_get_vpd_readonly, pci_get_vpd_readonly_method),
166 DEVMETHOD(pci_get_powerstate, pci_get_powerstate_method),
167 DEVMETHOD(pci_set_powerstate, pci_set_powerstate_method),
168 DEVMETHOD(pci_assign_interrupt, pci_assign_interrupt_method),
169 DEVMETHOD(pci_find_extcap, pci_find_extcap_method),
170 DEVMETHOD(pci_alloc_msi, pci_alloc_msi_method),
171 DEVMETHOD(pci_alloc_msix, pci_alloc_msix_method),
172 DEVMETHOD(pci_remap_msix, pci_remap_msix_method),
173 DEVMETHOD(pci_release_msi, pci_release_msi_method),
174 DEVMETHOD(pci_msi_count, pci_msi_count_method),
175 DEVMETHOD(pci_msix_count, pci_msix_count_method),
176
177 { 0, 0 }
178};
179
180DEFINE_CLASS_0(pci, pci_driver, pci_methods, 0);
4a5a2d63 181
4d28e78f
SZ
182static devclass_t pci_devclass;
183DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, 0);
184MODULE_VERSION(pci, 1);
185
186static char *pci_vendordata;
187static size_t pci_vendordata_size;
dc5a7bd2 188
984263bc 189
3a6dc23c
SZ
190static const struct pci_read_cap {
191 int cap;
192 pci_read_cap_t read_cap;
193} pci_read_caps[] = {
194 { PCIY_PMG, pci_read_cap_pmgt },
195 { PCIY_HT, pci_read_cap_ht },
196 { PCIY_MSI, pci_read_cap_msi },
197 { PCIY_MSIX, pci_read_cap_msix },
198 { PCIY_VPD, pci_read_cap_vpd },
199 { PCIY_SUBVENDOR, pci_read_cap_subvendor },
200 { PCIY_PCIX, pci_read_cap_pcix },
d85e7311 201 { PCIY_EXPRESS, pci_read_cap_express },
3a6dc23c
SZ
202 { 0, NULL } /* required last entry */
203};
204
984263bc 205struct pci_quirk {
4d28e78f 206 uint32_t devid; /* Vendor/device of the card */
984263bc 207 int type;
4d28e78f
SZ
208#define PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */
209#define PCI_QUIRK_DISABLE_MSI 2 /* MSI/MSI-X doesn't work */
984263bc
MD
210 int arg1;
211 int arg2;
212};
213
214struct pci_quirk pci_quirks[] = {
4d28e78f 215 /* The Intel 82371AB and 82443MX has a map register at offset 0x90. */
984263bc
MD
216 { 0x71138086, PCI_QUIRK_MAP_REG, 0x90, 0 },
217 { 0x719b8086, PCI_QUIRK_MAP_REG, 0x90, 0 },
f1f0bfb2
JS
218 /* As does the Serverworks OSB4 (the SMBus mapping register) */
219 { 0x02001166, PCI_QUIRK_MAP_REG, 0x90, 0 },
984263bc 220
4d28e78f
SZ
221 /*
222 * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge
223 * or the CMIC-SL (AKA ServerWorks GC_LE).
224 */
225 { 0x00141166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
226 { 0x00171166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
227
228 /*
229 * MSI doesn't work on earlier Intel chipsets including
230 * E7500, E7501, E7505, 845, 865, 875/E7210, and 855.
231 */
232 { 0x25408086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
233 { 0x254c8086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
234 { 0x25508086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
235 { 0x25608086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
236 { 0x25708086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
237 { 0x25788086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
238 { 0x35808086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
239
240 /*
241 * MSI doesn't work with devices behind the AMD 8131 HT-PCIX
242 * bridge.
243 */
244 { 0x74501022, PCI_QUIRK_DISABLE_MSI, 0, 0 },
245
984263bc
MD
246 { 0 }
247};
248
249/* map register information */
4d28e78f
SZ
250#define PCI_MAPMEM 0x01 /* memory map */
251#define PCI_MAPMEMP 0x02 /* prefetchable memory map */
252#define PCI_MAPPORT 0x04 /* port map */
253
254struct devlist pci_devq;
255uint32_t pci_generation;
256uint32_t pci_numdevs = 0;
257static int pcie_chipset, pcix_chipset;
258
259/* sysctl vars */
260SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD, 0, "PCI bus tuning parameters");
261
262static int pci_enable_io_modes = 1;
263TUNABLE_INT("hw.pci.enable_io_modes", &pci_enable_io_modes);
264SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RW,
265 &pci_enable_io_modes, 1,
266 "Enable I/O and memory bits in the config register. Some BIOSes do not\n\
267enable these bits correctly. We'd like to do this all the time, but there\n\
268are some peripherals that this causes problems with.");
984263bc 269
638744c5
HT
270static int pci_do_power_nodriver = 0;
271TUNABLE_INT("hw.pci.do_power_nodriver", &pci_do_power_nodriver);
272SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RW,
273 &pci_do_power_nodriver, 0,
274 "Place a function into D3 state when no driver attaches to it. 0 means\n\
275disable. 1 means conservatively place devices into D3 state. 2 means\n\
6699890a 276aggressively place devices into D3 state. 3 means put absolutely everything\n\
638744c5
HT
277in D3 state.");
278
4d28e78f
SZ
279static int pci_do_power_resume = 1;
280TUNABLE_INT("hw.pci.do_power_resume", &pci_do_power_resume);
281SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RW,
282 &pci_do_power_resume, 1,
283 "Transition from D3 -> D0 on resume.");
284
285static int pci_do_msi = 1;
286TUNABLE_INT("hw.pci.enable_msi", &pci_do_msi);
287SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RW, &pci_do_msi, 1,
288 "Enable support for MSI interrupts");
289
290static int pci_do_msix = 1;
291TUNABLE_INT("hw.pci.enable_msix", &pci_do_msix);
292SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RW, &pci_do_msix, 1,
293 "Enable support for MSI-X interrupts");
294
295static int pci_honor_msi_blacklist = 1;
296TUNABLE_INT("hw.pci.honor_msi_blacklist", &pci_honor_msi_blacklist);
297SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RD,
298 &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI");
299
300/* Find a device_t by bus/slot/function in domain 0 */
301
302device_t
303pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func)
304{
305
306 return (pci_find_dbsf(0, bus, slot, func));
307}
308
309/* Find a device_t by domain/bus/slot/function */
310
984263bc 311device_t
4d28e78f 312pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func)
984263bc
MD
313{
314 struct pci_devinfo *dinfo;
315
316 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
4d28e78f
SZ
317 if ((dinfo->cfg.domain == domain) &&
318 (dinfo->cfg.bus == bus) &&
984263bc
MD
319 (dinfo->cfg.slot == slot) &&
320 (dinfo->cfg.func == func)) {
321 return (dinfo->cfg.dev);
322 }
323 }
324
325 return (NULL);
326}
327
4d28e78f
SZ
328/* Find a device_t by vendor/device ID */
329
984263bc 330device_t
4d28e78f 331pci_find_device(uint16_t vendor, uint16_t device)
984263bc
MD
332{
333 struct pci_devinfo *dinfo;
334
335 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
336 if ((dinfo->cfg.vendor == vendor) &&
337 (dinfo->cfg.device == device)) {
338 return (dinfo->cfg.dev);
339 }
340 }
341
342 return (NULL);
343}
344
345/* return base address of memory or port map */
346
4d28e78f
SZ
347static uint32_t
348pci_mapbase(uint32_t mapreg)
984263bc 349{
4d28e78f
SZ
350
351 if (PCI_BAR_MEM(mapreg))
352 return (mapreg & PCIM_BAR_MEM_BASE);
353 else
354 return (mapreg & PCIM_BAR_IO_BASE);
984263bc
MD
355}
356
357/* return map type of memory or port map */
358
4d28e78f 359static const char *
984263bc
MD
360pci_maptype(unsigned mapreg)
361{
984263bc 362
4d28e78f
SZ
363 if (PCI_BAR_IO(mapreg))
364 return ("I/O Port");
365 if (mapreg & PCIM_BAR_MEM_PREFETCH)
366 return ("Prefetchable Memory");
367 return ("Memory");
984263bc
MD
368}
369
370/* return log2 of map size decoded for memory or port map */
371
372static int
4d28e78f 373pci_mapsize(uint32_t testval)
984263bc
MD
374{
375 int ln2size;
376
377 testval = pci_mapbase(testval);
378 ln2size = 0;
379 if (testval != 0) {
380 while ((testval & 1) == 0)
381 {
382 ln2size++;
383 testval >>= 1;
384 }
385 }
386 return (ln2size);
387}
388
389/* return log2 of address range supported by map register */
390
391static int
392pci_maprange(unsigned mapreg)
393{
394 int ln2range = 0;
4d28e78f
SZ
395
396 if (PCI_BAR_IO(mapreg))
984263bc 397 ln2range = 32;
4d28e78f
SZ
398 else
399 switch (mapreg & PCIM_BAR_MEM_TYPE) {
400 case PCIM_BAR_MEM_32:
401 ln2range = 32;
402 break;
403 case PCIM_BAR_MEM_1MB:
404 ln2range = 20;
405 break;
406 case PCIM_BAR_MEM_64:
407 ln2range = 64;
408 break;
409 }
984263bc
MD
410 return (ln2range);
411}
412
413/* adjust some values from PCI 1.0 devices to match 2.0 standards ... */
414
415static void
416pci_fixancient(pcicfgregs *cfg)
417{
418 if (cfg->hdrtype != 0)
419 return;
420
421 /* PCI to PCI bridges use header type 1 */
422 if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI)
423 cfg->hdrtype = 1;
424}
425
984263bc
MD
426/* extract header type specific config data */
427
428static void
4a5a2d63 429pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg)
984263bc 430{
4d28e78f 431#define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
984263bc
MD
432 switch (cfg->hdrtype) {
433 case 0:
4a5a2d63
JS
434 cfg->subvendor = REG(PCIR_SUBVEND_0, 2);
435 cfg->subdevice = REG(PCIR_SUBDEV_0, 2);
984263bc
MD
436 cfg->nummaps = PCI_MAXMAPS_0;
437 break;
438 case 1:
984263bc 439 cfg->nummaps = PCI_MAXMAPS_1;
6951547b
SZ
440#ifdef COMPAT_OLDPCI
441 cfg->secondarybus = REG(PCIR_SECBUS_1, 1);
442#endif
984263bc
MD
443 break;
444 case 2:
4a5a2d63
JS
445 cfg->subvendor = REG(PCIR_SUBVEND_2, 2);
446 cfg->subdevice = REG(PCIR_SUBDEV_2, 2);
984263bc 447 cfg->nummaps = PCI_MAXMAPS_2;
6951547b
SZ
448#ifdef COMPAT_OLDPCI
449 cfg->secondarybus = REG(PCIR_SECBUS_2, 1);
450#endif
984263bc
MD
451 break;
452 }
4a5a2d63 453#undef REG
984263bc
MD
454}
455
4d28e78f 456/* read configuration header into pcicfgregs structure */
22457186 457struct pci_devinfo *
4d28e78f 458pci_read_device(device_t pcib, int d, int b, int s, int f, size_t size)
984263bc 459{
4d28e78f 460#define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
984263bc
MD
461 pcicfgregs *cfg = NULL;
462 struct pci_devinfo *devlist_entry;
463 struct devlist *devlist_head;
464
465 devlist_head = &pci_devq;
466
467 devlist_entry = NULL;
468
4d28e78f 469 if (REG(PCIR_DEVVENDOR, 4) != -1) {
efda3bd0 470 devlist_entry = kmalloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
984263bc
MD
471
472 cfg = &devlist_entry->cfg;
4d28e78f
SZ
473
474 cfg->domain = d;
4a5a2d63
JS
475 cfg->bus = b;
476 cfg->slot = s;
477 cfg->func = f;
478 cfg->vendor = REG(PCIR_VENDOR, 2);
479 cfg->device = REG(PCIR_DEVICE, 2);
480 cfg->cmdreg = REG(PCIR_COMMAND, 2);
481 cfg->statreg = REG(PCIR_STATUS, 2);
482 cfg->baseclass = REG(PCIR_CLASS, 1);
483 cfg->subclass = REG(PCIR_SUBCLASS, 1);
484 cfg->progif = REG(PCIR_PROGIF, 1);
485 cfg->revid = REG(PCIR_REVID, 1);
e126caf1 486 cfg->hdrtype = REG(PCIR_HDRTYPE, 1);
4a5a2d63
JS
487 cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1);
488 cfg->lattimer = REG(PCIR_LATTIMER, 1);
489 cfg->intpin = REG(PCIR_INTPIN, 1);
490 cfg->intline = REG(PCIR_INTLINE, 1);
984263bc 491
4a5a2d63
JS
492 cfg->mingnt = REG(PCIR_MINGNT, 1);
493 cfg->maxlat = REG(PCIR_MAXLAT, 1);
984263bc
MD
494
495 cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0;
496 cfg->hdrtype &= ~PCIM_MFDEV;
497
498 pci_fixancient(cfg);
4a5a2d63 499 pci_hdrtypedata(pcib, b, s, f, cfg);
4d28e78f 500
3a6dc23c 501 pci_read_capabilities(pcib, cfg);
984263bc
MD
502
503 STAILQ_INSERT_TAIL(devlist_head, devlist_entry, pci_links);
504
4d28e78f 505 devlist_entry->conf.pc_sel.pc_domain = cfg->domain;
984263bc
MD
506 devlist_entry->conf.pc_sel.pc_bus = cfg->bus;
507 devlist_entry->conf.pc_sel.pc_dev = cfg->slot;
508 devlist_entry->conf.pc_sel.pc_func = cfg->func;
509 devlist_entry->conf.pc_hdr = cfg->hdrtype;
510
511 devlist_entry->conf.pc_subvendor = cfg->subvendor;
512 devlist_entry->conf.pc_subdevice = cfg->subdevice;
513 devlist_entry->conf.pc_vendor = cfg->vendor;
514 devlist_entry->conf.pc_device = cfg->device;
515
516 devlist_entry->conf.pc_class = cfg->baseclass;
517 devlist_entry->conf.pc_subclass = cfg->subclass;
518 devlist_entry->conf.pc_progif = cfg->progif;
519 devlist_entry->conf.pc_revid = cfg->revid;
520
521 pci_numdevs++;
522 pci_generation++;
523 }
524 return (devlist_entry);
525#undef REG
526}
527
3a6dc23c
SZ
528static int
529pci_fixup_nextptr(int *nextptr0)
530{
531 int nextptr = *nextptr0;
532
533 /* "Next pointer" is only one byte */
534 KASSERT(nextptr <= 0xff, ("Illegal next pointer %d\n", nextptr));
535
536 if (nextptr & 0x3) {
537 /*
538 * PCI local bus spec 3.0:
539 *
540 * "... The bottom two bits of all pointers are reserved
541 * and must be implemented as 00b although software must
542 * mask them to allow for future uses of these bits ..."
543 */
544 if (bootverbose) {
545 kprintf("Illegal PCI extended capability "
546 "offset, fixup 0x%02x -> 0x%02x\n",
547 nextptr, nextptr & ~0x3);
548 }
549 nextptr &= ~0x3;
550 }
551 *nextptr0 = nextptr;
552
553 if (nextptr < 0x40) {
554 if (nextptr != 0) {
555 kprintf("Illegal PCI extended capability "
556 "offset 0x%02x", nextptr);
557 }
558 return 0;
559 }
560 return 1;
561}
562
b4c0a845 563static void
3a6dc23c 564pci_read_cap_pmgt(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
984263bc 565{
3a6dc23c
SZ
566#define REG(n, w) \
567 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
568
569 struct pcicfg_pp *pp = &cfg->pp;
570
571 if (pp->pp_cap)
572 return;
573
574 pp->pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
575 pp->pp_status = ptr + PCIR_POWER_STATUS;
576 pp->pp_pmcsr = ptr + PCIR_POWER_PMCSR;
577
578 if ((nextptr - ptr) > PCIR_POWER_DATA) {
579 /*
580 * XXX
581 * We should write to data_select and read back from
582 * data_scale to determine whether data register is
583 * implemented.
584 */
585#ifdef foo
586 pp->pp_data = ptr + PCIR_POWER_DATA;
587#else
588 pp->pp_data = 0;
589#endif
590 }
591
592#undef REG
593}
594
595static void
596pci_read_cap_ht(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
597{
598#ifdef notyet
4d28e78f 599#if defined(__i386__) || defined(__amd64__)
3a6dc23c
SZ
600
601#define REG(n, w) \
602 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
603
604 struct pcicfg_ht *ht = &cfg->ht;
4d28e78f 605 uint64_t addr;
4d28e78f 606 uint32_t val;
3a6dc23c
SZ
607
608 /* Determine HT-specific capability type. */
609 val = REG(ptr + PCIR_HT_COMMAND, 2);
610
611 if ((val & PCIM_HTCMD_CAP_MASK) != PCIM_HTCAP_MSI_MAPPING)
612 return;
613
614 if (!(val & PCIM_HTCMD_MSI_FIXED)) {
615 /* Sanity check the mapping window. */
616 addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI, 4);
617 addr <<= 32;
618 addr |= REG(ptr + PCIR_HTMSI_ADDRESS_LO, 4);
619 if (addr != MSI_INTEL_ADDR_BASE) {
620 device_printf(pcib, "HT Bridge at pci%d:%d:%d:%d "
621 "has non-default MSI window 0x%llx\n",
622 cfg->domain, cfg->bus, cfg->slot, cfg->func,
623 (long long)addr);
624 }
625 } else {
626 addr = MSI_INTEL_ADDR_BASE;
627 }
628
629 ht->ht_msimap = ptr;
630 ht->ht_msictrl = val;
631 ht->ht_msiaddr = addr;
632
633#undef REG
634
635#endif /* __i386__ || __amd64__ */
636#endif /* notyet */
637}
638
639static void
640pci_read_cap_msi(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
641{
642#define REG(n, w) \
643 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
644
645 struct pcicfg_msi *msi = &cfg->msi;
646
647 msi->msi_location = ptr;
648 msi->msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2);
649 msi->msi_msgnum = 1 << ((msi->msi_ctrl & PCIM_MSICTRL_MMC_MASK) >> 1);
650
651#undef REG
652}
653
654static void
655pci_read_cap_msix(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
656{
657#define REG(n, w) \
658 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
659
660 struct pcicfg_msix *msix = &cfg->msix;
661 uint32_t val;
662
663 msix->msix_location = ptr;
664 msix->msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2);
665 msix->msix_msgnum = (msix->msix_ctrl & PCIM_MSIXCTRL_TABLE_SIZE) + 1;
666
667 val = REG(ptr + PCIR_MSIX_TABLE, 4);
668 msix->msix_table_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK);
669 msix->msix_table_offset = val & ~PCIM_MSIX_BIR_MASK;
670
671 val = REG(ptr + PCIR_MSIX_PBA, 4);
672 msix->msix_pba_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK);
673 msix->msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK;
674
675#undef REG
676}
677
678static void
679pci_read_cap_vpd(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
680{
681 cfg->vpd.vpd_reg = ptr;
682}
683
684static void
685pci_read_cap_subvendor(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
686{
687#define REG(n, w) \
688 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
689
690 /* Should always be true. */
691 if ((cfg->hdrtype & PCIM_HDRTYPE) == 1) {
692 uint32_t val;
693
694 val = REG(ptr + PCIR_SUBVENDCAP_ID, 4);
695 cfg->subvendor = val & 0xffff;
696 cfg->subdevice = val >> 16;
697 }
698
699#undef REG
700}
701
702static void
703pci_read_cap_pcix(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
704{
705 /*
706 * Assume we have a PCI-X chipset if we have
707 * at least one PCI-PCI bridge with a PCI-X
708 * capability. Note that some systems with
709 * PCI-express or HT chipsets might match on
710 * this check as well.
711 */
712 if ((cfg->hdrtype & PCIM_HDRTYPE) == 1)
713 pcix_chipset = 1;
d85e7311
SZ
714
715 cfg->pcix.pcix_ptr = ptr;
716}
717
718static int
719pcie_slotimpl(const pcicfgregs *cfg)
720{
721 const struct pcicfg_expr *expr = &cfg->expr;
722 uint16_t port_type;
723
724 /*
725 * Only version 1 can be parsed currently
726 */
727 if ((expr->expr_cap & PCIEM_CAP_VER_MASK) != PCIEM_CAP_VER_1)
728 return 0;
729
730 /*
731 * - Slot implemented bit is meaningful iff current port is
732 * root port or down stream port.
733 * - Testing for root port or down stream port is meanningful
734 * iff PCI configure has type 1 header.
735 */
736
737 if (cfg->hdrtype != 1)
738 return 0;
739
740 port_type = expr->expr_cap & PCIEM_CAP_PORT_TYPE;
741 if (port_type != PCIE_ROOT_PORT && port_type != PCIE_DOWN_STREAM_PORT)
742 return 0;
743
744 if (!(expr->expr_cap & PCIEM_CAP_SLOT_IMPL))
745 return 0;
746
747 return 1;
3a6dc23c
SZ
748}
749
750static void
d85e7311 751pci_read_cap_express(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
3a6dc23c 752{
d85e7311
SZ
753#define REG(n, w) \
754 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
755
756 struct pcicfg_expr *expr = &cfg->expr;
757
3a6dc23c
SZ
758 /*
759 * Assume we have a PCI-express chipset if we have
760 * at least one PCI-express device.
761 */
762 pcie_chipset = 1;
d85e7311
SZ
763
764 expr->expr_ptr = ptr;
765 expr->expr_cap = REG(ptr + PCIER_CAPABILITY, 2);
766
767 /*
768 * Only version 1 can be parsed currently
769 */
770 if ((expr->expr_cap & PCIEM_CAP_VER_MASK) != PCIEM_CAP_VER_1)
771 return;
772
773 /*
774 * Read slot capabilities. Slot capabilities exists iff
775 * current port's slot is implemented
776 */
777 if (pcie_slotimpl(cfg))
778 expr->expr_slotcap = REG(ptr + PCIER_SLOTCAP, 4);
779
780#undef REG
3a6dc23c
SZ
781}
782
783static void
784pci_read_capabilities(device_t pcib, pcicfgregs *cfg)
785{
786#define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
787#define WREG(n, v, w) PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w)
788
789 uint32_t val;
790 int nextptr, ptrptr;
791
792 if ((REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT) == 0) {
793 /* No capabilities */
794 return;
795 }
0c78fe3f 796
4d28e78f 797 switch (cfg->hdrtype & PCIM_HDRTYPE) {
984263bc 798 case 0:
81c29ce4
SZ
799 case 1:
800 ptrptr = PCIR_CAP_PTR;
984263bc
MD
801 break;
802 case 2:
4d28e78f 803 ptrptr = PCIR_CAP_PTR_2; /* cardbus capabilities ptr */
984263bc
MD
804 break;
805 default:
3a6dc23c 806 return; /* no capabilities support */
984263bc 807 }
4d28e78f 808 nextptr = REG(ptrptr, 1); /* sanity check? */
984263bc
MD
809
810 /*
811 * Read capability entries.
812 */
3a6dc23c
SZ
813 while (pci_fixup_nextptr(&nextptr)) {
814 const struct pci_read_cap *rc;
815 int ptr = nextptr;
816
4d28e78f 817 /* Find the next entry */
4d28e78f 818 nextptr = REG(ptr + PCICAP_NEXTPTR, 1);
984263bc
MD
819
820 /* Process this entry */
3a6dc23c
SZ
821 val = REG(ptr + PCICAP_ID, 1);
822 for (rc = pci_read_caps; rc->read_cap != NULL; ++rc) {
823 if (rc->cap == val) {
824 rc->read_cap(pcib, ptr, nextptr, cfg);
4d28e78f
SZ
825 break;
826 }
984263bc
MD
827 }
828 }
4d28e78f 829/* REG and WREG use carry through to next functions */
984263bc
MD
830}
831
4d28e78f
SZ
832/*
833 * PCI Vital Product Data
834 */
835
836#define PCI_VPD_TIMEOUT 1000000
984263bc 837
4d28e78f
SZ
838static int
839pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data)
984263bc 840{
4d28e78f 841 int count = PCI_VPD_TIMEOUT;
984263bc 842
4d28e78f 843 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
984263bc 844
4d28e78f 845 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2);
984263bc 846
4d28e78f
SZ
847 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000) {
848 if (--count < 0)
849 return (ENXIO);
850 DELAY(1); /* limit looping */
851 }
852 *data = (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4));
984263bc 853
984263bc
MD
854 return (0);
855}
984263bc 856
4d28e78f
SZ
#if 0
/*
 * Write one aligned 32-bit word of VPD data at offset 'reg'.
 * Returns 0 on success or ENXIO on timeout.  Currently unused.
 */
static int
pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data)
{
	int count = PCI_VPD_TIMEOUT;

	/* Fixed typo in assertion text ("must by" -> "must be"). */
	KASSERT((reg & 3) == 0, ("VPD register must be 4 byte aligned"));

	WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4);
	WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2);
	/* Flag bit 0x8000 clears when the write has completed. */
	while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000) {
		if (--count < 0)
			return (ENXIO);
		DELAY(1);	/* limit looping */
	}

	return (0);
}
#endif
876
877#undef PCI_VPD_TIMEOUT
878
/*
 * Cursor state for the byte-at-a-time VPD reader (vpd_nextbyte()).
 */
struct vpd_readstate {
	device_t pcib;		/* bridge used for config-space access */
	pcicfgregs *cfg;	/* device whose VPD is being read */
	uint32_t val;		/* most recent 32-bit word read */
	int bytesinval;		/* unconsumed bytes remaining in 'val' */
	int off;		/* next VPD offset to fetch */
	uint8_t cksum;		/* running sum of all bytes delivered */
};
887
888static int
889vpd_nextbyte(struct vpd_readstate *vrs, uint8_t *data)
890{
891 uint32_t reg;
892 uint8_t byte;
893
894 if (vrs->bytesinval == 0) {
895 if (pci_read_vpd_reg(vrs->pcib, vrs->cfg, vrs->off, &reg))
896 return (ENXIO);
897 vrs->val = le32toh(reg);
898 vrs->off += 4;
899 byte = vrs->val & 0xff;
900 vrs->bytesinval = 3;
901 } else {
902 vrs->val = vrs->val >> 8;
903 byte = vrs->val & 0xff;
904 vrs->bytesinval--;
905 }
906
907 vrs->cksum += byte;
908 *data = byte;
909 return (0);
910}
911
d85e7311
SZ
912int
913pcie_slot_implemented(device_t dev)
914{
915 struct pci_devinfo *dinfo = device_get_ivars(dev);
916
917 return pcie_slotimpl(&dinfo->cfg);
918}
919
4d28e78f
SZ
920void
921pcie_set_max_readrq(device_t dev, uint16_t rqsize)
922{
d85e7311
SZ
923 uint8_t expr_ptr;
924 uint16_t val;
925
926 rqsize &= PCIEM_DEVCTL_MAX_READRQ_MASK;
927 if (rqsize > PCIEM_DEVCTL_MAX_READRQ_4096) {
928 panic("%s: invalid max read request size 0x%02x\n",
929 device_get_nameunit(dev), rqsize);
930 }
931
932 expr_ptr = pci_get_pciecap_ptr(dev);
933 if (!expr_ptr)
934 panic("%s: not PCIe device\n", device_get_nameunit(dev));
935
936 val = pci_read_config(dev, expr_ptr + PCIER_DEVCTRL, 2);
937 if ((val & PCIEM_DEVCTL_MAX_READRQ_MASK) != rqsize) {
938 if (bootverbose)
939 device_printf(dev, "adjust device control 0x%04x", val);
940
941 val &= ~PCIEM_DEVCTL_MAX_READRQ_MASK;
942 val |= rqsize;
943 pci_write_config(dev, expr_ptr + PCIER_DEVCTRL, val, 2);
944
945 if (bootverbose)
946 kprintf(" -> 0x%04x\n", val);
947 }
4d28e78f
SZ
948}
949
/*
 * Parse the device's Vital Product Data (VPD) through the VPD capability
 * and cache the results in cfg->vpd.  The data is consumed one byte at a
 * time via vpd_nextbyte() and decoded by a state machine:
 *
 *	state  0 - resource item header (small or large form)
 *	state  1 - Identifier String bytes -> cfg->vpd.vpd_ident
 *	state  2 - VPD-R keyword header    -> cfg->vpd.vpd_ros[]
 *	state  3 - VPD-R keyword value bytes
 *	state  4 - skip bytes of an ignored item
 *	state  5 - VPD-W keyword header    -> cfg->vpd.vpd_w[]
 *	state  6 - VPD-W keyword value bytes
 *	state -1 - normal termination
 *	state -2 - read (I/O) error from vpd_nextbyte()
 *
 * vrs.cksum accumulates every byte read; when the "RV" (checksum) keyword
 * is reached in state 3 a total of zero marks the read-only data valid.
 * On checksum failure the read-only (VPD-R) data is discarded; on an I/O
 * error everything parsed so far is discarded.  vpd_cached is set in all
 * cases so the (possibly failed) read is not retried on every query.
 */
static void
pci_read_vpd(device_t pcib, pcicfgregs *cfg)
{
	struct vpd_readstate vrs;
	int state;
	int name;		/* resource item type from the header byte */
	int remain;		/* bytes left in the current resource item */
	int i;			/* write index into the current value buffer */
	int alloc, off;		/* alloc/off for RO/W arrays */
	int cksumvalid;		/* -1 unknown, 0 bad, 1 good */
	int dflen;		/* length of the current keyword's data field */
	uint8_t byte;
	uint8_t byte2;

	/* init vpd reader */
	vrs.bytesinval = 0;
	vrs.off = 0;
	vrs.pcib = pcib;
	vrs.cfg = cfg;
	vrs.cksum = 0;

	state = 0;
	name = remain = i = 0;	/* shut up stupid gcc */
	alloc = off = 0;	/* shut up stupid gcc */
	dflen = 0;		/* shut up stupid gcc */
	cksumvalid = -1;
	while (state >= 0) {
		if (vpd_nextbyte(&vrs, &byte)) {
			state = -2;
			break;
		}
#if 0
		kprintf("vpd: val: %#x, off: %d, bytesinval: %d, byte: %#hhx, " \
		    "state: %d, remain: %d, name: %#x, i: %d\n", vrs.val,
		    vrs.off, vrs.bytesinval, byte, state, remain, name, i);
#endif
		switch (state) {
		case 0:		/* item name */
			if (byte & 0x80) {
				/* Large resource: 16-bit little-endian length. */
				if (vpd_nextbyte(&vrs, &byte2)) {
					state = -2;
					break;
				}
				remain = byte2;
				if (vpd_nextbyte(&vrs, &byte2)) {
					state = -2;
					break;
				}
				remain |= byte2 << 8;
				/*
				 * The VPD address register is 15 bits wide
				 * (0x7f * 4 bytes); a length running past
				 * that cannot be valid.
				 */
				if (remain > (0x7f*4 - vrs.off)) {
					state = -1;
					kprintf(
					    "pci%d:%d:%d:%d: invalid VPD data, remain %#x\n",
					    cfg->domain, cfg->bus, cfg->slot,
					    cfg->func, remain);
				}
				name = byte & 0x7f;
			} else {
				/* Small resource: 3-bit length, 4-bit name. */
				remain = byte & 0x7;
				name = (byte >> 3) & 0xf;
			}
			switch (name) {
			case 0x2:	/* String */
				cfg->vpd.vpd_ident = kmalloc(remain + 1,
				    M_DEVBUF, M_WAITOK);
				i = 0;
				state = 1;
				break;
			case 0xf:	/* End */
				state = -1;
				break;
			case 0x10:	/* VPD-R */
				/* Start with room for 8 keywords; grown on demand. */
				alloc = 8;
				off = 0;
				cfg->vpd.vpd_ros = kmalloc(alloc *
				    sizeof(*cfg->vpd.vpd_ros), M_DEVBUF,
				    M_WAITOK | M_ZERO);
				state = 2;
				break;
			case 0x11:	/* VPD-W */
				alloc = 8;
				off = 0;
				cfg->vpd.vpd_w = kmalloc(alloc *
				    sizeof(*cfg->vpd.vpd_w), M_DEVBUF,
				    M_WAITOK | M_ZERO);
				state = 5;
				break;
			default:	/* Invalid data, abort */
				state = -1;
				break;
			}
			break;

		case 1:	/* Identifier String */
			cfg->vpd.vpd_ident[i++] = byte;
			remain--;
			if (remain == 0)  {
				cfg->vpd.vpd_ident[i] = '\0';
				state = 0;
			}
			break;

		case 2:	/* VPD-R Keyword Header */
			if (off == alloc) {
				/* Double the array when it fills up. */
				cfg->vpd.vpd_ros = krealloc(cfg->vpd.vpd_ros,
				    (alloc *= 2) * sizeof(*cfg->vpd.vpd_ros),
				    M_DEVBUF, M_WAITOK | M_ZERO);
			}
			cfg->vpd.vpd_ros[off].keyword[0] = byte;
			if (vpd_nextbyte(&vrs, &byte2)) {
				state = -2;
				break;
			}
			cfg->vpd.vpd_ros[off].keyword[1] = byte2;
			if (vpd_nextbyte(&vrs, &byte2)) {
				state = -2;
				break;
			}
			dflen = byte2;
			if (dflen == 0 &&
			    strncmp(cfg->vpd.vpd_ros[off].keyword, "RV",
			    2) == 0) {
				/*
				 * if this happens, we can't trust the rest
				 * of the VPD.
				 */
				kprintf(
				    "pci%d:%d:%d:%d: bad keyword length: %d\n",
				    cfg->domain, cfg->bus, cfg->slot,
				    cfg->func, dflen);
				cksumvalid = 0;
				state = -1;
				break;
			} else if (dflen == 0) {
				/* Zero-length value: store an empty string. */
				cfg->vpd.vpd_ros[off].value = kmalloc(1 *
				    sizeof(*cfg->vpd.vpd_ros[off].value),
				    M_DEVBUF, M_WAITOK);
				cfg->vpd.vpd_ros[off].value[0] = '\x00';
			} else
				cfg->vpd.vpd_ros[off].value = kmalloc(
				    (dflen + 1) *
				    sizeof(*cfg->vpd.vpd_ros[off].value),
				    M_DEVBUF, M_WAITOK);
			remain -= 3;	/* keyword (2 bytes) + length byte */
			i = 0;
			/* keep in sync w/ state 3's transitions */
			if (dflen == 0 && remain == 0)
				state = 0;
			else if (dflen == 0)
				state = 2;
			else
				state = 3;
			break;

		case 3:	/* VPD-R Keyword Value */
			cfg->vpd.vpd_ros[off].value[i++] = byte;
			if (strncmp(cfg->vpd.vpd_ros[off].keyword,
			    "RV", 2) == 0 && cksumvalid == -1) {
				/*
				 * "RV" holds the checksum byte: the running
				 * sum over all bytes read must now be zero.
				 */
				if (vrs.cksum == 0)
					cksumvalid = 1;
				else {
					if (bootverbose)
						kprintf(
				"pci%d:%d:%d:%d: bad VPD cksum, remain %hhu\n",
						    cfg->domain, cfg->bus,
						    cfg->slot, cfg->func,
						    vrs.cksum);
					cksumvalid = 0;
					state = -1;
					break;
				}
			}
			dflen--;
			remain--;
			/* keep in sync w/ state 2's transitions */
			if (dflen == 0)
				cfg->vpd.vpd_ros[off++].value[i++] = '\0';
			if (dflen == 0 && remain == 0) {
				/* Trim the array to the keywords actually seen. */
				cfg->vpd.vpd_rocnt = off;
				cfg->vpd.vpd_ros = krealloc(cfg->vpd.vpd_ros,
				    off * sizeof(*cfg->vpd.vpd_ros),
				    M_DEVBUF, M_WAITOK | M_ZERO);
				state = 0;
			} else if (dflen == 0)
				state = 2;
			break;

		case 4:
			/* Skip bytes of an item we are not interested in. */
			remain--;
			if (remain == 0)
				state = 0;
			break;

		case 5:	/* VPD-W Keyword Header */
			if (off == alloc) {
				cfg->vpd.vpd_w = krealloc(cfg->vpd.vpd_w,
				    (alloc *= 2) * sizeof(*cfg->vpd.vpd_w),
				    M_DEVBUF, M_WAITOK | M_ZERO);
			}
			cfg->vpd.vpd_w[off].keyword[0] = byte;
			if (vpd_nextbyte(&vrs, &byte2)) {
				state = -2;
				break;
			}
			cfg->vpd.vpd_w[off].keyword[1] = byte2;
			if (vpd_nextbyte(&vrs, &byte2)) {
				state = -2;
				break;
			}
			cfg->vpd.vpd_w[off].len = dflen = byte2;
			/* Remember where this writable field lives in VPD space. */
			cfg->vpd.vpd_w[off].start = vrs.off - vrs.bytesinval;
			cfg->vpd.vpd_w[off].value = kmalloc((dflen + 1) *
			    sizeof(*cfg->vpd.vpd_w[off].value),
			    M_DEVBUF, M_WAITOK);
			remain -= 3;
			i = 0;
			/* keep in sync w/ state 6's transitions */
			if (dflen == 0 && remain == 0)
				state = 0;
			else if (dflen == 0)
				state = 5;
			else
				state = 6;
			break;

		case 6:	/* VPD-W Keyword Value */
			cfg->vpd.vpd_w[off].value[i++] = byte;
			dflen--;
			remain--;
			/* keep in sync w/ state 5's transitions */
			if (dflen == 0)
				cfg->vpd.vpd_w[off++].value[i++] = '\0';
			if (dflen == 0 && remain == 0) {
				cfg->vpd.vpd_wcnt = off;
				cfg->vpd.vpd_w = krealloc(cfg->vpd.vpd_w,
				    off * sizeof(*cfg->vpd.vpd_w),
				    M_DEVBUF, M_WAITOK | M_ZERO);
				state = 0;
			} else if (dflen == 0)
				state = 5;
			break;

		default:
			kprintf("pci%d:%d:%d:%d: invalid state: %d\n",
			    cfg->domain, cfg->bus, cfg->slot, cfg->func,
			    state);
			state = -1;
			break;
		}
	}

	if (cksumvalid == 0 || state < -1) {
		/* read-only data bad, clean up */
		if (cfg->vpd.vpd_ros != NULL) {
			for (off = 0; cfg->vpd.vpd_ros[off].value; off++)
				kfree(cfg->vpd.vpd_ros[off].value, M_DEVBUF);
			kfree(cfg->vpd.vpd_ros, M_DEVBUF);
			cfg->vpd.vpd_ros = NULL;
		}
	}
	if (state < -1) {
		/* I/O error, clean up */
		kprintf("pci%d:%d:%d:%d: failed to read VPD data.\n",
		    cfg->domain, cfg->bus, cfg->slot, cfg->func);
		if (cfg->vpd.vpd_ident != NULL) {
			kfree(cfg->vpd.vpd_ident, M_DEVBUF);
			cfg->vpd.vpd_ident = NULL;
		}
		if (cfg->vpd.vpd_w != NULL) {
			for (off = 0; cfg->vpd.vpd_w[off].value; off++)
				kfree(cfg->vpd.vpd_w[off].value, M_DEVBUF);
			kfree(cfg->vpd.vpd_w, M_DEVBUF);
			cfg->vpd.vpd_w = NULL;
		}
	}
	/* Mark the cache populated even on failure so we don't retry. */
	cfg->vpd.vpd_cached = 1;
#undef REG
#undef WREG
}
1229
1230int
1231pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr)
1232{
1233 struct pci_devinfo *dinfo = device_get_ivars(child);
1234 pcicfgregs *cfg = &dinfo->cfg;
1235
1236 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1237 pci_read_vpd(device_get_parent(dev), cfg);
1238
1239 *identptr = cfg->vpd.vpd_ident;
1240
1241 if (*identptr == NULL)
1242 return (ENXIO);
1243
1244 return (0);
1245}
1246
1247int
1248pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw,
1249 const char **vptr)
1250{
1251 struct pci_devinfo *dinfo = device_get_ivars(child);
1252 pcicfgregs *cfg = &dinfo->cfg;
1253 int i;
1254
1255 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1256 pci_read_vpd(device_get_parent(dev), cfg);
1257
1258 for (i = 0; i < cfg->vpd.vpd_rocnt; i++)
1259 if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword,
1260 sizeof(cfg->vpd.vpd_ros[i].keyword)) == 0) {
1261 *vptr = cfg->vpd.vpd_ros[i].value;
1262 }
1263
1264 if (i != cfg->vpd.vpd_rocnt)
1265 return (0);
1266
1267 *vptr = NULL;
1268 return (ENXIO);
1269}
1270
1271/*
1272 * Return the offset in configuration space of the requested extended
1273 * capability entry or 0 if the specified capability was not found.
1274 */
1275int
1276pci_find_extcap_method(device_t dev, device_t child, int capability,
1277 int *capreg)
1278{
1279 struct pci_devinfo *dinfo = device_get_ivars(child);
1280 pcicfgregs *cfg = &dinfo->cfg;
1281 u_int32_t status;
1282 u_int8_t ptr;
1283
1284 /*
1285 * Check the CAP_LIST bit of the PCI status register first.
1286 */
1287 status = pci_read_config(child, PCIR_STATUS, 2);
1288 if (!(status & PCIM_STATUS_CAPPRESENT))
1289 return (ENXIO);
1290
1291 /*
1292 * Determine the start pointer of the capabilities list.
1293 */
1294 switch (cfg->hdrtype & PCIM_HDRTYPE) {
1295 case 0:
1296 case 1:
1297 ptr = PCIR_CAP_PTR;
1298 break;
1299 case 2:
1300 ptr = PCIR_CAP_PTR_2;
1301 break;
1302 default:
1303 /* XXX: panic? */
1304 return (ENXIO); /* no extended capabilities support */
1305 }
1306 ptr = pci_read_config(child, ptr, 1);
1307
1308 /*
1309 * Traverse the capabilities list.
1310 */
1311 while (ptr != 0) {
1312 if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
1313 if (capreg != NULL)
1314 *capreg = ptr;
1315 return (0);
1316 }
1317 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1318 }
1319
1320 return (ENOENT);
1321}
1322
1323/*
1324 * Support for MSI-X message interrupts.
1325 */
1326void
1327pci_enable_msix(device_t dev, u_int index, uint64_t address, uint32_t data)
1328{
1329 struct pci_devinfo *dinfo = device_get_ivars(dev);
1330 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1331 uint32_t offset;
1332
1333 KASSERT(msix->msix_table_len > index, ("bogus index"));
1334 offset = msix->msix_table_offset + index * 16;
1335 bus_write_4(msix->msix_table_res, offset, address & 0xffffffff);
1336 bus_write_4(msix->msix_table_res, offset + 4, address >> 32);
1337 bus_write_4(msix->msix_table_res, offset + 8, data);
1338
1339 /* Enable MSI -> HT mapping. */
1340 pci_ht_map_msi(dev, address);
1341}
1342
1343void
1344pci_mask_msix(device_t dev, u_int index)
1345{
1346 struct pci_devinfo *dinfo = device_get_ivars(dev);
1347 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1348 uint32_t offset, val;
1349
1350 KASSERT(msix->msix_msgnum > index, ("bogus index"));
1351 offset = msix->msix_table_offset + index * 16 + 12;
1352 val = bus_read_4(msix->msix_table_res, offset);
1353 if (!(val & PCIM_MSIX_VCTRL_MASK)) {
1354 val |= PCIM_MSIX_VCTRL_MASK;
1355 bus_write_4(msix->msix_table_res, offset, val);
1356 }
1357}
1358
1359void
1360pci_unmask_msix(device_t dev, u_int index)
1361{
1362 struct pci_devinfo *dinfo = device_get_ivars(dev);
1363 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1364 uint32_t offset, val;
1365
1366 KASSERT(msix->msix_table_len > index, ("bogus index"));
1367 offset = msix->msix_table_offset + index * 16 + 12;
1368 val = bus_read_4(msix->msix_table_res, offset);
1369 if (val & PCIM_MSIX_VCTRL_MASK) {
1370 val &= ~PCIM_MSIX_VCTRL_MASK;
1371 bus_write_4(msix->msix_table_res, offset, val);
1372 }
1373}
1374
1375int
1376pci_pending_msix(device_t dev, u_int index)
1377{
1378 struct pci_devinfo *dinfo = device_get_ivars(dev);
1379 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1380 uint32_t offset, bit;
1381
1382 KASSERT(msix->msix_table_len > index, ("bogus index"));
1383 offset = msix->msix_pba_offset + (index / 32) * 4;
1384 bit = 1 << index % 32;
1385 return (bus_read_4(msix->msix_pba_res, offset) & bit);
1386}
1387
1388/*
1389 * Restore MSI-X registers and table during resume. If MSI-X is
1390 * enabled then walk the virtual table to restore the actual MSI-X
1391 * table.
1392 */
1393static void
1394pci_resume_msix(device_t dev)
1395{
1396 struct pci_devinfo *dinfo = device_get_ivars(dev);
1397 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1398 struct msix_table_entry *mte;
1399 struct msix_vector *mv;
1400 int i;
1401
1402 if (msix->msix_alloc > 0) {
1403 /* First, mask all vectors. */
1404 for (i = 0; i < msix->msix_msgnum; i++)
1405 pci_mask_msix(dev, i);
1406
1407 /* Second, program any messages with at least one handler. */
1408 for (i = 0; i < msix->msix_table_len; i++) {
1409 mte = &msix->msix_table[i];
1410 if (mte->mte_vector == 0 || mte->mte_handlers == 0)
1411 continue;
1412 mv = &msix->msix_vectors[mte->mte_vector - 1];
1413 pci_enable_msix(dev, i, mv->mv_address, mv->mv_data);
1414 pci_unmask_msix(dev, i);
1415 }
1416 }
1417 pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
1418 msix->msix_ctrl, 2);
1419}
1420
1421/*
1422 * Attempt to allocate *count MSI-X messages. The actual number allocated is
1423 * returned in *count. After this function returns, each message will be
1424 * available to the driver as SYS_RES_IRQ resources starting at rid 1.
1425 */
1426int
1427pci_alloc_msix_method(device_t dev, device_t child, int *count)
1428{
1429 struct pci_devinfo *dinfo = device_get_ivars(child);
1430 pcicfgregs *cfg = &dinfo->cfg;
1431 struct resource_list_entry *rle;
1432 int actual, error, i, irq, max;
1433
1434 /* Don't let count == 0 get us into trouble. */
1435 if (*count == 0)
1436 return (EINVAL);
1437
1438 /* If rid 0 is allocated, then fail. */
1439 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
1440 if (rle != NULL && rle->res != NULL)
1441 return (ENXIO);
1442
1443 /* Already have allocated messages? */
1444 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
1445 return (ENXIO);
1446
1447 /* If MSI is blacklisted for this system, fail. */
1448 if (pci_msi_blacklisted())
1449 return (ENXIO);
1450
1451 /* MSI-X capability present? */
1452 if (cfg->msix.msix_location == 0 || !pci_do_msix)
1453 return (ENODEV);
1454
1455 /* Make sure the appropriate BARs are mapped. */
1456 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1457 cfg->msix.msix_table_bar);
1458 if (rle == NULL || rle->res == NULL ||
1459 !(rman_get_flags(rle->res) & RF_ACTIVE))
1460 return (ENXIO);
1461 cfg->msix.msix_table_res = rle->res;
1462 if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
1463 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1464 cfg->msix.msix_pba_bar);
1465 if (rle == NULL || rle->res == NULL ||
1466 !(rman_get_flags(rle->res) & RF_ACTIVE))
1467 return (ENXIO);
1468 }
1469 cfg->msix.msix_pba_res = rle->res;
1470
1471 if (bootverbose)
1472 device_printf(child,
1473 "attempting to allocate %d MSI-X vectors (%d supported)\n",
1474 *count, cfg->msix.msix_msgnum);
1475 max = min(*count, cfg->msix.msix_msgnum);
1476 for (i = 0; i < max; i++) {
1477 /* Allocate a message. */
1478 error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq);
1479 if (error)
1480 break;
1481 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
1482 irq, 1);
1483 }
1484 actual = i;
1485
1486 if (bootverbose) {
1487 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1);
1488 if (actual == 1)
1489 device_printf(child, "using IRQ %lu for MSI-X\n",
1490 rle->start);
1491 else {
1492 int run;
1493
1494 /*
1495 * Be fancy and try to print contiguous runs of
1496 * IRQ values as ranges. 'irq' is the previous IRQ.
1497 * 'run' is true if we are in a range.
1498 */
1499 device_printf(child, "using IRQs %lu", rle->start);
1500 irq = rle->start;
1501 run = 0;
1502 for (i = 1; i < actual; i++) {
1503 rle = resource_list_find(&dinfo->resources,
1504 SYS_RES_IRQ, i + 1);
1505
1506 /* Still in a run? */
1507 if (rle->start == irq + 1) {
1508 run = 1;
1509 irq++;
1510 continue;
1511 }
1512
1513 /* Finish previous range. */
1514 if (run) {
1515 kprintf("-%d", irq);
1516 run = 0;
1517 }
1518
1519 /* Start new range. */
1520 kprintf(",%lu", rle->start);
1521 irq = rle->start;
1522 }
1523
1524 /* Unfinished range? */
1525 if (run)
1526 kprintf("-%d", irq);
1527 kprintf(" for MSI-X\n");
1528 }
1529 }
1530
1531 /* Mask all vectors. */
1532 for (i = 0; i < cfg->msix.msix_msgnum; i++)
1533 pci_mask_msix(child, i);
1534
1535 /* Allocate and initialize vector data and virtual table. */
1536 cfg->msix.msix_vectors = kmalloc(sizeof(struct msix_vector) * actual,
1537 M_DEVBUF, M_WAITOK | M_ZERO);
1538 cfg->msix.msix_table = kmalloc(sizeof(struct msix_table_entry) * actual,
1539 M_DEVBUF, M_WAITOK | M_ZERO);
1540 for (i = 0; i < actual; i++) {
1541 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1542 cfg->msix.msix_vectors[i].mv_irq = rle->start;
1543 cfg->msix.msix_table[i].mte_vector = i + 1;
1544 }
1545
1546 /* Update control register to enable MSI-X. */
1547 cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1548 pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
1549 cfg->msix.msix_ctrl, 2);
1550
1551 /* Update counts of alloc'd messages. */
1552 cfg->msix.msix_alloc = actual;
1553 cfg->msix.msix_table_len = actual;
1554 *count = actual;
1555 return (0);
1556}
1557
1558/*
1559 * By default, pci_alloc_msix() will assign the allocated IRQ
1560 * resources consecutively to the first N messages in the MSI-X table.
1561 * However, device drivers may want to use different layouts if they
1562 * either receive fewer messages than they asked for, or they wish to
1563 * populate the MSI-X table sparsely. This method allows the driver
1564 * to specify what layout it wants. It must be called after a
1565 * successful pci_alloc_msix() but before any of the associated
1566 * SYS_RES_IRQ resources are allocated via bus_alloc_resource().
1567 *
1568 * The 'vectors' array contains 'count' message vectors. The array
1569 * maps directly to the MSI-X table in that index 0 in the array
1570 * specifies the vector for the first message in the MSI-X table, etc.
1571 * The vector value in each array index can either be 0 to indicate
1572 * that no vector should be assigned to a message slot, or it can be a
1573 * number from 1 to N (where N is the count returned from a
 * successful call to pci_alloc_msix()) to indicate which message
1575 * vector (IRQ) to be used for the corresponding message.
1576 *
1577 * On successful return, each message with a non-zero vector will have
1578 * an associated SYS_RES_IRQ whose rid is equal to the array index +
1579 * 1. Additionally, if any of the IRQs allocated via the previous
1580 * call to pci_alloc_msix() are not used in the mapping, those IRQs
1581 * will be kfreed back to the system automatically.
1582 *
1583 * For example, suppose a driver has a MSI-X table with 6 messages and
1584 * asks for 6 messages, but pci_alloc_msix() only returns a count of
1585 * 3. Call the three vectors allocated by pci_alloc_msix() A, B, and
1586 * C. After the call to pci_alloc_msix(), the device will be setup to
1587 * have an MSI-X table of ABC--- (where - means no vector assigned).
 * If the driver then passes a vector array of { 1, 0, 1, 2, 0, 2 },
1589 * then the MSI-X table will look like A-AB-B, and the 'C' vector will
1590 * be kfreed back to the system. This device will also have valid
1591 * SYS_RES_IRQ rids of 1, 3, 4, and 6.
1592 *
1593 * In any case, the SYS_RES_IRQ rid X will always map to the message
1594 * at MSI-X table index X - 1 and will only be valid if a vector is
1595 * assigned to that table entry.
1596 */
1597int
1598pci_remap_msix_method(device_t dev, device_t child, int count,
1599 const u_int *vectors)
1600{
1601 struct pci_devinfo *dinfo = device_get_ivars(child);
1602 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1603 struct resource_list_entry *rle;
1604 int i, irq, j, *used;
1605
1606 /*
1607 * Have to have at least one message in the table but the
1608 * table can't be bigger than the actual MSI-X table in the
1609 * device.
1610 */
1611 if (count == 0 || count > msix->msix_msgnum)
1612 return (EINVAL);
1613
1614 /* Sanity check the vectors. */
1615 for (i = 0; i < count; i++)
1616 if (vectors[i] > msix->msix_alloc)
1617 return (EINVAL);
1618
1619 /*
1620 * Make sure there aren't any holes in the vectors to be used.
1621 * It's a big pain to support it, and it doesn't really make
1622 * sense anyway. Also, at least one vector must be used.
1623 */
1624 used = kmalloc(sizeof(int) * msix->msix_alloc, M_DEVBUF, M_WAITOK |
1625 M_ZERO);
1626 for (i = 0; i < count; i++)
1627 if (vectors[i] != 0)
1628 used[vectors[i] - 1] = 1;
1629 for (i = 0; i < msix->msix_alloc - 1; i++)
1630 if (used[i] == 0 && used[i + 1] == 1) {
1631 kfree(used, M_DEVBUF);
1632 return (EINVAL);
1633 }
1634 if (used[0] != 1) {
1635 kfree(used, M_DEVBUF);
1636 return (EINVAL);
1637 }
1638
1639 /* Make sure none of the resources are allocated. */
1640 for (i = 0; i < msix->msix_table_len; i++) {
1641 if (msix->msix_table[i].mte_vector == 0)
1642 continue;
1643 if (msix->msix_table[i].mte_handlers > 0)
1644 return (EBUSY);
1645 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1646 KASSERT(rle != NULL, ("missing resource"));
1647 if (rle->res != NULL)
1648 return (EBUSY);
1649 }
1650
1651 /* Free the existing resource list entries. */
1652 for (i = 0; i < msix->msix_table_len; i++) {
1653 if (msix->msix_table[i].mte_vector == 0)
1654 continue;
1655 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1656 }
1657
1658 /*
1659 * Build the new virtual table keeping track of which vectors are
1660 * used.
1661 */
1662 kfree(msix->msix_table, M_DEVBUF);
1663 msix->msix_table = kmalloc(sizeof(struct msix_table_entry) * count,
1664 M_DEVBUF, M_WAITOK | M_ZERO);
1665 for (i = 0; i < count; i++)
1666 msix->msix_table[i].mte_vector = vectors[i];
1667 msix->msix_table_len = count;
1668
1669 /* Free any unused IRQs and resize the vectors array if necessary. */
1670 j = msix->msix_alloc - 1;
1671 if (used[j] == 0) {
1672 struct msix_vector *vec;
1673
1674 while (used[j] == 0) {
1675 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1676 msix->msix_vectors[j].mv_irq);
1677 j--;
1678 }
1679 vec = kmalloc(sizeof(struct msix_vector) * (j + 1), M_DEVBUF,
1680 M_WAITOK);
1681 bcopy(msix->msix_vectors, vec, sizeof(struct msix_vector) *
1682 (j + 1));
1683 kfree(msix->msix_vectors, M_DEVBUF);
1684 msix->msix_vectors = vec;
1685 msix->msix_alloc = j + 1;
1686 }
1687 kfree(used, M_DEVBUF);
1688
1689 /* Map the IRQs onto the rids. */
1690 for (i = 0; i < count; i++) {
1691 if (vectors[i] == 0)
1692 continue;
1693 irq = msix->msix_vectors[vectors[i]].mv_irq;
1694 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
1695 irq, 1);
1696 }
1697
1698 if (bootverbose) {
1699 device_printf(child, "Remapped MSI-X IRQs as: ");
1700 for (i = 0; i < count; i++) {
1701 if (i != 0)
1702 kprintf(", ");
1703 if (vectors[i] == 0)
1704 kprintf("---");
1705 else
1706 kprintf("%d",
1707 msix->msix_vectors[vectors[i]].mv_irq);
1708 }
1709 kprintf("\n");
1710 }
1711
1712 return (0);
1713}
1714
1715static int
1716pci_release_msix(device_t dev, device_t child)
1717{
1718 struct pci_devinfo *dinfo = device_get_ivars(child);
1719 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1720 struct resource_list_entry *rle;
1721 int i;
1722
1723 /* Do we have any messages to release? */
1724 if (msix->msix_alloc == 0)
1725 return (ENODEV);
1726
1727 /* Make sure none of the resources are allocated. */
1728 for (i = 0; i < msix->msix_table_len; i++) {
1729 if (msix->msix_table[i].mte_vector == 0)
1730 continue;
1731 if (msix->msix_table[i].mte_handlers > 0)
1732 return (EBUSY);
1733 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1734 KASSERT(rle != NULL, ("missing resource"));
1735 if (rle->res != NULL)
1736 return (EBUSY);
1737 }
1738
1739 /* Update control register to disable MSI-X. */
1740 msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE;
1741 pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL,
1742 msix->msix_ctrl, 2);
1743
1744 /* Free the resource list entries. */
1745 for (i = 0; i < msix->msix_table_len; i++) {
1746 if (msix->msix_table[i].mte_vector == 0)
1747 continue;
1748 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1749 }
1750 kfree(msix->msix_table, M_DEVBUF);
1751 msix->msix_table_len = 0;
1752
1753 /* Release the IRQs. */
1754 for (i = 0; i < msix->msix_alloc; i++)
1755 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1756 msix->msix_vectors[i].mv_irq);
1757 kfree(msix->msix_vectors, M_DEVBUF);
1758 msix->msix_alloc = 0;
1759 return (0);
1760}
1761
1762/*
1763 * Return the max supported MSI-X messages this device supports.
1764 * Basically, assuming the MD code can alloc messages, this function
1765 * should return the maximum value that pci_alloc_msix() can return.
1766 * Thus, it is subject to the tunables, etc.
1767 */
1768int
1769pci_msix_count_method(device_t dev, device_t child)
1770{
1771 struct pci_devinfo *dinfo = device_get_ivars(child);
1772 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1773
1774 if (pci_do_msix && msix->msix_location != 0)
1775 return (msix->msix_msgnum);
1776 return (0);
1777}
1778
1779/*
1780 * HyperTransport MSI mapping control
1781 */
1782void
1783pci_ht_map_msi(device_t dev, uint64_t addr)
1784{
1785 struct pci_devinfo *dinfo = device_get_ivars(dev);
1786 struct pcicfg_ht *ht = &dinfo->cfg.ht;
1787
1788 if (!ht->ht_msimap)
1789 return;
1790
1791 if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) &&
1792 ht->ht_msiaddr >> 20 == addr >> 20) {
1793 /* Enable MSI -> HT mapping. */
1794 ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
1795 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1796 ht->ht_msictrl, 2);
1797 }
1798
1799 if (!addr && ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) {
1800 /* Disable MSI -> HT mapping. */
1801 ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE;
1802 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1803 ht->ht_msictrl, 2);
1804 }
1805}
1806
1807/*
1808 * Support for MSI message signalled interrupts.
1809 */
1810void
1811pci_enable_msi(device_t dev, uint64_t address, uint16_t data)
1812{
1813 struct pci_devinfo *dinfo = device_get_ivars(dev);
1814 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1815
1816 /* Write data and address values. */
1817 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
1818 address & 0xffffffff, 4);
1819 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1820 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR_HIGH,
1821 address >> 32, 4);
1822 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA_64BIT,
1823 data, 2);
1824 } else
1825 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA, data,
1826 2);
1827
1828 /* Enable MSI in the control register. */
1829 msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE;
1830 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1831 2);
1832
1833 /* Enable MSI -> HT mapping. */
1834 pci_ht_map_msi(dev, address);
1835}
1836
1837void
1838pci_disable_msi(device_t dev)
1839{
1840 struct pci_devinfo *dinfo = device_get_ivars(dev);
1841 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1842
1843 /* Disable MSI -> HT mapping. */
1844 pci_ht_map_msi(dev, 0);
1845
1846 /* Disable MSI in the control register. */
1847 msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE;
1848 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1849 2);
1850}
1851
1852/*
1853 * Restore MSI registers during resume. If MSI is enabled then
1854 * restore the data and address registers in addition to the control
1855 * register.
1856 */
1857static void
1858pci_resume_msi(device_t dev)
1859{
1860 struct pci_devinfo *dinfo = device_get_ivars(dev);
1861 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1862 uint64_t address;
1863 uint16_t data;
1864
1865 if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) {
1866 address = msi->msi_addr;
1867 data = msi->msi_data;
1868 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
1869 address & 0xffffffff, 4);
1870 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1871 pci_write_config(dev, msi->msi_location +
1872 PCIR_MSI_ADDR_HIGH, address >> 32, 4);
1873 pci_write_config(dev, msi->msi_location +
1874 PCIR_MSI_DATA_64BIT, data, 2);
1875 } else
1876 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA,
1877 data, 2);
1878 }
1879 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1880 2);
1881}
1882
1883int
1884pci_remap_msi_irq(device_t dev, u_int irq)
1885{
1886 struct pci_devinfo *dinfo = device_get_ivars(dev);
1887 pcicfgregs *cfg = &dinfo->cfg;
1888 struct resource_list_entry *rle;
1889 struct msix_table_entry *mte;
1890 struct msix_vector *mv;
1891 device_t bus;
1892 uint64_t addr;
1893 uint32_t data;
1894 int error, i, j;
1895
1896 bus = device_get_parent(dev);
1897
1898 /*
1899 * Handle MSI first. We try to find this IRQ among our list
1900 * of MSI IRQs. If we find it, we request updated address and
1901 * data registers and apply the results.
1902 */
1903 if (cfg->msi.msi_alloc > 0) {
1904
1905 /* If we don't have any active handlers, nothing to do. */
1906 if (cfg->msi.msi_handlers == 0)
1907 return (0);
1908 for (i = 0; i < cfg->msi.msi_alloc; i++) {
1909 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ,
1910 i + 1);
1911 if (rle->start == irq) {
1912 error = PCIB_MAP_MSI(device_get_parent(bus),
1913 dev, irq, &addr, &data);
1914 if (error)
1915 return (error);
1916 pci_disable_msi(dev);
1917 dinfo->cfg.msi.msi_addr = addr;
1918 dinfo->cfg.msi.msi_data = data;
1919 pci_enable_msi(dev, addr, data);
1920 return (0);
1921 }
1922 }
1923 return (ENOENT);
1924 }
1925
1926 /*
1927 * For MSI-X, we check to see if we have this IRQ. If we do,
1928 * we request the updated mapping info. If that works, we go
1929 * through all the slots that use this IRQ and update them.
1930 */
1931 if (cfg->msix.msix_alloc > 0) {
1932 for (i = 0; i < cfg->msix.msix_alloc; i++) {
1933 mv = &cfg->msix.msix_vectors[i];
1934 if (mv->mv_irq == irq) {
1935 error = PCIB_MAP_MSI(device_get_parent(bus),
1936 dev, irq, &addr, &data);
1937 if (error)
1938 return (error);
1939 mv->mv_address = addr;
1940 mv->mv_data = data;
1941 for (j = 0; j < cfg->msix.msix_table_len; j++) {
1942 mte = &cfg->msix.msix_table[j];
1943 if (mte->mte_vector != i + 1)
1944 continue;
1945 if (mte->mte_handlers == 0)
1946 continue;
1947 pci_mask_msix(dev, j);
1948 pci_enable_msix(dev, j, addr, data);
1949 pci_unmask_msix(dev, j);
1950 }
1951 }
1952 }
1953 return (ENOENT);
1954 }
1955
1956 return (ENOENT);
1957}
1958
1959/*
1960 * Returns true if the specified device is blacklisted because MSI
1961 * doesn't work.
1962 */
1963int
1964pci_msi_device_blacklisted(device_t dev)
1965{
1966 struct pci_quirk *q;
1967
1968 if (!pci_honor_msi_blacklist)
1969 return (0);
1970
1971 for (q = &pci_quirks[0]; q->devid; q++) {
1972 if (q->devid == pci_get_devid(dev) &&
1973 q->type == PCI_QUIRK_DISABLE_MSI)
1974 return (1);
1975 }
1976 return (0);
1977}
1978
1979/*
 * Determine if MSI is blacklisted globally on this system.  Currently,
1981 * we just check for blacklisted chipsets as represented by the
1982 * host-PCI bridge at device 0:0:0. In the future, it may become
1983 * necessary to check other system attributes, such as the kenv values
1984 * that give the motherboard manufacturer and model number.
1985 */
1986static int
1987pci_msi_blacklisted(void)
1988{
1989 device_t dev;
1990
1991 if (!pci_honor_msi_blacklist)
1992 return (0);
1993
1994 /* Blacklist all non-PCI-express and non-PCI-X chipsets. */
1995 if (!(pcie_chipset || pcix_chipset))
1996 return (1);
1997
1998 dev = pci_find_bsf(0, 0, 0);
1999 if (dev != NULL)
2000 return (pci_msi_device_blacklisted(dev));
2001 return (0);
2002}
2003
2004/*
2005 * Attempt to allocate *count MSI messages. The actual number allocated is
2006 * returned in *count. After this function returns, each message will be
2007 * available to the driver as SYS_RES_IRQ resources starting at a rid 1.
2008 */
/*
 * Attempt to allocate *count MSI messages for 'child'.  The request is
 * clamped to the device's advertised maximum and to 32 and must end up
 * a power of two (an MSI requirement); on repeated bridge allocation
 * failure the request is halved until a single message is refused.
 * On success the number actually allocated is returned in *count, the
 * messages appear as SYS_RES_IRQ resources at rids 1..N, and the MME
 * field of the MSI control register is programmed.  Returns 0 or errno.
 */
int
pci_alloc_msi_method(device_t dev, device_t child, int *count)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	pcicfgregs *cfg = &dinfo->cfg;
	struct resource_list_entry *rle;
	int actual, error, i, irqs[32];
	uint16_t ctrl;

	/* Don't let count == 0 get us into trouble. */
	if (*count == 0)
		return (EINVAL);

	/* If rid 0 is allocated, then fail. */
	rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
	if (rle != NULL && rle->res != NULL)
		return (ENXIO);

	/* Already have allocated messages? */
	if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
		return (ENXIO);

	/* If MSI is blacklisted for this system, fail. */
	if (pci_msi_blacklisted())
		return (ENXIO);

	/* MSI capability present? */
	if (cfg->msi.msi_location == 0 || !pci_do_msi)
		return (ENODEV);

	if (bootverbose)
		device_printf(child,
		    "attempting to allocate %d MSI vectors (%d supported)\n",
		    *count, cfg->msi.msi_msgnum);

	/* Don't ask for more than the device supports. */
	actual = min(*count, cfg->msi.msi_msgnum);

	/* Don't ask for more than 32 messages. */
	actual = min(actual, 32);

	/* MSI requires power of 2 number of messages. */
	if (!powerof2(actual))
		return (EINVAL);

	for (;;) {
		/* Try to allocate N messages. */
		error = PCIB_ALLOC_MSI(device_get_parent(dev), child, actual,
		    cfg->msi.msi_msgnum, irqs);
		if (error == 0)
			break;
		if (actual == 1)
			return (error);

		/* Try N / 2. */
		actual >>= 1;
	}

	/*
	 * We now have N actual messages mapped onto SYS_RES_IRQ
	 * resources in the irqs[] array, so add new resources
	 * starting at rid 1.
	 */
	for (i = 0; i < actual; i++)
		resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
		    irqs[i], irqs[i], 1);

	if (bootverbose) {
		if (actual == 1)
			device_printf(child, "using IRQ %d for MSI\n", irqs[0]);
		else {
			int run;

			/*
			 * Be fancy and try to print contiguous runs
			 * of IRQ values as ranges.  'run' is true if
			 * we are in a range.
			 */
			device_printf(child, "using IRQs %d", irqs[0]);
			run = 0;
			for (i = 1; i < actual; i++) {

				/* Still in a run? */
				if (irqs[i] == irqs[i - 1] + 1) {
					run = 1;
					continue;
				}

				/* Finish previous range. */
				if (run) {
					kprintf("-%d", irqs[i - 1]);
					run = 0;
				}

				/* Start new range. */
				kprintf(",%d", irqs[i]);
			}

			/* Unfinished range? */
			if (run)
				kprintf("-%d", irqs[actual - 1]);
			kprintf(" for MSI\n");
		}
	}

	/* Update control register with actual count. */
	/* MME (multiple message enable) encodes log2(actual) at bit 4. */
	ctrl = cfg->msi.msi_ctrl;
	ctrl &= ~PCIM_MSICTRL_MME_MASK;
	ctrl |= (ffs(actual) - 1) << 4;
	cfg->msi.msi_ctrl = ctrl;
	pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2);

	/* Update counts of alloc'd messages. */
	cfg->msi.msi_alloc = actual;
	cfg->msi.msi_handlers = 0;
	*count = actual;
	return (0);
}
2127
2128/* Release the MSI messages associated with this device. */
2129int
2130pci_release_msi_method(device_t dev, device_t child)
2131{
2132 struct pci_devinfo *dinfo = device_get_ivars(child);
2133 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2134 struct resource_list_entry *rle;
2135 int error, i, irqs[32];
2136
2137 /* Try MSI-X first. */
2138 error = pci_release_msix(dev, child);
2139 if (error != ENODEV)
2140 return (error);
2141
2142 /* Do we have any messages to release? */
2143 if (msi->msi_alloc == 0)
2144 return (ENODEV);
2145 KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages"));
2146
2147 /* Make sure none of the resources are allocated. */
2148 if (msi->msi_handlers > 0)
2149 return (EBUSY);
2150 for (i = 0; i < msi->msi_alloc; i++) {
2151 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
2152 KASSERT(rle != NULL, ("missing MSI resource"));
2153 if (rle->res != NULL)
2154 return (EBUSY);
2155 irqs[i] = rle->start;
2156 }
2157
2158 /* Update control register with 0 count. */
2159 KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE),
2160 ("%s: MSI still enabled", __func__));
2161 msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK;
2162 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
2163 msi->msi_ctrl, 2);
2164
2165 /* Release the messages. */
2166 PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs);
2167 for (i = 0; i < msi->msi_alloc; i++)
2168 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
2169
2170 /* Update alloc count. */
2171 msi->msi_alloc = 0;
2172 msi->msi_addr = 0;
2173 msi->msi_data = 0;
2174 return (0);
2175}
2176
2177/*
2178 * Return the max supported MSI messages this device supports.
2179 * Basically, assuming the MD code can alloc messages, this function
2180 * should return the maximum value that pci_alloc_msi() can return.
2181 * Thus, it is subject to the tunables, etc.
2182 */
2183int
2184pci_msi_count_method(device_t dev, device_t child)
2185{
2186 struct pci_devinfo *dinfo = device_get_ivars(child);
2187 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2188
2189 if (pci_do_msi && msi->msi_location != 0)
2190 return (msi->msi_msgnum);
2191 return (0);
2192}
2193
2194/* kfree pcicfgregs structure and all depending data structures */
2195
2196int
2197pci_freecfg(struct pci_devinfo *dinfo)
2198{
2199 struct devlist *devlist_head;
2200 int i;
2201
2202 devlist_head = &pci_devq;
2203
2204 if (dinfo->cfg.vpd.vpd_reg) {
2205 kfree(dinfo->cfg.vpd.vpd_ident, M_DEVBUF);
2206 for (i = 0; i < dinfo->cfg.vpd.vpd_rocnt; i++)
2207 kfree(dinfo->cfg.vpd.vpd_ros[i].value, M_DEVBUF);
2208 kfree(dinfo->cfg.vpd.vpd_ros, M_DEVBUF);
2209 for (i = 0; i < dinfo->cfg.vpd.vpd_wcnt; i++)
2210 kfree(dinfo->cfg.vpd.vpd_w[i].value, M_DEVBUF);
2211 kfree(dinfo->cfg.vpd.vpd_w, M_DEVBUF);
2212 }
2213 STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links);
2214 kfree(dinfo, M_DEVBUF);
2215
2216 /* increment the generation count */
2217 pci_generation++;
2218
2219 /* we're losing one device */
2220 pci_numdevs--;
2221 return (0);
2222}
2223
2224/*
2225 * PCI power manangement
2226 */
2227int
2228pci_set_powerstate_method(device_t dev, device_t child, int state)
2229{
2230 struct pci_devinfo *dinfo = device_get_ivars(child);
2231 pcicfgregs *cfg = &dinfo->cfg;
f4754a59
HT
2232 uint16_t status;
2233 int result, oldstate, highest, delay;
984263bc 2234
4d28e78f 2235 if (cfg->pp.pp_cap == 0)
f4754a59
HT
2236 return (EOPNOTSUPP);
2237
2238 /*
2239 * Optimize a no state change request away. While it would be OK to
2240 * write to the hardware in theory, some devices have shown odd
2241 * behavior when going from D3 -> D3.
2242 */
2243 oldstate = pci_get_powerstate(child);
2244 if (oldstate == state)
2245 return (0);
2246
2247 /*
2248 * The PCI power management specification states that after a state
2249 * transition between PCI power states, system software must
2250 * guarantee a minimal delay before the function accesses the device.
2251 * Compute the worst case delay that we need to guarantee before we
2252 * access the device. Many devices will be responsive much more
2253 * quickly than this delay, but there are some that don't respond
2254 * instantly to state changes. Transitions to/from D3 state require
2255 * 10ms, while D2 requires 200us, and D0/1 require none. The delay
2256 * is done below with DELAY rather than a sleeper function because
2257 * this function can be called from contexts where we cannot sleep.
2258 */
2259 highest = (oldstate > state) ? oldstate : state;
2260 if (highest == PCI_POWERSTATE_D3)
2261 delay = 10000;
2262 else if (highest == PCI_POWERSTATE_D2)
2263 delay = 200;
2264 else
2265 delay = 0;
4d28e78f 2266 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2)
f4754a59
HT
2267 & ~PCIM_PSTAT_DMASK;
2268 result = 0;
2269 switch (state) {
2270 case PCI_POWERSTATE_D0:
2271 status |= PCIM_PSTAT_D0;
2272 break;
2273 case PCI_POWERSTATE_D1:
4d28e78f 2274 if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0)
f4754a59
HT
2275 return (EOPNOTSUPP);
2276 status |= PCIM_PSTAT_D1;
2277 break;
2278 case PCI_POWERSTATE_D2:
4d28e78f 2279 if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0)
f4754a59
HT
2280 return (EOPNOTSUPP);
2281 status |= PCIM_PSTAT_D2;
2282 break;
2283 case PCI_POWERSTATE_D3:
2284 status |= PCIM_PSTAT_D3;
2285 break;
2286 default:
2287 return (EINVAL);
984263bc 2288 }
f4754a59
HT
2289
2290 if (bootverbose)
2291 kprintf(
4d28e78f
SZ
2292 "pci%d:%d:%d:%d: Transition from D%d to D%d\n",
2293 dinfo->cfg.domain, dinfo->cfg.bus, dinfo->cfg.slot,
2294 dinfo->cfg.func, oldstate, state);
f4754a59 2295
4d28e78f 2296 PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2);
f4754a59
HT
2297 if (delay)
2298 DELAY(delay);
2299 return (0);
984263bc
MD
2300}
2301
e126caf1 2302int
984263bc
MD
2303pci_get_powerstate_method(device_t dev, device_t child)
2304{
2305 struct pci_devinfo *dinfo = device_get_ivars(child);
2306 pcicfgregs *cfg = &dinfo->cfg;
f4754a59 2307 uint16_t status;
984263bc
MD
2308 int result;
2309
4d28e78f
SZ
2310 if (cfg->pp.pp_cap != 0) {
2311 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2);
984263bc
MD
2312 switch (status & PCIM_PSTAT_DMASK) {
2313 case PCIM_PSTAT_D0:
2314 result = PCI_POWERSTATE_D0;
2315 break;
2316 case PCIM_PSTAT_D1:
2317 result = PCI_POWERSTATE_D1;
2318 break;
2319 case PCIM_PSTAT_D2:
2320 result = PCI_POWERSTATE_D2;
2321 break;
2322 case PCIM_PSTAT_D3:
2323 result = PCI_POWERSTATE_D3;
2324 break;
2325 default:
2326 result = PCI_POWERSTATE_UNKNOWN;
2327 break;
2328 }
2329 } else {
2330 /* No support, device is always at D0 */
2331 result = PCI_POWERSTATE_D0;
2332 }
f4754a59 2333 return (result);
984263bc
MD
2334}
2335
2336/*
2337 * Some convenience functions for PCI device drivers.
2338 */
2339
2340static __inline void
4d28e78f 2341pci_set_command_bit(device_t dev, device_t child, uint16_t bit)
984263bc 2342{
4d28e78f 2343 uint16_t command;
984263bc 2344
4d28e78f
SZ
2345 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2346 command |= bit;
2347 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
984263bc
MD
2348}
2349
2350static __inline void
4d28e78f
SZ
2351pci_clear_command_bit(device_t dev, device_t child, uint16_t bit)
2352{
2353 uint16_t command;
984263bc 2354
4d28e78f
SZ
2355 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2356 command &= ~bit;
2357 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
984263bc
MD
2358}
2359
4d28e78f
SZ
2360int
2361pci_enable_busmaster_method(device_t dev, device_t child)
2362{
2363 pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
2364 return (0);
2365}
984263bc 2366
4d28e78f
SZ
2367int
2368pci_disable_busmaster_method(device_t dev, device_t child)
2369{
2370 pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
2371 return (0);
2372}
984263bc 2373
4d28e78f
SZ
2374int
2375pci_enable_io_method(device_t dev, device_t child, int space)
ed1bd994 2376{
4d28e78f
SZ
2377 uint16_t command;
2378 uint16_t bit;
2379 char *error;
ed1bd994 2380
4d28e78f
SZ
2381 bit = 0;
2382 error = NULL;
2383
2384 switch(space) {
2385 case SYS_RES_IOPORT:
2386 bit = PCIM_CMD_PORTEN;
2387 error = "port";
ed1bd994 2388 break;
4d28e78f
SZ
2389 case SYS_RES_MEMORY:
2390 bit = PCIM_CMD_MEMEN;
2391 error = "memory";
ed1bd994
MD
2392 break;
2393 default:
4d28e78f 2394 return (EINVAL);
ed1bd994 2395 }
4d28e78f
SZ
2396 pci_set_command_bit(dev, child, bit);
2397 /* Some devices seem to need a brief stall here, what do to? */
2398 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2399 if (command & bit)
2400 return (0);
2401 device_printf(child, "failed to enable %s mapping!\n", error);
2402 return (ENXIO);
ed1bd994 2403}
984263bc 2404
4d28e78f
SZ
2405int
2406pci_disable_io_method(device_t dev, device_t child, int space)
b4c0a845 2407{
4d28e78f
SZ
2408 uint16_t command;
2409 uint16_t bit;
2410 char *error;
b4c0a845 2411
4d28e78f
SZ
2412 bit = 0;
2413 error = NULL;
b4c0a845 2414
4d28e78f
SZ
2415 switch(space) {
2416 case SYS_RES_IOPORT:
2417 bit = PCIM_CMD_PORTEN;
2418 error = "port";
b4c0a845 2419 break;
4d28e78f
SZ
2420 case SYS_RES_MEMORY:
2421 bit = PCIM_CMD_MEMEN;
2422 error = "memory";
b4c0a845
SZ
2423 break;
2424 default:
4d28e78f 2425 return (EINVAL);
b4c0a845 2426 }
4d28e78f
SZ
2427 pci_clear_command_bit(dev, child, bit);
2428 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2429 if (command & bit) {
2430 device_printf(child, "failed to disable %s mapping!\n", error);
2431 return (ENXIO);
b4c0a845 2432 }
4d28e78f 2433 return (0);
b4c0a845
SZ
2434}
2435
4d28e78f
SZ
2436/*
2437 * New style pci driver. Parent device is either a pci-host-bridge or a
2438 * pci-pci-bridge. Both kinds are represented by instances of pcib.
2439 */
2440
22457186 2441void
984263bc
MD
2442pci_print_verbose(struct pci_devinfo *dinfo)
2443{
4d28e78f 2444
984263bc
MD
2445 if (bootverbose) {
2446 pcicfgregs *cfg = &dinfo->cfg;
2447
4d28e78f
SZ
2448 kprintf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n",
2449 cfg->vendor, cfg->device, cfg->revid);
2450 kprintf("\tdomain=%d, bus=%d, slot=%d, func=%d\n",
2451 cfg->domain, cfg->bus, cfg->slot, cfg->func);
2452 kprintf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n",
2453 cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype,
2454 cfg->mfdev);
2455 kprintf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n",
2456 cfg->cmdreg, cfg->statreg, cfg->cachelnsz);
85f8e2ea 2457 kprintf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n",
4d28e78f
SZ
2458 cfg->lattimer, cfg->lattimer * 30, cfg->mingnt,
2459 cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250);
984263bc 2460 if (cfg->intpin > 0)
4d28e78f
SZ
2461 kprintf("\tintpin=%c, irq=%d\n",
2462 cfg->intpin +'a' -1, cfg->intline);
2463 if (cfg->pp.pp_cap) {
2464 uint16_t status;
2465
2466 status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2);
2467 kprintf("\tpowerspec %d supports D0%s%s D3 current D%d\n",
2468 cfg->pp.pp_cap & PCIM_PCAP_SPEC,
2469 cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "",
2470 cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "",
2471 status & PCIM_PSTAT_DMASK);
2472 }
2473 if (cfg->msi.msi_location) {
2474 int ctrl;
2475
2476 ctrl = cfg->msi.msi_ctrl;
2477 kprintf("\tMSI supports %d message%s%s%s\n",
2478 cfg->msi.msi_msgnum,
2479 (cfg->msi.msi_msgnum == 1) ? "" : "s",
2480 (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "",
2481 (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":"");
2482 }
2483 if (cfg->msix.msix_location) {
2484 kprintf("\tMSI-X supports %d message%s ",
2485 cfg->msix.msix_msgnum,
2486 (cfg->msix.msix_msgnum == 1) ? "" : "s");
2487 if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar)
2488 kprintf("in map 0x%x\n",
2489 cfg->msix.msix_table_bar);
2490 else
2491 kprintf("in maps 0x%x and 0x%x\n",
2492 cfg->msix.msix_table_bar,
2493 cfg->msix.msix_pba_bar);
2494 }
d85e7311 2495 pci_print_verbose_expr(cfg);
984263bc
MD
2496 }
2497}
2498
d85e7311
SZ
2499static void
2500pci_print_verbose_expr(const pcicfgregs *cfg)
2501{
2502 const struct pcicfg_expr *expr = &cfg->expr;
2503 const char *port_name;
2504 uint16_t port_type;
2505
2506 if (!bootverbose)
2507 return;
2508
2509 if (expr->expr_ptr == 0) /* No PCI Express capability */
2510 return;
2511
2512 kprintf("\tPCI Express ver.%d cap=0x%04x",
2513 expr->expr_cap & PCIEM_CAP_VER_MASK, expr->expr_cap);
2514 if ((expr->expr_cap & PCIEM_CAP_VER_MASK) != PCIEM_CAP_VER_1)
2515 goto back;
2516
2517 port_type = expr->expr_cap & PCIEM_CAP_PORT_TYPE;
2518
2519 switch (port_type) {
2520 case PCIE_END_POINT:
2521 port_name = "DEVICE";
2522 break;
2523 case PCIE_LEG_END_POINT:
2524 port_name = "LEGDEV";
2525 break;
2526 case PCIE_ROOT_PORT:
2527 port_name = "ROOT";
2528 break;
2529 case PCIE_UP_STREAM_PORT:
2530 port_name = "UPSTREAM";
2531 break;
2532 case PCIE_DOWN_STREAM_PORT:
2533 port_name = "DOWNSTRM";
2534 break;
2535 case PCIE_PCIE2PCI_BRIDGE:
2536 port_name = "PCIE2PCI";
2537 break;
2538 case PCIE_PCI2PCIE_BRIDGE:
2539 port_name = "PCI2PCIE";
2540 break;
2541 default:
2542 port_name = NULL;
2543 break;
2544 }
2545 if ((port_type == PCIE_ROOT_PORT ||
2546 port_type == PCIE_DOWN_STREAM_PORT) &&
2547 !(expr->expr_cap & PCIEM_CAP_SLOT_IMPL))
2548 port_name = NULL;
2549 if (port_name != NULL)
2550 kprintf("[%s]", port_name);
2551
2552 if (pcie_slotimpl(cfg)) {
2553 kprintf(", slotcap=0x%08x", expr->expr_slotcap);
2554 if (expr->expr_slotcap & PCIEM_SLTCAP_HP_CAP)
2555 kprintf("[HOTPLUG]");
2556 }
2557back:
2558 kprintf("\n");
2559}
2560
984263bc 2561static int
4a5a2d63 2562pci_porten(device_t pcib, int b, int s, int f)
984263bc 2563{
4a5a2d63
JS
2564 return (PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2)
2565 & PCIM_CMD_PORTEN) != 0;
984263bc
MD
2566}
2567
2568static int
4a5a2d63 2569pci_memen(device_t pcib, int b, int s, int f)
984263bc 2570{
4a5a2d63
JS
2571 return (PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2)
2572 & PCIM_CMD_MEMEN) != 0;
984263bc
MD
2573}
2574
2575/*
2576 * Add a resource based on a pci map register. Return 1 if the map
2577 * register is a 32bit map register or 2 if it is a 64bit register.
2578 */
2579static int
4d28e78f
SZ
2580pci_add_map(device_t pcib, device_t bus, device_t dev,
2581 int b, int s, int f, int reg, struct resource_list *rl, int force,
2582 int prefetch)
2583{
2584 uint32_t map;
2585 pci_addr_t base;
2586 pci_addr_t start, end, count;
2587 uint8_t ln2size;
2588 uint8_t ln2range;
2589 uint32_t testval;
2590 uint16_t cmd;
984263bc 2591 int type;
4d28e78f
SZ
2592 int barlen;
2593 struct resource *res;
984263bc 2594
4a5a2d63 2595 map = PCIB_READ_CONFIG(pcib, b, s, f, reg, 4);
4a5a2d63
JS
2596 PCIB_WRITE_CONFIG(pcib, b, s, f, reg, 0xffffffff, 4);
2597 testval = PCIB_READ_CONFIG(pcib, b, s, f, reg, 4);
2598 PCIB_WRITE_CONFIG(pcib, b, s, f, reg, map, 4);
984263bc 2599
4d28e78f 2600 if (PCI_BAR_MEM(map)) {
984263bc 2601 type = SYS_RES_MEMORY;
4d28e78f
SZ
2602 if (map & PCIM_BAR_MEM_PREFETCH)
2603 prefetch = 1;
2604 } else
984263bc
MD
2605 type = SYS_RES_IOPORT;
2606 ln2size = pci_mapsize(testval);
2607 ln2range = pci_maprange(testval);
4d28e78f
SZ
2608 base = pci_mapbase(map);
2609 barlen = ln2range == 64 ? 2 : 1;
2610
2611 /*
2612 * For I/O registers, if bottom bit is set, and the next bit up
2613 * isn't clear, we know we have a BAR that doesn't conform to the
2614 * spec, so ignore it. Also, sanity check the size of the data
2615 * areas to the type of memory involved. Memory must be at least
2616 * 16 bytes in size, while I/O ranges must be at least 4.
2617 */
2618 if (PCI_BAR_IO(testval) && (testval & PCIM_BAR_IO_RESERVED) != 0)
2619 return (barlen);
2620 if ((type == SYS_RES_MEMORY && ln2size < 4) ||
2621 (type == SYS_RES_IOPORT && ln2size < 2))
2622 return (barlen);
2623
2624 if (ln2range == 64)
984263bc 2625 /* Read the other half of a 64bit map register */
4d28e78f
SZ
2626 base |= (uint64_t) PCIB_READ_CONFIG(pcib, b, s, f, reg + 4, 4) << 32;
2627 if (bootverbose) {
2628 kprintf("\tmap[%02x]: type %s, range %2d, base %#jx, size %2d",
2629 reg, pci_maptype(map), ln2range, (uintmax_t)base, ln2size);
2630 if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f))
2631 kprintf(", port disabled\n");
2632 else if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f))
2633 kprintf(", memory disabled\n");
2634 else
2635 kprintf(", enabled\n");
984263bc
MD
2636 }
2637
984263bc 2638 /*
4d28e78f
SZ
2639 * If base is 0, then we have problems. It is best to ignore
2640 * such entries for the moment. These will be allocated later if
2641 * the driver specifically requests them. However, some
2642 * removable busses look better when all resources are allocated,
2643 * so allow '0' to be overriden.
2644 *
2645 * Similarly treat maps whose values is the same as the test value
2646 * read back. These maps have had all f's written to them by the
2647 * BIOS in an attempt to disable the resources.
984263bc 2648 */
4d28e78f
SZ
2649 if (!force && (base == 0 || map == testval))
2650 return (barlen);
2651 if ((u_long)base != base) {
2652 device_printf(bus,
2653 "pci%d:%d:%d:%d bar %#x too many address bits",
2654 pci_get_domain(dev), b, s, f, reg);
2655 return (barlen);
984263bc 2656 }
984263bc 2657
4d28e78f
SZ
2658 /*
2659 * This code theoretically does the right thing, but has
2660 * undesirable side effects in some cases where peripherals
2661 * respond oddly to having these bits enabled. Let the user
2662 * be able to turn them off (since pci_enable_io_modes is 1 by
2663 * default).
2664 */
2665 if (pci_enable_io_modes) {
2666 /* Turn on resources that have been left off by a lazy BIOS */
2667 if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f)) {
2668 cmd = PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2);
2669 cmd |= PCIM_CMD_PORTEN;
2670 PCIB_WRITE_CONFIG(pcib, b, s, f, PCIR_COMMAND, cmd, 2);
2671 }
2672 if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f)) {
2673 cmd = PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2);
2674 cmd |= PCIM_CMD_MEMEN;
2675 PCIB_WRITE_CONFIG(pcib, b, s, f, PCIR_COMMAND, cmd, 2);
2676 }
2677 } else {
2678 if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f))
2679 return (barlen);
2680 if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f))
2681 return (barlen);
2682 }
984263bc 2683
4d28e78f
SZ
2684 count = 1 << ln2size;
2685 if (base == 0 || base == pci_mapbase(testval)) {
2686 start = 0; /* Let the parent decide. */
2687 end = ~0ULL;
2688 } else {
2689 start = base;
2690 end = base + (1 << ln2size) - 1;
984263bc 2691 }
4d28e78f 2692 resource_list_add(rl, type, reg, start, end, count);
984263bc 2693
4d28e78f
SZ
2694 /*
2695 * Try to allocate the resource for this BAR from our parent
2696 * so that this resource range is already reserved. The
2697 * driver for this device will later inherit this resource in
2698 * pci_alloc_resource().
2699 */
2700 res = resource_list_alloc(rl, bus, dev, type, &reg, start, end, count,
2701 prefetch ? RF_PREFETCHABLE : 0);
2702 if (res == NULL) {
2703 /*
d0c4beb1
SZ
2704 * If the allocation fails, delete the resource list
2705 * entry to force pci_alloc_resource() to allocate
2706 * resources from the parent.
4d28e78f
SZ
2707 */
2708 resource_list_delete(rl, type, reg);
d0c4beb1
SZ
2709#ifdef PCI_BAR_CLEAR
2710 /* Clear the BAR */
4d28e78f 2711 start = 0;
d0c4beb1
SZ
2712#else /* !PCI_BAR_CLEAR */
2713 /*
2714 * Don't clear BAR here. Some BIOS lists HPET as a
2715 * PCI function, clearing the BAR causes HPET timer
2716 * stop ticking.
2717 */
2718 if (bootverbose) {
2719 kprintf("pci:%d:%d:%d: resource reservation failed "
2720 "%#llx - %#llx\n", b, s, f, start, end);
2721 }
2722 return (barlen);
2723#endif /* PCI_BAR_CLEAR */
2724 } else {
4d28e78f 2725 start = rman_get_start(res);
d0c4beb1 2726 }
4d28e78f
SZ
2727 pci_write_config(dev, reg, start, 4);
2728 if (ln2range == 64)
2729 pci_write_config(dev, reg + 4, start >> 32, 4);
2730 return (barlen);
984263bc
MD
2731}
2732
201eb0a7 2733/*
4d28e78f 2734 * For ATA devices we need to decide early what addressing mode to use.
201eb0a7
TS
2735 * Legacy demands that the primary and secondary ATA ports sits on the
2736 * same addresses that old ISA hardware did. This dictates that we use
4d28e78f 2737 * those addresses and ignore the BAR's if we cannot set PCI native
201eb0a7
TS
2738 * addressing mode.
2739 */
2740static void
4d28e78f
SZ
2741pci_ata_maps(device_t pcib, device_t bus, device_t dev, int b,
2742 int s, int f, struct resource_list *rl, int force, uint32_t prefetchmask)
201eb0a7
TS
2743{
2744 int rid, type, progif;
2745#if 0
2746 /* if this device supports PCI native addressing use it */
2747 progif = pci_read_config(dev, PCIR_PROGIF, 1);
4d28e78f 2748 if ((progif & 0x8a) == 0x8a) {
201eb0a7
TS
2749 if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) &&
2750 pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) {
85f8e2ea 2751 kprintf("Trying ATA native PCI addressing mode\n");
201eb0a7
TS
2752 pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1);
2753 }
2754 }
2755#endif
201eb0a7
TS
2756 progif = pci_read_config(dev, PCIR_PROGIF, 1);
2757 type = SYS_RES_IOPORT;
2758 if (progif & PCIP_STORAGE_IDE_MODEPRIM) {
4d28e78f
SZ
2759 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(0), rl, force,
2760 prefetchmask & (1 << 0));
2761 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(1), rl, force,
2762 prefetchmask & (1 << 1));
201eb0a7
TS
2763 } else {
2764 rid = PCIR_BAR(0);
2765 resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8);
2766 resource_list_alloc(rl, bus, dev, type, &rid, 0x1f0, 0x1f7, 8,
4d28e78f 2767 0);
201eb0a7
TS
2768 rid = PCIR_BAR(1);
2769 resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1);
2770 resource_list_alloc(rl, bus, dev, type, &rid, 0x3f6, 0x3f6, 1,
4d28e78f 2771 0);
201eb0a7
TS
2772 }
2773 if (progif & PCIP_STORAGE_IDE_MODESEC) {
4d28e78f
SZ
2774 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(2), rl, force,
2775 prefetchmask & (1 << 2));
2776 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(3), rl, force,
2777 prefetchmask & (1 << 3));
201eb0a7
TS
2778 } else {
2779 rid = PCIR_BAR(2);
2780 resource_list_add(rl, type, rid, 0x170, 0x177, 8);
2781 resource_list_alloc(rl, bus, dev, type, &rid, 0x170, 0x177, 8,
4d28e78f 2782 0);
201eb0a7
TS
2783 rid = PCIR_BAR(3);
2784 resource_list_add(rl, type, rid, 0x376, 0x376, 1);
2785 resource_list_alloc(rl, bus, dev, type, &rid, 0x376, 0x376, 1,
4d28e78f 2786 0);
201eb0a7 2787 }
4d28e78f
SZ
2788 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(4), rl, force,
2789 prefetchmask & (1 << 4));
2790 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(5), rl, force,
2791 prefetchmask & (1 << 5));
201eb0a7 2792}
201eb0a7 2793
984263bc 2794static void
4d28e78f
SZ
2795pci_assign_interrupt(device_t bus, device_t dev, int force_route)
2796{
2797 struct pci_devinfo *dinfo = device_get_ivars(dev);
2798 pcicfgregs *cfg = &dinfo->cfg;
2799 char tunable_name[64];
2800 int irq;
2801
2802 /* Has to have an intpin to have an interrupt. */
2803 if (cfg->intpin == 0)
2804 return;
2805
2806 /* Let the user override the IRQ with a tunable. */
2807 irq = PCI_INVALID_IRQ;
2808 ksnprintf(tunable_name, sizeof(tunable_name),
2809 "hw.pci%d.%d.%d.INT%c.irq",
2810 cfg->domain, cfg->bus, cfg->slot, cfg->intpin + 'A' - 1);
2811 if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0))
2812 irq = PCI_INVALID_IRQ;
2813
2814 /*
2815 * If we didn't get an IRQ via the tunable, then we either use the
2816 * IRQ value in the intline register or we ask the bus to route an
2817 * interrupt for us. If force_route is true, then we only use the
2818 * value in the intline register if the bus was unable to assign an
2819 * IRQ.
2820 */
2821 if (!PCI_INTERRUPT_VALID(irq)) {
2822 if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route)
2823 irq = PCI_ASSIGN_INTERRUPT(bus, dev);
2824 if (!PCI_INTERRUPT_VALID(irq))
2825 irq = cfg->intline;
2826 }
2827
2828 /* If after all that we don't have an IRQ, just bail. */
2829 if (!PCI_INTERRUPT_VALID(irq))
2830 return;
2831
2832 /* Update the config register if it changed. */
2833 if (irq != cfg->intline) {
2834 cfg->intline = irq;
2835 pci_write_config(dev, PCIR_INTLINE, irq, 1);
2836 }
2837
2838 /* Add this IRQ as rid 0 interrupt resource. */
2839 resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1);
2840}
2841
2842void
2843pci_add_resources(device_t pcib, device_t bus, device_t dev, int force, uint32_t prefetchmask)
984263bc
MD
2844{
2845 struct pci_devinfo *dinfo = device_get_ivars(dev);
4a5a2d63 2846 pcicfgregs *cfg = &dinfo->cfg;
984263bc
MD
2847 struct resource_list *rl = &dinfo->resources;
2848 struct pci_quirk *q;
e126caf1 2849 int b, i, f, s;
984263bc 2850
e126caf1
MD
2851 b = cfg->bus;
2852 s = cfg->slot;
2853 f = cfg->func;
4d28e78f
SZ
2854
2855 /* ATA devices needs special map treatment */
201eb0a7
TS
2856 if ((pci_get_class(dev) == PCIC_STORAGE) &&
2857 (pci_get_subclass(dev) == PCIS_STORAGE_IDE) &&
d3d1ea7a
MD
2858 ((pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV) ||
2859 (!pci_read_config(dev, PCIR_BAR(0), 4) &&
2860 !pci_read_config(dev, PCIR_BAR(2), 4))) )
4d28e78f 2861 pci_ata_maps(pcib, bus, dev, b, s, f, rl, force, prefetchmask);
201eb0a7 2862 else
4d28e78f
SZ
2863 for (i = 0; i < cfg->nummaps;)
2864 i += pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(i),
2865 rl, force, prefetchmask & (1 << i));
984263bc 2866
4d28e78f
SZ
2867 /*
2868 * Add additional, quirked resources.
2869 */
984263bc
MD
2870 for (q = &pci_quirks[0]; q->devid; q++) {
2871 if (q->devid == ((cfg->device << 16) | cfg->vendor)
2872 && q->type == PCI_QUIRK_MAP_REG)
4d28e78f
SZ
2873 pci_add_map(pcib, bus, dev, b, s, f, q->arg1, rl,
2874 force, 0);
984263bc
MD
2875 }
2876
4d28e78f 2877 if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) {
a0f63ddc 2878#if defined(__PCI_REROUTE_INTERRUPT) || defined(APIC_IO)
4d28e78f
SZ
2879 /*
2880 * Try to re-route interrupts. Sometimes the BIOS or
2881 * firmware may leave bogus values in these registers.
2882 * If the re-route fails, then just stick with what we
2883 * have.
2884 */
2885 pci_assign_interrupt(bus, dev, 1);
2886#else
2887 pci_assign_interrupt(bus, dev, 0);
2888#endif
2889 }
984263bc
MD
2890}
2891
e126caf1 2892void
4d28e78f 2893pci_add_children(device_t dev, int domain, int busno, size_t dinfo_size)
984263bc 2894{
4d28e78f 2895#define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
4a5a2d63 2896 device_t pcib = device_get_parent(dev);
e126caf1 2897 struct pci_devinfo *dinfo;
4a5a2d63 2898 int maxslots;
e126caf1
MD
2899 int s, f, pcifunchigh;
2900 uint8_t hdrtype;
2901
4d28e78f
SZ
2902 KASSERT(dinfo_size >= sizeof(struct pci_devinfo),
2903 ("dinfo_size too small"));
4a5a2d63 2904 maxslots = PCIB_MAXSLOTS(pcib);
57e943f7 2905 for (s = 0; s <= maxslots; s++) {
e126caf1
MD
2906 pcifunchigh = 0;
2907 f = 0;
4d28e78f 2908 DELAY(1);
e126caf1
MD
2909 hdrtype = REG(PCIR_HDRTYPE, 1);
2910 if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
2911 continue;
2912 if (hdrtype & PCIM_MFDEV)
2913 pcifunchigh = PCI_FUNCMAX;
5e658043 2914 for (f = 0; f <= pcifunchigh; f++) {
4d28e78f
SZ
2915 dinfo = pci_read_device(pcib, domain, busno, s, f,
2916 dinfo_size);
984263bc 2917 if (dinfo != NULL) {
e126caf1 2918 pci_add_child(dev, dinfo);
984263bc
MD
2919 }
2920 }
2921 }
e126caf1
MD
2922#undef REG
2923}
2924
2925void
2926pci_add_child(device_t bus, struct pci_devinfo *dinfo)
2927{
2928 device_t pcib;
2929
2930 pcib = device_get_parent(bus);
2931 dinfo->cfg.dev = device_add_child(bus, NULL, -1);
2932 device_set_ivars(dinfo->cfg.dev, dinfo);
4d28e78f 2933 resource_list_init(&dinfo->resources);
638744c5
HT
2934 pci_cfg_save(dinfo->cfg.dev, dinfo, 0);
2935 pci_cfg_restore(dinfo->cfg.dev, dinfo);
e126caf1 2936 pci_print_verbose(dinfo);
4d28e78f 2937 pci_add_resources(pcib, bus, dinfo->cfg.dev, 0, 0);
984263bc
MD
2938}
2939
2940static int
4a5a2d63 2941pci_probe(device_t dev)
984263bc 2942{
984263bc 2943 device_set_desc(dev, "PCI bus");
4a5a2d63 2944
4d28e78f
SZ
2945 /* Allow other subclasses to override this driver. */
2946 return (-1000);
984263bc
MD
2947}
2948
2949static int
e126caf1
MD
2950pci_attach(device_t dev)
2951{
4d28e78f
SZ
2952 int busno, domain;
2953
2954 /*
2955 * Since there can be multiple independantly numbered PCI
2956 * busses on systems with multiple PCI domains, we can't use
2957 * the unit number to decide which bus we are probing. We ask
2958 * the parent pcib what our domain and bus numbers are.
2959 */
2960 domain = pcib_get_domain(dev);
2961 busno = pcib_get_bus(dev);
2962 if (bootverbose)
2963 device_printf(dev, "domain=%d, physical bus=%d\n",
2964 domain, busno);
e4c9c0c8 2965
4d28e78f 2966 pci_add_children(dev, domain, busno, sizeof(struct pci_devinfo));
e126caf1 2967
4d28e78f
SZ
2968 return (bus_generic_attach(dev));
2969}
2970
2971int
2972pci_suspend(device_t dev)
2973{
2974 int dstate, error, i, numdevs;
2975 device_t acpi_dev, child, *devlist;
2976 struct pci_devinfo *dinfo;
2977
2978 /*
2979 * Save the PCI configuration space for each child and set the
2980 * device in the appropriate power state for this sleep state.
2981 */
2982 acpi_dev = NULL;
2983 if (pci_do_power_resume)
2984 acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
2985 device_get_children(dev, &devlist, &numdevs);
2986 for (i = 0; i < numdevs; i++) {
2987 child = devlist[i];
2988 dinfo = (struct pci_devinfo *) device_get_ivars(child);
2989 pci_cfg_save(child, dinfo, 0);
2990 }
e126caf1 2991
4d28e78f
SZ
2992 /* Suspend devices before potentially powering them down. */
2993 error = bus_generic_suspend(dev);
2994 if (error) {
2995 kfree(devlist, M_TEMP);
2996 return (error);
2997 }
e126caf1 2998
4d28e78f
SZ
2999 /*
3000 * Always set the device to D3. If ACPI suggests a different
3001 * power state, use it instead. If ACPI is not present, the
3002 * firmware is responsible for managing device power. Skip
3003 * children who aren't attached since they are powered down
3004 * separately. Only manage type 0 devices for now.
3005 */
3006 for (i = 0; acpi_dev && i < numdevs; i++) {
3007 child = devlist[i];
3008 dinfo = (struct pci_devinfo *) device_get_ivars(child);
3009 if (device_is_attached(child) && dinfo->cfg.hdrtype == 0) {
3010 dstate = PCI_POWERSTATE_D3;
3011 ACPI_PWR_FOR_SLEEP(acpi_dev, child, &dstate);
3012 pci_set_powerstate(child, dstate);
3013 }
3014 }
3015 kfree(devlist, M_TEMP);
3016 return (0);
e126caf1
MD
3017}
3018
4d28e78f
SZ
3019int
3020pci_resume(device_t dev)
984263bc 3021{
4d28e78f
SZ
3022 int i, numdevs;
3023 device_t acpi_dev, child, *devlist;
3024 struct pci_devinfo *dinfo;
3025
3026 /*
3027 * Set each child to D0 and restore its PCI configuration space.
3028 */
3029 acpi_dev = NULL;
3030 if (pci_do_power_resume)
3031 acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
3032 device_get_children(dev, &devlist, &numdevs);
3033 for (i = 0; i < numdevs; i++) {
3034 /*
3035 * Notify ACPI we're going to D0 but ignore the result. If
3036 * ACPI is not present, the firmware is responsible for
3037 * managing device power. Only manage type 0 devices for now.
3038 */
3039 child = devlist[i];
3040 dinfo = (struct pci_devinfo *) device_get_ivars(child);
3041 if (acpi_dev && device_is_attached(child) &&
3042 dinfo->cfg.hdrtype == 0) {
3043 ACPI_PWR_FOR_SLEEP(acpi_dev, child, NULL);
3044 pci_set_powerstate(child, PCI_POWERSTATE_D0);
3045 }
3046
3047 /* Now the device is powered up, restore its config space. */
3048 pci_cfg_restore(child, dinfo);
3049 }
3050 kfree(devlist, M_TEMP);
3051 return (bus_generic_resume(dev));
3052}
3053
/*
 * Locate the preloaded "pci_vendor_data" module (the flat-text PCI vendor
 * database) and record its address and size in the pci_vendordata globals
 * consumed by pci_describe_device().
 */
static void
pci_load_vendor_data(void)
{
	caddr_t vendordata, info;

	if ((vendordata = preload_search_by_type("pci_vendor_data")) != NULL) {
		/*
		 * NOTE(review): preload_search_info() results are
		 * dereferenced without NULL checks; assumes the loader
		 * always supplies MODINFO_ADDR and MODINFO_SIZE for a
		 * found module — verify against the boot loader contract.
		 */
		info = preload_search_info(vendordata, MODINFO_ADDR);
		pci_vendordata = *(char **)info;
		info = preload_search_info(vendordata, MODINFO_SIZE);
		pci_vendordata_size = *(size_t *)info;
		/*
		 * terminate the database
		 *
		 * NOTE(review): this stores one byte past the reported
		 * size; presumably the loader leaves slack after the
		 * image — confirm.
		 */
		pci_vendordata[pci_vendordata_size] = '\n';
	}
}
3068
/*
 * Bus driver_added method: when a new PCI driver is registered, give it a
 * chance to identify children and reprobe every child that is currently
 * unclaimed (DS_NOTPRESENT).  Config space is restored before the probe
 * and saved again (powering the device down) if no driver attaches.
 */
void
pci_driver_added(device_t dev, driver_t *driver)
{
	int numdevs;
	device_t *devlist;
	device_t child;
	struct pci_devinfo *dinfo;
	int i;

	if (bootverbose)
		device_printf(dev, "driver added\n");
	/* Let the new driver create any devices it can identify. */
	DEVICE_IDENTIFY(driver, dev);
	device_get_children(dev, &devlist, &numdevs);
	for (i = 0; i < numdevs; i++) {
		child = devlist[i];
		/* Only reprobe children no driver has claimed yet. */
		if (device_get_state(child) != DS_NOTPRESENT)
			continue;
		dinfo = device_get_ivars(child);
		pci_print_verbose(dinfo);
		if (bootverbose)
			kprintf("pci%d:%d:%d:%d: reprobing on driver added\n",
			    dinfo->cfg.domain, dinfo->cfg.bus, dinfo->cfg.slot,
			    dinfo->cfg.func);
		/* Power up / restore config space before probing. */
		pci_cfg_restore(child, dinfo);
		if (device_probe_and_attach(child) != 0)
			pci_cfg_save(child, dinfo, 1);
	}
	kfree(devlist, M_TEMP);
}
3098
/*
 * Bus child_detached method: once a driver lets go of a child, save its
 * config space and power it down (the "1" argument to pci_cfg_save()).
 */
static void
pci_child_detached(device_t parent __unused, device_t child)
{
	/* Turn child's power off */
	pci_cfg_save(child, device_get_ivars(child), 1);
}
3105
/*
 * Bus setup_intr method.  Install an interrupt handler via the generic
 * bus code; for direct children, additionally manage INTx/MSI/MSI-X
 * state: enable INTx for legacy interrupts (rid 0) or, when MSI support
 * is compiled in, lazily map MSI/MSI-X vectors through the parent bridge
 * (PCIB_MAP_MSI), enable the message, bump the per-vector handler count,
 * and disable INTx.
 *
 * Returns 0 on success or the errno from setup/mapping; on a mapping
 * failure the just-installed handler is torn down again.
 */
int
pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags,
    driver_intr_t *intr, void *arg, void **cookiep, lwkt_serialize_t serializer)
{
#ifdef MSI
	struct pci_devinfo *dinfo;
	struct msix_table_entry *mte;
	struct msix_vector *mv;
	uint64_t addr;
	uint32_t data;
	int rid;
#endif
	int error;
	void *cookie;

	error = bus_generic_setup_intr(dev, child, irq, flags, intr,
	    arg, &cookie, serializer);
	if (error)
		return (error);

	/* If this is not a direct child, just bail out. */
	if (device_get_parent(child) != dev) {
		*cookiep = cookie;
		return(0);
	}

	/* Enable INTx for our own children (may be re-disabled below). */
	pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
#ifdef MSI
	rid = rman_get_rid(irq);
	if (rid == 0) {
		/* Make sure that INTx is enabled */
		pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
	} else {
		/*
		 * Check to see if the interrupt is MSI or MSI-X.
		 * Ask our parent to map the MSI and give
		 * us the address and data register values.
		 * If we fail for some reason, teardown the
		 * interrupt handler.
		 */
		dinfo = device_get_ivars(child);
		if (dinfo->cfg.msi.msi_alloc > 0) {
			/* First handler on this MSI: map and enable it. */
			if (dinfo->cfg.msi.msi_addr == 0) {
				KASSERT(dinfo->cfg.msi.msi_handlers == 0,
			    ("MSI has handlers, but vectors not mapped"));
				error = PCIB_MAP_MSI(device_get_parent(dev),
				    child, rman_get_start(irq), &addr, &data);
				if (error)
					goto bad;
				dinfo->cfg.msi.msi_addr = addr;
				dinfo->cfg.msi.msi_data = data;
				pci_enable_msi(child, addr, data);
			}
			dinfo->cfg.msi.msi_handlers++;
		} else {
			/* MSI-X: rid N maps to table entry N-1. */
			KASSERT(dinfo->cfg.msix.msix_alloc > 0,
			    ("No MSI or MSI-X interrupts allocated"));
			KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
			    ("MSI-X index too high"));
			mte = &dinfo->cfg.msix.msix_table[rid - 1];
			KASSERT(mte->mte_vector != 0, ("no message vector"));
			mv = &dinfo->cfg.msix.msix_vectors[mte->mte_vector - 1];
			KASSERT(mv->mv_irq == rman_get_start(irq),
			    ("IRQ mismatch"));
			if (mv->mv_address == 0) {
				KASSERT(mte->mte_handlers == 0,
		    ("MSI-X table entry has handlers, but vector not mapped"));
				error = PCIB_MAP_MSI(device_get_parent(dev),
				    child, rman_get_start(irq), &addr, &data);
				if (error)
					goto bad;
				mv->mv_address = addr;
				mv->mv_data = data;
			}
			if (mte->mte_handlers == 0) {
				pci_enable_msix(child, rid - 1, mv->mv_address,
				    mv->mv_data);
				pci_unmask_msix(child, rid - 1);
			}
			mte->mte_handlers++;
		}

		/* Make sure that INTx is disabled if we are using MSI/MSIX */
		pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
	bad:
		if (error) {
			/* Undo the generic setup done above. */
			(void)bus_generic_teardown_intr(dev, child, irq,
			    cookie);
			return (error);
		}
	}
#endif
	*cookiep = cookie;
	return (0);
}
3200
3201int
3202pci_teardown_intr(device_t dev, device_t child, struct resource *irq,
3203 void *cookie)
3204{
3205#ifdef MSI
3206 struct msix_table_entry *mte;
3207 struct resource_list_entry *rle;
3208 struct pci_devinfo *dinfo;
35b72619 3209 int rid;
4d28e78f 3210#endif
35b72619 3211 int error;
4d28e78f
SZ
3212
3213 if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE))
3214 return (EINVAL);
3215
3216 /* If this isn't a direct child, just bail out */
3217 if (device_get_parent(child) != dev)
3218 return(bus_generic_teardown_intr(dev, child, irq, cookie));
3219
3220 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
3221#ifdef MSI
3222 rid = rman_get_rid(irq);
3223 if (rid == 0) {
3224 /* Mask INTx */
3225 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
3226 } else {
3227 /*
3228 * Check to see if the interrupt is MSI or MSI-X. If so,
3229 * decrement the appropriate handlers count and mask the
3230 * MSI-X message, or disable MSI messages if the count
3231 * drops to 0.
3232 */
3233 dinfo = device_get_ivars(child);
3234 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
3235 if (rle->res != irq)
3236 return (EINVAL);
3237 if (dinfo->cfg.msi.msi_alloc > 0) {
3238 KASSERT(rid <= dinfo->cfg.msi.msi_alloc,
3239 ("MSI-X index too high"));
3240 if (dinfo->cfg.msi.msi_handlers == 0)
3241 return (EINVAL);
3242 dinfo->cfg.msi.msi_handlers--;
3243 if (dinfo->cfg.msi.msi_handlers == 0)
3244 pci_disable_msi(child);
3245 } else {
3246 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
3247 ("No MSI or MSI-X interrupts allocated"));
3248 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
3249 ("MSI-X index too high"));
3250 mte = &dinfo->cfg.msix.msix_table[rid - 1];
3251 if (mte->mte_handlers == 0)
3252 return (EINVAL);
3253 mte->mte_handlers--;
3254 if (mte->mte_handlers == 0)
3255 pci_mask_msix(child, rid - 1);
984263bc
MD
3256 }
3257 }
4d28e78f
SZ
3258 error = bus_generic_teardown_intr(dev, child, irq, cookie);
3259 if (rid > 0)
3260 KASSERT(error == 0,
3261 ("%s: generic teardown failed for MSI/MSI-X", __func__));
3262#endif
3263 error = bus_generic_teardown_intr(dev, child, irq, cookie);
3264 return (error);
984263bc
MD
3265}
3266
e126caf1 3267int
984263bc
MD
3268pci_print_child(device_t dev, device_t child)
3269{
3270 struct pci_devinfo *dinfo;
3271 struct resource_list *rl;
984263bc
MD
3272 int retval = 0;
3273
3274 dinfo = device_get_ivars(child);
984263bc
MD
3275 rl = &dinfo->resources;
3276
3277 retval += bus_print_child_header(dev, child);
3278
4d28e78f
SZ
3279 retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx");
3280 retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx");
3281 retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld");
984263bc 3282 if (device_get_flags(dev))
85f8e2ea 3283 retval += kprintf(" flags %#x", device_get_flags(dev));
984263bc 3284
85f8e2ea 3285 retval += kprintf(" at device %d.%d", pci_get_slot(child),
4d28e78f 3286 pci_get_function(child));
984263bc
MD
3287
3288 retval += bus_print_child_footer(dev, child);
3289
3290 return (retval);
3291}
3292
/*
 * Class/subclass description table consulted by pci_probe_nomatch() when
 * no driver claims a device and the vendor database has no entry.  A
 * subclass of -1 marks the generic description for the whole class; the
 * table is terminated by a NULL desc.
 */
static struct
{
	int	class;
	int	subclass;
	char	*desc;
} pci_nomatch_tab[] = {
	{PCIC_OLD,		-1,			"old"},
	{PCIC_OLD,		PCIS_OLD_NONVGA,	"non-VGA display device"},
	{PCIC_OLD,		PCIS_OLD_VGA,		"VGA-compatible display device"},
	{PCIC_STORAGE,		-1,			"mass storage"},
	{PCIC_STORAGE,		PCIS_STORAGE_SCSI,	"SCSI"},
	{PCIC_STORAGE,		PCIS_STORAGE_IDE,	"ATA"},
	{PCIC_STORAGE,		PCIS_STORAGE_FLOPPY,	"floppy disk"},
	{PCIC_STORAGE,		PCIS_STORAGE_IPI,	"IPI"},
	{PCIC_STORAGE,		PCIS_STORAGE_RAID,	"RAID"},
	{PCIC_STORAGE,		PCIS_STORAGE_ATA_ADMA,	"ATA (ADMA)"},
	{PCIC_STORAGE,		PCIS_STORAGE_SATA,	"SATA"},
	{PCIC_STORAGE,		PCIS_STORAGE_SAS,	"SAS"},
	{PCIC_NETWORK,		-1,			"network"},
	{PCIC_NETWORK,		PCIS_NETWORK_ETHERNET,	"ethernet"},
	{PCIC_NETWORK,		PCIS_NETWORK_TOKENRING,	"token ring"},
	{PCIC_NETWORK,		PCIS_NETWORK_FDDI,	"fddi"},
	{PCIC_NETWORK,		PCIS_NETWORK_ATM,	"ATM"},
	{PCIC_NETWORK,		PCIS_NETWORK_ISDN,	"ISDN"},
	{PCIC_DISPLAY,		-1,			"display"},
	{PCIC_DISPLAY,		PCIS_DISPLAY_VGA,	"VGA"},
	{PCIC_DISPLAY,		PCIS_DISPLAY_XGA,	"XGA"},
	{PCIC_DISPLAY,		PCIS_DISPLAY_3D,	"3D"},
	{PCIC_MULTIMEDIA,	-1,			"multimedia"},
	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_VIDEO,	"video"},
	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_AUDIO,	"audio"},
	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_TELE,	"telephony"},
	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_HDA,	"HDA"},
	{PCIC_MEMORY,		-1,			"memory"},
	{PCIC_MEMORY,		PCIS_MEMORY_RAM,	"RAM"},
	{PCIC_MEMORY,		PCIS_MEMORY_FLASH,	"flash"},
	{PCIC_BRIDGE,		-1,			"bridge"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_HOST,	"HOST-PCI"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_ISA,	"PCI-ISA"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_EISA,	"PCI-EISA"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_MCA,	"PCI-MCA"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_PCI,	"PCI-PCI"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_PCMCIA,	"PCI-PCMCIA"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_NUBUS,	"PCI-NuBus"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_CARDBUS,	"PCI-CardBus"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_RACEWAY,	"PCI-RACEway"},
	{PCIC_SIMPLECOMM,	-1,			"simple comms"},
	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_UART,	"UART"},	/* could detect 16550 */
	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_PAR,	"parallel port"},
	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_MULSER,	"multiport serial"},
	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_MODEM,	"generic modem"},
	{PCIC_BASEPERIPH,	-1,			"base peripheral"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_PIC,	"interrupt controller"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_DMA,	"DMA controller"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_TIMER,	"timer"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_RTC,	"realtime clock"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_PCIHOT,	"PCI hot-plug controller"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_SDHC,	"SD host controller"},
	{PCIC_INPUTDEV,		-1,			"input device"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_KEYBOARD,	"keyboard"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_DIGITIZER,"digitizer"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_MOUSE,	"mouse"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_SCANNER,	"scanner"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_GAMEPORT,	"gameport"},
	{PCIC_DOCKING,		-1,			"docking station"},
	{PCIC_PROCESSOR,	-1,			"processor"},
	{PCIC_SERIALBUS,	-1,			"serial bus"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_FW,	"FireWire"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_ACCESS,	"AccessBus"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_SSA,	"SSA"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_USB,	"USB"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_FC,	"Fibre Channel"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_SMBUS,	"SMBus"},
	{PCIC_WIRELESS,		-1,			"wireless controller"},
	{PCIC_WIRELESS,		PCIS_WIRELESS_IRDA,	"iRDA"},
	{PCIC_WIRELESS,		PCIS_WIRELESS_IR,	"IR"},
	{PCIC_WIRELESS,		PCIS_WIRELESS_RF,	"RF"},
	{PCIC_INTELLIIO,	-1,			"intelligent I/O controller"},
	{PCIC_INTELLIIO,	PCIS_INTELLIIO_I2O,	"I2O"},
	{PCIC_SATCOM,		-1,			"satellite communication"},
	{PCIC_SATCOM,		PCIS_SATCOM_TV,		"sat TV"},
	{PCIC_SATCOM,		PCIS_SATCOM_AUDIO,	"sat audio"},
	{PCIC_SATCOM,		PCIS_SATCOM_VOICE,	"sat voice"},
	{PCIC_SATCOM,		PCIS_SATCOM_DATA,	"sat data"},
	{PCIC_CRYPTO,		-1,			"encrypt/decrypt"},
	{PCIC_CRYPTO,		PCIS_CRYPTO_NETCOMP,	"network/computer crypto"},
	{PCIC_CRYPTO,		PCIS_CRYPTO_ENTERTAIN,	"entertainment crypto"},
	{PCIC_DASP,		-1,			"dasp"},
	{PCIC_DASP,		PCIS_DASP_DPIO,		"DPIO module"},
	{0, 0, NULL}
};
3384
/*
 * Bus probe_nomatch method: report a PCI device that no driver claimed.
 * Prefer a description from the loaded vendor database; otherwise fall
 * back to the class/subclass strings in pci_nomatch_tab.  Finally save
 * config space and power the device down.
 */
void
pci_probe_nomatch(device_t dev, device_t child)
{
	int i;
	char *cp, *scp, *device;

	/*
	 * Look for a listing for this device in a loaded device database.
	 */
	if ((device = pci_describe_device(child)) != NULL) {
		device_printf(dev, "<%s>", device);
		kfree(device, M_DEVBUF);
	} else {
		/*
		 * Scan the class/subclass descriptions for a general
		 * description.
		 */
		cp = "unknown";
		scp = NULL;
		for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) {
			if (pci_nomatch_tab[i].class == pci_get_class(child)) {
				if (pci_nomatch_tab[i].subclass == -1) {
					cp = pci_nomatch_tab[i].desc;
				} else if (pci_nomatch_tab[i].subclass ==
				    pci_get_subclass(child)) {
					scp = pci_nomatch_tab[i].desc;
				}
			}
		}
		/* Prints "<class>", "<class, subclass>", or "<unknown>". */
		device_printf(dev, "<%s%s%s>",
		    cp ? cp : "",
		    ((cp != NULL) && (scp != NULL)) ? ", " : "",
		    scp ? scp : "");
	}
	kprintf(" (vendor 0x%04x, dev 0x%04x) at device %d.%d",
	    pci_get_vendor(child), pci_get_device(child),
	    pci_get_slot(child), pci_get_function(child));
	if (pci_get_intpin(child) > 0) {
		int irq;

		irq = pci_get_irq(child);
		if (PCI_INTERRUPT_VALID(irq))
			kprintf(" irq %d", irq);
	}
	kprintf("\n");

	/* Unclaimed device: save its config space and power it down. */
	pci_cfg_save(child, (struct pci_devinfo *)device_get_ivars(child), 1);
}
3433
4d28e78f
SZ
3434/*
3435 * Parse the PCI device database, if loaded, and return a pointer to a
3436 * description of the device.
3437 *
3438 * The database is flat text formatted as follows:
3439 *
3440 * Any line not in a valid format is ignored.
3441 * Lines are terminated with newline '\n' characters.
3442 *
3443 * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then
3444 * the vendor name.
3445 *
3446 * A DEVICE line is entered immediately below the corresponding VENDOR ID.
3447 * - devices cannot be listed without a corresponding VENDOR line.
3448 * A DEVICE line consists of a TAB, the 4 digit (hex) device code,
3449 * another TAB, then the device name.
3450 */
3451
3452/*
3453 * Assuming (ptr) points to the beginning of a line in the database,
3454 * return the vendor or device and description of the next entry.
3455 * The value of (vendor) or (device) inappropriate for the entry type
3456 * is set to -1. Returns nonzero at the end of the database.
3457 *
3458 * Note that this is slightly unrobust in the face of corrupt data;
3459 * we attempt to safeguard against this by spamming the end of the
3460 * database with a newline when we initialise.
3461 */
3462static int
3463pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc)
3464{
3465 char *cp = *ptr;
3466 int left;
3467
3468 *device = -1;
3469 *vendor = -1;
3470 **desc = '\0';
3471 for (;;) {
3472 left = pci_vendordata_size - (cp - pci_vendordata);
3473 if (left <= 0) {
3474 *ptr = cp;
3475 return(1);
3476 }
3477
3478 /* vendor entry? */
3479 if (*cp != '\t' &&
3480 ksscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2)
3481 break;
3482 /* device entry? */
3483 if (*cp == '\t' &&
3484 ksscanf(cp, "%x\t%80[^\n]", device, *desc) == 2)
3485 break;
3486
3487 /* skip to next line */
3488 while (*cp != '\n' && left > 0) {
3489 cp++;
3490 left--;
3491 }
3492 if (*cp == '\n') {
3493 cp++;
3494 left--;
3495 }
3496 }
3497 /* skip to next line */
3498 while (*cp != '\n' && left > 0) {
3499 cp++;
3500 left--;
3501 }
3502 if (*cp == '\n' && left > 0)
3503 cp++;
3504 *ptr = cp;
3505 return(0);
3506}
3507
/*
 * Build a "vendor, device" description string for (dev) from the loaded
 * vendor database, or return NULL if the database is absent, memory is
 * short, or the vendor is unlisted.  An unlisted device under a listed
 * vendor is rendered as its hex device ID.
 *
 * The caller owns the returned M_DEVBUF string and must kfree() it.
 */
static char *
pci_describe_device(device_t dev)
{
	int vendor, device;
	char *desc, *vp, *dp, *line;

	desc = vp = dp = NULL;

	/*
	 * If we have no vendor data, we can't do anything.
	 */
	if (pci_vendordata == NULL)
		goto out;

	/*
	 * Scan the vendor data looking for this device
	 */
	line = pci_vendordata;
	/* 80-byte scratch buffers; parse_line truncates to fit. */
	if ((vp = kmalloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
		goto out;
	for (;;) {
		if (pci_describe_parse_line(&line, &vendor, &device, &vp))
			goto out;
		if (vendor == pci_get_vendor(dev))
			break;
	}
	if ((dp = kmalloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
		goto out;
	for (;;) {
		/* A new vendor entry or EOF ends this vendor's devices. */
		if (pci_describe_parse_line(&line, &vendor, &device, &dp)) {
			*dp = 0;
			break;
		}
		if (vendor != -1) {
			*dp = 0;
			break;
		}
		if (device == pci_get_device(dev))
			break;
	}
	if (dp[0] == '\0')
		ksnprintf(dp, 80, "0x%x", pci_get_device(dev));
	if ((desc = kmalloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) !=
	    NULL)
		ksprintf(desc, "%s, %s", vp, dp);
 out:
	if (vp != NULL)
		kfree(vp, M_DEVBUF);
	if (dp != NULL)
		kfree(dp, M_DEVBUF);
	return(desc);
}
3560
/*
 * Bus read_ivar method: expose the cached PCI config registers (and
 * capability pointers) of a child as instance variables.
 *
 * Returns 0 on success, EINVAL for PCI_IVAR_ETHADDR (unsupported here),
 * or ENOENT for an unknown ivar.
 */
int
pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
{
	struct pci_devinfo *dinfo;
	pcicfgregs *cfg;

	dinfo = device_get_ivars(child);
	cfg = &dinfo->cfg;

	switch (which) {
	case PCI_IVAR_ETHADDR:
		/*
		 * The generic accessor doesn't deal with failure, so
		 * we set the return value, then return an error.
		 */
		*((uint8_t **) result) = NULL;
		return (EINVAL);
	case PCI_IVAR_SUBVENDOR:
		*result = cfg->subvendor;
		break;
	case PCI_IVAR_SUBDEVICE:
		*result = cfg->subdevice;
		break;
	case PCI_IVAR_VENDOR:
		*result = cfg->vendor;
		break;
	case PCI_IVAR_DEVICE:
		*result = cfg->device;
		break;
	case PCI_IVAR_DEVID:
		/* Composite 32-bit ID: device in the high half. */
		*result = (cfg->device << 16) | cfg->vendor;
		break;
	case PCI_IVAR_CLASS:
		*result = cfg->baseclass;
		break;
	case PCI_IVAR_SUBCLASS:
		*result = cfg->subclass;
		break;
	case PCI_IVAR_PROGIF:
		*result = cfg->progif;
		break;
	case PCI_IVAR_REVID:
		*result = cfg->revid;
		break;
	case PCI_IVAR_INTPIN:
		*result = cfg->intpin;
		break;
	case PCI_IVAR_IRQ:
		*result = cfg->intline;
		break;
	case PCI_IVAR_DOMAIN:
		*result = cfg->domain;
		break;
	case PCI_IVAR_BUS:
		*result = cfg->bus;
		break;
	case PCI_IVAR_SLOT:
		*result = cfg->slot;
		break;
	case PCI_IVAR_FUNCTION:
		*result = cfg->func;
		break;
	case PCI_IVAR_CMDREG:
		*result = cfg->cmdreg;
		break;
	case PCI_IVAR_CACHELNSZ:
		*result = cfg->cachelnsz;
		break;
	case PCI_IVAR_MINGNT:
		*result = cfg->mingnt;
		break;
	case PCI_IVAR_MAXLAT:
		*result = cfg->maxlat;
		break;
	case PCI_IVAR_LATTIMER:
		*result = cfg->lattimer;
		break;
	case PCI_IVAR_PCIXCAP_PTR:
		*result = cfg->pcix.pcix_ptr;
		break;
	case PCI_IVAR_PCIECAP_PTR:
		*result = cfg->expr.expr_ptr;
		break;
	case PCI_IVAR_VPDCAP_PTR:
		*result = cfg->vpd.vpd_reg;
		break;
	default:
		return (ENOENT);
	}
	return (0);
}
3652
/*
 * Bus write_ivar method.  Only PCI_IVAR_INTPIN is writable; the other
 * identification ivars are read-only (EINVAL), and anything else is
 * unknown (ENOENT).
 */
int
pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
{
	struct pci_devinfo *dinfo;

	dinfo = device_get_ivars(child);

	switch (which) {
	case PCI_IVAR_INTPIN:
		dinfo->cfg.intpin = value;
		return (0);
	case PCI_IVAR_ETHADDR:
	case PCI_IVAR_SUBVENDOR:
	case PCI_IVAR_SUBDEVICE:
	case PCI_IVAR_VENDOR:
	case PCI_IVAR_DEVICE:
	case PCI_IVAR_DEVID:
	case PCI_IVAR_CLASS:
	case PCI_IVAR_SUBCLASS:
	case PCI_IVAR_PROGIF:
	case PCI_IVAR_REVID:
	case PCI_IVAR_IRQ:
	case PCI_IVAR_DOMAIN:
	case PCI_IVAR_BUS:
	case PCI_IVAR_SLOT:
	case PCI_IVAR_FUNCTION:
		return (EINVAL);	/* disallow for now */

	default:
		return (ENOENT);
	}
}
#ifdef notyet
#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>
#include <sys/cons.h>

/*
 * List resources based on pci map registers, used for within ddb
 *
 * NOTE: this whole section is compiled out (#ifdef notyet); kept for a
 * future DDB port.
 */

DB_SHOW_COMMAND(pciregs, db_pci_dump)
{
	struct pci_devinfo *dinfo;
	struct devlist *devlist_head;
	struct pci_conf *p;
	const char *name;
	int i, error, none_count;

	none_count = 0;
	/* get the head of the device queue */
	devlist_head = &pci_devq;

	/*
	 * Go through the list of devices and print out devices
	 */
	for (error = 0, i = 0,
	     dinfo = STAILQ_FIRST(devlist_head);
	     (dinfo != NULL) && (error == 0) && (i < pci_numdevs) && !db_pager_quit;
	     dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {

		/* Populate pd_name and pd_unit */
		name = NULL;
		if (dinfo->cfg.dev)
			name = device_get_name(dinfo->cfg.dev);

		p = &dinfo->conf;
		/* Unattached devices print as "noneN". */
		db_kprintf("%s%d@pci%d:%d:%d:%d:\tclass=0x%06x card=0x%08x "
			"chip=0x%08x rev=0x%02x hdr=0x%02x\n",
			(name && *name) ? name : "none",
			(name && *name) ? (int)device_get_unit(dinfo->cfg.dev) :
			none_count++,
			p->pc_sel.pc_domain, p->pc_sel.pc_bus, p->pc_sel.pc_dev,
			p->pc_sel.pc_func, (p->pc_class << 16) |
			(p->pc_subclass << 8) | p->pc_progif,
			(p->pc_subdevice << 16) | p->pc_subvendor,
			(p->pc_device << 16) | p->pc_vendor,
			p->pc_revid, p->pc_hdr);
	}
}
#endif /* DDB */
#endif
984263bc 3736
/*
 * Lazily size and allocate the resource backing a PCI BAR.
 *
 * Probes the BAR by writing all-ones and reading back the size mask
 * (the standard PCI sizing protocol), restores the original BAR value,
 * validates that the BAR type matches the requested resource type,
 * overrides the caller's size/alignment with what the BAR actually
 * decodes, allocates the range from the parent, records it in the
 * child's resource list, and finally programs the BAR with the start
 * address.  Returns the resource or NULL on any failure.
 */
static struct resource *
pci_alloc_map(device_t dev, device_t child, int type, int *rid,
    u_long start, u_long end, u_long count, u_int flags)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct resource_list *rl = &dinfo->resources;
	struct resource_list_entry *rle;
	struct resource *res;
	pci_addr_t map, testval;
	int mapsize;

	/*
	 * Weed out the bogons, and figure out how large the BAR/map
	 * is.  Bars that read back 0 here are bogus and unimplemented.
	 * Note: atapci in legacy mode are special and handled elsewhere
	 * in the code.  If you have a atapci device in legacy mode and
	 * it fails here, that other code is broken.
	 */
	res = NULL;
	map = pci_read_config(child, *rid, 4);
	pci_write_config(child, *rid, 0xffffffff, 4);
	testval = pci_read_config(child, *rid, 4);
	/* 64-bit BARs use the next dword as the high half. */
	if (pci_maprange(testval) == 64)
		map |= (pci_addr_t)pci_read_config(child, *rid + 4, 4) << 32;
	if (pci_mapbase(testval) == 0)
		goto out;

	/*
	 * Restore the original value of the BAR.  We may have reprogrammed
	 * the BAR of the low-level console device and when booting verbose,
	 * we need the console device addressable.
	 */
	pci_write_config(child, *rid, map, 4);

	if (PCI_BAR_MEM(testval)) {
		if (type != SYS_RES_MEMORY) {
			if (bootverbose)
				device_printf(dev,
				    "child %s requested type %d for rid %#x,"
				    " but the BAR says it is an memio\n",
				    device_get_nameunit(child), type, *rid);
			goto out;
		}
	} else {
		if (type != SYS_RES_IOPORT) {
			if (bootverbose)
				device_printf(dev,
				    "child %s requested type %d for rid %#x,"
				    " but the BAR says it is an ioport\n",
				    device_get_nameunit(child), type, *rid);
			goto out;
		}
	}
	/*
	 * For real BARs, we need to override the size that
	 * the driver requests, because that's what the BAR
	 * actually uses and we would otherwise have a
	 * situation where we might allocate the excess to
	 * another driver, which won't work.
	 */
	mapsize = pci_mapsize(testval);
	count = 1UL << mapsize;
	if (RF_ALIGNMENT(flags) < mapsize)
		flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize);
	if (PCI_BAR_MEM(testval) && (testval & PCIM_BAR_MEM_PREFETCH))
		flags |= RF_PREFETCHABLE;

	/*
	 * Allocate enough resource, and then write back the
	 * appropriate bar for that resource.
	 */
	res = BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid,
	    start, end, count, flags);
	if (res == NULL) {
		device_printf(child,
		    "%#lx bytes of rid %#x res %d failed (%#lx, %#lx).\n",
		    count, *rid, type, start, end);
		goto out;
	}
	resource_list_add(rl, type, *rid, start, end, count);
	rle = resource_list_find(rl, type, *rid);
	if (rle == NULL)
		panic("pci_alloc_map: unexpectedly can't find resource.");
	rle->res = res;
	rle->start = rman_get_start(res);
	rle->end = rman_get_end(res);
	rle->count = count;
	if (bootverbose)
		device_printf(child,
		    "Lazy allocation of %#lx bytes rid %#x type %d at %#lx\n",
		    count, *rid, type, rman_get_start(res));
	map = rman_get_start(res);
out:;
	/* Program the BAR (either the restored or newly allocated base). */
	pci_write_config(child, *rid, map, 4);
	if (pci_maprange(testval) == 64)
		pci_write_config(child, *rid + 4, map >> 32, 4);
	return (res);
}
4d28e78f 3835
201eb0a7 3836
/*
 * Bus alloc_resource method with lazy allocation for direct children:
 * route an interrupt on demand for IRQ rid 0, enable I/O decoding and
 * size/allocate BAR-backed ranges via pci_alloc_map(), and hand back
 * (activating if requested) a previously reserved resource.  Everything
 * else falls through to resource_list_alloc().
 */
struct resource *
pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
    u_long start, u_long end, u_long count, u_int flags)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct resource_list *rl = &dinfo->resources;
	struct resource_list_entry *rle;
	pcicfgregs *cfg = &dinfo->cfg;

	/*
	 * Perform lazy resource allocation
	 */
	if (device_get_parent(child) == dev) {
		switch (type) {
		case SYS_RES_IRQ:
			/*
			 * Can't alloc legacy interrupt once MSI messages
			 * have been allocated.
			 */
#ifdef MSI
			if (*rid == 0 && (cfg->msi.msi_alloc > 0 ||
			    cfg->msix.msix_alloc > 0))
				return (NULL);
#endif
			/*
			 * If the child device doesn't have an
			 * interrupt routed and is deserving of an
			 * interrupt, try to assign it one.
			 */
			if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) &&
			    (cfg->intpin != 0))
				pci_assign_interrupt(dev, child, 0);
			break;
		case SYS_RES_IOPORT:
		case SYS_RES_MEMORY:
			if (*rid < PCIR_BAR(cfg->nummaps)) {
				/*
				 * Enable the I/O mode.  We should
				 * also be assigning resources too
				 * when none are present.  The
				 * resource_list_alloc kind of sorta does
				 * this...
				 */
				if (PCI_ENABLE_IO(dev, child, type))
					return (NULL);
			}
			rle = resource_list_find(rl, type, *rid);
			if (rle == NULL)
				return (pci_alloc_map(dev, child, type, rid,
				    start, end, count, flags));
			break;
		}
		/*
		 * If we've already allocated the resource, then
		 * return it now.  But first we may need to activate
		 * it, since we don't allocate the resource as active
		 * above.  Normally this would be done down in the
		 * nexus, but since we short-circuit that path we have
		 * to do its job here.  Not sure if we should kfree the
		 * resource if it fails to activate.
		 */
		rle = resource_list_find(rl, type, *rid);
		if (rle != NULL && rle->res != NULL) {
			if (bootverbose)
				device_printf(child,
				    "Reserved %#lx bytes for rid %#x type %d at %#lx\n",
				    rman_get_size(rle->res), *rid, type,
				    rman_get_start(rle->res));
			if ((flags & RF_ACTIVE) &&
			    bus_generic_activate_resource(dev, child, type,
			    *rid, rle->res) != 0)
				return (NULL);
			return (rle->res);
		}
	}
	return (resource_list_alloc(rl, dev, child, type, rid,
	    start, end, count, flags));
}
3915
/*
 * Bus delete_resource method: release a direct child's resource-list
 * entry (refusing if the child still owns or has activated it), clear
 * the corresponding BAR, and forward the deletion to the parent.
 */
void
pci_delete_resource(device_t dev, device_t child, int type, int rid)
{
	struct pci_devinfo *dinfo;
	struct resource_list *rl;
	struct resource_list_entry *rle;

	/* Only manage resources of our own children. */
	if (device_get_parent(child) != dev)
		return;

	dinfo = device_get_ivars(child);
	rl = &dinfo->resources;
	rle = resource_list_find(rl, type, rid);
	if (rle) {
		if (rle->res) {
			if (rman_get_device(rle->res) != dev ||
			    rman_get_flags(rle->res) & RF_ACTIVE) {
				device_printf(dev, "delete_resource: "
				    "Resource still owned by child, oops. "
				    "(type=%d, rid=%d, addr=%lx)\n",
				    rle->type, rle->rid,
				    rman_get_start(rle->res));
				return;
			}
			bus_release_resource(dev, type, rid, rle->res);
		}
		resource_list_delete(rl, type, rid);
	}
	/*
	 * Why do we turn off the PCI configuration BAR when we delete a
	 * resource? -- imp
	 */
	pci_write_config(child, rid, 0, 4);
	BUS_DELETE_RESOURCE(device_get_parent(dev), child, type, rid);
}
3951
e126caf1
MD
3952struct resource_list *
3953pci_get_resource_list (device_t dev, device_t child)
3954{
4d28e78f 3955 struct pci_devinfo *dinfo = device_get_ivars(child);
e126caf1 3956
bcc66dfa
SZ
3957 if (dinfo == NULL)
3958 return (NULL);
3959
b0486c83 3960 return (&dinfo->resources);
e126caf1
MD
3961}
3962
4d28e78f 3963uint32_t
984263bc
MD
3964pci_read_config_method(device_t dev, device_t child, int reg, int width)
3965{
3966 struct pci_devinfo *dinfo = device_get_ivars(child);
3967 pcicfgregs *cfg = &dinfo->cfg;
4a5a2d63 3968
4d28e78f
SZ
3969 return (PCIB_READ_CONFIG(device_get_parent(dev),
3970 cfg->bus, cfg->slot, cfg->func, reg, width));
984263bc
MD
3971}
3972
e126caf1 3973void
984263bc 3974pci_write_config_method(device_t dev, device_t child, int reg,
4d28e78f 3975 uint32_t val, int width)
984263bc
MD
3976{
3977 struct pci_devinfo *dinfo = device_get_ivars(child);
3978 pcicfgregs *cfg = &dinfo->cfg;
4a5a2d63
JS
3979
3980 PCIB_WRITE_CONFIG(device_get_parent(dev),
4d28e78f 3981 cfg->bus, cfg->slot, cfg->func, reg, val, width);
984263bc
MD
3982}
3983
e126caf1 3984int
4d28e78f 3985pci_child_location_str_method(device_t dev, device_t child, char *buf,
e126caf1
MD
3986 size_t buflen)
3987{
e126caf1 3988
f8c7a42d 3989 ksnprintf(buf, buflen, "slot=%d function=%d", pci_get_slot(child),
e126caf1
MD
3990 pci_get_function(child));
3991 return (0);
3992}
3993
3994int
4d28e78f 3995pci_child_pnpinfo_str_method(device_t dev, device_t child, char *buf,
e126caf1
MD
3996 size_t buflen)
3997{
3998 struct pci_devinfo *dinfo;
3999 pcicfgregs *cfg;
4000
4001 dinfo = device_get_ivars(child);
4002 cfg = &dinfo->cfg;
f8c7a42d 4003 ksnprintf(buf, buflen, "vendor=0x%04x device=0x%04x subvendor=0x%04x "
e126caf1
MD
4004 "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device,
4005 cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass,
4006 cfg->progif);
4007 return (0);
4008}
4009
4010int
4011pci_assign_interrupt_method(device_t dev, device_t child)
4d28e78f
SZ
4012{
4013 struct pci_devinfo *dinfo = device_get_ivars(child);
4014 pcicfgregs *cfg = &dinfo->cfg;
4015
4016 return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child,
4017 cfg->intpin));
e126caf1
MD
4018}
4019
984263bc
MD
4020static int
4021pci_modevent(module_t mod, int what, void *arg)
4022{
4d28e78f 4023 static struct cdev *pci_cdev;
4d28e78f 4024