pci: Utilize device_getenv_int
[dragonfly.git] / sys / bus / pci / pci.c
CommitLineData
4d28e78f
SZ
1/*-
2 * Copyright (c) 1997, Stefan Esser <se@kfreebsd.org>
3 * Copyright (c) 2000, Michael Smith <msmith@kfreebsd.org>
4 * Copyright (c) 2000, BSDi
984263bc
MD
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
12 * disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
83c1faaa
SW
27 *
28 * $FreeBSD: src/sys/dev/pci/pci.c,v 1.355.2.9.2.1 2009/04/15 03:14:26 kensmith Exp $
984263bc
MD
29 */
30
92683a33 31#include "opt_acpi.h"
6951547b 32#include "opt_compat_oldpci.h"
984263bc
MD
33
34#include <sys/param.h>
35#include <sys/systm.h>
36#include <sys/malloc.h>
37#include <sys/module.h>
4d28e78f 38#include <sys/linker.h>
984263bc
MD
39#include <sys/fcntl.h>
40#include <sys/conf.h>
41#include <sys/kernel.h>
42#include <sys/queue.h>
638744c5 43#include <sys/sysctl.h>
4d28e78f 44#include <sys/endian.h>
d2f04fe0 45#include <sys/machintr.h>
984263bc 46
941460da
SZ
47#include <machine/msi_machdep.h>
48
984263bc
MD
49#include <vm/vm.h>
50#include <vm/pmap.h>
51#include <vm/vm_extern.h>
52
53#include <sys/bus.h>
984263bc 54#include <sys/rman.h>
4d28e78f 55#include <sys/device.h>
984263bc 56
dc5a7bd2 57#include <sys/pciio.h>
4d28e78f
SZ
58#include <bus/pci/pcireg.h>
59#include <bus/pci/pcivar.h>
60#include <bus/pci/pci_private.h>
984263bc 61
4a5a2d63 62#include "pcib_if.h"
4d28e78f
SZ
63#include "pci_if.h"
64
65#ifdef __HAVE_ACPI
66#include <contrib/dev/acpica/acpi.h>
67#include "acpi_if.h"
68#else
69#define ACPI_PWR_FOR_SLEEP(x, y, z)
70#endif
71
35b72619
SZ
72extern struct dev_ops pcic_ops; /* XXX */
73
3a6dc23c
SZ
74typedef void (*pci_read_cap_t)(device_t, int, int, pcicfgregs *);
75
4d28e78f
SZ
76static uint32_t pci_mapbase(unsigned mapreg);
77static const char *pci_maptype(unsigned mapreg);
78static int pci_mapsize(unsigned testval);
79static int pci_maprange(unsigned mapreg);
80static void pci_fixancient(pcicfgregs *cfg);
81
82static int pci_porten(device_t pcib, int b, int s, int f);
83static int pci_memen(device_t pcib, int b, int s, int f);
84static void pci_assign_interrupt(device_t bus, device_t dev,
85 int force_route);
86static int pci_add_map(device_t pcib, device_t bus, device_t dev,
87 int b, int s, int f, int reg,
88 struct resource_list *rl, int force, int prefetch);
89static int pci_probe(device_t dev);
90static int pci_attach(device_t dev);
11a49859 91static void pci_child_detached(device_t, device_t);
4d28e78f
SZ
92static void pci_load_vendor_data(void);
93static int pci_describe_parse_line(char **ptr, int *vendor,
94 int *device, char **desc);
95static char *pci_describe_device(device_t dev);
96static int pci_modevent(module_t mod, int what, void *arg);
97static void pci_hdrtypedata(device_t pcib, int b, int s, int f,
98 pcicfgregs *cfg);
3a6dc23c 99static void pci_read_capabilities(device_t pcib, pcicfgregs *cfg);
4d28e78f
SZ
100static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg,
101 int reg, uint32_t *data);
102#if 0
103static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg,
104 int reg, uint32_t data);
105#endif
106static void pci_read_vpd(device_t pcib, pcicfgregs *cfg);
107static void pci_disable_msi(device_t dev);
108static void pci_enable_msi(device_t dev, uint64_t address,
109 uint16_t data);
cf8f3133 110static void pci_setup_msix_vector(device_t dev, u_int index,
4d28e78f 111 uint64_t address, uint32_t data);
cf8f3133
SZ
112static void pci_mask_msix_vector(device_t dev, u_int index);
113static void pci_unmask_msix_vector(device_t dev, u_int index);
31646171 114static void pci_mask_msix_allvectors(device_t dev);
f9c942fb 115static struct msix_vector *pci_find_msix_vector(device_t dev, int rid);
4d28e78f
SZ
116static int pci_msi_blacklisted(void);
117static void pci_resume_msi(device_t dev);
118static void pci_resume_msix(device_t dev);
d85e7311
SZ
119static int pcie_slotimpl(const pcicfgregs *);
120static void pci_print_verbose_expr(const pcicfgregs *);
4d28e78f 121
3a6dc23c
SZ
122static void pci_read_cap_pmgt(device_t, int, int, pcicfgregs *);
123static void pci_read_cap_ht(device_t, int, int, pcicfgregs *);
124static void pci_read_cap_msi(device_t, int, int, pcicfgregs *);
125static void pci_read_cap_msix(device_t, int, int, pcicfgregs *);
126static void pci_read_cap_vpd(device_t, int, int, pcicfgregs *);
127static void pci_read_cap_subvendor(device_t, int, int,
128 pcicfgregs *);
129static void pci_read_cap_pcix(device_t, int, int, pcicfgregs *);
d85e7311 130static void pci_read_cap_express(device_t, int, int, pcicfgregs *);
3a6dc23c 131
4d28e78f
SZ
132static device_method_t pci_methods[] = {
133 /* Device interface */
134 DEVMETHOD(device_probe, pci_probe),
135 DEVMETHOD(device_attach, pci_attach),
136 DEVMETHOD(device_detach, bus_generic_detach),
137 DEVMETHOD(device_shutdown, bus_generic_shutdown),
138 DEVMETHOD(device_suspend, pci_suspend),
139 DEVMETHOD(device_resume, pci_resume),
140
141 /* Bus interface */
142 DEVMETHOD(bus_print_child, pci_print_child),
143 DEVMETHOD(bus_probe_nomatch, pci_probe_nomatch),
144 DEVMETHOD(bus_read_ivar, pci_read_ivar),
145 DEVMETHOD(bus_write_ivar, pci_write_ivar),
146 DEVMETHOD(bus_driver_added, pci_driver_added),
11a49859 147 DEVMETHOD(bus_child_detached, pci_child_detached),
4d28e78f
SZ
148 DEVMETHOD(bus_setup_intr, pci_setup_intr),
149 DEVMETHOD(bus_teardown_intr, pci_teardown_intr),
150
151 DEVMETHOD(bus_get_resource_list,pci_get_resource_list),
152 DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
153 DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
154 DEVMETHOD(bus_delete_resource, pci_delete_resource),
155 DEVMETHOD(bus_alloc_resource, pci_alloc_resource),
156 DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource),
157 DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
158 DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
159 DEVMETHOD(bus_child_pnpinfo_str, pci_child_pnpinfo_str_method),
160 DEVMETHOD(bus_child_location_str, pci_child_location_str_method),
161
162 /* PCI interface */
163 DEVMETHOD(pci_read_config, pci_read_config_method),
164 DEVMETHOD(pci_write_config, pci_write_config_method),
165 DEVMETHOD(pci_enable_busmaster, pci_enable_busmaster_method),
166 DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method),
167 DEVMETHOD(pci_enable_io, pci_enable_io_method),
168 DEVMETHOD(pci_disable_io, pci_disable_io_method),
169 DEVMETHOD(pci_get_vpd_ident, pci_get_vpd_ident_method),
170 DEVMETHOD(pci_get_vpd_readonly, pci_get_vpd_readonly_method),
171 DEVMETHOD(pci_get_powerstate, pci_get_powerstate_method),
172 DEVMETHOD(pci_set_powerstate, pci_set_powerstate_method),
173 DEVMETHOD(pci_assign_interrupt, pci_assign_interrupt_method),
174 DEVMETHOD(pci_find_extcap, pci_find_extcap_method),
175 DEVMETHOD(pci_alloc_msi, pci_alloc_msi_method),
4d28e78f 176 DEVMETHOD(pci_release_msi, pci_release_msi_method),
a7854dd0
SZ
177 DEVMETHOD(pci_alloc_msix_vector, pci_alloc_msix_vector_method),
178 DEVMETHOD(pci_release_msix_vector, pci_release_msix_vector_method),
4d28e78f
SZ
179 DEVMETHOD(pci_msi_count, pci_msi_count_method),
180 DEVMETHOD(pci_msix_count, pci_msix_count_method),
181
182 { 0, 0 }
183};
184
185DEFINE_CLASS_0(pci, pci_driver, pci_methods, 0);
4a5a2d63 186
4d28e78f 187static devclass_t pci_devclass;
aa2b9d05 188DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, NULL);
4d28e78f
SZ
189MODULE_VERSION(pci, 1);
190
191static char *pci_vendordata;
192static size_t pci_vendordata_size;
dc5a7bd2 193
984263bc 194
3a6dc23c
SZ
195static const struct pci_read_cap {
196 int cap;
197 pci_read_cap_t read_cap;
198} pci_read_caps[] = {
199 { PCIY_PMG, pci_read_cap_pmgt },
200 { PCIY_HT, pci_read_cap_ht },
201 { PCIY_MSI, pci_read_cap_msi },
202 { PCIY_MSIX, pci_read_cap_msix },
203 { PCIY_VPD, pci_read_cap_vpd },
204 { PCIY_SUBVENDOR, pci_read_cap_subvendor },
205 { PCIY_PCIX, pci_read_cap_pcix },
d85e7311 206 { PCIY_EXPRESS, pci_read_cap_express },
3a6dc23c
SZ
207 { 0, NULL } /* required last entry */
208};
209
984263bc 210struct pci_quirk {
4d28e78f 211 uint32_t devid; /* Vendor/device of the card */
984263bc 212 int type;
4d28e78f
SZ
213#define PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */
214#define PCI_QUIRK_DISABLE_MSI 2 /* MSI/MSI-X doesn't work */
984263bc
MD
215 int arg1;
216 int arg2;
217};
218
219struct pci_quirk pci_quirks[] = {
4d28e78f 220 /* The Intel 82371AB and 82443MX has a map register at offset 0x90. */
984263bc
MD
221 { 0x71138086, PCI_QUIRK_MAP_REG, 0x90, 0 },
222 { 0x719b8086, PCI_QUIRK_MAP_REG, 0x90, 0 },
f1f0bfb2
JS
223 /* As does the Serverworks OSB4 (the SMBus mapping register) */
224 { 0x02001166, PCI_QUIRK_MAP_REG, 0x90, 0 },
984263bc 225
4d28e78f
SZ
226 /*
227 * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge
228 * or the CMIC-SL (AKA ServerWorks GC_LE).
229 */
230 { 0x00141166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
231 { 0x00171166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
232
233 /*
234 * MSI doesn't work on earlier Intel chipsets including
235 * E7500, E7501, E7505, 845, 865, 875/E7210, and 855.
236 */
237 { 0x25408086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
238 { 0x254c8086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
239 { 0x25508086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
240 { 0x25608086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
241 { 0x25708086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
242 { 0x25788086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
243 { 0x35808086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
244
245 /*
246 * MSI doesn't work with devices behind the AMD 8131 HT-PCIX
247 * bridge.
248 */
249 { 0x74501022, PCI_QUIRK_DISABLE_MSI, 0, 0 },
250
984263bc
MD
251 { 0 }
252};
253
254/* map register information */
4d28e78f
SZ
255#define PCI_MAPMEM 0x01 /* memory map */
256#define PCI_MAPMEMP 0x02 /* prefetchable memory map */
257#define PCI_MAPPORT 0x04 /* port map */
258
f9c942fb
SZ
259#define PCI_MSIX_RID2VEC(rid) ((rid) - 1) /* rid -> MSI-X vector # */
260#define PCI_MSIX_VEC2RID(vec) ((vec) + 1) /* MSI-X vector # -> rid */
261
4d28e78f
SZ
262struct devlist pci_devq;
263uint32_t pci_generation;
264uint32_t pci_numdevs = 0;
265static int pcie_chipset, pcix_chipset;
266
267/* sysctl vars */
268SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD, 0, "PCI bus tuning parameters");
269
270static int pci_enable_io_modes = 1;
271TUNABLE_INT("hw.pci.enable_io_modes", &pci_enable_io_modes);
272SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RW,
273 &pci_enable_io_modes, 1,
274 "Enable I/O and memory bits in the config register. Some BIOSes do not\n\
275enable these bits correctly. We'd like to do this all the time, but there\n\
276are some peripherals that this causes problems with.");
984263bc 277
638744c5
HT
278static int pci_do_power_nodriver = 0;
279TUNABLE_INT("hw.pci.do_power_nodriver", &pci_do_power_nodriver);
280SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RW,
281 &pci_do_power_nodriver, 0,
282 "Place a function into D3 state when no driver attaches to it. 0 means\n\
283disable. 1 means conservatively place devices into D3 state. 2 means\n\
6699890a 284aggressively place devices into D3 state. 3 means put absolutely everything\n\
638744c5
HT
285in D3 state.");
286
4d28e78f
SZ
287static int pci_do_power_resume = 1;
288TUNABLE_INT("hw.pci.do_power_resume", &pci_do_power_resume);
289SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RW,
290 &pci_do_power_resume, 1,
291 "Transition from D3 -> D0 on resume.");
292
293static int pci_do_msi = 1;
294TUNABLE_INT("hw.pci.enable_msi", &pci_do_msi);
295SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RW, &pci_do_msi, 1,
296 "Enable support for MSI interrupts");
297
4f459073 298static int pci_do_msix = 1;
4d28e78f
SZ
299TUNABLE_INT("hw.pci.enable_msix", &pci_do_msix);
300SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RW, &pci_do_msix, 1,
301 "Enable support for MSI-X interrupts");
302
303static int pci_honor_msi_blacklist = 1;
304TUNABLE_INT("hw.pci.honor_msi_blacklist", &pci_honor_msi_blacklist);
305SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RD,
306 &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI");
307
2c3d7ac8
SZ
308static int pci_msi_cpuid;
309
4d28e78f
SZ
310/* Find a device_t by bus/slot/function in domain 0 */
311
312device_t
313pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func)
314{
315
316 return (pci_find_dbsf(0, bus, slot, func));
317}
318
319/* Find a device_t by domain/bus/slot/function */
320
984263bc 321device_t
4d28e78f 322pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func)
984263bc
MD
323{
324 struct pci_devinfo *dinfo;
325
326 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
4d28e78f
SZ
327 if ((dinfo->cfg.domain == domain) &&
328 (dinfo->cfg.bus == bus) &&
984263bc
MD
329 (dinfo->cfg.slot == slot) &&
330 (dinfo->cfg.func == func)) {
331 return (dinfo->cfg.dev);
332 }
333 }
334
335 return (NULL);
336}
337
4d28e78f
SZ
338/* Find a device_t by vendor/device ID */
339
984263bc 340device_t
4d28e78f 341pci_find_device(uint16_t vendor, uint16_t device)
984263bc
MD
342{
343 struct pci_devinfo *dinfo;
344
345 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
346 if ((dinfo->cfg.vendor == vendor) &&
347 (dinfo->cfg.device == device)) {
348 return (dinfo->cfg.dev);
349 }
350 }
351
352 return (NULL);
353}
354
355/* return base address of memory or port map */
356
4d28e78f
SZ
357static uint32_t
358pci_mapbase(uint32_t mapreg)
984263bc 359{
4d28e78f
SZ
360
361 if (PCI_BAR_MEM(mapreg))
362 return (mapreg & PCIM_BAR_MEM_BASE);
363 else
364 return (mapreg & PCIM_BAR_IO_BASE);
984263bc
MD
365}
366
367/* return map type of memory or port map */
368
4d28e78f 369static const char *
984263bc
MD
370pci_maptype(unsigned mapreg)
371{
984263bc 372
4d28e78f
SZ
373 if (PCI_BAR_IO(mapreg))
374 return ("I/O Port");
375 if (mapreg & PCIM_BAR_MEM_PREFETCH)
376 return ("Prefetchable Memory");
377 return ("Memory");
984263bc
MD
378}
379
380/* return log2 of map size decoded for memory or port map */
381
382static int
4d28e78f 383pci_mapsize(uint32_t testval)
984263bc
MD
384{
385 int ln2size;
386
387 testval = pci_mapbase(testval);
388 ln2size = 0;
389 if (testval != 0) {
390 while ((testval & 1) == 0)
391 {
392 ln2size++;
393 testval >>= 1;
394 }
395 }
396 return (ln2size);
397}
398
399/* return log2 of address range supported by map register */
400
401static int
402pci_maprange(unsigned mapreg)
403{
404 int ln2range = 0;
4d28e78f
SZ
405
406 if (PCI_BAR_IO(mapreg))
984263bc 407 ln2range = 32;
4d28e78f
SZ
408 else
409 switch (mapreg & PCIM_BAR_MEM_TYPE) {
410 case PCIM_BAR_MEM_32:
411 ln2range = 32;
412 break;
413 case PCIM_BAR_MEM_1MB:
414 ln2range = 20;
415 break;
416 case PCIM_BAR_MEM_64:
417 ln2range = 64;
418 break;
419 }
984263bc
MD
420 return (ln2range);
421}
422
423/* adjust some values from PCI 1.0 devices to match 2.0 standards ... */
424
425static void
426pci_fixancient(pcicfgregs *cfg)
427{
428 if (cfg->hdrtype != 0)
429 return;
430
431 /* PCI to PCI bridges use header type 1 */
432 if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI)
433 cfg->hdrtype = 1;
434}
435
984263bc
MD
436/* extract header type specific config data */
437
438static void
4a5a2d63 439pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg)
984263bc 440{
4d28e78f 441#define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
984263bc
MD
442 switch (cfg->hdrtype) {
443 case 0:
4a5a2d63
JS
444 cfg->subvendor = REG(PCIR_SUBVEND_0, 2);
445 cfg->subdevice = REG(PCIR_SUBDEV_0, 2);
984263bc
MD
446 cfg->nummaps = PCI_MAXMAPS_0;
447 break;
448 case 1:
984263bc 449 cfg->nummaps = PCI_MAXMAPS_1;
6951547b
SZ
450#ifdef COMPAT_OLDPCI
451 cfg->secondarybus = REG(PCIR_SECBUS_1, 1);
452#endif
984263bc
MD
453 break;
454 case 2:
4a5a2d63
JS
455 cfg->subvendor = REG(PCIR_SUBVEND_2, 2);
456 cfg->subdevice = REG(PCIR_SUBDEV_2, 2);
984263bc 457 cfg->nummaps = PCI_MAXMAPS_2;
6951547b
SZ
458#ifdef COMPAT_OLDPCI
459 cfg->secondarybus = REG(PCIR_SECBUS_2, 1);
460#endif
984263bc
MD
461 break;
462 }
4a5a2d63 463#undef REG
984263bc
MD
464}
465
4d28e78f 466/* read configuration header into pcicfgregs structure */
22457186 467struct pci_devinfo *
4d28e78f 468pci_read_device(device_t pcib, int d, int b, int s, int f, size_t size)
984263bc 469{
4d28e78f 470#define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
984263bc
MD
471 pcicfgregs *cfg = NULL;
472 struct pci_devinfo *devlist_entry;
473 struct devlist *devlist_head;
474
475 devlist_head = &pci_devq;
476
477 devlist_entry = NULL;
478
4d28e78f 479 if (REG(PCIR_DEVVENDOR, 4) != -1) {
efda3bd0 480 devlist_entry = kmalloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
984263bc
MD
481
482 cfg = &devlist_entry->cfg;
4d28e78f
SZ
483
484 cfg->domain = d;
4a5a2d63
JS
485 cfg->bus = b;
486 cfg->slot = s;
487 cfg->func = f;
488 cfg->vendor = REG(PCIR_VENDOR, 2);
489 cfg->device = REG(PCIR_DEVICE, 2);
490 cfg->cmdreg = REG(PCIR_COMMAND, 2);
491 cfg->statreg = REG(PCIR_STATUS, 2);
492 cfg->baseclass = REG(PCIR_CLASS, 1);
493 cfg->subclass = REG(PCIR_SUBCLASS, 1);
494 cfg->progif = REG(PCIR_PROGIF, 1);
495 cfg->revid = REG(PCIR_REVID, 1);
e126caf1 496 cfg->hdrtype = REG(PCIR_HDRTYPE, 1);
4a5a2d63
JS
497 cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1);
498 cfg->lattimer = REG(PCIR_LATTIMER, 1);
499 cfg->intpin = REG(PCIR_INTPIN, 1);
500 cfg->intline = REG(PCIR_INTLINE, 1);
984263bc 501
4a5a2d63
JS
502 cfg->mingnt = REG(PCIR_MINGNT, 1);
503 cfg->maxlat = REG(PCIR_MAXLAT, 1);
984263bc
MD
504
505 cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0;
506 cfg->hdrtype &= ~PCIM_MFDEV;
507
508 pci_fixancient(cfg);
4a5a2d63 509 pci_hdrtypedata(pcib, b, s, f, cfg);
4d28e78f 510
3a6dc23c 511 pci_read_capabilities(pcib, cfg);
984263bc
MD
512
513 STAILQ_INSERT_TAIL(devlist_head, devlist_entry, pci_links);
514
4d28e78f 515 devlist_entry->conf.pc_sel.pc_domain = cfg->domain;
984263bc
MD
516 devlist_entry->conf.pc_sel.pc_bus = cfg->bus;
517 devlist_entry->conf.pc_sel.pc_dev = cfg->slot;
518 devlist_entry->conf.pc_sel.pc_func = cfg->func;
519 devlist_entry->conf.pc_hdr = cfg->hdrtype;
520
521 devlist_entry->conf.pc_subvendor = cfg->subvendor;
522 devlist_entry->conf.pc_subdevice = cfg->subdevice;
523 devlist_entry->conf.pc_vendor = cfg->vendor;
524 devlist_entry->conf.pc_device = cfg->device;
525
526 devlist_entry->conf.pc_class = cfg->baseclass;
527 devlist_entry->conf.pc_subclass = cfg->subclass;
528 devlist_entry->conf.pc_progif = cfg->progif;
529 devlist_entry->conf.pc_revid = cfg->revid;
530
531 pci_numdevs++;
532 pci_generation++;
533 }
534 return (devlist_entry);
535#undef REG
536}
537
3a6dc23c
SZ
538static int
539pci_fixup_nextptr(int *nextptr0)
540{
541 int nextptr = *nextptr0;
542
543 /* "Next pointer" is only one byte */
544 KASSERT(nextptr <= 0xff, ("Illegal next pointer %d\n", nextptr));
545
546 if (nextptr & 0x3) {
547 /*
548 * PCI local bus spec 3.0:
549 *
550 * "... The bottom two bits of all pointers are reserved
551 * and must be implemented as 00b although software must
552 * mask them to allow for future uses of these bits ..."
553 */
554 if (bootverbose) {
555 kprintf("Illegal PCI extended capability "
556 "offset, fixup 0x%02x -> 0x%02x\n",
557 nextptr, nextptr & ~0x3);
558 }
559 nextptr &= ~0x3;
560 }
561 *nextptr0 = nextptr;
562
563 if (nextptr < 0x40) {
564 if (nextptr != 0) {
565 kprintf("Illegal PCI extended capability "
566 "offset 0x%02x", nextptr);
567 }
568 return 0;
569 }
570 return 1;
571}
572
b4c0a845 573static void
3a6dc23c 574pci_read_cap_pmgt(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
984263bc 575{
3a6dc23c
SZ
576#define REG(n, w) \
577 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
578
579 struct pcicfg_pp *pp = &cfg->pp;
580
581 if (pp->pp_cap)
582 return;
583
584 pp->pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
585 pp->pp_status = ptr + PCIR_POWER_STATUS;
586 pp->pp_pmcsr = ptr + PCIR_POWER_PMCSR;
587
588 if ((nextptr - ptr) > PCIR_POWER_DATA) {
589 /*
590 * XXX
591 * We should write to data_select and read back from
592 * data_scale to determine whether data register is
593 * implemented.
594 */
595#ifdef foo
596 pp->pp_data = ptr + PCIR_POWER_DATA;
597#else
598 pp->pp_data = 0;
599#endif
600 }
601
602#undef REG
603}
604
605static void
606pci_read_cap_ht(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
607{
b2b3ffcd 608#if defined(__i386__) || defined(__x86_64__)
3a6dc23c
SZ
609
610#define REG(n, w) \
611 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
612
613 struct pcicfg_ht *ht = &cfg->ht;
4d28e78f 614 uint64_t addr;
4d28e78f 615 uint32_t val;
3a6dc23c
SZ
616
617 /* Determine HT-specific capability type. */
618 val = REG(ptr + PCIR_HT_COMMAND, 2);
619
941460da
SZ
620 if ((val & 0xe000) == PCIM_HTCAP_SLAVE)
621 cfg->ht.ht_slave = ptr;
622
3a6dc23c
SZ
623 if ((val & PCIM_HTCMD_CAP_MASK) != PCIM_HTCAP_MSI_MAPPING)
624 return;
625
626 if (!(val & PCIM_HTCMD_MSI_FIXED)) {
627 /* Sanity check the mapping window. */
628 addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI, 4);
629 addr <<= 32;
630 addr |= REG(ptr + PCIR_HTMSI_ADDRESS_LO, 4);
941460da 631 if (addr != MSI_X86_ADDR_BASE) {
3a6dc23c
SZ
632 device_printf(pcib, "HT Bridge at pci%d:%d:%d:%d "
633 "has non-default MSI window 0x%llx\n",
634 cfg->domain, cfg->bus, cfg->slot, cfg->func,
635 (long long)addr);
636 }
637 } else {
941460da 638 addr = MSI_X86_ADDR_BASE;
3a6dc23c
SZ
639 }
640
641 ht->ht_msimap = ptr;
642 ht->ht_msictrl = val;
643 ht->ht_msiaddr = addr;
644
645#undef REG
646
b2b3ffcd 647#endif /* __i386__ || __x86_64__ */
3a6dc23c
SZ
648}
649
650static void
651pci_read_cap_msi(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
652{
653#define REG(n, w) \
654 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
655
656 struct pcicfg_msi *msi = &cfg->msi;
657
658 msi->msi_location = ptr;
659 msi->msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2);
660 msi->msi_msgnum = 1 << ((msi->msi_ctrl & PCIM_MSICTRL_MMC_MASK) >> 1);
661
662#undef REG
663}
664
665static void
666pci_read_cap_msix(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
667{
668#define REG(n, w) \
669 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
670
671 struct pcicfg_msix *msix = &cfg->msix;
672 uint32_t val;
673
674 msix->msix_location = ptr;
675 msix->msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2);
676 msix->msix_msgnum = (msix->msix_ctrl & PCIM_MSIXCTRL_TABLE_SIZE) + 1;
677
678 val = REG(ptr + PCIR_MSIX_TABLE, 4);
679 msix->msix_table_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK);
680 msix->msix_table_offset = val & ~PCIM_MSIX_BIR_MASK;
681
682 val = REG(ptr + PCIR_MSIX_PBA, 4);
683 msix->msix_pba_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK);
684 msix->msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK;
685
f9c942fb
SZ
686 TAILQ_INIT(&msix->msix_vectors);
687
3a6dc23c
SZ
688#undef REG
689}
690
691static void
692pci_read_cap_vpd(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
693{
694 cfg->vpd.vpd_reg = ptr;
695}
696
697static void
698pci_read_cap_subvendor(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
699{
700#define REG(n, w) \
701 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
702
703 /* Should always be true. */
704 if ((cfg->hdrtype & PCIM_HDRTYPE) == 1) {
705 uint32_t val;
706
707 val = REG(ptr + PCIR_SUBVENDCAP_ID, 4);
708 cfg->subvendor = val & 0xffff;
709 cfg->subdevice = val >> 16;
710 }
711
712#undef REG
713}
714
715static void
716pci_read_cap_pcix(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
717{
718 /*
719 * Assume we have a PCI-X chipset if we have
720 * at least one PCI-PCI bridge with a PCI-X
721 * capability. Note that some systems with
722 * PCI-express or HT chipsets might match on
723 * this check as well.
724 */
725 if ((cfg->hdrtype & PCIM_HDRTYPE) == 1)
726 pcix_chipset = 1;
d85e7311
SZ
727
728 cfg->pcix.pcix_ptr = ptr;
729}
730
731static int
732pcie_slotimpl(const pcicfgregs *cfg)
733{
734 const struct pcicfg_expr *expr = &cfg->expr;
735 uint16_t port_type;
736
737 /*
738 * Only version 1 can be parsed currently
739 */
740 if ((expr->expr_cap & PCIEM_CAP_VER_MASK) != PCIEM_CAP_VER_1)
741 return 0;
742
743 /*
744 * - Slot implemented bit is meaningful iff current port is
745 * root port or down stream port.
746 * - Testing for root port or down stream port is meanningful
747 * iff PCI configure has type 1 header.
748 */
749
750 if (cfg->hdrtype != 1)
751 return 0;
752
753 port_type = expr->expr_cap & PCIEM_CAP_PORT_TYPE;
754 if (port_type != PCIE_ROOT_PORT && port_type != PCIE_DOWN_STREAM_PORT)
755 return 0;
756
757 if (!(expr->expr_cap & PCIEM_CAP_SLOT_IMPL))
758 return 0;
759
760 return 1;
3a6dc23c
SZ
761}
762
763static void
d85e7311 764pci_read_cap_express(device_t pcib, int ptr, int nextptr, pcicfgregs *cfg)
3a6dc23c 765{
d85e7311
SZ
766#define REG(n, w) \
767 PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
768
769 struct pcicfg_expr *expr = &cfg->expr;
770
3a6dc23c
SZ
771 /*
772 * Assume we have a PCI-express chipset if we have
773 * at least one PCI-express device.
774 */
775 pcie_chipset = 1;
d85e7311
SZ
776
777 expr->expr_ptr = ptr;
778 expr->expr_cap = REG(ptr + PCIER_CAPABILITY, 2);
779
780 /*
781 * Only version 1 can be parsed currently
782 */
783 if ((expr->expr_cap & PCIEM_CAP_VER_MASK) != PCIEM_CAP_VER_1)
784 return;
785
786 /*
787 * Read slot capabilities. Slot capabilities exists iff
788 * current port's slot is implemented
789 */
790 if (pcie_slotimpl(cfg))
791 expr->expr_slotcap = REG(ptr + PCIER_SLOTCAP, 4);
792
793#undef REG
3a6dc23c
SZ
794}
795
796static void
797pci_read_capabilities(device_t pcib, pcicfgregs *cfg)
798{
799#define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
800#define WREG(n, v, w) PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w)
801
802 uint32_t val;
803 int nextptr, ptrptr;
804
805 if ((REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT) == 0) {
806 /* No capabilities */
807 return;
808 }
0c78fe3f 809
4d28e78f 810 switch (cfg->hdrtype & PCIM_HDRTYPE) {
984263bc 811 case 0:
81c29ce4
SZ
812 case 1:
813 ptrptr = PCIR_CAP_PTR;
984263bc
MD
814 break;
815 case 2:
4d28e78f 816 ptrptr = PCIR_CAP_PTR_2; /* cardbus capabilities ptr */
984263bc
MD
817 break;
818 default:
3a6dc23c 819 return; /* no capabilities support */
984263bc 820 }
4d28e78f 821 nextptr = REG(ptrptr, 1); /* sanity check? */
984263bc
MD
822
823 /*
824 * Read capability entries.
825 */
3a6dc23c
SZ
826 while (pci_fixup_nextptr(&nextptr)) {
827 const struct pci_read_cap *rc;
828 int ptr = nextptr;
829
4d28e78f 830 /* Find the next entry */
4d28e78f 831 nextptr = REG(ptr + PCICAP_NEXTPTR, 1);
984263bc
MD
832
833 /* Process this entry */
3a6dc23c
SZ
834 val = REG(ptr + PCICAP_ID, 1);
835 for (rc = pci_read_caps; rc->read_cap != NULL; ++rc) {
836 if (rc->cap == val) {
837 rc->read_cap(pcib, ptr, nextptr, cfg);
4d28e78f
SZ
838 break;
839 }
984263bc
MD
840 }
841 }
941460da
SZ
842
843#if defined(__i386__) || defined(__x86_64__)
844 /*
845 * Enable the MSI mapping window for all HyperTransport
846 * slaves. PCI-PCI bridges have their windows enabled via
847 * PCIB_MAP_MSI().
848 */
849 if (cfg->ht.ht_slave != 0 && cfg->ht.ht_msimap != 0 &&
850 !(cfg->ht.ht_msictrl & PCIM_HTCMD_MSI_ENABLE)) {
851 device_printf(pcib,
852 "Enabling MSI window for HyperTransport slave at pci%d:%d:%d:%d\n",
853 cfg->domain, cfg->bus, cfg->slot, cfg->func);
854 cfg->ht.ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
855 WREG(cfg->ht.ht_msimap + PCIR_HT_COMMAND, cfg->ht.ht_msictrl,
856 2);
857 }
858#endif
859
4d28e78f 860/* REG and WREG use carry through to next functions */
984263bc
MD
861}
862
4d28e78f
SZ
863/*
864 * PCI Vital Product Data
865 */
866
867#define PCI_VPD_TIMEOUT 1000000
984263bc 868
4d28e78f
SZ
869static int
870pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data)
984263bc 871{
4d28e78f 872 int count = PCI_VPD_TIMEOUT;
984263bc 873
4d28e78f 874 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
984263bc 875
4d28e78f 876 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2);
984263bc 877
4d28e78f
SZ
878 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000) {
879 if (--count < 0)
880 return (ENXIO);
881 DELAY(1); /* limit looping */
882 }
883 *data = (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4));
984263bc 884
984263bc
MD
885 return (0);
886}
984263bc 887
4d28e78f
SZ
888#if 0
889static int
890pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data)
984263bc 891{
4d28e78f
SZ
892 int count = PCI_VPD_TIMEOUT;
893
894 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
895
896 WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4);
897 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2);
898 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000) {
899 if (--count < 0)
900 return (ENXIO);
901 DELAY(1); /* limit looping */
902 }
903
904 return (0);
905}
906#endif
907
908#undef PCI_VPD_TIMEOUT
909
910struct vpd_readstate {
911 device_t pcib;
912 pcicfgregs *cfg;
913 uint32_t val;
914 int bytesinval;
915 int off;
916 uint8_t cksum;
917};
918
919static int
920vpd_nextbyte(struct vpd_readstate *vrs, uint8_t *data)
921{
922 uint32_t reg;
923 uint8_t byte;
924
925 if (vrs->bytesinval == 0) {
926 if (pci_read_vpd_reg(vrs->pcib, vrs->cfg, vrs->off, &reg))
927 return (ENXIO);
928 vrs->val = le32toh(reg);
929 vrs->off += 4;
930 byte = vrs->val & 0xff;
931 vrs->bytesinval = 3;
932 } else {
933 vrs->val = vrs->val >> 8;
934 byte = vrs->val & 0xff;
935 vrs->bytesinval--;
936 }
937
938 vrs->cksum += byte;
939 *data = byte;
940 return (0);
941}
942
d85e7311
SZ
943int
944pcie_slot_implemented(device_t dev)
945{
946 struct pci_devinfo *dinfo = device_get_ivars(dev);
947
948 return pcie_slotimpl(&dinfo->cfg);
949}
950
4d28e78f
SZ
951void
952pcie_set_max_readrq(device_t dev, uint16_t rqsize)
953{
d85e7311
SZ
954 uint8_t expr_ptr;
955 uint16_t val;
956
957 rqsize &= PCIEM_DEVCTL_MAX_READRQ_MASK;
958 if (rqsize > PCIEM_DEVCTL_MAX_READRQ_4096) {
959 panic("%s: invalid max read request size 0x%02x\n",
960 device_get_nameunit(dev), rqsize);
961 }
962
963 expr_ptr = pci_get_pciecap_ptr(dev);
964 if (!expr_ptr)
965 panic("%s: not PCIe device\n", device_get_nameunit(dev));
966
967 val = pci_read_config(dev, expr_ptr + PCIER_DEVCTRL, 2);
968 if ((val & PCIEM_DEVCTL_MAX_READRQ_MASK) != rqsize) {
969 if (bootverbose)
970 device_printf(dev, "adjust device control 0x%04x", val);
971
972 val &= ~PCIEM_DEVCTL_MAX_READRQ_MASK;
973 val |= rqsize;
974 pci_write_config(dev, expr_ptr + PCIER_DEVCTRL, val, 2);
975
976 if (bootverbose)
977 kprintf(" -> 0x%04x\n", val);
978 }
4d28e78f
SZ
979}
980
441580ca
SZ
981uint16_t
982pcie_get_max_readrq(device_t dev)
983{
984 uint8_t expr_ptr;
985 uint16_t val;
986
987 expr_ptr = pci_get_pciecap_ptr(dev);
988 if (!expr_ptr)
989 panic("%s: not PCIe device\n", device_get_nameunit(dev));
990
991 val = pci_read_config(dev, expr_ptr + PCIER_DEVCTRL, 2);
992 return (val & PCIEM_DEVCTL_MAX_READRQ_MASK);
993}
994
4d28e78f
SZ
/*
 * Read and parse the device's VPD (Vital Product Data) into cfg->vpd.
 *
 * Bytes are pulled four at a time through the VPD capability registers
 * (via vpd_nextbyte()) and decoded by a small state machine:
 *
 *	state 0  - expecting a resource item tag (small or large format)
 *	state 1  - copying the Identifier String
 *	state 2  - expecting a VPD-R keyword header
 *	state 3  - copying a VPD-R keyword value
 *	state 4  - skipping data
 *	state 5  - expecting a VPD-W keyword header
 *	state 6  - copying a VPD-W keyword value
 *	state -1 - parsing finished (End tag or invalid data)
 *	state -2 - I/O error while reading the VPD
 *
 * If the "RV" checksum does not verify, the read-only data is freed;
 * on I/O error all parsed data is freed.  cfg->vpd.vpd_cached is set
 * in every case so the read is only ever attempted once.
 */
static void
pci_read_vpd(device_t pcib, pcicfgregs *cfg)
{
	struct vpd_readstate vrs;
	int state;
	int name;
	int remain;
	int i;
	int alloc, off;		/* alloc/off for RO/W arrays */
	int cksumvalid;
	int dflen;
	uint8_t byte;
	uint8_t byte2;

	/* init vpd reader */
	vrs.bytesinval = 0;
	vrs.off = 0;
	vrs.pcib = pcib;
	vrs.cfg = cfg;
	vrs.cksum = 0;

	state = 0;
	name = remain = i = 0;	/* shut up stupid gcc */
	alloc = off = 0;	/* shut up stupid gcc */
	dflen = 0;		/* shut up stupid gcc */
	cksumvalid = -1;	/* -1 = not yet checked, 0 = bad, 1 = good */
	while (state >= 0) {
		if (vpd_nextbyte(&vrs, &byte)) {
			state = -2;
			break;
		}
#if 0
		kprintf("vpd: val: %#x, off: %d, bytesinval: %d, byte: %#hhx, " \
		    "state: %d, remain: %d, name: %#x, i: %d\n", vrs.val,
		    vrs.off, vrs.bytesinval, byte, state, remain, name, i);
#endif
		switch (state) {
		case 0:		/* item name */
			if (byte & 0x80) {
				/* Large resource: 16-bit length follows. */
				if (vpd_nextbyte(&vrs, &byte2)) {
					state = -2;
					break;
				}
				remain = byte2;
				if (vpd_nextbyte(&vrs, &byte2)) {
					state = -2;
					break;
				}
				remain |= byte2 << 8;
				/* Length cannot extend past the 0x7f * 4 byte window. */
				if (remain > (0x7f*4 - vrs.off)) {
					state = -1;
					kprintf(
				    "pci%d:%d:%d:%d: invalid VPD data, remain %#x\n",
					    cfg->domain, cfg->bus, cfg->slot,
					    cfg->func, remain);
				}
				name = byte & 0x7f;
			} else {
				/* Small resource: 3-bit length in the tag byte. */
				remain = byte & 0x7;
				name = (byte >> 3) & 0xf;
			}
			switch (name) {
			case 0x2:	/* String */
				cfg->vpd.vpd_ident = kmalloc(remain + 1,
				    M_DEVBUF, M_WAITOK);
				i = 0;
				state = 1;
				break;
			case 0xf:	/* End */
				state = -1;
				break;
			case 0x10:	/* VPD-R */
				alloc = 8;
				off = 0;
				cfg->vpd.vpd_ros = kmalloc(alloc *
				    sizeof(*cfg->vpd.vpd_ros), M_DEVBUF,
				    M_WAITOK | M_ZERO);
				state = 2;
				break;
			case 0x11:	/* VPD-W */
				alloc = 8;
				off = 0;
				cfg->vpd.vpd_w = kmalloc(alloc *
				    sizeof(*cfg->vpd.vpd_w), M_DEVBUF,
				    M_WAITOK | M_ZERO);
				state = 5;
				break;
			default:	/* Invalid data, abort */
				state = -1;
				break;
			}
			break;

		case 1:	/* Identifier String */
			cfg->vpd.vpd_ident[i++] = byte;
			remain--;
			if (remain == 0)  {
				cfg->vpd.vpd_ident[i] = '\0';
				state = 0;
			}
			break;

		case 2:	/* VPD-R Keyword Header */
			/* Grow the keyword array exponentially as needed. */
			if (off == alloc) {
				cfg->vpd.vpd_ros = krealloc(cfg->vpd.vpd_ros,
				    (alloc *= 2) * sizeof(*cfg->vpd.vpd_ros),
				    M_DEVBUF, M_WAITOK | M_ZERO);
			}
			cfg->vpd.vpd_ros[off].keyword[0] = byte;
			if (vpd_nextbyte(&vrs, &byte2)) {
				state = -2;
				break;
			}
			cfg->vpd.vpd_ros[off].keyword[1] = byte2;
			if (vpd_nextbyte(&vrs, &byte2)) {
				state = -2;
				break;
			}
			dflen = byte2;
			if (dflen == 0 &&
			    strncmp(cfg->vpd.vpd_ros[off].keyword, "RV",
			    2) == 0) {
				/*
				 * if this happens, we can't trust the rest
				 * of the VPD.
				 */
				kprintf(
				    "pci%d:%d:%d:%d: bad keyword length: %d\n",
				    cfg->domain, cfg->bus, cfg->slot,
				    cfg->func, dflen);
				cksumvalid = 0;
				state = -1;
				break;
			} else if (dflen == 0) {
				/* Zero-length value: store an empty string. */
				cfg->vpd.vpd_ros[off].value = kmalloc(1 *
				    sizeof(*cfg->vpd.vpd_ros[off].value),
				    M_DEVBUF, M_WAITOK);
				cfg->vpd.vpd_ros[off].value[0] = '\x00';
			} else
				cfg->vpd.vpd_ros[off].value = kmalloc(
				    (dflen + 1) *
				    sizeof(*cfg->vpd.vpd_ros[off].value),
				    M_DEVBUF, M_WAITOK);
			remain -= 3;	/* keyword (2) + length (1) consumed */
			i = 0;
			/* keep in sync w/ state 3's transitions */
			if (dflen == 0 && remain == 0)
				state = 0;
			else if (dflen == 0)
				state = 2;
			else
				state = 3;
			break;

		case 3:	/* VPD-R Keyword Value */
			cfg->vpd.vpd_ros[off].value[i++] = byte;
			/*
			 * The "RV" keyword's first byte completes the
			 * checksum: the sum of all bytes through it must
			 * be zero.
			 */
			if (strncmp(cfg->vpd.vpd_ros[off].keyword,
			    "RV", 2) == 0 && cksumvalid == -1) {
				if (vrs.cksum == 0)
					cksumvalid = 1;
				else {
					if (bootverbose)
						kprintf(
				"pci%d:%d:%d:%d: bad VPD cksum, remain %hhu\n",
						    cfg->domain, cfg->bus,
						    cfg->slot, cfg->func,
						    vrs.cksum);
					cksumvalid = 0;
					state = -1;
					break;
				}
			}
			dflen--;
			remain--;
			/* keep in sync w/ state 2's transitions */
			if (dflen == 0)
				cfg->vpd.vpd_ros[off++].value[i++] = '\0';
			if (dflen == 0 && remain == 0) {
				/* VPD-R section done; trim to final size. */
				cfg->vpd.vpd_rocnt = off;
				cfg->vpd.vpd_ros = krealloc(cfg->vpd.vpd_ros,
				    off * sizeof(*cfg->vpd.vpd_ros),
				    M_DEVBUF, M_WAITOK | M_ZERO);
				state = 0;
			} else if (dflen == 0)
				state = 2;
			break;

		case 4:	/* skip 'remain' bytes of data */
			remain--;
			if (remain == 0)
				state = 0;
			break;

		case 5:	/* VPD-W Keyword Header */
			/* Grow the keyword array exponentially as needed. */
			if (off == alloc) {
				cfg->vpd.vpd_w = krealloc(cfg->vpd.vpd_w,
				    (alloc *= 2) * sizeof(*cfg->vpd.vpd_w),
				    M_DEVBUF, M_WAITOK | M_ZERO);
			}
			cfg->vpd.vpd_w[off].keyword[0] = byte;
			if (vpd_nextbyte(&vrs, &byte2)) {
				state = -2;
				break;
			}
			cfg->vpd.vpd_w[off].keyword[1] = byte2;
			if (vpd_nextbyte(&vrs, &byte2)) {
				state = -2;
				break;
			}
			cfg->vpd.vpd_w[off].len = dflen = byte2;
			/* Record VPD offset of the value for later writes. */
			cfg->vpd.vpd_w[off].start = vrs.off - vrs.bytesinval;
			cfg->vpd.vpd_w[off].value = kmalloc((dflen + 1) *
			    sizeof(*cfg->vpd.vpd_w[off].value),
			    M_DEVBUF, M_WAITOK);
			remain -= 3;	/* keyword (2) + length (1) consumed */
			i = 0;
			/* keep in sync w/ state 6's transitions */
			if (dflen == 0 && remain == 0)
				state = 0;
			else if (dflen == 0)
				state = 5;
			else
				state = 6;
			break;

		case 6:	/* VPD-W Keyword Value */
			cfg->vpd.vpd_w[off].value[i++] = byte;
			dflen--;
			remain--;
			/* keep in sync w/ state 5's transitions */
			if (dflen == 0)
				cfg->vpd.vpd_w[off++].value[i++] = '\0';
			if (dflen == 0 && remain == 0) {
				/* VPD-W section done; trim to final size. */
				cfg->vpd.vpd_wcnt = off;
				cfg->vpd.vpd_w = krealloc(cfg->vpd.vpd_w,
				    off * sizeof(*cfg->vpd.vpd_w),
				    M_DEVBUF, M_WAITOK | M_ZERO);
				state = 0;
			} else if (dflen == 0)
				state = 5;
			break;

		default:
			kprintf("pci%d:%d:%d:%d: invalid state: %d\n",
			    cfg->domain, cfg->bus, cfg->slot, cfg->func,
			    state);
			state = -1;
			break;
		}
	}

	if (cksumvalid == 0 || state < -1) {
		/* read-only data bad, clean up */
		if (cfg->vpd.vpd_ros != NULL) {
			for (off = 0; cfg->vpd.vpd_ros[off].value; off++)
				kfree(cfg->vpd.vpd_ros[off].value, M_DEVBUF);
			kfree(cfg->vpd.vpd_ros, M_DEVBUF);
			cfg->vpd.vpd_ros = NULL;
		}
	}
	if (state < -1) {
		/* I/O error, clean up */
		kprintf("pci%d:%d:%d:%d: failed to read VPD data.\n",
		    cfg->domain, cfg->bus, cfg->slot, cfg->func);
		if (cfg->vpd.vpd_ident != NULL) {
			kfree(cfg->vpd.vpd_ident, M_DEVBUF);
			cfg->vpd.vpd_ident = NULL;
		}
		if (cfg->vpd.vpd_w != NULL) {
			for (off = 0; cfg->vpd.vpd_w[off].value; off++)
				kfree(cfg->vpd.vpd_w[off].value, M_DEVBUF);
			kfree(cfg->vpd.vpd_w, M_DEVBUF);
			cfg->vpd.vpd_w = NULL;
		}
	}
	cfg->vpd.vpd_cached = 1;
#undef REG
#undef WREG
}
1274
1275int
1276pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr)
1277{
1278 struct pci_devinfo *dinfo = device_get_ivars(child);
1279 pcicfgregs *cfg = &dinfo->cfg;
1280
1281 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1282 pci_read_vpd(device_get_parent(dev), cfg);
1283
1284 *identptr = cfg->vpd.vpd_ident;
1285
1286 if (*identptr == NULL)
1287 return (ENXIO);
1288
1289 return (0);
1290}
1291
1292int
1293pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw,
1294 const char **vptr)
1295{
1296 struct pci_devinfo *dinfo = device_get_ivars(child);
1297 pcicfgregs *cfg = &dinfo->cfg;
1298 int i;
1299
1300 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1301 pci_read_vpd(device_get_parent(dev), cfg);
1302
1303 for (i = 0; i < cfg->vpd.vpd_rocnt; i++)
1304 if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword,
1305 sizeof(cfg->vpd.vpd_ros[i].keyword)) == 0) {
1306 *vptr = cfg->vpd.vpd_ros[i].value;
1307 }
1308
1309 if (i != cfg->vpd.vpd_rocnt)
1310 return (0);
1311
1312 *vptr = NULL;
1313 return (ENXIO);
1314}
1315
/*
 * Find the requested capability in PCI configuration space.  On
 * success, return 0 and store the register offset of the capability
 * entry in *capreg.  Return ENOENT if the capability is not present,
 * or ENXIO if the device has no capability list at all.
 */
1320int
1321pci_find_extcap_method(device_t dev, device_t child, int capability,
1322 int *capreg)
1323{
1324 struct pci_devinfo *dinfo = device_get_ivars(child);
1325 pcicfgregs *cfg = &dinfo->cfg;
1326 u_int32_t status;
1327 u_int8_t ptr;
1328
1329 /*
1330 * Check the CAP_LIST bit of the PCI status register first.
1331 */
1332 status = pci_read_config(child, PCIR_STATUS, 2);
1333 if (!(status & PCIM_STATUS_CAPPRESENT))
1334 return (ENXIO);
1335
1336 /*
1337 * Determine the start pointer of the capabilities list.
1338 */
1339 switch (cfg->hdrtype & PCIM_HDRTYPE) {
1340 case 0:
1341 case 1:
1342 ptr = PCIR_CAP_PTR;
1343 break;
1344 case 2:
1345 ptr = PCIR_CAP_PTR_2;
1346 break;
1347 default:
1348 /* XXX: panic? */
1349 return (ENXIO); /* no extended capabilities support */
1350 }
1351 ptr = pci_read_config(child, ptr, 1);
1352
1353 /*
1354 * Traverse the capabilities list.
1355 */
1356 while (ptr != 0) {
1357 if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
1358 if (capreg != NULL)
1359 *capreg = ptr;
1360 return (0);
1361 }
1362 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1363 }
1364
1365 return (ENOENT);
1366}
1367
1368/*
1369 * Support for MSI-X message interrupts.
1370 */
cf8f3133
SZ
1371static void
1372pci_setup_msix_vector(device_t dev, u_int index, uint64_t address,
1373 uint32_t data)
4d28e78f
SZ
1374{
1375 struct pci_devinfo *dinfo = device_get_ivars(dev);
1376 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1377 uint32_t offset;
1378
f9c942fb 1379 KASSERT(msix->msix_msgnum > index, ("bogus index"));
4d28e78f
SZ
1380 offset = msix->msix_table_offset + index * 16;
1381 bus_write_4(msix->msix_table_res, offset, address & 0xffffffff);
1382 bus_write_4(msix->msix_table_res, offset + 4, address >> 32);
1383 bus_write_4(msix->msix_table_res, offset + 8, data);
1384
1385 /* Enable MSI -> HT mapping. */
1386 pci_ht_map_msi(dev, address);
1387}
1388
cf8f3133
SZ
1389static void
1390pci_mask_msix_vector(device_t dev, u_int index)
4d28e78f
SZ
1391{
1392 struct pci_devinfo *dinfo = device_get_ivars(dev);
1393 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1394 uint32_t offset, val;
1395
1396 KASSERT(msix->msix_msgnum > index, ("bogus index"));
1397 offset = msix->msix_table_offset + index * 16 + 12;
1398 val = bus_read_4(msix->msix_table_res, offset);
1399 if (!(val & PCIM_MSIX_VCTRL_MASK)) {
1400 val |= PCIM_MSIX_VCTRL_MASK;
1401 bus_write_4(msix->msix_table_res, offset, val);
1402 }
1403}
1404
cf8f3133
SZ
1405static void
1406pci_unmask_msix_vector(device_t dev, u_int index)
4d28e78f
SZ
1407{
1408 struct pci_devinfo *dinfo = device_get_ivars(dev);
1409 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1410 uint32_t offset, val;
1411
f9c942fb 1412 KASSERT(msix->msix_msgnum > index, ("bogus index"));
4d28e78f
SZ
1413 offset = msix->msix_table_offset + index * 16 + 12;
1414 val = bus_read_4(msix->msix_table_res, offset);
1415 if (val & PCIM_MSIX_VCTRL_MASK) {
1416 val &= ~PCIM_MSIX_VCTRL_MASK;
1417 bus_write_4(msix->msix_table_res, offset, val);
1418 }
1419}
1420
1421int
cf8f3133 1422pci_pending_msix_vector(device_t dev, u_int index)
4d28e78f
SZ
1423{
1424 struct pci_devinfo *dinfo = device_get_ivars(dev);
1425 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1426 uint32_t offset, bit;
1427
31646171
SZ
1428 KASSERT(msix->msix_table_res != NULL && msix->msix_pba_res != NULL,
1429 ("MSI-X is not setup yet\n"));
1430
f9c942fb 1431 KASSERT(msix->msix_msgnum > index, ("bogus index"));
4d28e78f
SZ
1432 offset = msix->msix_pba_offset + (index / 32) * 4;
1433 bit = 1 << index % 32;
1434 return (bus_read_4(msix->msix_pba_res, offset) & bit);
1435}
1436
1437/*
1438 * Restore MSI-X registers and table during resume. If MSI-X is
1439 * enabled then walk the virtual table to restore the actual MSI-X
1440 * table.
1441 */
1442static void
1443pci_resume_msix(device_t dev)
1444{
1445 struct pci_devinfo *dinfo = device_get_ivars(dev);
1446 struct pcicfg_msix *msix = &dinfo->cfg.msix;
4d28e78f 1447
fa36cf1d 1448 if (msix->msix_table_res != NULL) {
f9c942fb
SZ
1449 const struct msix_vector *mv;
1450
31646171 1451 pci_mask_msix_allvectors(dev);
4d28e78f 1452
f9c942fb
SZ
1453 TAILQ_FOREACH(mv, &msix->msix_vectors, mv_link) {
1454 u_int vector;
1455
1456 if (mv->mv_address == 0)
4d28e78f 1457 continue;
f9c942fb
SZ
1458
1459 vector = PCI_MSIX_RID2VEC(mv->mv_rid);
1460 pci_setup_msix_vector(dev, vector,
1461 mv->mv_address, mv->mv_data);
1462 pci_unmask_msix_vector(dev, vector);
4d28e78f
SZ
1463 }
1464 }
1465 pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
1466 msix->msix_ctrl, 2);
1467}
1468
/*
 * Attempt to allocate one MSI-X message at the specified vector on cpuid.
 *
 * After this function returns, the MSI-X's rid will be saved in rid0.
 */
int
pci_alloc_msix_vector_method(device_t dev, device_t child, u_int vector,
    int *rid0, int cpuid)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	struct msix_vector *mv;
	struct resource_list_entry *rle;
	int error, irq, rid;

	KASSERT(msix->msix_table_res != NULL &&
	    msix->msix_pba_res != NULL, ("MSI-X is not setup yet\n"));
	KASSERT(cpuid >= 0 && cpuid < ncpus, ("invalid cpuid %d\n", cpuid));
	KASSERT(vector < msix->msix_msgnum,
	    ("invalid MSI-X vector %u, total %d\n", vector, msix->msix_msgnum));

	if (bootverbose) {
		device_printf(child,
		    "attempting to allocate MSI-X #%u vector (%d supported)\n",
		    vector, msix->msix_msgnum);
	}

	/* Set rid according to vector number */
	rid = PCI_MSIX_VEC2RID(vector);

	/* Vector has already been allocated */
	mv = pci_find_msix_vector(child, rid);
	if (mv != NULL)
		return EBUSY;

	/* Allocate a message. */
	error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq, cpuid);
	if (error)
		return error;
	/* Record the IRQ under the vector's rid, pinned to cpuid. */
	resource_list_add(&dinfo->resources, SYS_RES_IRQ, rid,
	    irq, irq, 1, cpuid);

	if (bootverbose) {
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
		device_printf(child, "using IRQ %lu for MSI-X on cpu%d\n",
		    rle->start, cpuid);
	}

	/* Update counts of alloc'd messages. */
	msix->msix_alloc++;

	/* Track the vector; its address/data are programmed at setup time. */
	mv = kmalloc(sizeof(*mv), M_DEVBUF, M_WAITOK | M_ZERO);
	mv->mv_rid = rid;
	TAILQ_INSERT_TAIL(&msix->msix_vectors, mv, mv_link);

	*rid0 = rid;
	return 0;
}
1527
a7854dd0
SZ
/*
 * Release a single MSI-X vector previously obtained through
 * pci_alloc_msix_vector_method(): the caller must already have torn
 * the vector down (mv_address == 0) and released the IRQ resource.
 */
int
pci_release_msix_vector_method(device_t dev, device_t child, int rid)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	struct resource_list_entry *rle;
	struct msix_vector *mv;
	int irq, cpuid;

	KASSERT(msix->msix_table_res != NULL &&
	    msix->msix_pba_res != NULL, ("MSI-X is not setup yet\n"));
	KASSERT(msix->msix_alloc > 0, ("No MSI-X allocated\n"));
	KASSERT(rid > 0, ("invalid rid %d\n", rid));

	mv = pci_find_msix_vector(child, rid);
	KASSERT(mv != NULL, ("MSI-X rid %d is not allocated\n", rid));
	KASSERT(mv->mv_address == 0, ("MSI-X rid %d not teardown\n", rid));

	/* Make sure resource is no longer allocated. */
	rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
	KASSERT(rle != NULL, ("missing MSI-X resource, rid %d\n", rid));
	KASSERT(rle->res == NULL,
	    ("MSI-X resource is still allocated, rid %d\n", rid));

	/* Remember IRQ and target cpu before the entry is deleted. */
	irq = rle->start;
	cpuid = rle->cpuid;

	/* Free the resource list entries. */
	resource_list_delete(&dinfo->resources, SYS_RES_IRQ, rid);

	/* Release the IRQ. */
	PCIB_RELEASE_MSIX(device_get_parent(dev), child, irq, cpuid);

	TAILQ_REMOVE(&msix->msix_vectors, mv, mv_link);
	kfree(mv, M_DEVBUF);

	msix->msix_alloc--;
	return (0);
}
1567
1568/*
1569 * Return the max supported MSI-X messages this device supports.
1570 * Basically, assuming the MD code can alloc messages, this function
1571 * should return the maximum value that pci_alloc_msix() can return.
1572 * Thus, it is subject to the tunables, etc.
1573 */
1574int
1575pci_msix_count_method(device_t dev, device_t child)
1576{
1577 struct pci_devinfo *dinfo = device_get_ivars(child);
1578 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1579
1580 if (pci_do_msix && msix->msix_location != 0)
1581 return (msix->msix_msgnum);
1582 return (0);
1583}
1584
31646171
SZ
/*
 * Prepare the device for MSI-X use: validate that MSI-X may be used,
 * locate the already-mapped BARs holding the MSI-X table and Pending
 * Bit Array, cache them in the config softc, and mask all vectors.
 * Returns 0 on success or ENXIO/ENODEV on failure.
 */
int
pci_setup_msix(device_t dev)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	pcicfgregs *cfg = &dinfo->cfg;
	struct resource_list_entry *rle;
	struct resource *table_res, *pba_res;

	KASSERT(cfg->msix.msix_table_res == NULL &&
	    cfg->msix.msix_pba_res == NULL, ("MSI-X has been setup yet\n"));

	/* If rid 0 is allocated, then fail. */
	rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
	if (rle != NULL && rle->res != NULL)
		return (ENXIO);

	/* Already have allocated MSIs? */
	if (cfg->msi.msi_alloc != 0)
		return (ENXIO);

	/* If MSI is blacklisted for this system, fail. */
	if (pci_msi_blacklisted())
		return (ENXIO);

	/* MSI-X capability present? */
	if (cfg->msix.msix_location == 0 || cfg->msix.msix_msgnum == 0 ||
	    !pci_do_msix)
		return (ENODEV);

	KASSERT(cfg->msix.msix_alloc == 0 &&
	    TAILQ_EMPTY(&cfg->msix.msix_vectors),
	    ("MSI-X vector has been allocated\n"));

	/* Make sure the appropriate BARs are mapped. */
	rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
	    cfg->msix.msix_table_bar);
	if (rle == NULL || rle->res == NULL ||
	    !(rman_get_flags(rle->res) & RF_ACTIVE))
		return (ENXIO);
	table_res = rle->res;
	if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
		rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
		    cfg->msix.msix_pba_bar);
		if (rle == NULL || rle->res == NULL ||
		    !(rman_get_flags(rle->res) & RF_ACTIVE))
			return (ENXIO);
	}
	/* When table and PBA share a BAR, rle still refers to the table BAR. */
	pba_res = rle->res;

	cfg->msix.msix_table_res = table_res;
	cfg->msix.msix_pba_res = pba_res;

	/* Start with every vector masked. */
	pci_mask_msix_allvectors(dev);

	return 0;
}
1641
1642void
1643pci_teardown_msix(device_t dev)
1644{
1645 struct pci_devinfo *dinfo = device_get_ivars(dev);
1646 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1647
1648 KASSERT(msix->msix_table_res != NULL &&
1649 msix->msix_pba_res != NULL, ("MSI-X is not setup yet\n"));
ac0a3f31
SZ
1650 KASSERT(msix->msix_alloc == 0 && TAILQ_EMPTY(&msix->msix_vectors),
1651 ("MSI-X vector is still allocated\n"));
31646171
SZ
1652
1653 pci_mask_msix_allvectors(dev);
1654
1655 msix->msix_table_res = NULL;
1656 msix->msix_pba_res = NULL;
1657}
1658
a39a2984
SZ
1659void
1660pci_enable_msix(device_t dev)
1661{
1662 struct pci_devinfo *dinfo = device_get_ivars(dev);
1663 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1664
1665 KASSERT(msix->msix_table_res != NULL &&
1666 msix->msix_pba_res != NULL, ("MSI-X is not setup yet\n"));
1667
1668 /* Update control register to enable MSI-X. */
1669 msix->msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1670 pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
1671 msix->msix_ctrl, 2);
1672}
1673
1674void
1675pci_disable_msix(device_t dev)
1676{
1677 struct pci_devinfo *dinfo = device_get_ivars(dev);
1678 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1679
1680 KASSERT(msix->msix_table_res != NULL &&
1681 msix->msix_pba_res != NULL, ("MSI-X is not setup yet\n"));
1682
1683 /* Disable MSI -> HT mapping. */
1684 pci_ht_map_msi(dev, 0);
1685
1686 /* Update control register to disable MSI-X. */
1687 msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE;
1688 pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
1689 msix->msix_ctrl, 2);
1690}
1691
31646171
SZ
1692static void
1693pci_mask_msix_allvectors(device_t dev)
1694{
1695 struct pci_devinfo *dinfo = device_get_ivars(dev);
1696 u_int i;
1697
1698 for (i = 0; i < dinfo->cfg.msix.msix_msgnum; ++i)
1699 pci_mask_msix_vector(dev, i);
1700}
1701
f9c942fb
SZ
1702static struct msix_vector *
1703pci_find_msix_vector(device_t dev, int rid)
1704{
1705 struct pci_devinfo *dinfo = device_get_ivars(dev);
1706 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1707 struct msix_vector *mv;
1708
1709 TAILQ_FOREACH(mv, &msix->msix_vectors, mv_link) {
1710 if (mv->mv_rid == rid)
1711 return mv;
1712 }
1713 return NULL;
1714}
1715
4d28e78f
SZ
1716/*
1717 * HyperTransport MSI mapping control
1718 */
1719void
1720pci_ht_map_msi(device_t dev, uint64_t addr)
1721{
1722 struct pci_devinfo *dinfo = device_get_ivars(dev);
1723 struct pcicfg_ht *ht = &dinfo->cfg.ht;
1724
1725 if (!ht->ht_msimap)
1726 return;
1727
1728 if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) &&
1729 ht->ht_msiaddr >> 20 == addr >> 20) {
1730 /* Enable MSI -> HT mapping. */
1731 ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
1732 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1733 ht->ht_msictrl, 2);
1734 }
1735
a39a2984 1736 if (!addr && (ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE)) {
4d28e78f
SZ
1737 /* Disable MSI -> HT mapping. */
1738 ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE;
1739 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1740 ht->ht_msictrl, 2);
1741 }
1742}
1743
1744/*
1745 * Support for MSI message signalled interrupts.
1746 */
/*
 * Program the device's MSI capability with the given message address
 * and data, then set the MSI enable bit and enable MSI -> HT mapping.
 */
void
pci_enable_msi(device_t dev, uint64_t address, uint16_t data)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msi *msi = &dinfo->cfg.msi;

	/* Write data and address values. */
	pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
	    address & 0xffffffff, 4);
	/* The data register's offset depends on 64-bit address support. */
	if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
		pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR_HIGH,
		    address >> 32, 4);
		pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA_64BIT,
		    data, 2);
	} else
		pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA, data,
		    2);

	/* Enable MSI in the control register. */
	msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE;
	pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
	    2);

	/* Enable MSI -> HT mapping. */
	pci_ht_map_msi(dev, address);
}
1773
/*
 * Turn MSI off for the device: drop the MSI -> HT mapping first, then
 * clear the MSI enable bit in the control register.
 */
void
pci_disable_msi(device_t dev)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msi *msi = &dinfo->cfg.msi;

	/* Disable MSI -> HT mapping. */
	pci_ht_map_msi(dev, 0);

	/* Disable MSI in the control register. */
	msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE;
	pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
	    2);
}
1788
1789/*
1790 * Restore MSI registers during resume. If MSI is enabled then
1791 * restore the data and address registers in addition to the control
1792 * register.
1793 */
static void
pci_resume_msi(device_t dev)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msi *msi = &dinfo->cfg.msi;
	uint64_t address;
	uint16_t data;

	/* Only rewrite address/data if MSI was enabled before suspend. */
	if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) {
		address = msi->msi_addr;
		data = msi->msi_data;
		pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
		    address & 0xffffffff, 4);
		/* Data register offset depends on 64-bit address support. */
		if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
			pci_write_config(dev, msi->msi_location +
			    PCIR_MSI_ADDR_HIGH, address >> 32, 4);
			pci_write_config(dev, msi->msi_location +
			    PCIR_MSI_DATA_64BIT, data, 2);
		} else
			pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA,
			    data, 2);
	}
	/* The saved control register is restored unconditionally. */
	pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
	    2);
}
1819
4d28e78f
SZ
1820/*
1821 * Returns true if the specified device is blacklisted because MSI
1822 * doesn't work.
1823 */
1824int
1825pci_msi_device_blacklisted(device_t dev)
1826{
1827 struct pci_quirk *q;
1828
1829 if (!pci_honor_msi_blacklist)
1830 return (0);
1831
1832 for (q = &pci_quirks[0]; q->devid; q++) {
1833 if (q->devid == pci_get_devid(dev) &&
1834 q->type == PCI_QUIRK_DISABLE_MSI)
1835 return (1);
1836 }
1837 return (0);
1838}
1839
1840/*
1841 * Determine if MSI is blacklisted globally on this sytem. Currently,
1842 * we just check for blacklisted chipsets as represented by the
1843 * host-PCI bridge at device 0:0:0. In the future, it may become
1844 * necessary to check other system attributes, such as the kenv values
1845 * that give the motherboard manufacturer and model number.
1846 */
1847static int
1848pci_msi_blacklisted(void)
1849{
1850 device_t dev;
1851
1852 if (!pci_honor_msi_blacklist)
1853 return (0);
1854
1855 /* Blacklist all non-PCI-express and non-PCI-X chipsets. */
1856 if (!(pcie_chipset || pcix_chipset))
1857 return (1);
1858
1859 dev = pci_find_bsf(0, 0, 0);
1860 if (dev != NULL)
1861 return (pci_msi_device_blacklisted(dev));
1862 return (0);
1863}
1864
/*
 * Attempt to allocate count MSI messages on start_cpuid.
 *
 * If start_cpuid < 0, then the MSI messages' target CPU will be
 * selected automatically.
 *
 * If the caller explicitly specified the MSI messages' target CPU,
 * i.e. start_cpuid >= 0, then we will try to allocate the count MSI
 * messages on the specified CPU; if the allocation fails because the
 * MD code does not have enough vectors (EMSGSIZE), then we will try
 * the next available CPU, until the allocation fails on all CPUs.
 *
 * EMSGSIZE will be returned if none of the available CPUs has enough
 * vectors for the requested amount of MSI messages.  The caller
 * should either reduce the number of MSI messages requested, or
 * simply give up on using MSI.
 *
 * The available SYS_RES_IRQ resources' rids, which are >= 1, are
 * returned in the 'rid' array if the allocation succeeds.
 */
int
pci_alloc_msi_method(device_t dev, device_t child, int *rid, int count,
    int start_cpuid)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	pcicfgregs *cfg = &dinfo->cfg;
	struct resource_list_entry *rle;
	int error, i, irqs[32], cpuid = 0;
	uint16_t ctrl;

	/* MSI allows at most 32 messages and the count must be a power of 2. */
	KASSERT(count != 0 && count <= 32 && powerof2(count),
	    ("invalid MSI count %d\n", count));
	KASSERT(start_cpuid < ncpus, ("invalid cpuid %d\n", start_cpuid));

	/* If rid 0 is allocated, then fail. */
	rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
	if (rle != NULL && rle->res != NULL)
		return (ENXIO);

	/* Already have allocated messages? */
	if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_table_res != NULL)
		return (ENXIO);

	/* If MSI is blacklisted for this system, fail. */
	if (pci_msi_blacklisted())
		return (ENXIO);

	/* MSI capability present? */
	if (cfg->msi.msi_location == 0 || cfg->msi.msi_msgnum == 0 ||
	    !pci_do_msi)
		return (ENODEV);

	KASSERT(count <= cfg->msi.msi_msgnum, ("large MSI count %d, max %d\n",
	    count, cfg->msi.msi_msgnum));

	if (bootverbose) {
		device_printf(child,
		    "attempting to allocate %d MSI vector%s (%d supported)\n",
		    count, count > 1 ? "s" : "", cfg->msi.msi_msgnum);
	}

	/* Auto-select a starting cpu round-robin when none was given. */
	if (start_cpuid < 0)
		start_cpuid = atomic_fetchadd_int(&pci_msi_cpuid, 1) % ncpus;

	/*
	 * Try each cpu in turn starting at start_cpuid; only EMSGSIZE
	 * (not enough vectors on that cpu) moves on to the next cpu.
	 */
	error = EINVAL;
	for (i = 0; i < ncpus; ++i) {
		cpuid = (start_cpuid + i) % ncpus;

		error = PCIB_ALLOC_MSI(device_get_parent(dev), child, count,
		    cfg->msi.msi_msgnum, irqs, cpuid);
		if (error == 0)
			break;
		else if (error != EMSGSIZE)
			return error;
	}
	if (error)
		return error;

	/*
	 * We now have N messages mapped onto SYS_RES_IRQ resources in
	 * the irqs[] array, so add new resources starting at rid 1.
	 */
	for (i = 0; i < count; i++) {
		rid[i] = i + 1;
		resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
		    irqs[i], irqs[i], 1, cpuid);
	}

	if (bootverbose) {
		if (count == 1) {
			device_printf(child, "using IRQ %d on cpu%d for MSI\n",
			    irqs[0], cpuid);
		} else {
			int run;

			/*
			 * Be fancy and try to print contiguous runs
			 * of IRQ values as ranges.  'run' is true if
			 * we are in a range.
			 */
			device_printf(child, "using IRQs %d", irqs[0]);
			run = 0;
			for (i = 1; i < count; i++) {

				/* Still in a run? */
				if (irqs[i] == irqs[i - 1] + 1) {
					run = 1;
					continue;
				}

				/* Finish previous range. */
				if (run) {
					kprintf("-%d", irqs[i - 1]);
					run = 0;
				}

				/* Start new range. */
				kprintf(",%d", irqs[i]);
			}

			/* Unfinished range? */
			if (run)
				kprintf("-%d", irqs[count - 1]);
			kprintf(" for MSI on cpu%d\n", cpuid);
		}
	}

	/* Update control register with count (MME field is log2(count)). */
	ctrl = cfg->msi.msi_ctrl;
	ctrl &= ~PCIM_MSICTRL_MME_MASK;
	ctrl |= (ffs(count) - 1) << 4;
	cfg->msi.msi_ctrl = ctrl;
	pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2);

	/* Update counts of alloc'd messages. */
	cfg->msi.msi_alloc = count;
	cfg->msi.msi_handlers = 0;
	return (0);
}
2004
2005/* Release the MSI messages associated with this device. */
int
pci_release_msi_method(device_t dev, device_t child)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct pcicfg_msi *msi = &dinfo->cfg.msi;
	struct resource_list_entry *rle;
	int i, irqs[32], cpuid = -1;

	/* Do we have any messages to release? */
	if (msi->msi_alloc == 0)
		return (ENODEV);
	KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages"));

	/* Make sure none of the resources are allocated. */
	if (msi->msi_handlers > 0)
		return (EBUSY);
	for (i = 0; i < msi->msi_alloc; i++) {
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
		KASSERT(rle != NULL, ("missing MSI resource"));
		if (rle->res != NULL)
			return (EBUSY);
		/* All messages must target the same cpu as the first one. */
		if (i == 0) {
			cpuid = rle->cpuid;
			KASSERT(cpuid >= 0 && cpuid < ncpus,
			    ("invalid MSI target cpuid %d\n", cpuid));
		} else {
			KASSERT(rle->cpuid == cpuid,
			    ("MSI targets different cpus, "
			     "was cpu%d, now cpu%d", cpuid, rle->cpuid));
		}
		irqs[i] = rle->start;
	}

	/* Update control register with 0 count. */
	KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE),
	    ("%s: MSI still enabled", __func__));
	msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK;
	pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
	    msi->msi_ctrl, 2);

	/* Release the messages. */
	PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs,
	    cpuid);
	for (i = 0; i < msi->msi_alloc; i++)
		resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);

	/* Update alloc count. */
	msi->msi_alloc = 0;
	msi->msi_addr = 0;
	msi->msi_data = 0;
	return (0);
}
2058
2059/*
2060 * Return the max supported MSI messages this device supports.
2061 * Basically, assuming the MD code can alloc messages, this function
2062 * should return the maximum value that pci_alloc_msi() can return.
2063 * Thus, it is subject to the tunables, etc.
2064 */
2065int
2066pci_msi_count_method(device_t dev, device_t child)
2067{
2068 struct pci_devinfo *dinfo = device_get_ivars(child);
2069 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2070
2071 if (pci_do_msi && msi->msi_location != 0)
2072 return (msi->msi_msgnum);
2073 return (0);
2074}
2075
2076/* kfree pcicfgregs structure and all depending data structures */
2077
2078int
2079pci_freecfg(struct pci_devinfo *dinfo)
2080{
2081 struct devlist *devlist_head;
2082 int i;
2083
2084 devlist_head = &pci_devq;
2085
2086 if (dinfo->cfg.vpd.vpd_reg) {
2087 kfree(dinfo->cfg.vpd.vpd_ident, M_DEVBUF);
2088 for (i = 0; i < dinfo->cfg.vpd.vpd_rocnt; i++)
2089 kfree(dinfo->cfg.vpd.vpd_ros[i].value, M_DEVBUF);
2090 kfree(dinfo->cfg.vpd.vpd_ros, M_DEVBUF);
2091 for (i = 0; i < dinfo->cfg.vpd.vpd_wcnt; i++)
2092 kfree(dinfo->cfg.vpd.vpd_w[i].value, M_DEVBUF);
2093 kfree(dinfo->cfg.vpd.vpd_w, M_DEVBUF);
2094 }
2095 STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links);
2096 kfree(dinfo, M_DEVBUF);
2097
2098 /* increment the generation count */
2099 pci_generation++;
2100
2101 /* we're losing one device */
2102 pci_numdevs--;
2103 return (0);
2104}
2105
2106/*
2107 * PCI power manangement
2108 */
2109int
2110pci_set_powerstate_method(device_t dev, device_t child, int state)
2111{
2112 struct pci_devinfo *dinfo = device_get_ivars(child);
2113 pcicfgregs *cfg = &dinfo->cfg;
f4754a59
HT
2114 uint16_t status;
2115 int result, oldstate, highest, delay;
984263bc 2116
4d28e78f 2117 if (cfg->pp.pp_cap == 0)
f4754a59
HT
2118 return (EOPNOTSUPP);
2119
2120 /*
2121 * Optimize a no state change request away. While it would be OK to
2122 * write to the hardware in theory, some devices have shown odd
2123 * behavior when going from D3 -> D3.
2124 */
2125 oldstate = pci_get_powerstate(child);
2126 if (oldstate == state)
2127 return (0);
2128
2129 /*
2130 * The PCI power management specification states that after a state
2131 * transition between PCI power states, system software must
2132 * guarantee a minimal delay before the function accesses the device.
2133 * Compute the worst case delay that we need to guarantee before we
2134 * access the device. Many devices will be responsive much more
2135 * quickly than this delay, but there are some that don't respond
2136 * instantly to state changes. Transitions to/from D3 state require
2137 * 10ms, while D2 requires 200us, and D0/1 require none. The delay
2138 * is done below with DELAY rather than a sleeper function because
2139 * this function can be called from contexts where we cannot sleep.
2140 */
2141 highest = (oldstate > state) ? oldstate : state;
2142 if (highest == PCI_POWERSTATE_D3)
2143 delay = 10000;
2144 else if (highest == PCI_POWERSTATE_D2)
2145 delay = 200;
2146 else
2147 delay = 0;
4d28e78f 2148 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2)
f4754a59
HT
2149 & ~PCIM_PSTAT_DMASK;
2150 result = 0;
2151 switch (state) {
2152 case PCI_POWERSTATE_D0:
2153 status |= PCIM_PSTAT_D0;
2154 break;
2155 case PCI_POWERSTATE_D1:
4d28e78f 2156 if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0)
f4754a59
HT
2157 return (EOPNOTSUPP);
2158 status |= PCIM_PSTAT_D1;
2159 break;
2160 case PCI_POWERSTATE_D2:
4d28e78f 2161 if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0)
f4754a59
HT
2162 return (EOPNOTSUPP);
2163 status |= PCIM_PSTAT_D2;
2164 break;
2165 case PCI_POWERSTATE_D3:
2166 status |= PCIM_PSTAT_D3;
2167 break;
2168 default:
2169 return (EINVAL);
984263bc 2170 }
f4754a59
HT
2171
2172 if (bootverbose)
2173 kprintf(
4d28e78f
SZ
2174 "pci%d:%d:%d:%d: Transition from D%d to D%d\n",
2175 dinfo->cfg.domain, dinfo->cfg.bus, dinfo->cfg.slot,
2176 dinfo->cfg.func, oldstate, state);
f4754a59 2177
4d28e78f 2178 PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2);
f4754a59
HT
2179 if (delay)
2180 DELAY(delay);
2181 return (0);
984263bc
MD
2182}
2183
e126caf1 2184int
984263bc
MD
2185pci_get_powerstate_method(device_t dev, device_t child)
2186{
2187 struct pci_devinfo *dinfo = device_get_ivars(child);
2188 pcicfgregs *cfg = &dinfo->cfg;
f4754a59 2189 uint16_t status;
984263bc
MD
2190 int result;
2191
4d28e78f
SZ
2192 if (cfg->pp.pp_cap != 0) {
2193 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2);
984263bc
MD
2194 switch (status & PCIM_PSTAT_DMASK) {
2195 case PCIM_PSTAT_D0:
2196 result = PCI_POWERSTATE_D0;
2197 break;
2198 case PCIM_PSTAT_D1:
2199 result = PCI_POWERSTATE_D1;
2200 break;
2201 case PCIM_PSTAT_D2:
2202 result = PCI_POWERSTATE_D2;
2203 break;
2204 case PCIM_PSTAT_D3:
2205 result = PCI_POWERSTATE_D3;
2206 break;
2207 default:
2208 result = PCI_POWERSTATE_UNKNOWN;
2209 break;
2210 }
2211 } else {
2212 /* No support, device is always at D0 */
2213 result = PCI_POWERSTATE_D0;
2214 }
f4754a59 2215 return (result);
984263bc
MD
2216}
2217
2218/*
2219 * Some convenience functions for PCI device drivers.
2220 */
2221
2222static __inline void
4d28e78f 2223pci_set_command_bit(device_t dev, device_t child, uint16_t bit)
984263bc 2224{
4d28e78f 2225 uint16_t command;
984263bc 2226
4d28e78f
SZ
2227 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2228 command |= bit;
2229 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
984263bc
MD
2230}
2231
2232static __inline void
4d28e78f
SZ
2233pci_clear_command_bit(device_t dev, device_t child, uint16_t bit)
2234{
2235 uint16_t command;
984263bc 2236
4d28e78f
SZ
2237 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2238 command &= ~bit;
2239 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
984263bc
MD
2240}
2241
4d28e78f
SZ
2242int
2243pci_enable_busmaster_method(device_t dev, device_t child)
2244{
2245 pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
2246 return (0);
2247}
984263bc 2248
4d28e78f
SZ
2249int
2250pci_disable_busmaster_method(device_t dev, device_t child)
2251{
2252 pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
2253 return (0);
2254}
984263bc 2255
4d28e78f
SZ
2256int
2257pci_enable_io_method(device_t dev, device_t child, int space)
ed1bd994 2258{
4d28e78f
SZ
2259 uint16_t command;
2260 uint16_t bit;
2261 char *error;
ed1bd994 2262
4d28e78f
SZ
2263 bit = 0;
2264 error = NULL;
2265
2266 switch(space) {
2267 case SYS_RES_IOPORT:
2268 bit = PCIM_CMD_PORTEN;
2269 error = "port";
ed1bd994 2270 break;
4d28e78f
SZ
2271 case SYS_RES_MEMORY:
2272 bit = PCIM_CMD_MEMEN;
2273 error = "memory";
ed1bd994
MD
2274 break;
2275 default:
4d28e78f 2276 return (EINVAL);
ed1bd994 2277 }
4d28e78f
SZ
2278 pci_set_command_bit(dev, child, bit);
2279 /* Some devices seem to need a brief stall here, what do to? */
2280 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2281 if (command & bit)
2282 return (0);
2283 device_printf(child, "failed to enable %s mapping!\n", error);
2284 return (ENXIO);
ed1bd994 2285}
984263bc 2286
4d28e78f
SZ
2287int
2288pci_disable_io_method(device_t dev, device_t child, int space)
b4c0a845 2289{
4d28e78f
SZ
2290 uint16_t command;
2291 uint16_t bit;
2292 char *error;
b4c0a845 2293
4d28e78f
SZ
2294 bit = 0;
2295 error = NULL;
b4c0a845 2296
4d28e78f
SZ
2297 switch(space) {
2298 case SYS_RES_IOPORT:
2299 bit = PCIM_CMD_PORTEN;
2300 error = "port";
b4c0a845 2301 break;
4d28e78f
SZ
2302 case SYS_RES_MEMORY:
2303 bit = PCIM_CMD_MEMEN;
2304 error = "memory";
b4c0a845
SZ
2305 break;
2306 default:
4d28e78f 2307 return (EINVAL);
b4c0a845 2308 }
4d28e78f
SZ
2309 pci_clear_command_bit(dev, child, bit);
2310 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2311 if (command & bit) {
2312 device_printf(child, "failed to disable %s mapping!\n", error);
2313 return (ENXIO);
b4c0a845 2314 }
4d28e78f 2315 return (0);
b4c0a845
SZ
2316}
2317
4d28e78f
SZ
2318/*
2319 * New style pci driver. Parent device is either a pci-host-bridge or a
2320 * pci-pci-bridge. Both kinds are represented by instances of pcib.
2321 */
2322
22457186 2323void
984263bc
MD
2324pci_print_verbose(struct pci_devinfo *dinfo)
2325{
4d28e78f 2326
984263bc
MD
2327 if (bootverbose) {
2328 pcicfgregs *cfg = &dinfo->cfg;
2329
4d28e78f
SZ
2330 kprintf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n",
2331 cfg->vendor, cfg->device, cfg->revid);
2332 kprintf("\tdomain=%d, bus=%d, slot=%d, func=%d\n",
2333 cfg->domain, cfg->bus, cfg->slot, cfg->func);
2334 kprintf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n",
2335 cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype,
2336 cfg->mfdev);
2337 kprintf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n",
2338 cfg->cmdreg, cfg->statreg, cfg->cachelnsz);
85f8e2ea 2339 kprintf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n",
4d28e78f
SZ
2340 cfg->lattimer, cfg->lattimer * 30, cfg->mingnt,
2341 cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250);
984263bc 2342 if (cfg->intpin > 0)
4d28e78f
SZ
2343 kprintf("\tintpin=%c, irq=%d\n",
2344 cfg->intpin +'a' -1, cfg->intline);
2345 if (cfg->pp.pp_cap) {
2346 uint16_t status;
2347
2348 status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2);
2349 kprintf("\tpowerspec %d supports D0%s%s D3 current D%d\n",
2350 cfg->pp.pp_cap & PCIM_PCAP_SPEC,
2351 cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "",
2352 cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "",
2353 status & PCIM_PSTAT_DMASK);
2354 }
2355 if (cfg->msi.msi_location) {
2356 int ctrl;
2357
2358 ctrl = cfg->msi.msi_ctrl;
2359 kprintf("\tMSI supports %d message%s%s%s\n",
2360 cfg->msi.msi_msgnum,
2361 (cfg->msi.msi_msgnum == 1) ? "" : "s",
2362 (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "",
2363 (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":"");
2364 }
2365 if (cfg->msix.msix_location) {
2366 kprintf("\tMSI-X supports %d message%s ",
2367 cfg->msix.msix_msgnum,
2368 (cfg->msix.msix_msgnum == 1) ? "" : "s");
2369 if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar)
2370 kprintf("in map 0x%x\n",
2371 cfg->msix.msix_table_bar);
2372 else
2373 kprintf("in maps 0x%x and 0x%x\n",
2374 cfg->msix.msix_table_bar,
2375 cfg->msix.msix_pba_bar);
2376 }
d85e7311 2377 pci_print_verbose_expr(cfg);
984263bc
MD
2378 }
2379}
2380
d85e7311
SZ
2381static void
2382pci_print_verbose_expr(const pcicfgregs *cfg)
2383{
2384 const struct pcicfg_expr *expr = &cfg->expr;
2385 const char *port_name;
2386 uint16_t port_type;
2387
2388 if (!bootverbose)
2389 return;
2390
2391 if (expr->expr_ptr == 0) /* No PCI Express capability */
2392 return;
2393
2394 kprintf("\tPCI Express ver.%d cap=0x%04x",
2395 expr->expr_cap & PCIEM_CAP_VER_MASK, expr->expr_cap);
2396 if ((expr->expr_cap & PCIEM_CAP_VER_MASK) != PCIEM_CAP_VER_1)
2397 goto back;
2398
2399 port_type = expr->expr_cap & PCIEM_CAP_PORT_TYPE;
2400
2401 switch (port_type) {
2402 case PCIE_END_POINT:
2403 port_name = "DEVICE";
2404 break;
2405 case PCIE_LEG_END_POINT:
2406 port_name = "LEGDEV";
2407 break;
2408 case PCIE_ROOT_PORT:
2409 port_name = "ROOT";
2410 break;
2411 case PCIE_UP_STREAM_PORT:
2412 port_name = "UPSTREAM";
2413 break;
2414 case PCIE_DOWN_STREAM_PORT:
2415 port_name = "DOWNSTRM";
2416 break;
2417 case PCIE_PCIE2PCI_BRIDGE:
2418 port_name = "PCIE2PCI";
2419 break;
2420 case PCIE_PCI2PCIE_BRIDGE:
2421 port_name = "PCI2PCIE";
2422 break;
2423 default:
2424 port_name = NULL;
2425 break;
2426 }
2427 if ((port_type == PCIE_ROOT_PORT ||
2428 port_type == PCIE_DOWN_STREAM_PORT) &&
2429 !(expr->expr_cap & PCIEM_CAP_SLOT_IMPL))
2430 port_name = NULL;
2431 if (port_name != NULL)
2432 kprintf("[%s]", port_name);
2433
2434 if (pcie_slotimpl(cfg)) {
2435 kprintf(", slotcap=0x%08x", expr->expr_slotcap);
2436 if (expr->expr_slotcap & PCIEM_SLTCAP_HP_CAP)
2437 kprintf("[HOTPLUG]");
2438 }
2439back:
2440 kprintf("\n");
2441}
2442
984263bc 2443static int
4a5a2d63 2444pci_porten(device_t pcib, int b, int s, int f)
984263bc 2445{
4a5a2d63
JS
2446 return (PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2)
2447 & PCIM_CMD_PORTEN) != 0;
984263bc
MD
2448}
2449
2450static int
4a5a2d63 2451pci_memen(device_t pcib, int b, int s, int f)
984263bc 2452{
4a5a2d63
JS
2453 return (PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2)
2454 & PCIM_CMD_MEMEN) != 0;
984263bc
MD
2455}
2456
2457/*
2458 * Add a resource based on a pci map register. Return 1 if the map
2459 * register is a 32bit map register or 2 if it is a 64bit register.
2460 */
2461static int
4d28e78f
SZ
2462pci_add_map(device_t pcib, device_t bus, device_t dev,
2463 int b, int s, int f, int reg, struct resource_list *rl, int force,
2464 int prefetch)
2465{
2466 uint32_t map;
2467 pci_addr_t base;
2468 pci_addr_t start, end, count;
2469 uint8_t ln2size;
2470 uint8_t ln2range;
2471 uint32_t testval;
2472 uint16_t cmd;
984263bc 2473 int type;
4d28e78f
SZ
2474 int barlen;
2475 struct resource *res;
984263bc 2476
4a5a2d63 2477 map = PCIB_READ_CONFIG(pcib, b, s, f, reg, 4);
4a5a2d63
JS
2478 PCIB_WRITE_CONFIG(pcib, b, s, f, reg, 0xffffffff, 4);
2479 testval = PCIB_READ_CONFIG(pcib, b, s, f, reg, 4);
2480 PCIB_WRITE_CONFIG(pcib, b, s, f, reg, map, 4);
984263bc 2481
4d28e78f 2482 if (PCI_BAR_MEM(map)) {
984263bc 2483 type = SYS_RES_MEMORY;
4d28e78f
SZ
2484 if (map & PCIM_BAR_MEM_PREFETCH)
2485 prefetch = 1;
2486 } else
984263bc
MD
2487 type = SYS_RES_IOPORT;
2488 ln2size = pci_mapsize(testval);
2489 ln2range = pci_maprange(testval);
4d28e78f
SZ
2490 base = pci_mapbase(map);
2491 barlen = ln2range == 64 ? 2 : 1;
2492
2493 /*
2494 * For I/O registers, if bottom bit is set, and the next bit up
2495 * isn't clear, we know we have a BAR that doesn't conform to the
2496 * spec, so ignore it. Also, sanity check the size of the data
2497 * areas to the type of memory involved. Memory must be at least
2498 * 16 bytes in size, while I/O ranges must be at least 4.
2499 */
2500 if (PCI_BAR_IO(testval) && (testval & PCIM_BAR_IO_RESERVED) != 0)
2501 return (barlen);
2502 if ((type == SYS_RES_MEMORY && ln2size < 4) ||
2503 (type == SYS_RES_IOPORT && ln2size < 2))
2504 return (barlen);
2505
2506 if (ln2range == 64)
984263bc 2507 /* Read the other half of a 64bit map register */
4d28e78f
SZ
2508 base |= (uint64_t) PCIB_READ_CONFIG(pcib, b, s, f, reg + 4, 4) << 32;
2509 if (bootverbose) {
2510 kprintf("\tmap[%02x]: type %s, range %2d, base %#jx, size %2d",
2511 reg, pci_maptype(map), ln2range, (uintmax_t)base, ln2size);
2512 if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f))
2513 kprintf(", port disabled\n");
2514 else if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f))
2515 kprintf(", memory disabled\n");
2516 else
2517 kprintf(", enabled\n");
984263bc
MD
2518 }
2519
984263bc 2520 /*
4d28e78f
SZ
2521 * If base is 0, then we have problems. It is best to ignore
2522 * such entries for the moment. These will be allocated later if
2523 * the driver specifically requests them. However, some
2524 * removable busses look better when all resources are allocated,
2525 * so allow '0' to be overriden.
2526 *
2527 * Similarly treat maps whose values is the same as the test value
2528 * read back. These maps have had all f's written to them by the
2529 * BIOS in an attempt to disable the resources.
984263bc 2530 */
4d28e78f
SZ
2531 if (!force && (base == 0 || map == testval))
2532 return (barlen);
2533 if ((u_long)base != base) {
2534 device_printf(bus,
2535 "pci%d:%d:%d:%d bar %#x too many address bits",
2536 pci_get_domain(dev), b, s, f, reg);
2537 return (barlen);
984263bc 2538 }
984263bc 2539
4d28e78f
SZ
2540 /*
2541 * This code theoretically does the right thing, but has
2542 * undesirable side effects in some cases where peripherals
2543 * respond oddly to having these bits enabled. Let the user
2544 * be able to turn them off (since pci_enable_io_modes is 1 by
2545 * default).
2546 */
2547 if (pci_enable_io_modes) {
2548 /* Turn on resources that have been left off by a lazy BIOS */
2549 if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f)) {
2550 cmd = PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2);
2551 cmd |= PCIM_CMD_PORTEN;
2552 PCIB_WRITE_CONFIG(pcib, b, s, f, PCIR_COMMAND, cmd, 2);
2553 }
2554 if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f)) {
2555 cmd = PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2);
2556 cmd |= PCIM_CMD_MEMEN;
2557 PCIB_WRITE_CONFIG(pcib, b, s, f, PCIR_COMMAND, cmd, 2);
2558 }
2559 } else {
2560 if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f))
2561 return (barlen);
2562 if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f))
2563 return (barlen);
2564 }
984263bc 2565
4d28e78f
SZ
2566 count = 1 << ln2size;
2567 if (base == 0 || base == pci_mapbase(testval)) {
2568 start = 0; /* Let the parent decide. */
2569 end = ~0ULL;
2570 } else {
2571 start = base;
2572 end = base + (1 << ln2size) - 1;
984263bc 2573 }
1b000e91 2574 resource_list_add(rl, type, reg, start, end, count, -1);
984263bc 2575
4d28e78f
SZ
2576 /*
2577 * Try to allocate the resource for this BAR from our parent
2578 * so that this resource range is already reserved. The
2579 * driver for this device will later inherit this resource in
2580 * pci_alloc_resource().
2581 */
2582 res = resource_list_alloc(rl, bus, dev, type, &reg, start, end, count,
93fad519 2583 prefetch ? RF_PREFETCHABLE : 0, -1);
4d28e78f
SZ
2584 if (res == NULL) {
2585 /*
d0c4beb1
SZ
2586 * If the allocation fails, delete the resource list
2587 * entry to force pci_alloc_resource() to allocate
2588 * resources from the parent.
4d28e78f
SZ
2589 */
2590 resource_list_delete(rl, type, reg);
d0c4beb1
SZ
2591#ifdef PCI_BAR_CLEAR
2592 /* Clear the BAR */
4d28e78f 2593 start = 0;
d0c4beb1
SZ
2594#else /* !PCI_BAR_CLEAR */
2595 /*
2596 * Don't clear BAR here. Some BIOS lists HPET as a
2597 * PCI function, clearing the BAR causes HPET timer
2598 * stop ticking.
2599 */
2600 if (bootverbose) {
2601 kprintf("pci:%d:%d:%d: resource reservation failed "
bfc09ba0
MD
2602 "%#jx - %#jx\n", b, s, f,
2603 (intmax_t)start, (intmax_t)end);
d0c4beb1
SZ
2604 }
2605 return (barlen);
2606#endif /* PCI_BAR_CLEAR */
2607 } else {
4d28e78f 2608 start = rman_get_start(res);
d0c4beb1 2609 }
4d28e78f
SZ
2610 pci_write_config(dev, reg, start, 4);
2611 if (ln2range == 64)
2612 pci_write_config(dev, reg + 4, start >> 32, 4);
2613 return (barlen);
984263bc
MD
2614}
2615
201eb0a7 2616/*
4d28e78f 2617 * For ATA devices we need to decide early what addressing mode to use.
201eb0a7
TS
2618 * Legacy demands that the primary and secondary ATA ports sits on the
2619 * same addresses that old ISA hardware did. This dictates that we use
4d28e78f 2620 * those addresses and ignore the BAR's if we cannot set PCI native
201eb0a7
TS
2621 * addressing mode.
2622 */
2623static void
4d28e78f
SZ
2624pci_ata_maps(device_t pcib, device_t bus, device_t dev, int b,
2625 int s, int f, struct resource_list *rl, int force, uint32_t prefetchmask)
201eb0a7
TS
2626{
2627 int rid, type, progif;
2628#if 0
2629 /* if this device supports PCI native addressing use it */
2630 progif = pci_read_config(dev, PCIR_PROGIF, 1);
4d28e78f 2631 if ((progif & 0x8a) == 0x8a) {
201eb0a7
TS
2632 if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) &&
2633 pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) {
85f8e2ea 2634 kprintf("Trying ATA native PCI addressing mode\n");
201eb0a7
TS
2635 pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1);
2636 }
2637 }
2638#endif
201eb0a7
TS
2639 progif = pci_read_config(dev, PCIR_PROGIF, 1);
2640 type = SYS_RES_IOPORT;
2641 if (progif & PCIP_STORAGE_IDE_MODEPRIM) {
4d28e78f
SZ
2642 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(0), rl, force,
2643 prefetchmask & (1 << 0));
2644 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(1), rl, force,
2645 prefetchmask & (1 << 1));
201eb0a7
TS
2646 } else {
2647 rid = PCIR_BAR(0);
1b000e91 2648 resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8, -1);
201eb0a7 2649 resource_list_alloc(rl, bus, dev, type, &rid, 0x1f0, 0x1f7, 8,
93fad519 2650 0, -1);
201eb0a7 2651 rid = PCIR_BAR(1);
1b000e91 2652 resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1, -1);
201eb0a7 2653 resource_list_alloc(rl, bus, dev, type, &rid, 0x3f6, 0x3f6, 1,
93fad519 2654 0, -1);
201eb0a7
TS
2655 }
2656 if (progif & PCIP_STORAGE_IDE_MODESEC) {
4d28e78f
SZ
2657 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(2), rl, force,
2658 prefetchmask & (1 << 2));
2659 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(3), rl, force,
2660 prefetchmask & (1 << 3));
201eb0a7
TS
2661 } else {
2662 rid = PCIR_BAR(2);
1b000e91 2663 resource_list_add(rl, type, rid, 0x170, 0x177, 8, -1);
201eb0a7 2664 resource_list_alloc(rl, bus, dev, type, &rid, 0x170, 0x177, 8,
93fad519 2665 0, -1);
201eb0a7 2666 rid = PCIR_BAR(3);
1b000e91 2667 resource_list_add(rl, type, rid, 0x376, 0x376, 1, -1);
201eb0a7 2668 resource_list_alloc(rl, bus, dev, type, &rid, 0x376, 0x376, 1,
93fad519 2669 0, -1);
201eb0a7 2670 }
4d28e78f
SZ
2671 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(4), rl, force,
2672 prefetchmask & (1 << 4));
2673 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(5), rl, force,
2674 prefetchmask & (1 << 5));
201eb0a7 2675}
201eb0a7 2676
984263bc 2677static void
4d28e78f
SZ
2678pci_assign_interrupt(device_t bus, device_t dev, int force_route)
2679{
2680 struct pci_devinfo *dinfo = device_get_ivars(dev);
2681 pcicfgregs *cfg = &dinfo->cfg;
2682 char tunable_name[64];
2683 int irq;
2684
2685 /* Has to have an intpin to have an interrupt. */
2686 if (cfg->intpin == 0)
2687 return;
2688
2689 /* Let the user override the IRQ with a tunable. */
2690 irq = PCI_INVALID_IRQ;
2691 ksnprintf(tunable_name, sizeof(tunable_name),
3382759b
SZ
2692 "hw.pci%d.%d.%d.%d.INT%c.irq",
2693 cfg->domain, cfg->bus, cfg->slot, cfg->func, cfg->intpin + 'A' - 1);
2694 if (TUNABLE_INT_FETCH(tunable_name, &irq)) {
2695 if (irq >= 255 || irq <= 0) {
2696 irq = PCI_INVALID_IRQ;
2697 } else {
2698 BUS_CONFIG_INTR(bus, dev, irq,
2699 INTR_TRIGGER_LEVEL, INTR_POLARITY_LOW);
2700 }
2701 }
4d28e78f
SZ
2702
2703 /*
2704 * If we didn't get an IRQ via the tunable, then we either use the
2705 * IRQ value in the intline register or we ask the bus to route an
2706 * interrupt for us. If force_route is true, then we only use the
2707 * value in the intline register if the bus was unable to assign an
2708 * IRQ.
2709 */
2710 if (!PCI_INTERRUPT_VALID(irq)) {
2711 if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route)
2712 irq = PCI_ASSIGN_INTERRUPT(bus, dev);
2713 if (!PCI_INTERRUPT_VALID(irq))
2714 irq = cfg->intline;
2715 }
2716
2717 /* If after all that we don't have an IRQ, just bail. */
2718 if (!PCI_INTERRUPT_VALID(irq))
2719 return;
2720
2721 /* Update the config register if it changed. */
2722 if (irq != cfg->intline) {
2723 cfg->intline = irq;
2724 pci_write_config(dev, PCIR_INTLINE, irq, 1);
2725 }
2726
2727 /* Add this IRQ as rid 0 interrupt resource. */
d2f04fe0 2728 resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1,
bec969af 2729 machintr_legacy_intr_cpuid(irq));
4d28e78f
SZ
2730}
2731
2732void
2733pci_add_resources(device_t pcib, device_t bus, device_t dev, int force, uint32_t prefetchmask)
984263bc
MD
2734{
2735 struct pci_devinfo *dinfo = device_get_ivars(dev);
4a5a2d63 2736 pcicfgregs *cfg = &dinfo->cfg;
984263bc
MD
2737 struct resource_list *rl = &dinfo->resources;
2738 struct pci_quirk *q;
e126caf1 2739 int b, i, f, s;
984263bc 2740
e126caf1
MD
2741 b = cfg->bus;
2742 s = cfg->slot;
2743 f = cfg->func;
4d28e78f
SZ
2744
2745 /* ATA devices needs special map treatment */
201eb0a7
TS
2746 if ((pci_get_class(dev) == PCIC_STORAGE) &&
2747 (pci_get_subclass(dev) == PCIS_STORAGE_IDE) &&
d3d1ea7a
MD
2748 ((pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV) ||
2749 (!pci_read_config(dev, PCIR_BAR(0), 4) &&
2750 !pci_read_config(dev, PCIR_BAR(2), 4))) )
4d28e78f 2751 pci_ata_maps(pcib, bus, dev, b, s, f, rl, force, prefetchmask);
201eb0a7 2752 else
4d28e78f
SZ
2753 for (i = 0; i < cfg->nummaps;)
2754 i += pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(i),
2755 rl, force, prefetchmask & (1 << i));
984263bc 2756
4d28e78f
SZ
2757 /*
2758 * Add additional, quirked resources.
2759 */
984263bc
MD
2760 for (q = &pci_quirks[0]; q->devid; q++) {
2761 if (q->devid == ((cfg->device << 16) | cfg->vendor)
2762 && q->type == PCI_QUIRK_MAP_REG)
4d28e78f
SZ
2763 pci_add_map(pcib, bus, dev, b, s, f, q->arg1, rl,
2764 force, 0);
984263bc
MD
2765 }
2766
4d28e78f 2767 if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) {
4d28e78f
SZ
2768 /*
2769 * Try to re-route interrupts. Sometimes the BIOS or
2770 * firmware may leave bogus values in these registers.
2771 * If the re-route fails, then just stick with what we
2772 * have.
2773 */
2774 pci_assign_interrupt(bus, dev, 1);
4d28e78f 2775 }
984263bc
MD
2776}
2777
e126caf1 2778void
4d28e78f 2779pci_add_children(device_t dev, int domain, int busno, size_t dinfo_size)
984263bc 2780{
4d28e78f 2781#define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
4a5a2d63 2782 device_t pcib = device_get_parent(dev);
e126caf1 2783 struct pci_devinfo *dinfo;
4a5a2d63 2784 int maxslots;
e126caf1
MD
2785 int s, f, pcifunchigh;
2786 uint8_t hdrtype;
2787
4d28e78f
SZ
2788 KASSERT(dinfo_size >= sizeof(struct pci_devinfo),
2789 ("dinfo_size too small"));
4a5a2d63 2790 maxslots = PCIB_MAXSLOTS(pcib);
57e943f7 2791 for (s = 0; s <= maxslots; s++) {
e126caf1
MD
2792 pcifunchigh = 0;
2793 f = 0;
4d28e78f 2794 DELAY(1);
e126caf1
MD
2795 hdrtype = REG(PCIR_HDRTYPE, 1);
2796 if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
2797 continue;
2798 if (hdrtype & PCIM_MFDEV)
2799 pcifunchigh = PCI_FUNCMAX;
5e658043 2800 for (f = 0; f <= pcifunchigh; f++) {
4d28e78f
SZ
2801 dinfo = pci_read_device(pcib, domain, busno, s, f,
2802 dinfo_size);
984263bc 2803 if (dinfo != NULL) {
e126caf1 2804 pci_add_child(dev, dinfo);
984263bc
MD
2805 }
2806 }
2807 }
e126caf1
MD
2808#undef REG
2809}
2810
2811void
2812pci_add_child(device_t bus, struct pci_devinfo *dinfo)
2813{
2814 device_t pcib;
2815
2816 pcib = device_get_parent(bus);
2817 dinfo->cfg.dev = device_add_child(bus, NULL, -1);
2818 device_set_ivars(dinfo->cfg.dev, dinfo);
4d28e78f 2819 resource_list_init(&dinfo->resources);
638744c5
HT
2820 pci_cfg_save(dinfo->cfg.dev, dinfo, 0);
2821 pci_cfg_restore(dinfo->cfg.dev, dinfo);
e126caf1 2822 pci_print_verbose(dinfo);
4d28e78f 2823 pci_add_resources(pcib, bus, dinfo->cfg.dev, 0, 0);
984263bc
MD
2824}
2825
2826static int
4a5a2d63 2827pci_probe(device_t dev)
984263bc 2828{
984263bc 2829 device_set_desc(dev, "PCI bus");
4a5a2d63 2830
4d28e78f
SZ
2831 /* Allow other subclasses to override this driver. */
2832 return (-1000);
984263bc
MD
2833}
2834
2835static int
e126caf1
MD
2836pci_attach(device_t dev)
2837{
4d28e78f
SZ
2838 int busno, domain;
2839
2840 /*
2841 * Since there can be multiple independantly numbered PCI
2842 * busses on systems with multiple PCI domains, we can't use
2843 * the unit number to decide which bus we are probing. We ask
2844 * the parent pcib what our domain and bus numbers are.
2845 */
2846 domain = pcib_get_domain(dev);
2847 busno = pcib_get_bus(dev);
2848 if (bootverbose)
2849 device_printf(dev, "domain=%d, physical bus=%d\n",
2850 domain, busno);
e4c9c0c8 2851
4d28e78f 2852 pci_add_children(dev, domain, busno, sizeof(struct pci_devinfo));
e126caf1 2853
4d28e78f
SZ
2854 return (bus_generic_attach(dev));
2855}
2856
2857int
2858pci_suspend(device_t dev)
2859{
2860 int dstate, error, i, numdevs;
2861 device_t acpi_dev, child, *devlist;
2862 struct pci_devinfo *dinfo;
2863
2864 /*
2865 * Save the PCI configuration space for each child and set the
2866 * device in the appropriate power state for this sleep state.
2867 */
2868 acpi_dev = NULL;
2869 if (pci_do_power_resume)
2870 acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
2871 device_get_children(dev, &devlist, &numdevs);
2872 for (i = 0; i < numdevs; i++) {
2873 child = devlist[i];
2874 dinfo = (struct pci_devinfo *) device_get_ivars(child);
2875 pci_cfg_save(child, dinfo, 0);
2876 }
e126caf1 2877
4d28e78f
SZ
2878 /* Suspend devices before potentially powering them down. */
2879 error = bus_generic_suspend(dev);
2880 if (error) {
2881 kfree(devlist, M_TEMP);
2882 return (error);
2883 }
e126caf1 2884
4d28e78f
SZ
2885 /*
2886 * Always set the device to D3. If ACPI suggests a different
2887 * power state, use it instead. If ACPI is not present, the
2888 * firmware is responsible for managing device power. Skip
2889 * children who aren't attached since they are powered down
2890 * separately. Only manage type 0 devices for now.
2891 */
2892 for (i = 0; acpi_dev && i < numdevs; i++) {
2893 child = devlist[i];
2894 dinfo = (struct pci_devinfo *) device_get_ivars(child);
2895 if (device_is_attached(child) && dinfo->cfg.hdrtype == 0) {
2896 dstate = PCI_POWERSTATE_D3;
2897 ACPI_PWR_FOR_SLEEP(acpi_dev, child, &dstate);
2898 pci_set_powerstate(child, dstate);
2899 }
2900 }
2901 kfree(devlist, M_TEMP);
2902 return (0);
e126caf1
MD
2903}
2904
4d28e78f
SZ
2905int
2906pci_resume(device_t dev)
984263bc 2907{
4d28e78f
SZ
2908 int i, numdevs;
2909 device_t acpi_dev, child, *devlist;
2910 struct pci_devinfo *dinfo;
2911
2912 /*
2913 * Set each child to D0 and restore its PCI configuration space.
2914 */
2915 acpi_dev = NULL;
2916 if (pci_do_power_resume)
2917 acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
2918 device_get_children(dev, &devlist, &numdevs);
2919 for (i = 0; i < numdevs; i++) {
2920 /*
2921 * Notify ACPI we're going to D0 but ignore the result. If
2922 * ACPI is not present, the firmware is responsible for
2923 * managing device power. Only manage type 0 devices for now.
2924 */
2925 child = devlist[i];
2926 dinfo = (struct pci_devinfo *) device_get_ivars(child);
2927 if (acpi_dev && device_is_attached(child) &&
2928 dinfo->cfg.hdrtype == 0) {
2929 ACPI_PWR_FOR_SLEEP(acpi_dev, child, NULL);
2930 pci_set_powerstate(child, PCI_POWERSTATE_D0);
2931 }
2932
2933 /* Now the device is powered up, restore its config space. */
2934 pci_cfg_restore(child, dinfo);
2935 }
2936 kfree(devlist, M_TEMP);
2937 return (bus_generic_resume(dev));
2938}
2939
2940static void
2941pci_load_vendor_data(void)
2942{
2943 caddr_t vendordata, info;
2944
2945 if ((vendordata = preload_search_by_type("pci_vendor_data")) != NULL) {
2946 info = preload_search_info(vendordata, MODINFO_ADDR);
2947 pci_vendordata = *(char **)info;
2948 info = preload_search_info(vendordata, MODINFO_SIZE);
2949 pci_vendordata_size = *(size_t *)info;
2950 /* terminate the database */
2951 pci_vendordata[pci_vendordata_size] = '\n';
2952 }
2953}
2954
2955void
2956pci_driver_added(device_t dev, driver_t *driver)
2957{
2958 int numdevs;
2959 device_t *devlist;
2960 device_t child;
2961 struct pci_devinfo *dinfo;
2962 int i;
2963
2964 if (bootverbose)
2965 device_printf(dev, "driver added\n");
2966 DEVICE_IDENTIFY(driver, dev);
2967 device_get_children(dev, &devlist, &numdevs);
2968 for (i = 0; i < numdevs; i++) {
2969 child = devlist[i];
2970 if (device_get_state(child) != DS_NOTPRESENT)
2971 continue;
2972 dinfo = device_get_ivars(child);
2973 pci_print_verbose(dinfo);
2974 if (bootverbose)
2975 kprintf("pci%d:%d:%d:%d: reprobing on driver added\n",
2976 dinfo->cfg.domain, dinfo->cfg.bus, dinfo->cfg.slot,
2977 dinfo->cfg.func);
2978 pci_cfg_restore(child, dinfo);
2979 if (device_probe_and_attach(child) != 0)
2980 pci_cfg_save(child, dinfo, 1);
2981 }
2982 kfree(devlist, M_TEMP);
2983}
2984
11a49859
SZ
2985static void
2986pci_child_detached(device_t parent __unused, device_t child)
2987{
2988 /* Turn child's power off */
2989 pci_cfg_save(child, device_get_ivars(child), 1);
2990}
2991
4d28e78f
SZ
2992int
2993pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags,
0e6f0e28
SZ
2994 driver_intr_t *intr, void *arg, void **cookiep,
2995 lwkt_serialize_t serializer, const char *desc)
4d28e78f 2996{
fb9077ae 2997 int rid, error;
4d28e78f 2998 void *cookie;
fb9077ae 2999
4d28e78f 3000 error = bus_generic_setup_intr(dev, child, irq, flags, intr,
0e6f0e28 3001 arg, &cookie, serializer, desc);
4d28e78f
SZ
3002 if (error)
3003 return (error);
3004
3005 /* If this is not a direct child, just bail out. */
3006 if (device_get_parent(child) != dev) {
3007 *cookiep = cookie;
3008 return(0);
3009 }
3010
4d28e78f
SZ
3011 rid = rman_get_rid(irq);
3012 if (rid == 0) {
3013 /* Make sure that INTx is enabled */
3014 pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
3015 } else {
f9c942fb
SZ
3016 struct pci_devinfo *dinfo = device_get_ivars(child);
3017 uint64_t addr;
3018 uint32_t data;
3019
4d28e78f
SZ
3020 /*
3021 * Check to see if the interrupt is MSI or MSI-X.
3022 * Ask our parent to map the MSI and give
3023 * us the address and data register values.
3024 * If we fail for some reason, teardown the
3025 * interrupt handler.
3026 */
4d28e78f 3027 if (dinfo->cfg.msi.msi_alloc > 0) {
f9c942fb
SZ
3028 struct pcicfg_msi *msi = &dinfo->cfg.msi;
3029
3030 if (msi->msi_addr == 0) {
3031 KASSERT(msi->msi_handlers == 0,
4d28e78f
SZ
3032 ("MSI has handlers, but vectors not mapped"));
3033 error = PCIB_MAP_MSI(device_get_parent(dev),
0af900e1
SZ
3034 child, rman_get_start(irq), &addr, &data,
3035 rman_get_cpuid(irq));
4d28e78f
SZ
3036 if (error)
3037 goto bad;
f9c942fb
SZ
3038 msi->msi_addr = addr;
3039 msi->msi_data = data;
4d28e78f 3040 pci_enable_msi(child, addr, data);
984263bc 3041 }
f9c942fb 3042 msi->msi_handlers++;
4d28e78f 3043 } else {
f9c942fb
SZ
3044 struct msix_vector *mv;
3045 u_int vector;
3046
4d28e78f 3047 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
f9c942fb
SZ
3048 ("No MSI-X or MSI rid %d allocated\n", rid));
3049
3050 mv = pci_find_msix_vector(child, rid);
3051 KASSERT(mv != NULL,
3052 ("MSI-X rid %d is not allocated\n", rid));
3053 KASSERT(mv->mv_address == 0,
3054 ("MSI-X rid %d has been setup\n", rid));
3055
3056 error = PCIB_MAP_MSI(device_get_parent(dev),
3057 child, rman_get_start(irq), &addr, &data,
3058 rman_get_cpuid(irq));
3059 if (error)
3060 goto bad;
3061 mv->mv_address = addr;
3062 mv->mv_data = data;
3063
3064 vector = PCI_MSIX_RID2VEC(rid);
3065 pci_setup_msix_vector(child, vector,
3066 mv->mv_address, mv->mv_data);
3067 pci_unmask_msix_vector(child, vector);
4d28e78f
SZ
3068 }
3069
3070 /* Make sure that INTx is disabled if we are using MSI/MSIX */
3071 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
3072 bad:
3073 if (error) {
3074 (void)bus_generic_teardown_intr(dev, child, irq,
3075 cookie);
3076 return (error);
3077 }
3078 }
4d28e78f
SZ
3079 *cookiep = cookie;
3080 return (0);
3081}
3082
3083int
3084pci_teardown_intr(device_t dev, device_t child, struct resource *irq,
3085 void *cookie)
3086{
fb9077ae 3087 int rid, error;
4d28e78f
SZ
3088
3089 if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE))
3090 return (EINVAL);
3091
3092 /* If this isn't a direct child, just bail out */
3093 if (device_get_parent(child) != dev)
3094 return(bus_generic_teardown_intr(dev, child, irq, cookie));
3095
4d28e78f
SZ
3096 rid = rman_get_rid(irq);
3097 if (rid == 0) {
3098 /* Mask INTx */
3099 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
3100 } else {
f9c942fb
SZ
3101 struct pci_devinfo *dinfo = device_get_ivars(child);
3102
4d28e78f
SZ
3103 /*
3104 * Check to see if the interrupt is MSI or MSI-X. If so,
3105 * decrement the appropriate handlers count and mask the
3106 * MSI-X message, or disable MSI messages if the count
3107 * drops to 0.
3108 */
4d28e78f 3109 if (dinfo->cfg.msi.msi_alloc > 0) {
f9c942fb
SZ
3110 struct pcicfg_msi *msi = &dinfo->cfg.msi;
3111
3112 KASSERT(rid <= msi->msi_alloc,
3113 ("MSI-X index too high\n"));
3114 KASSERT(msi->msi_handlers > 0,
3115 ("MSI rid %d is not setup\n", rid));
3116
3117 msi->msi_handlers--;
3118 if (msi->msi_handlers == 0)
4d28e78f
SZ
3119 pci_disable_msi(child);
3120 } else {
f9c942fb
SZ
3121 struct msix_vector *mv;
3122
4d28e78f 3123 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
f9c942fb
SZ
3124 ("No MSI or MSI-X rid %d allocated", rid));
3125
3126 mv = pci_find_msix_vector(child, rid);
3127 KASSERT(mv != NULL,
3128 ("MSI-X rid %d is not allocated\n", rid));
3129 KASSERT(mv->mv_address != 0,
3130 ("MSI-X rid %d has not been setup\n", rid));
3131
3132 pci_mask_msix_vector(child, PCI_MSIX_RID2VEC(rid));
3133 mv->mv_address = 0;
3134 mv->mv_data = 0;
984263bc
MD
3135 }
3136 }
4d28e78f
SZ
3137 error = bus_generic_teardown_intr(dev, child, irq, cookie);
3138 if (rid > 0)
3139 KASSERT(error == 0,
3140 ("%s: generic teardown failed for MSI/MSI-X", __func__));
4d28e78f 3141 return (error);
984263bc
MD
3142}
3143
e126caf1 3144int
984263bc
MD
3145pci_print_child(device_t dev, device_t child)
3146{
3147 struct pci_devinfo *dinfo;
3148 struct resource_list *rl;
984263bc
MD
3149 int retval = 0;
3150
3151 dinfo = device_get_ivars(child);
984263bc
MD
3152 rl = &dinfo->resources;
3153
3154 retval += bus_print_child_header(dev, child);
3155
4d28e78f
SZ
3156 retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx");
3157 retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx");
3158 retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld");
984263bc 3159 if (device_get_flags(dev))
85f8e2ea 3160 retval += kprintf(" flags %#x", device_get_flags(dev));
984263bc 3161
85f8e2ea 3162 retval += kprintf(" at device %d.%d", pci_get_slot(child),
4d28e78f 3163 pci_get_function(child));
984263bc
MD
3164
3165 retval += bus_print_child_footer(dev, child);
3166
3167 return (retval);
3168}
3169
4d28e78f
SZ
3170static struct
3171{
3172 int class;
3173 int subclass;
3174 char *desc;
3175} pci_nomatch_tab[] = {
3176 {PCIC_OLD, -1, "old"},
3177 {PCIC_OLD, PCIS_OLD_NONVGA, "non-VGA display device"},
3178 {PCIC_OLD, PCIS_OLD_VGA, "VGA-compatible display device"},
3179 {PCIC_STORAGE, -1, "mass storage"},
3180 {PCIC_STORAGE, PCIS_STORAGE_SCSI, "SCSI"},
3181 {PCIC_STORAGE, PCIS_STORAGE_IDE, "ATA"},
3182 {PCIC_STORAGE, PCIS_STORAGE_FLOPPY, "floppy disk"},
3183 {PCIC_STORAGE, PCIS_STORAGE_IPI, "IPI"},
3184 {PCIC_STORAGE, PCIS_STORAGE_RAID, "RAID"},
3185 {PCIC_STORAGE, PCIS_STORAGE_ATA_ADMA, "ATA (ADMA)"},
3186 {PCIC_STORAGE, PCIS_STORAGE_SATA, "SATA"},
3187 {PCIC_STORAGE, PCIS_STORAGE_SAS, "SAS"},
3188 {PCIC_NETWORK, -1, "network"},
3189 {PCIC_NETWORK, PCIS_NETWORK_ETHERNET, "ethernet"},
3190 {PCIC_NETWORK, PCIS_NETWORK_TOKENRING, "token ring"},
3191 {PCIC_NETWORK, PCIS_NETWORK_FDDI, "fddi"},
3192 {PCIC_NETWORK, PCIS_NETWORK_ATM, "ATM"},
3193 {PCIC_NETWORK, PCIS_NETWORK_ISDN, "ISDN"},
3194 {PCIC_DISPLAY, -1, "display"},
3195 {PCIC_DISPLAY, PCIS_DISPLAY_VGA, "VGA"},
3196 {PCIC_DISPLAY, PCIS_DISPLAY_XGA, "XGA"},
3197 {PCIC_DISPLAY, PCIS_DISPLAY_3D, "3D"},
3198 {PCIC_MULTIMEDIA, -1, "multimedia"},
3199 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_VIDEO, "video"},
3200 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_AUDIO, "audio"},
3201 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_TELE, "telephony"},
3202 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_HDA, "HDA"},
3203 {PCIC_MEMORY, -1, "memory"},
3204 {PCIC_MEMORY, PCIS_MEMORY_RAM, "RAM"},
3205 {PCIC_MEMORY, PCIS_MEMORY_FLASH, "flash"},
3206 {PCIC_BRIDGE, -1, "bridge"},
3207 {PCIC_BRIDGE, PCIS_BRIDGE_HOST, "HOST-PCI"},
3208 {PCIC_BRIDGE, PCIS_BRIDGE_ISA, "PCI-ISA"},
3209 {PCIC_BRIDGE, PCIS_BRIDGE_EISA, "PCI-EISA"},
3210 {PCIC_BRIDGE, PCIS_BRIDGE_MCA, "PCI-MCA"},
3211 {PCIC_BRIDGE, PCIS_BRIDGE_PCI, "PCI-PCI"},
3212 {PCIC_BRIDGE, PCIS_BRIDGE_PCMCIA, "PCI-PCMCIA"},
3213 {PCIC_BRIDGE, PCIS_BRIDGE_NUBUS, "PCI-NuBus"},
3214 {PCIC_BRIDGE, PCIS_BRIDGE_CARDBUS, "PCI-CardBus"},
3215 {PCIC_BRIDGE, PCIS_BRIDGE_RACEWAY, "PCI-RACEway"},
3216 {PCIC_SIMPLECOMM, -1, "simple comms"},
3217 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_UART, "UART"}, /* could detect 16550 */
3218 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_PAR, "parallel port"},
3219 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MULSER, "multiport serial"},
3220 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MODEM, "generic modem"},
3221 {PCIC_BASEPERIPH, -1, "base peripheral"},
3222 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PIC, "interrupt controller"},
3223 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_DMA, "DMA controller"},
3224 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_TIMER, "timer"},
3225 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_RTC, "realtime clock"},
3226 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PCIHOT, "PCI hot-plug controller"},
3227 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_SDHC, "SD host controller"},
3228 {PCIC_INPUTDEV, -1, "input device"},
3229 {PCIC_INPUTDEV, PCIS_INPUTDEV_KEYBOARD, "keyboard"},
3230 {PCIC_INPUTDEV, PCIS_INPUTDEV_DIGITIZER,"digitizer"},
3231 {PCIC_INPUTDEV, PCIS_INPUTDEV_MOUSE, "mouse"},
3232 {PCIC_INPUTDEV, PCIS_INPUTDEV_SCANNER, "scanner"},
3233 {PCIC_INPUTDEV, PCIS_INPUTDEV_GAMEPORT, "gameport"},
3234 {PCIC_DOCKING, -1, "docking station"},
3235 {PCIC_PROCESSOR, -1, "processor"},
3236 {PCIC_SERIALBUS, -1, "serial bus"},
3237 {PCIC_SERIALBUS, PCIS_SERIALBUS_FW, "FireWire"},
3238 {PCIC_SERIALBUS, PCIS_SERIALBUS_ACCESS, "AccessBus"},
3239 {PCIC_SERIALBUS, PCIS_SERIALBUS_SSA, "SSA"},
3240 {PCIC_SERIALBUS, PCIS_SERIALBUS_USB, "USB"},
3241 {PCIC_SERIALBUS, PCIS_SERIALBUS_FC, "Fibre Channel"},
3242 {PCIC_SERIALBUS, PCIS_SERIALBUS_SMBUS, "SMBus"},
3243 {PCIC_WIRELESS, -1, "wireless controller"},
3244 {PCIC_WIRELESS, PCIS_WIRELESS_IRDA, "iRDA"},
3245 {PCIC_WIRELESS, PCIS_WIRELESS_IR, "IR"},
3246 {PCIC_WIRELESS, PCIS_WIRELESS_RF, "RF"},
3247 {PCIC_INTELLIIO, -1, "intelligent I/O controller"},
3248 {PCIC_INTELLIIO, PCIS_INTELLIIO_I2O, "I2O"},
3249 {PCIC_SATCOM, -1, "satellite communication"},
3250 {PCIC_SATCOM, PCIS_SATCOM_TV, "sat TV"},
3251 {PCIC_SATCOM, PCIS_SATCOM_AUDIO, "sat audio"},
3252 {PCIC_SATCOM, PCIS_SATCOM_VOICE, "sat voice"},
3253 {PCIC_SATCOM, PCIS_SATCOM_DATA, "sat data"},
3254 {PCIC_CRYPTO, -1, "encrypt/decrypt"},
3255 {PCIC_CRYPTO, PCIS_CRYPTO_NETCOMP, "network/computer crypto"},
3256 {PCIC_CRYPTO, PCIS_CRYPTO_ENTERTAIN, "entertainment crypto"},
3257 {PCIC_DASP, -1, "dasp"},
3258 {PCIC_DASP, PCIS_DASP_DPIO, "DPIO module"},
3259 {0, 0, NULL}
3260};
3261
e126caf1 3262void
984263bc
MD
3263pci_probe_nomatch(device_t dev, device_t child)
3264{
4d28e78f
SZ
3265 int i;
3266 char *cp, *scp, *device;
984263bc 3267
4d28e78f
SZ
3268 /*
3269 * Look for a listing for this device in a loaded device database.
3270 */
3271 if ((device = pci_describe_device(child)) != NULL) {
3272 device_printf(dev, "<%s>", device);
3273 kfree(device, M_DEVBUF);
3274 } else {
3275 /*
3276 * Scan the class/subclass descriptions for a general
3277 * description.
3278 */
3279 cp = "unknown";
3280 scp = NULL;
3281 for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) {
3282 if (pci_nomatch_tab[i].class == pci_get_class(child)) {
3283 if (pci_nomatch_tab[i].subclass == -1) {
3284 cp = pci_nomatch_tab[i].desc;
3285 } else if (pci_nomatch_tab[i].subclass ==
3286 pci_get_subclass(child)) {
3287 scp = pci_nomatch_tab[i].desc;
3288 }
3289 }
3290 }
3291 device_printf(dev, "<%s%s%s>",
3292 cp ? cp : "",
3293 ((cp != NULL) && (scp != NULL)) ? ", " : "",
3294 scp ? scp : "");
3295 }
6a45dbfa
SZ
3296 kprintf(" (vendor 0x%04x, dev 0x%04x) at device %d.%d",
3297 pci_get_vendor(child), pci_get_device(child),
3298 pci_get_slot(child), pci_get_function(child));
3299 if (pci_get_intpin(child) > 0) {
3300 int irq;
3301
3302 irq = pci_get_irq(child);
3303 if (PCI_INTERRUPT_VALID(irq))
3304 kprintf(" irq %d", irq);
3305 }
3306 kprintf("\n");
3307
638744c5 3308 pci_cfg_save(child, (struct pci_devinfo *)device_get_ivars(child), 1);
984263bc
MD
3309}
3310
4d28e78f
SZ
3311/*
3312 * Parse the PCI device database, if loaded, and return a pointer to a
3313 * description of the device.
3314 *
3315 * The database is flat text formatted as follows:
3316 *
3317 * Any line not in a valid format is ignored.
3318 * Lines are terminated with newline '\n' characters.
3319 *
3320 * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then
3321 * the vendor name.
3322 *
3323 * A DEVICE line is entered immediately below the corresponding VENDOR ID.
3324 * - devices cannot be listed without a corresponding VENDOR line.
3325 * A DEVICE line consists of a TAB, the 4 digit (hex) device code,
3326 * another TAB, then the device name.
3327 */
3328
3329/*
3330 * Assuming (ptr) points to the beginning of a line in the database,
3331 * return the vendor or device and description of the next entry.
3332 * The value of (vendor) or (device) inappropriate for the entry type
3333 * is set to -1. Returns nonzero at the end of the database.
3334 *
3335 * Note that this is slightly unrobust in the face of corrupt data;
3336 * we attempt to safeguard against this by spamming the end of the
3337 * database with a newline when we initialise.
3338 */
3339static int
3340pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc)
3341{
3342 char *cp = *ptr;
3343 int left;
3344
3345 *device = -1;
3346 *vendor = -1;
3347 **desc = '\0';
3348 for (;;) {
3349 left = pci_vendordata_size - (cp - pci_vendordata);
3350 if (left <= 0) {
3351 *ptr = cp;
3352 return(1);
3353 }
3354
3355 /* vendor entry? */
3356 if (*cp != '\t' &&
3357 ksscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2)
3358 break;
3359 /* device entry? */
3360 if (*cp == '\t' &&
3361 ksscanf(cp, "%x\t%80[^\n]", device, *desc) == 2)
3362 break;
3363
3364 /* skip to next line */
3365 while (*cp != '\n' && left > 0) {
3366 cp++;
3367 left--;
3368 }
3369 if (*cp == '\n') {
3370 cp++;
3371 left--;
3372 }
3373 }
3374 /* skip to next line */
3375 while (*cp != '\n' && left > 0) {
3376 cp++;
3377 left--;
3378 }
3379 if (*cp == '\n' && left > 0)
3380 cp++;
3381 *ptr = cp;
3382 return(0);
3383}
3384
3385static char *
3386pci_describe_device(device_t dev)
3387{
3388 int vendor, device;
3389 char *desc, *vp, *dp, *line;
3390
3391 desc = vp = dp = NULL;
3392
3393 /*
3394 * If we have no vendor data, we can't do anything.
3395 */
3396 if (pci_vendordata == NULL)
3397 goto out;
3398
3399 /*
3400 * Scan the vendor data looking for this device
3401 */
3402 line = pci_vendordata;
3403 if ((vp = kmalloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
3404 goto out;
3405 for (;;) {
3406 if (pci_describe_parse_line(&line, &vendor, &device, &vp))
3407 goto out;
3408 if (vendor == pci_get_vendor(dev))
3409 break;
3410 }
3411 if ((dp = kmalloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
3412 goto out;
3413 for (;;) {
3414 if (pci_describe_parse_line(&line, &vendor, &device, &dp)) {
3415 *dp = 0;
3416 break;
3417 }
3418 if (vendor != -1) {
3419 *dp = 0;
3420 break;
3421 }
3422 if (device == pci_get_device(dev))
3423 break;
3424 }
3425 if (dp[0] == '\0')
3426 ksnprintf(dp, 80, "0x%x", pci_get_device(dev));
3427 if ((desc = kmalloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) !=
3428 NULL)
3429 ksprintf(desc, "%s, %s", vp, dp);
3430 out:
3431 if (vp != NULL)
3432 kfree(vp, M_DEVBUF);
3433 if (dp != NULL)
3434 kfree(dp, M_DEVBUF);
3435 return(desc);
3436}
3437
22457186 3438int
4a5a2d63 3439pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
984263bc
MD
3440{
3441 struct pci_devinfo *dinfo;
3442 pcicfgregs *cfg;
3443
3444 dinfo = device_get_ivars(child);
3445 cfg = &dinfo->cfg;
3446
3447 switch (which) {
4d28e78f
SZ
3448 case PCI_IVAR_ETHADDR:
3449 /*
3450 * The generic accessor doesn't deal with failure, so
3451 * we set the return value, then return an error.
3452 */
3453 *((uint8_t **) result) = NULL;
3454 return (EINVAL);
984263bc
MD
3455 case PCI_IVAR_SUBVENDOR:
3456 *result = cfg->subvendor;
3457 break;
3458 case PCI_IVAR_SUBDEVICE:
3459 *result = cfg->subdevice;
3460 break;
3461 case PCI_IVAR_VENDOR:
3462 *result = cfg->vendor;
3463 break;
3464 case PCI_IVAR_DEVICE:
3465 *result = cfg->device;
3466 break;
3467 case PCI_IVAR_DEVID:
3468 *result = (cfg->device << 16) | cfg->vendor;
3469 break;
3470 case PCI_IVAR_CLASS:
3471 *result = cfg->baseclass;
3472 break;
3473 case PCI_IVAR_SUBCLASS:
3474 *result = cfg->subclass;
3475 break;
3476 case PCI_IVAR_PROGIF:
3477 *result = cfg->progif;
3478 break;
3479 case PCI_IVAR_REVID:
3480 *result = cfg->revid;
3481 break;
3482 case PCI_IVAR_INTPIN:
3483 *result = cfg->intpin;
3484 break;
3485 case PCI_IVAR_IRQ:
3486 *result = cfg->intline;
3487 break;
4d28e78f
SZ
3488 case PCI_IVAR_DOMAIN:
3489 *result = cfg->domain;
3490 break;
984263bc
MD
3491 case PCI_IVAR_BUS:
3492 *result = cfg->bus;
3493 break;
3494 case PCI_IVAR_SLOT:
3495 *result = cfg->slot;
3496 break;
3497 case PCI_IVAR_FUNCTION:
3498 *result = cfg->func;
3499 break;
4d28e78f
SZ
3500 case PCI_IVAR_CMDREG:
3501 *result = cfg->cmdreg;
984263bc 3502 break;
4d28e78f
SZ
3503 case PCI_IVAR_CACHELNSZ:
3504 *result = cfg->cachelnsz;
984263bc 3505 break;
4d28e78f
SZ
3506 case PCI_IVAR_MINGNT:
3507 *result = cfg->mingnt;
c7e4e7eb 3508 break;
4d28e78f
SZ
3509 case PCI_IVAR_MAXLAT:
3510 *result = cfg->maxlat;
c7e4e7eb 3511 break;
4d28e78f
SZ
3512 case PCI_IVAR_LATTIMER:
3513 *result = cfg->lattimer;
0254566f 3514 break;
d85e7311
SZ
3515 case PCI_IVAR_PCIXCAP_PTR:
3516 *result = cfg->pcix.pcix_ptr;
3517 break;
3518 case PCI_IVAR_PCIECAP_PTR:
3519 *result = cfg->expr.expr_ptr;
3520 break;
3521 case PCI_IVAR_VPDCAP_PTR:
3522 *result = cfg->vpd.vpd_reg;
3523 break;
984263bc 3524 default:
4d28e78f 3525 return (ENOENT);
984263bc 3526 }
4d28e78f 3527 return (0);
984263bc
MD
3528}
3529
22457186 3530int
984263bc
MD
3531pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
3532{
3533 struct pci_devinfo *dinfo;
984263bc
MD
3534
3535 dinfo = device_get_ivars(child);
984263bc
MD
3536
3537 switch (which) {
4d28e78f
SZ
3538 case PCI_IVAR_INTPIN:
3539 dinfo->cfg.intpin = value;
3540 return (0);
3541 case PCI_IVAR_ETHADDR:
984263bc
MD
3542 case PCI_IVAR_SUBVENDOR:
3543 case PCI_IVAR_SUBDEVICE:
3544 case PCI_IVAR_VENDOR:
3545 case PCI_IVAR_DEVICE:
3546 case PCI_IVAR_DEVID:
3547 case PCI_IVAR_CLASS:
3548 case PCI_IVAR_SUBCLASS:
3549 case PCI_IVAR_PROGIF:
3550 case PCI_IVAR_REVID:
984263bc 3551 case PCI_IVAR_IRQ:
4d28e78f 3552 case PCI_IVAR_DOMAIN:
984263bc
MD
3553 case PCI_IVAR_BUS:
3554 case PCI_IVAR_SLOT:
3555 case PCI_IVAR_FUNCTION:
4d28e78f 3556 return (EINVAL); /* disallow for now */
984263bc 3557
984263bc 3558 default:
4d28e78f
SZ
3559 return (ENOENT);
3560 }
3561}
#ifdef notyet
#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>
#include <sys/cons.h>

/*
 * DDB command: walk the global PCI device queue and print one line of
 * identification per device (class, card, chip, revision, header type).
 * NOTE: whole section is compiled out via "notyet".
 */
DB_SHOW_COMMAND(pciregs, db_pci_dump)
{
	struct pci_devinfo *dinfo;
	struct devlist *devlist_head;
	struct pci_conf *p;
	const char *name;
	int i, error, none_count;

	none_count = 0;
	/* get the head of the device queue */
	devlist_head = &pci_devq;

	/*
	 * Go through the list of devices and print out devices
	 */
	for (error = 0, i = 0, dinfo = STAILQ_FIRST(devlist_head);
	    (dinfo != NULL) && (error == 0) && (i < pci_numdevs) &&
	    !db_pager_quit;
	    dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {

		/* Populate pd_name and pd_unit */
		name = NULL;
		if (dinfo->cfg.dev)
			name = device_get_name(dinfo->cfg.dev);

		p = &dinfo->conf;
		db_kprintf("%s%d@pci%d:%d:%d:%d:\tclass=0x%06x card=0x%08x "
		    "chip=0x%08x rev=0x%02x hdr=0x%02x\n",
		    (name && *name) ? name : "none",
		    (name && *name) ? (int)device_get_unit(dinfo->cfg.dev) :
		    none_count++,
		    p->pc_sel.pc_domain, p->pc_sel.pc_bus, p->pc_sel.pc_dev,
		    p->pc_sel.pc_func, (p->pc_class << 16) |
		    (p->pc_subclass << 8) | p->pc_progif,
		    (p->pc_subdevice << 16) | p->pc_subvendor,
		    (p->pc_device << 16) | p->pc_vendor,
		    p->pc_revid, p->pc_hdr);
	}
}
#endif /* DDB */
#endif
984263bc 3613
201eb0a7 3614static struct resource *
4d28e78f
SZ
3615pci_alloc_map(device_t dev, device_t child, int type, int *rid,
3616 u_long start, u_long end, u_long count, u_int flags)
201eb0a7
TS
3617{
3618 struct pci_devinfo *dinfo = device_get_ivars(child);
3619 struct resource_list *rl = &dinfo->resources;
3620 struct resource_list_entry *rle;
3621 struct resource *res;
4d28e78f 3622 pci_addr_t map, testval;
201eb0a7
TS
3623 int mapsize;
3624
3625 /*
3626 * Weed out the bogons, and figure out how large the BAR/map
4d28e78f 3627 * is. Bars that read back 0 here are bogus and unimplemented.
201eb0a7 3628 * Note: atapci in legacy mode are special and handled elsewhere
4d28e78f 3629 * in the code. If you have a atapci device in legacy mode and
201eb0a7
TS
3630 * it fails here, that other code is broken.
3631 */
3632 res = NULL;
3633 map = pci_read_config(child, *rid, 4);
3634 pci_write_config(child, *rid, 0xffffffff, 4);
3635 testval = pci_read_config(child, *rid, 4);
4d28e78f
SZ
3636 if (pci_maprange(testval) == 64)
3637 map |= (pci_addr_t)pci_read_config(child, *rid + 4, 4) << 32;
201eb0a7
TS
3638 if (pci_mapbase(testval) == 0)
3639 goto out;
4d28e78f
SZ
3640
3641 /*
3642 * Restore the original value of the BAR. We may have reprogrammed
3643 * the BAR of the low-level console device and when booting verbose,
3644 * we need the console device addressable.
3645 */
3646 pci_write_config(child, *rid, map, 4);
3647
3648 if (PCI_BAR_MEM(testval)) {
201eb0a7
TS
3649 if (type != SYS_RES_MEMORY) {
3650 if (bootverbose)
4d28e78f
SZ
3651 device_printf(dev,
3652 "child %s requested type %d for rid %#x,"
3653 " but the BAR says it is an memio\n",
3654 device_get_nameunit(child), type, *rid);
201eb0a7
TS
3655 goto out;
3656 }
3657 } else {
3658 if (type != SYS_RES_IOPORT) {
3659 if (bootverbose)
4d28e78f
SZ
3660 device_printf(dev,
3661 "child %s requested type %d for rid %#x,"
3662 " but the BAR says it is an ioport\n",
3663 device_get_nameunit(child), type, *rid);
201eb0a7
TS
3664 goto out;
3665 }
3666 }
3667 /*
3668 * For real BARs, we need to override the size that
3669 * the driver requests, because that's what the BAR
3670 * actually uses and we would otherwise have a
3671 * situation where we might allocate the excess to
3672 * another driver, which won't work.
3673 */
3674 mapsize = pci_mapsize(testval);
4d28e78f 3675 count = 1UL << mapsize;
201eb0a7 3676 if (RF_ALIGNMENT(flags) < mapsize)
4d28e78f
SZ
3677 flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize);
3678 if (PCI_BAR_MEM(testval) && (testval & PCIM_BAR_MEM_PREFETCH))
3679 flags |= RF_PREFETCHABLE;
3680
201eb0a7
TS
3681 /*
3682 * Allocate enough resource, and then write back the
4d28e78f 3683 * appropriate bar for that resource.
201eb0a7
TS
3684 */
3685 res = BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid,
4f7fe8c7 3686 start, end, count, flags, -1);
201eb0a7 3687 if (res == NULL) {
4d28e78f
SZ
3688 device_printf(child,
3689 "%#lx bytes of rid %#x res %d failed (%#lx, %#lx).\n",
3690 count, *rid, type, start, end);
201eb0a7
TS
3691 goto out;
3692 }
1b000e91 3693 resource_list_add(rl, type, *rid, start, end, count, -1);
201eb0a7
TS
3694 rle = resource_list_find(rl, type, *rid);
3695 if (rle == NULL)
3696 panic("pci_alloc_map: unexpectedly can't find resource.");
3697 rle->res = res;
3698 rle->start = rman_get_start(res);
3699 rle->end = rman_get_end(res);
3700 rle->count = count;
3701 if (bootverbose)
4d28e78f
SZ
3702 device_printf(child,
3703 "Lazy allocation of %#lx bytes rid %#x type %d at %#lx\n",
3704 count, *rid, type, rman_get_start(res));
201eb0a7
TS
3705 map = rman_get_start(res);
3706out:;
3707 pci_write_config(child, *rid, map, 4);
4d28e78f
SZ
3708 if (pci_maprange(testval) == 64)
3709 pci_write_config(child, *rid + 4, map >> 32, 4);
3710 return (res);
201eb0a7 3711}
4d28e78f 3712
201eb0a7 3713
261fa16d 3714struct resource *
984263bc 3715pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
4f7fe8c7 3716 u_long start, u_long end, u_long count, u_int flags, int cpuid)
984263bc
MD
3717{
3718 struct pci_devinfo *dinfo = device_get_ivars(child);
3719 struct resource_list *rl = &dinfo->resources;
201eb0a7 3720 struct resource_list_entry *rle;
984263bc 3721 pcicfgregs *cfg = &dinfo->cfg;
09e7d9f3 3722
984263bc
MD
3723 /*
3724 * Perform lazy resource allocation
984263bc
MD
3725 */
3726 if (device_get_parent(child) == dev) {
de67e43b
JS
3727 switch (type) {
3728 case SYS_RES_IRQ:
4d28e78f
SZ
3729 /*
3730 * Can't alloc legacy interrupt once MSI messages
3731 * have been allocated.
3732 */
4d28e78f
SZ
3733 if (*rid == 0 && (cfg->msi.msi_alloc > 0 ||
3734 cfg->msix.msix_alloc > 0))
3735 return (NULL);
4d28e78f
SZ
3736 /*
3737 * If the child device doesn't have an
3738 * interrupt routed and is deserving of an
3739 * interrupt, try to assign it one.
3740 */
3741 if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) &&
3742 (cfg->intpin != 0))
3743 pci_assign_interrupt(dev, child, 0);
3744 break;
de67e43b
JS
3745 case SYS_RES_IOPORT:
3746 case SYS_RES_MEMORY:
3747 if (*rid < PCIR_BAR(cfg->nummaps)) {
3748 /*
3749 * Enable the I/O mode. We should
3750 * also be assigning resources too
3751 * when none are present. The
3752 * resource_list_alloc kind of sorta does
3753 * this...
3754 */
3755 if (PCI_ENABLE_IO(dev, child, type))
3756 return (NULL);
984263bc 3757 }
201eb0a7
TS
3758 rle = resource_list_find(rl, type, *rid);
3759 if (rle == NULL)
4d28e78f
SZ
3760 return (pci_alloc_map(dev, child, type, rid,
3761 start, end, count, flags));
820c1612 3762 break;
984263bc 3763 }
201eb0a7
TS
3764 /*
3765 * If we've already allocated the resource, then
4d28e78f 3766 * return it now. But first we may need to activate
201eb0a7 3767 * it, since we don't allocate the resource as active
4d28e78f 3768 * above. Normally this would be done down in the
201eb0a7 3769 * nexus, but since we short-circuit that path we have
4d28e78f 3770 * to do its job here. Not sure if we should kfree the
201eb0a7 3771 * resource if it fails to activate.
201eb0a7
TS
3772 */
3773 rle = resource_list_find(rl, type, *rid);
3774 if (rle != NULL && rle->res != NULL) {
3775 if (bootverbose)
4d28e78f
SZ
3776 device_printf(child,
3777 "Reserved %#lx bytes for rid %#x type %d at %#lx\n",
3778 rman_get_size(rle->res), *rid, type,
3779 rman_get_start(rle->res));
201eb0a7
TS
3780 if ((flags & RF_ACTIVE) &&
3781 bus_generic_activate_resource(dev, child, type,
4d28e78f
SZ
3782 *rid, rle->res) != 0)
3783 return (NULL);
3784 return (rle->res);
201eb0a7 3785 }
984263bc 3786 }
4d28e78f 3787 return (resource_list_alloc(rl, dev, child, type, rid,
4f7fe8c7 3788 start, end, count, flags, cpuid));
984263bc
MD
3789}
3790
4d28e78f
SZ
3791void
3792pci_delete_resource(device_t dev, device_t child, int type, int rid)
984263bc 3793{
4d28e78f
SZ
3794 struct pci_devinfo *dinfo;
3795 struct resource_list *rl;
984263bc
MD
3796 struct resource_list_entry *rle;
3797
4d28e78f
SZ
3798 if (device_get_parent(child) != dev)
3799 return;
984263bc 3800
4d28e78f
SZ
3801 dinfo = device_get_ivars(child);
3802 rl = &dinfo->resources;
3803 rle = resource_list_find(rl, type, rid);
3804 if (rle) {
3805 if (rle->res) {
3806 if (rman_get_device(rle->res) != dev ||
3807 rman_get_flags(rle->res) & RF_ACTIVE) {
3808 device_printf(dev, "delete_resource: "
3809 "Resource still owned by child, oops. "
3810 "(type=%d, rid=%d, addr=%lx)\n",
3811 rle->type, rle->rid,
3812 rman_get_start(rle->res));
3813 return;
3814 }
3815 bus_release_resource(dev, type, rid, rle->res);
3816 }
3817 resource_list_delete(rl, type, rid);
3818 }
3819 /*
3820 * Why do we turn off the PCI configuration BAR when we delete a
3821 * resource? -- imp
3822 */
3823 pci_write_config(child, rid, 0, 4);
3824 BUS_DELETE_RESOURCE(device_get_parent(dev), child, type, rid);
984263bc
MD
3825}
3826
e126caf1
MD
3827struct resource_list *
3828pci_get_resource_list (device_t dev, device_t child)
3829{
4d28e78f 3830 struct pci_devinfo *dinfo = device_get_ivars(child);
e126caf1 3831
bcc66dfa
SZ
3832 if (dinfo == NULL)
3833 return (NULL);
3834
b0486c83 3835 return (&dinfo->resources);
e126caf1
MD
3836}
3837
4d28e78f 3838uint32_t
984263bc
MD
3839pci_read_config_method(device_t dev, device_t child, int reg, int width)
3840{
3841 struct pci_devinfo *dinfo = device_get_ivars(child);
3842 pcicfgregs *cfg = &dinfo->cfg;
4a5a2d63 3843
4d28e78f
SZ
3844 return (PCIB_READ_CONFIG(device_get_parent(dev),
3845 cfg->bus, cfg->slot, cfg->func, reg, width));
984263bc
MD
3846}
3847
e126caf1 3848void
984263bc 3849pci_write_config_method(device_t dev, device_t child, int reg,
4d28e78f 3850 uint32_t val, int width)
984263bc
MD
3851{
3852 struct pci_devinfo *dinfo = device_get_ivars(child);
3853 pcicfgregs *cfg = &dinfo->cfg;
4a5a2d63
JS
3854
3855 PCIB_WRITE_CONFIG(device_get_parent(dev),
4d28e78f 3856 cfg->bus, cfg->slot, cfg->func, reg, val, width);
984263bc
MD
3857}
3858
e126caf1 3859int
4d28e78f 3860pci_child_location_str_method(device_t dev, device_t child, char *buf,
e126caf1
MD
3861 size_t buflen)
3862{
e126caf1