/*
 * sys/bus/pci/x86_64/pci_cfgreg.c
 */
1/*-
2 * Copyright (c) 1997, Stefan Esser <se@kfreebsd.org>
3 * Copyright (c) 2000, Michael Smith <msmith@kfreebsd.org>
4 * Copyright (c) 2000, BSDi
5 * Copyright (c) 2004, Scott Long <scottl@kfreebsd.org>
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice unmodified, this list of conditions, and the following
13 * disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * $FreeBSD: src/sys/i386/pci/pci_cfgreg.c,v 1.124.2.2.6.1 2009/04/15 03:14:26 kensmith Exp $
30 */
31
32#include <sys/param.h>
33#include <sys/systm.h>
34#include <sys/bus.h>
35#include <sys/lock.h>
36#include <sys/malloc.h>
37#include <sys/thread2.h>
38#include <sys/spinlock.h>
39#include <sys/spinlock2.h>
40#include <sys/queue.h>
41#include <bus/pci/pcivar.h>
42#include <bus/pci/pcireg.h>
43#include "pci_cfgreg.h"
44#include <machine/pc/bios.h>
45
46#include <vm/vm.h>
47#include <vm/vm_param.h>
48#include <vm/vm_kern.h>
49#include <vm/vm_extern.h>
50#include <vm/pmap.h>
51#include <machine/pmap.h>
52
53#if defined(__DragonFly__)
54#define mtx_init(a, b, c, d) spin_init(a)
55#define mtx_lock_spin(a) spin_lock_wr(a)
56#define mtx_unlock_spin(a) spin_unlock_wr(a)
57#endif
58
59#define PRVERB(a) do { \
60 if (bootverbose) \
61 kprintf a ; \
62} while(0)
63
64#define PCIE_CACHE 8
65struct pcie_cfg_elem {
66 TAILQ_ENTRY(pcie_cfg_elem) elem;
67 vm_offset_t vapage;
68 vm_paddr_t papage;
69};
70
71enum {
72 CFGMECH_NONE = 0,
73 CFGMECH_1,
74 CFGMECH_2,
75 CFGMECH_PCIE,
76};
77
78static TAILQ_HEAD(pcie_cfg_list, pcie_cfg_elem) pcie_list[MAXCPU];
79static uint32_t pciebar;
80static int cfgmech;
81static int devmax;
82#if defined(__DragonFly__)
83static struct spinlock pcicfg_mtx;
84#else
85static struct mtx pcicfg_mtx;
86#endif
87
88static int pcireg_cfgread(int bus, int slot, int func, int reg, int bytes);
89static void pcireg_cfgwrite(int bus, int slot, int func, int reg, int data, int bytes);
90static int pcireg_cfgopen(void);
91
92static int pciereg_cfgopen(void);
93static int pciereg_cfgread(int bus, int slot, int func, int reg,
94 int bytes);
95static void pciereg_cfgwrite(int bus, int slot, int func, int reg,
96 int data, int bytes);
97
98/*
99 * Some BIOS writers seem to want to ignore the spec and put
100 * 0 in the intline rather than 255 to indicate none. Some use
101 * numbers in the range 128-254 to indicate something strange and
102 * apparently undocumented anywhere. Assume these are completely bogus
103 * and map them to 255, which means "none".
104 */
105static __inline int
106pci_i386_map_intline(int line)
107{
108 if (line == 0 || line >= 128)
109 return (PCI_INVALID_IRQ);
110 return (line);
111}
112
113#ifdef notyet
114
/*
 * Query the PCI BIOS for its version through the BIOS32 entry point.
 *
 * Returns the low 16 bits of %ebx from the BIOS_PRESENT call
 * (presumably BCD major/minor per the PCI BIOS spec — confirm), or 0
 * if no entry point was found, the call failed, or the signature check
 * did not match.
 */
static u_int16_t
pcibios_get_version(void)
{
	struct bios_regs args;

	/* No BIOS32 service entry was discovered at probe time. */
	if (PCIbios.ventry == 0) {
		PRVERB(("pcibios: No call entry point\n"));
		return (0);
	}
	args.eax = PCIBIOS_BIOS_PRESENT;
	if (bios32(&args, PCIbios.ventry, GSEL(GCODE_SEL, SEL_KPL))) {
		PRVERB(("pcibios: BIOS_PRESENT call failed\n"));
		return (0);
	}
	/* A conforming PCI BIOS returns 'PCI ' (0x20494350) in %edx. */
	if (args.edx != 0x20494350) {
		PRVERB(("pcibios: BIOS_PRESENT didn't return 'PCI ' in edx\n"));
		return (0);
	}
	return (args.ebx & 0xffff);
}
135
136#endif
137
138/*
139 * Initialise access to PCI configuration space
140 */
int
pci_cfgregopen(void)
{
	/* Idempotent: remember whether we already probed successfully. */
	static int opened = 0;
#ifdef notyet
	u_int16_t vid, did;
	u_int16_t v;
#endif

	if (opened)
		return(1);

	/* Probe for a working port-I/O config mechanism (type 1 or 2). */
	if (pcireg_cfgopen() == 0)
		return(0);

#ifdef notyet
	/*
	 * NOTE: this whole section is compiled out.  It would query the
	 * PCI BIOS version, set up the config-space spinlock, hook up
	 * $PIR routing, and detect chipsets capable of memory-mapped
	 * (PCIe extended) config cycles.
	 */
	v = pcibios_get_version();
	if (v > 0)
		PRVERB(("pcibios: BIOS version %x.%02x\n", (v & 0xff00) >> 8,
		    v & 0xff));
	mtx_init(&pcicfg_mtx, "pcicfg", NULL, MTX_SPIN);
	opened = 1;

	/* $PIR requires PCI BIOS 2.10 or greater. */
	if (v >= 0x0210)
		pci_pir_open();

	/*
	 * Grope around in the PCI config space to see if this is a
	 * chipset that is capable of doing memory-mapped config cycles.
	 * This also implies that it can do PCIe extended config cycles.
	 */

	/* Check for supported chipsets */
	vid = pci_cfgregread(0, 0, 0, PCIR_VENDOR, 2);
	did = pci_cfgregread(0, 0, 0, PCIR_DEVICE, 2);
	if (vid == 0x8086) {
		if (did == 0x3590 || did == 0x3592) {
			/* Intel 7520 or 7320 */
			pciebar = pci_cfgregread(0, 0, 0, 0xce, 2) << 16;
			pciereg_cfgopen();
		} else if (did == 0x2580 || did == 0x2584) {
			/* Intel 915 or 925 */
			pciebar = pci_cfgregread(0, 0, 0, 0x48, 4);
			pciereg_cfgopen();
		}
	}
#else
	opened = 1;
#endif
	return(1);
}
193
194/*
195 * Read configuration space register
196 */
197u_int32_t
198pci_cfgregread(int bus, int slot, int func, int reg, int bytes)
199{
200 uint32_t line;
2ac05e91 201
2ac05e91
SZ
202 /*
203 * Some BIOS writers seem to want to ignore the spec and put
204 * 0 in the intline rather than 255 to indicate none. The rest of
205 * the code uses 255 as an invalid IRQ.
206 */
207 if (reg == PCIR_INTLINE && bytes == 1) {
208 line = pcireg_cfgread(bus, slot, func, PCIR_INTLINE, 1);
209 return (pci_i386_map_intline(line));
210 }
2ac05e91
SZ
211 return (pcireg_cfgread(bus, slot, func, reg, bytes));
212}
213
214/*
215 * Write configuration space register
216 */
217void
218pci_cfgregwrite(int bus, int slot, int func, int reg, u_int32_t data, int bytes)
219{
220
221 pcireg_cfgwrite(bus, slot, func, reg, data, bytes);
222}
223
224/*
225 * Configuration space access using direct register operations
226 */
227
228/* enable configuration space accesses and return data port address */
229static int
230pci_cfgenable(unsigned bus, unsigned slot, unsigned func, int reg, int bytes)
231{
232 int dataport = 0;
233
234#ifdef XBOX
235 if (arch_i386_is_xbox) {
236 /*
237 * The Xbox MCPX chipset is a derivative of the nForce 1
238 * chipset. It almost has the same bus layout; some devices
239 * cannot be used, because they have been removed.
240 */
241
242 /*
243 * Devices 00:00.1 and 00:00.2 used to be memory controllers on
244 * the nForce chipset, but on the Xbox, using them will lockup
245 * the chipset.
246 */
247 if (bus == 0 && slot == 0 && (func == 1 || func == 2))
248 return dataport;
249
250 /*
251 * Bus 1 only contains a VGA controller at 01:00.0. When you try
252 * to probe beyond that device, you only get garbage, which
253 * could cause lockups.
254 */
255 if (bus == 1 && (slot != 0 || func != 0))
256 return dataport;
257
258 /*
259 * Bus 2 used to contain the AGP controller, but the Xbox MCPX
260 * doesn't have one. Probing it can cause lockups.
261 */
262 if (bus >= 2)
263 return dataport;
264 }
265#endif
266
267 if (bus <= PCI_BUSMAX
268 && slot < devmax
269 && func <= PCI_FUNCMAX
270 && reg <= PCI_REGMAX
271 && bytes != 3
272 && (unsigned) bytes <= 4
273 && (reg & (bytes - 1)) == 0) {
274 switch (cfgmech) {
275 case CFGMECH_1:
276 outl(CONF1_ADDR_PORT, (1 << 31)
277 | (bus << 16) | (slot << 11)
278 | (func << 8) | (reg & ~0x03));
279 dataport = CONF1_DATA_PORT + (reg & 0x03);
280 break;
281 case CFGMECH_2:
282 outb(CONF2_ENABLE_PORT, 0xf0 | (func << 1));
283 outb(CONF2_FORWARD_PORT, bus);
284 dataport = 0xc000 | (slot << 8) | reg;
285 break;
286 }
287 }
288 return (dataport);
289}
290
291/* disable configuration space accesses */
292static void
293pci_cfgdisable(void)
294{
295 switch (cfgmech) {
296 case CFGMECH_1:
297 /*
298 * Do nothing for the config mechanism 1 case.
299 * Writing a 0 to the address port can apparently
300 * confuse some bridges and cause spurious
301 * access failures.
302 */
303 break;
304 case CFGMECH_2:
305 outb(CONF2_ENABLE_PORT, 0);
306 break;
307 }
308}
309
310static int
311pcireg_cfgread(int bus, int slot, int func, int reg, int bytes)
312{
313 int data = -1;
314 int port;
315
316 if (cfgmech == CFGMECH_PCIE) {
317 data = pciereg_cfgread(bus, slot, func, reg, bytes);
318 return (data);
319 }
320
321 mtx_lock_spin(&pcicfg_mtx);
322 port = pci_cfgenable(bus, slot, func, reg, bytes);
323 if (port != 0) {
324 switch (bytes) {
325 case 1:
326 data = inb(port);
327 break;
328 case 2:
329 data = inw(port);
330 break;
331 case 4:
332 data = inl(port);
333 break;
334 }
335 pci_cfgdisable();
336 }
337 mtx_unlock_spin(&pcicfg_mtx);
338 return (data);
339}
340
341static void
342pcireg_cfgwrite(int bus, int slot, int func, int reg, int data, int bytes)
343{
344 int port;
345
346 if (cfgmech == CFGMECH_PCIE) {
347 pciereg_cfgwrite(bus, slot, func, reg, data, bytes);
348 return;
349 }
350
351 mtx_lock_spin(&pcicfg_mtx);
352 port = pci_cfgenable(bus, slot, func, reg, bytes);
353 if (port != 0) {
354 switch (bytes) {
355 case 1:
356 outb(port, data);
357 break;
358 case 2:
359 outw(port, data);
360 break;
361 case 4:
362 outl(port, data);
363 break;
364 }
365 pci_cfgdisable();
366 }
367 mtx_unlock_spin(&pcicfg_mtx);
368}
369
/*
 * Check whether the configuration mechanism has been correctly
 * identified: walk the first maxdev slots on bus 0 and look for a
 * device whose vendor/device ID, class code and header type all look
 * plausible.  Returns 1 on the first plausible device, 0 if none.
 */
static int
pci_cfgcheck(int maxdev)
{
	uint32_t id, class;
	uint8_t header;
	uint8_t device;
	int port;

	if (bootverbose)
		kprintf("pci_cfgcheck:\tdevice ");

	for (device = 0; device < maxdev; device++) {
		if (bootverbose)
			kprintf("%d ", device);

		/* Vendor/device ID: all-0 and all-1 mean "nothing here". */
		port = pci_cfgenable(0, device, 0, 0, 4);
		id = inl(port);
		if (id == 0 || id == 0xffffffff)
			continue;

		/*
		 * Class code sanity check: reject 0 and patterns with bits
		 * set outside the expected class/subclass ranges.
		 */
		port = pci_cfgenable(0, device, 0, 8, 4);
		class = inl(port) >> 8;
		if (bootverbose)
			kprintf("[class=%06x] ", class);
		if (class == 0 || (class & 0xf870ff) != 0)
			continue;

		/* Header type: only plain (0) and bridge-ish (1) pass. */
		port = pci_cfgenable(0, device, 0, 14, 1);
		header = inb(port);
		if (bootverbose)
			kprintf("[hdr=%02x] ", header);
		if ((header & 0x7e) != 0)
			continue;

		if (bootverbose)
			kprintf("is there (id=%08x)\n", id);

		pci_cfgdisable();
		return (1);
	}
	if (bootverbose)
		kprintf("-- nothing found\n");

	pci_cfgdisable();
	return (0);
}
417
/*
 * Probe for a working port-I/O configuration mechanism.  Type #1 (the
 * 0xcf8/0xcfc address/data pair) is tried first with two different
 * test patterns; the older type #2 interface is tried only if both
 * fail.  Each candidate is confirmed with pci_cfgcheck() before being
 * accepted.  Sets cfgmech/devmax and returns the chosen CFGMECH_*
 * value, or CFGMECH_NONE if nothing worked.
 */
static int
pcireg_cfgopen(void)
{
	uint32_t mode1res, oldval1;
	uint8_t mode2res, oldval2;

	/* Check for type #1 first. */
	oldval1 = inl(CONF1_ADDR_PORT);

	if (bootverbose) {
		kprintf("pci_open(1):\tmode 1 addr port (0x0cf8) is 0x%08x\n",
		    oldval1);
	}

	cfgmech = CFGMECH_1;
	devmax = 32;

	/*
	 * Write a test pattern to the address port and see whether it
	 * reads back; restore the original value afterwards.
	 */
	outl(CONF1_ADDR_PORT, CONF1_ENABLE_CHK);
	DELAY(1);
	mode1res = inl(CONF1_ADDR_PORT);
	outl(CONF1_ADDR_PORT, oldval1);

	if (bootverbose)
		kprintf("pci_open(1a):\tmode1res=0x%08x (0x%08lx)\n", mode1res,
		    CONF1_ENABLE_CHK);

	if (mode1res) {
		if (pci_cfgcheck(32))
			return (cfgmech);
	}

	/* Second type #1 pattern: check masked bits against expected. */
	outl(CONF1_ADDR_PORT, CONF1_ENABLE_CHK1);
	mode1res = inl(CONF1_ADDR_PORT);
	outl(CONF1_ADDR_PORT, oldval1);

	if (bootverbose)
		kprintf("pci_open(1b):\tmode1res=0x%08x (0x%08lx)\n", mode1res,
		    CONF1_ENABLE_CHK1);

	if ((mode1res & CONF1_ENABLE_MSK1) == CONF1_ENABLE_RES1) {
		if (pci_cfgcheck(32))
			return (cfgmech);
	}

	/* Type #1 didn't work, so try type #2. */
	oldval2 = inb(CONF2_ENABLE_PORT);

	if (bootverbose) {
		kprintf("pci_open(2):\tmode 2 enable port (0x0cf8) is 0x%02x\n",
		    oldval2);
	}

	/* Type #2 is only plausible if the enable nibble reads as zero. */
	if ((oldval2 & 0xf0) == 0) {

		cfgmech = CFGMECH_2;
		devmax = 16;

		outb(CONF2_ENABLE_PORT, CONF2_ENABLE_CHK);
		mode2res = inb(CONF2_ENABLE_PORT);
		outb(CONF2_ENABLE_PORT, oldval2);

		if (bootverbose)
			kprintf("pci_open(2a):\tmode2res=0x%02x (0x%02x)\n",
			    mode2res, CONF2_ENABLE_CHK);

		if (mode2res == CONF2_ENABLE_RES) {
			if (bootverbose)
				kprintf("pci_open(2a):\tnow trying mechanism 2\n");

			if (pci_cfgcheck(16))
				return (cfgmech);
		}
	}

	/* Nothing worked, so punt. */
	cfgmech = CFGMECH_NONE;
	devmax = 0;
	return (cfgmech);
}
497
/*
 * Set up memory-mapped (PCIe extended) configuration access: build a
 * per-cpu cache of PCIE_CACHE kernel VA pages that pciereg_findelem()
 * later maps onto physical config-space pages below pciebar.
 *
 * Returns 1 and switches cfgmech to CFGMECH_PCIE on success, 0 on
 * allocation failure or when PCIE_CFG_MECH is not compiled in.
 *
 * NOTE(review): on a mid-loop failure in the SMP case, arrays and VA
 * ranges allocated for earlier cpus are not released — confirm whether
 * that leak is acceptable at boot time.
 */
static int
pciereg_cfgopen(void)
{
#ifdef PCIE_CFG_MECH
	struct pcie_cfg_list *pcielist;
	struct pcie_cfg_elem *pcie_array, *elem;
#ifdef SMP
	struct pcpu *pc;
#endif
	vm_offset_t va;
	int i;

	if (bootverbose)
		kprintf("Setting up PCIe mappings for BAR 0x%x\n", pciebar);

#ifdef SMP
	SLIST_FOREACH(pc, &cpuhead, pc_allcpu)
#endif
	{

		pcie_array = kmalloc(sizeof(struct pcie_cfg_elem) * PCIE_CACHE,
		    M_DEVBUF, M_NOWAIT);
		if (pcie_array == NULL)
			return (0);

		va = kmem_alloc_nofault(&kernel_map, PCIE_CACHE * PAGE_SIZE);
		if (va == 0) {
			kfree(pcie_array, M_DEVBUF);
			return (0);
		}

#ifdef SMP
		pcielist = &pcie_list[pc->pc_cpuid];
#else
		pcielist = &pcie_list[0];
#endif
		/* Seed the LRU list; papage == 0 marks an unused mapping. */
		TAILQ_INIT(pcielist);
		for (i = 0; i < PCIE_CACHE; i++) {
			elem = &pcie_array[i];
			elem->vapage = va + (i * PAGE_SIZE);
			elem->papage = 0;
			TAILQ_INSERT_HEAD(pcielist, elem, elem);
		}
	}


	cfgmech = CFGMECH_PCIE;
	devmax = 32;
	return (1);
#else /* !PCIE_CFG_MECH */
	return (0);
#endif /* PCIE_CFG_MECH */
}
551
552#define PCIE_PADDR(bar, reg, bus, slot, func) \
553 ((bar) | \
554 (((bus) & 0xff) << 20) | \
555 (((slot) & 0x1f) << 15) | \
556 (((func) & 0x7) << 12) | \
557 ((reg) & 0xfff))
558
559/*
560 * Find an element in the cache that matches the physical page desired, or
561 * create a new mapping from the least recently used element.
562 * A very simple LRU algorithm is used here, does it need to be more
563 * efficient?
564 */
static __inline struct pcie_cfg_elem *
pciereg_findelem(vm_paddr_t papage)
{
	struct pcie_cfg_list *pcielist;
	struct pcie_cfg_elem *elem;
	pcielist = &pcie_list[mycpuid];
	/* Look for an existing mapping of this physical page. */
	TAILQ_FOREACH(elem, pcielist, elem) {
		if (elem->papage == papage)
			break;
	}

	if (elem == NULL) {
		/* Miss: evict the least recently used (tail) element. */
		elem = TAILQ_LAST(pcielist, pcie_cfg_list);
		if (elem->papage != 0) {
			pmap_kremove(elem->vapage);
			/*
			 * NOTE(review): cpu_invlpg() is passed the address
			 * of the vapage field rather than the VA itself —
			 * confirm this matches cpu_invlpg()'s contract.
			 */
			cpu_invlpg(&elem->vapage);
		}
		pmap_kenter(elem->vapage, papage);
		elem->papage = papage;
	}

	/* Move the element to the head so it becomes most recently used. */
	if (elem != TAILQ_FIRST(pcielist)) {
		TAILQ_REMOVE(pcielist, elem, elem);
		TAILQ_INSERT_HEAD(pcielist, elem, elem);
	}
	return (elem);
}
592
593static int
594pciereg_cfgread(int bus, int slot, int func, int reg, int bytes)
595{
596 struct pcie_cfg_elem *elem;
597 volatile vm_offset_t va;
598 vm_paddr_t pa, papage;
599 int data;
600
601 crit_enter();
602 pa = PCIE_PADDR(pciebar, reg, bus, slot, func);
603 papage = pa & ~PAGE_MASK;
604 elem = pciereg_findelem(papage);
605 va = elem->vapage | (pa & PAGE_MASK);
606
607 switch (bytes) {
608 case 4:
609 data = *(volatile uint32_t *)(va);
610 break;
611 case 2:
612 data = *(volatile uint16_t *)(va);
613 break;
614 case 1:
615 data = *(volatile uint8_t *)(va);
616 break;
617 default:
618 panic("pciereg_cfgread: invalid width");
619 }
620
621 crit_exit();
622 return (data);
623}
624
625static void
626pciereg_cfgwrite(int bus, int slot, int func, int reg, int data, int bytes)
627{
628 struct pcie_cfg_elem *elem;
629 volatile vm_offset_t va;
630 vm_paddr_t pa, papage;
631
632 crit_enter();
633 pa = PCIE_PADDR(pciebar, reg, bus, slot, func);
634 papage = pa & ~PAGE_MASK;
635 elem = pciereg_findelem(papage);
636 va = elem->vapage | (pa & PAGE_MASK);
637
638 switch (bytes) {
639 case 4:
640 *(volatile uint32_t *)(va) = data;
641 break;
642 case 2:
643 *(volatile uint16_t *)(va) = data;
644 break;
645 case 1:
646 *(volatile uint8_t *)(va) = data;
647 break;
648 default:
649 panic("pciereg_cfgwrite: invalid width");
650 }
651
652 crit_exit();
653}