i386: Remove more old IOAPIC code
[dragonfly.git] / sys / platform / pc32 / i386 / mp_machdep.c
984263bc
MD
1/*
2 * Copyright (c) 1996, by Steve Passe
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. The name of the developer may NOT be used to endorse or promote products
11 * derived from this software without specific prior written permission.
12 *
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 *
25 * $FreeBSD: src/sys/i386/i386/mp_machdep.c,v 1.115.2.15 2003/03/14 21:22:35 jhb Exp $
c0c5de70 26 * $DragonFly: src/sys/platform/pc32/i386/mp_machdep.c,v 1.60 2008/06/07 12:03:52 mneumann Exp $
984263bc
MD
27 */
28
29#include "opt_cpu.h"
984263bc 30
984263bc
MD
31#include <sys/param.h>
32#include <sys/systm.h>
33#include <sys/kernel.h>
984263bc
MD
34#include <sys/sysctl.h>
35#include <sys/malloc.h>
36#include <sys/memrange.h>
984263bc 37#include <sys/cons.h> /* cngetc() */
37e7efec 38#include <sys/machintr.h>
984263bc
MD
39
40#include <vm/vm.h>
41#include <vm/vm_param.h>
42#include <vm/pmap.h>
43#include <vm/vm_kern.h>
44#include <vm/vm_extern.h>
984263bc
MD
45#include <sys/lock.h>
46#include <vm/vm_map.h>
47#include <sys/user.h>
48#ifdef GPROF
49#include <sys/gmon.h>
50#endif
984263bc 51
684a93c4
MD
52#include <sys/mplock2.h>
53
984263bc 54#include <machine/smp.h>
a9295349 55#include <machine_base/apic/apicreg.h>
984263bc
MD
56#include <machine/atomic.h>
57#include <machine/cpufunc.h>
90e8a35b 58#include <machine/cputypes.h>
e0918665 59#include <machine_base/apic/ioapic_abi.h>
a9295349 60#include <machine_base/apic/mpapic.h>
984263bc
MD
61#include <machine/psl.h>
62#include <machine/segments.h>
984263bc
MD
63#include <machine/tss.h>
64#include <machine/specialreg.h>
65#include <machine/globaldata.h>
4117f2fd 66#include <machine/pmap_inval.h>
984263bc 67
984263bc 68#include <machine/md_var.h> /* setidt() */
87cf6827
SZ
69#include <machine_base/icu/icu.h> /* IPIs */
70#include <machine/intr_machdep.h> /* IPIs */
984263bc 71
1439c090
MD
72#define FIXUP_EXTRA_APIC_INTS 8 /* additional entries we may create */
73
984263bc
MD
74#define WARMBOOT_TARGET 0
75#define WARMBOOT_OFF (KERNBASE + 0x0467)
76#define WARMBOOT_SEG (KERNBASE + 0x0469)
77
984263bc 78#define BIOS_BASE (0xf0000)
1df86978 79#define BIOS_BASE2 (0xe0000)
984263bc 80#define BIOS_SIZE (0x10000)
984263bc
MD
81#define BIOS_COUNT (BIOS_SIZE/4)
82
83#define CMOS_REG (0x70)
84#define CMOS_DATA (0x71)
85#define BIOS_RESET (0x0f)
86#define BIOS_WARM (0x0a)
87
88#define PROCENTRY_FLAG_EN 0x01
89#define PROCENTRY_FLAG_BP 0x02
90#define IOAPICENTRY_FLAG_EN 0x01
91
92
93/* MP Floating Pointer Structure */
94typedef struct MPFPS {
95 char signature[4];
981bebd1 96 u_int32_t pap;
984263bc
MD
97 u_char length;
98 u_char spec_rev;
99 u_char checksum;
100 u_char mpfb1;
101 u_char mpfb2;
102 u_char mpfb3;
103 u_char mpfb4;
104 u_char mpfb5;
105} *mpfps_t;
106
107/* MP Configuration Table Header */
108typedef struct MPCTH {
109 char signature[4];
110 u_short base_table_length;
111 u_char spec_rev;
112 u_char checksum;
113 u_char oem_id[8];
114 u_char product_id[12];
115 void *oem_table_pointer;
116 u_short oem_table_size;
117 u_short entry_count;
118 void *apic_address;
119 u_short extended_table_length;
120 u_char extended_table_checksum;
121 u_char reserved;
122} *mpcth_t;
123
124
125typedef struct PROCENTRY {
126 u_char type;
127 u_char apic_id;
128 u_char apic_version;
129 u_char cpu_flags;
130 u_long cpu_signature;
131 u_long feature_flags;
132 u_long reserved1;
133 u_long reserved2;
134} *proc_entry_ptr;
135
136typedef struct BUSENTRY {
137 u_char type;
138 u_char bus_id;
139 char bus_type[6];
140} *bus_entry_ptr;
141
142typedef struct IOAPICENTRY {
143 u_char type;
144 u_char apic_id;
145 u_char apic_version;
146 u_char apic_flags;
147 void *apic_address;
148} *io_apic_entry_ptr;
149
150typedef struct INTENTRY {
151 u_char type;
152 u_char int_type;
153 u_short int_flags;
154 u_char src_bus_id;
155 u_char src_bus_irq;
156 u_char dst_apic_id;
157 u_char dst_apic_int;
158} *int_entry_ptr;
159
160/* descriptions of MP basetable entries */
161typedef struct BASETABLE_ENTRY {
162 u_char type;
163 u_char length;
164 char name[16];
165} basetable_entry;
166
981bebd1
SZ
167struct mptable_pos {
168 mpfps_t mp_fps;
169 mpcth_t mp_cth;
170 vm_size_t mp_cth_mapsz;
171};
172
c455a23f
SZ
173#define MPTABLE_POS_USE_DEFAULT(mpt) \
174 ((mpt)->mp_fps->mpfb1 != 0 || (mpt)->mp_cth == NULL)
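/*
 * Note (MP spec 1.4): a non-zero MP feature byte 1 (mpfb1) selects one
 * of the spec's pre-defined default configurations instead of pointing
 * at a configuration table, so either condition above means "no usable
 * configuration table; fall back to the default configuration".
 */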
175
e0fd357f
SZ
176struct mptable_bus {
177 int mb_id;
178 int mb_type; /* MPTABLE_BUS_ */
179 TAILQ_ENTRY(mptable_bus) mb_link;
180};
181
182#define MPTABLE_BUS_ISA 0
183#define MPTABLE_BUS_PCI 1
184
185struct mptable_bus_info {
186 TAILQ_HEAD(, mptable_bus) mbi_list;
187};
188
189struct mptable_pci_int {
190 int mpci_bus;
191 int mpci_dev;
192 int mpci_pin;
193
6b881b58 194 int mpci_ioapic_idx;
e0fd357f
SZ
195 int mpci_ioapic_pin;
196 TAILQ_ENTRY(mptable_pci_int) mpci_link;
197};
198
6b881b58
SZ
199struct mptable_ioapic {
200 int mio_idx;
201 int mio_apic_id;
202 uint32_t mio_addr;
0471bb0e
SZ
203 int mio_gsi_base;
204 int mio_npin;
6b881b58
SZ
205 TAILQ_ENTRY(mptable_ioapic) mio_link;
206};
207
fa058384
SZ
208typedef int (*mptable_iter_func)(void *, const void *, int);
209
984263bc
MD
210/*
211 * this code MUST be enabled here and in mpboot.s.
212 * it follows the very early stages of AP boot by placing values in CMOS ram.
213 * it NORMALLY will never be needed and thus the primitive method for enabling.
214 *
984263bc 215 */
7d34994c 216#if defined(CHECK_POINTS)
984263bc
MD
217#define CHECK_READ(A) (outb(CMOS_REG, (A)), inb(CMOS_DATA))
218#define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))
219
220#define CHECK_INIT(D); \
221 CHECK_WRITE(0x34, (D)); \
222 CHECK_WRITE(0x35, (D)); \
223 CHECK_WRITE(0x36, (D)); \
224 CHECK_WRITE(0x37, (D)); \
225 CHECK_WRITE(0x38, (D)); \
226 CHECK_WRITE(0x39, (D));
227
228#define CHECK_PRINT(S); \
26be20a0 229 kprintf("%s: %d, %d, %d, %d, %d, %d\n", \
984263bc
MD
230 (S), \
231 CHECK_READ(0x34), \
232 CHECK_READ(0x35), \
233 CHECK_READ(0x36), \
234 CHECK_READ(0x37), \
235 CHECK_READ(0x38), \
236 CHECK_READ(0x39));
237
238#else /* CHECK_POINTS */
239
240#define CHECK_INIT(D)
241#define CHECK_PRINT(S)
242
243#endif /* CHECK_POINTS */
244
245/*
246 * Values to send to the POST hardware.
247 */
248#define MP_BOOTADDRESS_POST 0x10
249#define MP_PROBE_POST 0x11
250#define MPTABLE_PASS1_POST 0x12
251
252#define MP_START_POST 0x13
253#define MP_ENABLE_POST 0x14
254#define MPTABLE_PASS2_POST 0x15
255
256#define START_ALL_APS_POST 0x16
257#define INSTALL_AP_TRAMP_POST 0x17
258#define START_AP_POST 0x18
259
260#define MP_ANNOUNCE_POST 0x19
261
984263bc
MD
262/** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
263int current_postcode;
264
265/** XXX FIXME: what system files declare these??? */
266extern struct region_descriptor r_gdt, r_idt;
267
984263bc 268int mp_naps; /* # of Application Processors */
984263bc
MD
269extern int nkpt;
270
271u_int32_t cpu_apic_versions[MAXCPU];
374133e3 272int64_t tsc0_offset;
0b698dca 273extern int64_t tsc_offsets[];
984263bc 274
1876681a
SZ
275extern u_long ebda_addr;
276
30c5f287 277#ifdef SMP /* APIC-IO */
8a8d5d85 278struct apic_intmapinfo int_to_apicintpin[APIC_INTMAPSIZE];
97359a5b 279#endif
984263bc 280
984263bc
MD
281/*
282 * APIC ID logical/physical mapping structures.
283 * We oversize these to simplify boot-time config.
284 */
285int cpu_num_to_apic_id[NAPICID];
984263bc
MD
286int apic_id_to_logical[NAPICID];
287
984263bc
MD
288/* AP uses this during bootstrap. Do not staticize. */
289char *bootSTK;
290static int bootAP;
291
292/* Hotwire a 0->4MB V==P mapping */
293extern pt_entry_t *KPTphys;
294
f13b5eec
MD
295/*
296 * SMP page table page. Setup by locore to point to a page table
297 * page from which we allocate per-cpu privatespace areas, io_apics,
298 * and so forth.
299 */
300
301#define IO_MAPPING_START_INDEX \
302 (SMP_MAXCPU * sizeof(struct privatespace) / PAGE_SIZE)
303
984263bc 304extern pt_entry_t *SMPpt;
f13b5eec 305static int SMPpt_alloc_index = IO_MAPPING_START_INDEX;
984263bc
MD
306
307struct pcb stoppcbs[MAXCPU];
308
fa058384
SZ
309static basetable_entry basetable_entry_types[] =
310{
311 {0, 20, "Processor"},
312 {1, 8, "Bus"},
313 {2, 8, "I/O APIC"},
314 {3, 8, "I/O INT"},
315 {4, 8, "Local INT"}
316};
317
984263bc
MD
318/*
319 * Local data and functions.
320 */
321
984263bc
MD
322static u_int boot_address;
323static u_int base_memory;
41a01a4d 324static int mp_finish;
52596b13 325static int mp_finish_lapic;
984263bc 326
984263bc
MD
327static void mp_enable(u_int boot_addr);
328
fa058384
SZ
329static int mptable_iterate_entries(const mpcth_t,
330 mptable_iter_func, void *);
34e6fa63 331static int mptable_search(void);
3aba8f73 332static int mptable_search_sig(u_int32_t target, int count);
da23a592 333static int mptable_hyperthread_fixup(cpumask_t, int);
fe423084 334static int mptable_map(struct mptable_pos *);
981bebd1 335static void mptable_unmap(struct mptable_pos *);
e0fd357f
SZ
336static void mptable_bus_info_alloc(const mpcth_t,
337 struct mptable_bus_info *);
338static void mptable_bus_info_free(struct mptable_bus_info *);
3aba8f73 339
281d9482
SZ
340static int mptable_lapic_probe(struct lapic_enumerator *);
341static void mptable_lapic_enumerate(struct lapic_enumerator *);
342static void mptable_lapic_default(void);
343
7da2706b
SZ
344static int mptable_ioapic_probe(struct ioapic_enumerator *);
345static void mptable_ioapic_enumerate(struct ioapic_enumerator *);
346
984263bc
MD
347static int start_all_aps(u_int boot_addr);
348static void install_ap_tramp(u_int boot_addr);
bb467734
MD
349static int start_ap(struct mdglobaldata *gd, u_int boot_addr, int smibest);
350static int smitest(void);
984263bc 351
41a01a4d 352static cpumask_t smp_startup_mask = 1; /* which cpus have been started */
52596b13 353static cpumask_t smp_lapic_mask = 1; /* which cpus have lapic been inited */
0f7a3396
MD
354cpumask_t smp_active_mask = 1; /* which cpus are ready for IPIs etc? */
355SYSCTL_INT(_machdep, OID_AUTO, smp_active, CTLFLAG_RD, &smp_active_mask, 0, "");
356
9d758cc4
SZ
357int imcr_present;
358
fe423084 359static vm_paddr_t mptable_fps_phyaddr;
c455a23f 360static int mptable_use_default;
6b881b58 361static TAILQ_HEAD(mptable_pci_int_list, mptable_pci_int) mptable_pci_int_list =
e0fd357f 362 TAILQ_HEAD_INITIALIZER(mptable_pci_int_list);
6b881b58
SZ
363static TAILQ_HEAD(mptable_ioapic_list, mptable_ioapic) mptable_ioapic_list =
364 TAILQ_HEAD_INITIALIZER(mptable_ioapic_list);
fe423084 365
984263bc
MD
366/*
367 * Calculate usable address in base memory for AP trampoline code.
368 */
369u_int
370mp_bootaddress(u_int basemem)
371{
372 POSTCODE(MP_BOOTADDRESS_POST);
373
c0c5de70 374 base_memory = basemem;
984263bc
MD
375
376 boot_address = base_memory & ~0xfff; /* round down to 4k boundary */
377 if ((base_memory - boot_address) < bootMP_size)
378 boot_address -= 4096; /* not enough, lower by 4k */
379
380 return boot_address;
381}
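/*
 * Worked example (illustrative numbers): with basemem = 0x9fc00 (639KB)
 * the 4K round-down gives boot_address = 0x9f000, leaving 0xc00 bytes of
 * base memory above it.  If the trampoline (bootMP_size) does not fit in
 * that remainder, we back off one more page to 0x9e000.
 */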
382
383
fe423084 384static void
34e6fa63
SZ
385mptable_probe(void)
386{
c455a23f
SZ
387 struct mptable_pos mpt;
388 int error;
389
fe423084 390 KKASSERT(mptable_fps_phyaddr == 0);
c455a23f 391
fe423084 392 mptable_fps_phyaddr = mptable_search();
c455a23f
SZ
393 if (mptable_fps_phyaddr == 0)
394 return;
395
396 error = mptable_map(&mpt);
397 if (error) {
398 mptable_fps_phyaddr = 0;
399 return;
400 }
401
402 if (MPTABLE_POS_USE_DEFAULT(&mpt)) {
403 kprintf("MPTABLE: use default configuration\n");
404 mptable_use_default = 1;
405 }
9d758cc4
SZ
406 if (mpt.mp_fps->mpfb2 & 0x80)
407 imcr_present = 1;
c455a23f
SZ
408
409 mptable_unmap(&mpt);
34e6fa63 410}
fe423084 411SYSINIT(mptable_probe, SI_BOOT2_PRESMP, SI_ORDER_FIRST, mptable_probe, 0);
34e6fa63 412
984263bc
MD
413/*
414 * Look for an Intel MP spec table (ie, SMP capable hardware).
415 */
cb00b5c4 416static int
34e6fa63 417mptable_search(void)
984263bc
MD
418{
419 int x;
984263bc 420 u_int32_t target;
f13b5eec
MD
421
422 /*
423 * Make sure our SMPpt[] page table is big enough to hold all the
424 * mappings we need.
425 */
426 KKASSERT(IO_MAPPING_START_INDEX < NPTEPG - 2);
984263bc
MD
427
428 POSTCODE(MP_PROBE_POST);
429
430 /* see if EBDA exists */
1876681a 431 if (ebda_addr != 0) {
984263bc 432 /* search first 1K of EBDA */
1876681a 433 target = (u_int32_t)ebda_addr;
3aba8f73 434 if ((x = mptable_search_sig(target, 1024 / 4)) > 0)
aeb48299 435 return x;
984263bc
MD
436 } else {
437 /* last 1K of base memory, effective 'top of base' passed in */
aeb48299 438 target = (u_int32_t)(base_memory - 0x400);
3aba8f73 439 if ((x = mptable_search_sig(target, 1024 / 4)) > 0)
aeb48299 440 return x;
984263bc
MD
441 }
442
443 /* search the BIOS */
aeb48299 444 target = (u_int32_t)BIOS_BASE;
3aba8f73 445 if ((x = mptable_search_sig(target, BIOS_COUNT)) > 0)
aeb48299 446 return x;
984263bc 447
1df86978
SZ
448 /* search the extended BIOS */
449 target = (u_int32_t)BIOS_BASE2;
450 if ((x = mptable_search_sig(target, BIOS_COUNT)) > 0)
451 return x;
452
984263bc 453 /* nothing found */
984263bc 454 return 0;
984263bc
MD
455}
456
fa058384
SZ
457static int
458mptable_iterate_entries(const mpcth_t cth, mptable_iter_func func, void *arg)
459{
460 int count, total_size;
461 const void *position;
462
463 KKASSERT(cth->base_table_length >= sizeof(struct MPCTH));
464 total_size = cth->base_table_length - sizeof(struct MPCTH);
465 position = (const uint8_t *)cth + sizeof(struct MPCTH);
466 count = cth->entry_count;
467
468 while (count--) {
469 int type, error;
470
471 KKASSERT(total_size >= 0);
472 if (total_size == 0) {
473 kprintf("invalid base MP table, "
474 "entry count and length mismatch\n");
475 return EINVAL;
476 }
477
478 type = *(const uint8_t *)position;
479 switch (type) {
480 case 0: /* processor_entry */
481 case 1: /* bus_entry */
482 case 2: /* io_apic_entry */
483 case 3: /* int_entry */
484 case 4: /* int_entry */
485 break;
486 default:
487 kprintf("unknown base MP table entry type %d\n", type);
488 return EINVAL;
489 }
490
491 if (total_size < basetable_entry_types[type].length) {
492 kprintf("invalid base MP table length, "
493 "does not contain all entries\n");
494 return EINVAL;
495 }
496 total_size -= basetable_entry_types[type].length;
497
498 error = func(arg, position, type);
499 if (error)
500 return error;
501
502 position = (const uint8_t *)position +
503 basetable_entry_types[type].length;
504 }
505 return 0;
506}
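/*
 * Layout walked above: the variable-size records start immediately after
 * the MPCTH header and the first byte of each record is its type.  The
 * per-type sizes come from basetable_entry_types[] (20 bytes for a
 * processor entry, 8 bytes for the bus, I/O APIC and both interrupt
 * entry types), which is what the running total_size check enforces
 * against entry_count.
 */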
507
984263bc
MD
508
509/*
510 * Startup the SMP processors.
511 */
512void
513mp_start(void)
514{
515 POSTCODE(MP_START_POST);
50bc991e 516 mp_enable(boot_address);
984263bc
MD
517}
518
519
520/*
521 * Print various information about the SMP system hardware and setup.
522 */
523void
524mp_announce(void)
525{
526 int x;
527
528 POSTCODE(MP_ANNOUNCE_POST);
529
26be20a0
SW
530 kprintf("DragonFly/MP: Multiprocessor motherboard\n");
531 kprintf(" cpu0 (BSP): apic id: %2d", CPU_TO_ID(0));
8629c4ea 532 kprintf(", version: 0x%08x\n", cpu_apic_versions[0]);
984263bc 533 for (x = 1; x <= mp_naps; ++x) {
26be20a0 534 kprintf(" cpu%d (AP): apic id: %2d", x, CPU_TO_ID(x));
8629c4ea 535 kprintf(", version: 0x%08x\n", cpu_apic_versions[x]);
984263bc
MD
536 }
537
6ac31e9d
SZ
538 if (!apic_io_enable)
539 kprintf(" Warning: APIC I/O disabled\n");
984263bc
MD
540}
541
542/*
543 * AP cpu's call this to sync up protected mode.
7160572f
MD
544 *
545 * WARNING! We must ensure that the cpu is sufficiently initialized to
546 * be able to use the FP for our optimized bzero/bcopy code before
547 * we enter more mainstream C code.
a44bdeec
MD
548 *
549 * WARNING! %fs is not set up on entry. This routine sets up %fs.
984263bc
MD
550 */
551void
552init_secondary(void)
553{
554 int gsel_tss;
555 int x, myid = bootAP;
556 u_int cr0;
8a8d5d85 557 struct mdglobaldata *md;
0f7a3396 558 struct privatespace *ps;
984263bc 559
0f7a3396
MD
560 ps = &CPU_prvspace[myid];
561
562 gdt_segs[GPRIV_SEL].ssd_base = (int)ps;
984263bc 563 gdt_segs[GPROC0_SEL].ssd_base =
0f7a3396
MD
564 (int) &ps->mdglobaldata.gd_common_tss;
565 ps->mdglobaldata.mi.gd_prvspace = ps;
984263bc
MD
566
567 for (x = 0; x < NGDT; x++) {
568 ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x].sd);
569 }
570
571 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
572 r_gdt.rd_base = (int) &gdt[myid * NGDT];
573 lgdt(&r_gdt); /* does magic intra-segment return */
574
575 lidt(&r_idt);
576
577 lldt(_default_ldt);
7b95be2a 578 mdcpu->gd_currentldt = _default_ldt;
984263bc
MD
579
580 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
581 gdt[myid * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;
8a8d5d85 582
0f7a3396 583 md = mdcpu; /* loaded through %fs:0 (mdglobaldata.mi.gd_prvspace)*/
8a8d5d85
MD
584
585 md->gd_common_tss.tss_esp0 = 0; /* not used until after switch */
586 md->gd_common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
587 md->gd_common_tss.tss_ioopt = (sizeof md->gd_common_tss) << 16;
588 md->gd_tss_gdt = &gdt[myid * NGDT + GPROC0_SEL].sd;
589 md->gd_common_tssd = *md->gd_tss_gdt;
984263bc
MD
590 ltr(gsel_tss);
591
592 /*
593 * Set to a known state:
594 * Set by mpboot.s: CR0_PG, CR0_PE
595 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
596 */
597 cr0 = rcr0();
598 cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
599 load_cr0(cr0);
7160572f 600 pmap_set_opt(); /* PSE/4MB pages, etc */
984263bc 601
7160572f
MD
602 /* set up CPU registers and state */
603 cpu_setregs();
604
605 /* set up FPU state on the AP */
606 npxinit(__INITIAL_NPXCW__);
607
608 /* set up SSE registers */
609 enable_sse();
984263bc
MD
610}
611
984263bc
MD
612/*******************************************************************
613 * local functions and data
614 */
615
616/*
617 * start the SMP system
618 */
619static void
620mp_enable(u_int boot_addr)
621{
984263bc
MD
622 POSTCODE(MP_ENABLE_POST);
623
281d9482 624 lapic_config();
984263bc 625
a40ec003
SZ
626 /* Initialize BSP's local APIC */
627 lapic_init(TRUE);
628
52596b13
SZ
629 /* start each Application Processor */
630 start_all_aps(boot_addr);
631
65b2387f
SZ
632 if (apic_io_enable)
633 ioapic_config();
634
a40ec003
SZ
635 /* Finalize PIC */
636 MachIntrABI.finalize();
984263bc
MD
637}
638
639
640/*
641 * look for the MP spec signature
642 */
643
644/* string defined by the Intel MP Spec as identifying the MP table */
645#define MP_SIG 0x5f504d5f /* _MP_ */
646#define NEXT(X) ((X) += 4)
647static int
3aba8f73 648mptable_search_sig(u_int32_t target, int count)
984263bc 649{
0f85efa2
SZ
650 vm_size_t map_size;
651 u_int32_t *addr;
652 int x, ret;
984263bc 653
aeb48299
SZ
654 KKASSERT(target != 0);
655
0f85efa2
SZ
656 map_size = count * sizeof(u_int32_t);
657 addr = pmap_mapdev((vm_paddr_t)target, map_size);
984263bc 658
aeb48299 659 ret = 0;
0f85efa2
SZ
660 for (x = 0; x < count; NEXT(x)) {
661 if (addr[x] == MP_SIG) {
662 /* make array index a byte index */
663 ret = target + (x * sizeof(u_int32_t));
664 break;
665 }
666 }
aeb48299 667
0f85efa2
SZ
668 pmap_unmapdev((vm_offset_t)addr, map_size);
669 return ret;
984263bc
MD
670}
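/*
 * Example: the "_MP_" signature is matched as one 32-bit compare.  The
 * ASCII bytes '_','M','P','_' are 0x5f 0x4d 0x50 0x5f; read as a
 * little-endian 32-bit value that is 0x5f504d5f, i.e. MP_SIG above.
 */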
671
672
984263bc
MD
673typedef struct BUSDATA {
674 u_char bus_id;
675 enum busTypes bus_type;
676} bus_datum;
677
678typedef struct INTDATA {
679 u_char int_type;
680 u_short int_flags;
681 u_char src_bus_id;
682 u_char src_bus_irq;
683 u_char dst_apic_id;
684 u_char dst_apic_int;
685 u_char int_vector;
686} io_int, local_int;
687
688typedef struct BUSTYPENAME {
689 u_char type;
690 char name[7];
691} bus_type_name;
692
a0873f07 693static int processor_entry (const struct PROCENTRY *entry, int cpu);
984263bc 694
984263bc
MD
695/*
696 * Check if we should perform a hyperthreading "fix-up" to
697 * enumerate any logical CPU's that aren't already listed
698 * in the table.
699 *
700 * XXX: We assume that all of the physical CPUs in the
701 * system have the same number of logical CPUs.
702 *
703 * XXX: We assume that APIC ID's are allocated such that
704 * the APIC ID's for a physical processor are aligned
705 * with the number of logical CPU's in the processor.
706 */
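/*
 * Illustrative example (hypothetical IDs): if the table lists enabled
 * APIC IDs 0 and 4 only, the distance between listed IDs is a constant
 * 4, so (assuming the package reports at least 4 logical CPUs) the
 * fixup below fills in IDs 1-3 and 5-7 as the missing logical CPUs.
 * If the listed IDs were 0, 2 and 5 the distances differ and the fixup
 * is abandoned.
 */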
44c36320 707static int
da23a592 708mptable_hyperthread_fixup(cpumask_t id_mask, int cpu_count)
984263bc 709{
44c36320 710 int i, id, lcpus_max, logical_cpus;
984263bc 711
984263bc 712 if ((cpu_feature & CPUID_HTT) == 0)
44c36320 713 return 0;
7ea07fd2
SZ
714
715 lcpus_max = (cpu_procinfo & CPUID_HTT_CORES) >> 16;
716 if (lcpus_max <= 1)
44c36320 717 return 0;
984263bc 718
90e8a35b 719 if (cpu_vendor_id == CPU_VENDOR_INTEL) {
7ea07fd2
SZ
720 /*
721 * INSTRUCTION SET REFERENCE, A-M (#253666)
722 * Page 3-181, Table 3-20
723 * "The nearest power-of-2 integer that is not smaller
724 * than EBX[23:16] is the number of unique initial APIC
725 * IDs reserved for addressing different logical
726 * processors in a physical package."
727 */
728 for (i = 0; ; ++i) {
729 if ((1 << i) >= lcpus_max) {
730 lcpus_max = 1 << i;
731 break;
732 }
733 }
734 }
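	/*
	 * e.g. a reported count of 6 logical processors is rounded up
	 * here to lcpus_max = 8, the nearest power of 2 that is not
	 * smaller than EBX[23:16].
	 */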
735
44c36320
SZ
736 KKASSERT(cpu_count != 0);
737 if (cpu_count == lcpus_max) {
7ea07fd2 738 /* We have nothing to fix */
44c36320
SZ
739 return 0;
740 } else if (cpu_count == 1) {
7ea07fd2
SZ
741 /* XXX this may be incorrect */
742 logical_cpus = lcpus_max;
743 } else {
744 int cur, prev, dist;
745
746 /*
747 * Calculate the distances between two nearest
748 * APIC IDs. If all such distances are same,
749 * then it is the number of missing cpus that
750 * we are going to fill later.
751 */
752 dist = cur = prev = -1;
753 for (id = 0; id < MAXCPU; ++id) {
da23a592 754 if ((id_mask & CPUMASK(id)) == 0)
7ea07fd2
SZ
755 continue;
756
757 cur = id;
758 if (prev >= 0) {
759 int new_dist = cur - prev;
760
761 if (dist < 0)
762 dist = new_dist;
763
764 /*
765 * Make sure that all distances
766 * between two nearest APIC IDs
767 * are same.
768 */
769 if (dist != new_dist)
44c36320 770 return 0;
7ea07fd2
SZ
771 }
772 prev = cur;
773 }
774 if (dist == 1)
44c36320 775 return 0;
7ea07fd2
SZ
776
777 /* Must be power of 2 */
778 if (dist & (dist - 1))
44c36320 779 return 0;
7ea07fd2
SZ
780
781 /* Can't exceed CPU package capacity */
782 if (dist > lcpus_max)
783 logical_cpus = lcpus_max;
784 else
785 logical_cpus = dist;
786 }
787
984263bc
MD
788 /*
789 * For each APIC ID of a CPU that is set in the mask,
790 * scan the other candidate APIC ID's for this
791 * physical processor. If any of those ID's are
792 * already in the table, then kill the fixup.
793 */
7ea07fd2 794 for (id = 0; id < MAXCPU; id++) {
da23a592 795 if ((id_mask & CPUMASK(id)) == 0)
984263bc
MD
796 continue;
797 /* First, make sure we are on a logical_cpus boundary. */
798 if (id % logical_cpus != 0)
44c36320 799 return 0;
984263bc 800 for (i = id + 1; i < id + logical_cpus; i++)
da23a592 801 if ((id_mask & CPUMASK(i)) != 0)
44c36320 802 return 0;
984263bc 803 }
44c36320 804 return logical_cpus;
984263bc 805}
984263bc 806
fa058384 807static int
fe423084 808mptable_map(struct mptable_pos *mpt)
981bebd1
SZ
809{
810 mpfps_t fps = NULL;
811 mpcth_t cth = NULL;
812 vm_size_t cth_mapsz = 0;
813
fe423084
SZ
814 KKASSERT(mptable_fps_phyaddr != 0);
815
fa058384
SZ
816 bzero(mpt, sizeof(*mpt));
817
fe423084 818 fps = pmap_mapdev(mptable_fps_phyaddr, sizeof(*fps));
981bebd1
SZ
819 if (fps->pap != 0) {
820 /*
821 * Map configuration table header to get
822 * the base table size
823 */
824 cth = pmap_mapdev(fps->pap, sizeof(*cth));
825 cth_mapsz = cth->base_table_length;
826 pmap_unmapdev((vm_offset_t)cth, sizeof(*cth));
827
fa058384
SZ
828 if (cth_mapsz < sizeof(*cth)) {
829 kprintf("invalid base MP table length %d\n",
830 (int)cth_mapsz);
831 pmap_unmapdev((vm_offset_t)fps, sizeof(*fps));
832 return EINVAL;
833 }
834
981bebd1
SZ
835 /*
836 * Map the base table
837 */
838 cth = pmap_mapdev(fps->pap, cth_mapsz);
839 }
840
841 mpt->mp_fps = fps;
842 mpt->mp_cth = cth;
843 mpt->mp_cth_mapsz = cth_mapsz;
fa058384
SZ
844
845 return 0;
981bebd1
SZ
846}
847
848static void
849mptable_unmap(struct mptable_pos *mpt)
850{
851 if (mpt->mp_cth != NULL) {
852 pmap_unmapdev((vm_offset_t)mpt->mp_cth, mpt->mp_cth_mapsz);
853 mpt->mp_cth = NULL;
854 mpt->mp_cth_mapsz = 0;
855 }
856 if (mpt->mp_fps != NULL) {
857 pmap_unmapdev((vm_offset_t)mpt->mp_fps, sizeof(*mpt->mp_fps));
858 mpt->mp_fps = NULL;
859 }
860}
861
984263bc 862void
a9112655
SZ
863mp_set_cpuids(int cpu_id, int apic_id)
864{
865 CPU_TO_ID(cpu_id) = apic_id;
866 ID_TO_CPU(apic_id) = cpu_id;
c5c405ff
SZ
867
868 if (apic_id > lapic_id_max)
869 lapic_id_max = apic_id;
a9112655
SZ
870}
871
984263bc 872static int
a0873f07 873processor_entry(const struct PROCENTRY *entry, int cpu)
984263bc 874{
bd8aa7e2
SZ
875 KKASSERT(cpu > 0);
876
984263bc
MD
877 /* check for usability */
878 if (!(entry->cpu_flags & PROCENTRY_FLAG_EN))
879 return 0;
880
984263bc
MD
881 /* check for BSP flag */
882 if (entry->cpu_flags & PROCENTRY_FLAG_BP) {
a9112655 883 mp_set_cpuids(0, entry->apic_id);
984263bc
MD
884 return 0; /* it's already been counted */
885 }
886
887 /* add another AP to list, if less than max number of CPUs */
888 else if (cpu < MAXCPU) {
a9112655 889 mp_set_cpuids(cpu, entry->apic_id);
984263bc
MD
890 return 1;
891 }
892
893 return 0;
894}
895
984263bc 896/*
f13b5eec
MD
897 * Map a physical memory address representing I/O into KVA. The I/O
898 * block is assumed not to cross a page boundary.
899 */
900void *
01616f8b 901ioapic_map(vm_paddr_t pa)
f13b5eec
MD
902{
903 vm_offset_t vaddr;
904 int pgeflag;
905 int i;
906
907 KKASSERT(pa < 0x100000000LL);
908
909 pgeflag = 0; /* not used for SMP yet */
910
911 /*
912 * If the requested physical address has already been incidently
913 * mapped, just use the existing mapping. Otherwise create a new
914 * mapping.
915 */
916 for (i = IO_MAPPING_START_INDEX; i < SMPpt_alloc_index; ++i) {
917 if (((vm_offset_t)SMPpt[i] & PG_FRAME) ==
918 ((vm_offset_t)pa & PG_FRAME)) {
919 break;
920 }
921 }
922 if (i == SMPpt_alloc_index) {
923 if (i == NPTEPG - 2) {
924 panic("permanent_io_mapping: We ran out of space"
925 " in SMPpt[]!");
926 }
5277b9f6 927 SMPpt[i] = (pt_entry_t)(PG_V | PG_RW | PG_N | pgeflag |
f13b5eec
MD
928 ((vm_offset_t)pa & PG_FRAME));
929 ++SMPpt_alloc_index;
930 }
931 vaddr = (vm_offset_t)CPU_prvspace + (i * PAGE_SIZE) +
932 ((vm_offset_t)pa & PAGE_MASK);
933 return ((void *)vaddr);
934}
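/*
 * The returned address is just the SMPpt-covered window: page i of
 * SMPpt[] is visible at (vm_offset_t)CPU_prvspace + i * PAGE_SIZE, and
 * the low 12 bits of the physical address are added back in.  For
 * example, mapping an I/O APIC at the typical 0xfec00000 consumes one
 * SMPpt slot, and any later request for a physical address in that same
 * 4K frame returns the existing mapping.
 */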
935
936/*
984263bc
MD
937 * start each AP in our list
938 */
939static int
940start_all_aps(u_int boot_addr)
941{
b45759e1
MD
942 int x, i, pg;
943 int shift;
bb467734
MD
944 int smicount;
945 int smibest;
946 int smilast;
984263bc
MD
947 u_char mpbiosreason;
948 u_long mpbioswarmvec;
8a8d5d85 949 struct mdglobaldata *gd;
0f7a3396 950 struct privatespace *ps;
984263bc
MD
951 char *stack;
952 uintptr_t kptbase;
953
954 POSTCODE(START_ALL_APS_POST);
955
984263bc
MD
956 /* install the AP 1st level boot code */
957 install_ap_tramp(boot_addr);
958
959
960 /* save the current value of the warm-start vector */
961 mpbioswarmvec = *((u_long *) WARMBOOT_OFF);
984263bc
MD
962 outb(CMOS_REG, BIOS_RESET);
963 mpbiosreason = inb(CMOS_DATA);
984263bc 964
bb467734
MD
965 /* setup a vector to our boot code */
966 *((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
967 *((volatile u_short *) WARMBOOT_SEG) = (boot_addr >> 4);
968 outb(CMOS_REG, BIOS_RESET);
969 outb(CMOS_DATA, BIOS_WARM); /* 'warm-start' */
970
971 /*
972 * If we have a TSC we can figure out the SMI interrupt rate.
973 * The SMI does not necessarily use a constant rate. Spend
974 * up to 250ms trying to figure it out.
975 */
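	/*
	 * Example (illustrative): if the smallest gap observed between
	 * two SMIs converts to smibest = 1250us, the kprintf below
	 * reports a worst-case SMI rate of 800 Hz, and start_ap() later
	 * uses the value to shrink its INIT->STARTUP delays.
	 */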
976 smibest = 0;
977 if (cpu_feature & CPUID_TSC) {
978 set_apic_timer(275000);
979 smilast = read_apic_timer();
980 for (x = 0; x < 20 && read_apic_timer(); ++x) {
981 smicount = smitest();
982 if (smibest == 0 || smilast - smicount < smibest)
983 smibest = smilast - smicount;
984 smilast = smicount;
985 }
986 if (smibest > 250000)
987 smibest = 0;
988 if (smibest) {
989 smibest = smibest * (int64_t)1000000 /
990 get_apic_timer_frequency();
991 }
992 }
993 if (smibest)
994 kprintf("SMI Frequency (worst case): %d Hz (%d us)\n",
995 1000000 / smibest, smibest);
996
997
984263bc
MD
998 /* set up temporary P==V mapping for AP boot */
999 /* XXX this is a hack, we should boot the AP on its own stack/PTD */
1000 kptbase = (uintptr_t)(void *)KPTphys;
a44bdeec 1001 for (x = 0; x < NKPT; x++) {
984263bc
MD
1002 PTD[x] = (pd_entry_t)(PG_V | PG_RW |
1003 ((kptbase + x * PAGE_SIZE) & PG_FRAME));
a44bdeec 1004 }
0f7a3396 1005 cpu_invltlb();
984263bc
MD
1006
1007 /* start each AP */
1008 for (x = 1; x <= mp_naps; ++x) {
1009
1010 /* This is a bit verbose, it will go away soon. */
1011
1012 /* first page of AP's private space */
1013 pg = x * i386_btop(sizeof(struct privatespace));
1014
81c04d07 1015 /* allocate new private data page(s) */
e4846942 1016 gd = (struct mdglobaldata *)kmem_alloc(&kernel_map,
81c04d07 1017 MDGLOBALDATA_BASEALLOC_SIZE);
984263bc 1018 /* wire it into the private page table page */
81c04d07
MD
1019 for (i = 0; i < MDGLOBALDATA_BASEALLOC_SIZE; i += PAGE_SIZE) {
1020 SMPpt[pg + i / PAGE_SIZE] = (pt_entry_t)
1021 (PG_V | PG_RW | vtophys_pte((char *)gd + i));
1022 }
1023 pg += MDGLOBALDATA_BASEALLOC_PAGES;
1024
1025 SMPpt[pg + 0] = 0; /* *gd_CMAP1 */
1026 SMPpt[pg + 1] = 0; /* *gd_CMAP2 */
1027 SMPpt[pg + 2] = 0; /* *gd_CMAP3 */
1028 SMPpt[pg + 3] = 0; /* *gd_PMAP1 */
984263bc
MD
1029
1030 /* allocate and set up an idle stack data page */
e4846942 1031 stack = (char *)kmem_alloc(&kernel_map, UPAGES*PAGE_SIZE);
8a8d5d85 1032 for (i = 0; i < UPAGES; i++) {
81c04d07 1033 SMPpt[pg + 4 + i] = (pt_entry_t)
b5b32410 1034 (PG_V | PG_RW | vtophys_pte(PAGE_SIZE * i + stack));
8a8d5d85 1035 }
984263bc 1036
8a8d5d85
MD
1037 gd = &CPU_prvspace[x].mdglobaldata; /* official location */
1038 bzero(gd, sizeof(*gd));
0f7a3396 1039 gd->mi.gd_prvspace = ps = &CPU_prvspace[x];
8a8d5d85 1040
984263bc 1041 /* prime data page for it to use */
8a8d5d85 1042 mi_gdinit(&gd->mi, x);
8ad65e08 1043 cpu_gdinit(gd, x);
81c04d07
MD
1044 gd->gd_CMAP1 = &SMPpt[pg + 0];
1045 gd->gd_CMAP2 = &SMPpt[pg + 1];
1046 gd->gd_CMAP3 = &SMPpt[pg + 2];
1047 gd->gd_PMAP1 = &SMPpt[pg + 3];
0f7a3396
MD
1048 gd->gd_CADDR1 = ps->CPAGE1;
1049 gd->gd_CADDR2 = ps->CPAGE2;
1050 gd->gd_CADDR3 = ps->CPAGE3;
1051 gd->gd_PADDR1 = (unsigned *)ps->PPAGE1;
9388fcaa
MD
1052
1053 /*
1054 * Per-cpu pmap for get_ptbase().
1055 */
1056 gd->gd_GDADDR1= (unsigned *)
1057 kmem_alloc_nofault(&kernel_map, SEG_SIZE, SEG_SIZE);
1058 gd->gd_GDMAP1 = &PTD[(vm_offset_t)gd->gd_GDADDR1 >> PDRSHIFT];
1059
e4846942 1060 gd->mi.gd_ipiq = (void *)kmem_alloc(&kernel_map, sizeof(lwkt_ipiq) * (mp_naps + 1));
96728c05 1061 bzero(gd->mi.gd_ipiq, sizeof(lwkt_ipiq) * (mp_naps + 1));
984263bc 1062
8a8d5d85
MD
1063 /*
1064 * Setup the AP boot stack
1065 */
0f7a3396 1066 bootSTK = &ps->idlestack[UPAGES*PAGE_SIZE/2];
984263bc
MD
1067 bootAP = x;
1068
1069 /* attempt to start the Application Processor */
1070 CHECK_INIT(99); /* setup checkpoints */
bb467734 1071 if (!start_ap(gd, boot_addr, smibest)) {
26be20a0 1072 kprintf("AP #%d (PHY# %d) failed!\n", x, CPU_TO_ID(x));
984263bc
MD
1073 CHECK_PRINT("trace"); /* show checkpoints */
1074 /* better panic as the AP may be running loose */
26be20a0 1075 kprintf("panic y/n? [y] ");
984263bc
MD
1076 if (cngetc() != 'n')
1077 panic("bye-bye");
1078 }
1079 CHECK_PRINT("trace"); /* show checkpoints */
1080
1081 /* record its version info */
1082 cpu_apic_versions[x] = cpu_apic_versions[0];
984263bc
MD
1083 }
1084
0f7a3396
MD
1085 /* set ncpus to 1 + highest logical cpu. Not all may have come up */
1086 ncpus = x;
1087
b45759e1
MD
1088 /* ncpus2 -- ncpus rounded down to the nearest power of 2 */
1089 for (shift = 0; (1 << shift) <= ncpus; ++shift)
1090 ;
1091 --shift;
1092 ncpus2_shift = shift;
1093 ncpus2 = 1 << shift;
90100055
JH
1094 ncpus2_mask = ncpus2 - 1;
1095
b45759e1
MD
1096 /* ncpus_fit -- ncpus rounded up to the nearest power of 2 */
1097 if ((1 << shift) < ncpus)
1098 ++shift;
1099 ncpus_fit = 1 << shift;
1100 ncpus_fit_mask = ncpus_fit - 1;
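	/*
	 * Example: with ncpus = 6 the two computations above yield
	 * ncpus2_shift = 2, ncpus2 = 4, ncpus2_mask = 3 (round down) and
	 * ncpus_fit = 8, ncpus_fit_mask = 7 (round up).
	 */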
1101
984263bc 1102 /* build our map of 'other' CPUs */
da23a592 1103 mycpu->gd_other_cpus = smp_startup_mask & ~CPUMASK(mycpu->gd_cpuid);
e4846942 1104 mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map, sizeof(lwkt_ipiq) * ncpus);
96728c05 1105 bzero(mycpu->gd_ipiq, sizeof(lwkt_ipiq) * ncpus);
984263bc
MD
1106
1107 /* fill in our (BSP) APIC version */
1108 cpu_apic_versions[0] = lapic.version;
1109
1110 /* restore the warmstart vector */
1111 *(u_long *) WARMBOOT_OFF = mpbioswarmvec;
984263bc
MD
1112 outb(CMOS_REG, BIOS_RESET);
1113 outb(CMOS_DATA, mpbiosreason);
984263bc
MD
1114
1115 /*
8a8d5d85
MD
1116 * NOTE! The idlestack for the BSP was setup by locore. Finish
1117 * up, clean out the P==V mapping we did earlier.
984263bc 1118 */
984263bc
MD
1119 for (x = 0; x < NKPT; x++)
1120 PTD[x] = 0;
1121 pmap_set_opt();
1122
52596b13
SZ
1123 /*
1124 * Wait for all APs to finish initializing their LAPIC
1125 */
1126 mp_finish_lapic = 1;
1127 if (bootverbose)
1128 kprintf("SMP: Waiting APs LAPIC initialization\n");
1129 if (cpu_feature & CPUID_TSC)
1130 tsc0_offset = rdtsc();
1131 tsc_offsets[0] = 0;
1132 rel_mplock();
1133 while (smp_lapic_mask != smp_startup_mask) {
1134 cpu_lfence();
1135 if (cpu_feature & CPUID_TSC)
1136 tsc0_offset = rdtsc();
1137 }
1138 while (try_mplock() == 0)
1139 ;
1140
984263bc 1141 /* number of APs actually started */
8a8d5d85 1142 return ncpus - 1;
984263bc
MD
1143}
1144
984263bc
MD
1145/*
1146 * load the 1st level AP boot code into base memory.
1147 */
1148
1149/* targets for relocation */
1150extern void bigJump(void);
1151extern void bootCodeSeg(void);
1152extern void bootDataSeg(void);
1153extern void MPentry(void);
1154extern u_int MP_GDT;
1155extern u_int mp_gdtbase;
1156
1157static void
1158install_ap_tramp(u_int boot_addr)
1159{
1160 int x;
1161 int size = *(int *) ((u_long) & bootMP_size);
1162 u_char *src = (u_char *) ((u_long) bootMP);
1163 u_char *dst = (u_char *) boot_addr + KERNBASE;
1164 u_int boot_base = (u_int) bootMP;
1165 u_int8_t *dst8;
1166 u_int16_t *dst16;
1167 u_int32_t *dst32;
1168
1169 POSTCODE(INSTALL_AP_TRAMP_POST);
1170
1171 for (x = 0; x < size; ++x)
1172 *dst++ = *src++;
1173
1174 /*
1175 * modify addresses in code we just moved to basemem. unfortunately we
1176 * need fairly detailed info about mpboot.s for this to work. changes
1177 * to mpboot.s might require changes here.
1178 */
1179
1180 /* boot code is located in KERNEL space */
1181 dst = (u_char *) boot_addr + KERNBASE;
1182
1183 /* modify the lgdt arg */
1184 dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base));
1185 *dst32 = boot_addr + ((u_int) & MP_GDT - boot_base);
1186
1187 /* modify the ljmp target for MPentry() */
1188 dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
1189 *dst32 = ((u_int) MPentry - KERNBASE);
1190
1191 /* modify the target for boot code segment */
1192 dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
1193 dst8 = (u_int8_t *) (dst16 + 1);
1194 *dst16 = (u_int) boot_addr & 0xffff;
1195 *dst8 = ((u_int) boot_addr >> 16) & 0xff;
1196
1197 /* modify the target for boot data segment */
1198 dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
1199 dst8 = (u_int8_t *) (dst16 + 1);
1200 *dst16 = (u_int) boot_addr & 0xffff;
1201 *dst8 = ((u_int) boot_addr >> 16) & 0xff;
1202}
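/*
 * Worked example (hypothetical boot_addr = 0x9e000): the two segment
 * patches above store the low word 0xe000 and high byte 0x09 of
 * boot_addr, start_all_aps() writes boot_addr >> 4 = 0x9e00 into the
 * warm-boot segment at 0x469, and start_ap() derives its STARTUP IPI
 * vector as (boot_addr >> 12) & 0xff = 0x9e, i.e. the page number of
 * the trampoline.
 */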
1203
1204
1205/*
bb467734 1206 * This function starts the AP (application processor) identified
984263bc
MD
1207 * by the APIC ID 'physicalCpu'. It does quite a "song and dance"
1208 * to accomplish this. This is necessary because of the nuances
1209 * of the different hardware we might encounter. It ain't pretty,
1210 * but it seems to work.
a108bf71
MD
1211 *
1212 * NOTE: eventually an AP gets to ap_init(), which is called just
1213 * before the AP goes into the LWKT scheduler's idle loop.
984263bc
MD
1214 */
1215static int
bb467734 1216start_ap(struct mdglobaldata *gd, u_int boot_addr, int smibest)
984263bc
MD
1217{
1218 int physical_cpu;
1219 int vector;
984263bc
MD
1220 u_long icr_lo, icr_hi;
1221
1222 POSTCODE(START_AP_POST);
1223
1224 /* get the PHYSICAL APIC ID# */
0f7a3396 1225 physical_cpu = CPU_TO_ID(gd->mi.gd_cpuid);
984263bc
MD
1226
1227 /* calculate the vector */
1228 vector = (boot_addr >> 12) & 0xff;
1229
bb467734
MD
1230 /* We don't want anything interfering */
1231 cpu_disable_intr();
1232
8a8d5d85
MD
1233 /* Make sure the target cpu sees everything */
1234 wbinvd();
984263bc
MD
1235
1236 /*
bb467734
MD
1237 * Try to detect when a SMI has occurred, wait up to 200ms.
1238 *
1239 * If a SMI occurs during an AP reset but before we issue
1240 * the STARTUP command, the AP may brick. To work around
1241 * this problem we hold off doing the AP startup until
1242 * after we have detected the SMI. Hopefully another SMI
1243 * will not occur before we finish the AP startup.
1244 *
1245 * Retries don't seem to help. SMIs have a window of opportunity
1246 * and if USB->legacy keyboard emulation is enabled in the BIOS
1247 * the interrupt rate can be quite high.
1248 *
1249 * NOTE: Don't worry about the L1 cache load, it might bloat
1250 * ldelta a little but ndelta will be so huge when the SMI
1251 * occurs the detection logic will still work fine.
1252 */
1253 if (smibest) {
1254 set_apic_timer(200000);
1255 smitest();
1256 }
1257
1258 /*
984263bc
MD
1259 * First we do an INIT/RESET IPI. This INIT IPI might be run, resetting
1260 * and running the target CPU. OR this INIT IPI might be latched (P5
1261 * bug), CPU waiting for STARTUP IPI. OR this INIT IPI might be
1262 * ignored.
bb467734
MD
1263 *
1264 * see apic/apicreg.h for icr bit definitions.
1265 *
1266 * TIME CRITICAL CODE, DO NOT DO ANY KPRINTFS IN THE HOT PATH.
984263bc
MD
1267 */
1268
bb467734
MD
1269 /*
1270 * Setup the address for the target AP. We can setup
1271 * icr_hi once and then just trigger operations with
1272 * icr_lo.
1273 */
984263bc
MD
1274 icr_hi = lapic.icr_hi & ~APIC_ID_MASK;
1275 icr_hi |= (physical_cpu << 24);
bb467734 1276 icr_lo = lapic.icr_lo & 0xfff00000;
984263bc
MD
1277 lapic.icr_hi = icr_hi;
1278
bb467734
MD
1279 /*
1280 * Do an INIT IPI: assert RESET
1281 *
1282 * Use edge triggered mode to assert INIT
1283 */
984263bc 1284 lapic.icr_lo = icr_lo | 0x0000c500;
984263bc
MD
1285 while (lapic.icr_lo & APIC_DELSTAT_MASK)
1286 /* spin */ ;
1287
bb467734
MD
1288 /*
1289 * The spec calls for a 10ms delay but we may have to use a
1290 * MUCH lower delay to avoid bricking an AP due to a fast SMI
1291 * interrupt. We have other loops here too and dividing by 2
1292 * doesn't seem to be enough even after subtracting 350us,
1293 * so we divide by 4.
1294 *
1295 * Our minimum delay is 150uS, maximum is 10ms. If no SMI
1296 * interrupt was detected we use the full 10ms.
1297 */
1298 if (smibest == 0)
1299 u_sleep(10000);
1300 else if (smibest < 150 * 4 + 350)
1301 u_sleep(150);
1302 else if ((smibest - 350) / 4 < 10000)
1303 u_sleep((smibest - 350) / 4);
1304 else
1305 u_sleep(10000);
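	/*
	 * Example: with smibest = 2350us the third case applies and we
	 * sleep (2350 - 350) / 4 = 500us; with no SMI data (smibest == 0)
	 * the spec's full 10ms delay is used.
	 */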
984263bc 1306
bb467734
MD
1307 /*
1308 * Do an INIT IPI: deassert RESET
1309 *
1310 * Use level triggered mode to deassert. It is unclear
1311 * why we need to do this.
1312 */
1313 lapic.icr_lo = icr_lo | 0x00008500;
984263bc
MD
1314 while (lapic.icr_lo & APIC_DELSTAT_MASK)
1315 /* spin */ ;
bb467734 1316 u_sleep(150); /* wait 150us */
984263bc
MD
1317
1318 /*
bb467734 1319 * Next we do a STARTUP IPI: the previous INIT IPI might still be
984263bc
MD
1320 * latched, (P5 bug) this 1st STARTUP would then terminate
1321 * immediately, and the previously started INIT IPI would continue. OR
1322 * the previous INIT IPI has already run, and this STARTUP IPI will
1323 * run. OR the previous INIT IPI was ignored, and this STARTUP IPI
1324 * will run.
1325 */
984263bc
MD
1326 lapic.icr_lo = icr_lo | 0x00000600 | vector;
1327 while (lapic.icr_lo & APIC_DELSTAT_MASK)
1328 /* spin */ ;
1329 u_sleep(200); /* wait ~200uS */
1330
1331 /*
bb467734 1332 * Finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run IF
984263bc
MD
1333 * the previous STARTUP IPI was cancelled by a latched INIT IPI. OR
1334 * this STARTUP IPI will be ignored, as only ONE STARTUP IPI is
1335 * recognized after hardware RESET or INIT IPI.
1336 */
984263bc
MD
1337 lapic.icr_lo = icr_lo | 0x00000600 | vector;
1338 while (lapic.icr_lo & APIC_DELSTAT_MASK)
1339 /* spin */ ;
bb467734
MD
1340
1341 /* Resume normal operation */
1342 cpu_enable_intr();
984263bc 1343
8a8d5d85 1344 /* wait for it to start, see ap_init() */
984263bc 1345 set_apic_timer(5000000);/* == 5 seconds */
8a8d5d85 1346 while (read_apic_timer()) {
da23a592 1347 if (smp_startup_mask & CPUMASK(gd->mi.gd_cpuid))
984263bc 1348 return 1; /* return SUCCESS */
8a8d5d85 1349 }
bb467734 1350
984263bc
MD
1351 return 0; /* return FAILURE */
1352}
1353
bb467734
MD
1354static
1355int
1356smitest(void)
1357{
1358 int64_t ltsc;
1359 int64_t ntsc;
1360 int64_t ldelta;
1361 int64_t ndelta;
1362 int count;
1363
1364 ldelta = 0;
1365 ndelta = 0;
1366 while (read_apic_timer()) {
1367 ltsc = rdtsc();
1368 for (count = 0; count < 100; ++count)
1369 ntsc = rdtsc(); /* force loop to occur */
1370 if (ldelta) {
1371 ndelta = ntsc - ltsc;
1372 if (ldelta > ndelta)
1373 ldelta = ndelta;
1374 if (ndelta > ldelta * 2)
1375 break;
1376 } else {
1377 ldelta = ntsc - ltsc;
1378 }
1379 }
1380 return(read_apic_timer());
1381}
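/*
 * smitest() relies on 100 back-to-back rdtsc reads taking a roughly
 * constant time: when an SMI steals the cpu, one iteration's delta
 * (ndelta) suddenly exceeds twice the smallest delta seen so far
 * (ldelta) and the loop exits, returning how much of the APIC timer
 * period remained at that point.
 */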
984263bc
MD
1382
1383/*
0f7a3396 1384 * Lazy flush the TLB on all other CPU's. DEPRECATED.
984263bc 1385 *
0f7a3396
MD
1386 * If for some reason we were unable to start all cpus we cannot safely
1387 * use broadcast IPIs.
984263bc 1388 */
7d4d6fdb
MD
1389
1390static cpumask_t smp_invltlb_req;
b4b1a37a 1391#define SMP_INVLTLB_DEBUG
7d4d6fdb 1392
984263bc
MD
1393void
1394smp_invltlb(void)
1395{
97359a5b 1396#ifdef SMP
7d4d6fdb 1397 struct mdglobaldata *md = mdcpu;
2d910aaf
MD
1398#ifdef SMP_INVLTLB_DEBUG
1399 long count = 0;
1400 long xcount = 0;
1401#endif
4117f2fd 1402
7d4d6fdb
MD
1403 crit_enter_gd(&md->mi);
1404 md->gd_invltlb_ret = 0;
1405 ++md->mi.gd_cnt.v_smpinvltlb;
da23a592 1406 atomic_set_cpumask(&smp_invltlb_req, md->mi.gd_cpumask);
2d910aaf
MD
1407#ifdef SMP_INVLTLB_DEBUG
1408again:
1409#endif
0f7a3396 1410 if (smp_startup_mask == smp_active_mask) {
984263bc 1411 all_but_self_ipi(XINVLTLB_OFFSET);
0f7a3396 1412 } else {
7d4d6fdb
MD
1413 selected_apic_ipi(smp_active_mask & ~md->mi.gd_cpumask,
1414 XINVLTLB_OFFSET, APIC_DELMODE_FIXED);
0f7a3396 1415 }
2d910aaf
MD
1416
1417#ifdef SMP_INVLTLB_DEBUG
1418 if (xcount)
1419 kprintf("smp_invltlb: ipi sent\n");
1420#endif
7d4d6fdb
MD
1421 while ((md->gd_invltlb_ret & smp_active_mask & ~md->mi.gd_cpumask) !=
1422 (smp_active_mask & ~md->mi.gd_cpumask)) {
1423 cpu_mfence();
1424 cpu_pause();
2d910aaf
MD
1425#ifdef SMP_INVLTLB_DEBUG
1426 /* DEBUGGING */
1427 if (++count == 400000000) {
1428 print_backtrace(-1);
1429 kprintf("smp_invltlb: endless loop %08lx %08lx, "
1430 "rflags %016lx retry",
1431 (long)md->gd_invltlb_ret,
1432 (long)smp_invltlb_req,
1433 (long)read_eflags());
1434 __asm __volatile ("sti");
1435 ++xcount;
1436 if (xcount > 2)
1437 lwkt_process_ipiq();
1438 if (xcount > 3) {
da23a592
MD
1439 int bcpu = BSFCPUMASK(~md->gd_invltlb_ret &
1440 ~md->mi.gd_cpumask &
1441 smp_active_mask);
2d910aaf
MD
1442 globaldata_t xgd;
1443 kprintf("bcpu %d\n", bcpu);
1444 xgd = globaldata_find(bcpu);
1445 kprintf("thread %p %s\n", xgd->gd_curthread, xgd->gd_curthread->td_comm);
1446 }
1447 if (xcount > 5)
1448 panic("giving up");
1449 count = 0;
1450 goto again;
1451 }
1452#endif
7d4d6fdb 1453 }
da23a592 1454 atomic_clear_cpumask(&smp_invltlb_req, md->mi.gd_cpumask);
7d4d6fdb 1455 crit_exit_gd(&md->mi);
4117f2fd 1456#endif
984263bc
MD
1457}
1458
7d4d6fdb
MD
1459#ifdef SMP
1460
1461/*
1462 * Called from Xinvltlb assembly with interrupts disabled. We didn't
1463 * bother to bump the critical section count or nested interrupt count
1464 * so only do very low level operations here.
1465 */
1466void
1467smp_invltlb_intr(void)
1468{
1469 struct mdglobaldata *md = mdcpu;
1470 struct mdglobaldata *omd;
1471 cpumask_t mask;
1472 int cpu;
1473
1474 mask = smp_invltlb_req;
1475 cpu_mfence();
1476 cpu_invltlb();
1477 while (mask) {
da23a592
MD
1478 cpu = BSFCPUMASK(mask);
1479 mask &= ~CPUMASK(cpu);
7d4d6fdb 1480 omd = (struct mdglobaldata *)globaldata_find(cpu);
da23a592 1481 atomic_set_cpumask(&omd->gd_invltlb_ret, md->mi.gd_cpumask);
7d4d6fdb
MD
1482 }
1483}
1484
1485#endif
1486
984263bc
MD
1487/*
1488 * When called the executing CPU will send an IPI to all other CPUs
1489 * requesting that they halt execution.
1490 *
1491 * Usually (but not necessarily) called with 'other_cpus' as its arg.
1492 *
1493 * - Signals all CPUs in map to stop.
1494 * - Waits for each to stop.
1495 *
1496 * Returns:
1497 * -1: error
1498 * 0: NA
1499 * 1: ok
1500 *
1501 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
1502 * from executing at same time.
1503 */
1504int
da23a592 1505stop_cpus(cpumask_t map)
984263bc 1506{
0f7a3396 1507 map &= smp_active_mask;
984263bc
MD
1508
1509 /* send the Xcpustop IPI to all CPUs in map */
1510 selected_apic_ipi(map, XCPUSTOP_OFFSET, APIC_DELMODE_FIXED);
1511
1512 while ((stopped_cpus & map) != map)
1513 /* spin */ ;
1514
1515 return 1;
1516}
1517
1518
1519/*
1520 * Called by a CPU to restart stopped CPUs.
1521 *
1522 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
1523 *
1524 * - Signals all CPUs in map to restart.
1525 * - Waits for each to restart.
1526 *
1527 * Returns:
1528 * -1: error
1529 * 0: NA
1530 * 1: ok
1531 */
1532int
da23a592 1533restart_cpus(cpumask_t map)
984263bc 1534{
0f7a3396
MD
1535 /* signal other cpus to restart */
1536 started_cpus = map & smp_active_mask;
984263bc
MD
1537
1538 while ((stopped_cpus & map) != 0) /* wait for each to clear its bit */
1539 /* spin */ ;
1540
1541 return 1;
1542}
1543
984263bc 1544/*
8a8d5d85
MD
1545 * This is called once the mpboot code has gotten us properly relocated
1546 * and the MMU turned on, etc. ap_init() is actually the idle thread,
1547 * and when it returns the scheduler will call the real cpu_idle() main
1548 * loop for the idlethread. Interrupts are disabled on entry and should
1549 * remain disabled at return.
984263bc 1550 */
984263bc 1551void
8a8d5d85 1552ap_init(void)
984263bc
MD
1553{
1554 u_int apic_id;
1555
8a8d5d85 1556 /*
0f7a3396
MD
1557 * Adjust smp_startup_mask to signal the BSP that we have started
1558 * up successfully. Note that we do not yet hold the BGL. The BSP
1559 * is waiting for our signal.
1560 *
1561 * We can't set our bit in smp_active_mask yet because we are holding
1562 * interrupts physically disabled and remote cpus could deadlock
1563 * trying to send us an IPI.
8a8d5d85 1564 */
da23a592 1565 smp_startup_mask |= CPUMASK(mycpu->gd_cpuid);
35238fa5 1566 cpu_mfence();
8a8d5d85
MD
1567
1568 /*
52596b13
SZ
1569 * Interlock for LAPIC initialization. Wait until mp_finish_lapic is
1570 * non-zero, then get the MP lock.
41a01a4d
MD
1571 *
1572 * Note: We are in a critical section.
1573 *
41a01a4d
MD
1574 * Note: we are the idle thread, we can only spin.
1575 *
35238fa5 1576 * Note: The load fence is memory volatile and prevents the compiler
52596b13 1577 * from improperly caching mp_finish_lapic, and the cpu from improperly
35238fa5 1578 * caching it.
8a8d5d85 1579 */
52596b13 1580 while (mp_finish_lapic == 0)
b5d16701
MD
1581 cpu_lfence();
1582 while (try_mplock() == 0)
1583 ;
8a8d5d85 1584
374133e3 1585 if (cpu_feature & CPUID_TSC) {
b5d16701
MD
1586 /*
1587 * The BSP is constantly updating tsc0_offset, figure out
1588 * the relative difference to synchronize ktrdump.
1589 */
1590 tsc_offsets[mycpu->gd_cpuid] = rdtsc() - tsc0_offset;
374133e3
MD
1591 }
1592
984263bc
MD
1593 /* BSP may have changed PTD while we're waiting for the lock */
1594 cpu_invltlb();
1595
984263bc
MD
1596#if defined(I586_CPU) && !defined(NO_F00F_HACK)
1597 lidt(&r_idt);
1598#endif
1599
1600 /* Build our map of 'other' CPUs. */
da23a592 1601 mycpu->gd_other_cpus = smp_startup_mask & ~CPUMASK(mycpu->gd_cpuid);
984263bc 1602
984263bc 1603 /* A quick check from sanity claus */
d53907dd 1604 apic_id = (apic_id_to_logical[(lapic.id & 0xff000000) >> 24]);
8a8d5d85 1605 if (mycpu->gd_cpuid != apic_id) {
26be20a0
SW
1606 kprintf("SMP: cpuid = %d\n", mycpu->gd_cpuid);
1607 kprintf("SMP: apic_id = %d\n", apic_id);
1608 kprintf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]);
984263bc
MD
1609 panic("cpuid mismatch! boom!!");
1610 }
1611
b52c8db0 1612 /* Initialize AP's local APIC for irq's */
5ddeabb9 1613 lapic_init(FALSE);
984263bc 1614
52596b13
SZ
1615 /* LAPIC initialization is done */
1616 smp_lapic_mask |= CPUMASK(mycpu->gd_cpuid);
1617 cpu_mfence();
1618
1619 /* Let BSP move onto the next initialization stage */
1620 rel_mplock();
1621
1622 /*
1623 * Interlock for finalization. Wait until mp_finish is non-zero,
1624 * then get the MP lock.
1625 *
1626 * Note: We are in a critical section.
1627 *
1628 * Note: we are the idle thread, we can only spin.
1629 *
1630 * Note: The load fence is memory volatile and prevents the compiler
1631 * from improperly caching mp_finish, and the cpu from improperly
1632 * caching it.
1633 */
1634 while (mp_finish == 0)
1635 cpu_lfence();
1636 while (try_mplock() == 0)
1637 ;
1638
1639 /* BSP may have changed PTD while we're waiting for the lock */
1640 cpu_invltlb();
1641
984263bc
MD
1642 /* Set memory range attributes for this CPU to match the BSP */
1643 mem_range_AP_init();
1644
a2a5ad0d 1645 /*
4c9f5a7f
MD
1646 * Once we go active we must process any IPIQ messages that may
1647 * have been queued, because no actual IPI will occur until we
1648 * set our bit in the smp_active_mask. If we don't, the IPI
1649 * message interlock could be left set which would also prevent
1650 * further IPIs.
1651 *
8a8d5d85
MD
1652 * The idle loop doesn't expect the BGL to be held and while
1653 * lwkt_switch() normally cleans things up this is a special case
1654 * because we are returning almost directly into the idle loop.
41a01a4d
MD
1655 *
1656 * The idle thread is never placed on the runq, make sure
4c9f5a7f 1657 * nothing we've done put it there.
8a8d5d85 1658 */
b5d16701 1659 KKASSERT(get_mplock_count(curthread) == 1);
da23a592 1660 smp_active_mask |= CPUMASK(mycpu->gd_cpuid);
d19f6edf
MD
1661
1662 /*
1663 * Enable interrupts here. idle_restore will also do it, but
1664 * doing it here lets us clean up any strays that got posted to
1665 * the CPU during the AP boot while we are still in a critical
1666 * section.
1667 */
1668 __asm __volatile("sti; pause; pause"::);
c263294b 1669 bzero(mdcpu->gd_ipending, sizeof(mdcpu->gd_ipending));
d19f6edf 1670
4a19580d 1671 initclocks_pcpu(); /* clock interrupts (via IPIs) */
4c9f5a7f 1672 lwkt_process_ipiq();
d19f6edf
MD
1673
1674 /*
1675 * Releasing the mp lock lets the BSP finish up the SMP init
1676 */
96728c05 1677 rel_mplock();
41a01a4d 1678 KKASSERT((curthread->td_flags & TDF_RUNQ) == 0);
984263bc
MD
1679}
1680
41a01a4d
MD
1681/*
1682 * Get SMP fully working before we start initializing devices.
1683 */
1684static
1685void
1686ap_finish(void)
1687{
1688 mp_finish = 1;
1689 if (bootverbose)
26be20a0 1690 kprintf("Finish MP startup\n");
41a01a4d 1691 rel_mplock();
52596b13 1692 while (smp_active_mask != smp_startup_mask)
35238fa5 1693 cpu_lfence();
4da43e1f 1694 while (try_mplock() == 0)
41a01a4d
MD
1695 ;
1696 if (bootverbose)
26be20a0 1697 kprintf("Active CPU Mask: %08x\n", smp_active_mask);
41a01a4d
MD
1698}
1699
ba39e2e0 1700SYSINIT(finishsmp, SI_BOOT2_FINISH_SMP, SI_ORDER_FIRST, ap_finish, NULL)
41a01a4d 1701
96728c05
MD
1702void
1703cpu_send_ipiq(int dcpu)
1704{
da23a592 1705 if (CPUMASK(dcpu) & smp_active_mask)
41a01a4d 1706 single_apic_ipi(dcpu, XIPIQ_OFFSET, APIC_DELMODE_FIXED);
96728c05 1707}
41a01a4d
MD
1708
1709#if 0 /* single_apic_ipi_passive() not working yet */
1710/*
1711 * Returns 0 on failure, 1 on success
1712 */
1713int
1714cpu_send_ipiq_passive(int dcpu)
1715{
1716 int r = 0;
da23a592 1717 if (CPUMASK(dcpu) & smp_active_mask) {
41a01a4d
MD
1718 r = single_apic_ipi_passive(dcpu, XIPIQ_OFFSET,
1719 APIC_DELMODE_FIXED);
1720 }
1721 return(r);
1722}
1723#endif
1724
e0fd357f
SZ
1725static int
1726mptable_bus_info_callback(void *xarg, const void *pos, int type)
1727{
1728 struct mptable_bus_info *bus_info = xarg;
1729 const struct BUSENTRY *ent;
1730 struct mptable_bus *bus;
1731
1732 if (type != 1)
1733 return 0;
c715f062 1734
e0fd357f 1735 ent = pos;
c715f062
SZ
1736 TAILQ_FOREACH(bus, &bus_info->mbi_list, mb_link) {
1737 if (bus->mb_id == ent->bus_id) {
1738 kprintf("mptable_bus_info_alloc: duplicated bus id "
1739 "(%d)\n", bus->mb_id);
1740 return EINVAL;
1741 }
1742 }
e0fd357f
SZ
1743
1744 bus = NULL;
1745 if (strncmp(ent->bus_type, "PCI", 3) == 0) {
1746 bus = kmalloc(sizeof(*bus), M_TEMP, M_WAITOK | M_ZERO);
1747 bus->mb_type = MPTABLE_BUS_PCI;
1748 } else if (strncmp(ent->bus_type, "ISA", 3) == 0) {
1749 bus = kmalloc(sizeof(*bus), M_TEMP, M_WAITOK | M_ZERO);
1750 bus->mb_type = MPTABLE_BUS_ISA;
1751 }
1752
1753 if (bus != NULL) {
c715f062
SZ
1754 bus->mb_id = ent->bus_id;
1755 TAILQ_INSERT_TAIL(&bus_info->mbi_list, bus, mb_link);
e0fd357f
SZ
1756 }
1757 return 0;
1758}
1759
1760static void
1761mptable_bus_info_alloc(const mpcth_t cth, struct mptable_bus_info *bus_info)
1762{
1763 int error;
1764
1765 bzero(bus_info, sizeof(*bus_info));
1766 TAILQ_INIT(&bus_info->mbi_list);
1767
1768 error = mptable_iterate_entries(cth, mptable_bus_info_callback, bus_info);
1769 if (error)
1770 mptable_bus_info_free(bus_info);
1771}
1772
1773static void
1774mptable_bus_info_free(struct mptable_bus_info *bus_info)
1775{
1776 struct mptable_bus *bus;
1777
1778 while ((bus = TAILQ_FIRST(&bus_info->mbi_list)) != NULL) {
1779 TAILQ_REMOVE(&bus_info->mbi_list, bus, mb_link);
1780 kfree(bus, M_TEMP);
1781 }
1782}
1783
a0873f07
SZ
1784struct mptable_lapic_cbarg1 {
1785 int cpu_count;
44c36320
SZ
1786 int ht_fixup;
1787 u_int ht_apicid_mask;
a0873f07
SZ
1788};
1789
1790static int
1791mptable_lapic_pass1_callback(void *xarg, const void *pos, int type)
1792{
1793 const struct PROCENTRY *ent;
1794 struct mptable_lapic_cbarg1 *arg = xarg;
1795
1796 if (type != 0)
1797 return 0;
1798 ent = pos;
1799
1800 if ((ent->cpu_flags & PROCENTRY_FLAG_EN) == 0)
1801 return 0;
1802
1803 arg->cpu_count++;
44c36320
SZ
1804 if (ent->apic_id < 32) {
1805 arg->ht_apicid_mask |= 1 << ent->apic_id;
1806 } else if (arg->ht_fixup) {
1807 kprintf("MPTABLE: lapic id > 32, disable HTT fixup\n");
1808 arg->ht_fixup = 0;
1809 }
a0873f07
SZ
1810 return 0;
1811}
1812
1813struct mptable_lapic_cbarg2 {
1814 int cpu;
44c36320 1815 int logical_cpus;
a0873f07
SZ
1816 int found_bsp;
1817};
1818
1819static int
1820mptable_lapic_pass2_callback(void *xarg, const void *pos, int type)
1821{
1822 const struct PROCENTRY *ent;
1823 struct mptable_lapic_cbarg2 *arg = xarg;
1824
1825 if (type != 0)
1826 return 0;
1827 ent = pos;
1828
1829 if (ent->cpu_flags & PROCENTRY_FLAG_BP) {
1830 KKASSERT(!arg->found_bsp);
1831 arg->found_bsp = 1;
1832 }
1833
1834 if (processor_entry(ent, arg->cpu))
1835 arg->cpu++;
1836
44c36320 1837 if (arg->logical_cpus) {
a0873f07
SZ
1838 struct PROCENTRY proc;
1839 int i;
1840
1841 /*
1842 * Create fake mptable processor entries
1843 * and feed them to processor_entry() to
1844 * enumerate the logical CPUs.
1845 */
1846 bzero(&proc, sizeof(proc));
1847 proc.type = 0;
1848 proc.cpu_flags = PROCENTRY_FLAG_EN;
1849 proc.apic_id = ent->apic_id;
1850
44c36320 1851 for (i = 1; i < arg->logical_cpus; i++) {
a0873f07
SZ
1852 proc.apic_id++;
1853 processor_entry(&proc, arg->cpu);
a0873f07
SZ
1854 arg->cpu++;
1855 }
1856 }
1857 return 0;
1858}
1859
322abba7
SZ
1860static void
1861mptable_lapic_default(void)
1862{
1863 int ap_apicid, bsp_apicid;
1864
1865 mp_naps = 1; /* exclude BSP */
1866
1867 /* Map local apic before the id field is accessed */
84cc808b 1868 lapic_map(DEFAULT_APIC_BASE);
322abba7
SZ
1869
1870 bsp_apicid = APIC_ID(lapic.id);
1871 ap_apicid = (bsp_apicid == 0) ? 1 : 0;
1872
1873 /* BSP */
1874 mp_set_cpuids(0, bsp_apicid);
1875 /* one and only AP */
1876 mp_set_cpuids(1, ap_apicid);
1877}
1878
1879/*
1880 * Configure:
1881 * mp_naps
1882 * ID_TO_CPU(N), APIC ID to logical CPU table
1883 * CPU_TO_ID(N), logical CPU to APIC ID table
1884 */
1885static void
1886mptable_lapic_enumerate(struct lapic_enumerator *e)
1887{
1888 struct mptable_pos mpt;
1889 struct mptable_lapic_cbarg1 arg1;
1890 struct mptable_lapic_cbarg2 arg2;
1891 mpcth_t cth;
1892 int error, logical_cpus = 0;
1893 vm_offset_t lapic_addr;
1894
1895 if (mptable_use_default) {
1896 mptable_lapic_default();
1897 return;
1898 }
1899
1900 error = mptable_map(&mpt);
1901 if (error)
1902 panic("mptable_lapic_enumerate: mptable_map failed\n");
1903 KKASSERT(!MPTABLE_POS_USE_DEFAULT(&mpt));
1904
1905 cth = mpt.mp_cth;
1906
1907 /* Save local apic address */
1908 lapic_addr = (vm_offset_t)cth->apic_address;
1909 KKASSERT(lapic_addr != 0);
1910
1911 /*
1912 * Find out how many CPUs do we have
1913 */
1914 bzero(&arg1, sizeof(arg1));
1915 arg1.ht_fixup = 1; /* Apply ht fixup by default */
1916
1917 error = mptable_iterate_entries(cth,
1918 mptable_lapic_pass1_callback, &arg1);
1919 if (error)
1920 panic("mptable_iterate_entries(lapic_pass1) failed\n");
1921 KKASSERT(arg1.cpu_count != 0);
1922
1923 /* See if we need to fixup HT logical CPUs. */
1924 if (arg1.ht_fixup) {
1925 logical_cpus = mptable_hyperthread_fixup(arg1.ht_apicid_mask,
1926 arg1.cpu_count);
1927 if (logical_cpus != 0)
1928 arg1.cpu_count *= logical_cpus;
1929 }
1930 mp_naps = arg1.cpu_count;
1931
1932 /* Qualify the numbers again, after possible HT fixup */
1933 if (mp_naps > MAXCPU) {
1934 kprintf("Warning: only using %d of %d available CPUs!\n",
1935 MAXCPU, mp_naps);
1936 mp_naps = MAXCPU;
1937 }
1938
1939 --mp_naps; /* subtract the BSP */
1940
1941 /*
1942 * Link logical CPU id to local apic id
1943 */
1944 bzero(&arg2, sizeof(arg2));
1945 arg2.cpu = 1;
1946 arg2.logical_cpus = logical_cpus;
1947
1948 error = mptable_iterate_entries(cth,
1949 mptable_lapic_pass2_callback, &arg2);
1950 if (error)
1951 panic("mptable_iterate_entries(lapic_pass2) failed\n");
1952 KKASSERT(arg2.found_bsp);
1953
1954 /* Map local apic */
1955 lapic_map(lapic_addr);
1956
1957 mptable_unmap(&mpt);
1958}
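/*
 * Enumeration summary: pass 1 yields the CPU count (possibly
 * multiplied by the HT fixup), the count is clamped to MAXCPU and
 * turned into mp_naps (APs only, BSP excluded), and pass 2 fills in
 * the CPU <-> APIC id tables before the local APIC is mapped at the
 * address taken from the table header.
 */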
1959
1960struct mptable_lapic_probe_cbarg {
1961 int cpu_count;
1962 int found_bsp;
1963};
1964
1965static int
1966mptable_lapic_probe_callback(void *xarg, const void *pos, int type)
1967{
1968 const struct PROCENTRY *ent;
1969 struct mptable_lapic_probe_cbarg *arg = xarg;
1970
1971 if (type != 0)
1972 return 0;
1973 ent = pos;
1974
1975 if ((ent->cpu_flags & PROCENTRY_FLAG_EN) == 0)
1976 return 0;
1977 arg->cpu_count++;
1978
1979 if (ent->cpu_flags & PROCENTRY_FLAG_BP) {
1980 if (arg->found_bsp) {
1981 kprintf("more than one BSP in base MP table\n");
1982 return EINVAL;
1983 }
1984 arg->found_bsp = 1;
1985 }
1986 return 0;
1987}
1988
1989static int
1990mptable_lapic_probe(struct lapic_enumerator *e)
1991{
1992 struct mptable_pos mpt;
1993 struct mptable_lapic_probe_cbarg arg;
1994 mpcth_t cth;
1995 int error;
1996
1997 if (mptable_fps_phyaddr == 0)
1998 return ENXIO;
1999
2000 if (mptable_use_default)
2001 return 0;
2002
2003 error = mptable_map(&mpt);
2004 if (error)
2005 return error;
2006 KKASSERT(!MPTABLE_POS_USE_DEFAULT(&mpt));
2007
2008 error = EINVAL;
2009 cth = mpt.mp_cth;
2010
2011 if (cth->apic_address == 0)
2012 goto done;
2013
2014 bzero(&arg, sizeof(arg));
2015 error = mptable_iterate_entries(cth,
2016 mptable_lapic_probe_callback, &arg);
2017 if (!error) {
2018 if (arg.cpu_count == 0) {
2019 kprintf("MP table contains no processor entries\n");
2020 error = EINVAL;
2021 } else if (!arg.found_bsp) {
2022 kprintf("MP table does not contains BSP entry\n");
2023 error = EINVAL;
2024 }
2025 }
2026done:
2027 mptable_unmap(&mpt);
2028 return error;
2029}
2030
2031static struct lapic_enumerator mptable_lapic_enumerator = {
2032 .lapic_prio = LAPIC_ENUM_PRIO_MPTABLE,
2033 .lapic_probe = mptable_lapic_probe,
2034 .lapic_enumerate = mptable_lapic_enumerate
2035};
2036
2037static void
2038mptable_lapic_enum_register(void)
2039{
2040 lapic_enumerator_register(&mptable_lapic_enumerator);
2041}
2042SYSINIT(mptable_lapic, SI_BOOT2_PRESMP, SI_ORDER_ANY,
2043 mptable_lapic_enum_register, 0);
2044
2045static int
2046mptable_ioapic_list_callback(void *xarg, const void *pos, int type)
2047{
2048 const struct IOAPICENTRY *ent;
2049 struct mptable_ioapic *nioapic, *ioapic;
2050
2051 if (type != 2)
2052 return 0;
2053 ent = pos;
2054
2055 if ((ent->apic_flags & IOAPICENTRY_FLAG_EN) == 0)
2056 return 0;
2057
2058 if (ent->apic_address == 0) {
2059 kprintf("mptable_ioapic_create_list: zero IOAPIC addr\n");
2060 return EINVAL;
2061 }
2062
2063 TAILQ_FOREACH(ioapic, &mptable_ioapic_list, mio_link) {
2064 if (ioapic->mio_apic_id == ent->apic_id) {
2065 kprintf("mptable_ioapic_create_list: duplicated "
2066 "apic id %d\n", ioapic->mio_apic_id);
2067 return EINVAL;
2068 }
2069 if (ioapic->mio_addr == (uint32_t)ent->apic_address) {
2070 kprintf("mptable_ioapic_create_list: overlapped "
2071 "IOAPIC addr 0x%08x", ioapic->mio_addr);
2072 return EINVAL;
2073 }
2074 }
2075
2076 nioapic = kmalloc(sizeof(*nioapic), M_DEVBUF, M_WAITOK | M_ZERO);
2077 nioapic->mio_apic_id = ent->apic_id;
2078 nioapic->mio_addr = (uint32_t)ent->apic_address;
2079
2080 /*
2081 * Create IOAPIC list in ascending order of APIC ID
2082 */
2083 TAILQ_FOREACH_REVERSE(ioapic, &mptable_ioapic_list,
2084 mptable_ioapic_list, mio_link) {
2085 if (nioapic->mio_apic_id > ioapic->mio_apic_id) {
2086 TAILQ_INSERT_AFTER(&mptable_ioapic_list,
2087 ioapic, nioapic, mio_link);
2088 break;
2089 }
2090 }
2091 if (ioapic == NULL)
2092 TAILQ_INSERT_HEAD(&mptable_ioapic_list, nioapic, mio_link);
2093
2094 return 0;
2095}
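/*
 * The reverse walk above is a simple insertion sort: a new entry goes
 * after the last existing IOAPIC with a smaller APIC id, or to the
 * head when none qualifies, so mptable_ioapic_list is always kept in
 * ascending APIC id order.  The index assigned below in
 * mptable_ioapic_create_list() therefore follows APIC id order too.
 */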
2096
2097static void
2098mptable_ioapic_create_list(void)
2099{
2100 struct mptable_ioapic *ioapic;
2101 struct mptable_pos mpt;
2102 int idx, error;
2103
2104 if (mptable_fps_phyaddr == 0)
2105 return;
2106
2107 if (mptable_use_default) {
2108 ioapic = kmalloc(sizeof(*ioapic), M_DEVBUF, M_WAITOK | M_ZERO);
2109 ioapic->mio_idx = 0;
2110 ioapic->mio_apic_id = 0; /* NOTE: any value is ok here */
2111 ioapic->mio_addr = 0xfec00000; /* XXX magic number */
2112
2113 TAILQ_INSERT_HEAD(&mptable_ioapic_list, ioapic, mio_link);
2114 return;
2115 }
2116
2117 error = mptable_map(&mpt);
2118 if (error)
2119 panic("mptable_ioapic_create_list: mptable_map failed\n");
2120 KKASSERT(!MPTABLE_POS_USE_DEFAULT(&mpt));
2121
2122 error = mptable_iterate_entries(mpt.mp_cth,
2123 mptable_ioapic_list_callback, NULL);
2124 if (error) {
2125 while ((ioapic = TAILQ_FIRST(&mptable_ioapic_list)) != NULL) {
2126 TAILQ_REMOVE(&mptable_ioapic_list, ioapic, mio_link);
2127 kfree(ioapic, M_DEVBUF);
2128 }
2129 goto done;
2130 }
2131
2132 /*
2133 * Assign index number for each IOAPIC
2134 */
2135 idx = 0;
2136 TAILQ_FOREACH(ioapic, &mptable_ioapic_list, mio_link) {
2137 ioapic->mio_idx = idx;
2138 ++idx;
2139 }
2140done:
2141 mptable_unmap(&mpt);
2142}
2143SYSINIT(mptable_ioapic_list, SI_BOOT2_PRESMP, SI_ORDER_SECOND,
2144 mptable_ioapic_create_list, 0);
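/*
 * In the default-configuration case there are no table entries to
 * parse, so a single IOAPIC is assumed at the architectural default
 * address 0xfec00000 with an arbitrary APIC id; otherwise the list is
 * built from the type-2 (I/O APIC) entries and indexed in APIC id
 * order.
 */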
2145
2146static int
2147mptable_pci_int_callback(void *xarg, const void *pos, int type)
2148{
2149 const struct mptable_bus_info *bus_info = xarg;
2150 const struct mptable_ioapic *ioapic;
2151 const struct mptable_bus *bus;
2152 struct mptable_pci_int *pci_int;
2153 const struct INTENTRY *ent;
2154 int pci_pin, pci_dev;
2155
2156 if (type != 3)
2157 return 0;
2158 ent = pos;
2159
2160 if (ent->int_type != 0)
2161 return 0;
2162
2163 TAILQ_FOREACH(bus, &bus_info->mbi_list, mb_link) {
2164 if (bus->mb_type == MPTABLE_BUS_PCI &&
2165 bus->mb_id == ent->src_bus_id)
2166 break;
2167 }
2168 if (bus == NULL)
2169 return 0;
2170
2171 TAILQ_FOREACH(ioapic, &mptable_ioapic_list, mio_link) {
2172 if (ioapic->mio_apic_id == ent->dst_apic_id)
2173 break;
2174 }
2175 if (ioapic == NULL) {
2176 kprintf("MPTABLE: warning PCI int dst apic id %d "
2177 "does not exist\n", ent->dst_apic_id);
2178 return 0;
2179 }
2180
2181 pci_pin = ent->src_bus_irq & 0x3;
2182 pci_dev = (ent->src_bus_irq >> 2) & 0x1f;
2183
2184 TAILQ_FOREACH(pci_int, &mptable_pci_int_list, mpci_link) {
2185 if (pci_int->mpci_bus == ent->src_bus_id &&
2186 pci_int->mpci_dev == pci_dev &&
2187 pci_int->mpci_pin == pci_pin) {
2188 if (pci_int->mpci_ioapic_idx == ioapic->mio_idx &&
2189 pci_int->mpci_ioapic_pin == ent->dst_apic_int) {
2190 kprintf("MPTABLE: warning duplicated "
2191 "PCI int entry for "
2192 "bus %d, dev %d, pin %d\n",
2193 pci_int->mpci_bus,
2194 pci_int->mpci_dev,
2195 pci_int->mpci_pin);
2196 return 0;
2197 } else {
2198 kprintf("mptable_pci_int_register: "
2199 "conflict PCI int entry for "
2200 "bus %d, dev %d, pin %d, "
2201 "IOAPIC %d.%d -> %d.%d\n",
2202 pci_int->mpci_bus,
2203 pci_int->mpci_dev,
2204 pci_int->mpci_pin,
2205 pci_int->mpci_ioapic_idx,
2206 pci_int->mpci_ioapic_pin,
2207 ioapic->mio_idx,
2208 ent->dst_apic_int);
2209 return EINVAL;
2210 }
2211 }
2212 }
2213
2214 pci_int = kmalloc(sizeof(*pci_int), M_DEVBUF, M_WAITOK | M_ZERO);
2215
2216 pci_int->mpci_bus = ent->src_bus_id;
2217 pci_int->mpci_dev = pci_dev;
2218 pci_int->mpci_pin = pci_pin;
2219 pci_int->mpci_ioapic_idx = ioapic->mio_idx;
2220 pci_int->mpci_ioapic_pin = ent->dst_apic_int;
2221
2222 TAILQ_INSERT_TAIL(&mptable_pci_int_list, pci_int, mpci_link);
2223
2224 return 0;
2225}
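/*
 * For PCI interrupt entries the MP table encodes the source in
 * src_bus_irq as (device << 2) | pin, which is what the unpacking
 * above relies on.  Worked example with an illustrative value:
 *
 *	src_bus_irq = 0x4a  ->  pci_dev = (0x4a >> 2) & 0x1f = 18
 *	                        pci_pin =  0x4a & 0x3        = 2 (INTC)
 *
 * Duplicate entries for the same bus/dev/pin are tolerated only when
 * they name the same IOAPIC input; a conflicting redirection is
 * treated as a fatal table error (EINVAL).
 */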
2226
2227static void
2228mptable_pci_int_register(void)
2229{
2230 struct mptable_bus_info bus_info;
2231 const struct mptable_bus *bus;
2232 struct mptable_pci_int *pci_int;
2233 struct mptable_pos mpt;
2234 int error, force_pci0, npcibus;
2235 mpcth_t cth;
2236
2237 if (mptable_fps_phyaddr == 0)
2238 return;
2239
2240 if (mptable_use_default)
2241 return;
2242
2243 if (TAILQ_EMPTY(&mptable_ioapic_list))
2244 return;
2245
2246 error = mptable_map(&mpt);
2247 if (error)
2248 panic("mptable_pci_int_register: mptable_map failed\n");
2249 KKASSERT(!MPTABLE_POS_USE_DEFAULT(&mpt));
2250
2251 cth = mpt.mp_cth;
2252
2253 mptable_bus_info_alloc(cth, &bus_info);
2254 if (TAILQ_EMPTY(&bus_info.mbi_list))
2255 goto done;
2256
2257 force_pci0 = 0;
2258 npcibus = 0;
2259 TAILQ_FOREACH(bus, &bus_info.mbi_list, mb_link) {
2260 if (bus->mb_type == MPTABLE_BUS_PCI)
2261 ++npcibus;
2262 }
2263 if (npcibus == 0) {
2264 mptable_bus_info_free(&bus_info);
2265 goto done;
2266 } else if (npcibus == 1) {
2267 force_pci0 = 1;
2268 }
2269
2270 error = mptable_iterate_entries(cth,
2271 mptable_pci_int_callback, &bus_info);
2272
2273 mptable_bus_info_free(&bus_info);
2274
2275 if (error) {
2276 while ((pci_int = TAILQ_FIRST(&mptable_pci_int_list)) != NULL) {
2277 TAILQ_REMOVE(&mptable_pci_int_list, pci_int, mpci_link);
2278 kfree(pci_int, M_DEVBUF);
2279 }
2280 goto done;
2281 }
2282
2283 if (force_pci0) {
2284 TAILQ_FOREACH(pci_int, &mptable_pci_int_list, mpci_link)
2285 pci_int->mpci_bus = 0;
2286 }
2287done:
2288 mptable_unmap(&mpt);
2289}
2290SYSINIT(mptable_pci, SI_BOOT2_PRESMP, SI_ORDER_ANY,
2291 mptable_pci_int_register, 0);
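/*
 * When the table describes exactly one PCI bus its id may not match
 * the id the PCI probe code assigns later, so every recorded entry is
 * normalized to bus 0 (force_pci0).  With more than one PCI bus the
 * ids are trusted as-is.
 */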
2292
2293struct mptable_ioapic_probe_cbarg {
2294 const struct mptable_bus_info *bus_info;
2295};
2296
2297static int
2298mptable_ioapic_probe_callback(void *xarg, const void *pos, int type)
2299{
2300 struct mptable_ioapic_probe_cbarg *arg = xarg;
2301 const struct mptable_ioapic *ioapic;
2302 const struct mptable_bus *bus;
2303 const struct INTENTRY *ent;
2304
2305 if (type != 3)
2306 return 0;
2307 ent = pos;
2308
2309 if (ent->int_type != 0)
2310 return 0;
2311
2312 TAILQ_FOREACH(bus, &arg->bus_info->mbi_list, mb_link) {
2313 if (bus->mb_type == MPTABLE_BUS_ISA &&
2314 bus->mb_id == ent->src_bus_id)
2315 break;
2316 }
2317 if (bus == NULL)
2318 return 0;
2319
2320 TAILQ_FOREACH(ioapic, &mptable_ioapic_list, mio_link) {
2321 if (ioapic->mio_apic_id == ent->dst_apic_id)
2322 break;
2323 }
2324 if (ioapic == NULL) {
2325 kprintf("MPTABLE: warning ISA int dst apic id %d "
2326 "does not exist\n", ent->dst_apic_id);
2327 return 0;
2328 }
2329
2330 /* XXX magic number */
2331 if (ent->src_bus_irq >= 16) {
2332 kprintf("mptable_ioapic_probe: invalid ISA irq (%d)\n",
2333 ent->src_bus_irq);
2334 return EINVAL;
2335 }
2336 return 0;
2337}
2338
2339static int
2340mptable_ioapic_probe(struct ioapic_enumerator *e)
2341{
2342 struct mptable_ioapic_probe_cbarg arg;
2343 struct mptable_bus_info bus_info;
2344 struct mptable_pos mpt;
2345 mpcth_t cth;
2346 int error;
2347
2348 if (mptable_fps_phyaddr == 0)
2349 return ENXIO;
2350
2351 if (mptable_use_default)
2352 return 0;
2353
2354 if (TAILQ_EMPTY(&mptable_ioapic_list))
2355 return ENXIO;
2356
2357 error = mptable_map(&mpt);
2358 if (error)
2359 panic("mptable_ioapic_probe: mptable_map failed\n");
2360 KKASSERT(!MPTABLE_POS_USE_DEFAULT(&mpt));
2361
2362 cth = mpt.mp_cth;
2363
2364 mptable_bus_info_alloc(cth, &bus_info);
2365
2366 bzero(&arg, sizeof(arg));
2367 arg.bus_info = &bus_info;
2368
2369 error = mptable_iterate_entries(cth,
2370 mptable_ioapic_probe_callback, &arg);
2371
2372 mptable_bus_info_free(&bus_info);
2373 mptable_unmap(&mpt);
2374
2375 return error;
2376}
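/*
 * The probe only sanity-checks the table: ISA interrupt entries that
 * reference an unknown IOAPIC are merely warned about, but a legacy
 * IRQ of 16 or higher fails the probe with EINVAL so this enumerator
 * is not selected.
 */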
2377
2378struct mptable_ioapic_int_cbarg {
2379 const struct mptable_bus_info *bus_info;
2380 int ioapic_nint;
2381};
2382
2383static int
2384mptable_ioapic_int_callback(void *xarg, const void *pos, int type)
2385{
2386 struct mptable_ioapic_int_cbarg *arg = xarg;
2387 const struct mptable_ioapic *ioapic;
2388 const struct mptable_bus *bus;
2389 const struct INTENTRY *ent;
2390 int gsi;
2391
2392 if (type != 3)
2393 return 0;
2394
2395 arg->ioapic_nint++;
2396
2397 ent = pos;
2398 if (ent->int_type != 0)
2399 return 0;
2400
2401 TAILQ_FOREACH(bus, &arg->bus_info->mbi_list, mb_link) {
2402 if (bus->mb_type == MPTABLE_BUS_ISA &&
2403 bus->mb_id == ent->src_bus_id)
2404 break;
2405 }
2406 if (bus == NULL)
2407 return 0;
2408
2409 TAILQ_FOREACH(ioapic, &mptable_ioapic_list, mio_link) {
2410 if (ioapic->mio_apic_id == ent->dst_apic_id)
2411 break;
2412 }
2413 if (ioapic == NULL) {
2414 kprintf("MPTABLE: warning ISA int dst apic id %d "
2415 "does not exist\n", ent->dst_apic_id);
2416 return 0;
2417 }
2418
2419 if (ent->dst_apic_int >= ioapic->mio_npin) {
2420 panic("mptable_ioapic_enumerate: invalid I/O APIC "
2421 "pin %d, should be < %d",
2422 ent->dst_apic_int, ioapic->mio_npin);
2423 }
2424 gsi = ioapic->mio_gsi_base + ent->dst_apic_int;
2425
2426 if (ent->src_bus_irq != gsi) {
2427 if (bootverbose) {
2428 kprintf("MPTABLE: INTSRC irq %d -> GSI %d\n",
2429 ent->src_bus_irq, gsi);
2430 }
2431 ioapic_intsrc(ent->src_bus_irq, gsi,
2432 INTR_TRIGGER_EDGE, INTR_POLARITY_HIGH);
2433 }
2434 return 0;
2435}
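/*
 * Each ISA interrupt entry is compared against the GSI computed from
 * its destination IOAPIC (gsi_base + pin).  Where the two differ the
 * legacy IRQ is remapped via ioapic_intsrc(); the classic case is the
 * timer, whose ISA IRQ 0 is typically wired to IOAPIC pin 2, i.e. the
 * "INTSRC irq 0 -> GSI 2" mapping also installed by the default paths
 * in mptable_ioapic_enumerate().
 */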
2436
2437static void
2438mptable_ioapic_enumerate(struct ioapic_enumerator *e)
2439{
2440 struct mptable_bus_info bus_info;
2441 struct mptable_ioapic *ioapic;
2442 struct mptable_pos mpt;
2443 mpcth_t cth;
2444 int error;
2445
2446 KKASSERT(mptable_fps_phyaddr != 0);
2447 KKASSERT(!TAILQ_EMPTY(&mptable_ioapic_list));
2448
2449 TAILQ_FOREACH(ioapic, &mptable_ioapic_list, mio_link) {
2450 const struct mptable_ioapic *prev_ioapic;
2451 uint32_t ver;
2452 void *addr;
2453
2454 addr = ioapic_map(ioapic->mio_addr);
2455
2456 ver = ioapic_read(addr, IOAPIC_VER);
2457 ioapic->mio_npin = ((ver & IOART_VER_MAXREDIR)
2458 >> MAXREDIRSHIFT) + 1;
2459
2460 prev_ioapic = TAILQ_PREV(ioapic,
2461 mptable_ioapic_list, mio_link);
2462 if (prev_ioapic == NULL) {
2463 ioapic->mio_gsi_base = 0;
2464 } else {
2465 ioapic->mio_gsi_base =
2466 prev_ioapic->mio_gsi_base +
2467 prev_ioapic->mio_npin;
2468 }
2469 ioapic_add(addr, ioapic->mio_gsi_base,
2470 ioapic->mio_npin);
2471
2472 if (bootverbose) {
2473 kprintf("MPTABLE: IOAPIC addr 0x%08x, "
2474 "apic id %d, idx %d, gsi base %d, npin %d\n",
2475 ioapic->mio_addr,
2476 ioapic->mio_apic_id,
2477 ioapic->mio_idx,
2478 ioapic->mio_gsi_base,
2479 ioapic->mio_npin);
2480 }
2481 }
2482
2483 if (mptable_use_default) {
2484 if (bootverbose)
2485 kprintf("MPTABLE: INTSRC irq 0 -> GSI 2 (default)\n");
2486 ioapic_intsrc(0, 2, INTR_TRIGGER_EDGE, INTR_POLARITY_HIGH);
2487 return;
2488 }
2489
2490 error = mptable_map(&mpt);
2491 if (error)
2492 panic("mptable_ioapic_probe: mptable_map failed\n");
2493 KKASSERT(!MPTABLE_POS_USE_DEFAULT(&mpt));
2494
2495 cth = mpt.mp_cth;
2496
2497 mptable_bus_info_alloc(cth, &bus_info);
2498
2499 if (TAILQ_EMPTY(&bus_info.mbi_list)) {
2500 if (bootverbose)
2501 kprintf("MPTABLE: INTSRC irq 0 -> GSI 2 (no bus)\n");
2502 ioapic_intsrc(0, 2, INTR_TRIGGER_EDGE, INTR_POLARITY_HIGH);
2503 } else {
2504 struct mptable_ioapic_int_cbarg arg;
2505
2506 bzero(&arg, sizeof(arg));
2507 arg.bus_info = &bus_info;
2508
2509 error = mptable_iterate_entries(cth,
2510 mptable_ioapic_int_callback, &arg);
2511 if (error)
2512 panic("mptable_ioapic_int failed\n");
2513
2514 if (arg.ioapic_nint == 0) {
2515 if (bootverbose) {
2516 kprintf("MPTABLE: INTSRC irq 0 -> GSI 2 "
2517 "(no int)\n");
2518 }
2519 ioapic_intsrc(0, 2, INTR_TRIGGER_EDGE,
2520 INTR_POLARITY_HIGH);
2521 }
2522 }
2523
2524 mptable_bus_info_free(&bus_info);
2525
2526 mptable_unmap(&mpt);
2527}
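/*
 * GSI bases are assigned cumulatively in list order: the first IOAPIC
 * starts at 0 and each successor starts where the previous one ended.
 * Illustrative example with two typical 24-pin IOAPICs:
 *
 *	idx 0: gsi_base 0,  pins 0..23  ->  GSI  0..23
 *	idx 1: gsi_base 24, pins 0..23  ->  GSI 24..47
 *
 * The pin count itself comes from the IOAPIC version register
 * (IOART_VER_MAXREDIR field + 1).
 */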
2528
2529static struct ioapic_enumerator mptable_ioapic_enumerator = {
2530 .ioapic_prio = IOAPIC_ENUM_PRIO_MPTABLE,
2531 .ioapic_probe = mptable_ioapic_probe,
2532 .ioapic_enumerate = mptable_ioapic_enumerate
2533};
2534
2535static void
2536mptable_ioapic_enum_register(void)
2537{
2538 ioapic_enumerator_register(&mptable_ioapic_enumerator);
2539}
2540SYSINIT(mptable_ioapic, SI_BOOT2_PRESMP, SI_ORDER_ANY,
2541 mptable_ioapic_enum_register, 0);
2542
2543void
2544mptable_pci_int_dump(void)
2545{
2546 const struct mptable_pci_int *pci_int;
2547
2548 TAILQ_FOREACH(pci_int, &mptable_pci_int_list, mpci_link) {
2549 kprintf("MPTABLE: %d:%d INT%c -> IOAPIC %d.%d\n",
2550 pci_int->mpci_bus,
2551 pci_int->mpci_dev,
2552 pci_int->mpci_pin + 'A',
2553 pci_int->mpci_ioapic_idx,
2554 pci_int->mpci_ioapic_pin);
2555 }
2556}
2557
2558int
2559mptable_pci_int_route(int bus, int dev, int pin, int intline)
2560{
2561 const struct mptable_pci_int *pci_int;
2562 int irq = -1;
2563
2564 KKASSERT(pin >= 1);
2565 --pin; /* zero based */
2566
2567 TAILQ_FOREACH(pci_int, &mptable_pci_int_list, mpci_link) {
2568 if (pci_int->mpci_bus == bus &&
2569 pci_int->mpci_dev == dev &&
2570 pci_int->mpci_pin == pin)
2571 break;
2572 }
2573 if (pci_int != NULL) {
2574 int gsi;
2575
2576 gsi = ioapic_gsi(pci_int->mpci_ioapic_idx,
2577 pci_int->mpci_ioapic_pin);
2578 if (gsi >= 0) {
2579 irq = ioapic_abi_find_gsi(gsi,
2580 INTR_TRIGGER_LEVEL, INTR_POLARITY_LOW);
2581 }
2582 }
2583
2584 if (irq < 0) {
2585 if (bootverbose) {
2586 kprintf("MPTABLE: fixed interrupt routing "
2587 "for %d:%d INT%c\n", bus, dev, pin + 'A');
2588 }
2589
2590 irq = ioapic_abi_find_irq(intline,
2591 INTR_TRIGGER_LEVEL, INTR_POLARITY_LOW);
2592 }
2593
2594 if (irq >= 0 && bootverbose) {
2595 kprintf("MPTABLE: %d:%d INT%c routed to irq %d\n",
2596 bus, dev, pin + 'A', irq);
2597 }
2598 return irq;
2599}
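/*
 * Routing summary: the bus/dev/pin triple recorded from the MP table
 * is translated IOAPIC index/pin -> GSI -> IRQ; when no entry matches,
 * the code falls back to the interrupt line programmed by the BIOS
 * (intline), again resolved through the IOAPIC ABI.  A negative return
 * tells the caller that no interrupt could be routed.
 */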