x86_64: Move ioapic function declarations from smp.h to apic/ioapic.h
[dragonfly.git] / sys / platform / pc64 / x86_64 / mp_machdep.c
1/*
2 * Copyright (c) 1996, by Steve Passe
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. The name of the developer may NOT be used to endorse or promote products
11 * derived from this software without specific prior written permission.
12 *
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 *
25 * $FreeBSD: src/sys/i386/i386/mp_machdep.c,v 1.115.2.15 2003/03/14 21:22:35 jhb Exp $
26 */
27
28#include "opt_cpu.h"
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/kernel.h>
33#include <sys/sysctl.h>
34#include <sys/malloc.h>
35#include <sys/memrange.h>
36#include <sys/cons.h> /* cngetc() */
37#include <sys/machintr.h>
38
39#include <sys/mplock2.h>
40
41#include <vm/vm.h>
42#include <vm/vm_param.h>
43#include <vm/pmap.h>
44#include <vm/vm_kern.h>
45#include <vm/vm_extern.h>
46#include <sys/lock.h>
47#include <vm/vm_map.h>
48#include <sys/user.h>
49#ifdef GPROF
50#include <sys/gmon.h>
51#endif
52
53#include <machine/smp.h>
54#include <machine_base/apic/apicreg.h>
55#include <machine/atomic.h>
56#include <machine/cpufunc.h>
57#include <machine_base/apic/lapic.h>
58#include <machine_base/apic/ioapic.h>
59#include <machine/psl.h>
60#include <machine/segments.h>
61#include <machine/tss.h>
62#include <machine/specialreg.h>
63#include <machine/globaldata.h>
64#include <machine/pmap_inval.h>
65
66#include <machine/md_var.h> /* setidt() */
67#include <machine_base/icu/icu.h> /* IPIs */
68#include <machine_base/apic/ioapic_abi.h>
69#include <machine/intr_machdep.h> /* IPIs */
70
71#define WARMBOOT_TARGET 0
72#define WARMBOOT_OFF (KERNBASE + 0x0467)
73#define WARMBOOT_SEG (KERNBASE + 0x0469)
74
75#define BIOS_BASE (0xf0000)
76#define BIOS_BASE2 (0xe0000)
77#define BIOS_SIZE (0x10000)
78#define BIOS_COUNT (BIOS_SIZE/4)
79
80#define CMOS_REG (0x70)
81#define CMOS_DATA (0x71)
82#define BIOS_RESET (0x0f)
83#define BIOS_WARM (0x0a)
84
85#define PROCENTRY_FLAG_EN 0x01
86#define PROCENTRY_FLAG_BP 0x02
87#define IOAPICENTRY_FLAG_EN 0x01
88
89
90/* MP Floating Pointer Structure */
91typedef struct MPFPS {
92 char signature[4];
93 u_int32_t pap;
94 u_char length;
95 u_char spec_rev;
96 u_char checksum;
97 u_char mpfb1;
98 u_char mpfb2;
99 u_char mpfb3;
100 u_char mpfb4;
101 u_char mpfb5;
102} *mpfps_t;
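/*
 * Illustrative sketch (not part of the original file): per the Intel MP
 * spec the floating pointer structure is valid only when all of its bytes
 * sum to zero modulo 256, with 'length' counted in 16-byte paragraphs.
 * A minimal validation helper along those lines might look like this;
 * the function name is hypothetical.
 */
#if 0	/* example only */
static int
mpfps_checksum_ok(const struct MPFPS *fps)
{
	const u_char *p = (const u_char *)fps;
	u_char sum = 0;
	int i;

	for (i = 0; i < fps->length * 16; ++i)
		sum += p[i];
	return (sum == 0);
}
#endif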
103
104/* MP Configuration Table Header */
105typedef struct MPCTH {
106 char signature[4];
107 u_short base_table_length;
108 u_char spec_rev;
109 u_char checksum;
110 u_char oem_id[8];
111 u_char product_id[12];
112 u_int32_t oem_table_pointer;
113 u_short oem_table_size;
114 u_short entry_count;
115 u_int32_t apic_address;
116 u_short extended_table_length;
117 u_char extended_table_checksum;
118 u_char reserved;
119} *mpcth_t;
120
121
122typedef struct PROCENTRY {
123 u_char type;
124 u_char apic_id;
125 u_char apic_version;
126 u_char cpu_flags;
127 u_int32_t cpu_signature;
128 u_int32_t feature_flags;
129 u_int32_t reserved1;
130 u_int32_t reserved2;
131} *proc_entry_ptr;
132
133typedef struct BUSENTRY {
134 u_char type;
135 u_char bus_id;
136 char bus_type[6];
137} *bus_entry_ptr;
138
139typedef struct IOAPICENTRY {
140 u_char type;
141 u_char apic_id;
142 u_char apic_version;
143 u_char apic_flags;
144 u_int32_t apic_address;
145} *io_apic_entry_ptr;
146
147typedef struct INTENTRY {
148 u_char type;
149 u_char int_type;
150 u_short int_flags;
151 u_char src_bus_id;
152 u_char src_bus_irq;
153 u_char dst_apic_id;
154 u_char dst_apic_int;
155} *int_entry_ptr;
156
157/* descriptions of MP basetable entries */
158typedef struct BASETABLE_ENTRY {
159 u_char type;
160 u_char length;
161 char name[16];
162} basetable_entry;
163
164struct mptable_pos {
165 mpfps_t mp_fps;
166 mpcth_t mp_cth;
167 vm_size_t mp_cth_mapsz;
168};
169
170#define MPTABLE_POS_USE_DEFAULT(mpt) \
171 ((mpt)->mp_fps->mpfb1 != 0 || (mpt)->mp_cth == NULL)
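/*
 * Per the MP spec, a non-zero feature byte 1 (mpfb1) selects one of the
 * spec's default two-processor configurations, in which case no
 * configuration table header (mp_cth) is provided and hard-wired defaults
 * are used instead.
 */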
172
173struct mptable_bus {
174 int mb_id;
175 int mb_type; /* MPTABLE_BUS_ */
176 TAILQ_ENTRY(mptable_bus) mb_link;
177};
178
179#define MPTABLE_BUS_ISA 0
180#define MPTABLE_BUS_PCI 1
181
182struct mptable_bus_info {
183 TAILQ_HEAD(, mptable_bus) mbi_list;
184};
185
186struct mptable_pci_int {
187 int mpci_bus;
188 int mpci_dev;
189 int mpci_pin;
190
191 int mpci_ioapic_idx;
192 int mpci_ioapic_pin;
193 TAILQ_ENTRY(mptable_pci_int) mpci_link;
194};
195
196struct mptable_ioapic {
197 int mio_idx;
198 int mio_apic_id;
199 uint32_t mio_addr;
200 int mio_gsi_base;
201 int mio_npin;
202 TAILQ_ENTRY(mptable_ioapic) mio_link;
203};
204
205typedef int (*mptable_iter_func)(void *, const void *, int);
206
207/*
208 * this code MUST be enabled here and in mpboot.s.
209 * it follows the very early stages of AP boot by placing values in CMOS ram.
210 * it NORMALLY will never be needed and thus the primitive method for enabling.
211 *
212 */
213#if defined(CHECK_POINTS)
214#define CHECK_READ(A) (outb(CMOS_REG, (A)), inb(CMOS_DATA))
215#define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))
216
217#define CHECK_INIT(D); \
218 CHECK_WRITE(0x34, (D)); \
219 CHECK_WRITE(0x35, (D)); \
220 CHECK_WRITE(0x36, (D)); \
221 CHECK_WRITE(0x37, (D)); \
222 CHECK_WRITE(0x38, (D)); \
223 CHECK_WRITE(0x39, (D));
224
225#define CHECK_PRINT(S); \
226 kprintf("%s: %d, %d, %d, %d, %d, %d\n", \
227 (S), \
228 CHECK_READ(0x34), \
229 CHECK_READ(0x35), \
230 CHECK_READ(0x36), \
231 CHECK_READ(0x37), \
232 CHECK_READ(0x38), \
233 CHECK_READ(0x39));
234
235#else /* CHECK_POINTS */
236
237#define CHECK_INIT(D)
238#define CHECK_PRINT(S)
239
240#endif /* CHECK_POINTS */
241
242/*
243 * Values to send to the POST hardware.
244 */
245#define MP_BOOTADDRESS_POST 0x10
246#define MP_PROBE_POST 0x11
247#define MPTABLE_PASS1_POST 0x12
248
249#define MP_START_POST 0x13
250#define MP_ENABLE_POST 0x14
251#define MPTABLE_PASS2_POST 0x15
252
253#define START_ALL_APS_POST 0x16
254#define INSTALL_AP_TRAMP_POST 0x17
255#define START_AP_POST 0x18
256
257#define MP_ANNOUNCE_POST 0x19
258
259/** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
260int current_postcode;
261
262/** XXX FIXME: what system files declare these??? */
263extern struct region_descriptor r_gdt, r_idt;
264
265int mp_naps; /* # of Application Processors */
266extern int nkpt;
267
268u_int32_t cpu_apic_versions[NAPICID]; /* populated during mptable scan */
269int64_t tsc0_offset;
270extern int64_t tsc_offsets[];
271
272extern u_long ebda_addr;
273
274#ifdef SMP /* APIC-IO */
275struct apic_intmapinfo int_to_apicintpin[APIC_INTMAPSIZE];
276#endif
277
278/*
279 * APIC ID logical/physical mapping structures.
280 * We oversize these to simplify boot-time config.
281 */
282int cpu_num_to_apic_id[NAPICID];
283int apic_id_to_logical[NAPICID];
284
285/* AP uses this during bootstrap. Do not staticize. */
286char *bootSTK;
287static int bootAP;
288
289struct pcb stoppcbs[MAXCPU];
290
291extern inthand_t IDTVEC(fast_syscall), IDTVEC(fast_syscall32);
292
293static basetable_entry basetable_entry_types[] =
294{
295 {0, 20, "Processor"},
296 {1, 8, "Bus"},
297 {2, 8, "I/O APIC"},
298 {3, 8, "I/O INT"},
299 {4, 8, "Local INT"}
300};
301
302/*
303 * Local data and functions.
304 */
305
306static u_int boot_address;
307static u_int base_memory;
308static int mp_finish;
309static int mp_finish_lapic;
310
311static void mp_enable(u_int boot_addr);
312
313static int mptable_iterate_entries(const mpcth_t,
314 mptable_iter_func, void *);
315static int mptable_search(void);
316static long mptable_search_sig(u_int32_t target, int count);
317static int mptable_hyperthread_fixup(cpumask_t, int);
318static int mptable_map(struct mptable_pos *);
319static void mptable_unmap(struct mptable_pos *);
320static void mptable_bus_info_alloc(const mpcth_t,
321 struct mptable_bus_info *);
322static void mptable_bus_info_free(struct mptable_bus_info *);
323
324static int mptable_lapic_probe(struct lapic_enumerator *);
325static void mptable_lapic_enumerate(struct lapic_enumerator *);
326static void mptable_lapic_default(void);
327
328static int mptable_ioapic_probe(struct ioapic_enumerator *);
329static void mptable_ioapic_enumerate(struct ioapic_enumerator *);
330
331static int start_all_aps(u_int boot_addr);
332#if 0
333static void install_ap_tramp(u_int boot_addr);
334#endif
335static int start_ap(struct mdglobaldata *gd, u_int boot_addr, int smibest);
336static int smitest(void);
337
338static cpumask_t smp_startup_mask = 1; /* which cpus have been started */
339static cpumask_t smp_lapic_mask = 1; /* which cpus have had their lapic inited */
340cpumask_t smp_active_mask = 1; /* which cpus are ready for IPIs etc? */
341SYSCTL_INT(_machdep, OID_AUTO, smp_active, CTLFLAG_RD, &smp_active_mask, 0, "");
342static u_int bootMP_size;
343
344int imcr_present;
345
346static vm_paddr_t mptable_fps_phyaddr;
347static int mptable_use_default;
348static TAILQ_HEAD(mptable_pci_int_list, mptable_pci_int) mptable_pci_int_list =
349 TAILQ_HEAD_INITIALIZER(mptable_pci_int_list);
350static TAILQ_HEAD(mptable_ioapic_list, mptable_ioapic) mptable_ioapic_list =
351 TAILQ_HEAD_INITIALIZER(mptable_ioapic_list);
352
353/*
354 * Calculate usable address in base memory for AP trampoline code.
355 */
356u_int
357mp_bootaddress(u_int basemem)
358{
359 POSTCODE(MP_BOOTADDRESS_POST);
360
361 base_memory = basemem;
362
363 bootMP_size = mptramp_end - mptramp_start;
364 boot_address = trunc_page(basemem * 1024); /* round down to 4k boundary */
365 if (((basemem * 1024) - boot_address) < bootMP_size)
366 boot_address -= PAGE_SIZE; /* not enough, lower by 4k */
367 /* 3 levels of page table pages */
368 mptramp_pagetables = boot_address - (PAGE_SIZE * 3);
369
370 return mptramp_pagetables;
371}
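/*
 * Worked example (illustrative): with basemem = 639 (KB), boot_address is
 * trunc_page(639 * 1024) = 0x9f000, leaving 0xc00 bytes of base memory
 * above it; if bootMP_size were larger than that, the address would drop
 * one more page to 0x9e000.  The three page table pages then sit just
 * below, so mptramp_pagetables = boot_address - 3 * PAGE_SIZE (0x9c000
 * in the first case).
 */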
372
373
374static void
375mptable_probe(void)
376{
377 struct mptable_pos mpt;
378 int error;
379
380 KKASSERT(mptable_fps_phyaddr == 0);
381
382 mptable_fps_phyaddr = mptable_search();
383 if (mptable_fps_phyaddr == 0)
384 return;
385
386 error = mptable_map(&mpt);
387 if (error) {
388 mptable_fps_phyaddr = 0;
389 return;
390 }
391
392 if (MPTABLE_POS_USE_DEFAULT(&mpt)) {
393 kprintf("MPTABLE: use default configuration\n");
394 mptable_use_default = 1;
395 }
396 if (mpt.mp_fps->mpfb2 & 0x80)
397 imcr_present = 1;
398
399 mptable_unmap(&mpt);
400}
401SYSINIT(mptable_probe, SI_BOOT2_PRESMP, SI_ORDER_FIRST, mptable_probe, 0);
402
403/*
404 * Look for an Intel MP spec table (ie, SMP capable hardware).
405 */
406static int
407mptable_search(void)
408{
409 long x;
410 u_int32_t target;
411
412 POSTCODE(MP_PROBE_POST);
413
414 /* see if EBDA exists */
415 if (ebda_addr != 0) {
416 /* search first 1K of EBDA */
417 target = (u_int32_t)ebda_addr;
418 if ((x = mptable_search_sig(target, 1024 / 4)) > 0)
419 return x;
420 } else {
421 /* last 1K of base memory, effective 'top of base' passed in */
422 target = (u_int32_t)(base_memory - 0x400);
423 if ((x = mptable_search_sig(target, 1024 / 4)) > 0)
424 return x;
425 }
426
427 /* search the BIOS */
428 target = (u_int32_t)BIOS_BASE;
429 if ((x = mptable_search_sig(target, BIOS_COUNT)) > 0)
430 return x;
431
432 /* search the extended BIOS */
433 target = (u_int32_t)BIOS_BASE2;
434 if ((x = mptable_search_sig(target, BIOS_COUNT)) > 0)
435 return x;
436
437 /* nothing found */
438 return 0;
439}
440
441static int
442mptable_iterate_entries(const mpcth_t cth, mptable_iter_func func, void *arg)
443{
444 int count, total_size;
445 const void *position;
446
447 KKASSERT(cth->base_table_length >= sizeof(struct MPCTH));
448 total_size = cth->base_table_length - sizeof(struct MPCTH);
449 position = (const uint8_t *)cth + sizeof(struct MPCTH);
450 count = cth->entry_count;
451
452 while (count--) {
453 int type, error;
454
455 KKASSERT(total_size >= 0);
456 if (total_size == 0) {
457 kprintf("invalid base MP table, "
458 "entry count and length mismatch\n");
459 return EINVAL;
460 }
461
462 type = *(const uint8_t *)position;
463 switch (type) {
464 case 0: /* processor_entry */
465 case 1: /* bus_entry */
466 case 2: /* io_apic_entry */
467 case 3: /* int_entry */
468 case 4: /* int_entry */
469 break;
470 default:
471 kprintf("unknown base MP table entry type %d\n", type);
472 return EINVAL;
473 }
474
475 if (total_size < basetable_entry_types[type].length) {
476 kprintf("invalid base MP table length, "
477 "does not contain all entries\n");
478 return EINVAL;
479 }
480 total_size -= basetable_entry_types[type].length;
481
482 error = func(arg, position, type);
483 if (error)
484 return error;
485
486 position = (const uint8_t *)position +
487 basetable_entry_types[type].length;
488 }
489 return 0;
490}
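/*
 * Usage sketch (hypothetical, example only): callbacks handed to
 * mptable_iterate_entries() receive each base-table entry together with
 * its type code (2 == I/O APIC) and abort the walk by returning a
 * non-zero errno.  For instance, counting I/O APIC entries:
 */
#if 0	/* example only */
static int
example_count_ioapics(void *xarg, const void *pos, int type)
{
	int *count = xarg;

	if (type == 2)		/* io_apic_entry */
		(*count)++;
	return 0;
}
/* caller: error = mptable_iterate_entries(cth, example_count_ioapics, &count); */
#endif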
491
492
493/*
494 * Startup the SMP processors.
495 */
496void
497mp_start(void)
498{
499 POSTCODE(MP_START_POST);
500 mp_enable(boot_address);
501}
502
503
504/*
505 * Print various information about the SMP system hardware and setup.
506 */
507void
508mp_announce(void)
509{
510 int x;
511
512 POSTCODE(MP_ANNOUNCE_POST);
513
514 kprintf("DragonFly/MP: Multiprocessor motherboard\n");
515 kprintf(" cpu0 (BSP): apic id: %2d", CPU_TO_ID(0));
516 kprintf(", version: 0x%08x\n", cpu_apic_versions[0]);
517 for (x = 1; x <= mp_naps; ++x) {
518 kprintf(" cpu%d (AP): apic id: %2d", x, CPU_TO_ID(x));
519 kprintf(", version: 0x%08x\n", cpu_apic_versions[x]);
520 }
521
522 if (!apic_io_enable)
523 kprintf(" Warning: APIC I/O disabled\n");
524}
525
526/*
527 * AP cpu's call this to sync up protected mode.
528 *
529 * WARNING! %gs is not set up on entry. This routine sets up %gs.
530 */
531void
532init_secondary(void)
533{
534 int gsel_tss;
535 int x, myid = bootAP;
536 u_int64_t msr, cr0;
537 struct mdglobaldata *md;
538 struct privatespace *ps;
539
540 ps = &CPU_prvspace[myid];
541
542 gdt_segs[GPROC0_SEL].ssd_base =
543 (long) &ps->mdglobaldata.gd_common_tss;
544 ps->mdglobaldata.mi.gd_prvspace = ps;
545
546 /* We fill the 32-bit segment descriptors */
547 for (x = 0; x < NGDT; x++) {
548 if (x != GPROC0_SEL && x != (GPROC0_SEL + 1))
549 ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x]);
550 }
551 /* And now a 64-bit one */
552 ssdtosyssd(&gdt_segs[GPROC0_SEL],
553 (struct system_segment_descriptor *)&gdt[myid * NGDT + GPROC0_SEL]);
554
555 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
556 r_gdt.rd_base = (long) &gdt[myid * NGDT];
557 lgdt(&r_gdt); /* does magic intra-segment return */
558
559 /* lgdt() destroys the GSBASE value, so we load GSBASE after lgdt() */
560 wrmsr(MSR_FSBASE, 0); /* User value */
561 wrmsr(MSR_GSBASE, (u_int64_t)ps);
562 wrmsr(MSR_KGSBASE, 0); /* XXX User value while we're in the kernel */
563
564 lidt(&r_idt);
565
566#if 0
567 lldt(_default_ldt);
568 mdcpu->gd_currentldt = _default_ldt;
569#endif
570
571 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
572 gdt[myid * NGDT + GPROC0_SEL].sd_type = SDT_SYSTSS;
573
574 md = mdcpu; /* loaded through %gs:0 (mdglobaldata.mi.gd_prvspace)*/
575
576 md->gd_common_tss.tss_rsp0 = 0; /* not used until after switch */
577#if 0 /* JG XXX */
578 md->gd_common_tss.tss_ioopt = (sizeof md->gd_common_tss) << 16;
579#endif
580 md->gd_tss_gdt = &gdt[myid * NGDT + GPROC0_SEL];
581 md->gd_common_tssd = *md->gd_tss_gdt;
582
583 /* double fault stack */
584 md->gd_common_tss.tss_ist1 =
585 (long)&md->mi.gd_prvspace->idlestack[
586 sizeof(md->mi.gd_prvspace->idlestack)];
587
588 ltr(gsel_tss);
589
590 /*
591 * Set to a known state:
592 * Set by mpboot.s: CR0_PG, CR0_PE
593 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
594 */
595 cr0 = rcr0();
596 cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
597 load_cr0(cr0);
598
599 /* Set up the fast syscall stuff */
600 msr = rdmsr(MSR_EFER) | EFER_SCE;
601 wrmsr(MSR_EFER, msr);
602 wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
603 wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
604 msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
605 ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
606 wrmsr(MSR_STAR, msr);
607 wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D);
608
609 pmap_set_opt(); /* PSE/4MB pages, etc */
610#if JGXXX
611 /* Initialize the PAT MSR. */
612 pmap_init_pat();
613#endif
614
615 /* set up CPU registers and state */
616 cpu_setregs();
617
618 /* set up SSE/NX registers */
619 initializecpu();
620
621 /* set up FPU state on the AP */
622 npxinit(__INITIAL_NPXCW__);
623
624 /* disable the APIC, just to be SURE */
625 lapic->svr &= ~APIC_SVR_ENABLE;
626
627 /* data returned to BSP */
628 cpu_apic_versions[0] = lapic->version;
629}
630
631/*******************************************************************
632 * local functions and data
633 */
634
635/*
636 * start the SMP system
637 */
638static void
639mp_enable(u_int boot_addr)
640{
641 POSTCODE(MP_ENABLE_POST);
642
91903a05 643 lapic_config();
8e4c6923 644
645 /* Initialize BSP's local APIC */
646 lapic_init(TRUE);
647
648 /* start each Application Processor */
649 start_all_aps(boot_addr);
650
651 if (apic_io_enable)
652 ioapic_config();
653
654 /* Finalize PIC */
655 MachIntrABI.finalize();
656}
657
658
659/*
660 * look for the MP spec signature
661 */
662
663/* string defined by the Intel MP Spec as identifying the MP table */
664#define MP_SIG 0x5f504d5f /* _MP_ */
665#define NEXT(X) ((X) += 4)
3a918cfd 666static long
0eaa8172 667mptable_search_sig(u_int32_t target, int count)
46d4e165 668{
669 vm_size_t map_size;
670 u_int32_t *addr;
671 int x, ret;
46d4e165 672
673 KKASSERT(target != 0);
674
675 map_size = count * sizeof(u_int32_t);
676 addr = pmap_mapdev((vm_paddr_t)target, map_size);
46d4e165 677
351254e7 678 ret = 0;
679 for (x = 0; x < count; NEXT(x)) {
680 if (addr[x] == MP_SIG) {
681 /* make array index a byte index */
682 ret = target + (x * sizeof(u_int32_t));
683 break;
684 }
685 }
351254e7 686
687 pmap_unmapdev((vm_offset_t)addr, map_size);
688 return ret;
689}
690
16794646 691static int processor_entry (const struct PROCENTRY *entry, int cpu);
692
693/*
694 * Check if we should perform a hyperthreading "fix-up" to
695 * enumerate any logical CPU's that aren't already listed
696 * in the table.
697 *
698 * XXX: We assume that all of the physical CPUs in the
699 * system have the same number of logical CPUs.
700 *
701 * XXX: We assume that APIC ID's are allocated such that
702 * the APIC ID's for a physical processor are aligned
703 * with the number of logical CPU's in the processor.
704 */
7f310ea1 705static int
da23a592 706mptable_hyperthread_fixup(cpumask_t id_mask, int cpu_count)
46d4e165 707{
7f310ea1 708 int i, id, lcpus_max, logical_cpus;
46d4e165 709
46d4e165 710 if ((cpu_feature & CPUID_HTT) == 0)
7f310ea1 711 return 0;
f5abf528
MN
712
713 lcpus_max = (cpu_procinfo & CPUID_HTT_CORES) >> 16;
714 if (lcpus_max <= 1)
7f310ea1 715 return 0;
46d4e165 716
f5abf528
MN
717 if (strcmp(cpu_vendor, "GenuineIntel") == 0) {
718 /*
719 * INSTRUCTION SET REFERENCE, A-M (#253666)
720 * Page 3-181, Table 3-20
721 * "The nearest power-of-2 integer that is not smaller
722 * than EBX[23:16] is the number of unique initial APIC
723 * IDs reserved for addressing different logical
724 * processors in a physical package."
725 */
726 for (i = 0; ; ++i) {
727 if ((1 << i) >= lcpus_max) {
728 lcpus_max = 1 << i;
729 break;
730 }
731 }
732 }
733
7f310ea1
MN
734 KKASSERT(cpu_count != 0);
735 if (cpu_count == lcpus_max) {
f5abf528 736 /* We have nothing to fix */
7f310ea1
MN
737 return 0;
738 } else if (cpu_count == 1) {
f5abf528
MN
739 /* XXX this may be incorrect */
740 logical_cpus = lcpus_max;
741 } else {
742 int cur, prev, dist;
743
744 /*
745 * Calculate the distances between two nearest
746 * APIC IDs. If all such distances are same,
747 * then it is the number of missing cpus that
748 * we are going to fill later.
749 */
750 dist = cur = prev = -1;
751 for (id = 0; id < MAXCPU; ++id) {
da23a592 752 if ((id_mask & CPUMASK(id)) == 0)
f5abf528
MN
753 continue;
754
755 cur = id;
756 if (prev >= 0) {
757 int new_dist = cur - prev;
758
759 if (dist < 0)
760 dist = new_dist;
761
762 /*
763 * Make sure that all distances
764 * between two nearest APIC IDs
765 * are same.
766 */
767 if (dist != new_dist)
7f310ea1 768 return 0;
f5abf528
MN
769 }
770 prev = cur;
771 }
772 if (dist == 1)
7f310ea1 773 return 0;
f5abf528
MN
774
775 /* Must be power of 2 */
776 if (dist & (dist - 1))
7f310ea1 777 return 0;
f5abf528
MN
778
779 /* Can't exceed CPU package capacity */
780 if (dist > lcpus_max)
781 logical_cpus = lcpus_max;
782 else
783 logical_cpus = dist;
784 }
785
46d4e165
JG
786 /*
787 * For each APIC ID of a CPU that is set in the mask,
788 * scan the other candidate APIC ID's for this
789 * physical processor. If any of those ID's are
790 * already in the table, then kill the fixup.
791 */
f5abf528 792 for (id = 0; id < MAXCPU; id++) {
da23a592 793 if ((id_mask & CPUMASK(id)) == 0)
46d4e165
JG
794 continue;
795 /* First, make sure we are on a logical_cpus boundary. */
796 if (id % logical_cpus != 0)
7f310ea1 797 return 0;
46d4e165 798 for (i = id + 1; i < id + logical_cpus; i++)
da23a592 799 if ((id_mask & CPUMASK(i)) != 0)
7f310ea1 800 return 0;
46d4e165 801 }
7f310ea1 802 return logical_cpus;
803}
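/*
 * Worked example (illustrative): if the table lists enabled APIC IDs
 * 0, 2, 4 and 6 while CPUID reports two logical CPUs per package, every
 * distance between neighbouring IDs is 2, so the fixup above returns 2
 * and the missing sibling IDs 1, 3, 5 and 7 are synthesized later by the
 * lapic pass-2 callback.
 */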
804
8f54b133 805static int
fe423084 806mptable_map(struct mptable_pos *mpt)
91f1c7a4
MN
807{
808 mpfps_t fps = NULL;
809 mpcth_t cth = NULL;
810 vm_size_t cth_mapsz = 0;
811
fe423084
SZ
812 KKASSERT(mptable_fps_phyaddr != 0);
813
8f54b133
MN
814 bzero(mpt, sizeof(*mpt));
815
fe423084 816 fps = pmap_mapdev(mptable_fps_phyaddr, sizeof(*fps));
91f1c7a4
MN
817 if (fps->pap != 0) {
818 /*
819 * Map configuration table header to get
820 * the base table size
821 */
822 cth = pmap_mapdev(fps->pap, sizeof(*cth));
823 cth_mapsz = cth->base_table_length;
824 pmap_unmapdev((vm_offset_t)cth, sizeof(*cth));
825
8f54b133
MN
826 if (cth_mapsz < sizeof(*cth)) {
827 kprintf("invalid base MP table length %d\n",
828 (int)cth_mapsz);
829 pmap_unmapdev((vm_offset_t)fps, sizeof(*fps));
830 return EINVAL;
831 }
832
91f1c7a4
MN
833 /*
834 * Map the base table
835 */
836 cth = pmap_mapdev(fps->pap, cth_mapsz);
837 }
838
839 mpt->mp_fps = fps;
840 mpt->mp_cth = cth;
841 mpt->mp_cth_mapsz = cth_mapsz;
8f54b133
MN
842
843 return 0;
91f1c7a4
MN
844}
845
846static void
847mptable_unmap(struct mptable_pos *mpt)
848{
849 if (mpt->mp_cth != NULL) {
850 pmap_unmapdev((vm_offset_t)mpt->mp_cth, mpt->mp_cth_mapsz);
851 mpt->mp_cth = NULL;
852 mpt->mp_cth_mapsz = 0;
853 }
854 if (mpt->mp_fps != NULL) {
855 pmap_unmapdev((vm_offset_t)mpt->mp_fps, sizeof(*mpt->mp_fps));
856 mpt->mp_fps = NULL;
857 }
858}
859
46d4e165 860void
40d323b6
MN
861mp_set_cpuids(int cpu_id, int apic_id)
862{
863 CPU_TO_ID(cpu_id) = apic_id;
864 ID_TO_CPU(apic_id) = cpu_id;
865}
866
46d4e165 867static int
16794646 868processor_entry(const struct PROCENTRY *entry, int cpu)
46d4e165 869{
870 KKASSERT(cpu > 0);
871
872 /* check for usability */
873 if (!(entry->cpu_flags & PROCENTRY_FLAG_EN))
874 return 0;
875
876 /* check for BSP flag */
877 if (entry->cpu_flags & PROCENTRY_FLAG_BP) {
40d323b6 878 mp_set_cpuids(0, entry->apic_id);
879 return 0; /* it's already been counted */
880 }
881
882 /* add another AP to list, if less than max number of CPUs */
883 else if (cpu < MAXCPU) {
40d323b6 884 mp_set_cpuids(cpu, entry->apic_id);
885 return 1;
886 }
887
888 return 0;
889}
890
46d4e165 891/*
46d4e165
JG
892 * Map a physical memory address representing I/O into KVA. The I/O
893 * block is assumed not to cross a page boundary.
894 */
895void *
01616f8b 896ioapic_map(vm_paddr_t pa)
46d4e165 897{
46d4e165
JG
898 KKASSERT(pa < 0x100000000LL);
899
403c36ea 900 return pmap_mapdev_uncacheable(pa, PAGE_SIZE);
46d4e165
JG
901}
902
903/*
904 * start each AP in our list
905 */
906static int
907start_all_aps(u_int boot_addr)
908{
909 vm_offset_t va = boot_address + KERNBASE;
910 u_int64_t *pt4, *pt3, *pt2;
911 int x, i, pg;
912 int shift;
bb467734
MD
913 int smicount;
914 int smibest;
915 int smilast;
46d4e165
JG
916 u_char mpbiosreason;
917 u_long mpbioswarmvec;
918 struct mdglobaldata *gd;
919 struct privatespace *ps;
46d4e165
JG
920
921 POSTCODE(START_ALL_APS_POST);
922
46d4e165
JG
923 /* install the AP 1st level boot code */
924 pmap_kenter(va, boot_address);
bfc09ba0 925 cpu_invlpg((void *)va); /* JG XXX */
46d4e165
JG
926 bcopy(mptramp_start, (void *)va, bootMP_size);
927
928 /* Locate the page tables, they'll be below the trampoline */
929 pt4 = (u_int64_t *)(uintptr_t)(mptramp_pagetables + KERNBASE);
930 pt3 = pt4 + (PAGE_SIZE) / sizeof(u_int64_t);
931 pt2 = pt3 + (PAGE_SIZE) / sizeof(u_int64_t);
932
933 /* Create the initial 1GB replicated page tables */
934 for (i = 0; i < 512; i++) {
935 /* Each slot of the level 4 pages points to the same level 3 page */
936 pt4[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + PAGE_SIZE);
937 pt4[i] |= PG_V | PG_RW | PG_U;
938
939 /* Each slot of the level 3 pages points to the same level 2 page */
940 pt3[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + (2 * PAGE_SIZE));
941 pt3[i] |= PG_V | PG_RW | PG_U;
942
943 /* The level 2 page slots are mapped with 2MB pages for 1GB. */
944 pt2[i] = i * (2 * 1024 * 1024);
945 pt2[i] |= PG_V | PG_RW | PG_PS | PG_U;
946 }
947
948 /* save the current value of the warm-start vector */
949 mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
950 outb(CMOS_REG, BIOS_RESET);
951 mpbiosreason = inb(CMOS_DATA);
952
953 /* setup a vector to our boot code */
954 *((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
955 *((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
956 outb(CMOS_REG, BIOS_RESET);
957 outb(CMOS_DATA, BIOS_WARM); /* 'warm-start' */
958
bb467734
MD
959 /*
960 * If we have a TSC we can figure out the SMI interrupt rate.
961 * The SMI does not necessarily use a constant rate. Spend
962 * up to 250ms trying to figure it out.
963 */
964 smibest = 0;
965 if (cpu_feature & CPUID_TSC) {
966 set_apic_timer(275000);
967 smilast = read_apic_timer();
968 for (x = 0; x < 20 && read_apic_timer(); ++x) {
969 smicount = smitest();
970 if (smibest == 0 || smilast - smicount < smibest)
971 smibest = smilast - smicount;
972 smilast = smicount;
973 }
974 if (smibest > 250000)
975 smibest = 0;
976 if (smibest) {
977 smibest = smibest * (int64_t)1000000 /
978 get_apic_timer_frequency();
979 }
980 }
981 if (smibest)
982 kprintf("SMI Frequency (worst case): %d Hz (%d us)\n",
983 1000000 / smibest, smibest);
984
46d4e165
JG
985 /* start each AP */
986 for (x = 1; x <= mp_naps; ++x) {
987
988 /* This is a bit verbose, it will go away soon. */
989
990 /* first page of AP's private space */
b2b3ffcd 991 pg = x * x86_64_btop(sizeof(struct privatespace));
46d4e165
JG
992
993 /* allocate new private data page(s) */
994 gd = (struct mdglobaldata *)kmem_alloc(&kernel_map,
995 MDGLOBALDATA_BASEALLOC_SIZE);
46d4e165
JG
996
997 gd = &CPU_prvspace[x].mdglobaldata; /* official location */
998 bzero(gd, sizeof(*gd));
999 gd->mi.gd_prvspace = ps = &CPU_prvspace[x];
1000
1001 /* prime data page for it to use */
1002 mi_gdinit(&gd->mi, x);
1003 cpu_gdinit(gd, x);
46d4e165
JG
1004 gd->mi.gd_ipiq = (void *)kmem_alloc(&kernel_map, sizeof(lwkt_ipiq) * (mp_naps + 1));
1005 bzero(gd->mi.gd_ipiq, sizeof(lwkt_ipiq) * (mp_naps + 1));
1006
1007 /* setup a vector to our boot code */
1008 *((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
1009 *((volatile u_short *) WARMBOOT_SEG) = (boot_addr >> 4);
1010 outb(CMOS_REG, BIOS_RESET);
1011 outb(CMOS_DATA, BIOS_WARM); /* 'warm-start' */
1012
1013 /*
1014 * Setup the AP boot stack
1015 */
1016 bootSTK = &ps->idlestack[UPAGES*PAGE_SIZE/2];
1017 bootAP = x;
1018
1019 /* attempt to start the Application Processor */
1020 CHECK_INIT(99); /* setup checkpoints */
bb467734 1021 if (!start_ap(gd, boot_addr, smibest)) {
ea96e50f
MD
1022 kprintf("\nAP #%d (PHY# %d) failed!\n",
1023 x, CPU_TO_ID(x));
46d4e165
JG
1024 CHECK_PRINT("trace"); /* show checkpoints */
1025 /* better panic as the AP may be running loose */
1026 kprintf("panic y/n? [y] ");
1027 if (cngetc() != 'n')
1028 panic("bye-bye");
1029 }
1030 CHECK_PRINT("trace"); /* show checkpoints */
1031
1032 /* record its version info */
1033 cpu_apic_versions[x] = cpu_apic_versions[0];
1034 }
1035
1036 /* set ncpus to 1 + highest logical cpu. Not all may have come up */
1037 ncpus = x;
1038
1039 /* ncpus2 -- ncpus rounded down to the nearest power of 2 */
1040 for (shift = 0; (1 << shift) <= ncpus; ++shift)
1041 ;
1042 --shift;
1043 ncpus2_shift = shift;
1044 ncpus2 = 1 << shift;
1045 ncpus2_mask = ncpus2 - 1;
1046
1047 /* ncpus_fit -- ncpus rounded up to the nearest power of 2 */
1048 if ((1 << shift) < ncpus)
1049 ++shift;
1050 ncpus_fit = 1 << shift;
1051 ncpus_fit_mask = ncpus_fit - 1;
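	/*
	 * Example (illustrative): for ncpus == 6 the loops above yield
	 * ncpus2 == 4 (rounded down) and ncpus_fit == 8 (rounded up).
	 */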
1052
1053 /* build our map of 'other' CPUs */
da23a592 1054 mycpu->gd_other_cpus = smp_startup_mask & ~CPUMASK(mycpu->gd_cpuid);
1055 mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map, sizeof(lwkt_ipiq) * ncpus);
1056 bzero(mycpu->gd_ipiq, sizeof(lwkt_ipiq) * ncpus);
1057
1058 /* fill in our (BSP) APIC version */
1059 cpu_apic_versions[0] = lapic->version;
1060
1061 /* restore the warmstart vector */
1062 *(u_long *) WARMBOOT_OFF = mpbioswarmvec;
1063 outb(CMOS_REG, BIOS_RESET);
1064 outb(CMOS_DATA, mpbiosreason);
1065
1066 /*
1067 * NOTE! The idlestack for the BSP was setup by locore. Finish
1068 * up, clean out the P==V mapping we did earlier.
1069 */
46d4e165
JG
1070 pmap_set_opt();
1071
1072 /*
1073 * Wait for all APs to finish initializing the LAPIC
1074 */
1075 mp_finish_lapic = 1;
1076 if (bootverbose)
1077 kprintf("SMP: Waiting for APs to finish LAPIC initialization\n");
1078 if (cpu_feature & CPUID_TSC)
1079 tsc0_offset = rdtsc();
1080 tsc_offsets[0] = 0;
1081 rel_mplock();
1082 while (smp_lapic_mask != smp_startup_mask) {
1083 cpu_lfence();
1084 if (cpu_feature & CPUID_TSC)
1085 tsc0_offset = rdtsc();
1086 }
1087 while (try_mplock() == 0)
1088 ;
1089
46d4e165
JG
1090 /* number of APs actually started */
1091 return ncpus - 1;
1092}
1093
1094
1095/*
1096 * load the 1st level AP boot code into base memory.
1097 */
1098
1099/* targets for relocation */
1100extern void bigJump(void);
1101extern void bootCodeSeg(void);
1102extern void bootDataSeg(void);
1103extern void MPentry(void);
1104extern u_int MP_GDT;
1105extern u_int mp_gdtbase;
1106
bfc09ba0
MD
1107#if 0
1108
46d4e165
JG
1109static void
1110install_ap_tramp(u_int boot_addr)
1111{
1112 int x;
1113 int size = *(int *) ((u_long) & bootMP_size);
1114 u_char *src = (u_char *) ((u_long) bootMP);
1115 u_char *dst = (u_char *) boot_addr + KERNBASE;
1116 u_int boot_base = (u_int) bootMP;
1117 u_int8_t *dst8;
1118 u_int16_t *dst16;
1119 u_int32_t *dst32;
1120
1121 POSTCODE(INSTALL_AP_TRAMP_POST);
1122
1123 for (x = 0; x < size; ++x)
1124 *dst++ = *src++;
1125
1126 /*
1127 * modify addresses in code we just moved to basemem. unfortunately we
1128 * need fairly detailed info about mpboot.s for this to work. changes
1129 * to mpboot.s might require changes here.
1130 */
1131
1132 /* boot code is located in KERNEL space */
1133 dst = (u_char *) boot_addr + KERNBASE;
1134
1135 /* modify the lgdt arg */
1136 dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base));
1137 *dst32 = boot_addr + ((u_int) & MP_GDT - boot_base);
1138
1139 /* modify the ljmp target for MPentry() */
1140 dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
1141 *dst32 = ((u_int) MPentry - KERNBASE);
1142
1143 /* modify the target for boot code segment */
1144 dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
1145 dst8 = (u_int8_t *) (dst16 + 1);
1146 *dst16 = (u_int) boot_addr & 0xffff;
1147 *dst8 = ((u_int) boot_addr >> 16) & 0xff;
1148
1149 /* modify the target for boot data segment */
1150 dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
1151 dst8 = (u_int8_t *) (dst16 + 1);
1152 *dst16 = (u_int) boot_addr & 0xffff;
1153 *dst8 = ((u_int) boot_addr >> 16) & 0xff;
1154}
1155
bfc09ba0 1156#endif
46d4e165
JG
1157
1158/*
bb467734 1159 * This function starts the AP (application processor) identified
46d4e165
JG
1160 * by the APIC ID 'physicalCpu'. It does quite a "song and dance"
1161 * to accomplish this. This is necessary because of the nuances
1162 * of the different hardware we might encounter. It ain't pretty,
1163 * but it seems to work.
1164 *
1165 * NOTE: eventually an AP gets to ap_init(), which is called just
1166 * before the AP goes into the LWKT scheduler's idle loop.
1167 */
1168static int
bb467734 1169start_ap(struct mdglobaldata *gd, u_int boot_addr, int smibest)
46d4e165
JG
1170{
1171 int physical_cpu;
1172 int vector;
1173 u_long icr_lo, icr_hi;
1174
1175 POSTCODE(START_AP_POST);
1176
1177 /* get the PHYSICAL APIC ID# */
1178 physical_cpu = CPU_TO_ID(gd->mi.gd_cpuid);
1179
1180 /* calculate the vector */
1181 vector = (boot_addr >> 12) & 0xff;
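	/*
	 * Example (illustrative): the STARTUP IPI vector is the page number
	 * of the trampoline, e.g. boot_addr 0x9c000 gives vector 0x9c and
	 * the AP begins execution at physical 0x9c000 (vector << 12).
	 */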
1182
1183 /* We don't want anything interfering */
1184 cpu_disable_intr();
1185
46d4e165
JG
1186 /* Make sure the target cpu sees everything */
1187 wbinvd();
1188
1189 /*
1190 * Try to detect when an SMI has occurred, wait up to 200ms.
1191 *
1192 * If a SMI occurs during an AP reset but before we issue
1193 * the STARTUP command, the AP may brick. To work around
1194 * this problem we hold off doing the AP startup until
1195 * after we have detected the SMI. Hopefully another SMI
1196 * will not occur before we finish the AP startup.
1197 *
1198 * Retries don't seem to help. SMIs have a window of opportunity
1199 * and if USB->legacy keyboard emulation is enabled in the BIOS
1200 * the interrupt rate can be quite high.
1201 *
1202 * NOTE: Don't worry about the L1 cache load, it might bloat
1203 * ldelta a little but ndelta will be so huge when the SMI
1204 * occurs the detection logic will still work fine.
1205 */
1206 if (smibest) {
1207 set_apic_timer(200000);
1208 smitest();
1209 }
1210
1211 /*
1212 * first we do an INIT/RESET IPI. This INIT IPI might be run, resetting
1213 * and running the target CPU. OR this INIT IPI might be latched (P5
1214 * bug), CPU waiting for STARTUP IPI. OR this INIT IPI might be
1215 * ignored.
bb467734
MD
1216 *
1217 * see apic/apicreg.h for icr bit definitions.
1218 *
1219 * TIME CRITICAL CODE, DO NOT DO ANY KPRINTFS IN THE HOT PATH.
46d4e165
JG
1220 */
1221
bb467734
MD
1222 /*
1223 * Setup the address for the target AP. We can setup
1224 * icr_hi once and then just trigger operations with
1225 * icr_lo.
1226 */
46d4e165
JG
1227 icr_hi = lapic->icr_hi & ~APIC_ID_MASK;
1228 icr_hi |= (physical_cpu << 24);
46d4e165 1229 icr_lo = lapic->icr_lo & 0xfff00000;
bb467734 1230 lapic->icr_hi = icr_hi;
46d4e165 1231
bb467734
MD
1232 /*
1233 * Do an INIT IPI: assert RESET
1234 *
1235 * Use edge triggered mode to assert INIT
1236 */
1237 lapic->icr_lo = icr_lo | 0x00004500;
46d4e165
JG
1238 while (lapic->icr_lo & APIC_DELSTAT_MASK)
1239 /* spin */ ;
1240
bb467734
MD
1241 /*
1242 * The spec calls for a 10ms delay but we may have to use a
1243 * MUCH lower delay to avoid bricking an AP due to a fast SMI
1244 * interrupt. We have other loops here too and dividing by 2
1245 * doesn't seem to be enough even after subtracting 350us,
1246 * so we divide by 4.
1247 *
1248 * Our minimum delay is 150uS, maximum is 10ms. If no SMI
1249 * interrupt was detected we use the full 10ms.
1250 */
1251 if (smibest == 0)
1252 u_sleep(10000);
1253 else if (smibest < 150 * 4 + 350)
1254 u_sleep(150);
1255 else if ((smibest - 350) / 4 < 10000)
1256 u_sleep((smibest - 350) / 4);
1257 else
1258 u_sleep(10000);
46d4e165 1259
bb467734
MD
1260 /*
1261 * Do an INIT IPI: deassert RESET
1262 *
1263 * Use level triggered mode to deassert. It is unclear
1264 * why we need to do this.
1265 */
1266 lapic->icr_lo = icr_lo | 0x00008500;
46d4e165
JG
1267 while (lapic->icr_lo & APIC_DELSTAT_MASK)
1268 /* spin */ ;
bb467734 1269 u_sleep(150); /* wait 150us */
46d4e165
JG
1270
1271 /*
bb467734 1272 * Next we do a STARTUP IPI: the previous INIT IPI might still be
1273 * latched, (P5 bug) this 1st STARTUP would then terminate
1274 * immediately, and the previously started INIT IPI would continue. OR
1275 * the previous INIT IPI has already run, and this STARTUP IPI will
1276 * run. OR the previous INIT IPI was ignored, and this STARTUP IPI
1277 * will run.
1278 */
46d4e165
JG
1279 lapic->icr_lo = icr_lo | 0x00000600 | vector;
1280 while (lapic->icr_lo & APIC_DELSTAT_MASK)
1281 /* spin */ ;
1282 u_sleep(200); /* wait ~200uS */
1283
1284 /*
bb467734 1285 * Finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run IF
46d4e165
JG
1286 * the previous STARTUP IPI was cancelled by a latched INIT IPI. OR
1287 * this STARTUP IPI will be ignored, as only ONE STARTUP IPI is
1288 * recognized after hardware RESET or INIT IPI.
1289 */
46d4e165
JG
1290 lapic->icr_lo = icr_lo | 0x00000600 | vector;
1291 while (lapic->icr_lo & APIC_DELSTAT_MASK)
1292 /* spin */ ;
bb467734
MD
1293
1294 /* Resume normal operation */
1295 cpu_enable_intr();
46d4e165
JG
1296
1297 /* wait for it to start, see ap_init() */
1298 set_apic_timer(5000000);/* == 5 seconds */
1299 while (read_apic_timer()) {
da23a592 1300 if (smp_startup_mask & CPUMASK(gd->mi.gd_cpuid))
46d4e165
JG
1301 return 1; /* return SUCCESS */
1302 }
bb467734 1303
46d4e165
JG
1304 return 0; /* return FAILURE */
1305}
1306
bb467734
MD
1307static
1308int
1309smitest(void)
1310{
1311 int64_t ltsc;
1312 int64_t ntsc;
1313 int64_t ldelta;
1314 int64_t ndelta;
1315 int count;
1316
1317 ldelta = 0;
1318 ndelta = 0;
1319 while (read_apic_timer()) {
1320 ltsc = rdtsc();
1321 for (count = 0; count < 100; ++count)
1322 ntsc = rdtsc(); /* force loop to occur */
1323 if (ldelta) {
1324 ndelta = ntsc - ltsc;
1325 if (ldelta > ndelta)
1326 ldelta = ndelta;
1327 if (ndelta > ldelta * 2)
1328 break;
1329 } else {
1330 ldelta = ntsc - ltsc;
1331 }
1332 }
1333 return(read_apic_timer());
1334}
46d4e165
JG
1335
1336/*
7d4d6fdb
MD
1337 * Synchronously flush the TLB on all other CPU's. The current cpu's
1338 * TLB is not flushed. If the caller wishes to flush the current cpu's
1339 * TLB the caller must call cpu_invltlb() in addition to smp_invltlb().
46d4e165 1340 *
7d4d6fdb
MD
1341 * NOTE: If for some reason we were unable to start all cpus we cannot
1342 * safely use broadcast IPIs.
46d4e165 1343 */
7d4d6fdb
MD
1344
1345static cpumask_t smp_invltlb_req;
1346
b4b1a37a
MD
1347#define SMP_INVLTLB_DEBUG
1348
46d4e165
JG
1349void
1350smp_invltlb(void)
1351{
1352#ifdef SMP
7d4d6fdb 1353 struct mdglobaldata *md = mdcpu;
2d910aaf 1354#ifdef SMP_INVLTLB_DEBUG
7d4d6fdb 1355 long count = 0;
2d910aaf 1356 long xcount = 0;
7d4d6fdb 1357#endif
4117f2fd 1358
7d4d6fdb
MD
1359 crit_enter_gd(&md->mi);
1360 md->gd_invltlb_ret = 0;
1361 ++md->mi.gd_cnt.v_smpinvltlb;
da23a592 1362 atomic_set_cpumask(&smp_invltlb_req, md->mi.gd_cpumask);
2d910aaf
MD
1363#ifdef SMP_INVLTLB_DEBUG
1364again:
1365#endif
46d4e165
JG
1366 if (smp_startup_mask == smp_active_mask) {
1367 all_but_self_ipi(XINVLTLB_OFFSET);
1368 } else {
7d4d6fdb
MD
1369 selected_apic_ipi(smp_active_mask & ~md->mi.gd_cpumask,
1370 XINVLTLB_OFFSET, APIC_DELMODE_FIXED);
46d4e165 1371 }
2d910aaf
MD
1372
1373#ifdef SMP_INVLTLB_DEBUG
1374 if (xcount)
1375 kprintf("smp_invltlb: ipi sent\n");
1376#endif
7d4d6fdb
MD
1377 while ((md->gd_invltlb_ret & smp_active_mask & ~md->mi.gd_cpumask) !=
1378 (smp_active_mask & ~md->mi.gd_cpumask)) {
1379 cpu_mfence();
1380 cpu_pause();
2d910aaf 1381#ifdef SMP_INVLTLB_DEBUG
7d4d6fdb
MD
1382 /* DEBUGGING */
1383 if (++count == 400000000) {
2d910aaf
MD
1384 print_backtrace(-1);
1385 kprintf("smp_invltlb: endless loop %08lx %08lx, "
1386 "rflags %016jx retry",
7d4d6fdb 1387 (long)md->gd_invltlb_ret,
2d910aaf
MD
1388 (long)smp_invltlb_req,
1389 (intmax_t)read_rflags());
1390 __asm __volatile ("sti");
1391 ++xcount;
1392 if (xcount > 2)
1393 lwkt_process_ipiq();
1394 if (xcount > 3) {
da23a592
MD
1395 int bcpu = BSFCPUMASK(~md->gd_invltlb_ret &
1396 ~md->mi.gd_cpumask &
1397 smp_active_mask);
2d910aaf
MD
1398 globaldata_t xgd;
1399
1400 kprintf("bcpu %d\n", bcpu);
1401 xgd = globaldata_find(bcpu);
1402 kprintf("thread %p %s\n", xgd->gd_curthread, xgd->gd_curthread->td_comm);
1403 }
1404 if (xcount > 5)
1405 Debugger("giving up");
1406 count = 0;
1407 goto again;
7d4d6fdb 1408 }
46d4e165 1409#endif
7d4d6fdb 1410 }
da23a592 1411 atomic_clear_cpumask(&smp_invltlb_req, md->mi.gd_cpumask);
7d4d6fdb 1412 crit_exit_gd(&md->mi);
4117f2fd 1413#endif
1414}
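/*
 * Usage sketch (hypothetical caller): per the comment above, code that
 * changes a kernel mapping flushes its own TLB and then the remote ones:
 *
 *	update the PTE(s);
 *	cpu_invltlb();		flush the local TLB
 *	smp_invltlb();		IPI the other cpus
 */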
1415
7d4d6fdb
MD
1416#ifdef SMP
1417
1418/*
1419 * Called from Xinvltlb assembly with interrupts disabled. We didn't
1420 * bother to bump the critical section count or nested interrupt count
1421 * so only do very low level operations here.
1422 */
1423void
1424smp_invltlb_intr(void)
1425{
1426 struct mdglobaldata *md = mdcpu;
1427 struct mdglobaldata *omd;
1428 cpumask_t mask;
1429 int cpu;
1430
7d4d6fdb 1431 cpu_mfence();
2d910aaf 1432 mask = smp_invltlb_req;
7d4d6fdb
MD
1433 cpu_invltlb();
1434 while (mask) {
da23a592
MD
1435 cpu = BSFCPUMASK(mask);
1436 mask &= ~CPUMASK(cpu);
7d4d6fdb 1437 omd = (struct mdglobaldata *)globaldata_find(cpu);
da23a592 1438 atomic_set_cpumask(&omd->gd_invltlb_ret, md->mi.gd_cpumask);
7d4d6fdb
MD
1439 }
1440}
1441
1442#endif
1443
46d4e165
JG
1444/*
1445 * When called the executing CPU will send an IPI to all other CPUs
1446 * requesting that they halt execution.
1447 *
1448 * Usually (but not necessarily) called with 'other_cpus' as its arg.
1449 *
1450 * - Signals all CPUs in map to stop.
1451 * - Waits for each to stop.
1452 *
1453 * Returns:
1454 * -1: error
1455 * 0: NA
1456 * 1: ok
1457 *
1458 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
1459 * from executing at the same time.
1460 */
1461int
da23a592 1462stop_cpus(cpumask_t map)
46d4e165
JG
1463{
1464 map &= smp_active_mask;
1465
1466 /* send the Xcpustop IPI to all CPUs in map */
1467 selected_apic_ipi(map, XCPUSTOP_OFFSET, APIC_DELMODE_FIXED);
1468
1469 while ((stopped_cpus & map) != map)
1470 /* spin */ ;
1471
1472 return 1;
1473}
1474
1475
1476/*
1477 * Called by a CPU to restart stopped CPUs.
1478 *
1479 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
1480 *
1481 * - Signals all CPUs in map to restart.
1482 * - Waits for each to restart.
1483 *
1484 * Returns:
1485 * -1: error
1486 * 0: NA
1487 * 1: ok
1488 */
1489int
da23a592 1490restart_cpus(cpumask_t map)
46d4e165
JG
1491{
1492 /* signal other cpus to restart */
1493 started_cpus = map & smp_active_mask;
1494
1495 while ((stopped_cpus & map) != 0) /* wait for each to clear its bit */
1496 /* spin */ ;
1497
1498 return 1;
1499}
1500
1501/*
1502 * This is called once the mpboot code has gotten us properly relocated
1503 * and the MMU turned on, etc. ap_init() is actually the idle thread,
1504 * and when it returns the scheduler will call the real cpu_idle() main
1505 * loop for the idlethread. Interrupts are disabled on entry and should
1506 * remain disabled at return.
1507 */
1508void
1509ap_init(void)
1510{
1511 u_int apic_id;
1512
1513 /*
1514 * Adjust smp_startup_mask to signal the BSP that we have started
1515 * up successfully. Note that we do not yet hold the BGL. The BSP
1516 * is waiting for our signal.
1517 *
1518 * We can't set our bit in smp_active_mask yet because we are holding
1519 * interrupts physically disabled and remote cpus could deadlock
1520 * trying to send us an IPI.
1521 */
da23a592 1522 smp_startup_mask |= CPUMASK(mycpu->gd_cpuid);
46d4e165
JG
1523 cpu_mfence();
1524
1525 /*
c6b1591c
SZ
1526 * Interlock for LAPIC initialization. Wait until mp_finish_lapic is
1527 * non-zero, then get the MP lock.
46d4e165
JG
1528 *
1529 * Note: We are in a critical section.
1530 *
46d4e165
JG
1531 * Note: we are the idle thread, we can only spin.
1532 *
1533 * Note: The load fence is memory volatile and prevents the compiler
c6b1591c 1534 * from improperly caching mp_finish_lapic, and the cpu from improperly
46d4e165
JG
1535 * caching it.
1536 */
c6b1591c 1537 while (mp_finish_lapic == 0)
b5d16701
MD
1538 cpu_lfence();
1539 while (try_mplock() == 0)
1540 ;
46d4e165
JG
1541
1542 if (cpu_feature & CPUID_TSC) {
b5d16701
MD
1543 /*
1544 * The BSP is constantly updating tsc0_offset, figure out
1545 * the relative difference to synchronize ktrdump.
1546 */
1547 tsc_offsets[mycpu->gd_cpuid] = rdtsc() - tsc0_offset;
46d4e165
JG
1548 }
1549
1550 /* BSP may have changed PTD while we're waiting for the lock */
1551 cpu_invltlb();
1552
46d4e165 1553 /* Build our map of 'other' CPUs. */
da23a592 1554 mycpu->gd_other_cpus = smp_startup_mask & ~CPUMASK(mycpu->gd_cpuid);
46d4e165 1555
46d4e165 1556 /* A quick check from sanity claus */
d53907dd 1557 apic_id = (apic_id_to_logical[(lapic->id & 0xff000000) >> 24]);
46d4e165
JG
1558 if (mycpu->gd_cpuid != apic_id) {
1559 kprintf("SMP: cpuid = %d\n", mycpu->gd_cpuid);
d53907dd
MD
1560 kprintf("SMP: apic_id = %d lapicid %d\n",
1561 apic_id, (lapic->id & 0xff000000) >> 24);
46d4e165
JG
1562#if JGXXX
1563 kprintf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]);
1564#endif
1565 panic("cpuid mismatch! boom!!");
1566 }
1567
1568 /* Initialize AP's local APIC for irq's */
5ddeabb9 1569 lapic_init(FALSE);
46d4e165 1570
c6b1591c
SZ
1571 /* LAPIC initialization is done */
1572 smp_lapic_mask |= CPUMASK(mycpu->gd_cpuid);
1573 cpu_mfence();
1574
1575 /* Let BSP move onto the next initialization stage */
1576 rel_mplock();
1577
1578 /*
1579 * Interlock for finalization. Wait until mp_finish is non-zero,
1580 * then get the MP lock.
1581 *
1582 * Note: We are in a critical section.
1583 *
1584 * Note: we are the idle thread, we can only spin.
1585 *
1586 * Note: The load fence is memory volatile and prevents the compiler
1587 * from improperly caching mp_finish, and the cpu from improperly
1588 * caching it.
1589 */
1590 while (mp_finish == 0)
1591 cpu_lfence();
1592 while (try_mplock() == 0)
1593 ;
1594
1595 /* BSP may have changed PTD while we're waiting for the lock */
1596 cpu_invltlb();
1597
46d4e165
JG
1598 /* Set memory range attributes for this CPU to match the BSP */
1599 mem_range_AP_init();
1600
1601 /*
1602 * Once we go active we must process any IPIQ messages that may
1603 * have been queued, because no actual IPI will occur until we
1604 * set our bit in the smp_active_mask. If we don't the IPI
1605 * message interlock could be left set which would also prevent
1606 * further IPIs.
1607 *
1608 * The idle loop doesn't expect the BGL to be held and, while
1609 * lwkt_switch() normally cleans things up, this is a special case
1610 * because we are returning almost directly into the idle loop.
1611 *
1612 * The idle thread is never placed on the runq, make sure
1613 * nothing we've done put it there.
1614 */
b5d16701 1615 KKASSERT(get_mplock_count(curthread) == 1);
da23a592 1616 smp_active_mask |= CPUMASK(mycpu->gd_cpuid);
46d4e165
JG
1617
1618 /*
1619 * Enable interrupts here. idle_restore will also do it, but
1620 * doing it here lets us clean up any strays that got posted to
1621 * the CPU during the AP boot while we are still in a critical
1622 * section.
1623 */
1624 __asm __volatile("sti; pause; pause"::);
9611ff20 1625 bzero(mdcpu->gd_ipending, sizeof(mdcpu->gd_ipending));
46d4e165
JG
1626
1627 initclocks_pcpu(); /* clock interrupts (via IPIs) */
1628 lwkt_process_ipiq();
1629
1630 /*
1631 * Releasing the mp lock lets the BSP finish up the SMP init
1632 */
1633 rel_mplock();
1634 KKASSERT((curthread->td_flags & TDF_RUNQ) == 0);
1635}
1636
1637/*
1638 * Get SMP fully working before we start initializing devices.
1639 */
1640static
1641void
1642ap_finish(void)
1643{
1644 mp_finish = 1;
1645 if (bootverbose)
1646 kprintf("Finish MP startup\n");
46d4e165 1647 rel_mplock();
c6b1591c 1648 while (smp_active_mask != smp_startup_mask)
46d4e165 1649 cpu_lfence();
46d4e165
JG
1650 while (try_mplock() == 0)
1651 ;
da23a592
MD
1652 if (bootverbose) {
1653 kprintf("Active CPU Mask: %016jx\n",
1654 (uintmax_t)smp_active_mask);
1655 }
46d4e165
JG
1656}
1657
1658SYSINIT(finishsmp, SI_BOOT2_FINISH_SMP, SI_ORDER_FIRST, ap_finish, NULL)
1659
1660void
1661cpu_send_ipiq(int dcpu)
1662{
da23a592 1663 if (CPUMASK(dcpu) & smp_active_mask)
46d4e165
JG
1664 single_apic_ipi(dcpu, XIPIQ_OFFSET, APIC_DELMODE_FIXED);
1665}
1666
1667#if 0 /* single_apic_ipi_passive() not working yet */
1668/*
1669 * Returns 0 on failure, 1 on success
1670 */
1671int
1672cpu_send_ipiq_passive(int dcpu)
1673{
1674 int r = 0;
da23a592 1675 if (CPUMASK(dcpu) & smp_active_mask) {
46d4e165
JG
1676 r = single_apic_ipi_passive(dcpu, XIPIQ_OFFSET,
1677 APIC_DELMODE_FIXED);
1678 }
1679 return(r);
1680}
1681#endif
1682
e0fd357f
SZ
1683static int
1684mptable_bus_info_callback(void *xarg, const void *pos, int type)
1685{
1686 struct mptable_bus_info *bus_info = xarg;
1687 const struct BUSENTRY *ent;
1688 struct mptable_bus *bus;
1689
1690 if (type != 1)
1691 return 0;
c715f062 1692
e0fd357f 1693 ent = pos;
c715f062
SZ
1694 TAILQ_FOREACH(bus, &bus_info->mbi_list, mb_link) {
1695 if (bus->mb_id == ent->bus_id) {
1696 kprintf("mptable_bus_info_alloc: duplicated bus id "
1697 "(%d)\n", bus->mb_id);
1698 return EINVAL;
1699 }
1700 }
e0fd357f
SZ
1701
1702 bus = NULL;
1703 if (strncmp(ent->bus_type, "PCI", 3) == 0) {
1704 bus = kmalloc(sizeof(*bus), M_TEMP, M_WAITOK | M_ZERO);
1705 bus->mb_type = MPTABLE_BUS_PCI;
1706 } else if (strncmp(ent->bus_type, "ISA", 3) == 0) {
1707 bus = kmalloc(sizeof(*bus), M_TEMP, M_WAITOK | M_ZERO);
1708 bus->mb_type = MPTABLE_BUS_ISA;
1709 }
1710
1711 if (bus != NULL) {
c715f062
SZ
1712 bus->mb_id = ent->bus_id;
1713 TAILQ_INSERT_TAIL(&bus_info->mbi_list, bus, mb_link);
e0fd357f
SZ
1714 }
1715 return 0;
1716}
1717
1718static void
1719mptable_bus_info_alloc(const mpcth_t cth, struct mptable_bus_info *bus_info)
1720{
1721 int error;
1722
1723 bzero(bus_info, sizeof(*bus_info));
1724 TAILQ_INIT(&bus_info->mbi_list);
1725
1726 error = mptable_iterate_entries(cth, mptable_bus_info_callback, bus_info);
1727 if (error)
1728 mptable_bus_info_free(bus_info);
1729}
1730
1731static void
1732mptable_bus_info_free(struct mptable_bus_info *bus_info)
1733{
1734 struct mptable_bus *bus;
1735
1736 while ((bus = TAILQ_FIRST(&bus_info->mbi_list)) != NULL) {
1737 TAILQ_REMOVE(&bus_info->mbi_list, bus, mb_link);
1738 kfree(bus, M_TEMP);
1739 }
1740}
1741
16794646
MN
1742struct mptable_lapic_cbarg1 {
1743 int cpu_count;
7f310ea1
MN
1744 int ht_fixup;
1745 u_int ht_apicid_mask;
16794646
MN
1746};
1747
1748static int
1749mptable_lapic_pass1_callback(void *xarg, const void *pos, int type)
1750{
1751 const struct PROCENTRY *ent;
1752 struct mptable_lapic_cbarg1 *arg = xarg;
1753
1754 if (type != 0)
1755 return 0;
1756 ent = pos;
1757
1758 if ((ent->cpu_flags & PROCENTRY_FLAG_EN) == 0)
1759 return 0;
1760
1761 arg->cpu_count++;
7f310ea1
MN
1762 if (ent->apic_id < 32) {
1763 arg->ht_apicid_mask |= 1 << ent->apic_id;
1764 } else if (arg->ht_fixup) {
1765 kprintf("MPTABLE: lapic id > 32, disable HTT fixup\n");
1766 arg->ht_fixup = 0;
1767 }
16794646
MN
1768 return 0;
1769}
1770
1771struct mptable_lapic_cbarg2 {
1772 int cpu;
7f310ea1 1773 int logical_cpus;
16794646
MN
1774 int found_bsp;
1775};
1776
1777static int
1778mptable_lapic_pass2_callback(void *xarg, const void *pos, int type)
1779{
1780 const struct PROCENTRY *ent;
1781 struct mptable_lapic_cbarg2 *arg = xarg;
1782
1783 if (type != 0)
1784 return 0;
1785 ent = pos;
1786
1787 if (ent->cpu_flags & PROCENTRY_FLAG_BP) {
1788 KKASSERT(!arg->found_bsp);
1789 arg->found_bsp = 1;
1790 }
1791
1792 if (processor_entry(ent, arg->cpu))
1793 arg->cpu++;
1794
7f310ea1 1795 if (arg->logical_cpus) {
16794646
MN
1796 struct PROCENTRY proc;
1797 int i;
1798
1799 /*
1800 * Create fake mptable processor entries
1801 * and feed them to processor_entry() to
1802 * enumerate the logical CPUs.
1803 */
1804 bzero(&proc, sizeof(proc));
1805 proc.type = 0;
1806 proc.cpu_flags = PROCENTRY_FLAG_EN;
1807 proc.apic_id = ent->apic_id;
1808
7f310ea1 1809 for (i = 1; i < arg->logical_cpus; i++) {
16794646
MN
1810 proc.apic_id++;
1811 processor_entry(&proc, arg->cpu);
16794646
MN
1812 arg->cpu++;
1813 }
1814 }
1815 return 0;
1816}
1817
afcb64b2
MN
1818static void
1819mptable_lapic_default(void)
1820{
1821 int ap_apicid, bsp_apicid;
1822
1823 mp_naps = 1; /* exclude BSP */
1824
1825 /* Map local apic before the id field is accessed */
b44f1d28 1826 lapic_map(DEFAULT_APIC_BASE);
afcb64b2
MN
1827
1828 bsp_apicid = APIC_ID(lapic->id);
1829 ap_apicid = (bsp_apicid == 0) ? 1 : 0;
1830
1831 /* BSP */
1832 mp_set_cpuids(0, bsp_apicid);
1833 /* one and only AP */
1834 mp_set_cpuids(1, ap_apicid);
1835}
1836
16794646
MN
1837/*
1838 * Configure:
16794646 1839 * mp_naps
7159723d 1840 * ID_TO_CPU(N), APIC ID to logical CPU table
16794646
MN
1841 * CPU_TO_ID(N), logical CPU to APIC ID table
1842 */
1843static void
91903a05 1844mptable_lapic_enumerate(struct lapic_enumerator *e)
16794646 1845{
91903a05 1846 struct mptable_pos mpt;
afcb64b2
MN
1847 struct mptable_lapic_cbarg1 arg1;
1848 struct mptable_lapic_cbarg2 arg2;
1849 mpcth_t cth;
7f310ea1 1850 int error, logical_cpus = 0;
f2fc5f9b 1851 vm_offset_t lapic_addr;
16794646 1852
c455a23f 1853 if (mptable_use_default) {
afcb64b2
MN
1854 mptable_lapic_default();
1855 return;
16794646 1856 }
afcb64b2 1857
c455a23f
SZ
1858 error = mptable_map(&mpt);
1859 if (error)
1860 panic("mptable_lapic_enumerate mptable_map failed\n");
1861 KKASSERT(!MPTABLE_POS_USE_DEFAULT(&mpt));
1862
91903a05 1863 cth = mpt.mp_cth;
afcb64b2
MN
1864
1865 /* Save local apic address */
1866 lapic_addr = (vm_offset_t)cth->apic_address;
1867 KKASSERT(lapic_addr != 0);
1868
1869 /*
1870 * Find out how many CPUs do we have
1871 */
1872 bzero(&arg1, sizeof(arg1));
7f310ea1
MN
1873 arg1.ht_fixup = 1; /* Apply ht fixup by default */
1874
afcb64b2
MN
1875 error = mptable_iterate_entries(cth,
1876 mptable_lapic_pass1_callback, &arg1);
1877 if (error)
1878 panic("mptable_iterate_entries(lapic_pass1) failed\n");
afcb64b2 1879 KKASSERT(arg1.cpu_count != 0);
afcb64b2
MN
1880
1881 /* See if we need to fixup HT logical CPUs. */
7f310ea1
MN
1882 if (arg1.ht_fixup) {
1883 logical_cpus = mptable_hyperthread_fixup(arg1.ht_apicid_mask,
1884 arg1.cpu_count);
1885 if (logical_cpus != 0)
1886 arg1.cpu_count *= logical_cpus;
1887 }
1888 mp_naps = arg1.cpu_count;
afcb64b2 1889
7f310ea1 1890 /* Qualify the numbers again, after possible HT fixup */
afcb64b2
MN
1891 if (mp_naps > MAXCPU) {
1892 kprintf("Warning: only using %d of %d available CPUs!\n",
1893 MAXCPU, mp_naps);
8e5ea5f7 1894 DELAY(1000000);
afcb64b2
MN
1895 mp_naps = MAXCPU;
1896 }
16794646 1897
afcb64b2 1898 --mp_naps; /* subtract the BSP */
16794646 1899
afcb64b2
MN
1900 /*
1901 * Link logical CPU id to local apic id
1902 */
1903 bzero(&arg2, sizeof(arg2));
1904 arg2.cpu = 1;
7f310ea1 1905 arg2.logical_cpus = logical_cpus;
16794646 1906
afcb64b2
MN
1907 error = mptable_iterate_entries(cth,
1908 mptable_lapic_pass2_callback, &arg2);
1909 if (error)
1910 panic("mptable_iterate_entries(lapic_pass2) failed\n");
1911 KKASSERT(arg2.found_bsp);
16794646 1912
afcb64b2 1913 /* Map local apic */
b44f1d28 1914 lapic_map(lapic_addr);
91903a05
MN
1915
1916 mptable_unmap(&mpt);
1917}
1918
fe423084
SZ
1919struct mptable_lapic_probe_cbarg {
1920 int cpu_count;
1921 int found_bsp;
1922};
1923
91903a05 1924static int
fe423084 1925mptable_lapic_probe_callback(void *xarg, const void *pos, int type)
91903a05 1926{
fe423084
SZ
1927 const struct PROCENTRY *ent;
1928 struct mptable_lapic_probe_cbarg *arg = xarg;
91903a05 1929
fe423084
SZ
1930 if (type != 0)
1931 return 0;
1932 ent = pos;
91903a05 1933
fe423084
SZ
1934 if ((ent->cpu_flags & PROCENTRY_FLAG_EN) == 0)
1935 return 0;
1936 arg->cpu_count++;
1937
1938 if (ent->cpu_flags & PROCENTRY_FLAG_BP) {
1939 if (arg->found_bsp) {
1940 kprintf("more than one BSP in base MP table\n");
1941 return EINVAL;
1942 }
1943 arg->found_bsp = 1;
1944 }
91903a05 1945 return 0;
16794646 1946}
f2fc5f9b 1947
fe423084
SZ
1948static int
1949mptable_lapic_probe(struct lapic_enumerator *e)
1950{
1951 struct mptable_pos mpt;
1952 struct mptable_lapic_probe_cbarg arg;
1953 mpcth_t cth;
1954 int error;
1955
1956 if (mptable_fps_phyaddr == 0)
1957 return ENXIO;
1958
c455a23f
SZ
1959 if (mptable_use_default)
1960 return 0;
1961
fe423084
SZ
1962 error = mptable_map(&mpt);
1963 if (error)
1964 return error;
c455a23f 1965 KKASSERT(!MPTABLE_POS_USE_DEFAULT(&mpt));
fe423084
SZ
1966
1967 error = EINVAL;
fe423084 1968 cth = mpt.mp_cth;
c455a23f 1969
fe423084
SZ
1970 if (cth->apic_address == 0)
1971 goto done;
1972
1973 bzero(&arg, sizeof(arg));
1974 error = mptable_iterate_entries(cth,
1975 mptable_lapic_probe_callback, &arg);
1976 if (!error) {
1977 if (arg.cpu_count == 0) {
1978 kprintf("MP table contains no processor entries\n");
1979 error = EINVAL;
1980 } else if (!arg.found_bsp) {
1981 kprintf("MP table does not contains BSP entry\n");
1982 error = EINVAL;
1983 }
91903a05 1984 }
fe423084
SZ
1985done:
1986 mptable_unmap(&mpt);
1987 return error;
1988}
1989
1990static struct lapic_enumerator mptable_lapic_enumerator = {
1991 .lapic_prio = LAPIC_ENUM_PRIO_MPTABLE,
1992 .lapic_probe = mptable_lapic_probe,
1993 .lapic_enumerate = mptable_lapic_enumerate
91903a05
MN
1994};
1995
f2fc5f9b 1996static void
becce73f 1997mptable_lapic_enum_register(void)
e6a7270f 1998{
fe423084 1999 lapic_enumerator_register(&mptable_lapic_enumerator);
e6a7270f 2000}
becce73f
SZ
2001SYSINIT(mptable_lapic, SI_BOOT2_PRESMP, SI_ORDER_ANY,
2002 mptable_lapic_enum_register, 0);
e0fd357f
SZ
2003
2004static int
6b881b58
SZ
2005mptable_ioapic_list_callback(void *xarg, const void *pos, int type)
2006{
2007 const struct IOAPICENTRY *ent;
2008 struct mptable_ioapic *nioapic, *ioapic;
2009
2010 if (type != 2)
2011 return 0;
2012 ent = pos;
2013
2014 if ((ent->apic_flags & IOAPICENTRY_FLAG_EN) == 0)
2015 return 0;
2016
2017 if (ent->apic_address == 0) {
2018 kprintf("mptable_ioapic_create_list: zero IOAPIC addr\n");
2019 return EINVAL;
2020 }
2021
2022 TAILQ_FOREACH(ioapic, &mptable_ioapic_list, mio_link) {
2023 if (ioapic->mio_apic_id == ent->apic_id) {
2024 kprintf("mptable_ioapic_create_list: duplicated "
2025 "apic id %d\n", ioapic->mio_apic_id);
2026 return EINVAL;
2027 }
2028 if (ioapic->mio_addr == ent->apic_address) {
2029 kprintf("mptable_ioapic_create_list: overlapped "
2030 "IOAPIC addr 0x%08x", ioapic->mio_addr);
2031 return EINVAL;
2032 }
2033 }
2034
2035 nioapic = kmalloc(sizeof(*nioapic), M_DEVBUF, M_WAITOK | M_ZERO);
2036 nioapic->mio_apic_id = ent->apic_id;
2037 nioapic->mio_addr = ent->apic_address;
2038
2039 /*
2040 * Create IOAPIC list in ascending order of APIC ID
2041 */
2042 TAILQ_FOREACH_REVERSE(ioapic, &mptable_ioapic_list,
2043 mptable_ioapic_list, mio_link) {
2044 if (nioapic->mio_apic_id > ioapic->mio_apic_id) {
2045 TAILQ_INSERT_AFTER(&mptable_ioapic_list,
2046 ioapic, nioapic, mio_link);
2047 break;
2048 }
2049 }
2050 if (ioapic == NULL)
2051 TAILQ_INSERT_HEAD(&mptable_ioapic_list, nioapic, mio_link);
2052
2053 return 0;
2054}
2055
2056static void
2057mptable_ioapic_create_list(void)
2058{
2059 struct mptable_ioapic *ioapic;
2060 struct mptable_pos mpt;
2061 int idx, error;
2062
2063 if (mptable_fps_phyaddr == 0)
2064 return;
2065
2066 if (mptable_use_default) {
2067 ioapic = kmalloc(sizeof(*ioapic), M_DEVBUF, M_WAITOK | M_ZERO);
2068 ioapic->mio_idx = 0;
2069 ioapic->mio_apic_id = 0; /* NOTE: any value is ok here */
2070 ioapic->mio_addr = 0xfec00000; /* architectural default IOAPIC base */
2071
2072 TAILQ_INSERT_HEAD(&mptable_ioapic_list, ioapic, mio_link);
2073 return;
2074 }
2075
2076 error = mptable_map(&mpt);
2077 if (error)
2078 panic("mptable_ioapic_create_list: mptable_map failed\n");
2079 KKASSERT(!MPTABLE_POS_USE_DEFAULT(&mpt));
2080
2081 error = mptable_iterate_entries(mpt.mp_cth,
2082 mptable_ioapic_list_callback, NULL);
2083 if (error) {
2084 while ((ioapic = TAILQ_FIRST(&mptable_ioapic_list)) != NULL) {
2085 TAILQ_REMOVE(&mptable_ioapic_list, ioapic, mio_link);
2086 kfree(ioapic, M_DEVBUF);
2087 }
2088 goto done;
2089 }
2090
2091 /*
2092 * Assign index number for each IOAPIC
2093 */
2094 idx = 0;
2095 TAILQ_FOREACH(ioapic, &mptable_ioapic_list, mio_link) {
2096 ioapic->mio_idx = idx;
2097 ++idx;
2098 }
2099done:
2100 mptable_unmap(&mpt);
2101}
2102SYSINIT(mptable_ioapic_list, SI_BOOT2_PRESMP, SI_ORDER_SECOND,
2103 mptable_ioapic_create_list, 0);
2104
2105static int
e0fd357f
SZ
2106mptable_pci_int_callback(void *xarg, const void *pos, int type)
2107{
2108 const struct mptable_bus_info *bus_info = xarg;
6b881b58 2109 const struct mptable_ioapic *ioapic;
e0fd357f
SZ
2110 const struct mptable_bus *bus;
2111 struct mptable_pci_int *pci_int;
2112 const struct INTENTRY *ent;
2113 int pci_pin, pci_dev;
2114
2115 if (type != 3)
2116 return 0;
2117 ent = pos;
2118
2119 if (ent->int_type != 0)
2120 return 0;
2121
2122 TAILQ_FOREACH(bus, &bus_info->mbi_list, mb_link) {
2123 if (bus->mb_type == MPTABLE_BUS_PCI &&
2124 bus->mb_id == ent->src_bus_id)
2125 break;
2126 }
2127 if (bus == NULL)
2128 return 0;
2129
6b881b58
SZ
2130 TAILQ_FOREACH(ioapic, &mptable_ioapic_list, mio_link) {
2131 if (ioapic->mio_apic_id == ent->dst_apic_id)
2132 break;
2133 }
2134 if (ioapic == NULL) {
2135 kprintf("MPTABLE: warning PCI int dst apic id %d "
2136 "does not exist\n", ent->dst_apic_id);
2137 return 0;
2138 }
2139
e0fd357f
SZ
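	/*
	 * For a PCI source bus the MP spec packs the source into
	 * src_bus_irq: bits 0-1 are the INT pin (0 = INTA#) and
	 * bits 2-6 are the PCI device number.
	 */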
2140 pci_pin = ent->src_bus_irq & 0x3;
2141 pci_dev = (ent->src_bus_irq >> 2) & 0x1f;
2142
2143 TAILQ_FOREACH(pci_int, &mptable_pci_int_list, mpci_link) {
2144 if (pci_int->mpci_bus == ent->src_bus_id &&
2145 pci_int->mpci_dev == pci_dev &&
2146 pci_int->mpci_pin == pci_pin) {
6b881b58 2147 if (pci_int->mpci_ioapic_idx == ioapic->mio_idx &&
e0fd357f
SZ
2148 pci_int->mpci_ioapic_pin == ent->dst_apic_int) {
2149 kprintf("MPTABLE: warning duplicated "
2150 "PCI int entry for "
2151 "bus %d, dev %d, pin %d\n",
2152 pci_int->mpci_bus,
2153 pci_int->mpci_dev,
2154 pci_int->mpci_pin);
2155 return 0;
2156 } else {
2157 kprintf("mptable_pci_int_register: "
2158 "conflict PCI int entry for "
2159 "bus %d, dev %d, pin %d, "
2160 "IOAPIC %d.%d -> %d.%d\n",
2161 pci_int->mpci_bus,
2162 pci_int->mpci_dev,
2163 pci_int->mpci_pin,
6b881b58 2164 pci_int->mpci_ioapic_idx,
e0fd357f 2165 pci_int->mpci_ioapic_pin,
6b881b58 2166 ioapic->mio_idx,
e0fd357f
SZ
2167 ent->dst_apic_int);
2168 return EINVAL;
2169 }
2170 }
2171 }
2172
2619977b 2173 pci_int = kmalloc(sizeof(*pci_int), M_DEVBUF, M_WAITOK | M_ZERO);
e0fd357f
SZ
2174
2175 pci_int->mpci_bus = ent->src_bus_id;
2176 pci_int->mpci_dev = pci_dev;
2177 pci_int->mpci_pin = pci_pin;
6b881b58 2178 pci_int->mpci_ioapic_idx = ioapic->mio_idx;
e0fd357f
SZ
2179 pci_int->mpci_ioapic_pin = ent->dst_apic_int;
2180
2181 TAILQ_INSERT_TAIL(&mptable_pci_int_list, pci_int, mpci_link);
2182
2183 return 0;
2184}
2185
2186static void
2187mptable_pci_int_register(void)
2188{
2189 struct mptable_bus_info bus_info;
2190 const struct mptable_bus *bus;
2191 struct mptable_pci_int *pci_int;
2192 struct mptable_pos mpt;
2193 int error, force_pci0, npcibus;
2194 mpcth_t cth;
2195
2196 if (mptable_fps_phyaddr == 0)
2197 return;
2198
2199 if (mptable_use_default)
2200 return;
2201
6b881b58
SZ
2202 if (TAILQ_EMPTY(&mptable_ioapic_list))
2203 return;
2204
e0fd357f
SZ
2205 error = mptable_map(&mpt);
2206 if (error)
2207 panic("mptable_pci_int_register: mptable_map failed\n");
2208 KKASSERT(!MPTABLE_POS_USE_DEFAULT(&mpt));
2209
2210 cth = mpt.mp_cth;
2211
2212 mptable_bus_info_alloc(cth, &bus_info);
2213 if (TAILQ_EMPTY(&bus_info.mbi_list))
2214 goto done;
2215
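	/*
	 * Count the PCI buses described by the table; if there is
	 * exactly one, every recorded PCI interrupt entry is renumbered
	 * to bus 0 once the scan below has finished.
	 */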
8d905764 2216 force_pci0 = 0;
e0fd357f
SZ
2217 npcibus = 0;
2218 TAILQ_FOREACH(bus, &bus_info.mbi_list, mb_link) {
2219 if (bus->mb_type == MPTABLE_BUS_PCI)
2220 ++npcibus;
2221 }
2222 if (npcibus == 0) {
2223 mptable_bus_info_free(&bus_info);
2224 goto done;
2225 } else if (npcibus == 1) {
2226 force_pci0 = 1;
2227 }
2228
2229 error = mptable_iterate_entries(cth,
2230 mptable_pci_int_callback, &bus_info);
2231
2232 mptable_bus_info_free(&bus_info);
2233
2234 if (error) {
2235 while ((pci_int = TAILQ_FIRST(&mptable_pci_int_list)) != NULL) {
2236 TAILQ_REMOVE(&mptable_pci_int_list, pci_int, mpci_link);
2237 kfree(pci_int, M_DEVBUF);
2238 }
2239 goto done;
2240 }
2241
2242 if (force_pci0) {
2243 TAILQ_FOREACH(pci_int, &mptable_pci_int_list, mpci_link)
2244 pci_int->mpci_bus = 0;
2245 }
2246done:
2247 mptable_unmap(&mpt);
2248}
2249SYSINIT(mptable_pci, SI_BOOT2_PRESMP, SI_ORDER_ANY,
2250 mptable_pci_int_register, 0);
7da2706b
SZ
2251
2252struct mptable_ioapic_probe_cbarg {
2253 const struct mptable_bus_info *bus_info;
7da2706b
SZ
2254};
2255
2256static int
2257mptable_ioapic_probe_callback(void *xarg, const void *pos, int type)
2258{
2259 struct mptable_ioapic_probe_cbarg *arg = xarg;
6b881b58
SZ
2260 const struct mptable_ioapic *ioapic;
2261 const struct mptable_bus *bus;
2262 const struct INTENTRY *ent;
7da2706b 2263
6b881b58
SZ
2264 if (type != 3)
2265 return 0;
2266 ent = pos;
7da2706b 2267
6b881b58
SZ
2268 if (ent->int_type != 0)
2269 return 0;
7da2706b 2270
6b881b58
SZ
2271 TAILQ_FOREACH(bus, &arg->bus_info->mbi_list, mb_link) {
2272 if (bus->mb_type == MPTABLE_BUS_ISA &&
2273 bus->mb_id == ent->src_bus_id)
2274 break;
2275 }
2276 if (bus == NULL)
2277 return 0;
7da2706b 2278
6b881b58
SZ
2279 TAILQ_FOREACH(ioapic, &mptable_ioapic_list, mio_link) {
2280 if (ioapic->mio_apic_id == ent->dst_apic_id)
2281 break;
2282 }
2283 if (ioapic == NULL) {
2284 kprintf("MPTABLE: warning ISA int dst apic id %d "
2285 "does not exist\n", ent->dst_apic_id);
2286 return 0;
2287 }
7da2706b 2288
6b881b58
SZ
2289 /* ISA has only 16 IRQs */
2290 if (ent->src_bus_irq >= 16) {
2291 kprintf("mptable_ioapic_probe: invalid ISA irq (%d)\n",
2292 ent->src_bus_irq);
2293 return EINVAL;
7da2706b
SZ
2294 }
2295 return 0;
2296}
2297
2298static int
2299mptable_ioapic_probe(struct ioapic_enumerator *e)
2300{
2301 struct mptable_ioapic_probe_cbarg arg;
2302 struct mptable_bus_info bus_info;
2303 struct mptable_pos mpt;
2304 mpcth_t cth;
2305 int error;
2306
2307 if (mptable_fps_phyaddr == 0)
2308 return ENXIO;
2309
2310 if (mptable_use_default)
2311 return 0;
2312
6b881b58
SZ
2313 if (TAILQ_EMPTY(&mptable_ioapic_list))
2314 return ENXIO;
2315
7da2706b
SZ
2316 error = mptable_map(&mpt);
2317 if (error)
2318 panic("mptable_ioapic_probe: mptable_map failed\n");
2319 KKASSERT(!MPTABLE_POS_USE_DEFAULT(&mpt));
2320
2321 cth = mpt.mp_cth;
2322
2323 mptable_bus_info_alloc(cth, &bus_info);
2324
2325 bzero(&arg, sizeof(arg));
2326 arg.bus_info = &bus_info;
2327
2328 error = mptable_iterate_entries(cth,
2329 mptable_ioapic_probe_callback, &arg);
7da2706b
SZ
2330
2331 mptable_bus_info_free(&bus_info);
2332 mptable_unmap(&mpt);
2333
2334 return error;
2335}
2336
7da2706b
SZ
2337struct mptable_ioapic_int_cbarg {
2338 const struct mptable_bus_info *bus_info;
2339 int ioapic_nint;
2340};
2341
2342static int
2343mptable_ioapic_int_callback(void *xarg, const void *pos, int type)
2344{
2345 struct mptable_ioapic_int_cbarg *arg = xarg;
512fb675 2346 const struct mptable_ioapic *ioapic;
7da2706b
SZ
2347 const struct mptable_bus *bus;
2348 const struct INTENTRY *ent;
7a603b36 2349 int gsi;
7da2706b
SZ
2350
2351 if (type != 3)
2352 return 0;
2353
2354 arg->ioapic_nint++;
2355
2356 ent = pos;
2357 if (ent->int_type != 0)
2358 return 0;
2359
2360 TAILQ_FOREACH(bus, &arg->bus_info->mbi_list, mb_link) {
2361 if (bus->mb_type == MPTABLE_BUS_ISA &&
2362 bus->mb_id == ent->src_bus_id)
2363 break;
2364 }
2365 if (bus == NULL)
2366 return 0;
2367
512fb675
SZ
2368 TAILQ_FOREACH(ioapic, &mptable_ioapic_list, mio_link) {
2369 if (ioapic->mio_apic_id == ent->dst_apic_id)
2370 break;
2371 }
2372 if (ioapic == NULL) {
2373 kprintf("MPTABLE: warning ISA int dst apic id %d "
2374 "does not exist\n", ent->dst_apic_id);
2375 return 0;
2376 }
2377
7a603b36
SZ
2378 if (ent->dst_apic_int >= ioapic->mio_npin) {
2379 panic("mptable_ioapic_enumerate: invalid I/O APIC "
2380 "pin %d, should be < %d",
2381 ent->dst_apic_int, ioapic->mio_npin);
2382 }
2383 gsi = ioapic->mio_gsi_base + ent->dst_apic_int;
512fb675 2384
7a603b36
SZ
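	/*
	 * If the ISA IRQ differs from the GSI its IOAPIC pin maps to,
	 * record the mapping as an interrupt source override.
	 */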
2385 if (ent->src_bus_irq != gsi) {
2386 if (bootverbose) {
2387 kprintf("MPTABLE: INTSRC irq %d -> GSI %d\n",
2388 ent->src_bus_irq, gsi);
7da2706b 2389 }
7a603b36
SZ
2390 ioapic_intsrc(ent->src_bus_irq, gsi,
2391 INTR_TRIGGER_EDGE, INTR_POLARITY_HIGH);
7da2706b
SZ
2392 }
2393 return 0;
2394}
2395
2396static void
2397mptable_ioapic_enumerate(struct ioapic_enumerator *e)
2398{
2399 struct mptable_bus_info bus_info;
0471bb0e 2400 struct mptable_ioapic *ioapic;
7da2706b
SZ
2401 struct mptable_pos mpt;
2402 mpcth_t cth;
2403 int error;
2404
2405 KKASSERT(mptable_fps_phyaddr != 0);
6b881b58 2406 KKASSERT(!TAILQ_EMPTY(&mptable_ioapic_list));
7da2706b 2407
6b881b58 2408 TAILQ_FOREACH(ioapic, &mptable_ioapic_list, mio_link) {
7a603b36
SZ
2409 const struct mptable_ioapic *prev_ioapic;
2410 uint32_t ver;
2411 void *addr;
0471bb0e 2412
7a603b36 2413 addr = ioapic_map(ioapic->mio_addr);
0471bb0e 2414
7a603b36
SZ
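		/*
		 * The IOAPIC version register reports the index of the
		 * highest redirection table entry, so the pin count is
		 * that value plus one.
		 */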
2415 ver = ioapic_read(addr, IOAPIC_VER);
2416 ioapic->mio_npin = ((ver & IOART_VER_MAXREDIR)
2417 >> MAXREDIRSHIFT) + 1;
0471bb0e 2418
7a603b36
SZ
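		/*
		 * GSIs are assigned back-to-back in list (APIC id) order:
		 * each IOAPIC's base starts right after the pins of the
		 * previous one.
		 */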
2419 prev_ioapic = TAILQ_PREV(ioapic,
2420 mptable_ioapic_list, mio_link);
2421 if (prev_ioapic == NULL) {
2422 ioapic->mio_gsi_base = 0;
2423 } else {
2424 ioapic->mio_gsi_base =
2425 prev_ioapic->mio_gsi_base +
2426 prev_ioapic->mio_npin;
0471bb0e 2427 }
7a603b36
SZ
2428 ioapic_add(addr, ioapic->mio_gsi_base, ioapic->mio_npin);
2429
7da2706b 2430 if (bootverbose) {
6b881b58 2431 kprintf("MPTABLE: IOAPIC addr 0x%08x, "
0471bb0e 2432 "apic id %d, idx %d, gsi base %d, npin %d\n",
6b881b58 2433 ioapic->mio_addr,
0471bb0e
SZ
2434 ioapic->mio_apic_id,
2435 ioapic->mio_idx,
2436 ioapic->mio_gsi_base,
2437 ioapic->mio_npin);
7da2706b 2438 }
6b881b58
SZ
2439 }
2440
2441 if (mptable_use_default) {
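		/*
		 * On PC hardware ISA IRQ 0 (the 8254 timer) is
		 * conventionally wired to IOAPIC pin 2, so install that
		 * override for the default configuration.
		 */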
2442 if (bootverbose)
2443 kprintf("MPTABLE: INTSRC irq 0 -> GSI 2 (default)\n");
ae80be10 2444 ioapic_intsrc(0, 2, INTR_TRIGGER_EDGE, INTR_POLARITY_HIGH);
7da2706b
SZ
2445 return;
2446 }
2447
2448 error = mptable_map(&mpt);
2449 if (error)
2450 panic("mptable_ioapic_probe: mptable_map failed\n");
2451 KKASSERT(!MPTABLE_POS_USE_DEFAULT(&mpt));
2452
2453 cth = mpt.mp_cth;
2454
7da2706b
SZ
2455 mptable_bus_info_alloc(cth, &bus_info);
2456
2457 if (TAILQ_EMPTY(&bus_info.mbi_list)) {
2458 if (bootverbose)
2459 kprintf("MPTABLE: INTSRC irq 0 -> GSI 2 (no bus)\n");
ae80be10 2460 ioapic_intsrc(0, 2, INTR_TRIGGER_EDGE, INTR_POLARITY_HIGH);
7da2706b
SZ
2461 } else {
2462 struct mptable_ioapic_int_cbarg arg;
2463
2464 bzero(&arg, sizeof(arg));
2465 arg.bus_info = &bus_info;
2466
2467 error = mptable_iterate_entries(cth,
2468 mptable_ioapic_int_callback, &arg);
2469 if (error)
2470 panic("mptable_ioapic_int failed\n");
2471
2472 if (arg.ioapic_nint == 0) {
2473 if (bootverbose) {
2474 kprintf("MPTABLE: INTSRC irq 0 -> GSI 2 "
2475 "(no int)\n");
2476 }
ae80be10
SZ
2477 ioapic_intsrc(0, 2, INTR_TRIGGER_EDGE,
2478 INTR_POLARITY_HIGH);
7da2706b
SZ
2479 }
2480 }
2481
2482 mptable_bus_info_free(&bus_info);
2483
2484 mptable_unmap(&mpt);
2485}
2486
2487static struct ioapic_enumerator mptable_ioapic_enumerator = {
2488 .ioapic_prio = IOAPIC_ENUM_PRIO_MPTABLE,
2489 .ioapic_probe = mptable_ioapic_probe,
2490 .ioapic_enumerate = mptable_ioapic_enumerate
2491};
2492
2493static void
2494mptable_ioapic_enum_register(void)
2495{
2496 ioapic_enumerator_register(&mptable_ioapic_enumerator);
2497}
2498SYSINIT(mptable_ioapic, SI_BOOT2_PRESMP, SI_ORDER_ANY,
2499 mptable_ioapic_enum_register, 0);
e90e7ac4
SZ
2500
2501void
2502mptable_pci_int_dump(void)
2503{
2504 const struct mptable_pci_int *pci_int;
2505
2506 TAILQ_FOREACH(pci_int, &mptable_pci_int_list, mpci_link) {
eab22b0b 2507 kprintf("MPTABLE: %d:%d INT%c -> IOAPIC %d.%d\n",
e90e7ac4
SZ
2508 pci_int->mpci_bus,
2509 pci_int->mpci_dev,
eab22b0b 2510 pci_int->mpci_pin + 'A',
e90e7ac4
SZ
2511 pci_int->mpci_ioapic_idx,
2512 pci_int->mpci_ioapic_pin);
2513 }
2514}
2515
2516int
2517mptable_pci_int_route(int bus, int dev, int pin, int intline)
2518{
2519 const struct mptable_pci_int *pci_int;
2520 int irq = -1;
2521
2522 KKASSERT(pin >= 1);
2523 --pin; /* zero based */
2524
2525 TAILQ_FOREACH(pci_int, &mptable_pci_int_list, mpci_link) {
2526 if (pci_int->mpci_bus == bus &&
2527 pci_int->mpci_dev == dev &&
2528 pci_int->mpci_pin == pin)
2529 break;
2530 }
2531 if (pci_int != NULL) {
2532 int gsi;
2533
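		/*
		 * Translate the IOAPIC index/pin to a GSI and look it up
		 * as level-triggered, active-low (the PCI default).
		 */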
2534 gsi = ioapic_gsi(pci_int->mpci_ioapic_idx,
2535 pci_int->mpci_ioapic_pin);
2536 if (gsi >= 0) {
2537 irq = ioapic_abi_find_gsi(gsi,
2538 INTR_TRIGGER_LEVEL, INTR_POLARITY_LOW);
2539 }
2540 }
2541
2542 if (irq < 0) {
40fd5939
SZ
2543 if (bootverbose) {
2544 kprintf("MPTABLE: fixed interrupt routing "
eab22b0b 2545 "for %d:%d INT%c\n", bus, dev, pin + 'A');
40fd5939 2546 }
e90e7ac4
SZ
2547
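		/*
		 * No MP table entry matched; fall back to the interrupt
		 * line the BIOS programmed into the device (intline).
		 */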
2548 irq = ioapic_abi_find_irq(intline,
2549 INTR_TRIGGER_LEVEL, INTR_POLARITY_LOW);
2550 }
eab22b0b
SZ
2551
2552 if (irq >= 0 && bootverbose) {
2553 kprintf("MPTABLE: %d:%d INT%c routed to irq %d\n",
2554 bus, dev, pin + 'A', irq);
2555 }
e90e7ac4
SZ
2556 return irq;
2557}