i386/mp_machdep.c: Remove unused code
[dragonfly.git] / sys / platform / pc32 / i386 / mp_machdep.c
1/*
2 * Copyright (c) 1996, by Steve Passe
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. The name of the developer may NOT be used to endorse or promote products
11 * derived from this software without specific prior written permission.
12 *
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 *
25 * $FreeBSD: src/sys/i386/i386/mp_machdep.c,v 1.115.2.15 2003/03/14 21:22:35 jhb Exp $
26 * $DragonFly: src/sys/platform/pc32/i386/mp_machdep.c,v 1.60 2008/06/07 12:03:52 mneumann Exp $
27 */
28
29#include "opt_cpu.h"
30
31#include <sys/param.h>
32#include <sys/systm.h>
33#include <sys/kernel.h>
34#include <sys/sysctl.h>
35#include <sys/malloc.h>
36#include <sys/memrange.h>
37#include <sys/cons.h> /* cngetc() */
38#include <sys/machintr.h>
39
40#include <vm/vm.h>
41#include <vm/vm_param.h>
42#include <vm/pmap.h>
43#include <vm/vm_kern.h>
44#include <vm/vm_extern.h>
45#include <sys/lock.h>
46#include <vm/vm_map.h>
47#include <sys/user.h>
48#ifdef GPROF
49#include <sys/gmon.h>
50#endif
51
52#include <sys/mplock2.h>
53
54#include <machine/smp.h>
55#include <machine_base/apic/apicreg.h>
56#include <machine/atomic.h>
57#include <machine/cpufunc.h>
58#include <machine/cputypes.h>
59#include <machine_base/apic/ioapic_abi.h>
60#include <machine_base/apic/mpapic.h>
61#include <machine/psl.h>
62#include <machine/segments.h>
63#include <machine/tss.h>
64#include <machine/specialreg.h>
65#include <machine/globaldata.h>
66#include <machine/pmap_inval.h>
67
68#include <machine/md_var.h> /* setidt() */
69#include <machine_base/icu/icu.h> /* IPIs */
70#include <machine/intr_machdep.h> /* IPIs */
71
984263bc
MD
72#define WARMBOOT_TARGET 0
73#define WARMBOOT_OFF (KERNBASE + 0x0467)
74#define WARMBOOT_SEG (KERNBASE + 0x0469)
75
984263bc 76#define BIOS_BASE (0xf0000)
1df86978 77#define BIOS_BASE2 (0xe0000)
984263bc 78#define BIOS_SIZE (0x10000)
984263bc
MD
79#define BIOS_COUNT (BIOS_SIZE/4)
80
81#define CMOS_REG (0x70)
82#define CMOS_DATA (0x71)
83#define BIOS_RESET (0x0f)
84#define BIOS_WARM (0x0a)
85
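/*
 * Editor's note: writing BIOS_WARM into the CMOS shutdown status byte
 * (index BIOS_RESET, 0x0f) makes the BIOS perform a warm start after an
 * INIT IPI, jumping through the real-mode warm-boot vector at 0040:0067
 * (mapped at WARMBOOT_OFF/WARMBOOT_SEG above), which start_all_aps()
 * points at the AP trampoline installed by install_ap_tramp().
 */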
86#define PROCENTRY_FLAG_EN 0x01
87#define PROCENTRY_FLAG_BP 0x02
88#define IOAPICENTRY_FLAG_EN 0x01
89
90
91/* MP Floating Pointer Structure */
92typedef struct MPFPS {
93 char signature[4];
981bebd1 94 u_int32_t pap;
984263bc
MD
95 u_char length;
96 u_char spec_rev;
97 u_char checksum;
98 u_char mpfb1;
99 u_char mpfb2;
100 u_char mpfb3;
101 u_char mpfb4;
102 u_char mpfb5;
103} *mpfps_t;
104
105/* MP Configuration Table Header */
106typedef struct MPCTH {
107 char signature[4];
108 u_short base_table_length;
109 u_char spec_rev;
110 u_char checksum;
111 u_char oem_id[8];
112 u_char product_id[12];
113 void *oem_table_pointer;
114 u_short oem_table_size;
115 u_short entry_count;
116 void *apic_address;
117 u_short extended_table_length;
118 u_char extended_table_checksum;
119 u_char reserved;
120} *mpcth_t;
121
122
123typedef struct PROCENTRY {
124 u_char type;
125 u_char apic_id;
126 u_char apic_version;
127 u_char cpu_flags;
128 u_long cpu_signature;
129 u_long feature_flags;
130 u_long reserved1;
131 u_long reserved2;
132} *proc_entry_ptr;
133
134typedef struct BUSENTRY {
135 u_char type;
136 u_char bus_id;
137 char bus_type[6];
138} *bus_entry_ptr;
139
140typedef struct IOAPICENTRY {
141 u_char type;
142 u_char apic_id;
143 u_char apic_version;
144 u_char apic_flags;
145 void *apic_address;
146} *io_apic_entry_ptr;
147
148typedef struct INTENTRY {
149 u_char type;
150 u_char int_type;
151 u_short int_flags;
152 u_char src_bus_id;
153 u_char src_bus_irq;
154 u_char dst_apic_id;
155 u_char dst_apic_int;
156} *int_entry_ptr;
157
158/* descriptions of MP basetable entries */
159typedef struct BASETABLE_ENTRY {
160 u_char type;
161 u_char length;
162 char name[16];
163} basetable_entry;
164
981bebd1
SZ
165struct mptable_pos {
166 mpfps_t mp_fps;
167 mpcth_t mp_cth;
168 vm_size_t mp_cth_mapsz;
169};
170
c455a23f
SZ
171#define MPTABLE_POS_USE_DEFAULT(mpt) \
172 ((mpt)->mp_fps->mpfb1 != 0 || (mpt)->mp_cth == NULL)
173
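/*
 * Editor's gloss: per the MP 1.4 spec, a non-zero mpfb1 in the floating
 * pointer structure selects one of the pre-defined "default
 * configurations" instead of a configuration table, so either condition
 * above means there is no usable MP configuration table header.
 */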
e0fd357f
SZ
174struct mptable_bus {
175 int mb_id;
176 int mb_type; /* MPTABLE_BUS_ */
177 TAILQ_ENTRY(mptable_bus) mb_link;
178};
179
180#define MPTABLE_BUS_ISA 0
181#define MPTABLE_BUS_PCI 1
182
183struct mptable_bus_info {
184 TAILQ_HEAD(, mptable_bus) mbi_list;
185};
186
187struct mptable_pci_int {
188 int mpci_bus;
189 int mpci_dev;
190 int mpci_pin;
191
6b881b58 192 int mpci_ioapic_idx;
e0fd357f
SZ
193 int mpci_ioapic_pin;
194 TAILQ_ENTRY(mptable_pci_int) mpci_link;
195};
196
6b881b58
SZ
197struct mptable_ioapic {
198 int mio_idx;
199 int mio_apic_id;
200 uint32_t mio_addr;
0471bb0e
SZ
201 int mio_gsi_base;
202 int mio_npin;
6b881b58
SZ
203 TAILQ_ENTRY(mptable_ioapic) mio_link;
204};
205
fa058384
SZ
206typedef int (*mptable_iter_func)(void *, const void *, int);
207
984263bc
MD
208/*
209 * this code MUST be enabled here and in mpboot.s.
210 * it follows the very early stages of AP boot by placing values in CMOS ram.
211 * it NORMALLY will never be needed, hence the primitive method of enabling it.
212 *
984263bc 213 */
7d34994c 214#if defined(CHECK_POINTS)
984263bc
MD
215#define CHECK_READ(A) (outb(CMOS_REG, (A)), inb(CMOS_DATA))
216#define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))
217
218#define CHECK_INIT(D); \
219 CHECK_WRITE(0x34, (D)); \
220 CHECK_WRITE(0x35, (D)); \
221 CHECK_WRITE(0x36, (D)); \
222 CHECK_WRITE(0x37, (D)); \
223 CHECK_WRITE(0x38, (D)); \
224 CHECK_WRITE(0x39, (D));
225
226#define CHECK_PRINT(S); \
26be20a0 227 kprintf("%s: %d, %d, %d, %d, %d, %d\n", \
984263bc
MD
228 (S), \
229 CHECK_READ(0x34), \
230 CHECK_READ(0x35), \
231 CHECK_READ(0x36), \
232 CHECK_READ(0x37), \
233 CHECK_READ(0x38), \
234 CHECK_READ(0x39));
235
236#else /* CHECK_POINTS */
237
238#define CHECK_INIT(D)
239#define CHECK_PRINT(S)
240
241#endif /* CHECK_POINTS */
242
243/*
244 * Values to send to the POST hardware.
245 */
246#define MP_BOOTADDRESS_POST 0x10
247#define MP_PROBE_POST 0x11
248#define MPTABLE_PASS1_POST 0x12
249
250#define MP_START_POST 0x13
251#define MP_ENABLE_POST 0x14
252#define MPTABLE_PASS2_POST 0x15
253
254#define START_ALL_APS_POST 0x16
255#define INSTALL_AP_TRAMP_POST 0x17
256#define START_AP_POST 0x18
257
258#define MP_ANNOUNCE_POST 0x19
259
984263bc
MD
260/** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
261int current_postcode;
262
263/** XXX FIXME: what system files declare these??? */
264extern struct region_descriptor r_gdt, r_idt;
265
266int mp_naps; /* # of Application Processors */
984263bc
MD
267extern int nkpt;
268
269u_int32_t cpu_apic_versions[MAXCPU];
374133e3 270int64_t tsc0_offset;
0b698dca 271extern int64_t tsc_offsets[];
984263bc 272
1876681a
SZ
273extern u_long ebda_addr;
274
30c5f287 275#ifdef SMP /* APIC-IO */
8a8d5d85 276struct apic_intmapinfo int_to_apicintpin[APIC_INTMAPSIZE];
97359a5b 277#endif
984263bc 278
984263bc
MD
279/*
280 * APIC ID logical/physical mapping structures.
281 * We oversize these to simplify boot-time config.
282 */
283int cpu_num_to_apic_id[NAPICID];
984263bc
MD
284int apic_id_to_logical[NAPICID];
285
984263bc
MD
286/* AP uses this during bootstrap. Do not staticize. */
287char *bootSTK;
288static int bootAP;
289
290/* Hotwire a 0->4MB V==P mapping */
291extern pt_entry_t *KPTphys;
292
f13b5eec
MD
293/*
294 * SMP page table page. Set up by locore to point to a page table
295 * page from which we allocate per-cpu privatespace areas, io_apic
296 * mappings, and so forth.
297 */
298
299#define IO_MAPPING_START_INDEX \
300 (SMP_MAXCPU * sizeof(struct privatespace) / PAGE_SIZE)
301
984263bc 302extern pt_entry_t *SMPpt;
f13b5eec 303static int SMPpt_alloc_index = IO_MAPPING_START_INDEX;
984263bc
MD
304
305struct pcb stoppcbs[MAXCPU];
306
fa058384
SZ
307static basetable_entry basetable_entry_types[] =
308{
309 {0, 20, "Processor"},
310 {1, 8, "Bus"},
311 {2, 8, "I/O APIC"},
312 {3, 8, "I/O INT"},
313 {4, 8, "Local INT"}
314};
315
984263bc
MD
316/*
317 * Local data and functions.
318 */
319
984263bc
MD
320static u_int boot_address;
321static u_int base_memory;
41a01a4d 322static int mp_finish;
52596b13 323static int mp_finish_lapic;
984263bc 324
984263bc
MD
325static void mp_enable(u_int boot_addr);
326
fa058384
SZ
327static int mptable_iterate_entries(const mpcth_t,
328 mptable_iter_func, void *);
34e6fa63 329static int mptable_search(void);
3aba8f73 330static int mptable_search_sig(u_int32_t target, int count);
da23a592 331static int mptable_hyperthread_fixup(cpumask_t, int);
fe423084 332static int mptable_map(struct mptable_pos *);
981bebd1 333static void mptable_unmap(struct mptable_pos *);
e0fd357f
SZ
334static void mptable_bus_info_alloc(const mpcth_t,
335 struct mptable_bus_info *);
336static void mptable_bus_info_free(struct mptable_bus_info *);
3aba8f73 337
281d9482
SZ
338static int mptable_lapic_probe(struct lapic_enumerator *);
339static void mptable_lapic_enumerate(struct lapic_enumerator *);
340static void mptable_lapic_default(void);
341
7da2706b
SZ
342static int mptable_ioapic_probe(struct ioapic_enumerator *);
343static void mptable_ioapic_enumerate(struct ioapic_enumerator *);
344
984263bc
MD
345static int start_all_aps(u_int boot_addr);
346static void install_ap_tramp(u_int boot_addr);
bb467734
MD
347static int start_ap(struct mdglobaldata *gd, u_int boot_addr, int smibest);
348static int smitest(void);
984263bc 349
41a01a4d 350static cpumask_t smp_startup_mask = 1; /* which cpus have been started */
52596b13 351static cpumask_t smp_lapic_mask = 1; /* which cpus have lapic been inited */
0f7a3396
MD
352cpumask_t smp_active_mask = 1; /* which cpus are ready for IPIs etc? */
353SYSCTL_INT(_machdep, OID_AUTO, smp_active, CTLFLAG_RD, &smp_active_mask, 0, "");
354
9d758cc4
SZ
355int imcr_present;
356
fe423084 357static vm_paddr_t mptable_fps_phyaddr;
c455a23f 358static int mptable_use_default;
6b881b58 359static TAILQ_HEAD(mptable_pci_int_list, mptable_pci_int) mptable_pci_int_list =
e0fd357f 360 TAILQ_HEAD_INITIALIZER(mptable_pci_int_list);
6b881b58
SZ
361static TAILQ_HEAD(mptable_ioapic_list, mptable_ioapic) mptable_ioapic_list =
362 TAILQ_HEAD_INITIALIZER(mptable_ioapic_list);
fe423084 363
984263bc
MD
364/*
365 * Calculate usable address in base memory for AP trampoline code.
366 */
367u_int
368mp_bootaddress(u_int basemem)
369{
370 POSTCODE(MP_BOOTADDRESS_POST);
371
c0c5de70 372 base_memory = basemem;
984263bc
MD
373
374 boot_address = base_memory & ~0xfff; /* round down to 4k boundary */
375 if ((base_memory - boot_address) < bootMP_size)
376 boot_address -= 4096; /* not enough, lower by 4k */
377
378 return boot_address;
379}
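/*
 * Worked example (editor's addition): with basemem = 0x9fc00 (639KB)
 * the trampoline address rounds down to 0x9f000, leaving 0xc00 bytes of
 * headroom; if bootMP_size were larger than that, boot_address would
 * drop one more page, to 0x9e000.
 */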
380
381
fe423084 382static void
34e6fa63
SZ
383mptable_probe(void)
384{
c455a23f
SZ
385 struct mptable_pos mpt;
386 int error;
387
fe423084 388 KKASSERT(mptable_fps_phyaddr == 0);
c455a23f 389
fe423084 390 mptable_fps_phyaddr = mptable_search();
c455a23f
SZ
391 if (mptable_fps_phyaddr == 0)
392 return;
393
394 error = mptable_map(&mpt);
395 if (error) {
396 mptable_fps_phyaddr = 0;
397 return;
398 }
399
400 if (MPTABLE_POS_USE_DEFAULT(&mpt)) {
401 kprintf("MPTABLE: use default configuration\n");
402 mptable_use_default = 1;
403 }
9d758cc4
SZ
404 if (mpt.mp_fps->mpfb2 & 0x80)
405 imcr_present = 1;
c455a23f
SZ
406
407 mptable_unmap(&mpt);
34e6fa63 408}
fe423084 409SYSINIT(mptable_probe, SI_BOOT2_PRESMP, SI_ORDER_FIRST, mptable_probe, 0);
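/*
 * Editor's note: bit 7 of MP feature byte 2 (mpfb2) is the MP spec's
 * IMCRP flag, meaning the board routes interrupts through the PIC by
 * default and provides an Interrupt Mode Configuration Register that
 * must be switched before the APICs can deliver interrupts; that is
 * what imcr_present records for the interrupt code.
 */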
34e6fa63 410
984263bc
MD
411/*
412 * Look for an Intel MP spec table (i.e., SMP-capable hardware).
413 */
cb00b5c4 414static int
34e6fa63 415mptable_search(void)
984263bc
MD
416{
417 int x;
984263bc 418 u_int32_t target;
f13b5eec
MD
419
420 /*
421 * Make sure our SMPpt[] page table is big enough to hold all the
422 * mappings we need.
423 */
424 KKASSERT(IO_MAPPING_START_INDEX < NPTEPG - 2);
984263bc
MD
425
426 POSTCODE(MP_PROBE_POST);
427
428 /* see if EBDA exists */
1876681a 429 if (ebda_addr != 0) {
984263bc 430 /* search first 1K of EBDA */
1876681a 431 target = (u_int32_t)ebda_addr;
3aba8f73 432 if ((x = mptable_search_sig(target, 1024 / 4)) > 0)
aeb48299 433 return x;
984263bc
MD
434 } else {
435 /* last 1K of base memory, effective 'top of base' passed in */
aeb48299 436 target = (u_int32_t)(base_memory - 0x400);
3aba8f73 437 if ((x = mptable_search_sig(target, 1024 / 4)) > 0)
aeb48299 438 return x;
984263bc
MD
439 }
440
441 /* search the BIOS */
aeb48299 442 target = (u_int32_t)BIOS_BASE;
3aba8f73 443 if ((x = mptable_search_sig(target, BIOS_COUNT)) > 0)
aeb48299 444 return x;
984263bc 445
1df86978
SZ
446 /* search the extended BIOS */
447 target = (u_int32_t)BIOS_BASE2;
448 if ((x = mptable_search_sig(target, BIOS_COUNT)) > 0)
449 return x;
450
984263bc 451 /* nothing found */
984263bc 452 return 0;
984263bc
MD
453}
454
fa058384
SZ
455static int
456mptable_iterate_entries(const mpcth_t cth, mptable_iter_func func, void *arg)
457{
458 int count, total_size;
459 const void *position;
460
461 KKASSERT(cth->base_table_length >= sizeof(struct MPCTH));
462 total_size = cth->base_table_length - sizeof(struct MPCTH);
463 position = (const uint8_t *)cth + sizeof(struct MPCTH);
464 count = cth->entry_count;
465
466 while (count--) {
467 int type, error;
468
469 KKASSERT(total_size >= 0);
470 if (total_size == 0) {
471 kprintf("invalid base MP table, "
472 "entry count and length mismatch\n");
473 return EINVAL;
474 }
475
476 type = *(const uint8_t *)position;
477 switch (type) {
478 case 0: /* processor_entry */
479 case 1: /* bus_entry */
480 case 2: /* io_apic_entry */
481 case 3: /* int_entry */
482 case 4: /* int_entry */
483 break;
484 default:
485 kprintf("unknown base MP table entry type %d\n", type);
486 return EINVAL;
487 }
488
489 if (total_size < basetable_entry_types[type].length) {
490 kprintf("invalid base MP table length, "
491 "does not contain all entries\n");
492 return EINVAL;
493 }
494 total_size -= basetable_entry_types[type].length;
495
496 error = func(arg, position, type);
497 if (error)
498 return error;
499
500 position = (const uint8_t *)position +
501 basetable_entry_types[type].length;
502 }
503 return 0;
504}
505
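/*
 * Editor's note: this iterator drives all of the later passes over the
 * configuration table; callbacks such as mptable_lapic_pass1_callback()
 * and mptable_bus_info_callback() are handed each entry together with
 * its type and return 0 to continue or an errno to abort the walk.
 */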
984263bc
MD
506
507/*
508 * Start up the SMP processors.
509 */
510void
511mp_start(void)
512{
513 POSTCODE(MP_START_POST);
50bc991e 514 mp_enable(boot_address);
984263bc
MD
515}
516
517
518/*
519 * Print various information about the SMP system hardware and setup.
520 */
521void
522mp_announce(void)
523{
524 int x;
525
526 POSTCODE(MP_ANNOUNCE_POST);
527
26be20a0
SW
528 kprintf("DragonFly/MP: Multiprocessor motherboard\n");
529 kprintf(" cpu0 (BSP): apic id: %2d", CPU_TO_ID(0));
8629c4ea 530 kprintf(", version: 0x%08x\n", cpu_apic_versions[0]);
984263bc 531 for (x = 1; x <= mp_naps; ++x) {
26be20a0 532 kprintf(" cpu%d (AP): apic id: %2d", x, CPU_TO_ID(x));
8629c4ea 533 kprintf(", version: 0x%08x\n", cpu_apic_versions[x]);
984263bc
MD
534 }
535
6ac31e9d
SZ
536 if (!apic_io_enable)
537 kprintf(" Warning: APIC I/O disabled\n");
984263bc
MD
538}
539
540/*
541 * AP cpus call this to sync up protected mode.
542 *
543 * WARNING! We must ensure that the cpu is sufficiently initialized to
544 * be able to use the FPU for our optimized bzero/bcopy code before
545 * we enter more mainstream C code.
546 *
547 * WARNING! %fs is not set up on entry. This routine sets up %fs.
984263bc
MD
548 */
549void
550init_secondary(void)
551{
552 int gsel_tss;
553 int x, myid = bootAP;
554 u_int cr0;
8a8d5d85 555 struct mdglobaldata *md;
0f7a3396 556 struct privatespace *ps;
984263bc 557
0f7a3396
MD
558 ps = &CPU_prvspace[myid];
559
560 gdt_segs[GPRIV_SEL].ssd_base = (int)ps;
984263bc 561 gdt_segs[GPROC0_SEL].ssd_base =
0f7a3396
MD
562 (int) &ps->mdglobaldata.gd_common_tss;
563 ps->mdglobaldata.mi.gd_prvspace = ps;
984263bc
MD
564
565 for (x = 0; x < NGDT; x++) {
566 ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x].sd);
567 }
568
569 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
570 r_gdt.rd_base = (int) &gdt[myid * NGDT];
571 lgdt(&r_gdt); /* does magic intra-segment return */
572
573 lidt(&r_idt);
574
575 lldt(_default_ldt);
7b95be2a 576 mdcpu->gd_currentldt = _default_ldt;
984263bc
MD
577
578 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
579 gdt[myid * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;
8a8d5d85 580
0f7a3396 581 md = mdcpu; /* loaded through %fs:0 (mdglobaldata.mi.gd_prvspace)*/
8a8d5d85
MD
582
583 md->gd_common_tss.tss_esp0 = 0; /* not used until after switch */
584 md->gd_common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
585 md->gd_common_tss.tss_ioopt = (sizeof md->gd_common_tss) << 16;
586 md->gd_tss_gdt = &gdt[myid * NGDT + GPROC0_SEL].sd;
587 md->gd_common_tssd = *md->gd_tss_gdt;
984263bc
MD
588 ltr(gsel_tss);
589
590 /*
591 * Set to a known state:
592 * Set by mpboot.s: CR0_PG, CR0_PE
593 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
594 */
595 cr0 = rcr0();
596 cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
597 load_cr0(cr0);
7160572f 598 pmap_set_opt(); /* PSE/4MB pages, etc */
984263bc 599
7160572f
MD
600 /* set up CPU registers and state */
601 cpu_setregs();
602
603 /* set up FPU state on the AP */
604 npxinit(__INITIAL_NPXCW__);
605
606 /* set up SSE registers */
607 enable_sse();
984263bc
MD
608}
609
984263bc
MD
610/*******************************************************************
611 * local functions and data
612 */
613
614/*
615 * start the SMP system
616 */
617static void
618mp_enable(u_int boot_addr)
619{
984263bc
MD
620 POSTCODE(MP_ENABLE_POST);
621
281d9482 622 lapic_config();
984263bc 623
a40ec003
SZ
624 /* Initialize BSP's local APIC */
625 lapic_init(TRUE);
626
52596b13
SZ
627 /* start each Application Processor */
628 start_all_aps(boot_addr);
629
65b2387f
SZ
630 if (apic_io_enable)
631 ioapic_config();
632
a40ec003
SZ
633 /* Finalize PIC */
634 MachIntrABI.finalize();
984263bc
MD
635}
636
637
638/*
639 * look for the MP spec signature
640 */
641
642/* string defined by the Intel MP Spec as identifying the MP table */
643#define MP_SIG 0x5f504d5f /* _MP_ */
644#define NEXT(X) ((X) += 4)
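/*
 * Editor's note: 0x5f504d5f is the ASCII string "_MP_" read as a
 * little-endian 32-bit word.  NEXT() advances the u_int32_t index by 4
 * (16 bytes), matching the paragraph alignment the MP spec requires of
 * the floating pointer structure, so only one dword per paragraph needs
 * to be compared.
 */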
645static int
3aba8f73 646mptable_search_sig(u_int32_t target, int count)
984263bc 647{
0f85efa2
SZ
648 vm_size_t map_size;
649 u_int32_t *addr;
650 int x, ret;
984263bc 651
aeb48299
SZ
652 KKASSERT(target != 0);
653
0f85efa2
SZ
654 map_size = count * sizeof(u_int32_t);
655 addr = pmap_mapdev((vm_paddr_t)target, map_size);
984263bc 656
aeb48299 657 ret = 0;
0f85efa2
SZ
658 for (x = 0; x < count; NEXT(x)) {
659 if (addr[x] == MP_SIG) {
660 /* make array index a byte index */
661 ret = target + (x * sizeof(u_int32_t));
662 break;
663 }
664 }
aeb48299 665
0f85efa2
SZ
666 pmap_unmapdev((vm_offset_t)addr, map_size);
667 return ret;
984263bc
MD
668}
669
a0873f07 670static int processor_entry (const struct PROCENTRY *entry, int cpu);
984263bc 671
984263bc
MD
672/*
673 * Check if we should perform a hyperthreading "fix-up" to
674 * enumerate any logical CPUs that aren't already listed
675 * in the table.
676 *
677 * XXX: We assume that all of the physical CPUs in the
678 * system have the same number of logical CPUs.
679 *
680 * XXX: We assume that APIC IDs are allocated such that
681 * the APIC IDs for a physical processor are aligned
682 * with the number of logical CPUs in the processor.
683 */
44c36320 684static int
da23a592 685mptable_hyperthread_fixup(cpumask_t id_mask, int cpu_count)
984263bc 686{
44c36320 687 int i, id, lcpus_max, logical_cpus;
984263bc 688
984263bc 689 if ((cpu_feature & CPUID_HTT) == 0)
44c36320 690 return 0;
7ea07fd2
SZ
691
692 lcpus_max = (cpu_procinfo & CPUID_HTT_CORES) >> 16;
693 if (lcpus_max <= 1)
44c36320 694 return 0;
984263bc 695
90e8a35b 696 if (cpu_vendor_id == CPU_VENDOR_INTEL) {
7ea07fd2
SZ
697 /*
698 * INSTRUCTION SET REFERENCE, A-M (#253666)
699 * Page 3-181, Table 3-20
700 * "The nearest power-of-2 integer that is not smaller
701 * than EBX[23:16] is the number of unique initial APIC
702 * IDs reserved for addressing different logical
703 * processors in a physical package."
704 */
705 for (i = 0; ; ++i) {
706 if ((1 << i) >= lcpus_max) {
707 lcpus_max = 1 << i;
708 break;
709 }
710 }
711 }
712
44c36320
SZ
713 KKASSERT(cpu_count != 0);
714 if (cpu_count == lcpus_max) {
7ea07fd2 715 /* We have nothing to fix */
44c36320
SZ
716 return 0;
717 } else if (cpu_count == 1) {
7ea07fd2
SZ
718 /* XXX this may be incorrect */
719 logical_cpus = lcpus_max;
720 } else {
721 int cur, prev, dist;
722
723 /*
724 * Calculate the distances between two nearest
725 * APIC IDs. If all such distances are same,
726 * then it is the number of missing cpus that
727 * we are going to fill later.
728 */
729 dist = cur = prev = -1;
730 for (id = 0; id < MAXCPU; ++id) {
da23a592 731 if ((id_mask & CPUMASK(id)) == 0)
7ea07fd2
SZ
732 continue;
733
734 cur = id;
735 if (prev >= 0) {
736 int new_dist = cur - prev;
737
738 if (dist < 0)
739 dist = new_dist;
740
741 /*
742 * Make sure that all distances
743 * between two nearest APIC IDs
744 * are same.
745 */
746 if (dist != new_dist)
44c36320 747 return 0;
7ea07fd2
SZ
748 }
749 prev = cur;
750 }
751 if (dist == 1)
44c36320 752 return 0;
7ea07fd2
SZ
753
754 /* Must be power of 2 */
755 if (dist & (dist - 1))
44c36320 756 return 0;
7ea07fd2
SZ
757
758 /* Can't exceed CPU package capacity */
759 if (dist > lcpus_max)
760 logical_cpus = lcpus_max;
761 else
762 logical_cpus = dist;
763 }
764
984263bc
MD
765 /*
766 * For each APIC ID of a CPU that is set in the mask,
767 * scan the other candidate APIC ID's for this
768 * physical processor. If any of those ID's are
769 * already in the table, then kill the fixup.
770 */
7ea07fd2 771 for (id = 0; id < MAXCPU; id++) {
da23a592 772 if ((id_mask & CPUMASK(id)) == 0)
984263bc
MD
773 continue;
774 /* First, make sure we are on a logical_cpus boundary. */
775 if (id % logical_cpus != 0)
44c36320 776 return 0;
984263bc 777 for (i = id + 1; i < id + logical_cpus; i++)
da23a592 778 if ((id_mask & CPUMASK(i)) != 0)
44c36320 779 return 0;
984263bc 780 }
44c36320 781 return logical_cpus;
984263bc 782}
984263bc 783
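/*
 * Worked example of the fixup (editor's addition): if the MP table lists
 * enabled APIC IDs 0, 2, 4 and 6 while CPUID reports two logical CPUs
 * per package, every inter-ID distance is 2, so logical_cpus == 2 is
 * returned and mptable_lapic_pass2_callback() later synthesizes entries
 * for APIC IDs 1, 3, 5 and 7.
 */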
fa058384 784static int
fe423084 785mptable_map(struct mptable_pos *mpt)
981bebd1
SZ
786{
787 mpfps_t fps = NULL;
788 mpcth_t cth = NULL;
789 vm_size_t cth_mapsz = 0;
790
fe423084
SZ
791 KKASSERT(mptable_fps_phyaddr != 0);
792
fa058384
SZ
793 bzero(mpt, sizeof(*mpt));
794
fe423084 795 fps = pmap_mapdev(mptable_fps_phyaddr, sizeof(*fps));
981bebd1
SZ
796 if (fps->pap != 0) {
797 /*
798 * Map configuration table header to get
799 * the base table size
800 */
801 cth = pmap_mapdev(fps->pap, sizeof(*cth));
802 cth_mapsz = cth->base_table_length;
803 pmap_unmapdev((vm_offset_t)cth, sizeof(*cth));
804
fa058384
SZ
805 if (cth_mapsz < sizeof(*cth)) {
806 kprintf("invalid base MP table length %d\n",
807 (int)cth_mapsz);
808 pmap_unmapdev((vm_offset_t)fps, sizeof(*fps));
809 return EINVAL;
810 }
811
981bebd1
SZ
812 /*
813 * Map the base table
814 */
815 cth = pmap_mapdev(fps->pap, cth_mapsz);
816 }
817
818 mpt->mp_fps = fps;
819 mpt->mp_cth = cth;
820 mpt->mp_cth_mapsz = cth_mapsz;
fa058384
SZ
821
822 return 0;
981bebd1
SZ
823}
824
825static void
826mptable_unmap(struct mptable_pos *mpt)
827{
828 if (mpt->mp_cth != NULL) {
829 pmap_unmapdev((vm_offset_t)mpt->mp_cth, mpt->mp_cth_mapsz);
830 mpt->mp_cth = NULL;
831 mpt->mp_cth_mapsz = 0;
832 }
833 if (mpt->mp_fps != NULL) {
834 pmap_unmapdev((vm_offset_t)mpt->mp_fps, sizeof(*mpt->mp_fps));
835 mpt->mp_fps = NULL;
836 }
837}
838
a9112655
SZ
839void
840mp_set_cpuids(int cpu_id, int apic_id)
841{
842 CPU_TO_ID(cpu_id) = apic_id;
843 ID_TO_CPU(apic_id) = cpu_id;
844}
845
984263bc 846static int
a0873f07 847processor_entry(const struct PROCENTRY *entry, int cpu)
984263bc 848{
bd8aa7e2
SZ
849 KKASSERT(cpu > 0);
850
984263bc
MD
851 /* check for usability */
852 if (!(entry->cpu_flags & PROCENTRY_FLAG_EN))
853 return 0;
854
984263bc
MD
855 /* check for BSP flag */
856 if (entry->cpu_flags & PROCENTRY_FLAG_BP) {
a9112655 857 mp_set_cpuids(0, entry->apic_id);
984263bc
MD
858 return 0; /* it's already been counted */
859 }
860
861 /* add another AP to list, if less than max number of CPUs */
862 else if (cpu < MAXCPU) {
a9112655 863 mp_set_cpuids(cpu, entry->apic_id);
984263bc
MD
864 return 1;
865 }
866
867 return 0;
868}
869
f13b5eec
MD
870/*
871 * Map a physical memory address representing I/O into KVA. The I/O
872 * block is assumed not to cross a page boundary.
873 */
874void *
01616f8b 875ioapic_map(vm_paddr_t pa)
f13b5eec
MD
876{
877 vm_offset_t vaddr;
878 int pgeflag;
879 int i;
880
881 KKASSERT(pa < 0x100000000LL);
882
883 pgeflag = 0; /* not used for SMP yet */
884
885 /*
886 * If the requested physical address has already been incidently
887 * mapped, just use the existing mapping. Otherwise create a new
888 * mapping.
889 */
890 for (i = IO_MAPPING_START_INDEX; i < SMPpt_alloc_index; ++i) {
891 if (((vm_offset_t)SMPpt[i] & PG_FRAME) ==
892 ((vm_offset_t)pa & PG_FRAME)) {
893 break;
894 }
895 }
896 if (i == SMPpt_alloc_index) {
897 if (i == NPTEPG - 2) {
898 panic("ioapic_map: We ran out of space"
899 " in SMPpt[]!");
900 }
5277b9f6 901 SMPpt[i] = (pt_entry_t)(PG_V | PG_RW | PG_N | pgeflag |
f13b5eec
MD
902 ((vm_offset_t)pa & PG_FRAME));
903 ++SMPpt_alloc_index;
904 }
905 vaddr = (vm_offset_t)CPU_prvspace + (i * PAGE_SIZE) +
906 ((vm_offset_t)pa & PAGE_MASK);
907 return ((void *)vaddr);
908}
909
984263bc
MD
910/*
911 * start each AP in our list
912 */
913static int
914start_all_aps(u_int boot_addr)
915{
b45759e1
MD
916 int x, i, pg;
917 int shift;
bb467734
MD
918 int smicount;
919 int smibest;
920 int smilast;
984263bc
MD
921 u_char mpbiosreason;
922 u_long mpbioswarmvec;
8a8d5d85 923 struct mdglobaldata *gd;
0f7a3396 924 struct privatespace *ps;
984263bc
MD
925 char *stack;
926 uintptr_t kptbase;
927
928 POSTCODE(START_ALL_APS_POST);
929
984263bc
MD
930 /* install the AP 1st level boot code */
931 install_ap_tramp(boot_addr);
932
933
934 /* save the current value of the warm-start vector */
935 mpbioswarmvec = *((u_long *) WARMBOOT_OFF);
984263bc
MD
936 outb(CMOS_REG, BIOS_RESET);
937 mpbiosreason = inb(CMOS_DATA);
984263bc 938
bb467734
MD
939 /* setup a vector to our boot code */
940 *((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
941 *((volatile u_short *) WARMBOOT_SEG) = (boot_addr >> 4);
942 outb(CMOS_REG, BIOS_RESET);
943 outb(CMOS_DATA, BIOS_WARM); /* 'warm-start' */
944
945 /*
946 * If we have a TSC we can figure out the SMI interrupt rate.
947 * The SMI does not necessarily use a constant rate. Spend
948 * up to 250ms trying to figure it out.
949 */
950 smibest = 0;
951 if (cpu_feature & CPUID_TSC) {
952 set_apic_timer(275000);
953 smilast = read_apic_timer();
954 for (x = 0; x < 20 && read_apic_timer(); ++x) {
955 smicount = smitest();
956 if (smibest == 0 || smilast - smicount < smibest)
957 smibest = smilast - smicount;
958 smilast = smicount;
959 }
960 if (smibest > 250000)
961 smibest = 0;
962 if (smibest) {
963 smibest = smibest * (int64_t)1000000 /
964 get_apic_timer_frequency();
965 }
966 }
967 if (smibest)
968 kprintf("SMI Frequency (worst case): %d Hz (%d us)\n",
969 1000000 / smibest, smibest);
970
971
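/*
 * Editor's gloss on the measurement above: the APIC timer counts down,
 * and smitest() returns its value as soon as a large jump between
 * back-to-back TSC reads (an SMI stealing cycles) is observed, so
 * smilast - smicount is the timer interval between consecutive SMIs.
 * The smallest interval seen over the samples, scaled into microseconds
 * using the APIC timer frequency, becomes smibest and is used by
 * start_ap() to choose a safe INIT->STARTUP delay.
 */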
984263bc
MD
972 /* set up temporary P==V mapping for AP boot */
973 /* XXX this is a hack, we should boot the AP on its own stack/PTD */
974 kptbase = (uintptr_t)(void *)KPTphys;
a44bdeec 975 for (x = 0; x < NKPT; x++) {
984263bc
MD
976 PTD[x] = (pd_entry_t)(PG_V | PG_RW |
977 ((kptbase + x * PAGE_SIZE) & PG_FRAME));
a44bdeec 978 }
0f7a3396 979 cpu_invltlb();
984263bc
MD
980
981 /* start each AP */
982 for (x = 1; x <= mp_naps; ++x) {
983
984 /* This is a bit verbose, it will go away soon. */
985
986 /* first page of AP's private space */
987 pg = x * i386_btop(sizeof(struct privatespace));
988
81c04d07 989 /* allocate new private data page(s) */
e4846942 990 gd = (struct mdglobaldata *)kmem_alloc(&kernel_map,
81c04d07 991 MDGLOBALDATA_BASEALLOC_SIZE);
984263bc 992 /* wire it into the private page table page */
81c04d07
MD
993 for (i = 0; i < MDGLOBALDATA_BASEALLOC_SIZE; i += PAGE_SIZE) {
994 SMPpt[pg + i / PAGE_SIZE] = (pt_entry_t)
995 (PG_V | PG_RW | vtophys_pte((char *)gd + i));
996 }
997 pg += MDGLOBALDATA_BASEALLOC_PAGES;
998
999 SMPpt[pg + 0] = 0; /* *gd_CMAP1 */
1000 SMPpt[pg + 1] = 0; /* *gd_CMAP2 */
1001 SMPpt[pg + 2] = 0; /* *gd_CMAP3 */
1002 SMPpt[pg + 3] = 0; /* *gd_PMAP1 */
984263bc
MD
1003
1004 /* allocate and set up an idle stack data page */
e4846942 1005 stack = (char *)kmem_alloc(&kernel_map, UPAGES*PAGE_SIZE);
8a8d5d85 1006 for (i = 0; i < UPAGES; i++) {
81c04d07 1007 SMPpt[pg + 4 + i] = (pt_entry_t)
b5b32410 1008 (PG_V | PG_RW | vtophys_pte(PAGE_SIZE * i + stack));
8a8d5d85 1009 }
984263bc 1010
8a8d5d85
MD
1011 gd = &CPU_prvspace[x].mdglobaldata; /* official location */
1012 bzero(gd, sizeof(*gd));
0f7a3396 1013 gd->mi.gd_prvspace = ps = &CPU_prvspace[x];
8a8d5d85 1014
984263bc 1015 /* prime data page for it to use */
8a8d5d85 1016 mi_gdinit(&gd->mi, x);
8ad65e08 1017 cpu_gdinit(gd, x);
81c04d07
MD
1018 gd->gd_CMAP1 = &SMPpt[pg + 0];
1019 gd->gd_CMAP2 = &SMPpt[pg + 1];
1020 gd->gd_CMAP3 = &SMPpt[pg + 2];
1021 gd->gd_PMAP1 = &SMPpt[pg + 3];
0f7a3396
MD
1022 gd->gd_CADDR1 = ps->CPAGE1;
1023 gd->gd_CADDR2 = ps->CPAGE2;
1024 gd->gd_CADDR3 = ps->CPAGE3;
1025 gd->gd_PADDR1 = (unsigned *)ps->PPAGE1;
9388fcaa
MD
1026
1027 /*
1028 * Per-cpu pmap for get_ptbase().
1029 */
1030 gd->gd_GDADDR1= (unsigned *)
1031 kmem_alloc_nofault(&kernel_map, SEG_SIZE, SEG_SIZE);
1032 gd->gd_GDMAP1 = &PTD[(vm_offset_t)gd->gd_GDADDR1 >> PDRSHIFT];
1033
e4846942 1034 gd->mi.gd_ipiq = (void *)kmem_alloc(&kernel_map, sizeof(lwkt_ipiq) * (mp_naps + 1));
96728c05 1035 bzero(gd->mi.gd_ipiq, sizeof(lwkt_ipiq) * (mp_naps + 1));
984263bc 1036
8a8d5d85
MD
1037 /*
1038 * Setup the AP boot stack
1039 */
0f7a3396 1040 bootSTK = &ps->idlestack[UPAGES*PAGE_SIZE/2];
984263bc
MD
1041 bootAP = x;
1042
1043 /* attempt to start the Application Processor */
1044 CHECK_INIT(99); /* setup checkpoints */
bb467734 1045 if (!start_ap(gd, boot_addr, smibest)) {
26be20a0 1046 kprintf("AP #%d (PHY# %d) failed!\n", x, CPU_TO_ID(x));
984263bc
MD
1047 CHECK_PRINT("trace"); /* show checkpoints */
1048 /* better panic as the AP may be running loose */
26be20a0 1049 kprintf("panic y/n? [y] ");
984263bc
MD
1050 if (cngetc() != 'n')
1051 panic("bye-bye");
1052 }
1053 CHECK_PRINT("trace"); /* show checkpoints */
1054
1055 /* record its version info */
1056 cpu_apic_versions[x] = cpu_apic_versions[0];
984263bc
MD
1057 }
1058
0f7a3396
MD
1059 /* set ncpus to 1 + highest logical cpu. Not all may have come up */
1060 ncpus = x;
1061
b45759e1
MD
1062 /* ncpus2 -- ncpus rounded down to the nearest power of 2 */
1063 for (shift = 0; (1 << shift) <= ncpus; ++shift)
1064 ;
1065 --shift;
1066 ncpus2_shift = shift;
1067 ncpus2 = 1 << shift;
90100055
JH
1068 ncpus2_mask = ncpus2 - 1;
1069
b45759e1
MD
1070 /* ncpus_fit -- ncpus rounded up to the nearest power of 2 */
1071 if ((1 << shift) < ncpus)
1072 ++shift;
1073 ncpus_fit = 1 << shift;
1074 ncpus_fit_mask = ncpus_fit - 1;
1075
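/*
 * Illustrative example (editor's addition): with ncpus == 6 the loop
 * above leaves shift == 2, so ncpus2 == 4 (round down) and, after the
 * conditional increment, ncpus_fit == 8 (round up); the corresponding
 * masks are 3 and 7.
 */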
984263bc 1076 /* build our map of 'other' CPUs */
da23a592 1077 mycpu->gd_other_cpus = smp_startup_mask & ~CPUMASK(mycpu->gd_cpuid);
e4846942 1078 mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map, sizeof(lwkt_ipiq) * ncpus);
96728c05 1079 bzero(mycpu->gd_ipiq, sizeof(lwkt_ipiq) * ncpus);
984263bc
MD
1080
1081 /* fill in our (BSP) APIC version */
1082 cpu_apic_versions[0] = lapic.version;
1083
1084 /* restore the warmstart vector */
1085 *(u_long *) WARMBOOT_OFF = mpbioswarmvec;
984263bc
MD
1086 outb(CMOS_REG, BIOS_RESET);
1087 outb(CMOS_DATA, mpbiosreason);
984263bc
MD
1088
1089 /*
8a8d5d85
MD
1090 * NOTE! The idlestack for the BSP was setup by locore. Finish
1091 * up, clean out the P==V mapping we did earlier.
984263bc 1092 */
984263bc
MD
1093 for (x = 0; x < NKPT; x++)
1094 PTD[x] = 0;
1095 pmap_set_opt();
1096
52596b13
SZ
1097 /*
1098 * Wait for all APs to finish initializing their LAPIC
1099 */
1100 mp_finish_lapic = 1;
1101 if (bootverbose)
1102 kprintf("SMP: Waiting for APs to finish LAPIC initialization\n");
1103 if (cpu_feature & CPUID_TSC)
1104 tsc0_offset = rdtsc();
1105 tsc_offsets[0] = 0;
1106 rel_mplock();
1107 while (smp_lapic_mask != smp_startup_mask) {
1108 cpu_lfence();
1109 if (cpu_feature & CPUID_TSC)
1110 tsc0_offset = rdtsc();
1111 }
1112 while (try_mplock() == 0)
1113 ;
1114
984263bc 1115 /* number of APs actually started */
8a8d5d85 1116 return ncpus - 1;
984263bc
MD
1117}
1118
984263bc
MD
1119/*
1120 * load the 1st level AP boot code into base memory.
1121 */
1122
1123/* targets for relocation */
1124extern void bigJump(void);
1125extern void bootCodeSeg(void);
1126extern void bootDataSeg(void);
1127extern void MPentry(void);
1128extern u_int MP_GDT;
1129extern u_int mp_gdtbase;
1130
1131static void
1132install_ap_tramp(u_int boot_addr)
1133{
1134 int x;
1135 int size = *(int *) ((u_long) & bootMP_size);
1136 u_char *src = (u_char *) ((u_long) bootMP);
1137 u_char *dst = (u_char *) boot_addr + KERNBASE;
1138 u_int boot_base = (u_int) bootMP;
1139 u_int8_t *dst8;
1140 u_int16_t *dst16;
1141 u_int32_t *dst32;
1142
1143 POSTCODE(INSTALL_AP_TRAMP_POST);
1144
1145 for (x = 0; x < size; ++x)
1146 *dst++ = *src++;
1147
1148 /*
1149 * modify addresses in code we just moved to basemem. unfortunately we
1150 * need fairly detailed info about mpboot.s for this to work. changes
1151 * to mpboot.s might require changes here.
1152 */
1153
1154 /* boot code is located in KERNEL space */
1155 dst = (u_char *) boot_addr + KERNBASE;
1156
1157 /* modify the lgdt arg */
1158 dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base));
1159 *dst32 = boot_addr + ((u_int) & MP_GDT - boot_base);
1160
1161 /* modify the ljmp target for MPentry() */
1162 dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
1163 *dst32 = ((u_int) MPentry - KERNBASE);
1164
1165 /* modify the target for boot code segment */
1166 dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
1167 dst8 = (u_int8_t *) (dst16 + 1);
1168 *dst16 = (u_int) boot_addr & 0xffff;
1169 *dst8 = ((u_int) boot_addr >> 16) & 0xff;
1170
1171 /* modify the target for boot data segment */
1172 dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
1173 dst8 = (u_int8_t *) (dst16 + 1);
1174 *dst16 = (u_int) boot_addr & 0xffff;
1175 *dst8 = ((u_int) boot_addr >> 16) & 0xff;
1176}
1177
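/*
 * Illustrative tie-in (editor's addition): if mp_bootaddress() picked
 * boot_addr = 0x9f000, start_all_aps() stores segment 0x9f00 in the
 * warm-boot vector and start_ap() sends its STARTUP IPIs with vector
 * 0x9f, so the AP begins real-mode execution at physical address
 * 0x9f000, which is the trampoline copied in above.
 */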
1178
1179/*
1180 * This function starts the AP (application processor) identified
1181 * by the APIC ID 'physical_cpu'. It does quite a "song and dance"
1182 * to accomplish this. This is necessary because of the nuances
1183 * of the different hardware we might encounter. It ain't pretty,
1184 * but it seems to work.
a108bf71
MD
1185 *
1186 * NOTE: eventually an AP gets to ap_init(), which is called just
1187 * before the AP goes into the LWKT scheduler's idle loop.
984263bc
MD
1188 */
1189static int
bb467734 1190start_ap(struct mdglobaldata *gd, u_int boot_addr, int smibest)
984263bc
MD
1191{
1192 int physical_cpu;
1193 int vector;
984263bc
MD
1194 u_long icr_lo, icr_hi;
1195
1196 POSTCODE(START_AP_POST);
1197
1198 /* get the PHYSICAL APIC ID# */
0f7a3396 1199 physical_cpu = CPU_TO_ID(gd->mi.gd_cpuid);
984263bc
MD
1200
1201 /* calculate the vector */
1202 vector = (boot_addr >> 12) & 0xff;
1203
bb467734
MD
1204 /* We don't want anything interfering */
1205 cpu_disable_intr();
1206
8a8d5d85
MD
1207 /* Make sure the target cpu sees everything */
1208 wbinvd();
984263bc 1209
bb467734
MD
1210 /*
1211 * Try to detect when a SMI has occurred, wait up to 200ms.
1212 *
1213 * If a SMI occurs during an AP reset but before we issue
1214 * the STARTUP command, the AP may brick. To work around
1215 * this problem we hold off doing the AP startup until
1216 * after we have detected the SMI. Hopefully another SMI
1217 * will not occur before we finish the AP startup.
1218 *
1219 * Retries don't seem to help. SMIs have a window of opportunity
1220 * and if USB->legacy keyboard emulation is enabled in the BIOS
1221 * the interrupt rate can be quite high.
1222 *
1223 * NOTE: Don't worry about the L1 cache load, it might bloat
1224 * ldelta a little but ndelta will be so huge when the SMI
1225 * occurs the detection logic will still work fine.
1226 */
1227 if (smibest) {
1228 set_apic_timer(200000);
1229 smitest();
1230 }
1231
984263bc
MD
1232 /*
1233 * First we do an INIT/RESET IPI. This INIT IPI might be run, resetting
1234 * and running the target CPU. OR this INIT IPI might be latched (P5
1235 * bug), the CPU waiting for a STARTUP IPI. OR this INIT IPI might be
1236 * ignored.
bb467734
MD
1237 *
1238 * see apic/apicreg.h for icr bit definitions.
1239 *
1240 * TIME CRITICAL CODE, DO NOT DO ANY KPRINTFS IN THE HOT PATH.
984263bc
MD
1241 */
1242
bb467734
MD
1243 /*
1244 * Setup the address for the target AP. We can setup
1245 * icr_hi once and then just trigger operations with
1246 * icr_lo.
1247 */
984263bc
MD
1248 icr_hi = lapic.icr_hi & ~APIC_ID_MASK;
1249 icr_hi |= (physical_cpu << 24);
bb467734 1250 icr_lo = lapic.icr_lo & 0xfff00000;
984263bc
MD
1251 lapic.icr_hi = icr_hi;
1252
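/*
 * Editor's note on the magic icr_lo values used below: in the local
 * APIC ICR layout, 0x0000c500 requests INIT delivery (0x500) with the
 * level/assert bits (0xc000) set, 0x00008500 is the matching
 * level-triggered deassert, and 0x00000600 | vector requests STARTUP
 * delivery with the trampoline's 4KB page number as the vector.
 */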
bb467734
MD
1253 /*
1254 * Do an INIT IPI: assert RESET
1255 *
1256 * Use edge triggered mode to assert INIT
1257 */
984263bc 1258 lapic.icr_lo = icr_lo | 0x0000c500;
984263bc
MD
1259 while (lapic.icr_lo & APIC_DELSTAT_MASK)
1260 /* spin */ ;
1261
bb467734
MD
1262 /*
1263 * The spec calls for a 10ms delay but we may have to use a
1264 * MUCH lower delay to avoid bricking an AP due to a fast SMI
1265 * interrupt. We have other loops here too and dividing by 2
1266 * doesn't seem to be enough even after subtracting 350us,
1267 * so we divide by 4.
1268 *
1269 * Our minimum delay is 150uS, maximum is 10ms. If no SMI
1270 * interrupt was detected we use the full 10ms.
1271 */
1272 if (smibest == 0)
1273 u_sleep(10000);
1274 else if (smibest < 150 * 4 + 350)
1275 u_sleep(150);
1276 else if ((smibest - 350) / 4 < 10000)
1277 u_sleep((smibest - 350) / 4);
1278 else
1279 u_sleep(10000);
984263bc 1280
bb467734
MD
1281 /*
1282 * Do an INIT IPI: deassert RESET
1283 *
1284 * Use level triggered mode to deassert. It is unclear
1285 * why we need to do this.
1286 */
1287 lapic.icr_lo = icr_lo | 0x00008500;
984263bc
MD
1288 while (lapic.icr_lo & APIC_DELSTAT_MASK)
1289 /* spin */ ;
bb467734 1290 u_sleep(150); /* wait 150us */
984263bc
MD
1291
1292 /*
1293 * Next we do a STARTUP IPI: the previous INIT IPI might still be
1294 * latched (P5 bug), this 1st STARTUP would then terminate
1295 * immediately, and the previously started INIT IPI would continue. OR
1296 * the previous INIT IPI has already run and this STARTUP IPI will
1297 * run. OR the previous INIT IPI was ignored and this STARTUP IPI
1298 * will run.
1299 */
984263bc
MD
1300 lapic.icr_lo = icr_lo | 0x00000600 | vector;
1301 while (lapic.icr_lo & APIC_DELSTAT_MASK)
1302 /* spin */ ;
1303 u_sleep(200); /* wait ~200uS */
1304
1305 /*
bb467734 1306 * Finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run IF
984263bc
MD
1307 * the previous STARTUP IPI was cancelled by a latched INIT IPI. OR
1308 * this STARTUP IPI will be ignored, as only ONE STARTUP IPI is
1309 * recognized after hardware RESET or INIT IPI.
1310 */
984263bc
MD
1311 lapic.icr_lo = icr_lo | 0x00000600 | vector;
1312 while (lapic.icr_lo & APIC_DELSTAT_MASK)
1313 /* spin */ ;
bb467734
MD
1314
1315 /* Resume normal operation */
1316 cpu_enable_intr();
984263bc 1317
8a8d5d85 1318 /* wait for it to start, see ap_init() */
984263bc 1319 set_apic_timer(5000000);/* == 5 seconds */
8a8d5d85 1320 while (read_apic_timer()) {
da23a592 1321 if (smp_startup_mask & CPUMASK(gd->mi.gd_cpuid))
984263bc 1322 return 1; /* return SUCCESS */
8a8d5d85 1323 }
bb467734 1324
984263bc
MD
1325 return 0; /* return FAILURE */
1326}
1327
bb467734
MD
1328static
1329int
1330smitest(void)
1331{
1332 int64_t ltsc;
1333 int64_t ntsc;
1334 int64_t ldelta;
1335 int64_t ndelta;
1336 int count;
1337
1338 ldelta = 0;
1339 ndelta = 0;
1340 while (read_apic_timer()) {
1341 ltsc = rdtsc();
1342 for (count = 0; count < 100; ++count)
1343 ntsc = rdtsc(); /* force loop to occur */
1344 if (ldelta) {
1345 ndelta = ntsc - ltsc;
1346 if (ldelta > ndelta)
1347 ldelta = ndelta;
1348 if (ndelta > ldelta * 2)
1349 break;
1350 } else {
1351 ldelta = ntsc - ltsc;
1352 }
1353 }
1354 return(read_apic_timer());
1355}
984263bc
MD
1356
1357/*
1358 * Lazy flush the TLB on all other CPUs. DEPRECATED.
984263bc 1359 *
0f7a3396
MD
1360 * If for some reason we were unable to start all cpus we cannot safely
1361 * use broadcast IPIs.
984263bc 1362 */
7d4d6fdb
MD
1363
1364static cpumask_t smp_invltlb_req;
b4b1a37a 1365#define SMP_INVLTLB_DEBUG
7d4d6fdb 1366
984263bc
MD
1367void
1368smp_invltlb(void)
1369{
97359a5b 1370#ifdef SMP
7d4d6fdb 1371 struct mdglobaldata *md = mdcpu;
2d910aaf
MD
1372#ifdef SMP_INVLTLB_DEBUG
1373 long count = 0;
1374 long xcount = 0;
1375#endif
4117f2fd 1376
7d4d6fdb
MD
1377 crit_enter_gd(&md->mi);
1378 md->gd_invltlb_ret = 0;
1379 ++md->mi.gd_cnt.v_smpinvltlb;
da23a592 1380 atomic_set_cpumask(&smp_invltlb_req, md->mi.gd_cpumask);
2d910aaf
MD
1381#ifdef SMP_INVLTLB_DEBUG
1382again:
1383#endif
0f7a3396 1384 if (smp_startup_mask == smp_active_mask) {
984263bc 1385 all_but_self_ipi(XINVLTLB_OFFSET);
0f7a3396 1386 } else {
7d4d6fdb
MD
1387 selected_apic_ipi(smp_active_mask & ~md->mi.gd_cpumask,
1388 XINVLTLB_OFFSET, APIC_DELMODE_FIXED);
0f7a3396 1389 }
2d910aaf
MD
1390
1391#ifdef SMP_INVLTLB_DEBUG
1392 if (xcount)
1393 kprintf("smp_invltlb: ipi sent\n");
1394#endif
7d4d6fdb
MD
1395 while ((md->gd_invltlb_ret & smp_active_mask & ~md->mi.gd_cpumask) !=
1396 (smp_active_mask & ~md->mi.gd_cpumask)) {
1397 cpu_mfence();
1398 cpu_pause();
2d910aaf
MD
1399#ifdef SMP_INVLTLB_DEBUG
1400 /* DEBUGGING */
1401 if (++count == 400000000) {
1402 print_backtrace(-1);
1403 kprintf("smp_invltlb: endless loop %08lx %08lx, "
1404 "rflags %016lx retry",
1405 (long)md->gd_invltlb_ret,
1406 (long)smp_invltlb_req,
1407 (long)read_eflags());
1408 __asm __volatile ("sti");
1409 ++xcount;
1410 if (xcount > 2)
1411 lwkt_process_ipiq();
1412 if (xcount > 3) {
da23a592
MD
1413 int bcpu = BSFCPUMASK(~md->gd_invltlb_ret &
1414 ~md->mi.gd_cpumask &
1415 smp_active_mask);
2d910aaf
MD
1416 globaldata_t xgd;
1417 kprintf("bcpu %d\n", bcpu);
1418 xgd = globaldata_find(bcpu);
1419 kprintf("thread %p %s\n", xgd->gd_curthread, xgd->gd_curthread->td_comm);
1420 }
1421 if (xcount > 5)
1422 panic("giving up");
1423 count = 0;
1424 goto again;
1425 }
1426#endif
7d4d6fdb 1427 }
da23a592 1428 atomic_clear_cpumask(&smp_invltlb_req, md->mi.gd_cpumask);
7d4d6fdb 1429 crit_exit_gd(&md->mi);
4117f2fd 1430#endif
984263bc
MD
1431}
1432
7d4d6fdb
MD
1433#ifdef SMP
1434
1435/*
1436 * Called from Xinvltlb assembly with interrupts disabled. We didn't
1437 * bother to bump the critical section count or nested interrupt count
1438 * so only do very low level operations here.
1439 */
1440void
1441smp_invltlb_intr(void)
1442{
1443 struct mdglobaldata *md = mdcpu;
1444 struct mdglobaldata *omd;
1445 cpumask_t mask;
1446 int cpu;
1447
1448 mask = smp_invltlb_req;
1449 cpu_mfence();
1450 cpu_invltlb();
1451 while (mask) {
da23a592
MD
1452 cpu = BSFCPUMASK(mask);
1453 mask &= ~CPUMASK(cpu);
7d4d6fdb 1454 omd = (struct mdglobaldata *)globaldata_find(cpu);
da23a592 1455 atomic_set_cpumask(&omd->gd_invltlb_ret, md->mi.gd_cpumask);
7d4d6fdb
MD
1456 }
1457}
1458
1459#endif
1460
984263bc
MD
1461/*
1462 * When called the executing CPU will send an IPI to all other CPUs
1463 * requesting that they halt execution.
1464 *
1465 * Usually (but not necessarily) called with 'other_cpus' as its arg.
1466 *
1467 * - Signals all CPUs in map to stop.
1468 * - Waits for each to stop.
1469 *
1470 * Returns:
1471 * -1: error
1472 * 0: NA
1473 * 1: ok
1474 *
1475 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
1476 * from executing at the same time.
1477 */
1478int
da23a592 1479stop_cpus(cpumask_t map)
984263bc 1480{
0f7a3396 1481 map &= smp_active_mask;
984263bc
MD
1482
1483 /* send the Xcpustop IPI to all CPUs in map */
1484 selected_apic_ipi(map, XCPUSTOP_OFFSET, APIC_DELMODE_FIXED);
1485
1486 while ((stopped_cpus & map) != map)
1487 /* spin */ ;
1488
1489 return 1;
1490}
1491
1492
1493/*
1494 * Called by a CPU to restart stopped CPUs.
1495 *
1496 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
1497 *
1498 * - Signals all CPUs in map to restart.
1499 * - Waits for each to restart.
1500 *
1501 * Returns:
1502 * -1: error
1503 * 0: NA
1504 * 1: ok
1505 */
1506int
da23a592 1507restart_cpus(cpumask_t map)
984263bc 1508{
0f7a3396
MD
1509 /* signal other cpus to restart */
1510 started_cpus = map & smp_active_mask;
984263bc
MD
1511
1512 while ((stopped_cpus & map) != 0) /* wait for each to clear its bit */
1513 /* spin */ ;
1514
1515 return 1;
1516}
1517
984263bc 1518/*
8a8d5d85
MD
1519 * This is called once the mpboot code has gotten us properly relocated
1520 * and the MMU turned on, etc. ap_init() is actually the idle thread,
1521 * and when it returns the scheduler will call the real cpu_idle() main
1522 * loop for the idlethread. Interrupts are disabled on entry and should
1523 * remain disabled at return.
984263bc 1524 */
984263bc 1525void
8a8d5d85 1526ap_init(void)
984263bc
MD
1527{
1528 u_int apic_id;
1529
8a8d5d85 1530 /*
0f7a3396
MD
1531 * Adjust smp_startup_mask to signal the BSP that we have started
1532 * up successfully. Note that we do not yet hold the BGL. The BSP
1533 * is waiting for our signal.
1534 *
1535 * We can't set our bit in smp_active_mask yet because we are holding
1536 * interrupts physically disabled and remote cpus could deadlock
1537 * trying to send us an IPI.
8a8d5d85 1538 */
da23a592 1539 smp_startup_mask |= CPUMASK(mycpu->gd_cpuid);
35238fa5 1540 cpu_mfence();
8a8d5d85
MD
1541
1542 /*
52596b13
SZ
1543 * Interlock for LAPIC initialization. Wait until mp_finish_lapic is
1544 * non-zero, then get the MP lock.
41a01a4d
MD
1545 *
1546 * Note: We are in a critical section.
1547 *
41a01a4d
MD
1548 * Note: we are the idle thread, we can only spin.
1549 *
35238fa5 1550 * Note: The load fence is memory volatile and prevents the compiler
52596b13 1551 * from improperly caching mp_finish_lapic, and the cpu from improperly
35238fa5 1552 * caching it.
8a8d5d85 1553 */
52596b13 1554 while (mp_finish_lapic == 0)
b5d16701
MD
1555 cpu_lfence();
1556 while (try_mplock() == 0)
1557 ;
8a8d5d85 1558
374133e3 1559 if (cpu_feature & CPUID_TSC) {
b5d16701
MD
1560 /*
1561 * The BSP is constantly updating tsc0_offset, figure out
1562 * the relative difference to synchronize ktrdump.
1563 */
1564 tsc_offsets[mycpu->gd_cpuid] = rdtsc() - tsc0_offset;
374133e3
MD
1565 }
1566
984263bc
MD
1567 /* BSP may have changed PTD while we're waiting for the lock */
1568 cpu_invltlb();
1569
984263bc
MD
1570#if defined(I586_CPU) && !defined(NO_F00F_HACK)
1571 lidt(&r_idt);
1572#endif
1573
1574 /* Build our map of 'other' CPUs. */
da23a592 1575 mycpu->gd_other_cpus = smp_startup_mask & ~CPUMASK(mycpu->gd_cpuid);
984263bc 1576
984263bc 1577 /* A quick check from sanity claus */
d53907dd 1578 apic_id = (apic_id_to_logical[(lapic.id & 0xff000000) >> 24]);
8a8d5d85 1579 if (mycpu->gd_cpuid != apic_id) {
26be20a0
SW
1580 kprintf("SMP: cpuid = %d\n", mycpu->gd_cpuid);
1581 kprintf("SMP: apic_id = %d\n", apic_id);
1582 kprintf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]);
984263bc
MD
1583 panic("cpuid mismatch! boom!!");
1584 }
1585
b52c8db0 1586 /* Initialize AP's local APIC for irq's */
5ddeabb9 1587 lapic_init(FALSE);
984263bc 1588
52596b13
SZ
1589 /* LAPIC initialization is done */
1590 smp_lapic_mask |= CPUMASK(mycpu->gd_cpuid);
1591 cpu_mfence();
1592
1593 /* Let BSP move onto the next initialization stage */
1594 rel_mplock();
1595
1596 /*
1597 * Interlock for finalization. Wait until mp_finish is non-zero,
1598 * then get the MP lock.
1599 *
1600 * Note: We are in a critical section.
1601 *
1602 * Note: we are the idle thread, we can only spin.
1603 *
1604 * Note: The load fence is memory volatile and prevents the compiler
1605 * from improperly caching mp_finish, and the cpu from improperly
1606 * caching it.
1607 */
1608 while (mp_finish == 0)
1609 cpu_lfence();
1610 while (try_mplock() == 0)
1611 ;
1612
1613 /* BSP may have changed PTD while we're waiting for the lock */
1614 cpu_invltlb();
1615
984263bc
MD
1616 /* Set memory range attributes for this CPU to match the BSP */
1617 mem_range_AP_init();
1618
8a8d5d85 1619 /*
4c9f5a7f
MD
1620 * Once we go active we must process any IPIQ messages that may
1621 * have been queued, because no actual IPI will occur until we
1622 * set our bit in the smp_active_mask. If we don't the IPI
1623 * message interlock could be left set which would also prevent
1624 * further IPIs.
1625 *
8a8d5d85
MD
1626 * The idle loop doesn't expect the BGL to be held and while
1627 * lwkt_switch() normally cleans things up, this is a special case
1628 * because we are returning almost directly into the idle loop.
41a01a4d
MD
1629 *
1630 * The idle thread is never placed on the runq, make sure
4c9f5a7f 1631 * nothing we've done put it there.
8a8d5d85 1632 */
b5d16701 1633 KKASSERT(get_mplock_count(curthread) == 1);
da23a592 1634 smp_active_mask |= CPUMASK(mycpu->gd_cpuid);
d19f6edf
MD
1635
1636 /*
1637 * Enable interrupts here. idle_restore will also do it, but
1638 * doing it here lets us clean up any strays that got posted to
1639 * the CPU during the AP boot while we are still in a critical
1640 * section.
1641 */
1642 __asm __volatile("sti; pause; pause"::);
c263294b 1643 bzero(mdcpu->gd_ipending, sizeof(mdcpu->gd_ipending));
d19f6edf 1644
4a19580d 1645 initclocks_pcpu(); /* clock interrupts (via IPIs) */
4c9f5a7f 1646 lwkt_process_ipiq();
d19f6edf
MD
1647
1648 /*
1649 * Releasing the mp lock lets the BSP finish up the SMP init
1650 */
96728c05 1651 rel_mplock();
41a01a4d 1652 KKASSERT((curthread->td_flags & TDF_RUNQ) == 0);
984263bc
MD
1653}
1654
41a01a4d
MD
1655/*
1656 * Get SMP fully working before we start initializing devices.
1657 */
1658static
1659void
1660ap_finish(void)
1661{
1662 mp_finish = 1;
1663 if (bootverbose)
26be20a0 1664 kprintf("Finish MP startup\n");
41a01a4d 1665 rel_mplock();
52596b13 1666 while (smp_active_mask != smp_startup_mask)
35238fa5 1667 cpu_lfence();
4da43e1f 1668 while (try_mplock() == 0)
41a01a4d
MD
1669 ;
1670 if (bootverbose)
26be20a0 1671 kprintf("Active CPU Mask: %08x\n", smp_active_mask);
41a01a4d
MD
1672}
1673
1674SYSINIT(finishsmp, SI_BOOT2_FINISH_SMP, SI_ORDER_FIRST, ap_finish, NULL);
41a01a4d 1675
96728c05
MD
1676void
1677cpu_send_ipiq(int dcpu)
1678{
da23a592 1679 if (CPUMASK(dcpu) & smp_active_mask)
41a01a4d 1680 single_apic_ipi(dcpu, XIPIQ_OFFSET, APIC_DELMODE_FIXED);
96728c05 1681}
41a01a4d
MD
1682
1683#if 0 /* single_apic_ipi_passive() not working yet */
1684/*
1685 * Returns 0 on failure, 1 on success
1686 */
1687int
1688cpu_send_ipiq_passive(int dcpu)
1689{
1690 int r = 0;
da23a592 1691 if (CPUMASK(dcpu) & smp_active_mask) {
41a01a4d
MD
1692 r = single_apic_ipi_passive(dcpu, XIPIQ_OFFSET,
1693 APIC_DELMODE_FIXED);
1694 }
1695 return(r);
1696}
1697#endif
1698
e0fd357f
SZ
1699static int
1700mptable_bus_info_callback(void *xarg, const void *pos, int type)
1701{
1702 struct mptable_bus_info *bus_info = xarg;
1703 const struct BUSENTRY *ent;
1704 struct mptable_bus *bus;
1705
1706 if (type != 1)
1707 return 0;
c715f062 1708
e0fd357f 1709 ent = pos;
c715f062
SZ
1710 TAILQ_FOREACH(bus, &bus_info->mbi_list, mb_link) {
1711 if (bus->mb_id == ent->bus_id) {
1712 kprintf("mptable_bus_info_alloc: duplicated bus id "
1713 "(%d)\n", bus->mb_id);
1714 return EINVAL;
1715 }
1716 }
e0fd357f
SZ
1717
1718 bus = NULL;
1719 if (strncmp(ent->bus_type, "PCI", 3) == 0) {
1720 bus = kmalloc(sizeof(*bus), M_TEMP, M_WAITOK | M_ZERO);
1721 bus->mb_type = MPTABLE_BUS_PCI;
1722 } else if (strncmp(ent->bus_type, "ISA", 3) == 0) {
1723 bus = kmalloc(sizeof(*bus), M_TEMP, M_WAITOK | M_ZERO);
1724 bus->mb_type = MPTABLE_BUS_ISA;
1725 }
1726
1727 if (bus != NULL) {
c715f062
SZ
1728 bus->mb_id = ent->bus_id;
1729 TAILQ_INSERT_TAIL(&bus_info->mbi_list, bus, mb_link);
e0fd357f
SZ
1730 }
1731 return 0;
1732}
1733
1734static void
1735mptable_bus_info_alloc(const mpcth_t cth, struct mptable_bus_info *bus_info)
1736{
1737 int error;
1738
1739 bzero(bus_info, sizeof(*bus_info));
1740 TAILQ_INIT(&bus_info->mbi_list);
1741
1742 error = mptable_iterate_entries(cth, mptable_bus_info_callback, bus_info);
1743 if (error)
1744 mptable_bus_info_free(bus_info);
1745}
1746
1747static void
1748mptable_bus_info_free(struct mptable_bus_info *bus_info)
1749{
1750 struct mptable_bus *bus;
1751
1752 while ((bus = TAILQ_FIRST(&bus_info->mbi_list)) != NULL) {
1753 TAILQ_REMOVE(&bus_info->mbi_list, bus, mb_link);
1754 kfree(bus, M_TEMP);
1755 }
1756}
1757
a0873f07
SZ
1758struct mptable_lapic_cbarg1 {
1759 int cpu_count;
44c36320
SZ
1760 int ht_fixup;
1761 u_int ht_apicid_mask;
a0873f07
SZ
1762};
1763
1764static int
1765mptable_lapic_pass1_callback(void *xarg, const void *pos, int type)
1766{
1767 const struct PROCENTRY *ent;
1768 struct mptable_lapic_cbarg1 *arg = xarg;
1769
1770 if (type != 0)
1771 return 0;
1772 ent = pos;
1773
1774 if ((ent->cpu_flags & PROCENTRY_FLAG_EN) == 0)
1775 return 0;
1776
1777 arg->cpu_count++;
44c36320
SZ
1778 if (ent->apic_id < 32) {
1779 arg->ht_apicid_mask |= 1 << ent->apic_id;
1780 } else if (arg->ht_fixup) {
1781 kprintf("MPTABLE: lapic id > 32, disable HTT fixup\n");
1782 arg->ht_fixup = 0;
1783 }
a0873f07
SZ
1784 return 0;
1785}
1786
1787struct mptable_lapic_cbarg2 {
1788 int cpu;
44c36320 1789 int logical_cpus;
a0873f07
SZ
1790 int found_bsp;
1791};
1792
1793static int
1794mptable_lapic_pass2_callback(void *xarg, const void *pos, int type)
1795{
1796 const struct PROCENTRY *ent;
1797 struct mptable_lapic_cbarg2 *arg = xarg;
1798
1799 if (type != 0)
1800 return 0;
1801 ent = pos;
1802
1803 if (ent->cpu_flags & PROCENTRY_FLAG_BP) {
1804 KKASSERT(!arg->found_bsp);
1805 arg->found_bsp = 1;
1806 }
1807
1808 if (processor_entry(ent, arg->cpu))
1809 arg->cpu++;
1810
44c36320 1811 if (arg->logical_cpus) {
a0873f07
SZ
1812 struct PROCENTRY proc;
1813 int i;
1814
1815 /*
1816 * Create fake mptable processor entries
1817 * and feed them to processor_entry() to
1818 * enumerate the logical CPUs.
1819 */
1820 bzero(&proc, sizeof(proc));
1821 proc.type = 0;
1822 proc.cpu_flags = PROCENTRY_FLAG_EN;
1823 proc.apic_id = ent->apic_id;
1824
44c36320 1825 for (i = 1; i < arg->logical_cpus; i++) {
a0873f07
SZ
1826 proc.apic_id++;
1827 processor_entry(&proc, arg->cpu);
a0873f07
SZ
1828 arg->cpu++;
1829 }
1830 }
1831 return 0;
1832}
1833
322abba7
SZ
1834static void
1835mptable_lapic_default(void)
1836{
1837 int ap_apicid, bsp_apicid;
1838
1839 mp_naps = 1; /* exclude BSP */
1840
1841 /* Map local apic before the id field is accessed */
84cc808b 1842 lapic_map(DEFAULT_APIC_BASE);
322abba7
SZ
1843
1844 bsp_apicid = APIC_ID(lapic.id);
1845 ap_apicid = (bsp_apicid == 0) ? 1 : 0;
1846
1847 /* BSP */
1848 mp_set_cpuids(0, bsp_apicid);
1849 /* one and only AP */
1850 mp_set_cpuids(1, ap_apicid);
1851}
1852
/*
 * Configure:
 *	mp_naps
 *	ID_TO_CPU(N), APIC ID to logical CPU table
 *	CPU_TO_ID(N), logical CPU to APIC ID table
 */
static void
mptable_lapic_enumerate(struct lapic_enumerator *e)
{
	struct mptable_pos mpt;
	struct mptable_lapic_cbarg1 arg1;
	struct mptable_lapic_cbarg2 arg2;
	mpcth_t cth;
	int error, logical_cpus = 0;
	vm_offset_t lapic_addr;

	if (mptable_use_default) {
		mptable_lapic_default();
		return;
	}

	error = mptable_map(&mpt);
	if (error)
		panic("mptable_lapic_enumerate: mptable_map failed\n");
	KKASSERT(!MPTABLE_POS_USE_DEFAULT(&mpt));

	cth = mpt.mp_cth;

	/* Save local apic address */
	lapic_addr = (vm_offset_t)cth->apic_address;
	KKASSERT(lapic_addr != 0);

	/*
	 * Find out how many CPUs we have
	 */
	bzero(&arg1, sizeof(arg1));
	arg1.ht_fixup = 1;	/* Apply ht fixup by default */

	error = mptable_iterate_entries(cth,
		    mptable_lapic_pass1_callback, &arg1);
	if (error)
		panic("mptable_iterate_entries(lapic_pass1) failed\n");
	KKASSERT(arg1.cpu_count != 0);

	/* See if we need to fixup HT logical CPUs. */
	if (arg1.ht_fixup) {
		logical_cpus = mptable_hyperthread_fixup(arg1.ht_apicid_mask,
							 arg1.cpu_count);
		if (logical_cpus != 0)
			arg1.cpu_count *= logical_cpus;
	}
	mp_naps = arg1.cpu_count;

	/* Qualify the numbers again, after possible HT fixup */
	if (mp_naps > MAXCPU) {
		kprintf("Warning: only using %d of %d available CPUs!\n",
			MAXCPU, mp_naps);
		mp_naps = MAXCPU;
	}

	--mp_naps;	/* subtract the BSP */

	/*
	 * Link logical CPU id to local apic id
	 */
	bzero(&arg2, sizeof(arg2));
	arg2.cpu = 1;
	arg2.logical_cpus = logical_cpus;

	error = mptable_iterate_entries(cth,
		    mptable_lapic_pass2_callback, &arg2);
	if (error)
		panic("mptable_iterate_entries(lapic_pass2) failed\n");
	KKASSERT(arg2.found_bsp);

	/* Map local apic */
	lapic_map(lapic_addr);

	mptable_unmap(&mpt);
}

struct mptable_lapic_probe_cbarg {
	int	cpu_count;
	int	found_bsp;
};

static int
mptable_lapic_probe_callback(void *xarg, const void *pos, int type)
{
	const struct PROCENTRY *ent;
	struct mptable_lapic_probe_cbarg *arg = xarg;

	if (type != 0)
		return 0;
	ent = pos;

	if ((ent->cpu_flags & PROCENTRY_FLAG_EN) == 0)
		return 0;
	arg->cpu_count++;

	if (ent->cpu_flags & PROCENTRY_FLAG_BP) {
		if (arg->found_bsp) {
			kprintf("more than one BSP in base MP table\n");
			return EINVAL;
		}
		arg->found_bsp = 1;
	}
	return 0;
}

static int
mptable_lapic_probe(struct lapic_enumerator *e)
{
	struct mptable_pos mpt;
	struct mptable_lapic_probe_cbarg arg;
	mpcth_t cth;
	int error;

	if (mptable_fps_phyaddr == 0)
		return ENXIO;

	if (mptable_use_default)
		return 0;

	error = mptable_map(&mpt);
	if (error)
		return error;
	KKASSERT(!MPTABLE_POS_USE_DEFAULT(&mpt));

	error = EINVAL;
	cth = mpt.mp_cth;

	if (cth->apic_address == 0)
		goto done;

	bzero(&arg, sizeof(arg));
	error = mptable_iterate_entries(cth,
		    mptable_lapic_probe_callback, &arg);
	if (!error) {
		if (arg.cpu_count == 0) {
			kprintf("MP table contains no processor entries\n");
			error = EINVAL;
		} else if (!arg.found_bsp) {
			kprintf("MP table does not contain a BSP entry\n");
			error = EINVAL;
		}
	}
done:
	mptable_unmap(&mpt);
	return error;
}

static struct lapic_enumerator mptable_lapic_enumerator = {
	.lapic_prio = LAPIC_ENUM_PRIO_MPTABLE,
	.lapic_probe = mptable_lapic_probe,
	.lapic_enumerate = mptable_lapic_enumerate
};

static void
mptable_lapic_enum_register(void)
{
	lapic_enumerator_register(&mptable_lapic_enumerator);
}
SYSINIT(mptable_lapic, SI_BOOT2_PRESMP, SI_ORDER_ANY,
	mptable_lapic_enum_register, 0);

static int
mptable_ioapic_list_callback(void *xarg, const void *pos, int type)
{
	const struct IOAPICENTRY *ent;
	struct mptable_ioapic *nioapic, *ioapic;

	if (type != 2)
		return 0;
	ent = pos;

	if ((ent->apic_flags & IOAPICENTRY_FLAG_EN) == 0)
		return 0;

	if (ent->apic_address == 0) {
		kprintf("mptable_ioapic_create_list: zero IOAPIC addr\n");
		return EINVAL;
	}

	TAILQ_FOREACH(ioapic, &mptable_ioapic_list, mio_link) {
		if (ioapic->mio_apic_id == ent->apic_id) {
			kprintf("mptable_ioapic_create_list: duplicated "
				"apic id %d\n", ioapic->mio_apic_id);
			return EINVAL;
		}
		if (ioapic->mio_addr == (uint32_t)ent->apic_address) {
			kprintf("mptable_ioapic_create_list: overlapped "
				"IOAPIC addr 0x%08x\n", ioapic->mio_addr);
			return EINVAL;
		}
	}

	nioapic = kmalloc(sizeof(*nioapic), M_DEVBUF, M_WAITOK | M_ZERO);
	nioapic->mio_apic_id = ent->apic_id;
	nioapic->mio_addr = (uint32_t)ent->apic_address;

	/*
	 * Create IOAPIC list in ascending order of APIC ID
	 */
	TAILQ_FOREACH_REVERSE(ioapic, &mptable_ioapic_list,
	    mptable_ioapic_list, mio_link) {
		if (nioapic->mio_apic_id > ioapic->mio_apic_id) {
			TAILQ_INSERT_AFTER(&mptable_ioapic_list,
			    ioapic, nioapic, mio_link);
			break;
		}
	}
	if (ioapic == NULL)
		TAILQ_INSERT_HEAD(&mptable_ioapic_list, nioapic, mio_link);

	return 0;
}

static void
mptable_ioapic_create_list(void)
{
	struct mptable_ioapic *ioapic;
	struct mptable_pos mpt;
	int idx, error;

	if (mptable_fps_phyaddr == 0)
		return;

	if (mptable_use_default) {
		ioapic = kmalloc(sizeof(*ioapic), M_DEVBUF, M_WAITOK | M_ZERO);
		ioapic->mio_idx = 0;
		ioapic->mio_apic_id = 0;	/* NOTE: any value is ok here */
		ioapic->mio_addr = 0xfec00000;	/* XXX magic number */

		TAILQ_INSERT_HEAD(&mptable_ioapic_list, ioapic, mio_link);
		return;
	}

	error = mptable_map(&mpt);
	if (error)
		panic("mptable_ioapic_create_list: mptable_map failed\n");
	KKASSERT(!MPTABLE_POS_USE_DEFAULT(&mpt));

	error = mptable_iterate_entries(mpt.mp_cth,
		    mptable_ioapic_list_callback, NULL);
	if (error) {
		while ((ioapic = TAILQ_FIRST(&mptable_ioapic_list)) != NULL) {
			TAILQ_REMOVE(&mptable_ioapic_list, ioapic, mio_link);
			kfree(ioapic, M_DEVBUF);
		}
		goto done;
	}

	/*
	 * Assign index number for each IOAPIC
	 */
	idx = 0;
	TAILQ_FOREACH(ioapic, &mptable_ioapic_list, mio_link) {
		ioapic->mio_idx = idx;
		++idx;
	}
done:
	mptable_unmap(&mpt);
}
SYSINIT(mptable_ioapic_list, SI_BOOT2_PRESMP, SI_ORDER_SECOND,
	mptable_ioapic_create_list, 0);

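/*
 * Record every PCI interrupt entry (type 3, INT vectored) from the MP
 * table.  For a PCI source bus the MP table packs the device number
 * into bits 2-6 of the source bus irq field and the interrupt pin
 * (INT_A# == 0) into bits 0-1, which is what the masking below undoes.
 */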
static int
mptable_pci_int_callback(void *xarg, const void *pos, int type)
{
	const struct mptable_bus_info *bus_info = xarg;
	const struct mptable_ioapic *ioapic;
	const struct mptable_bus *bus;
	struct mptable_pci_int *pci_int;
	const struct INTENTRY *ent;
	int pci_pin, pci_dev;

	if (type != 3)
		return 0;
	ent = pos;

	if (ent->int_type != 0)
		return 0;

	TAILQ_FOREACH(bus, &bus_info->mbi_list, mb_link) {
		if (bus->mb_type == MPTABLE_BUS_PCI &&
		    bus->mb_id == ent->src_bus_id)
			break;
	}
	if (bus == NULL)
		return 0;

	TAILQ_FOREACH(ioapic, &mptable_ioapic_list, mio_link) {
		if (ioapic->mio_apic_id == ent->dst_apic_id)
			break;
	}
	if (ioapic == NULL) {
		kprintf("MPTABLE: warning: PCI int dst apic id %d "
			"does not exist\n", ent->dst_apic_id);
		return 0;
	}

	pci_pin = ent->src_bus_irq & 0x3;
	pci_dev = (ent->src_bus_irq >> 2) & 0x1f;

	TAILQ_FOREACH(pci_int, &mptable_pci_int_list, mpci_link) {
		if (pci_int->mpci_bus == ent->src_bus_id &&
		    pci_int->mpci_dev == pci_dev &&
		    pci_int->mpci_pin == pci_pin) {
			if (pci_int->mpci_ioapic_idx == ioapic->mio_idx &&
			    pci_int->mpci_ioapic_pin == ent->dst_apic_int) {
				kprintf("MPTABLE: warning: duplicated "
					"PCI int entry for "
					"bus %d, dev %d, pin %d\n",
					pci_int->mpci_bus,
					pci_int->mpci_dev,
					pci_int->mpci_pin);
				return 0;
			} else {
				kprintf("mptable_pci_int_register: "
					"conflicting PCI int entry for "
					"bus %d, dev %d, pin %d, "
					"IOAPIC %d.%d -> %d.%d\n",
					pci_int->mpci_bus,
					pci_int->mpci_dev,
					pci_int->mpci_pin,
					pci_int->mpci_ioapic_idx,
					pci_int->mpci_ioapic_pin,
					ioapic->mio_idx,
					ent->dst_apic_int);
				return EINVAL;
			}
		}
	}

	pci_int = kmalloc(sizeof(*pci_int), M_DEVBUF, M_WAITOK | M_ZERO);

	pci_int->mpci_bus = ent->src_bus_id;
	pci_int->mpci_dev = pci_dev;
	pci_int->mpci_pin = pci_pin;
	pci_int->mpci_ioapic_idx = ioapic->mio_idx;
	pci_int->mpci_ioapic_pin = ent->dst_apic_int;

	TAILQ_INSERT_TAIL(&mptable_pci_int_list, pci_int, mpci_link);

	return 0;
}

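/*
 * Walk the MP table once at boot and build mptable_pci_int_list, the
 * PCI interrupt routing table later consulted by mptable_pci_int_route().
 * If the table describes only one PCI bus, the recorded entries are all
 * forced onto bus 0.
 */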
static void
mptable_pci_int_register(void)
{
	struct mptable_bus_info bus_info;
	const struct mptable_bus *bus;
	struct mptable_pci_int *pci_int;
	struct mptable_pos mpt;
	int error, force_pci0, npcibus;
	mpcth_t cth;

	if (mptable_fps_phyaddr == 0)
		return;

	if (mptable_use_default)
		return;

	if (TAILQ_EMPTY(&mptable_ioapic_list))
		return;

	error = mptable_map(&mpt);
	if (error)
		panic("mptable_pci_int_register: mptable_map failed\n");
	KKASSERT(!MPTABLE_POS_USE_DEFAULT(&mpt));

	cth = mpt.mp_cth;

	mptable_bus_info_alloc(cth, &bus_info);
	if (TAILQ_EMPTY(&bus_info.mbi_list))
		goto done;

	force_pci0 = 0;
	npcibus = 0;
	TAILQ_FOREACH(bus, &bus_info.mbi_list, mb_link) {
		if (bus->mb_type == MPTABLE_BUS_PCI)
			++npcibus;
	}
	if (npcibus == 0) {
		mptable_bus_info_free(&bus_info);
		goto done;
	} else if (npcibus == 1) {
		force_pci0 = 1;
	}

	error = mptable_iterate_entries(cth,
		    mptable_pci_int_callback, &bus_info);

	mptable_bus_info_free(&bus_info);

	if (error) {
		while ((pci_int = TAILQ_FIRST(&mptable_pci_int_list)) != NULL) {
			TAILQ_REMOVE(&mptable_pci_int_list, pci_int, mpci_link);
			kfree(pci_int, M_DEVBUF);
		}
		goto done;
	}

	if (force_pci0) {
		TAILQ_FOREACH(pci_int, &mptable_pci_int_list, mpci_link)
			pci_int->mpci_bus = 0;
	}
done:
	mptable_unmap(&mpt);
}
SYSINIT(mptable_pci, SI_BOOT2_PRESMP, SI_ORDER_ANY,
	mptable_pci_int_register, 0);

struct mptable_ioapic_probe_cbarg {
	const struct mptable_bus_info *bus_info;
};

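/*
 * Sanity-check the ISA interrupt entries before this I/O APIC
 * enumerator is accepted: entries pointing at an unknown destination
 * APIC id only draw a warning, but an ISA source irq outside the
 * conventional 0-15 range fails the probe.
 */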
static int
mptable_ioapic_probe_callback(void *xarg, const void *pos, int type)
{
	struct mptable_ioapic_probe_cbarg *arg = xarg;
	const struct mptable_ioapic *ioapic;
	const struct mptable_bus *bus;
	const struct INTENTRY *ent;

	if (type != 3)
		return 0;
	ent = pos;

	if (ent->int_type != 0)
		return 0;

	TAILQ_FOREACH(bus, &arg->bus_info->mbi_list, mb_link) {
		if (bus->mb_type == MPTABLE_BUS_ISA &&
		    bus->mb_id == ent->src_bus_id)
			break;
	}
	if (bus == NULL)
		return 0;

	TAILQ_FOREACH(ioapic, &mptable_ioapic_list, mio_link) {
		if (ioapic->mio_apic_id == ent->dst_apic_id)
			break;
	}
	if (ioapic == NULL) {
		kprintf("MPTABLE: warning: ISA int dst apic id %d "
			"does not exist\n", ent->dst_apic_id);
		return 0;
	}

	/* XXX magic number */
	if (ent->src_bus_irq >= 16) {
		kprintf("mptable_ioapic_probe: invalid ISA irq (%d)\n",
			ent->src_bus_irq);
		return EINVAL;
	}
	return 0;
}

static int
mptable_ioapic_probe(struct ioapic_enumerator *e)
{
	struct mptable_ioapic_probe_cbarg arg;
	struct mptable_bus_info bus_info;
	struct mptable_pos mpt;
	mpcth_t cth;
	int error;

	if (mptable_fps_phyaddr == 0)
		return ENXIO;

	if (mptable_use_default)
		return 0;

	if (TAILQ_EMPTY(&mptable_ioapic_list))
		return ENXIO;

	error = mptable_map(&mpt);
	if (error)
		panic("mptable_ioapic_probe: mptable_map failed\n");
	KKASSERT(!MPTABLE_POS_USE_DEFAULT(&mpt));

	cth = mpt.mp_cth;

	mptable_bus_info_alloc(cth, &bus_info);

	bzero(&arg, sizeof(arg));
	arg.bus_info = &bus_info;

	error = mptable_iterate_entries(cth,
		    mptable_ioapic_probe_callback, &arg);

	mptable_bus_info_free(&bus_info);
	mptable_unmap(&mpt);

	return error;
}

struct mptable_ioapic_int_cbarg {
	const struct mptable_bus_info *bus_info;
	int	ioapic_nint;
};

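/*
 * Translate each ISA interrupt entry into a GSI (the destination I/O
 * APIC's GSI base plus the destination pin).  When the MP table routes
 * an ISA irq to a GSI with a different number, register the mapping
 * through ioapic_intsrc() as an edge/high interrupt source override.
 */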
static int
mptable_ioapic_int_callback(void *xarg, const void *pos, int type)
{
	struct mptable_ioapic_int_cbarg *arg = xarg;
	const struct mptable_ioapic *ioapic;
	const struct mptable_bus *bus;
	const struct INTENTRY *ent;
	int gsi;

	if (type != 3)
		return 0;

	arg->ioapic_nint++;

	ent = pos;
	if (ent->int_type != 0)
		return 0;

	TAILQ_FOREACH(bus, &arg->bus_info->mbi_list, mb_link) {
		if (bus->mb_type == MPTABLE_BUS_ISA &&
		    bus->mb_id == ent->src_bus_id)
			break;
	}
	if (bus == NULL)
		return 0;

	TAILQ_FOREACH(ioapic, &mptable_ioapic_list, mio_link) {
		if (ioapic->mio_apic_id == ent->dst_apic_id)
			break;
	}
	if (ioapic == NULL) {
		kprintf("MPTABLE: warning: ISA int dst apic id %d "
			"does not exist\n", ent->dst_apic_id);
		return 0;
	}

	if (ent->dst_apic_int >= ioapic->mio_npin) {
		panic("mptable_ioapic_enumerate: invalid I/O APIC "
		      "pin %d, should be < %d",
		      ent->dst_apic_int, ioapic->mio_npin);
	}
	gsi = ioapic->mio_gsi_base + ent->dst_apic_int;

	if (ent->src_bus_irq != gsi) {
		if (bootverbose) {
			kprintf("MPTABLE: INTSRC irq %d -> GSI %d\n",
				ent->src_bus_irq, gsi);
		}
		ioapic_intsrc(ent->src_bus_irq, gsi,
		    INTR_TRIGGER_EDGE, INTR_POLARITY_HIGH);
	}
	return 0;
}

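/*
 * Program every discovered I/O APIC: map its registers, derive the pin
 * count from the version register and assign contiguous GSI bases in
 * list order.  Afterwards the ISA interrupt entries are processed; if
 * none are usable, the irq 0 -> GSI 2 override is installed.
 */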
static void
mptable_ioapic_enumerate(struct ioapic_enumerator *e)
{
	struct mptable_bus_info bus_info;
	struct mptable_ioapic *ioapic;
	struct mptable_pos mpt;
	mpcth_t cth;
	int error;

	KKASSERT(mptable_fps_phyaddr != 0);
	KKASSERT(!TAILQ_EMPTY(&mptable_ioapic_list));

	TAILQ_FOREACH(ioapic, &mptable_ioapic_list, mio_link) {
		const struct mptable_ioapic *prev_ioapic;
		uint32_t ver;
		void *addr;

		addr = ioapic_map(ioapic->mio_addr);

		ver = ioapic_read(addr, IOAPIC_VER);
		ioapic->mio_npin = ((ver & IOART_VER_MAXREDIR)
				    >> MAXREDIRSHIFT) + 1;

		prev_ioapic = TAILQ_PREV(ioapic,
				mptable_ioapic_list, mio_link);
		if (prev_ioapic == NULL) {
			ioapic->mio_gsi_base = 0;
		} else {
			ioapic->mio_gsi_base =
				prev_ioapic->mio_gsi_base +
				prev_ioapic->mio_npin;
		}
		ioapic_add(addr, ioapic->mio_gsi_base,
		    ioapic->mio_npin);

		if (bootverbose) {
			kprintf("MPTABLE: IOAPIC addr 0x%08x, "
				"apic id %d, idx %d, gsi base %d, npin %d\n",
				ioapic->mio_addr,
				ioapic->mio_apic_id,
				ioapic->mio_idx,
				ioapic->mio_gsi_base,
				ioapic->mio_npin);
		}
	}

	if (mptable_use_default) {
		if (bootverbose)
			kprintf("MPTABLE: INTSRC irq 0 -> GSI 2 (default)\n");
		ioapic_intsrc(0, 2, INTR_TRIGGER_EDGE, INTR_POLARITY_HIGH);
		return;
	}

	error = mptable_map(&mpt);
	if (error)
		panic("mptable_ioapic_enumerate: mptable_map failed\n");
	KKASSERT(!MPTABLE_POS_USE_DEFAULT(&mpt));

	cth = mpt.mp_cth;

	mptable_bus_info_alloc(cth, &bus_info);

	if (TAILQ_EMPTY(&bus_info.mbi_list)) {
		if (bootverbose)
			kprintf("MPTABLE: INTSRC irq 0 -> GSI 2 (no bus)\n");
		ioapic_intsrc(0, 2, INTR_TRIGGER_EDGE, INTR_POLARITY_HIGH);
	} else {
		struct mptable_ioapic_int_cbarg arg;

		bzero(&arg, sizeof(arg));
		arg.bus_info = &bus_info;

		error = mptable_iterate_entries(cth,
			    mptable_ioapic_int_callback, &arg);
		if (error)
			panic("mptable_iterate_entries(ioapic_int) failed\n");

		if (arg.ioapic_nint == 0) {
			if (bootverbose) {
				kprintf("MPTABLE: INTSRC irq 0 -> GSI 2 "
					"(no int)\n");
			}
			ioapic_intsrc(0, 2, INTR_TRIGGER_EDGE,
			    INTR_POLARITY_HIGH);
		}
	}

	mptable_bus_info_free(&bus_info);

	mptable_unmap(&mpt);
}

static struct ioapic_enumerator mptable_ioapic_enumerator = {
	.ioapic_prio = IOAPIC_ENUM_PRIO_MPTABLE,
	.ioapic_probe = mptable_ioapic_probe,
	.ioapic_enumerate = mptable_ioapic_enumerate
};

static void
mptable_ioapic_enum_register(void)
{
	ioapic_enumerator_register(&mptable_ioapic_enumerator);
}
SYSINIT(mptable_ioapic, SI_BOOT2_PRESMP, SI_ORDER_ANY,
	mptable_ioapic_enum_register, 0);

void
mptable_pci_int_dump(void)
{
	const struct mptable_pci_int *pci_int;

	TAILQ_FOREACH(pci_int, &mptable_pci_int_list, mpci_link) {
		kprintf("MPTABLE: %d:%d INT%c -> IOAPIC %d.%d\n",
			pci_int->mpci_bus,
			pci_int->mpci_dev,
			pci_int->mpci_pin + 'A',
			pci_int->mpci_ioapic_idx,
			pci_int->mpci_ioapic_pin);
	}
}

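/*
 * Route a PCI interrupt (bus/dev/INTx, with pin 1 == INT_A#) to an irq.
 * A recorded MP table entry, if any, is translated ioapic pin -> GSI
 * -> irq; otherwise the routine falls back to the caller-supplied
 * intline (typically the BIOS-programmed PCI interrupt line).
 */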
int
mptable_pci_int_route(int bus, int dev, int pin, int intline)
{
	const struct mptable_pci_int *pci_int;
	int irq = -1;

	KKASSERT(pin >= 1);
	--pin;	/* zero based */

	TAILQ_FOREACH(pci_int, &mptable_pci_int_list, mpci_link) {
		if (pci_int->mpci_bus == bus &&
		    pci_int->mpci_dev == dev &&
		    pci_int->mpci_pin == pin)
			break;
	}
	if (pci_int != NULL) {
		int gsi;

		gsi = ioapic_gsi(pci_int->mpci_ioapic_idx,
			pci_int->mpci_ioapic_pin);
		if (gsi >= 0) {
			irq = ioapic_abi_find_gsi(gsi,
				INTR_TRIGGER_LEVEL, INTR_POLARITY_LOW);
		}
	}

	if (irq < 0) {
		if (bootverbose) {
			kprintf("MPTABLE: fixed interrupt routing "
				"for %d:%d INT%c\n", bus, dev, pin + 'A');
		}

		irq = ioapic_abi_find_irq(intline,
			INTR_TRIGGER_LEVEL, INTR_POLARITY_LOW);
	}

	if (irq >= 0 && bootverbose) {
		kprintf("MPTABLE: %d:%d INT%c routed to irq %d\n",
			bus, dev, pin + 'A', irq);
	}
	return irq;
}