/*
 * Copyright (c) 1996, by Steve Passe
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/mp_machdep.c,v 1.115.2.15 2003/03/14 21:22:35 jhb Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/cons.h>	/* cngetc() */
#include <sys/machintr.h>
#include <sys/cpu_topology.h>

#include <sys/mplock2.h>

#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>

#include <machine/smp.h>
#include <machine_base/apic/apicreg.h>
#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <machine/cputypes.h>
#include <machine_base/apic/lapic.h>
#include <machine_base/apic/ioapic.h>
#include <machine/psl.h>
#include <machine/segments.h>
#include <machine/tss.h>
#include <machine/specialreg.h>
#include <machine/globaldata.h>
#include <machine/pmap_inval.h>
#include <machine/md_var.h>		/* setidt() */
#include <machine_base/icu/icu.h>	/* IPIs */
#include <machine_base/icu/icu_var.h>
#include <machine_base/apic/ioapic_abi.h>
#include <machine/intr_machdep.h>	/* IPIs */
#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(KERNBASE + 0x0467)
#define WARMBOOT_SEG		(KERNBASE + 0x0469)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)
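
/*
 * Background note (not from the original source): writing BIOS_WARM (0x0a)
 * into CMOS register 0x0f (BIOS_RESET, the "shutdown status byte") asks the
 * BIOS to skip POST on the next CPU reset and instead jump far through the
 * warm-boot vector stored at physical 0x467 (offset) / 0x469 (segment).
 * The code below points that vector at the AP trampoline so an INIT'd AP
 * starts executing our boot code in real mode.
 */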
/*
 * This code MUST be enabled here and in mpboot.s.
 * It traces the very early stages of AP boot by placing values in CMOS ram.
 * It will NORMALLY never be needed, hence the primitive method for enabling it.
 */
#if defined(CHECK_POINTS)
#define CHECK_READ(A)	 (outb(CMOS_REG, (A)), inb(CMOS_DATA))
#define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))

#define CHECK_INIT(D);				\
	CHECK_WRITE(0x34, (D));			\
	CHECK_WRITE(0x35, (D));			\
	CHECK_WRITE(0x36, (D));			\
	CHECK_WRITE(0x37, (D));			\
	CHECK_WRITE(0x38, (D));			\
	CHECK_WRITE(0x39, (D));

#define CHECK_PRINT(S);				\
	kprintf("%s: %d, %d, %d, %d, %d, %d\n",	\
	   (S),					\
	   CHECK_READ(0x34),			\
	   CHECK_READ(0x35),			\
	   CHECK_READ(0x36),			\
	   CHECK_READ(0x37),			\
	   CHECK_READ(0x38),			\
	   CHECK_READ(0x39));

#else				/* CHECK_POINTS */

#define CHECK_INIT(D)
#define CHECK_PRINT(S)

#endif				/* CHECK_POINTS */
/*
 * Values to send to the POST hardware.
 */
#define MP_BOOTADDRESS_POST	0x10
#define MP_PROBE_POST		0x11
#define MPTABLE_PASS1_POST	0x12

#define MP_START_POST		0x13
#define MP_ENABLE_POST		0x14
#define MPTABLE_PASS2_POST	0x15

#define START_ALL_APS_POST	0x16
#define INSTALL_AP_TRAMP_POST	0x17
#define START_AP_POST		0x18

#define MP_ANNOUNCE_POST	0x19
/** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
int	current_postcode;

/** XXX FIXME: what system files declare these??? */
extern struct region_descriptor r_gdt;

extern int64_t tsc_offsets[];

/* AP uses this during bootstrap.  Do not staticize. */
static char *bootSTK;
static int bootAP;

struct pcb stoppcbs[MAXCPU];

extern inthand_t IDTVEC(fast_syscall), IDTVEC(fast_syscall32);
/*
 * Local data and functions.
 */
static u_int	boot_address;
static int	mp_finish;
static int	mp_finish_lapic;

static int	start_all_aps(u_int boot_addr);
static void	install_ap_tramp(u_int boot_addr);
static int	start_ap(struct mdglobaldata *gd, u_int boot_addr, int smibest);
static int	smitest(void);
static void	mp_bsp_simple_setup(void);

/* which cpus have been started */
static cpumask_t smp_startup_mask = CPUMASK_INITIALIZER_ONLYONE;
/* which cpus have had their lapic initialized */
static cpumask_t smp_lapic_mask = CPUMASK_INITIALIZER_ONLYONE;
/* which cpus are ready for IPIs etc? */
cpumask_t smp_active_mask = CPUMASK_INITIALIZER_ONLYONE;
cpumask_t smp_finalize_mask = CPUMASK_INITIALIZER_ONLYONE;

SYSCTL_INT(_machdep, OID_AUTO, smp_active, CTLFLAG_RD, &smp_active_mask, 0, "");
static u_int	bootMP_size;

/* Local data for detecting CPU TOPOLOGY */
static int core_bits = 0;
static int logical_CPU_bits = 0;
/*
 * Calculate usable address in base memory for AP trampoline code.
 */
u_int
mp_bootaddress(u_int basemem)
{
	POSTCODE(MP_BOOTADDRESS_POST);

	bootMP_size = mptramp_end - mptramp_start;
	boot_address = trunc_page(basemem * 1024); /* round down to 4k boundary */
	if (((basemem * 1024) - boot_address) < bootMP_size)
		boot_address -= PAGE_SIZE;	/* not enough, lower by 4k */
	/* 3 levels of page table pages */
	mptramp_pagetables = boot_address - (PAGE_SIZE * 3);

	return mptramp_pagetables;
}
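
/*
 * Illustrative note (not from the original source): the resulting
 * low-memory layout is roughly:
 *
 *	boot_address			trampoline code (bootMP_size bytes,
 *					page aligned, just below top of basemem)
 *	boot_address - 1*PAGE_SIZE	PD   page  \
 *	boot_address - 2*PAGE_SIZE	PDP  page   > mptramp_pagetables
 *	boot_address - 3*PAGE_SIZE	PML4 page  /
 *
 * The function returns the base of the three page table pages, i.e. the
 * lowest address consumed by the AP bootstrap.
 */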
/*
 * Print various information about the SMP system hardware and setup.
 */
void
mp_announce(void)
{
	int	x;

	POSTCODE(MP_ANNOUNCE_POST);

	kprintf("DragonFly/MP: Multiprocessor motherboard\n");
	kprintf(" cpu0 (BSP): apic id: %2d\n", CPUID_TO_APICID(0));
	for (x = 1; x <= naps; ++x)
		kprintf(" cpu%d (AP):  apic id: %2d\n", x, CPUID_TO_APICID(x));

	if (!ioapic_enable)
		kprintf(" Warning: APIC I/O disabled\n");
}
/*
 * AP cpu's call this to sync up protected mode.
 *
 * WARNING! %gs is not set up on entry.  This routine sets up %gs.
 */
void
init_secondary(void)
{
	int	gsel_tss;
	int	x, myid = bootAP;
	u_int64_t msr, cr0;
	struct mdglobaldata *md;
	struct privatespace *ps;

	ps = &CPU_prvspace[myid];

	gdt_segs[GPROC0_SEL].ssd_base =
		(long) &ps->mdglobaldata.gd_common_tss;
	ps->mdglobaldata.mi.gd_prvspace = ps;

	/* We fill the 32-bit segment descriptors */
	for (x = 0; x < NGDT; x++) {
		if (x != GPROC0_SEL && x != (GPROC0_SEL + 1))
			ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x]);
	}
	/* And now a 64-bit one */
	ssdtosyssd(&gdt_segs[GPROC0_SEL],
	    (struct system_segment_descriptor *)&gdt[myid * NGDT + GPROC0_SEL]);

	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	r_gdt.rd_base = (long) &gdt[myid * NGDT];
	lgdt(&r_gdt);			/* does magic intra-segment return */

	/* lgdt() destroys the GSBASE value, so we load GSBASE after lgdt() */
	wrmsr(MSR_FSBASE, 0);		/* User value */
	wrmsr(MSR_GSBASE, (u_int64_t)ps);
	wrmsr(MSR_KGSBASE, 0);		/* XXX User value while we're in the kernel */

	lidt(&r_idt_arr[mdcpu->mi.gd_cpuid]);

	lldt(_default_ldt);
	mdcpu->gd_currentldt = _default_ldt;

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	gdt[myid * NGDT + GPROC0_SEL].sd_type = SDT_SYSTSS;

	md = mdcpu;	/* loaded through %gs:0 (mdglobaldata.mi.gd_prvspace)*/

	md->gd_common_tss.tss_rsp0 = 0;	/* not used until after switch */
	md->gd_common_tss.tss_ioopt = (sizeof md->gd_common_tss) << 16;

	md->gd_tss_gdt = &gdt[myid * NGDT + GPROC0_SEL];
	md->gd_common_tssd = *md->gd_tss_gdt;

	/* double fault stack */
	md->gd_common_tss.tss_ist1 =
		(long)&md->mi.gd_prvspace->idlestack[
			sizeof(md->mi.gd_prvspace->idlestack)];
	ltr(gsel_tss);

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);

	/* Set up the fast syscall stuff */
	msr = rdmsr(MSR_EFER) | EFER_SCE;
	wrmsr(MSR_EFER, msr);
	wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
	wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
	msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
	      ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
	wrmsr(MSR_STAR, msr);
	wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D|PSL_IOPL);
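
	/*
	 * Explanatory note (not from the original source): for the
	 * SYSCALL/SYSRET MSRs above, LSTAR holds the 64-bit syscall
	 * entry point and CSTAR the 32-bit compat entry point.
	 * STAR[47:32] is the kernel CS selector loaded by SYSCALL and
	 * STAR[63:48] the selector base SYSRET uses to derive the user
	 * CS/SS.  SF_MASK lists the rflags bits the hardware clears on
	 * syscall entry, so the kernel starts with (among others)
	 * interrupts disabled.
	 */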
	pmap_set_opt();		/* PSE/4MB pages, etc */
	pmap_init_pat();	/* Page Attribute Table */

	/* set up CPU registers and state */
	cpu_setregs();

	/* set up SSE/NX registers */
	initializecpu(myid);

	/* set up FPU state on the AP */
	npxinit();

	/* disable the APIC, just to be SURE */
	lapic->svr &= ~APIC_SVR_ENABLE;
}
/*******************************************************************
 * local functions and data
 */

/*
 * Start the SMP system
 */
static void
mp_start_aps(void *dummy __unused)
{
	/* start each Application Processor */
	start_all_aps(boot_address);

	mp_bsp_simple_setup();
}
SYSINIT(startaps, SI_BOOT2_START_APS, SI_ORDER_FIRST, mp_start_aps, NULL)
/*
 * start each AP in our list
 */
static int
start_all_aps(u_int boot_addr)
{
	vm_offset_t va = boot_address + KERNBASE;
	u_int64_t *pt4, *pt3, *pt2;
	int	x, i;
	int	shift;
	int	smicount;
	int	smibest;
	int	smilast;
	u_char	mpbiosreason;
	u_long	mpbioswarmvec;
	struct mdglobaldata *gd;
	struct privatespace *ps;
	size_t	ipiq_size;

	POSTCODE(START_ALL_APS_POST);

	/* install the AP 1st level boot code */
	pmap_kenter(va, boot_address);
	cpu_invlpg((void *)va);		/* JG XXX */
	bcopy(mptramp_start, (void *)va, bootMP_size);

	/* Locate the page tables, they'll be below the trampoline */
	pt4 = (u_int64_t *)(uintptr_t)(mptramp_pagetables + KERNBASE);
	pt3 = pt4 + (PAGE_SIZE) / sizeof(u_int64_t);
	pt2 = pt3 + (PAGE_SIZE) / sizeof(u_int64_t);
	/* Create the initial 1GB replicated page tables */
	for (i = 0; i < 512; i++) {
		/* Each slot of the level 4 pages points to the same level 3 page */
		pt4[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + PAGE_SIZE);
		pt4[i] |= kernel_pmap.pmap_bits[PG_V_IDX] |
		    kernel_pmap.pmap_bits[PG_RW_IDX] |
		    kernel_pmap.pmap_bits[PG_U_IDX];

		/* Each slot of the level 3 pages points to the same level 2 page */
		pt3[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + (2 * PAGE_SIZE));
		pt3[i] |= kernel_pmap.pmap_bits[PG_V_IDX] |
		    kernel_pmap.pmap_bits[PG_RW_IDX] |
		    kernel_pmap.pmap_bits[PG_U_IDX];

		/* The level 2 page slots are mapped with 2MB pages for 1GB. */
		pt2[i] = i * (2 * 1024 * 1024);
		pt2[i] |= kernel_pmap.pmap_bits[PG_V_IDX] |
		    kernel_pmap.pmap_bits[PG_RW_IDX] |
		    kernel_pmap.pmap_bits[PG_PS_IDX] |
		    kernel_pmap.pmap_bits[PG_U_IDX];
	}
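
	/*
	 * Explanatory note (not from the original source): because every
	 * PML4/PDP slot points at the same next-level page, any virtual
	 * address resolves to (va % 1GB) physically.  In particular the
	 * trampoline's physical address is also a valid "virtual" address
	 * under these tables, which is what lets the AP enable paging and
	 * long mode while still executing from low memory.
	 */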
	/* save the current value of the warm-start vector */
	mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);

	/* setup a vector to our boot code */
	*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
	*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */
	/*
	 * If we have a TSC we can figure out the SMI interrupt rate.
	 * The SMI does not necessarily use a constant rate.  Spend
	 * up to 250ms trying to figure it out.
	 */
	smibest = 0;
	if (cpu_feature & CPUID_TSC) {
		set_apic_timer(275000);
		smilast = read_apic_timer();
		for (x = 0; x < 20 && read_apic_timer(); ++x) {
			smicount = smitest();
			if (smibest == 0 || smilast - smicount < smibest)
				smibest = smilast - smicount;
			smilast = smicount;
		}
		if (smibest > 250000)
			smibest = 0;
		if (smibest) {
			smibest = smibest * (int64_t)1000000 /
				  get_apic_timer_frequency();
		}
	}
	if (smibest)
		kprintf("SMI Frequency (worst case): %d Hz (%d us)\n",
			1000000 / smibest, smibest);
	/* start each AP */
	for (x = 1; x <= naps; ++x) {
		/* This is a bit verbose, it will go away soon.  */

		/* allocate new private data page(s) */
		gd = (struct mdglobaldata *)kmem_alloc(&kernel_map,
		    MDGLOBALDATA_BASEALLOC_SIZE);

		gd = &CPU_prvspace[x].mdglobaldata;	/* official location */
		bzero(gd, sizeof(*gd));
		gd->mi.gd_prvspace = ps = &CPU_prvspace[x];

		/* prime data page for it to use */
		mi_gdinit(&gd->mi, x);

		ipiq_size = sizeof(struct lwkt_ipiq) * (naps + 1);
		gd->mi.gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size);
		bzero(gd->mi.gd_ipiq, ipiq_size);

		/* setup a vector to our boot code */
		*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
		*((volatile u_short *) WARMBOOT_SEG) = (boot_addr >> 4);
		outb(CMOS_REG, BIOS_RESET);
		outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

		/*
		 * Setup the AP boot stack
		 */
		bootSTK = &ps->idlestack[UPAGES * PAGE_SIZE - PAGE_SIZE];
		bootAP = x;

		/* attempt to start the Application Processor */
		CHECK_INIT(99);			/* setup checkpoints */
		if (!start_ap(gd, boot_addr, smibest)) {
			kprintf("\nAP #%d (PHY# %d) failed!\n",
				x, CPUID_TO_APICID(x));
			CHECK_PRINT("trace");	/* show checkpoints */
			/* better panic as the AP may be running loose */
			kprintf("panic y/n? [y] ");
			if (cngetc() != 'n')
				panic("bye-bye");
		}
		CHECK_PRINT("trace");		/* show checkpoints */
	}
	/* set ncpus to 1 + highest logical cpu.  Not all may have come up */
	ncpus = x;

	/* ncpus2 -- ncpus rounded down to the nearest power of 2 */
	for (shift = 0; (1 << shift) <= ncpus; ++shift)
		;
	--shift;
	ncpus2_shift = shift;
	ncpus2 = 1 << shift;
	ncpus2_mask = ncpus2 - 1;

	/* ncpus_fit -- ncpus rounded up to the nearest power of 2 */
	if ((1 << shift) < ncpus)
		++shift;
	ncpus_fit = 1 << shift;
	ncpus_fit_mask = ncpus_fit - 1;
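
	/*
	 * Worked example (not from the original source): with ncpus = 6 the
	 * loop exits at shift = 3 (8 > 6) and the decrement leaves shift = 2,
	 * so ncpus2 = 4 and ncpus2_mask = 3 (round down).  The second pass
	 * bumps shift back to 3, giving ncpus_fit = 8 and ncpus_fit_mask = 7
	 * (round up).
	 */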
	/* build our map of 'other' CPUs */
	mycpu->gd_other_cpus = smp_startup_mask;
	CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	ipiq_size = sizeof(struct lwkt_ipiq) * ncpus;
	mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size);
	bzero(mycpu->gd_ipiq, ipiq_size);

	/* restore the warmstart vector */
	*(u_long *) WARMBOOT_OFF = mpbioswarmvec;
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);
	/*
	 * NOTE!  The idlestack for the BSP was setup by locore.  Finish
	 * up, clean out the P==V mapping we did earlier.
	 */

	/*
	 * Wait for all APs to finish initializing their LAPIC.
	 */
	mp_finish_lapic = 1;
	if (bootverbose)
		kprintf("SMP: Waiting APs LAPIC initialization\n");
	if (cpu_feature & CPUID_TSC)
		tsc0_offset = rdtsc();
	tsc_offsets[0] = 0;
	rel_mplock();
	while (CPUMASK_CMPMASKNEQ(smp_lapic_mask, smp_startup_mask)) {
		cpu_pause();
		cpu_lfence();
		if (cpu_feature & CPUID_TSC)
			tsc0_offset = rdtsc();
	}
	while (try_mplock() == 0) {
		cpu_pause();
		cpu_lfence();
	}

	/* number of APs actually started */
	return ncpus - 1;
}
/*
 * load the 1st level AP boot code into base memory.
 */

/* targets for relocation */
extern void bigJump(void);
extern void bootCodeSeg(void);
extern void bootDataSeg(void);
extern void MPentry(void);
extern u_int MP_GDT;
extern u_int mp_gdtbase;
static void
install_ap_tramp(u_int boot_addr)
{
	int	x;
	int	size = *(int *) ((u_long) & bootMP_size);
	u_char *src = (u_char *) ((u_long) bootMP);
	u_char *dst = (u_char *) boot_addr + KERNBASE;
	u_int	boot_base = (u_int) bootMP;
	u_int8_t *dst8;
	u_int16_t *dst16;
	u_int32_t *dst32;

	POSTCODE(INSTALL_AP_TRAMP_POST);

	for (x = 0; x < size; ++x)
		*dst++ = *src++;

	/*
	 * modify addresses in code we just moved to basemem. unfortunately we
	 * need fairly detailed info about mpboot.s for this to work.  changes
	 * to mpboot.s might require changes here.
	 */

	/* boot code is located in KERNEL space */
	dst = (u_char *) boot_addr + KERNBASE;

	/* modify the lgdt arg */
	dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base));
	*dst32 = boot_addr + ((u_int) & MP_GDT - boot_base);

	/* modify the ljmp target for MPentry() */
	dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
	*dst32 = ((u_int) MPentry - KERNBASE);

	/* modify the target for boot code segment */
	dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
	dst8 = (u_int8_t *) (dst16 + 1);
	*dst16 = (u_int) boot_addr & 0xffff;
	*dst8 = ((u_int) boot_addr >> 16) & 0xff;

	/* modify the target for boot data segment */
	dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
	dst8 = (u_int8_t *) (dst16 + 1);
	*dst16 = (u_int) boot_addr & 0xffff;
	*dst8 = ((u_int) boot_addr >> 16) & 0xff;
}
/*
 * This function starts the AP (application processor) identified
 * by the APIC ID 'physicalCpu'.  It does quite a "song and dance"
 * to accomplish this.  This is necessary because of the nuances
 * of the different hardware we might encounter.  It ain't pretty,
 * but it seems to work.
 *
 * NOTE: eventually an AP gets to ap_init(), which is called just
 *	 before the AP goes into the LWKT scheduler's idle loop.
 */
static int
start_ap(struct mdglobaldata *gd, u_int boot_addr, int smibest)
{
	int	physical_cpu;
	int	vector;
	u_long	icr_lo, icr_hi;

	POSTCODE(START_AP_POST);

	/* get the PHYSICAL APIC ID# */
	physical_cpu = CPUID_TO_APICID(gd->mi.gd_cpuid);

	/* calculate the vector */
	vector = (boot_addr >> 12) & 0xff;
	/* We don't want anything interfering */
	cpu_disable_intr();

	/* Make sure the target cpu sees everything */
	wbinvd();

	/*
	 * Try to detect when a SMI has occurred, wait up to 200ms.
	 *
	 * If a SMI occurs during an AP reset but before we issue
	 * the STARTUP command, the AP may brick.  To work around
	 * this problem we hold off doing the AP startup until
	 * after we have detected the SMI.  Hopefully another SMI
	 * will not occur before we finish the AP startup.
	 *
	 * Retries don't seem to help.  SMIs have a window of opportunity
	 * and if USB->legacy keyboard emulation is enabled in the BIOS
	 * the interrupt rate can be quite high.
	 *
	 * NOTE: Don't worry about the L1 cache load, it might bloat
	 *	 ldelta a little but ndelta will be so huge when the SMI
	 *	 occurs the detection logic will still work fine.
	 */
	if (smibest) {
		set_apic_timer(200000);
		smitest();
	}
	/*
	 * first we do an INIT/RESET IPI this INIT IPI might be run, reseting
	 * and running the target CPU. OR this INIT IPI might be latched (P5
	 * bug), CPU waiting for STARTUP IPI. OR this INIT IPI might be
	 * ignored.
	 *
	 * see apic/apicreg.h for icr bit definitions.
	 *
	 * TIME CRITICAL CODE, DO NOT DO ANY KPRINTFS IN THE HOT PATH.
	 */

	/*
	 * Setup the address for the target AP.  We can setup
	 * icr_hi once and then just trigger operations with
	 * icr_lo.
	 */
	icr_hi = lapic->icr_hi & ~APIC_ID_MASK;
	icr_hi |= (physical_cpu << 24);
	icr_lo = lapic->icr_lo & 0xfff00000;
	lapic->icr_hi = icr_hi;
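
	/*
	 * Explanatory note (not from the original source) on the ICR
	 * command words used below: bits 10:8 of icr_lo select the
	 * delivery mode (101 = INIT, 110 = STARTUP), bit 14 is
	 * level-assert and bit 15 selects level-triggered mode.  So
	 * 0x00004500 is an edge-asserted INIT, 0x00008500 a
	 * level-deasserted INIT, and 0x00000600|vector a STARTUP whose
	 * 8-bit vector is the real-mode page number (vector << 12) at
	 * which the AP begins executing, i.e. the trampoline's boot_addr.
	 */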
	/*
	 * Do an INIT IPI: assert RESET
	 *
	 * Use edge triggered mode to assert INIT
	 */
	lapic->icr_lo = icr_lo | 0x00004500;
	while (lapic->icr_lo & APIC_DELSTAT_MASK)
		 /* spin */ ;

	/*
	 * The spec calls for a 10ms delay but we may have to use a
	 * MUCH lower delay to avoid bricking an AP due to a fast SMI
	 * interrupt.  We have other loops here too and dividing by 2
	 * doesn't seem to be enough even after subtracting 350us,
	 * so we divide by 4.
	 *
	 * Our minimum delay is 150uS, maximum is 10ms.  If no SMI
	 * interrupt was detected we use the full 10ms.
	 */
	if (smibest == 0)
		u_sleep(10000);
	else if (smibest < 150 * 4 + 350)
		u_sleep(150);
	else if ((smibest - 350) / 4 < 10000)
		u_sleep((smibest - 350) / 4);
	else
		u_sleep(10000);

	/*
	 * Do an INIT IPI: deassert RESET
	 *
	 * Use level triggered mode to deassert.  It is unclear
	 * why we need to do this.
	 */
	lapic->icr_lo = icr_lo | 0x00008500;
	while (lapic->icr_lo & APIC_DELSTAT_MASK)
		 /* spin */ ;
	u_sleep(150);				/* wait 150us */

	/*
	 * Next we do a STARTUP IPI: the previous INIT IPI might still be
	 * latched, (P5 bug) this 1st STARTUP would then terminate
	 * immediately, and the previously started INIT IPI would continue. OR
	 * the previous INIT IPI has already run. and this STARTUP IPI will
	 * run. OR the previous INIT IPI was ignored. and this STARTUP IPI
	 * will run.
	 */
	lapic->icr_lo = icr_lo | 0x00000600 | vector;
	while (lapic->icr_lo & APIC_DELSTAT_MASK)
		 /* spin */ ;
	u_sleep(200);				/* wait ~200uS */
	/*
	 * Finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run IF
	 * the previous STARTUP IPI was cancelled by a latched INIT IPI. OR
	 * this STARTUP IPI will be ignored, as only ONE STARTUP IPI is
	 * recognized after hardware RESET or INIT IPI.
	 */
	lapic->icr_lo = icr_lo | 0x00000600 | vector;
	while (lapic->icr_lo & APIC_DELSTAT_MASK)
		 /* spin */ ;

	/* Resume normal operation */
	cpu_enable_intr();

	/* wait for it to start, see ap_init() */
	set_apic_timer(5000000);	/* == 5 seconds */
	while (read_apic_timer()) {
		if (CPUMASK_TESTBIT(smp_startup_mask, gd->mi.gd_cpuid))
			return 1;	/* return SUCCESS */
	}

	return 0;		/* return FAILURE */
}
/*
 * Measure the delta between consecutive TSC reads.  An SMI will cause
 * a very large spike in the measured delta, terminating the loop early.
 * The caller uses the remaining APIC timer count to calculate the SMI
 * interval.
 */
static int
smitest(void)
{
	int64_t	ltsc;
	int64_t	ntsc;
	int64_t	ldelta;
	int64_t	ndelta;
	int	count;

	ldelta = 0;
	ndelta = 0;
	while (read_apic_timer()) {
		ltsc = rdtsc();
		for (count = 0; count < 100; ++count)
			ntsc = rdtsc();	/* force loop to occur */
		if (ldelta) {
			ndelta = ntsc - ltsc;
			if (ldelta > ndelta)
				ldelta = ndelta;
			if (ndelta > ldelta * 2)
				break;
		} else {
			ldelta = ntsc - ltsc;
		}
	}
	return(read_apic_timer());
}
/*
 * Synchronously flush the TLB on all other CPU's.  The current cpu's
 * TLB is not flushed.  If the caller wishes to flush the current cpu's
 * TLB the caller must call cpu_invltlb() in addition to smp_invltlb().
 *
 * NOTE: If for some reason we were unable to start all cpus we cannot
 *	 safely use broadcast IPIs.
 */

static cpumask_t smp_invltlb_req;

#define SMP_INVLTLB_DEBUG
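
/*
 * Protocol note (not from the original source): a requesting cpu sets
 * its bit in smp_invltlb_req and IPIs the other active cpus.  Each
 * target, in smp_invltlb_intr(), flushes its own TLB and then sets its
 * own cpu bit in the gd_invltlb_ret mask of every requester it saw in
 * smp_invltlb_req.  The requester below simply spins until its
 * gd_invltlb_ret covers all active cpus other than itself.
 */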
void
smp_invltlb(void)
{
	struct mdglobaldata *md = mdcpu;
	cpumask_t tmpmask;
#ifdef SMP_INVLTLB_DEBUG
	long count = 0;
	long xcount = 0;
	cpumask_t tmpmask2;
#endif

	crit_enter_gd(&md->mi);
	CPUMASK_ASSZERO(md->gd_invltlb_ret);
	++md->mi.gd_cnt.v_smpinvltlb;
	ATOMIC_CPUMASK_ORBIT(smp_invltlb_req, md->mi.gd_cpuid);
#ifdef SMP_INVLTLB_DEBUG
again:
#endif
	if (CPUMASK_CMPMASKEQ(smp_startup_mask, smp_active_mask)) {
		all_but_self_ipi(XINVLTLB_OFFSET);
	} else {
		tmpmask = smp_active_mask;
		CPUMASK_NANDMASK(tmpmask, md->mi.gd_cpumask);
		selected_apic_ipi(tmpmask, XINVLTLB_OFFSET, APIC_DELMODE_FIXED);
	}

#ifdef SMP_INVLTLB_DEBUG
	if (xcount)
		kprintf("smp_invltlb: ipi sent\n");
#endif
	for (;;) {
		tmpmask = smp_active_mask;
		tmpmask2 = tmpmask;
		CPUMASK_ANDMASK(tmpmask, md->gd_invltlb_ret);
		CPUMASK_NANDMASK(tmpmask, md->mi.gd_cpumask);
		CPUMASK_NANDMASK(tmpmask2, md->mi.gd_cpumask);
		if (CPUMASK_CMPMASKEQ(tmpmask, tmpmask2))
			break;
		cpu_mfence();
		cpu_pause();
#ifdef SMP_INVLTLB_DEBUG
		/* DEBUGGING */
		if (++count == 400000000) {
			print_backtrace(-1);
			count = 0;
			kprintf("smp_invltlb: endless loop %08lx %08lx, "
				"rflags %016jx retry",
				(long)CPUMASK_LOWMASK(md->gd_invltlb_ret),
				(long)CPUMASK_LOWMASK(smp_invltlb_req),
				(intmax_t)read_rflags());
			__asm __volatile ("sti");
			++xcount;
			if (xcount > 2)
				lwkt_process_ipiq();
			if (xcount > 3) {
				int bcpu;
				globaldata_t xgd;

				tmpmask = smp_active_mask;
				CPUMASK_NANDMASK(tmpmask, md->gd_invltlb_ret);
				CPUMASK_NANDMASK(tmpmask, md->mi.gd_cpumask);
				bcpu = BSFCPUMASK(tmpmask);

				kprintf("bcpu %d\n", bcpu);
				xgd = globaldata_find(bcpu);
				kprintf("thread %p %s\n",
					xgd->gd_curthread,
					xgd->gd_curthread->td_comm);
			}
			if (xcount > 5)
				Debugger("giving up");
			goto again;
		}
#endif
	}
	ATOMIC_CPUMASK_NANDBIT(smp_invltlb_req, md->mi.gd_cpuid);
	crit_exit_gd(&md->mi);
}
/*
 * Called from Xinvltlb assembly with interrupts disabled.  We didn't
 * bother to bump the critical section count or nested interrupt count
 * so only do very low level operations here.
 */
void
smp_invltlb_intr(void)
{
	struct mdglobaldata *md = mdcpu;
	struct mdglobaldata *omd;
	cpumask_t mask;
	int cpu;

	cpu_mfence();
	mask = smp_invltlb_req;
	cpu_invltlb();
	while (CPUMASK_TESTNZERO(mask)) {
		cpu = BSFCPUMASK(mask);
		CPUMASK_NANDBIT(mask, cpu);
		omd = (struct mdglobaldata *)globaldata_find(cpu);
		ATOMIC_CPUMASK_ORBIT(omd->gd_invltlb_ret, md->mi.gd_cpuid);
	}
}
void
cpu_wbinvd_on_all_cpus_callback(void *arg)
{
	wbinvd();
}
struct smp_invlpg_range_cpusync_arg {
	vm_offset_t sva;
	vm_offset_t eva;
};

static void
smp_invlpg_range_cpusync(void *arg)
{
	vm_offset_t eva, sva, addr;

	sva = ((struct smp_invlpg_range_cpusync_arg *)arg)->sva;
	eva = ((struct smp_invlpg_range_cpusync_arg *)arg)->eva;

	for (addr = sva; addr < eva; addr += PAGE_SIZE) {
		cpu_invlpg((void *)addr);
	}
}
/*
 * When called the executing CPU will send an IPI to all other CPUs
 * requesting that they halt execution.
 *
 * Usually (but not necessarily) called with 'other_cpus' as its arg.
 *
 *  - Signals all CPUs in map to stop.
 *  - Waits for each to stop.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 *
 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
 *            from executing at same time.
 */
int
stop_cpus(cpumask_t map)
{
	cpumask_t mask;

	CPUMASK_ANDMASK(map, smp_active_mask);

	/* send the Xcpustop IPI to all CPUs in map */
	selected_apic_ipi(map, XCPUSTOP_OFFSET, APIC_DELMODE_FIXED);

	do {
		mask = stopped_cpus;
		CPUMASK_ANDMASK(mask, map);
	} while (CPUMASK_CMPMASKNEQ(mask, map));

	return 1;
}
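
/*
 * Usage sketch (not from the original source): a panic or debugger
 * path on one cpu would typically halt everything else with something
 * like stop_cpus(mycpu->gd_other_cpus) and later resume the stopped
 * set with restart_cpus(stopped_cpus).
 */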
/*
 * Called by a CPU to restart stopped CPUs.
 *
 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
 *
 *  - Signals all CPUs in map to restart.
 *  - Waits for each to restart.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 */
int
restart_cpus(cpumask_t map)
{
	cpumask_t mask;

	/* signal other cpus to restart */
	mask = map;
	CPUMASK_ANDMASK(mask, smp_active_mask);
	ATOMIC_CPUMASK_ORMASK(started_cpus, mask);

	/* wait for each to clear its bit */
	while (CPUMASK_CMPMASKNEQ(stopped_cpus, map))
		cpu_pause();

	return 1;
}
/*
 * This is called once the mpboot code has gotten us properly relocated
 * and the MMU turned on, etc.   ap_init() is actually the idle thread,
 * and when it returns the scheduler will call the real cpu_idle() main
 * loop for the idlethread.  Interrupts are disabled on entry and should
 * remain disabled at return.
 */
void
ap_init(void)
{
	int	cpu_id;

	/*
	 * Adjust smp_startup_mask to signal the BSP that we have started
	 * up successfully.  Note that we do not yet hold the BGL.  The BSP
	 * is waiting for our signal.
	 *
	 * We can't set our bit in smp_active_mask yet because we are holding
	 * interrupts physically disabled and remote cpus could deadlock
	 * trying to send us an IPI.
	 */
	ATOMIC_CPUMASK_ORBIT(smp_startup_mask, mycpu->gd_cpuid);
	cpu_mfence();
	/*
	 * Interlock for LAPIC initialization.  Wait until mp_finish_lapic is
	 * non-zero, then get the MP lock.
	 *
	 * Note: We are in a critical section.
	 *
	 * Note: we are the idle thread, we can only spin.
	 *
	 * Note: The load fence is memory volatile and prevents the compiler
	 *	 from improperly caching mp_finish_lapic, and the cpu from
	 *	 improperly caching it.
	 */
	while (mp_finish_lapic == 0) {
		cpu_pause();
		cpu_lfence();
	}
	while (try_mplock() == 0) {
		cpu_pause();
		cpu_lfence();
	}

	if (cpu_feature & CPUID_TSC) {
		/*
		 * The BSP is constantly updating tsc0_offset, figure out
		 * the relative difference to synchronize ktrdump.
		 */
		tsc_offsets[mycpu->gd_cpuid] = rdtsc() - tsc0_offset;
	}
	/* BSP may have changed PTD while we're waiting for the lock */
	cpu_invltlb();

	/* Build our map of 'other' CPUs. */
	mycpu->gd_other_cpus = smp_startup_mask;
	ATOMIC_CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	/* A quick check from sanity claus */
	cpu_id = APICID_TO_CPUID((lapic->id & 0xff000000) >> 24);
	if (mycpu->gd_cpuid != cpu_id) {
		kprintf("SMP: assigned cpuid = %d\n", mycpu->gd_cpuid);
		kprintf("SMP: actual cpuid = %d lapicid %d\n",
			cpu_id, (lapic->id & 0xff000000) >> 24);
#if 0 /* JGXXX */
		kprintf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]);
#endif
		panic("cpuid mismatch! boom!!");
	}

	/* Initialize AP's local APIC for irq's */
	lapic_init(FALSE);

	/* LAPIC initialization is done */
	ATOMIC_CPUMASK_ORBIT(smp_lapic_mask, mycpu->gd_cpuid);
	cpu_mfence();
	/* Let BSP move onto the next initialization stage */
	rel_mplock();

	/*
	 * Interlock for finalization.  Wait until mp_finish is non-zero,
	 * then get the MP lock.
	 *
	 * Note: We are in a critical section.
	 *
	 * Note: we are the idle thread, we can only spin.
	 *
	 * Note: The load fence is memory volatile and prevents the compiler
	 *	 from improperly caching mp_finish, and the cpu from
	 *	 improperly caching it.
	 */
	while (mp_finish == 0) {
		cpu_pause();
		cpu_lfence();
	}
	/* BSP may have changed PTD while we're waiting for the lock */
	cpu_invltlb();

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	/*
	 * Once we go active we must process any IPIQ messages that may
	 * have been queued, because no actual IPI will occur until we
	 * set our bit in the smp_active_mask.  If we don't the IPI
	 * message interlock could be left set which would also prevent
	 * further IPIs.
	 *
	 * The idle loop doesn't expect the BGL to be held and while
	 * lwkt_switch() normally cleans things up this is a special case
	 * because we are returning almost directly into the idle loop.
	 *
	 * The idle thread is never placed on the runq, make sure
	 * nothing we've done put it there.
	 */
	KKASSERT((curthread->td_flags & TDF_RUNQ) == 0);

	/*
	 * Hold a critical section and allow real interrupts to occur.  Zero
	 * any spurious interrupts which have accumulated, then set our
	 * smp_active_mask indicating that we are fully operational.
	 */
	crit_enter();
	__asm __volatile("sti; pause; pause"::);
	bzero(mdcpu->gd_ipending, sizeof(mdcpu->gd_ipending));
	ATOMIC_CPUMASK_ORBIT(smp_active_mask, mycpu->gd_cpuid);
	/*
	 * Wait until all cpus have set their smp_active_mask and have fully
	 * operational interrupts before proceeding.
	 *
	 * We need a final cpu_invltlb() because we would not have received
	 * any until we set our bit in smp_active_mask.
	 */
	while (mp_finish == 1) {
		cpu_pause();
		cpu_lfence();
	}
	cpu_invltlb();

	/*
	 * Initialize per-cpu clocks and do other per-cpu initialization.
	 * At this point code is expected to be able to use the full kernel
	 * API.
	 */
	initclocks_pcpu();	/* clock interrupts (via IPIs) */

	/*
	 * Since we may have cleaned up the interrupt triggers, manually
	 * process any pending IPIs before exiting our critical section.
	 * Once the critical section has exited, normal interrupt processing
	 * may occur.
	 */
	lwkt_process_ipiq();
	crit_exit_noyield(mycpu->gd_curthread);

	/*
	 * Final final, allow the waiting BSP to resume the boot process,
	 * return 'into' the idle thread bootstrap.
	 */
	ATOMIC_CPUMASK_ORBIT(smp_finalize_mask, mycpu->gd_cpuid);
	KKASSERT((curthread->td_flags & TDF_RUNQ) == 0);
}
/*
 * Get SMP fully working before we start initializing devices.
 */
static void
ap_finish(void)
{
	if (bootverbose)
		kprintf("Finish MP startup\n");
	rel_mplock();

	/*
	 * Wait for the active mask to complete, after which all cpus will
	 * be accepting interrupts.
	 */
	mp_finish = 1;
	while (CPUMASK_CMPMASKNEQ(smp_active_mask, smp_startup_mask)) {
		cpu_pause();
		cpu_lfence();
	}

	/*
	 * Wait for the finalization mask to complete, after which all cpus
	 * have completely finished initializing and are entering or are in
	 * their idle thread.
	 *
	 * BSP should have received all required invltlbs but do another
	 * one just in case.
	 */
	cpu_invltlb();
	mp_finish = 2;
	while (CPUMASK_CMPMASKNEQ(smp_finalize_mask, smp_startup_mask)) {
		cpu_pause();
		cpu_lfence();
	}

	while (try_mplock() == 0) {
		cpu_pause();
		cpu_lfence();
	}

	if (bootverbose) {
		kprintf("Active CPU Mask: %016jx\n",
			(uintmax_t)CPUMASK_LOWMASK(smp_active_mask));
	}
}

SYSINIT(finishsmp, SI_BOOT2_FINISH_SMP, SI_ORDER_FIRST, ap_finish, NULL)
void
cpu_send_ipiq(int dcpu)
{
	if (CPUMASK_TESTBIT(smp_active_mask, dcpu))
		single_apic_ipi(dcpu, XIPIQ_OFFSET, APIC_DELMODE_FIXED);
}

#if 0	/* single_apic_ipi_passive() not working yet */
/*
 * Returns 0 on failure, 1 on success
 */
int
cpu_send_ipiq_passive(int dcpu)
{
	int r = 0;

	if (CPUMASK_TESTBIT(smp_active_mask, dcpu)) {
		r = single_apic_ipi_passive(dcpu, XIPIQ_OFFSET,
					    APIC_DELMODE_FIXED);
	}
	return(r);
}
#endif
static void
mp_bsp_simple_setup(void)
{
	size_t ipiq_size;

	/* build our map of 'other' CPUs */
	mycpu->gd_other_cpus = smp_startup_mask;
	CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	ipiq_size = sizeof(struct lwkt_ipiq) * ncpus;
	mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size);
	bzero(mycpu->gd_ipiq, ipiq_size);

	pmap_set_opt();

	if (cpu_feature & CPUID_TSC)
		tsc0_offset = rdtsc();
}
/*
 * CPU TOPOLOGY DETECTION FUNCTIONS
 */

/*
 * Detect intel topology using CPUID
 * Ref: http://www.intel.com/Assets/PDF/appnote/241618.pdf, pg 41
 */
static void
detect_intel_topology(int count_htt_cores)
{
	int shift = 0;
	int ecx_index = 0;
	int core_plus_logical_bits = 0;
	int cores_per_package;
	int logical_per_package;
	int logical_per_core;
	u_int p[4];

	if (cpu_high >= 0xb) {
		goto FUNC_B;
	} else if (cpu_high >= 0x4) {
		goto FUNC_4;
	} else {
		core_bits = 0;
		for (shift = 0; (1 << shift) < count_htt_cores; ++shift)
			;
		logical_CPU_bits = 1 << shift;
		return;
	}

FUNC_B:
	cpuid_count(0xb, FUNC_B_THREAD_LEVEL, p);

	/* if 0xb not supported - fallback to 0x4 */
	if (p[1] == 0 || (FUNC_B_TYPE(p[2]) != FUNC_B_THREAD_TYPE)) {
		goto FUNC_4;
	}

	logical_CPU_bits = FUNC_B_BITS_SHIFT_NEXT_LEVEL(p[0]);

	ecx_index = FUNC_B_THREAD_LEVEL + 1;
	do {
		cpuid_count(0xb, ecx_index, p);

		/* Check for the Core type in the implemented sub leaves. */
		if (FUNC_B_TYPE(p[2]) == FUNC_B_CORE_TYPE) {
			core_plus_logical_bits =
			    FUNC_B_BITS_SHIFT_NEXT_LEVEL(p[0]);
			break;
		}

		ecx_index++;
	} while (FUNC_B_TYPE(p[2]) != FUNC_B_INVALID_TYPE);

	core_bits = core_plus_logical_bits - logical_CPU_bits;

	return;

FUNC_4:
	cpuid_count(0x4, 0, p);
	cores_per_package = FUNC_4_MAX_CORE_NO(p[0]) + 1;

	logical_per_package = count_htt_cores;
	logical_per_core = logical_per_package / cores_per_package;

	for (shift = 0; (1 << shift) < logical_per_core; ++shift)
		;
	logical_CPU_bits = shift;

	for (shift = 0; (1 << shift) < cores_per_package; ++shift)
		;
	core_bits = shift;

	return;
}
/*
 * Detect AMD topology using CPUID
 * Ref: http://support.amd.com/us/Embedded_TechDocs/25481.pdf, last page
 */
static void
detect_amd_topology(int count_htt_cores)
{
	int shift = 0;

	if ((cpu_feature & CPUID_HTT)
	    && (amd_feature2 & AMDID2_CMP)) {
		if (cpu_procinfo2 & AMDID_COREID_SIZE) {
			core_bits = (cpu_procinfo2 & AMDID_COREID_SIZE)
			    >> AMDID_COREID_SIZE_SHIFT;
		} else {
			core_bits = (cpu_procinfo2 & AMDID_CMP_CORES) + 1;
			for (shift = 0; (1 << shift) < core_bits; ++shift)
				;
			core_bits = shift;
		}

		logical_CPU_bits = count_htt_cores >> core_bits;
		for (shift = 0; (1 << shift) < logical_CPU_bits; ++shift)
			;
		logical_CPU_bits = shift;
	} else {
		for (shift = 0; (1 << shift) < count_htt_cores; ++shift)
			;
		core_bits = shift;
		logical_CPU_bits = 0;
	}
}
static void
amd_get_compute_unit_id(void *arg)
{
	u_int regs[4];

	do_cpuid(0x8000001e, regs);
	cpu_node_t *mynode = get_cpu_node_by_cpuid(mycpuid);

	/*
	 * AMD - CPUID Specification September 2010
	 * page 34 - //ComputeUnitID = ebx[0:7]//
	 */
	mynode->compute_unit_id = regs[1] & 0xff;
}
int
fix_amd_topology(void)
{
	cpumask_t mask;
	int i;

	if (cpu_vendor_id != CPU_VENDOR_AMD)
		return -1;
	if ((amd_feature2 & AMDID2_TOPOEXT) == 0)
		return -1;

	CPUMASK_ASSALLONES(mask);
	lwkt_cpusync_simple(mask, amd_get_compute_unit_id, NULL);

	kprintf("Compute unit IDs:\n");
	for (i = 0; i < ncpus; i++) {
		kprintf("%d-%d; \n",
			i, get_cpu_node_by_cpuid(i)->compute_unit_id);
	}
	return 0;
}
/*
 * Calculate
 * - logical_CPU_bits
 * - core_bits
 *
 * With the values above (for AMD or INTEL) we are able to generally
 * detect the CPU topology (number of cores for each level):
 * Ref: http://wiki.osdev.org/Detecting_CPU_Topology_(80x86)
 * Ref: http://www.multicoreinfo.com/research/papers/whitepapers/Intel-detect-topology.pdf
 */
void
detect_cpu_topology(void)
{
	static int topology_detected = 0;
	int count = 0;

	if (topology_detected) {
		goto OUT;
	}

	if ((cpu_feature & CPUID_HTT) == 0) {
		core_bits = 0;
		logical_CPU_bits = 0;
		goto OUT;
	}
	count = (cpu_procinfo & CPUID_HTT_CORES)
	    >> CPUID_HTT_CORE_SHIFT;

	if (cpu_vendor_id == CPU_VENDOR_INTEL) {
		detect_intel_topology(count);
	} else if (cpu_vendor_id == CPU_VENDOR_AMD) {
		detect_amd_topology(count);
	}

OUT:
	if (bootverbose) {
		kprintf("Bits within APICID: logical_CPU_bits: %d; "
		    "core_bits: %d\n", logical_CPU_bits, core_bits);
	}

	topology_detected = 1;
}
/*
 * Interface functions to calculate chip_ID,
 * core_number and logical_number
 * Ref: http://wiki.osdev.org/Detecting_CPU_Topology_(80x86)
 */
int
get_chip_ID(int cpuid)
{
	return get_apicid_from_cpuid(cpuid) >>
	    (logical_CPU_bits + core_bits);
}

int
get_core_number_within_chip(int cpuid)
{
	return (get_apicid_from_cpuid(cpuid) >> logical_CPU_bits) &
	    ((1 << core_bits) - 1);
}

int
get_logical_CPU_number_within_core(int cpuid)
{
	return get_apicid_from_cpuid(cpuid) &
	    ((1 << logical_CPU_bits) - 1);
}
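
/*
 * Worked example (not from the original source): with logical_CPU_bits = 1
 * and core_bits = 2 the APIC ID is split [chip | core | thread] as
 * [bits 3+ | bits 2:1 | bit 0].  An APIC ID of 11 (0b1011) thus decodes to
 * chip_ID = 11 >> 3 = 1, core = (11 >> 1) & 3 = 1, logical CPU = 11 & 1 = 1.
 */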