/*
 * Copyright (c) 1996, by Steve Passe
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/mp_machdep.c,v 1.115.2.15 2003/03/14 21:22:35 jhb Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/cons.h>	/* cngetc() */
#include <sys/machintr.h>
#include <sys/cpu_topology.h>
#include <sys/mplock2.h>

#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>

#include <machine/smp.h>
#include <machine_base/apic/apicreg.h>
#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <machine/cputypes.h>
#include <machine_base/apic/lapic.h>
#include <machine_base/apic/ioapic.h>
#include <machine_base/acpica/acpi_md_cpu.h>
#include <machine/psl.h>
#include <machine/segments.h>
#include <machine/tss.h>
#include <machine/specialreg.h>
#include <machine/globaldata.h>
#include <machine/pmap_inval.h>
#include <machine/clock.h>
#include <machine/md_var.h>		/* setidt() */
#include <machine_base/icu/icu.h>	/* IPIs */
#include <machine_base/icu/icu_var.h>
#include <machine_base/apic/ioapic_abi.h>
#include <machine/intr_machdep.h>	/* IPIs */
#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(KERNBASE + 0x0467)
#define WARMBOOT_SEG		(KERNBASE + 0x0469)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)
/*
 * This code MUST be enabled here and in mpboot.s.
 * It follows the very early stages of AP boot by placing values in CMOS RAM.
 * It NORMALLY will never be needed, hence the primitive method used to
 * enable it.
 */
#if defined(CHECK_POINTS)
#define CHECK_READ(A)	 (outb(CMOS_REG, (A)), inb(CMOS_DATA))
#define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))

#define CHECK_INIT(D);				\
	CHECK_WRITE(0x34, (D));			\
	CHECK_WRITE(0x35, (D));			\
	CHECK_WRITE(0x36, (D));			\
	CHECK_WRITE(0x37, (D));			\
	CHECK_WRITE(0x38, (D));			\
	CHECK_WRITE(0x39, (D));

#define CHECK_PRINT(S);				\
	kprintf("%s: %d, %d, %d, %d, %d, %d\n",	\
	    (S),				\
	    CHECK_READ(0x34),			\
	    CHECK_READ(0x35),			\
	    CHECK_READ(0x36),			\
	    CHECK_READ(0x37),			\
	    CHECK_READ(0x38),			\
	    CHECK_READ(0x39));

#else				/* CHECK_POINTS */

#define CHECK_INIT(D)
#define CHECK_PRINT(S)

#endif				/* CHECK_POINTS */
/*
 * Values to send to the POST hardware.
 */
#define MP_BOOTADDRESS_POST	0x10
#define MP_PROBE_POST		0x11
#define MPTABLE_PASS1_POST	0x12

#define MP_START_POST		0x13
#define MP_ENABLE_POST		0x14
#define MPTABLE_PASS2_POST	0x15

#define START_ALL_APS_POST	0x16
#define INSTALL_AP_TRAMP_POST	0x17
#define START_AP_POST		0x18

#define MP_ANNOUNCE_POST	0x19
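/*
 * Editor's note: the POSTCODE() macro used below is not shown in this
 * excerpt.  Historically (an assumption, not verified against this tree)
 * it latches the value in current_postcode and also writes it to the ISA
 * diagnostic POST port, roughly:
 *
 *	#define POSTCODE(X)	(current_postcode = (X), outb(0x80, (X)))
 *
 * so a POST card (or chipset debug register) shows how far SMP boot got
 * before a hang.
 */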
/** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
int	current_postcode;

/** XXX FIXME: what system files declare these??? */
extern int	_udatasel;
extern int64_t	tsc_offsets[];

/* AP uses this during bootstrap.  Do not staticize.  */
struct pcb	stoppcbs[MAXCPU];

extern inthand_t IDTVEC(fast_syscall), IDTVEC(fast_syscall32);
/*
 * Local data and functions.
 */
static u_int	boot_address;
static int	mp_finish;
static int	mp_finish_lapic;

static int	start_all_aps(u_int boot_addr);
static void	install_ap_tramp(u_int boot_addr);
static int	start_ap(struct mdglobaldata *gd, u_int boot_addr, int smibest);
static int	smitest(void);
static void	mp_bsp_simple_setup(void);

/* which cpus have been started */
static cpumask_t smp_startup_mask = CPUMASK_INITIALIZER_ONLYONE;
/* which cpus have had their lapic initialized */
static cpumask_t smp_lapic_mask = CPUMASK_INITIALIZER_ONLYONE;
/* which cpus are ready for IPIs etc? */
cpumask_t smp_active_mask = CPUMASK_INITIALIZER_ONLYONE;
cpumask_t smp_finalize_mask = CPUMASK_INITIALIZER_ONLYONE;

SYSCTL_OPAQUE(_machdep, OID_AUTO, smp_active, CTLFLAG_RD,
	      &smp_active_mask, sizeof(smp_active_mask), "LU", "");
static u_int	bootMP_size;
static u_int	report_invlpg_src;
SYSCTL_INT(_machdep, OID_AUTO, report_invlpg_src, CTLFLAG_RW,
	   &report_invlpg_src, 0, "");
static u_int	report_invltlb_src;
SYSCTL_INT(_machdep, OID_AUTO, report_invltlb_src, CTLFLAG_RW,
	   &report_invltlb_src, 0, "");
static int	optimized_invltlb;
SYSCTL_INT(_machdep, OID_AUTO, optimized_invltlb, CTLFLAG_RW,
	   &optimized_invltlb, 0, "");
static int	all_but_self_ipi_enable = 1;
SYSCTL_INT(_machdep, OID_AUTO, all_but_self_ipi_enable, CTLFLAG_RW,
	   &all_but_self_ipi_enable, 0, "");

/* Local data for detecting CPU TOPOLOGY */
static int	core_bits = 0;
static int	logical_CPU_bits = 0;
/*
 * Calculate usable address in base memory for AP trampoline code.
 */
u_int
mp_bootaddress(u_int basemem)
{
	POSTCODE(MP_BOOTADDRESS_POST);

	bootMP_size = mptramp_end - mptramp_start;
	boot_address = trunc_page(basemem * 1024); /* round down to 4k boundary */
	if (((basemem * 1024) - boot_address) < bootMP_size)
		boot_address -= PAGE_SIZE;	/* not enough, lower by 4k */
	/* 3 levels of page table pages */
	mptramp_pagetables = boot_address - (PAGE_SIZE * 3);

	return mptramp_pagetables;
}
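/*
 * Worked example (editor's illustration): with basemem = 639 (KB),
 * basemem * 1024 = 0x9FC00, so boot_address = trunc_page(0x9FC00) =
 * 0x9F000.  That leaves 0xC00 bytes for the trampoline; were bootMP_size
 * larger than that, we would drop down another 4K.  The three trampoline
 * page table pages then land at mptramp_pagetables = 0x9C000.
 */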
/*
 * Print various information about the SMP system hardware and setup.
 */
void
mp_announce(void)
{
	int x;

	POSTCODE(MP_ANNOUNCE_POST);

	kprintf("DragonFly/MP: Multiprocessor motherboard\n");
	kprintf(" cpu0 (BSP): apic id: %2d\n", CPUID_TO_APICID(0));
	for (x = 1; x <= naps; ++x)
		kprintf(" cpu%d (AP):  apic id: %2d\n", x, CPUID_TO_APICID(x));

	if (!ioapic_enable)
		kprintf(" Warning: APIC I/O disabled\n");
}
/*
 * AP CPUs call this to sync up protected mode.
 *
 * WARNING! %gs is not set up on entry.  This routine sets up %gs.
 */
void
init_secondary(void)
{
	int x, myid = bootAP;
	int gsel_tss;
	u_int64_t msr, cr0;
	struct mdglobaldata *md;
	struct privatespace *ps;

	ps = CPU_prvspace[myid];

	gdt_segs[GPROC0_SEL].ssd_base = (long)&ps->common_tss;
	ps->mdglobaldata.mi.gd_prvspace = ps;

	/* We fill the 32-bit segment descriptors */
	for (x = 0; x < NGDT; x++) {
		if (x != GPROC0_SEL && x != (GPROC0_SEL + 1))
			ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x]);
	}
	/* And now a 64-bit one */
	ssdtosyssd(&gdt_segs[GPROC0_SEL],
	    (struct system_segment_descriptor *)&gdt[myid * NGDT + GPROC0_SEL]);

	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	r_gdt.rd_base = (long) &gdt[myid * NGDT];
	lgdt(&r_gdt);			/* does magic intra-segment return */

	/* lgdt() destroys the GSBASE value, so we load GSBASE after lgdt() */
	wrmsr(MSR_FSBASE, 0);		/* User value */
	wrmsr(MSR_GSBASE, (u_int64_t)ps);
	wrmsr(MSR_KGSBASE, 0);		/* XXX User value while we're in the kernel */

	lidt(&r_idt_arr[mdcpu->mi.gd_cpuid]);

	mdcpu->gd_currentldt = _default_ldt;

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	gdt[myid * NGDT + GPROC0_SEL].sd_type = SDT_SYSTSS;

	md = mdcpu;	/* loaded through %gs:0 (mdglobaldata.mi.gd_prvspace) */
	/*
	 * TSS entry point for interrupts, traps, and exceptions
	 * (sans NMI).  This will always go to near the top of the pcpu
	 * trampoline area.  Hardware-pushed data will be copied into
	 * the trap-frame on entry, and (if necessary) returned to the
	 * trampoline on exit.
	 *
	 * We store some pcb data for the trampoline code above the
	 * stack the cpu hw pushes into, and arrange things so the
	 * address of tr_pcb_rsp is the same as the desired top of
	 * stack.
	 */
	ps->common_tss.tss_rsp0 = (register_t)&ps->trampoline.tr_pcb_rsp;
	ps->trampoline.tr_pcb_rsp = ps->common_tss.tss_rsp0;
	ps->trampoline.tr_pcb_gs_kernel = (register_t)md;
	ps->trampoline.tr_pcb_cr3 = KPML4phys;	/* adj to user cr3 live */
	ps->dbltramp.tr_pcb_gs_kernel = (register_t)md;
	ps->dbltramp.tr_pcb_cr3 = KPML4phys;
	ps->dbgtramp.tr_pcb_gs_kernel = (register_t)md;
	ps->dbgtramp.tr_pcb_cr3 = KPML4phys;

	ps->common_tss.tss_ioopt = (sizeof ps->common_tss) << 16;

	md->gd_tss_gdt = &gdt[myid * NGDT + GPROC0_SEL];
	md->gd_common_tssd = *md->gd_tss_gdt;

	/* double fault stack */
	ps->common_tss.tss_ist1 = (register_t)&ps->dbltramp.tr_pcb_rsp;
	ps->common_tss.tss_ist2 = (register_t)&ps->dbgtramp.tr_pcb_rsp;

	ltr(gsel_tss);
	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);

	/* Set up the fast syscall stuff */
	msr = rdmsr(MSR_EFER) | EFER_SCE;
	wrmsr(MSR_EFER, msr);
	wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
	wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
	msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
	      ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
	wrmsr(MSR_STAR, msr);
	wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D|PSL_IOPL|PSL_AC);
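	/*
	 * Worked example of the MSR_STAR layout programmed above (editor's
	 * note): SYSCALL loads CS from STAR[47:32] and SS from that value
	 * plus 8, so bits 47:32 hold the kernel code selector.  SYSRET
	 * loads the 32-bit user CS from STAR[63:48] and the 64-bit user
	 * CS from that value plus 16, which is why the selector stored in
	 * bits 63:48 is the *32-bit* user code selector (GUCODE32_SEL).
	 */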
	pmap_set_opt();		/* PSE/4MB pages, etc */
	pmap_init_pat();	/* Page Attribute Table */

	/* set up CPU registers and state */
	cpu_setregs();

	/* set up SSE/NX registers */
	initializecpu(myid);

	/* set up FPU state on the AP */
	npxinit();

	/* If BSP is in the X2APIC mode, put the AP into the X2APIC mode. */
	if (x2apic_enabled)
		lapic_x2apic_enter(FALSE);

	/* disable the APIC, just to be SURE */
	LAPIC_WRITE(svr, (LAPIC_READ(svr) & ~APIC_SVR_ENABLE));
}
/*******************************************************************
 * local functions and data
 */

/*
 * Start the SMP system
 */
static void
mp_start_aps(void *dummy __unused)
{
	/* start each Application Processor */
	start_all_aps(boot_address);

	mp_bsp_simple_setup();
}

SYSINIT(startaps, SI_BOOT2_START_APS, SI_ORDER_FIRST, mp_start_aps, NULL);
/*
 * start each AP in our list
 */
static int
start_all_aps(u_int boot_addr)
{
	vm_offset_t va = boot_address + KERNBASE;
	u_int64_t *pt4, *pt3, *pt2;
	int	pssize;
	int	x, i;
	int	shift;
	int	smicount;
	int	smibest;
	int	smilast;
	u_char	mpbiosreason;
	u_long	mpbioswarmvec;
	struct mdglobaldata *gd;
	struct privatespace *ps;
	size_t	ipiq_size;

	POSTCODE(START_ALL_APS_POST);
	/* install the AP 1st level boot code */
	pmap_kenter(va, boot_address);
	cpu_invlpg((void *)va);		/* JG XXX */
	bcopy(mptramp_start, (void *)va, bootMP_size);

	/* Locate the page tables, they'll be below the trampoline */
	pt4 = (u_int64_t *)(uintptr_t)(mptramp_pagetables + KERNBASE);
	pt3 = pt4 + (PAGE_SIZE) / sizeof(u_int64_t);
	pt2 = pt3 + (PAGE_SIZE) / sizeof(u_int64_t);

	/* Create the initial 1GB replicated page tables */
	for (i = 0; i < 512; i++) {
		/* Each slot of the level 4 pages points to the same level 3 page */
		pt4[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + PAGE_SIZE);
		pt4[i] |= kernel_pmap.pmap_bits[PG_V_IDX] |
		    kernel_pmap.pmap_bits[PG_RW_IDX] |
		    kernel_pmap.pmap_bits[PG_U_IDX];

		/* Each slot of the level 3 pages points to the same level 2 page */
		pt3[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + (2 * PAGE_SIZE));
		pt3[i] |= kernel_pmap.pmap_bits[PG_V_IDX] |
		    kernel_pmap.pmap_bits[PG_RW_IDX] |
		    kernel_pmap.pmap_bits[PG_U_IDX];

		/* The level 2 page slots are mapped with 2MB pages for 1GB. */
		pt2[i] = i * (2 * 1024 * 1024);
		pt2[i] |= kernel_pmap.pmap_bits[PG_V_IDX] |
		    kernel_pmap.pmap_bits[PG_RW_IDX] |
		    kernel_pmap.pmap_bits[PG_PS_IDX] |
		    kernel_pmap.pmap_bits[PG_U_IDX];
	}
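	/*
	 * Worked example (editor's illustration): every PML4 and PDP slot
	 * points at the same next-level page, and pt2[i] maps i * 2MB, so
	 * every 1GB window of the virtual address space repeats the same
	 * low 1GB of physical memory.  Both VA 0 and VA KERNBASE therefore
	 * resolve through identical entries, letting the real-mode
	 * trampoline enable paging while still executing at its low
	 * physical address and then jump to the kernel's high mapping.
	 */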
	/* save the current value of the warm-start vector */
	mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);

	/* setup a vector to our boot code */
	*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
	*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

	/*
	 * If we have a TSC we can figure out the SMI interrupt rate.
	 * The SMI does not necessarily use a constant rate.  Spend
	 * up to 250ms trying to figure it out.
	 */
	smibest = 0;
	if (cpu_feature & CPUID_TSC) {
		set_apic_timer(275000);
		smilast = read_apic_timer();
		for (x = 0; x < 20 && read_apic_timer(); ++x) {
			smicount = smitest();
			if (smibest == 0 || smilast - smicount < smibest)
				smibest = smilast - smicount;
			smilast = smicount;
		}
		if (smibest > 250000)
			smibest = 0;
	}
	if (smibest)
		kprintf("SMI Frequency (worst case): %d Hz (%d us)\n",
			1000000 / smibest, smibest);
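	/*
	 * Worked example (editor's illustration): smibest is the smallest
	 * observed gap between SMIs, in microseconds.  If smibest computes
	 * to 2000, the message above reports 1000000 / 2000 = 500 Hz
	 * (2000 us).  Anything slower than one SMI per 250ms is treated
	 * as "no SMI" and smibest stays zero.
	 */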
	/* start each AP */
	for (x = 1; x <= naps; ++x) {
		/* This is a bit verbose, it will go away soon.  */

		pssize = sizeof(struct privatespace);
		ps = (void *)kmem_alloc3(&kernel_map, pssize, VM_SUBSYS_GD,
					 KM_CPU(x));
		CPU_prvspace[x] = ps;
		kprintf("ps %d %p %d\n", x, ps, pssize);
		bzero(ps, pssize);

		gd = &ps->mdglobaldata;
		gd->mi.gd_prvspace = ps;

		/* prime data page for it to use */
		mi_gdinit(&gd->mi, x);
		cpu_gdinit(gd, x);

		ipiq_size = sizeof(struct lwkt_ipiq) * (naps + 1);
		gd->mi.gd_ipiq = (void *)kmem_alloc3(&kernel_map, ipiq_size,
						     VM_SUBSYS_IPIQ, KM_CPU(x));
		bzero(gd->mi.gd_ipiq, ipiq_size);

		gd->gd_acpi_id = CPUID_TO_ACPIID(gd->mi.gd_cpuid);

		/* initialize arc4random. */
		arc4_init_pcpu(x);

		/* setup a vector to our boot code */
		*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
		*((volatile u_short *) WARMBOOT_SEG) = (boot_addr >> 4);
		outb(CMOS_REG, BIOS_RESET);
		outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

		/*
		 * Setup the AP boot stack
		 */
		bootSTK = &ps->idlestack[UPAGES * PAGE_SIZE - PAGE_SIZE];
		bootAP = x;

		/* attempt to start the Application Processor */
		CHECK_INIT(99);	/* setup checkpoints */
		if (!start_ap(gd, boot_addr, smibest)) {
			kprintf("\nAP #%d (PHY# %d) failed!\n",
				x, CPUID_TO_APICID(x));
			CHECK_PRINT("trace");	/* show checkpoints */
			/* better panic as the AP may be running loose */
			kprintf("panic y/n? [y] ");
			if (cngetc() != 'n')
				panic("bye-bye");
		}

		CHECK_PRINT("trace");	/* show checkpoints */
	}
	/* set ncpus to 1 + highest logical cpu.  Not all may have come up */
	ncpus = x;

	for (shift = 0; (1 << shift) <= ncpus; ++shift)
		;
	--shift;

	/* ncpus_fit -- ncpus rounded up to the nearest power of 2 */
	if ((1 << shift) < ncpus)
		++shift;
	ncpus_fit = 1 << shift;
	ncpus_fit_mask = ncpus_fit - 1;
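	/*
	 * Worked example (editor's illustration): with ncpus = 6 the loop
	 * above leaves shift = 2, the round-up test bumps it to 3, so
	 * ncpus_fit = 8 and ncpus_fit_mask = 7.  With ncpus already a
	 * power of 2, e.g. 8, ncpus_fit stays 8.  The mask allows cheap
	 * power-of-2 "id & ncpus_fit_mask" style indexing.
	 */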
	/* build our map of 'other' CPUs */
	mycpu->gd_other_cpus = smp_startup_mask;
	CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	malloc_reinit_ncpus();

	gd = (struct mdglobaldata *)mycpu;
	gd->gd_acpi_id = CPUID_TO_ACPIID(mycpu->gd_cpuid);

	ipiq_size = sizeof(struct lwkt_ipiq) * ncpus;
	mycpu->gd_ipiq = (void *)kmem_alloc3(&kernel_map, ipiq_size,
					     VM_SUBSYS_IPIQ, KM_CPU(0));
	bzero(mycpu->gd_ipiq, ipiq_size);
	/* initialize arc4random. */
	arc4_init_pcpu(0);

	/* restore the warmstart vector */
	*(u_long *) WARMBOOT_OFF = mpbioswarmvec;
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);

	/*
	 * NOTE!  The idlestack for the BSP was setup by locore.  Finish
	 * up, clean out the P==V mapping we did earlier.
	 */

	/*
	 * Wait for all APs to finish initializing their LAPIC.
	 */
	kprintf("SMP: Waiting for APs to finish LAPIC initialization\n");
	if (cpu_feature & CPUID_TSC)
		tsc0_offset = rdtsc();
	mp_finish_lapic = 1;
	rel_mplock();
	while (CPUMASK_CMPMASKNEQ(smp_lapic_mask, smp_startup_mask)) {
		cpu_pause();
		cpu_lfence();
		if (cpu_feature & CPUID_TSC)
			tsc0_offset = rdtsc();
	}
	while (try_mplock() == 0) {
		cpu_pause();
		cpu_lfence();
	}

	/* number of APs actually started */
	return ncpus - 1;
}
/*
 * load the 1st level AP boot code into base memory.
 */

/* targets for relocation */
extern void bigJump(void);
extern void bootCodeSeg(void);
extern void bootDataSeg(void);
extern void MPentry(void);
extern u_int MP_GDT;
extern u_int mp_gdtbase;
static void
install_ap_tramp(u_int boot_addr)
{
	int	x;
	int	size = *(int *) ((u_long) & bootMP_size);
	u_char *src = (u_char *) ((u_long) bootMP);
	u_char *dst = (u_char *) boot_addr + KERNBASE;
	u_int	boot_base = (u_int) bootMP;
	u_int8_t *dst8;
	u_int16_t *dst16;
	u_int32_t *dst32;

	POSTCODE(INSTALL_AP_TRAMP_POST);

	for (x = 0; x < size; ++x)
		*dst++ = *src++;

	/*
	 * Modify addresses in code we just moved to basemem.  Unfortunately we
	 * need fairly detailed info about mpboot.s for this to work.  Changes
	 * to mpboot.s might require changes here.
	 */

	/* boot code is located in KERNEL space */
	dst = (u_char *) boot_addr + KERNBASE;

	/* modify the lgdt arg */
	dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base));
	*dst32 = boot_addr + ((u_int) & MP_GDT - boot_base);

	/* modify the ljmp target for MPentry() */
	dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
	*dst32 = ((u_int) MPentry - KERNBASE);

	/* modify the target for boot code segment */
	dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
	dst8 = (u_int8_t *) (dst16 + 1);
	*dst16 = (u_int) boot_addr & 0xffff;
	*dst8 = ((u_int) boot_addr >> 16) & 0xff;

	/* modify the target for boot data segment */
	dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
	dst8 = (u_int8_t *) (dst16 + 1);
	*dst16 = (u_int) boot_addr & 0xffff;
	*dst8 = ((u_int) boot_addr >> 16) & 0xff;
}
/*
 * This function starts the AP (application processor) identified
 * by the APIC ID 'physicalCpu'.  It does quite a "song and dance"
 * to accomplish this.  This is necessary because of the nuances
 * of the different hardware we might encounter.  It ain't pretty,
 * but it seems to work.
 *
 * NOTE: eventually an AP gets to ap_init(), which is called just
 *	 before the AP goes into the LWKT scheduler's idle loop.
 */
static int
start_ap(struct mdglobaldata *gd, u_int boot_addr, int smibest)
{
	int	physical_cpu;
	int	vector;

	POSTCODE(START_AP_POST);

	/* get the PHYSICAL APIC ID# */
	physical_cpu = CPUID_TO_APICID(gd->mi.gd_cpuid);

	/* calculate the vector */
	vector = (boot_addr >> 12) & 0xff;
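	/*
	 * Worked example (editor's illustration): a STARTUP IPI makes the
	 * AP begin executing at physical address vector << 12.  With
	 * boot_addr = 0x9F000 the computed vector is 0x9F, so the AP
	 * starts at 0x9F000, the base of the trampoline copied there by
	 * start_all_aps().
	 */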
	/* We don't want anything interfering */
	cpu_disable_intr();

	/* Make sure the target cpu sees everything */
	wbinvd();
	/*
	 * Try to detect when a SMI has occurred, wait up to 200ms.
	 *
	 * If a SMI occurs during an AP reset but before we issue
	 * the STARTUP command, the AP may brick.  To work around
	 * this problem we hold off doing the AP startup until
	 * after we have detected the SMI.  Hopefully another SMI
	 * will not occur before we finish the AP startup.
	 *
	 * Retries don't seem to help.  SMIs have a window of opportunity
	 * and if USB->legacy keyboard emulation is enabled in the BIOS
	 * the interrupt rate can be quite high.
	 *
	 * NOTE: Don't worry about the L1 cache load, it might bloat
	 *	 ldelta a little but ndelta will be so huge when the SMI
	 *	 occurs the detection logic will still work fine.
	 */
	if (smibest) {
		set_apic_timer(200000);
		smitest();
	}
	/*
	 * First we do an INIT/RESET IPI: this INIT IPI might be run,
	 * resetting and running the target CPU.  OR this INIT IPI might be
	 * latched (P5 bug), the CPU waiting for a STARTUP IPI.  OR this
	 * INIT IPI might be ignored.
	 *
	 * see apic/apicreg.h for icr bit definitions.
	 *
	 * TIME CRITICAL CODE, DO NOT DO ANY KPRINTFS IN THE HOT PATH.
	 */

	/*
	 * Do an INIT IPI: assert RESET
	 *
	 * Use edge triggered mode to assert INIT
	 */
	lapic_seticr_sync(physical_cpu,
	    APIC_DESTMODE_PHY |
	    APIC_DEST_DESTFLD |
	    APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_ASSERT |
	    APIC_DELMODE_INIT);
	/*
	 * The spec calls for a 10ms delay but we may have to use a
	 * MUCH lower delay to avoid bricking an AP due to a fast SMI
	 * interrupt.  We have other loops here too and dividing by 2
	 * doesn't seem to be enough even after subtracting 350us,
	 * so we divide by 4.
	 *
	 * Our minimum delay is 150uS, maximum is 10ms.  If no SMI
	 * interrupt was detected we use the full 10ms.
	 */
	if (smibest == 0)
		u_sleep(10000);
	else if (smibest < 150 * 4 + 350)
		u_sleep(150);
	else if ((smibest - 350) / 4 < 10000)
		u_sleep((smibest - 350) / 4);
	else
		u_sleep(10000);
	/*
	 * Do an INIT IPI: deassert RESET
	 *
	 * Use level triggered mode to deassert.  It is unclear
	 * why we need to do this.
	 */
	lapic_seticr_sync(physical_cpu,
	    APIC_DESTMODE_PHY |
	    APIC_DEST_DESTFLD |
	    APIC_TRIGMOD_LEVEL |
	    APIC_LEVEL_DEASSERT |
	    APIC_DELMODE_INIT);
	u_sleep(150);		/* wait 150us */
	/*
	 * Next we do a STARTUP IPI: the previous INIT IPI might still be
	 * latched, (P5 bug) this 1st STARTUP would then terminate
	 * immediately, and the previously started INIT IPI would continue.  OR
	 * the previous INIT IPI has already run, and this STARTUP IPI will
	 * run.  OR the previous INIT IPI was ignored, and this STARTUP IPI
	 * will run.
	 *
	 * XXX set APIC_LEVEL_ASSERT
	 */
	lapic_seticr_sync(physical_cpu,
	    APIC_DESTMODE_PHY |
	    APIC_DEST_DESTFLD |
	    APIC_DELMODE_STARTUP |
	    vector);
	u_sleep(200);		/* wait ~200uS */
	/*
	 * Finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run IF
	 * the previous STARTUP IPI was cancelled by a latched INIT IPI.  OR
	 * this STARTUP IPI will be ignored, as only ONE STARTUP IPI is
	 * recognized after hardware RESET or INIT IPI.
	 *
	 * XXX set APIC_LEVEL_ASSERT
	 */
	lapic_seticr_sync(physical_cpu,
	    APIC_DESTMODE_PHY |
	    APIC_DEST_DESTFLD |
	    APIC_DELMODE_STARTUP |
	    vector);

	/* Resume normal operation */
	cpu_enable_intr();

	/* wait for it to start, see ap_init() */
	set_apic_timer(5000000);	/* == 5 seconds */
	while (read_apic_timer()) {
		if (CPUMASK_TESTBIT(smp_startup_mask, gd->mi.gd_cpuid))
			return 1;	/* return SUCCESS */
	}

	return 0;		/* return FAILURE */
}
static int
smitest(void)
{
	int64_t	ltsc, ntsc, ldelta, ndelta;
	int	count;

	ldelta = 0;
	ndelta = 0;
	while (read_apic_timer()) {
		ltsc = rdtsc();
		for (count = 0; count < 100; ++count)
			ntsc = rdtsc();	/* force loop to occur */
		if (ldelta) {
			ndelta = ntsc - ltsc;
			if (ndelta > ldelta * 2)
				break;
		} else {
			ldelta = ntsc - ltsc;
		}
	}
	return(read_apic_timer());
}
/*
 * Synchronously flush the TLB on all other CPU's.  The current cpu's
 * TLB is not flushed.  If the caller wishes to flush the current cpu's
 * TLB the caller must call cpu_invltlb() in addition to smp_invltlb().
 *
 * This routine may be called concurrently from multiple cpus.  When this
 * happens, smp_invltlb() can wind up sticking around in the confirmation
 * while() loop at the end as additional cpus are added to the global
 * cpumask, until they are acknowledged by another IPI.
 *
 * NOTE: If for some reason we were unable to start all cpus we cannot
 *	 safely use broadcast IPIs.
 */

cpumask_t smp_smurf_mask;
static cpumask_t smp_invltlb_mask;
cpumask_t smp_in_mask;
cpumask_t smp_invmask;

extern cpumask_t smp_idleinvl_mask;
extern cpumask_t smp_idleinvl_reqs;
/*
 * Atomically OR bits in *mask to smp_smurf_mask.  Adjust *mask to remove
 * bits that do not need to be IPId.  These bits are still part of the command,
 * but the target cpus have already been signalled and do not need to be
 * signalled again.
 */
#include <sys/spinlock.h>
#include <sys/spinlock2.h>

static void
smp_smurf_fetchset(cpumask_t *mask)
{
	cpumask_t omask;
	int i;
	__uint64_t obits;
	__uint64_t nbits;

	i = 0;
	while (i < CPUMASK_ELEMENTS) {
		obits = smp_smurf_mask.ary[i];
		cpu_ccfence();
		nbits = obits | mask->ary[i];
		if (atomic_cmpset_long(&smp_smurf_mask.ary[i], obits, nbits)) {
			omask.ary[i] = obits;
			++i;
		}
	}
	CPUMASK_NANDMASK(*mask, omask);
}
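/*
 * Worked example (editor's illustration): if smp_smurf_mask is 0110b and
 * *mask is 1010b on entry, the CAS loop leaves smp_smurf_mask = 1110b and
 * *mask is reduced to 1000b -- cpu1 already had an IPI signalled (its bit
 * was set in the captured omask), so only cpu3 still needs one.
 */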
/*
 * This is a mechanism which guarantees that cpu_invltlb() will be executed
 * on idle cpus without having to signal or wake them up.  The invltlb will be
 * executed when they wake up, prior to any scheduling or interrupt thread.
 *
 * (*mask) is modified to remove the cpus we successfully negotiate this
 * function with.  This function may only be used with semi-synchronous
 * commands (typically invltlb's or semi-synchronous invalidations which
 * are usually associated only with kernel memory).
 */
static void
smp_smurf_idleinvlclr(cpumask_t *mask)
{
	if (optimized_invltlb) {
		ATOMIC_CPUMASK_ORMASK(smp_idleinvl_reqs, *mask);
		/* cpu_lfence() not needed */
		CPUMASK_NANDMASK(*mask, smp_idleinvl_mask);
	}
}
/*
 * Issue cpu_invltlb() across all cpus except the current cpu.
 *
 * This function will arrange to avoid idle cpus, but still guarantee that
 * invltlb is run on them when they wake up prior to any scheduling or
 * interrupt thread.
 */
void
smp_invltlb(void)
{
	struct mdglobaldata *md = mdcpu;
	cpumask_t mask;
	unsigned long rflags;
	tsc_uclock_t tsc_base = rdtsc();
	int repeats = 0;

	if (report_invltlb_src > 0) {
		if (--report_invltlb_src <= 0)
			print_backtrace(8);
	}

	/*
	 * Disallow normal interrupts, set all active cpus except our own
	 * in the global smp_invltlb_mask.
	 */
	++md->mi.gd_cnt.v_smpinvltlb;
	crit_enter_gd(&md->mi);

	/*
	 * Bits we want to set in smp_invltlb_mask.  We do not want to signal
	 * our own cpu.  Also try to remove bits associated with idle cpus
	 * that we can flag for auto-invltlb.
	 */
	mask = smp_active_mask;
	CPUMASK_NANDBIT(mask, md->mi.gd_cpuid);
	smp_smurf_idleinvlclr(&mask);

	rflags = read_rflags();
	cpu_disable_intr();
	ATOMIC_CPUMASK_ORMASK(smp_invltlb_mask, mask);

	/*
	 * IPI non-idle cpus represented by mask.  The omask calculation
	 * removes cpus from the mask which already have a Xinvltlb IPI
	 * pending (avoid double-queueing the IPI).
	 *
	 * We must disable real interrupts when setting the smurf flags or
	 * we might race a XINVLTLB before we manage to send the ipi's for
	 * the bits we set.
	 *
	 * NOTE: We are not signalling ourselves, mask already does NOT
	 *	 include our own cpu.
	 */
	smp_smurf_fetchset(&mask);

	/*
	 * Issue the IPI.  Note that the XINVLTLB IPI runs regardless of
	 * the critical section count on the target cpus.
	 */
	CPUMASK_ORMASK(mask, md->mi.gd_cpumask);
	if (all_but_self_ipi_enable &&
	    (all_but_self_ipi_enable >= 2 ||
	     CPUMASK_CMPMASKEQ(smp_startup_mask, mask))) {
		all_but_self_ipi(XINVLTLB_OFFSET);
	} else {
		CPUMASK_NANDMASK(mask, md->mi.gd_cpumask);
		selected_apic_ipi(mask, XINVLTLB_OFFSET, APIC_DELMODE_FIXED);
	}
	/*
	 * Wait for acknowledgement by all cpus.  smp_inval_intr() will
	 * temporarily enable interrupts to avoid deadlocking the lapic,
	 * and will also handle running cpu_invltlb() and remote invlpg
	 * commands on our cpu if some other cpu requests it of us.
	 *
	 * WARNING! I originally tried to implement this as a hard loop
	 *	    checking only smp_invltlb_mask (and issuing a local
	 *	    cpu_invltlb() if requested), with interrupts enabled
	 *	    and without calling smp_inval_intr().  This DID NOT WORK.
	 *	    It resulted in weird races where smurf bits would get
	 *	    cleared without any action being taken.
	 */
	CPUMASK_ASSZERO(mask);
	while (CPUMASK_CMPMASKNEQ(smp_invltlb_mask, mask)) {
		smp_inval_intr();
		cpu_pause();
		if (tsc_frequency && rdtsc() - tsc_base > tsc_frequency) {
			/*
			 * cpuid - cpu doing the waiting
			 * invltlb_mask - IPI in progress
			 */
			kprintf("smp_invltlb %d: waited too long inv=%08jx "
				"smurf=%08jx "
				"idle=%08jx/%08jx\n",
				md->mi.gd_cpuid,
				smp_invltlb_mask.ary[0],
				smp_smurf_mask.ary[0],
				smp_idleinvl_mask.ary[0],
				smp_idleinvl_reqs.ary[0]);
			mdcpu->gd_xinvaltlb = 0;
			ATOMIC_CPUMASK_NANDMASK(smp_smurf_mask,
						smp_active_mask);
			smp_invlpg(&smp_active_mask);
			tsc_base = rdtsc();
			if (++repeats > 10) {
				kprintf("smp_invltlb: giving up\n");
				CPUMASK_ASSZERO(smp_invltlb_mask);
			}
		}
	}
	write_rflags(rflags);
	crit_exit_gd(&md->mi);
}
/*
 * Called from a critical section with interrupts hard-disabled.
 * This function issues an XINVLTLB IPI and then executes any pending
 * command on the current cpu before returning.
 */
void
smp_invlpg(cpumask_t *cmdmask)
{
	struct mdglobaldata *md = mdcpu;
	cpumask_t mask;

	if (report_invlpg_src > 0) {
		if (--report_invlpg_src <= 0)
			print_backtrace(8);
	}

	/*
	 * Disallow normal interrupts, set all active cpus in the pmap,
	 * plus our own for completion processing (it might or might not
	 * be part of the set).
	 */
	mask = smp_active_mask;
	CPUMASK_ANDMASK(mask, *cmdmask);
	CPUMASK_ORMASK(mask, md->mi.gd_cpumask);

	/*
	 * Avoid double-queuing IPIs, which can deadlock us.  We must disable
	 * real interrupts when setting the smurf flags or we might race a
	 * XINVLTLB before we manage to send the ipi's for the bits we set.
	 *
	 * NOTE: We might be including our own cpu in the smurf mask.
	 */
	smp_smurf_fetchset(&mask);

	/*
	 * Issue the IPI.  Note that the XINVLTLB IPI runs regardless of
	 * the critical section count on the target cpus.
	 *
	 * We do not include our own cpu when issuing the IPI.
	 */
	if (all_but_self_ipi_enable &&
	    (all_but_self_ipi_enable >= 2 ||
	     CPUMASK_CMPMASKEQ(smp_startup_mask, mask))) {
		all_but_self_ipi(XINVLTLB_OFFSET);
	} else {
		CPUMASK_NANDMASK(mask, md->mi.gd_cpumask);
		selected_apic_ipi(mask, XINVLTLB_OFFSET, APIC_DELMODE_FIXED);
	}

	/*
	 * This will synchronously wait for our command to complete,
	 * as well as process commands from other cpus.  It also handles
	 * reentrancy.
	 *
	 * (interrupts are disabled and we are in a critical section here)
	 */
	smp_inval_intr();
}
/*
 * Issue rip/rsp sniffs
 */
void
smp_sniff(void)
{
	globaldata_t gd = mycpu;
	register_t rflags;
	int dummy;

	/*
	 * Ignore all_but_self_ipi_enable here and just use it.
	 */
	rflags = read_rflags();
	cpu_disable_intr();
	all_but_self_ipi(XSNIFF_OFFSET);
	gd->gd_sample_pc = smp_sniff;
	gd->gd_sample_sp = &dummy;
	write_rflags(rflags);
}

void
cpu_sniff(int dcpu)
{
	globaldata_t rgd = globaldata_find(dcpu);
	register_t rflags;
	int dummy;

	/*
	 * Ignore all_but_self_ipi_enable here and just use it.
	 */
	rflags = read_rflags();
	cpu_disable_intr();
	single_apic_ipi(dcpu, XSNIFF_OFFSET, APIC_DELMODE_FIXED);
	rgd->gd_sample_pc = cpu_sniff;
	rgd->gd_sample_sp = &dummy;
	write_rflags(rflags);
}
/*
 * Called from Xinvltlb assembly with interrupts hard-disabled and in a
 * critical section.  gd_intr_nesting_level may or may not be bumped
 * depending on entry.
 *
 * THIS CODE IS INTENDED TO EXPLICITLY IGNORE THE CRITICAL SECTION COUNT.
 * THAT IS, THE INTERRUPT IS INTENDED TO FUNCTION EVEN WHEN MAINLINE CODE
 * IS IN A CRITICAL SECTION.
 */
void
smp_inval_intr(void)
{
	struct mdglobaldata *md = mdcpu;
	cpumask_t cpumask;
	tsc_uclock_t tsc_base = rdtsc();
	int toolong;

	/*
	 * NOTE: The idle code is in a critical section, but that doesn't stop
	 *	 Xinvltlb from executing, so deal with the race which can occur
	 *	 in that situation.  Otherwise r-m-w operations by pmap_inval_intr()
	 *	 may have problems.
	 */
	if (ATOMIC_CPUMASK_TESTANDCLR(smp_idleinvl_reqs, md->mi.gd_cpuid)) {
		ATOMIC_CPUMASK_NANDBIT(smp_invltlb_mask, md->mi.gd_cpuid);
		cpu_invltlb();
		cpu_mfence();
	}

	/*
	 * This is a real mess.  I'd like to just leave interrupts disabled
	 * but it can cause the lapic to deadlock if too many interrupts queue
	 * to it, due to the idiotic design of the lapic.  So instead we have
	 * to enter a critical section so normal interrupts are made pending
	 * and track whether this one was reentered.
	 */
	if (md->gd_xinvaltlb) {		/* reentrant on cpu */
		md->gd_xinvaltlb = 2;
		return;
	}
	md->gd_xinvaltlb = 1;

	/*
	 * Check only those cpus with active Xinvl* commands pending.
	 *
	 * We are going to enable interrupts so make sure we are in a
	 * critical section.  This is necessary to avoid deadlocking
	 * the lapic and to ensure that we execute our commands prior to
	 * any nominal interrupt or preemption.
	 *
	 * WARNING! It is very important that we only clear our bit in
	 *	    smp_smurf_mask once for each interrupt we take.  In
	 *	    this case, we clear it on initial entry and only loop
	 *	    on the reentrancy detect (caused by another interrupt).
	 */
	cpumask = smp_invmask;

	ATOMIC_CPUMASK_ORBIT(smp_in_mask, md->mi.gd_cpuid);
	ATOMIC_CPUMASK_NANDBIT(smp_smurf_mask, md->mi.gd_cpuid);
loop:
	cpu_enable_intr();
	/*
	 * Specific page request(s), and we can't return until all bits
	 * are zero.
	 */
	for (;;) {
		/*
		 * Also execute any pending full invalidation request in
		 * this loop.
		 */
		cpu_mfence();
		if (CPUMASK_TESTBIT(smp_invltlb_mask, md->mi.gd_cpuid)) {
			ATOMIC_CPUMASK_NANDBIT(smp_invltlb_mask,
					       md->mi.gd_cpuid);
			cpu_invltlb();
			cpu_mfence();
		}

		if (tsc_frequency && rdtsc() - tsc_base > tsc_frequency) {
			/*
			 * cpuid - cpu doing the waiting
			 * invmask - IPI in progress
			 * invltlb_mask - which ones are TLB invalidations?
			 */
			kprintf("smp_inval_intr %d inv=%08jx tlbm=%08jx "
				"smurf=%08jx "
				"idle=%08jx/%08jx\n",
				md->mi.gd_cpuid,
				smp_invmask.ary[0],
				smp_invltlb_mask.ary[0],
				smp_smurf_mask.ary[0],
				smp_idleinvl_mask.ary[0],
				smp_idleinvl_reqs.ary[0]);
			tsc_base = rdtsc();
			toolong = 1;
		} else {
			toolong = 0;
		}
		/*
		 * We can only add bits to the cpumask to test during the
		 * loop because the smp_invmask bit is cleared once the
		 * originator completes the command (the targets may still
		 * be cycling their own completions in this loop, afterwards).
		 *
		 * lfence required prior to all tests as this Xinvltlb
		 * interrupt could race the originator (already be in progress
		 * when the originator decides to issue, due to an issue by
		 * another cpu).
		 */
		cpu_lfence();
		CPUMASK_ORMASK(cpumask, smp_invmask);
		/*cpumask = smp_active_mask;*/	/* XXX */
		cpu_lfence();

		if (pmap_inval_intr(&cpumask, toolong) == 0) {
			/*
			 * Clear our smurf mask to allow new IPIs, but deal
			 * with potential races.
			 */
			break;
		}

		/*
		 * Test if someone sent us another invalidation IPI, break
		 * out so we can take it to avoid deadlocking the lapic
		 * interrupt queue (? stupid intel, amd).
		 */
		if (md->gd_xinvaltlb == 2)
			break;
		if (CPUMASK_TESTBIT(smp_smurf_mask, md->mi.gd_cpuid))
			break;
	}

	/*
	 * Full invalidation request
	 */
	if (CPUMASK_TESTBIT(smp_invltlb_mask, md->mi.gd_cpuid)) {
		ATOMIC_CPUMASK_NANDBIT(smp_invltlb_mask,
				       md->mi.gd_cpuid);
		cpu_invltlb();
		cpu_mfence();
	}

	/*
	 * Check to see if another Xinvltlb interrupt occurred and loop up
	 * if it did.
	 */
	cpu_disable_intr();
	if (md->gd_xinvaltlb == 2) {
		md->gd_xinvaltlb = 1;
		goto loop;
	}

	ATOMIC_CPUMASK_NANDBIT(smp_in_mask, md->mi.gd_cpuid);
	md->gd_xinvaltlb = 0;
}
static void
cpu_wbinvd_on_all_cpus_callback(void *arg)
{
	wbinvd();
}
/*
 * When called the executing CPU will send an IPI to all other CPUs
 * requesting that they halt execution.
 *
 * Usually (but not necessarily) called with 'other_cpus' as its arg.
 *
 *  - Signals all CPUs in map to stop.
 *  - Waits for each to stop.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 *
 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
 *	      from executing at same time.
 */
int
stop_cpus(cpumask_t map)
{
	cpumask_t mask;

	CPUMASK_ANDMASK(map, smp_active_mask);

	/* send the Xcpustop IPI to all CPUs in map */
	selected_apic_ipi(map, XCPUSTOP_OFFSET, APIC_DELMODE_FIXED);

	do {
		mask = stopped_cpus;
		CPUMASK_ANDMASK(mask, map);
		cpu_pause();
	} while (CPUMASK_CMPMASKNEQ(mask, map));

	return 1;
}
/*
 * Called by a CPU to restart stopped CPUs.
 *
 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
 *
 *  - Signals all CPUs in map to restart.
 *  - Waits for each to restart.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 */
int
restart_cpus(cpumask_t map)
{
	cpumask_t mask;

	/* signal other cpus to restart */
	mask = map;
	CPUMASK_ANDMASK(mask, smp_active_mask);
	started_cpus = mask;

	/* wait for each to clear its bit */
	while (CPUMASK_CMPMASKNEQ(stopped_cpus, map))
		cpu_pause();

	return 1;
}
/*
 * This is called once the mpboot code has gotten us properly relocated
 * and the MMU turned on, etc.  ap_init() is actually the idle thread,
 * and when it returns the scheduler will call the real cpu_idle() main
 * loop for the idlethread.  Interrupts are disabled on entry and should
 * remain disabled at return.
 */
void
ap_init(void)
{
	int	cpu_id;

	/*
	 * Adjust smp_startup_mask to signal the BSP that we have started
	 * up successfully.  Note that we do not yet hold the BGL.  The BSP
	 * is waiting for our signal.
	 *
	 * We can't set our bit in smp_active_mask yet because we are holding
	 * interrupts physically disabled and remote cpus could deadlock
	 * trying to send us an IPI.
	 */
	ATOMIC_CPUMASK_ORBIT(smp_startup_mask, mycpu->gd_cpuid);
	cpu_mfence();
	/*
	 * Interlock for LAPIC initialization.  Wait until mp_finish_lapic is
	 * non-zero, then get the MP lock.
	 *
	 * Note: We are in a critical section.
	 *
	 * Note: we are the idle thread, we can only spin.
	 *
	 * Note: The load fence is memory volatile and prevents the compiler
	 *	 from improperly caching mp_finish_lapic, and the cpu from
	 *	 improperly caching it.
	 */
	while (mp_finish_lapic == 0) {
		cpu_pause();
		cpu_lfence();
	}
	while (try_mplock() == 0) {
		cpu_pause();
		cpu_lfence();
	}

	if (cpu_feature & CPUID_TSC) {
		/*
		 * The BSP is constantly updating tsc0_offset, figure out
		 * the relative difference to synchronize ktrdump.
		 */
		tsc_offsets[mycpu->gd_cpuid] = rdtsc() - tsc0_offset;
	}

	/* BSP may have changed PTD while we're waiting for the lock */
	cpu_invltlb();

	/* Build our map of 'other' CPUs. */
	mycpu->gd_other_cpus = smp_startup_mask;
	ATOMIC_CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	/* A quick check from sanity claus */
	cpu_id = APICID_TO_CPUID(LAPIC_READID);
	if (mycpu->gd_cpuid != cpu_id) {
		kprintf("SMP: assigned cpuid = %d\n", mycpu->gd_cpuid);
		kprintf("SMP: actual cpuid = %d lapicid %d\n",
			cpu_id, LAPIC_READID);
		kprintf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]);
		panic("cpuid mismatch! boom!!");
	}

	/* Initialize AP's local APIC for irq's */
	lapic_init(FALSE);

	/* LAPIC initialization is done */
	ATOMIC_CPUMASK_ORBIT(smp_lapic_mask, mycpu->gd_cpuid);
	cpu_mfence();

	/* Let BSP move onto the next initialization stage */
	rel_mplock();
	/*
	 * Interlock for finalization.  Wait until mp_finish is non-zero,
	 * then get the MP lock.
	 *
	 * Note: We are in a critical section.
	 *
	 * Note: we are the idle thread, we can only spin.
	 *
	 * Note: The load fence is memory volatile and prevents the compiler
	 *	 from improperly caching mp_finish, and the cpu from improperly
	 *	 caching it.
	 */
	while (mp_finish == 0) {
		cpu_pause();
		cpu_lfence();
	}

	/* BSP may have changed PTD while we're waiting for the lock */
	cpu_invltlb();

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	/*
	 * Once we go active we must process any IPIQ messages that may
	 * have been queued, because no actual IPI will occur until we
	 * set our bit in the smp_active_mask.  If we don't the IPI
	 * message interlock could be left set which would also prevent
	 * further IPIs.
	 *
	 * The idle loop doesn't expect the BGL to be held and while
	 * lwkt_switch() normally cleans things up this is a special case
	 * because we are returning almost directly into the idle loop.
	 *
	 * The idle thread is never placed on the runq, make sure
	 * nothing we've done put it there.
	 */

	/*
	 * Hold a critical section and allow real interrupts to occur.  Zero
	 * any spurious interrupts which have accumulated, then set our
	 * smp_active_mask indicating that we are fully operational.
	 */
	crit_enter();
	__asm __volatile("sti; pause; pause"::);
	bzero(mdcpu->gd_ipending, sizeof(mdcpu->gd_ipending));
	ATOMIC_CPUMASK_ORBIT(smp_active_mask, mycpu->gd_cpuid);

	/*
	 * Wait until all cpus have set their smp_active_mask and have fully
	 * operational interrupts before proceeding.
	 *
	 * We need a final cpu_invltlb() because we would not have received
	 * any until we set our bit in smp_active_mask.
	 */
	while (mp_finish == 1) {
		cpu_pause();
		cpu_lfence();
	}
	cpu_invltlb();

	/*
	 * Initialize per-cpu clocks and do other per-cpu initialization.
	 * At this point code is expected to be able to use the full kernel
	 * API.
	 */
	initclocks_pcpu();	/* clock interrupts (via IPIs) */

	/*
	 * Since we may have cleaned up the interrupt triggers, manually
	 * process any pending IPIs before exiting our critical section.
	 * Once the critical section has exited, normal interrupt processing
	 * may occur.
	 */
	atomic_swap_int(&mycpu->gd_npoll, 0);
	lwkt_process_ipiq();
	crit_exit();

	/*
	 * Final final, allow the waiting BSP to resume the boot process,
	 * return 'into' the idle thread bootstrap.
	 */
	ATOMIC_CPUMASK_ORBIT(smp_finalize_mask, mycpu->gd_cpuid);
	KKASSERT((curthread->td_flags & TDF_RUNQ) == 0);
}
/*
 * Get SMP fully working before we start initializing devices.
 */
static void
ap_finish(void)
{
	if (bootverbose)
		kprintf("Finish MP startup\n");
	rel_mplock();

	/*
	 * Wait for the active mask to complete, after which all cpus will
	 * be accepting interrupts.
	 */
	mp_finish = 1;
	while (CPUMASK_CMPMASKNEQ(smp_active_mask, smp_startup_mask)) {
		cpu_pause();
		cpu_lfence();
	}

	/*
	 * Wait for the finalization mask to complete, after which all cpus
	 * have completely finished initializing and are entering or are in
	 * their idle thread.
	 *
	 * BSP should have received all required invltlbs but do another
	 * one just in case.
	 */
	cpu_invltlb();
	mp_finish = 2;
	while (CPUMASK_CMPMASKNEQ(smp_finalize_mask, smp_startup_mask)) {
		cpu_pause();
		cpu_lfence();
	}

	while (try_mplock() == 0) {
		cpu_pause();
		cpu_lfence();
	}

	if (bootverbose) {
		kprintf("Active CPU Mask: %016jx\n",
			(uintmax_t)CPUMASK_LOWMASK(smp_active_mask));
	}
}

SYSINIT(finishsmp, SI_BOOT2_FINISH_SMP, SI_ORDER_FIRST, ap_finish, NULL);
/*
 * Interrupts must be hard-disabled by caller
 */
void
cpu_send_ipiq(int dcpu)
{
	if (CPUMASK_TESTBIT(smp_active_mask, dcpu))
		single_apic_ipi(dcpu, XIPIQ_OFFSET, APIC_DELMODE_FIXED);
}

#if 0	/* single_apic_ipi_passive() not working yet */
/*
 * Returns 0 on failure, 1 on success
 */
int
cpu_send_ipiq_passive(int dcpu)
{
	int r = 0;

	if (CPUMASK_TESTBIT(smp_active_mask, dcpu)) {
		r = single_apic_ipi_passive(dcpu, XIPIQ_OFFSET,
					    APIC_DELMODE_FIXED);
	}
	return r;
}
#endif
static void
mp_bsp_simple_setup(void)
{
	struct mdglobaldata *gd;
	size_t ipiq_size;

	/* build our map of 'other' CPUs */
	mycpu->gd_other_cpus = smp_startup_mask;
	CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	gd = (struct mdglobaldata *)mycpu;
	gd->gd_acpi_id = CPUID_TO_ACPIID(mycpu->gd_cpuid);

	ipiq_size = sizeof(struct lwkt_ipiq) * ncpus;
	mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size,
					    VM_SUBSYS_IPIQ);
	bzero(mycpu->gd_ipiq, ipiq_size);

	/* initialize arc4random. */
	arc4_init_pcpu(0);

	if (cpu_feature & CPUID_TSC)
		tsc0_offset = rdtsc();
}
/*
 * CPU TOPOLOGY DETECTION FUNCTIONS
 */

/*
 * Detect Intel topology using CPUID
 * Ref: http://www.intel.com/Assets/PDF/appnote/241618.pdf, pg 41
 */
static void
detect_intel_topology(int count_htt_cores)
{
	int shift = 0;
	int ecx_index = 0;
	int core_plus_logical_bits = 0;
	int cores_per_package;
	int logical_per_package;
	int logical_per_core;
	u_int p[4];

	if (cpu_high >= 0xb) {
		goto FUNC_B;
	} else if (cpu_high >= 0x4) {
		goto FUNC_4;
	} else {
		core_bits = 0;
		for (shift = 0; (1 << shift) < count_htt_cores; ++shift)
			;
		logical_CPU_bits = 1 << shift;
		return;
	}

FUNC_B:
	cpuid_count(0xb, FUNC_B_THREAD_LEVEL, p);

	/* if 0xb not supported - fallback to 0x4 */
	if (p[1] == 0 || (FUNC_B_TYPE(p[2]) != FUNC_B_THREAD_TYPE)) {
		goto FUNC_4;
	}

	logical_CPU_bits = FUNC_B_BITS_SHIFT_NEXT_LEVEL(p[0]);

	ecx_index = FUNC_B_THREAD_LEVEL + 1;
	do {
		cpuid_count(0xb, ecx_index, p);

		/* Check for the Core type in the implemented sub leaves. */
		if (FUNC_B_TYPE(p[2]) == FUNC_B_CORE_TYPE) {
			core_plus_logical_bits = FUNC_B_BITS_SHIFT_NEXT_LEVEL(p[0]);
			break;
		}

		ecx_index++;
	} while (FUNC_B_TYPE(p[2]) != FUNC_B_INVALID_TYPE);

	core_bits = core_plus_logical_bits - logical_CPU_bits;

	return;

FUNC_4:
	cpuid_count(0x4, 0, p);
	cores_per_package = FUNC_4_MAX_CORE_NO(p[0]) + 1;

	logical_per_package = count_htt_cores;
	logical_per_core = logical_per_package / cores_per_package;

	for (shift = 0; (1 << shift) < logical_per_core; ++shift)
		;
	logical_CPU_bits = shift;

	for (shift = 0; (1 << shift) < cores_per_package; ++shift)
		;
	core_bits = shift;
}
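/*
 * Worked example (editor's illustration) for the CPUID 0xb path: on a
 * 2-thread/core, 4-core part, the SMT sub-leaf reports a next-level
 * shift of 1 (logical_CPU_bits = 1) and the core sub-leaf reports a
 * "core plus logical" shift of 3, so core_bits = 3 - 1 = 2.  The APIC
 * ID then decomposes as [package | 2 core bits | 1 thread bit].
 */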
/*
 * Detect AMD topology using CPUID
 * Ref: http://support.amd.com/us/Embedded_TechDocs/25481.pdf, last page
 */
static void
detect_amd_topology(int count_htt_cores)
{
	int shift = 0;

	if ((cpu_feature & CPUID_HTT) && (amd_feature2 & AMDID2_CMP)) {
		if (cpu_procinfo2 & AMDID_COREID_SIZE) {
			core_bits = (cpu_procinfo2 & AMDID_COREID_SIZE) >>
				    AMDID_COREID_SIZE_SHIFT;
		} else {
			core_bits = (cpu_procinfo2 & AMDID_CMP_CORES) + 1;
			for (shift = 0; (1 << shift) < core_bits; ++shift)
				;
			core_bits = shift;
		}
		logical_CPU_bits = count_htt_cores >> core_bits;
		for (shift = 0; (1 << shift) < logical_CPU_bits; ++shift)
			;
		logical_CPU_bits = shift;

		kprintf("core_bits %d logical_CPU_bits %d\n",
			core_bits - logical_CPU_bits, logical_CPU_bits);
		if (amd_feature2 & AMDID2_TOPOEXT) {
			u_int p[4];	/* eax,ebx,ecx,edx */
			int nodes;

			cpuid_count(0x8000001e, 0, p);

			switch (((p[1] >> 8) & 3) + 1) {
			case 1:
				logical_CPU_bits = 0;
				break;
			case 2:
				logical_CPU_bits = 1;
				break;
			case 3:
			case 4:
				logical_CPU_bits = 2;
				break;
			}

			/*
			 * Nodes are kind of a stand-in for packages*sockets,
			 * but can be thought of in terms of Numa domains.
			 */
			nodes = ((p[2] >> 8) & 7) + 1;

			core_bits -= logical_CPU_bits;
			kprintf("%d-way htt, %d Nodes, %d cores/node\n",
				(int)(((p[1] >> 8) & 3) + 1),
				nodes, 1 << core_bits);
		}
	} else {
		if (amd_feature2 & AMDID2_TOPOEXT) {
			u_int p[4];
			int type;
			int level;
			int share_count;
			int i;

			logical_CPU_bits = 0;
			core_bits = 0;

			for (i = 0; i < 256; ++i) {
				cpuid_count(0x8000001d, i, p);
				type = p[0] & 0x1f;
				level = (p[0] >> 5) & 0x7;
				share_count = 1 + ((p[0] >> 14) & 0xfff);

				if (type == 0)
					break;
				if (bootverbose) {
					kprintf("Topology probe i=%2d type=%d "
						"level=%d share_count=%d\n",
						i, type, level, share_count);
				}
				shift = 0;
				while ((1 << shift) < share_count)
					++shift;

				switch (type) {
				case 1:
					logical_CPU_bits = shift;
					break;
				case 2:
					/*
					 * Physical subdivision of a package
					 */
					core_bits = logical_CPU_bits +
						    shift;
					break;
				case 3:
					/*
					 * CPU L1/L2/L3 cache
					 */
					break;
				case 4:
					/*
					 * Package aka chip, equivalent to
					 * a socket or node
					 */
					break;
				}
			}
		} else {
			for (shift = 0; (1 << shift) < count_htt_cores;
			     ++shift)
				;
			core_bits = shift;
			logical_CPU_bits = 0;
		}
	}
}
static void
amd_get_compute_unit_id(void *arg)
{
	u_int regs[4];

	do_cpuid(0x8000001e, regs);
	cpu_node_t *mynode = get_cpu_node_by_cpuid(mycpuid);

	/*
	 * AMD - CPUID Specification September 2010
	 * page 34 - //ComputeUnitID = ebx[0:7]//
	 */
	mynode->compute_unit_id = regs[1] & 0xff;
}
int
fix_amd_topology(void)
{
	cpumask_t mask;
	int i;

	if (cpu_vendor_id != CPU_VENDOR_AMD)
		return -1;
	if ((amd_feature2 & AMDID2_TOPOEXT) == 0)
		return -1;

	CPUMASK_ASSALLONES(mask);
	lwkt_cpusync_simple(mask, amd_get_compute_unit_id, NULL);

	kprintf("Compute unit IDs:\n");
	for (i = 0; i < ncpus; i++) {
		kprintf("%d-%d; \n",
			i, get_cpu_node_by_cpuid(i)->compute_unit_id);
	}
	return 0;
}
/*
 * Calculate
 * - logical_CPU_bits
 * - core_bits
 *
 * With the values above (for AMD or INTEL) we are able to generally
 * detect the CPU topology (number of cores for each level):
 * Ref: http://wiki.osdev.org/Detecting_CPU_Topology_(80x86)
 * Ref: http://www.multicoreinfo.com/research/papers/whitepapers/Intel-detect-topology.pdf
 */
void
detect_cpu_topology(void)
{
	static int topology_detected = 0;
	int count = 0;

	if (topology_detected)
		goto OUT;
	if ((cpu_feature & CPUID_HTT) == 0) {
		core_bits = 0;
		logical_CPU_bits = 0;
		goto OUT;
	}
	count = (cpu_procinfo & CPUID_HTT_CORES) >> CPUID_HTT_CORE_SHIFT;

	if (cpu_vendor_id == CPU_VENDOR_INTEL)
		detect_intel_topology(count);
	else if (cpu_vendor_id == CPU_VENDOR_AMD)
		detect_amd_topology(count);
	topology_detected = 1;

OUT:
	if (bootverbose) {
		kprintf("Bits within APICID: logical_CPU_bits: %d; "
			"core_bits: %d\n",
			logical_CPU_bits, core_bits);
	}
}
/*
 * Interface functions to calculate chip_ID,
 * core_number and logical_number
 * Ref: http://wiki.osdev.org/Detecting_CPU_Topology_(80x86)
 */
int
get_chip_ID(int cpuid)
{
	return get_apicid_from_cpuid(cpuid) >>
	    (logical_CPU_bits + core_bits);
}

int
get_chip_ID_from_APICID(int apicid)
{
	return apicid >> (logical_CPU_bits + core_bits);
}

int
get_core_number_within_chip(int cpuid)
{
	return ((get_apicid_from_cpuid(cpuid) >> logical_CPU_bits) &
		((1 << core_bits) - 1));
}

int
get_logical_CPU_number_within_core(int cpuid)
{
	return (get_apicid_from_cpuid(cpuid) &
		((1 << logical_CPU_bits) - 1));
}
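/*
 * Worked example (editor's illustration): with logical_CPU_bits = 1 and
 * core_bits = 2, APIC ID 0b10110 (22) decomposes as
 *	logical (thread) number = 22 & 0x1        = 0
 *	core number within chip = (22 >> 1) & 0x3 = 3
 *	chip ID                 = 22 >> 3         = 2
 */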