/*
 * Copyright (c) 1996, by Steve Passe
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/mp_machdep.c,v 1.115.2.15 2003/03/14 21:22:35 jhb Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/cons.h>	/* cngetc() */
#include <sys/machintr.h>

#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>

#include <sys/mplock2.h>

#include <machine/smp.h>
#include <machine_base/apic/apicreg.h>
#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <machine/cputypes.h>
#include <machine_base/icu/icu_var.h>
#include <machine_base/apic/ioapic_abi.h>
#include <machine_base/apic/lapic.h>
#include <machine_base/apic/ioapic.h>
#include <machine/psl.h>
#include <machine/segments.h>
#include <machine/tss.h>
#include <machine/specialreg.h>
#include <machine/globaldata.h>
#include <machine/pmap_inval.h>

#include <machine/md_var.h>		/* setidt() */
#include <machine_base/icu/icu.h>	/* IPIs */
#include <machine/intr_machdep.h>	/* IPIs */
#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(KERNBASE + 0x0467)
#define WARMBOOT_SEG		(KERNBASE + 0x0469)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)
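/*
 * For reference: WARMBOOT_OFF/WARMBOOT_SEG correspond to the BIOS warm-boot
 * vector at 0x40:0x67 (offset at physical 0x467, segment at 0x469), mapped
 * through KERNBASE.  CMOS_REG/CMOS_DATA are the standard CMOS index/data I/O
 * ports, BIOS_RESET is the CMOS shutdown status byte, and BIOS_WARM (0x0a)
 * asks the BIOS to resume through the 0x40:0x67 vector after a CPU reset,
 * which is how a freshly reset AP is steered into the boot trampoline.
 */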
/*
 * This code MUST be enabled both here and in mpboot.s.
 * It traces the very early stages of AP boot by placing values in CMOS RAM.
 * It is normally never needed, hence the primitive method of enabling it.
 */
#if defined(CHECK_POINTS)
#define CHECK_READ(A)	 (outb(CMOS_REG, (A)), inb(CMOS_DATA))
#define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))

#define CHECK_INIT(D);				\
	CHECK_WRITE(0x34, (D));			\
	CHECK_WRITE(0x35, (D));			\
	CHECK_WRITE(0x36, (D));			\
	CHECK_WRITE(0x37, (D));			\
	CHECK_WRITE(0x38, (D));			\
	CHECK_WRITE(0x39, (D));

#define CHECK_PRINT(S);				\
	kprintf("%s: %d, %d, %d, %d, %d, %d\n",	\
	   (S),					\
	   CHECK_READ(0x34),			\
	   CHECK_READ(0x35),			\
	   CHECK_READ(0x36),			\
	   CHECK_READ(0x37),			\
	   CHECK_READ(0x38),			\
	   CHECK_READ(0x39));

#else				/* CHECK_POINTS */

#define CHECK_INIT(D)
#define CHECK_PRINT(S)

#endif				/* CHECK_POINTS */
/*
 * Values to send to the POST hardware.
 */
#define MP_BOOTADDRESS_POST	0x10
#define MP_PROBE_POST		0x11
#define MPTABLE_PASS1_POST	0x12

#define MP_START_POST		0x13
#define MP_ENABLE_POST		0x14
#define MPTABLE_PASS2_POST	0x15

#define START_ALL_APS_POST	0x16
#define INSTALL_AP_TRAMP_POST	0x17
#define START_AP_POST		0x18

#define MP_ANNOUNCE_POST	0x19

/** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
int	current_postcode;
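/*
 * current_postcode and the POSTCODE() macro are used purely as breadcrumbs:
 * each major step of the SMP bringup records one of the codes above,
 * presumably by latching it in current_postcode and writing it to the POST
 * diagnostic port, so a hang during AP startup can be localized.
 */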
/** XXX FIXME: what system files declare these??? */
extern struct region_descriptor r_gdt, r_idt;

extern int64_t tsc_offsets[];

/* AP uses this during bootstrap.  Do not staticize.  */
char *bootSTK;
static int bootAP;

/* Hotwire a 0->4MB V==P mapping */
extern pt_entry_t *KPTphys;

/*
 * SMP page table page.  Set up by locore to point to a page table page
 * from which we allocate per-cpu privatespace areas, io_apics, and so
 * forth.
 */
extern pt_entry_t *SMPpt;

struct pcb stoppcbs[MAXCPU];
/*
 * Local data and functions.
 */

static u_int	boot_address;
static int	mp_finish;
static int	mp_finish_lapic;

static int	start_all_aps(u_int boot_addr);
static void	install_ap_tramp(u_int boot_addr);
static int	start_ap(struct mdglobaldata *gd, u_int boot_addr, int smibest);
static int	smitest(void);
static void	mp_bsp_simple_setup(void);

/* which cpus have been started */
static cpumask_t smp_startup_mask = CPUMASK_INITIALIZER_ONLYONE;
/* which cpus have had their lapic initialized */
static cpumask_t smp_lapic_mask = CPUMASK_INITIALIZER_ONLYONE;
/* which cpus are ready for IPIs etc? */
cpumask_t smp_active_mask = CPUMASK_INITIALIZER_ONLYONE;
SYSCTL_LONG(_machdep, OID_AUTO, smp_active, CTLFLAG_RD,
	    &smp_active_mask, 0, "");

/* Local data for detecting CPU TOPOLOGY */
static int core_bits = 0;
static int logical_CPU_bits = 0;
/*
 * Calculate usable address in base memory for AP trampoline code.
 */
u_int
mp_bootaddress(u_int basemem)
{
	POSTCODE(MP_BOOTADDRESS_POST);

	boot_address = basemem & ~0xfff;	/* round down to 4k boundary */
	if ((basemem - boot_address) < bootMP_size)
		boot_address -= 4096;		/* not enough, lower by 4k */

	return boot_address;
}
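/*
 * Example with hypothetical values: basemem = 0x9fc00 (639KB) rounds down to
 * boot_address = 0x9f000, leaving 0xc00 bytes; if bootMP_size were larger
 * than that, the address would be lowered one more page to 0x9e000 so the
 * trampoline fits entirely below the top of base memory.
 */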
/*
 * Print various information about the SMP system hardware and setup.
 */
	POSTCODE(MP_ANNOUNCE_POST);

	kprintf("DragonFly/MP: Multiprocessor motherboard\n");
	kprintf(" cpu0 (BSP): apic id: %2d\n", CPUID_TO_APICID(0));
	for (x = 1; x <= naps; ++x)
		kprintf(" cpu%d (AP): apic id: %2d\n", x, CPUID_TO_APICID(x));

		kprintf(" Warning: APIC I/O disabled\n");
/*
 * AP cpu's call this to sync up protected mode.
 *
 * WARNING! We must ensure that the cpu is sufficiently initialized to
 * be able to use the FP for our optimized bzero/bcopy code before
 * we enter more mainstream C code.
 *
 * WARNING! %fs is not set up on entry.  This routine sets up %fs.
 */
	int	x, myid = bootAP;
	struct mdglobaldata *md;
	struct privatespace *ps;

	ps = &CPU_prvspace[myid];

	gdt_segs[GPRIV_SEL].ssd_base = (int)ps;
	gdt_segs[GPROC0_SEL].ssd_base =
		(int) &ps->mdglobaldata.gd_common_tss;
	ps->mdglobaldata.mi.gd_prvspace = ps;

	for (x = 0; x < NGDT; x++) {
		ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x].sd);
	}

	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	r_gdt.rd_base = (int) &gdt[myid * NGDT];
	lgdt(&r_gdt);			/* does magic intra-segment return */

	mdcpu->gd_currentldt = _default_ldt;

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	gdt[myid * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;

	md = mdcpu;	/* loaded through %fs:0 (mdglobaldata.mi.gd_prvspace) */

	md->gd_common_tss.tss_esp0 = 0;	/* not used until after switch */
	md->gd_common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	md->gd_common_tss.tss_ioopt = (sizeof md->gd_common_tss) << 16;
	md->gd_tss_gdt = &gdt[myid * NGDT + GPROC0_SEL].sd;
	md->gd_common_tssd = *md->gd_tss_gdt;
	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);

	pmap_set_opt();		/* PSE/4MB pages, etc */

	pmap_init_pat();	/* Page Attribute Table */

	/* set up CPU registers and state */

	/* set up FPU state on the AP */

	/* set up SSE registers */
/*******************************************************************
 * local functions and data
 */

/*
 * Start the SMP system
 */
static void
mp_start_aps(void *dummy __unused)
{
	/* start each Application Processor */
	start_all_aps(boot_address);

	mp_bsp_simple_setup();
}

SYSINIT(startaps, SI_BOOT2_START_APS, SI_ORDER_FIRST, mp_start_aps, NULL)
/*
 * start each AP in our list
 */
static int
start_all_aps(u_int boot_addr)
{
	u_long mpbioswarmvec;
	struct mdglobaldata *gd;
	struct privatespace *ps;

	POSTCODE(START_ALL_APS_POST);

	/* install the AP 1st level boot code */
	install_ap_tramp(boot_addr);

	/* save the current value of the warm-start vector */
	mpbioswarmvec = *((u_long *) WARMBOOT_OFF);
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);

	/* setup a vector to our boot code */
	*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
	*((volatile u_short *) WARMBOOT_SEG) = (boot_addr >> 4);
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

	/*
	 * If we have a TSC we can figure out the SMI interrupt rate.
	 * The SMI does not necessarily use a constant rate.  Spend
	 * up to 250ms trying to figure it out.
	 */
	if (cpu_feature & CPUID_TSC) {
		set_apic_timer(275000);
		smilast = read_apic_timer();
		for (x = 0; x < 20 && read_apic_timer(); ++x) {
			smicount = smitest();
			if (smibest == 0 || smilast - smicount < smibest)
				smibest = smilast - smicount;
			smilast = smicount;
		}
		if (smibest > 250000)
			smibest = 0;
		smibest = smibest * (int64_t)1000000 /
			  get_apic_timer_frequency();
	}
	if (smibest)
		kprintf("SMI Frequency (worst case): %d Hz (%d us)\n",
			1000000 / smibest, smibest);
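	/*
	 * Example of the conversion above (hypothetical numbers): if the
	 * shortest observed gap between SMIs was smibest = 50000 APIC timer
	 * ticks and the APIC timer runs at 50MHz, then
	 * smibest * 1000000 / 50000000 = 1000us, reported as "1000 Hz (1000 us)".
	 */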
	/* set up temporary P==V mapping for AP boot */
	/* XXX this is a hack, we should boot the AP on its own stack/PTD */
	kptbase = (uintptr_t)(void *)KPTphys;
	for (x = 0; x < NKPT; x++) {
		PTD[x] = (pd_entry_t)(PG_V | PG_RW |
			((kptbase + x * PAGE_SIZE) & PG_FRAME));
	}
	for (x = 1; x <= naps; ++x) {
		/* This is a bit verbose, it will go away soon.  */

		/* first page of AP's private space */
		pg = x * i386_btop(sizeof(struct privatespace));

		/* allocate new private data page(s) */
		gd = (struct mdglobaldata *)kmem_alloc(&kernel_map,
				MDGLOBALDATA_BASEALLOC_SIZE);
		/* wire it into the private page table page */
		for (i = 0; i < MDGLOBALDATA_BASEALLOC_SIZE; i += PAGE_SIZE) {
			SMPpt[pg + i / PAGE_SIZE] = (pt_entry_t)
				(PG_V | PG_RW | vtophys_pte((char *)gd + i));
		}
		pg += MDGLOBALDATA_BASEALLOC_PAGES;

		SMPpt[pg + 0] = 0;	/* *gd_CMAP1 */
		SMPpt[pg + 1] = 0;	/* *gd_CMAP2 */
		SMPpt[pg + 2] = 0;	/* *gd_CMAP3 */
		SMPpt[pg + 3] = 0;	/* *gd_PMAP1 */

		/* allocate and set up an idle stack data page */
		stack = (char *)kmem_alloc(&kernel_map, UPAGES*PAGE_SIZE);
		for (i = 0; i < UPAGES; i++) {
			SMPpt[pg + 4 + i] = (pt_entry_t)
				(PG_V | PG_RW | vtophys_pte(PAGE_SIZE * i + stack));
		}

		gd = &CPU_prvspace[x].mdglobaldata;	/* official location */
		bzero(gd, sizeof(*gd));
		gd->mi.gd_prvspace = ps = &CPU_prvspace[x];

		/* prime data page for it to use */
		mi_gdinit(&gd->mi, x);

		gd->gd_CMAP1 = &SMPpt[pg + 0];
		gd->gd_CMAP2 = &SMPpt[pg + 1];
		gd->gd_CMAP3 = &SMPpt[pg + 2];
		gd->gd_PMAP1 = &SMPpt[pg + 3];
		gd->gd_CADDR1 = ps->CPAGE1;
		gd->gd_CADDR2 = ps->CPAGE2;
		gd->gd_CADDR3 = ps->CPAGE3;
		gd->gd_PADDR1 = (unsigned *)ps->PPAGE1;

		/*
		 * Per-cpu pmap for get_ptbase().
		 */
		gd->gd_GDADDR1 = (unsigned *)
			kmem_alloc_nofault(&kernel_map, SEG_SIZE, SEG_SIZE);
		gd->gd_GDMAP1 = &PTD[(vm_offset_t)gd->gd_GDADDR1 >> PDRSHIFT];

		ipiq_size = sizeof(struct lwkt_ipiq) * (naps + 1);
		gd->mi.gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size);
		bzero(gd->mi.gd_ipiq, ipiq_size);

		/*
		 * Setup the AP boot stack
		 */
		bootSTK = &ps->idlestack[UPAGES*PAGE_SIZE/2];
		/* attempt to start the Application Processor */
		CHECK_INIT(99);			/* setup checkpoints */
		if (!start_ap(gd, boot_addr, smibest)) {
			kprintf("AP #%d (PHY# %d) failed!\n", x,
				CPUID_TO_APICID(x));
			CHECK_PRINT("trace");	/* show checkpoints */
			/* better panic as the AP may be running loose */
			kprintf("panic y/n? [y] ");

		}
		CHECK_PRINT("trace");		/* show checkpoints */
	}

	/* set ncpus to 1 + highest logical cpu.  Not all may have come up */
	ncpus = x;

	/* ncpus2 -- ncpus rounded down to the nearest power of 2 */
	for (shift = 0; (1 << shift) <= ncpus; ++shift)
		;
	--shift;
	ncpus2_shift = shift;
	ncpus2 = 1 << shift;
	ncpus2_mask = ncpus2 - 1;

	/* ncpus_fit -- ncpus rounded up to the nearest power of 2 */
	if ((1 << shift) < ncpus)
		++shift;
	ncpus_fit = 1 << shift;
	ncpus_fit_mask = ncpus_fit - 1;
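	/*
	 * Example: with ncpus = 6 the loop above exits at shift = 3 and the
	 * decrement leaves shift = 2, so ncpus2 = 4 (rounded down) with
	 * ncpus2_mask = 3; since (1 << 2) < 6 the shift is then bumped once
	 * more, giving ncpus_fit = 8 (rounded up) with ncpus_fit_mask = 7.
	 */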
	/* build our map of 'other' CPUs */
	mycpu->gd_other_cpus = smp_startup_mask;
	CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	ipiq_size = sizeof(struct lwkt_ipiq) * ncpus;
	mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size);
	bzero(mycpu->gd_ipiq, ipiq_size);

	/* restore the warmstart vector */
	*(u_long *) WARMBOOT_OFF = mpbioswarmvec;
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);

	/*
	 * NOTE!  The idlestack for the BSP was set up by locore.  Finish
	 * up, clean out the P==V mapping we did earlier.
	 */
	for (x = 0; x < NKPT; x++)
		PTD[x] = 0;

	/*
	 * Wait for all APs to finish initializing their LAPIC.
	 */
	kprintf("SMP: Waiting APs LAPIC initialization\n");
	if (cpu_feature & CPUID_TSC)
		tsc0_offset = rdtsc();

	while (CPUMASK_CMPMASKNEQ(smp_lapic_mask, smp_startup_mask)) {
		if (cpu_feature & CPUID_TSC)
			tsc0_offset = rdtsc();
	}
	while (try_mplock() == 0)
		;

	/* number of APs actually started */
	return ncpus - 1;
}
/*
 * load the 1st level AP boot code into base memory.
 */

/* targets for relocation */
extern void bigJump(void);
extern void bootCodeSeg(void);
extern void bootDataSeg(void);
extern void MPentry(void);
extern u_int MP_GDT;
extern u_int mp_gdtbase;
static void
install_ap_tramp(u_int boot_addr)
{
	int	x;
	int	size = *(int *) ((u_long) & bootMP_size);
	u_char *src = (u_char *) ((u_long) bootMP);
	u_char *dst = (u_char *) boot_addr + KERNBASE;
	u_int	boot_base = (u_int) bootMP;
	u_int8_t  *dst8;
	u_int16_t *dst16;
	u_int32_t *dst32;

	POSTCODE(INSTALL_AP_TRAMP_POST);

	for (x = 0; x < size; ++x)
		*dst++ = *src++;

	/*
	 * modify addresses in code we just moved to basemem. unfortunately we
	 * need fairly detailed info about mpboot.s for this to work.  changes
	 * to mpboot.s might require changes here.
	 */

	/* boot code is located in KERNEL space */
	dst = (u_char *) boot_addr + KERNBASE;

	/* modify the lgdt arg */
	dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base));
	*dst32 = boot_addr + ((u_int) & MP_GDT - boot_base);

	/* modify the ljmp target for MPentry() */
	dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
	*dst32 = ((u_int) MPentry - KERNBASE);

	/* modify the target for boot code segment */
	dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
	dst8 = (u_int8_t *) (dst16 + 1);
	*dst16 = boot_addr & 0xffff;
	*dst8 = (boot_addr >> 16) & 0xff;

	/* modify the target for boot data segment */
	dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
	dst8 = (u_int8_t *) (dst16 + 1);
	*dst16 = boot_addr & 0xffff;
	*dst8 = (boot_addr >> 16) & 0xff;
}
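/*
 * The patching above fixes up the position-dependent pieces of mpboot.s
 * after the copy: the lgdt argument is pointed at the relocated MP_GDT,
 * the 32-bit ljmp target is aimed at MPentry(), and the 16+8 bit values
 * written at bootCodeSeg/bootDataSeg look like the low and middle base
 * address bytes of the boot code/data descriptors, which must equal
 * boot_addr for the trampoline's segments to resolve correctly.
 */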
/*
 * This function starts the AP (application processor) identified by the
 * APIC ID 'physical_cpu'.  It does quite a "song and dance" to accomplish
 * this.  This is necessary because of the nuances of the different hardware
 * we might encounter.  It ain't pretty, but it seems to work.
 *
 * NOTE: eventually an AP gets to ap_init(), which is called just
 *	 before the AP goes into the LWKT scheduler's idle loop.
 */
static int
start_ap(struct mdglobaldata *gd, u_int boot_addr, int smibest)
{
	int	physical_cpu;
	int	vector;
	u_long	icr_lo, icr_hi;

	POSTCODE(START_AP_POST);

	/* get the PHYSICAL APIC ID# */
	physical_cpu = CPUID_TO_APICID(gd->mi.gd_cpuid);

	/* calculate the vector */
	vector = (boot_addr >> 12) & 0xff;
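	/*
	 * The STARTUP IPI encodes the AP's real-mode entry point as a page
	 * number: the AP begins executing at physical address (vector << 12),
	 * i.e. at segment (vector << 8), offset 0.  That is why boot_addr
	 * must be a page-aligned address below 1MB and why only bits 12-19
	 * are kept here.
	 */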
	/* We don't want anything interfering */

	/* Make sure the target cpu sees everything */

	/*
	 * Try to detect when an SMI has occurred, wait up to 200ms.
	 *
	 * If an SMI occurs during an AP reset but before we issue
	 * the STARTUP command, the AP may brick.  To work around
	 * this problem we hold off doing the AP startup until
	 * after we have detected the SMI.  Hopefully another SMI
	 * will not occur before we finish the AP startup.
	 *
	 * Retries don't seem to help.  SMIs have a window of opportunity
	 * and if USB->legacy keyboard emulation is enabled in the BIOS
	 * the interrupt rate can be quite high.
	 *
	 * NOTE: Don't worry about the L1 cache load, it might bloat
	 *	 ldelta a little but ndelta will be so huge when the SMI
	 *	 occurs the detection logic will still work fine.
	 */
	set_apic_timer(200000);

	/*
	 * First we do an INIT/RESET IPI: this INIT IPI might be run,
	 * resetting and running the target CPU.  OR this INIT IPI might be
	 * latched (P5 bug), the CPU waiting for a STARTUP IPI.  OR this
	 * INIT IPI might be ignored.
	 *
	 * see apic/apicreg.h for icr bit definitions.
	 *
	 * TIME CRITICAL CODE, DO NOT DO ANY KPRINTFS IN THE HOT PATH.
	 */

	/*
	 * Setup the address for the target AP.  We can setup
	 * icr_hi once and then just trigger operations with
	 * icr_lo.
	 */
	icr_hi = lapic->icr_hi & ~APIC_ID_MASK;
	icr_hi |= (physical_cpu << 24);
	icr_lo = lapic->icr_lo & 0xfff00000;
	lapic->icr_hi = icr_hi;
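	/*
	 * For reference, the icr_lo values used below decode as follows (see
	 * apic/apicreg.h): 0x0000c500 is an INIT request with the level and
	 * assert bits set, 0x00008500 is the matching INIT deassert, and
	 * 0x00000600 | vector is a STARTUP request carrying the trampoline's
	 * page number.  icr_hi already holds the destination APIC ID, so each
	 * write to icr_lo fires one IPI at the target AP.
	 */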
	/*
	 * Do an INIT IPI: assert RESET
	 *
	 * Use edge triggered mode to assert INIT
	 */
	lapic->icr_lo = icr_lo | 0x0000c500;
	while (lapic->icr_lo & APIC_DELSTAT_MASK)
		 /* spin */ ;

	/*
	 * The spec calls for a 10ms delay but we may have to use a
	 * MUCH lower delay to avoid bricking an AP due to a fast SMI
	 * interrupt.  We have other loops here too and dividing by 2
	 * doesn't seem to be enough even after subtracting 350us,
	 * so we divide by 4.
	 *
	 * Our minimum delay is 150uS, maximum is 10ms.  If no SMI
	 * interrupt was detected we use the full 10ms.
	 */
	if (smibest == 0)
		u_sleep(10000);
	else if (smibest < 150 * 4 + 350)
		u_sleep(150);
	else if ((smibest - 350) / 4 < 10000)
		u_sleep((smibest - 350) / 4);
	else
		u_sleep(10000);

	/*
	 * Do an INIT IPI: deassert RESET
	 *
	 * Use level triggered mode to deassert.  It is unclear
	 * why we need to do this.
	 */
	lapic->icr_lo = icr_lo | 0x00008500;
	while (lapic->icr_lo & APIC_DELSTAT_MASK)
		 /* spin */ ;
	u_sleep(150);				/* wait 150us */

	/*
	 * Next we do a STARTUP IPI: the previous INIT IPI might still be
	 * latched (P5 bug), in which case this 1st STARTUP would terminate
	 * immediately and the previously started INIT IPI would continue.
	 * OR the previous INIT IPI has already run and this STARTUP IPI
	 * will run.  OR the previous INIT IPI was ignored and this STARTUP
	 * IPI will run.
	 */
	lapic->icr_lo = icr_lo | 0x00000600 | vector;
	while (lapic->icr_lo & APIC_DELSTAT_MASK)
		 /* spin */ ;
	u_sleep(200);				/* wait ~200uS */

	/*
	 * Finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run IF
	 * the previous STARTUP IPI was cancelled by a latched INIT IPI.  OR
	 * this STARTUP IPI will be ignored, as only ONE STARTUP IPI is
	 * recognized after hardware RESET or INIT IPI.
	 */
	lapic->icr_lo = icr_lo | 0x00000600 | vector;
	while (lapic->icr_lo & APIC_DELSTAT_MASK)
		 /* spin */ ;

	/* Resume normal operation */

	/* wait for it to start, see ap_init() */
	set_apic_timer(5000000);		/* == 5 seconds */
	while (read_apic_timer()) {
		if (CPUMASK_TESTBIT(smp_startup_mask, gd->mi.gd_cpuid))
			return 1;	/* return SUCCESS */
	}

	return 0;	/* return FAILURE */
}
static int
smitest(void)
{

	while (read_apic_timer()) {

		for (count = 0; count < 100; ++count)
			ntsc = rdtsc();	/* force loop to occur */

		ndelta = ntsc - ltsc;

		if (ndelta > ldelta * 2)
			break;

		ldelta = ntsc - ltsc;
	}
	return(read_apic_timer());
}
/*
 * Lazy flush the TLB on all other CPU's.  DEPRECATED.
 *
 * If for some reason we were unable to start all cpus we cannot safely
 * use broadcast IPIs.
 */

static cpumask_t smp_invltlb_req;
#define SMP_INVLTLB_DEBUG
void
smp_invltlb(void)
{
	struct mdglobaldata *md = mdcpu;
#ifdef SMP_INVLTLB_DEBUG
	long count = 0;
#endif
	cpumask_t tmpmask;
	cpumask_t tmpmask2;

	crit_enter_gd(&md->mi);
	md->gd_invltlb_ret = 0;
	++md->mi.gd_cnt.v_smpinvltlb;
	ATOMIC_CPUMASK_ORMASK(smp_invltlb_req, md->mi.gd_cpumask);
#ifdef SMP_INVLTLB_DEBUG
#endif
	if (CPUMASK_CMPMASKEQ(smp_startup_mask, smp_active_mask)) {
		all_but_self_ipi(XINVLTLB_OFFSET);
	} else {
		tmpmask = smp_active_mask;
		CPUMASK_NANDMASK(tmpmask, md->mi.gd_cpumask);
		selected_apic_ipi(tmpmask, XINVLTLB_OFFSET, APIC_DELMODE_FIXED);
	}

#ifdef SMP_INVLTLB_DEBUG
	kprintf("smp_invltlb: ipi sent\n");
#endif

	for (;;) {
		tmpmask = smp_active_mask;
		tmpmask2 = smp_active_mask;
		CPUMASK_ANDMASK(tmpmask, md->gd_invltlb_ret);
		CPUMASK_NANDMASK(tmpmask, md->mi.gd_cpumask);
		CPUMASK_NANDMASK(tmpmask2, md->mi.gd_cpumask);
		if (CPUMASK_CMPMASKEQ(tmpmask, tmpmask2))
			break;
#ifdef SMP_INVLTLB_DEBUG
		if (++count == 400000000) {
			kprintf("smp_invltlb: endless loop %08lx %08lx, "
				"eflags %016lx retry",
				(long)md->gd_invltlb_ret,
				(long)smp_invltlb_req,
				(long)read_eflags());
			__asm __volatile ("sti");

			tmpmask = smp_active_mask;
			CPUMASK_NANDMASK(tmpmask, md->gd_invltlb_ret);
			CPUMASK_NANDMASK(tmpmask, md->mi.gd_cpumask);
			bcpu = BSFCPUMASK(tmpmask);

			kprintf("bcpu %d\n", bcpu);
			xgd = globaldata_find(bcpu);
			kprintf("thread %p %s\n",
				xgd->gd_curthread,
				xgd->gd_curthread->td_comm);
		}
#endif
	}

	ATOMIC_CPUMASK_NANDMASK(smp_invltlb_req, md->mi.gd_cpumask);
	crit_exit_gd(&md->mi);
}
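/*
 * The handshake above: the requesting cpu advertises itself in
 * smp_invltlb_req, IPIs every other active cpu with XINVLTLB, and then
 * spins until gd_invltlb_ret (filled in by smp_invltlb_intr() on each
 * target) covers all of smp_active_mask except itself, so the flush is
 * not considered complete until every active cpu has acknowledged it.
 */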
/*
 * Called from Xinvltlb assembly with interrupts disabled.  We didn't
 * bother to bump the critical section count or nested interrupt count
 * so only do very low level operations here.
 */
void
smp_invltlb_intr(void)
{
	struct mdglobaldata *md = mdcpu;
	struct mdglobaldata *omd;
	cpumask_t mask;
	int cpu;

	mask = smp_invltlb_req;
	cpu_invltlb();
	while (CPUMASK_TESTNZERO(mask)) {
		cpu = BSFCPUMASK(mask);
		CPUMASK_NANDBIT(mask, cpu);
		omd = (struct mdglobaldata *)globaldata_find(cpu);
		ATOMIC_CPUMASK_ORMASK(omd->gd_invltlb_ret, md->mi.gd_cpumask);
	}
}
static void
cpu_wbinvd_on_all_cpus_callback(void *arg)
{
	wbinvd();
}
/*
 * When called the executing CPU will send an IPI to all other CPUs
 * requesting that they halt execution.
 *
 * Usually (but not necessarily) called with 'other_cpus' as its arg.
 *
 *  - Signals all CPUs in map to stop.
 *  - Waits for each to stop.
 *
 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
 *	      from executing at same time.
 */
int
stop_cpus(cpumask_t map)
{
	cpumask_t tmpmask;

	map &= smp_active_mask;

	/* send the Xcpustop IPI to all CPUs in map */
	selected_apic_ipi(map, XCPUSTOP_OFFSET, APIC_DELMODE_FIXED);

	do {
		tmpmask = map;
		CPUMASK_ANDMASK(tmpmask, stopped_cpus);
	} while (CPUMASK_CMPMASKNEQ(tmpmask, map));

	return 1;
}
/*
 * Called by a CPU to restart stopped CPUs.
 *
 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
 *
 *  - Signals all CPUs in map to restart.
 *  - Waits for each to restart.
 */
int
restart_cpus(cpumask_t map)
{
	/* signal other cpus to restart */
	started_cpus = map & smp_active_mask;

	/* wait for each to clear its bit */
	while (CPUMASK_TESTMASK(stopped_cpus, map) != 0)
		;

	return 1;
}
/*
 * This is called once the mpboot code has gotten us properly relocated
 * and the MMU turned on, etc.  ap_init() is actually the idle thread,
 * and when it returns the scheduler will call the real cpu_idle() main
 * loop for the idlethread.  Interrupts are disabled on entry and should
 * remain disabled at return.
 */
void
ap_init(void)
{
	int	cpu_id;

	/*
	 * Adjust smp_startup_mask to signal the BSP that we have started
	 * up successfully.  Note that we do not yet hold the BGL.  The BSP
	 * is waiting for our signal.
	 *
	 * We can't set our bit in smp_active_mask yet because we are holding
	 * interrupts physically disabled and remote cpus could deadlock
	 * trying to send us an IPI.
	 */
	ATOMIC_CPUMASK_ORBIT(smp_startup_mask, mycpu->gd_cpuid);
	/*
	 * Interlock for LAPIC initialization.  Wait until mp_finish_lapic is
	 * non-zero, then get the MP lock.
	 *
	 * Note: We are in a critical section.
	 *
	 * Note: we are the idle thread, we can only spin.
	 *
	 * Note: The load fence is memory volatile and prevents the compiler
	 * from improperly caching mp_finish_lapic, and the cpu from improperly
	 * caching it.
	 */
	while (mp_finish_lapic == 0)
		cpu_lfence();
	while (try_mplock() == 0)
		;

	if (cpu_feature & CPUID_TSC) {
		/*
		 * The BSP is constantly updating tsc0_offset, figure out
		 * the relative difference to synchronize ktrdump.
		 */
		tsc_offsets[mycpu->gd_cpuid] = rdtsc() - tsc0_offset;
	}
	/* BSP may have changed PTD while we're waiting for the lock */

#if defined(I586_CPU) && !defined(NO_F00F_HACK)

#endif

	/* Build our map of 'other' CPUs. */
	mycpu->gd_other_cpus = smp_startup_mask;
	CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	/* A quick check from sanity claus */
	cpu_id = APICID_TO_CPUID((lapic->id & 0xff000000) >> 24);
	if (mycpu->gd_cpuid != cpu_id) {
		kprintf("SMP: assigned cpuid = %d\n", mycpu->gd_cpuid);
		kprintf("SMP: actual cpuid = %d\n", cpu_id);
		kprintf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]);
		panic("cpuid mismatch! boom!!");
	}

	/* Initialize AP's local APIC for irq's */

	/* LAPIC initialization is done */
	ATOMIC_CPUMASK_ORBIT(smp_lapic_mask, mycpu->gd_cpuid);

	/* Let BSP move onto the next initialization stage */
	/*
	 * Interlock for finalization.  Wait until mp_finish is non-zero,
	 * then get the MP lock.
	 *
	 * Note: We are in a critical section.
	 *
	 * Note: we are the idle thread, we can only spin.
	 *
	 * Note: The load fence is memory volatile and prevents the compiler
	 * from improperly caching mp_finish, and the cpu from improperly
	 * caching it.
	 */
	while (mp_finish == 0)
		cpu_lfence();
	while (try_mplock() == 0)
		;

	/* BSP may have changed PTD while we're waiting for the lock */

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();
	/*
	 * Once we go active we must process any IPIQ messages that may
	 * have been queued, because no actual IPI will occur until we
	 * set our bit in the smp_active_mask.  If we don't the IPI
	 * message interlock could be left set which would also prevent
	 * further IPIs.
	 *
	 * The idle loop doesn't expect the BGL to be held and while
	 * lwkt_switch() normally cleans things up this is a special case
	 * because we are returning almost directly into the idle loop.
	 *
	 * The idle thread is never placed on the runq, make sure
	 * nothing we've done put it there.
	 */
	KKASSERT(get_mplock_count(curthread) == 1);
	ATOMIC_CPUMASK_ORBIT(smp_active_mask, mycpu->gd_cpuid);

	/*
	 * Enable interrupts here.  idle_restore will also do it, but
	 * doing it here lets us clean up any strays that got posted to
	 * the CPU during the AP boot while we are still in a critical
	 * section.
	 */
	__asm __volatile("sti; pause; pause"::);
	bzero(mdcpu->gd_ipending, sizeof(mdcpu->gd_ipending));

	initclocks_pcpu();	/* clock interrupts (via IPIs) */
	lwkt_process_ipiq();

	/*
	 * Releasing the mp lock lets the BSP finish up the SMP init
	 */
	rel_mplock();
	KKASSERT((curthread->td_flags & TDF_RUNQ) == 0);
}
/*
 * Get SMP fully working before we start initializing devices.
 */
static void
ap_finish(void)
{
	kprintf("Finish MP startup\n");
	mp_finish = 1;
	while (smp_active_mask != smp_startup_mask)
		cpu_lfence();
	while (try_mplock() == 0)
		;

	kprintf("Active CPU Mask: %08x\n", smp_active_mask);
}

SYSINIT(finishsmp, SI_BOOT2_FINISH_SMP, SI_ORDER_FIRST, ap_finish, NULL)
void
cpu_send_ipiq(int dcpu)
{
	if (CPUMASK_TESTBIT(smp_active_mask, dcpu))
		single_apic_ipi(dcpu, XIPIQ_OFFSET, APIC_DELMODE_FIXED);
}

#if 0	/* single_apic_ipi_passive() not working yet */
/*
 * Returns 0 on failure, 1 on success
 */
int
cpu_send_ipiq_passive(int dcpu)
{
	int r = 0;

	if (CPUMASK_TESTBIT(smp_active_mask, dcpu)) {
		r = single_apic_ipi_passive(dcpu, XIPIQ_OFFSET,
					    APIC_DELMODE_FIXED);
	}
	return r;
}
#endif
static void
mp_bsp_simple_setup(void)
{
	size_t ipiq_size;

	/* build our map of 'other' CPUs */
	mycpu->gd_other_cpus = smp_startup_mask;
	CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	ipiq_size = sizeof(struct lwkt_ipiq) * ncpus;
	mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size);
	bzero(mycpu->gd_ipiq, ipiq_size);

	if (cpu_feature & CPUID_TSC)
		tsc0_offset = rdtsc();
}
/*
 * CPU TOPOLOGY DETECTION FUNCTIONS
 */

/* Detect intel topology using CPUID
 * Ref: http://www.intel.com/Assets/PDF/appnote/241618.pdf, pg 41
 */
static void
detect_intel_topology(int count_htt_cores)
{
	int core_plus_logical_bits = 0;
	int cores_per_package;
	int logical_per_package;
	int logical_per_core;

	if (cpu_high >= 0xb) {

	} else if (cpu_high >= 0x4) {

	}

	for (shift = 0; (1 << shift) < count_htt_cores; ++shift)
		;
	logical_CPU_bits = 1 << shift;

	cpuid_count(0xb, FUNC_B_THREAD_LEVEL, p);

	/* if 0xb not supported - fallback to 0x4 */
	if (p[1] == 0 || (FUNC_B_TYPE(p[2]) != FUNC_B_THREAD_TYPE)) {

	}

	logical_CPU_bits = FUNC_B_BITS_SHIFT_NEXT_LEVEL(p[0]);

	ecx_index = FUNC_B_THREAD_LEVEL + 1;
	do {
		cpuid_count(0xb, ecx_index, p);
		/* Check for the Core type in the implemented sub leaves. */
		if (FUNC_B_TYPE(p[2]) == FUNC_B_CORE_TYPE) {
			core_plus_logical_bits = FUNC_B_BITS_SHIFT_NEXT_LEVEL(p[0]);

		}

	} while (FUNC_B_TYPE(p[2]) != FUNC_B_INVALID_TYPE);

	core_bits = core_plus_logical_bits - logical_CPU_bits;

	cpuid_count(0x4, 0, p);
	cores_per_package = FUNC_4_MAX_CORE_NO(p[0]) + 1;

	logical_per_package = count_htt_cores;
	logical_per_core = logical_per_package / cores_per_package;

	for (shift = 0; (1 << shift) < logical_per_core; ++shift)
		;
	logical_CPU_bits = shift;

	for (shift = 0; (1 << shift) < cores_per_package; ++shift)
		;
	core_bits = shift;
}
/* Detect AMD topology using CPUID
 * Ref: http://support.amd.com/us/Embedded_TechDocs/25481.pdf, last page
 */
static void
detect_amd_topology(int count_htt_cores)
{
	int shift = 0;

	if ((cpu_feature & CPUID_HTT)
	    && (amd_feature2 & AMDID2_CMP)) {

		if (cpu_procinfo2 & AMDID_COREID_SIZE) {
			core_bits = (cpu_procinfo2 & AMDID_COREID_SIZE)
			    >> AMDID_COREID_SIZE_SHIFT;
		} else {
			core_bits = (cpu_procinfo2 & AMDID_CMP_CORES) + 1;
			for (shift = 0; (1 << shift) < core_bits; ++shift);
			core_bits = shift;
		}

		logical_CPU_bits = count_htt_cores >> core_bits;
		for (shift = 0; (1 << shift) < logical_CPU_bits; ++shift)
			;
		logical_CPU_bits = shift;
	} else {
		for (shift = 0; (1 << shift) < count_htt_cores; ++shift)
			;
		core_bits = shift;
		logical_CPU_bits = 0;
	}
}
/*
 *  - logical_CPU_bits
 *  - core_bits
 *
 * With the values above (for AMD or INTEL) we are able to generally
 * detect the CPU topology (number of cores for each level):
 * Ref: http://wiki.osdev.org/Detecting_CPU_Topology_(80x86)
 * Ref: http://www.multicoreinfo.com/research/papers/whitepapers/Intel-detect-topology.pdf
 */
void
detect_cpu_topology(void)
{
	static int topology_detected = 0;
	int count = 0;

	if (topology_detected) {

	}

	if ((cpu_feature & CPUID_HTT) == 0) {
		core_bits = 0;
		logical_CPU_bits = 0;

	}
	count = (cpu_procinfo & CPUID_HTT_CORES)
	    >> CPUID_HTT_CORE_SHIFT;

	if (cpu_vendor_id == CPU_VENDOR_INTEL) {
		detect_intel_topology(count);
	} else if (cpu_vendor_id == CPU_VENDOR_AMD) {
		detect_amd_topology(count);
	}

	kprintf("Bits within APICID: logical_CPU_bits: %d; core_bits: %d\n",
	    logical_CPU_bits, core_bits);

	topology_detected = 1;
}
/* Interface functions to calculate chip_ID,
 * core_number and logical_number
 * Ref: http://wiki.osdev.org/Detecting_CPU_Topology_(80x86)
 */
int
get_chip_ID(int cpuid)
{
	return get_apicid_from_cpuid(cpuid) >>
	    (logical_CPU_bits + core_bits);
}

int
get_core_number_within_chip(int cpuid)
{
	return (get_apicid_from_cpuid(cpuid) >> logical_CPU_bits) &
	    ((1 << core_bits) - 1);
}

int
get_logical_CPU_number_within_core(int cpuid)
{
	return get_apicid_from_cpuid(cpuid) &
	    ((1 << logical_CPU_bits) - 1);
}
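/*
 * Worked example (hypothetical topology): with logical_CPU_bits = 1 and
 * core_bits = 2, an APIC ID of 13 (binary 01101) decodes as logical CPU
 * 13 & 0x1 = 1 within its core, core (13 >> 1) & 0x3 = 2 within its chip,
 * and chip ID 13 >> 3 = 1.
 */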