/*
 * Copyright (c) 1996, by Steve Passe
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/mp_machdep.c,v 1.115.2.15 2003/03/14 21:22:35 jhb Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/cons.h>	/* cngetc() */
#include <sys/machintr.h>

#include <sys/mplock2.h>

#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>

#include <machine/smp.h>
#include <machine_base/apic/apicreg.h>
#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <machine_base/apic/lapic.h>
#include <machine_base/apic/ioapic.h>
#include <machine/psl.h>
#include <machine/segments.h>
#include <machine/tss.h>
#include <machine/specialreg.h>
#include <machine/globaldata.h>
#include <machine/pmap_inval.h>

#include <machine/md_var.h>		/* setidt() */
#include <machine_base/icu/icu.h>	/* IPIs */
#include <machine_base/apic/ioapic_abi.h>
#include <machine/intr_machdep.h>	/* IPIs */

#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(KERNBASE + 0x0467)
#define WARMBOOT_SEG		(KERNBASE + 0x0469)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)

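/*
 * Illustrative sketch (not part of the original boot path): the CMOS
 * index/data pair above drives the warm-start handshake used later in
 * this file.  Requesting a "warm start" (skip POST, jump through the
 * 40:67 vector) would look like:
 */
#if 0
static void
example_request_warm_start(void)
{
	outb(CMOS_REG, BIOS_RESET);	/* select shutdown status byte 0x0f */
	outb(CMOS_DATA, BIOS_WARM);	/* 0x0a: warm start, skip POST */
}
#endif
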
/*
 * This code MUST be enabled here and in mpboot.s.
 * It follows the very early stages of AP boot by placing values in CMOS ram,
 * and it NORMALLY will never be needed, hence the primitive method for
 * enabling it.
 */
#if defined(CHECK_POINTS)
#define CHECK_READ(A)	 (outb(CMOS_REG, (A)), inb(CMOS_DATA))
#define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))

#define CHECK_INIT(D)				\
	CHECK_WRITE(0x34, (D));			\
	CHECK_WRITE(0x35, (D));			\
	CHECK_WRITE(0x36, (D));			\
	CHECK_WRITE(0x37, (D));			\
	CHECK_WRITE(0x38, (D));			\
	CHECK_WRITE(0x39, (D))
#define CHECK_PRINT(S)				\
	kprintf("%s: %d, %d, %d, %d, %d, %d\n",	\
	    (S),				\
	    CHECK_READ(0x34),			\
	    CHECK_READ(0x35),			\
	    CHECK_READ(0x36),			\
	    CHECK_READ(0x37),			\
	    CHECK_READ(0x38),			\
	    CHECK_READ(0x39))

#else				/* CHECK_POINTS */

#define CHECK_INIT(D)
#define CHECK_PRINT(S)

#endif				/* CHECK_POINTS */

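/*
 * Illustrative usage sketch (assumes CHECK_POINTS is enabled): the BSP
 * seeds the checkpoint bytes before kicking an AP and dumps them if the
 * AP fails to come up; mpboot.s overwrites 0x34..0x39 as it progresses.
 */
#if 0
static void
example_checkpoint_usage(void)
{
	CHECK_INIT(99);			/* seed CMOS bytes 0x34..0x39 */
	/* ... issue the INIT/STARTUP IPIs here ... */
	CHECK_PRINT("trace");		/* show how far the AP got */
}
#endif
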
/*
 * Values to send to the POST hardware.
 */
#define MP_BOOTADDRESS_POST	0x10
#define MP_PROBE_POST		0x11
#define MPTABLE_PASS1_POST	0x12

#define MP_START_POST		0x13
#define MP_ENABLE_POST		0x14
#define MPTABLE_PASS2_POST	0x15

#define START_ALL_APS_POST	0x16
#define INSTALL_AP_TRAMP_POST	0x17
#define START_AP_POST		0x18

#define MP_ANNOUNCE_POST	0x19

/** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
int	current_postcode;

/** XXX FIXME: what system files declare these??? */
extern struct region_descriptor r_gdt, r_idt;

int	mp_naps;		/* # of application processors */

static int64_t tsc0_offset;
extern int64_t tsc_offsets[];

#ifdef SMP /* APIC-IO */
struct apic_intmapinfo	int_to_apicintpin[APIC_INTMAPSIZE];
#endif

/*
 * APIC ID logical/physical mapping structures.
 * We oversize these to simplify boot-time config.
 */
int	cpu_num_to_apic_id[NAPICID];
int	apic_id_to_logical[NAPICID];

/* AP uses this during bootstrap.  Do not staticize.  */
char *bootSTK;
static int bootAP;

struct pcb stoppcbs[MAXCPU];

extern inthand_t IDTVEC(fast_syscall), IDTVEC(fast_syscall32);

/*
 * Local data and functions.
 */

static u_int	boot_address;
static int	mp_finish;
static int	mp_finish_lapic;

static void	mp_enable(u_int boot_addr);

static int	start_all_aps(u_int boot_addr);
static void	install_ap_tramp(u_int boot_addr);
static int	start_ap(struct mdglobaldata *gd, u_int boot_addr, int smibest);
static int	smitest(void);

static cpumask_t smp_startup_mask = 1;	/* which cpus have been started */
static cpumask_t smp_lapic_mask = 1;	/* which cpus have had their lapic initialized */
cpumask_t smp_active_mask = 1;		/* which cpus are ready for IPIs etc? */
SYSCTL_INT(_machdep, OID_AUTO, smp_active, CTLFLAG_RD, &smp_active_mask, 0, "");
static u_int	bootMP_size;
static u_int	base_memory;

/* Trampoline for the APs; defined in mpboot.s */
extern u_char	mptramp_start[];
extern u_char	mptramp_end[];
extern u_int32_t mptramp_pagetables;

/*
 * Calculate usable address in base memory for AP trampoline code.
 */
u_int
mp_bootaddress(u_int basemem)
{
	POSTCODE(MP_BOOTADDRESS_POST);

	base_memory = basemem;

	bootMP_size = mptramp_end - mptramp_start;
	boot_address = trunc_page(basemem * 1024); /* round down to 4k boundary */
	if (((basemem * 1024) - boot_address) < bootMP_size)
		boot_address -= PAGE_SIZE;	/* not enough, lower by 4k */
	/* 3 levels of page table pages */
	mptramp_pagetables = boot_address - (PAGE_SIZE * 3);

	return mptramp_pagetables;
}

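/*
 * Worked example (illustrative): with basemem = 639 (KB, a typical
 * value below the EBDA), basemem * 1024 = 0x9FC00 and trunc_page()
 * yields boot_address = 0x9F000, leaving 0xC00 bytes for the
 * trampoline (one more page is taken if that is too small).  The
 * three page table pages then occupy 0x9C000-0x9EFFF and 0x9C000 is
 * returned.
 */
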
/*
 * Startup the SMP processors.
 */
void
mp_start(void)
{
	POSTCODE(MP_START_POST);
	mp_enable(boot_address);
}

/*
 * Print various information about the SMP system hardware and setup.
 */
void
mp_announce(void)
{
	int x;

	POSTCODE(MP_ANNOUNCE_POST);

	kprintf("DragonFly/MP: Multiprocessor motherboard\n");
	kprintf(" cpu0 (BSP): apic id: %2d\n", CPU_TO_ID(0));
	for (x = 1; x <= mp_naps; ++x)
		kprintf(" cpu%d (AP):  apic id: %2d\n", x, CPU_TO_ID(x));

	if (ioapic_enable == 0)
		kprintf(" Warning: APIC I/O disabled\n");
}

/*
 * AP cpu's call this to sync up protected mode.
 *
 * WARNING! %gs is not set up on entry.  This routine sets up %gs.
 */
void
init_secondary(void)
{
	int	gsel_tss;
	int	x, myid = bootAP;
	u_int64_t msr, cr0;
	struct mdglobaldata *md;
	struct privatespace *ps;

	ps = &CPU_prvspace[myid];

	gdt_segs[GPROC0_SEL].ssd_base =
		(long) &ps->mdglobaldata.gd_common_tss;
	ps->mdglobaldata.mi.gd_prvspace = ps;

	/* We fill the 32-bit segment descriptors */
	for (x = 0; x < NGDT; x++) {
		if (x != GPROC0_SEL && x != (GPROC0_SEL + 1))
			ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x]);
	}
	/* And now a 64-bit one */
	ssdtosyssd(&gdt_segs[GPROC0_SEL],
	    (struct system_segment_descriptor *)&gdt[myid * NGDT + GPROC0_SEL]);

	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	r_gdt.rd_base = (long) &gdt[myid * NGDT];
	lgdt(&r_gdt);			/* does magic intra-segment return */

	/* lgdt() destroys the GSBASE value, so we load GSBASE after lgdt() */
	wrmsr(MSR_FSBASE, 0);		/* User value */
	wrmsr(MSR_GSBASE, (u_int64_t)ps);
	wrmsr(MSR_KGSBASE, 0);		/* XXX User value while we're in the kernel */

	lidt(&r_idt);

#if 0
	lldt(_default_ldt);
	mdcpu->gd_currentldt = _default_ldt;
#endif

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	gdt[myid * NGDT + GPROC0_SEL].sd_type = SDT_SYSTSS;

	md = mdcpu;	/* loaded through %gs:0 (mdglobaldata.mi.gd_prvspace)*/

	md->gd_common_tss.tss_rsp0 = 0;	/* not used until after switch */
	md->gd_common_tss.tss_ioopt = (sizeof md->gd_common_tss) << 16;

	md->gd_tss_gdt = &gdt[myid * NGDT + GPROC0_SEL];
	md->gd_common_tssd = *md->gd_tss_gdt;

	/* double fault stack */
	md->gd_common_tss.tss_ist1 =
		(long)&md->mi.gd_prvspace->idlestack[
			sizeof(md->mi.gd_prvspace->idlestack)];

	ltr(gsel_tss);

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);

	/* Set up the fast syscall stuff */
	msr = rdmsr(MSR_EFER) | EFER_SCE;
	wrmsr(MSR_EFER, msr);
	wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
	wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
	msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
	      ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
	wrmsr(MSR_STAR, msr);
	wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D);

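	/*
	 * For reference (architectural behavior, Intel SDM): SYSCALL
	 * loads CS from MSR_STAR[47:32] (SS = CS + 8), jumps to
	 * MSR_LSTAR, and clears the rflags bits listed in MSR_SF_MASK
	 * (here NT/TF/IF/CF/DF).  SYSRET takes the user selectors from
	 * MSR_STAR[63:48]: 32-bit CS directly, 64-bit CS at +16, SS at +8.
	 */
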
	pmap_set_opt();		/* PSE/4MB pages, etc */

	/* Initialize the PAT MSR. */
	pmap_init_pat();

	/* set up CPU registers and state */
	cpu_setregs();

	/* set up SSE/NX registers */
	initializecpu();

	/* set up FPU state on the AP */
	npxinit(__INITIAL_NPXCW__);

	/* disable the APIC, just to be SURE */
	lapic->svr &= ~APIC_SVR_ENABLE;
}

/*******************************************************************
 * local functions and data
 */

/*
 * start the SMP system
 */
static void
mp_enable(u_int boot_addr)
{
	POSTCODE(MP_ENABLE_POST);

	/* Initialize BSP's local APIC */
	lapic_init(TRUE);

	/* start each Application Processor */
	start_all_aps(boot_addr);

	MachIntrABI.finalize();
}

void
mp_set_cpuids(int cpu_id, int apic_id)
{
	CPU_TO_ID(cpu_id) = apic_id;
	ID_TO_CPU(apic_id) = cpu_id;
}

/*
 * Map a physical memory address representing I/O into KVA.  The I/O
 * block is assumed not to cross a page boundary.
 */
void *
ioapic_map(vm_paddr_t pa)
{
	KKASSERT(pa < 0x100000000LL);

	return pmap_mapdev_uncacheable(pa, PAGE_SIZE);
}

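/*
 * Usage sketch (illustrative): mapping the conventional IOAPIC register
 * window.  0xfec00000 is only the default physical base; the real
 * address comes from the MP table or ACPI MADT.
 */
#if 0
void *ioapic_base = ioapic_map(0xfec00000);
#endif
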
/*
 * start each AP in our list
 */
static int
start_all_aps(u_int boot_addr)
{
	vm_offset_t va = boot_address + KERNBASE;
	u_int64_t *pt4, *pt3, *pt2;
	int	x, i, pg;
	int	shift;
	int	smicount;
	int	smibest;
	int	smilast;
	u_char	mpbiosreason;
	u_long	mpbioswarmvec;
	struct mdglobaldata *gd;
	struct privatespace *ps;

	POSTCODE(START_ALL_APS_POST);

	/* install the AP 1st level boot code */
	pmap_kenter(va, boot_address);
	cpu_invlpg((void *)va);		/* JG XXX */
	bcopy(mptramp_start, (void *)va, bootMP_size);

	/* Locate the page tables, they'll be below the trampoline */
	pt4 = (u_int64_t *)(uintptr_t)(mptramp_pagetables + KERNBASE);
	pt3 = pt4 + (PAGE_SIZE) / sizeof(u_int64_t);
	pt2 = pt3 + (PAGE_SIZE) / sizeof(u_int64_t);

	/* Create the initial 1GB replicated page tables */
	for (i = 0; i < 512; i++) {
		/* Each slot of the level 4 pages points to the same level 3 page */
		pt4[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + PAGE_SIZE);
		pt4[i] |= PG_V | PG_RW | PG_U;

		/* Each slot of the level 3 pages points to the same level 2 page */
		pt3[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + (2 * PAGE_SIZE));
		pt3[i] |= PG_V | PG_RW | PG_U;

		/* The level 2 page slots are mapped with 2MB pages for 1GB. */
		pt2[i] = i * (2 * 1024 * 1024);
		pt2[i] |= PG_V | PG_RW | PG_PS | PG_U;
	}

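	/*
	 * Net effect (illustrative): every PML4 slot aliases the same PDP
	 * page and every PDP slot aliases the same PD page, so any virtual
	 * address resolves to (va % 1GB) physical.  E.g. va 0x9F000 maps
	 * to pa 0x9F000 and va 0xFFFFFFFF80200000 maps to pa 0x200000,
	 * which is why the trampoline keeps executing the instant paging
	 * is enabled, regardless of where %rip happens to be.
	 */
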
	/* save the current value of the warm-start vector */
	mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);

	/* setup a vector to our boot code */
	*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
	*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

	/*
	 * If we have a TSC we can figure out the SMI interrupt rate.
	 * The SMI does not necessarily use a constant rate.  Spend
	 * up to 250ms trying to figure it out.
	 */
	smibest = 0;
	if (cpu_feature & CPUID_TSC) {
		set_apic_timer(275000);
		smilast = read_apic_timer();
		for (x = 0; x < 20 && read_apic_timer(); ++x) {
			smicount = smitest();
			if (smibest == 0 || smilast - smicount < smibest)
				smibest = smilast - smicount;
			smilast = smicount;
		}
		if (smibest > 250000)
			smibest = 0;
		if (smibest) {
			smibest = smibest * (int64_t)1000000 /
				  get_apic_timer_frequency();
		}
	}
	if (smibest)
		kprintf("SMI Frequency (worst case): %d Hz (%d us)\n",
			1000000 / smibest, smibest);

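	/*
	 * Worked example (illustrative): with the APIC timer running at
	 * 100MHz (get_apic_timer_frequency() == 100000000) and a smallest
	 * observed delta of 50000 ticks, smibest becomes
	 * 50000 * 1000000 / 100000000 = 500us between SMIs, reported as
	 * 1000000 / 500 = 2000 Hz.
	 */
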
	for (x = 1; x <= mp_naps; ++x) {
		/* This is a bit verbose, it will go away soon.  */

		/* first page of AP's private space */
		pg = x * x86_64_btop(sizeof(struct privatespace));

		/* allocate new private data page(s) */
		gd = (struct mdglobaldata *)kmem_alloc(&kernel_map,
						       MDGLOBALDATA_BASEALLOC_SIZE);

		gd = &CPU_prvspace[x].mdglobaldata;	/* official location */
		bzero(gd, sizeof(*gd));
		gd->mi.gd_prvspace = ps = &CPU_prvspace[x];

		/* prime data page for it to use */
		mi_gdinit(&gd->mi, x);
		cpu_gdinit(gd, x);
		gd->mi.gd_ipiq = (void *)kmem_alloc(&kernel_map, sizeof(lwkt_ipiq) * (mp_naps + 1));
		bzero(gd->mi.gd_ipiq, sizeof(lwkt_ipiq) * (mp_naps + 1));

		/* setup a vector to our boot code */
		*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
		*((volatile u_short *) WARMBOOT_SEG) = (boot_addr >> 4);
		outb(CMOS_REG, BIOS_RESET);
		outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

		/*
		 * Setup the AP boot stack
		 */
		bootSTK = &ps->idlestack[UPAGES*PAGE_SIZE/2];
		bootAP = x;

		/* attempt to start the Application Processor */
		CHECK_INIT(99);	/* setup checkpoints */
		if (!start_ap(gd, boot_addr, smibest)) {
			kprintf("\nAP #%d (PHY# %d) failed!\n",
				x, CPU_TO_ID(x));
			CHECK_PRINT("trace");	/* show checkpoints */
			/* better panic as the AP may be running loose */
			kprintf("panic y/n? [y] ");
			if (cngetc() != 'n')
				panic("bye-bye");
		}
		CHECK_PRINT("trace");	/* show checkpoints */
	}

	/* set ncpus to 1 + highest logical cpu.  Not all may have come up */
	ncpus = x;

	/* ncpus2 -- ncpus rounded down to the nearest power of 2 */
	for (shift = 0; (1 << shift) <= ncpus; ++shift)
		;
	--shift;
	ncpus2_shift = shift;
	ncpus2 = 1 << shift;
	ncpus2_mask = ncpus2 - 1;

	/* ncpus_fit -- ncpus rounded up to the nearest power of 2 */
	if ((1 << shift) < ncpus)
		++shift;
	ncpus_fit = 1 << shift;
	ncpus_fit_mask = ncpus_fit - 1;

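	/*
	 * Worked example (illustrative): with ncpus = 6 the loop exits at
	 * shift = 3 (1 << 3 = 8 > 6), so after --shift: ncpus2 = 4 and
	 * ncpus2_mask = 3.  Rounding up, (1 << 2) = 4 < 6 bumps shift back
	 * to 3, giving ncpus_fit = 8 and ncpus_fit_mask = 7.
	 */
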
	/* build our map of 'other' CPUs */
	mycpu->gd_other_cpus = smp_startup_mask & ~CPUMASK(mycpu->gd_cpuid);
	mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map, sizeof(lwkt_ipiq) * ncpus);
	bzero(mycpu->gd_ipiq, sizeof(lwkt_ipiq) * ncpus);

	/* restore the warmstart vector */
	*(u_long *) WARMBOOT_OFF = mpbioswarmvec;
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);

	/*
	 * NOTE!  The idlestack for the BSP was setup by locore.  Finish
	 * up, clean out the P==V mapping we did earlier.
	 */

	/*
	 * Wait for all APs to finish initializing their LAPIC
	 */
	if (bootverbose)
		kprintf("SMP: Waiting for APs to finish LAPIC initialization\n");
	if (cpu_feature & CPUID_TSC)
		tsc0_offset = rdtsc();
	mp_finish_lapic = 1;
	rel_mplock();

	while (smp_lapic_mask != smp_startup_mask) {
		cpu_lfence();
		if (cpu_feature & CPUID_TSC)
			tsc0_offset = rdtsc();
	}
	while (try_mplock() == 0)
		/* spin */ ;

	/* number of APs actually started */
	return ncpus - 1;
}

/*
 * load the 1st level AP boot code into base memory.
 */

/* targets for relocation */
extern void bigJump(void);
extern void bootCodeSeg(void);
extern void bootDataSeg(void);
extern void MPentry(void);
extern u_int MP_GDT;
extern u_int mp_gdtbase;

static void
install_ap_tramp(u_int boot_addr)
{
	int	x;
	int	size = *(int *) ((u_long) & bootMP_size);
	u_char *src = (u_char *) ((u_long) bootMP);
	u_char *dst = (u_char *) boot_addr + KERNBASE;
	u_int	boot_base = (u_int) bootMP;
	u_int8_t *dst8;
	u_int16_t *dst16;
	u_int32_t *dst32;

	POSTCODE(INSTALL_AP_TRAMP_POST);

	for (x = 0; x < size; ++x)
		*dst++ = *src++;

	/*
	 * modify addresses in code we just moved to basemem.  unfortunately we
	 * need fairly detailed info about mpboot.s for this to work.  changes
	 * to mpboot.s might require changes here.
	 */

	/* boot code is located in KERNEL space */
	dst = (u_char *) boot_addr + KERNBASE;

	/* modify the lgdt arg */
	dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base));
	*dst32 = boot_addr + ((u_int) & MP_GDT - boot_base);

	/* modify the ljmp target for MPentry() */
	dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
	*dst32 = ((u_int) MPentry - KERNBASE);

	/* modify the target for boot code segment */
	dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
	dst8 = (u_int8_t *) (dst16 + 1);
	*dst16 = (u_int) boot_addr & 0xffff;
	*dst8 = ((u_int) boot_addr >> 16) & 0xff;

	/* modify the target for boot data segment */
	dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
	dst8 = (u_int8_t *) (dst16 + 1);
	*dst16 = (u_int) boot_addr & 0xffff;
	*dst8 = ((u_int) boot_addr >> 16) & 0xff;
}

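/*
 * Worked example (illustrative): with boot_addr = 0x9F000 the two
 * descriptor patches above store 0xF000 into the 16-bit low base and
 * 0x09 into the 8-bit mid base, so the 24-bit base field of the boot
 * code/data segment descriptors becomes 0x09F000.
 */
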
/*
 * This function starts the AP (application processor) identified
 * by the APIC ID 'physical_cpu'.  It does quite a "song and dance"
 * to accomplish this.  This is necessary because of the nuances
 * of the different hardware we might encounter.  It ain't pretty,
 * but it seems to work.
 *
 * NOTE: eventually an AP gets to ap_init(), which is called just
 * before the AP goes into the LWKT scheduler's idle loop.
 */
static int
start_ap(struct mdglobaldata *gd, u_int boot_addr, int smibest)
{
	int	physical_cpu;
	int	vector;
	u_long	icr_lo, icr_hi;

	POSTCODE(START_AP_POST);

	/* get the PHYSICAL APIC ID# */
	physical_cpu = CPU_TO_ID(gd->mi.gd_cpuid);

	/* calculate the vector */
	vector = (boot_addr >> 12) & 0xff;

	/* We don't want anything interfering */
	cpu_disable_intr();

	/* Make sure the target cpu sees everything */
	wbinvd();

	/*
	 * Try to detect when a SMI has occurred, wait up to 200ms.
	 *
	 * If a SMI occurs during an AP reset but before we issue
	 * the STARTUP command, the AP may brick.  To work around
	 * this problem we hold off doing the AP startup until
	 * after we have detected the SMI.  Hopefully another SMI
	 * will not occur before we finish the AP startup.
	 *
	 * Retries don't seem to help.  SMIs have a window of opportunity
	 * and if USB->legacy keyboard emulation is enabled in the BIOS
	 * the interrupt rate can be quite high.
	 *
	 * NOTE: Don't worry about the L1 cache load, it might bloat
	 *	 ldelta a little but ndelta will be so huge when the SMI
	 *	 occurs the detection logic will still work fine.
	 */
	if (smibest) {
		set_apic_timer(200000);
		smitest();
	}

	/*
	 * first we do an INIT/RESET IPI: this INIT IPI might be run, resetting
	 * and running the target CPU.  OR this INIT IPI might be latched (P5
	 * bug), the CPU waiting for a STARTUP IPI.  OR this INIT IPI might be
	 * ignored.
	 *
	 * see apic/apicreg.h for icr bit definitions.
	 *
	 * TIME CRITICAL CODE, DO NOT DO ANY KPRINTFS IN THE HOT PATH.
	 */
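	/*
	 * Decode of the magic ICR values used below (bit layout per
	 * apicreg.h and the Intel SDM):
	 *	0x00004500 = INIT delivery mode (0x500) | assert (0x4000)
	 *	0x00008500 = INIT delivery mode (0x500) | level trigger
	 *		     (0x8000) with assert clear, i.e. deassert
	 *	0x00000600 = STARTUP delivery mode; the low byte is the
	 *		     vector, i.e. the trampoline's page number,
	 *		     which is why boot_addr must be page aligned
	 *		     and below 1MB.
	 */
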
	/*
	 * Setup the address for the target AP.  We can setup
	 * icr_hi once and then just trigger operations with
	 * icr_lo.
	 */
	icr_hi = lapic->icr_hi & ~APIC_ID_MASK;
	icr_hi |= (physical_cpu << 24);
	icr_lo = lapic->icr_lo & 0xfff00000;
	lapic->icr_hi = icr_hi;

	/*
	 * Do an INIT IPI: assert RESET
	 *
	 * Use edge triggered mode to assert INIT
	 */
	lapic->icr_lo = icr_lo | 0x00004500;
	while (lapic->icr_lo & APIC_DELSTAT_MASK)
		/* spin */ ;

	/*
	 * The spec calls for a 10ms delay but we may have to use a
	 * MUCH lower delay to avoid bricking an AP due to a fast SMI
	 * interrupt.  We have other loops here too and dividing by 2
	 * doesn't seem to be enough even after subtracting 350us,
	 * so we divide by 4.
	 *
	 * Our minimum delay is 150uS, maximum is 10ms.  If no SMI
	 * interrupt was detected we use the full 10ms.
	 */
	if (smibest == 0)
		u_sleep(10000);
	else if (smibest < 150 * 4 + 350)
		u_sleep(150);
	else if ((smibest - 350) / 4 < 10000)
		u_sleep((smibest - 350) / 4);
	else
		u_sleep(10000);

	/*
	 * Do an INIT IPI: deassert RESET
	 *
	 * Use level triggered mode to deassert.  It is unclear
	 * why we need to do this.
	 */
	lapic->icr_lo = icr_lo | 0x00008500;
	while (lapic->icr_lo & APIC_DELSTAT_MASK)
		/* spin */ ;
	u_sleep(150);				/* wait 150us */

	/*
	 * Next we do a STARTUP IPI: the previous INIT IPI might still be
	 * latched (P5 bug), in which case this 1st STARTUP would terminate
	 * immediately and the previously started INIT IPI would continue.  OR
	 * the previous INIT IPI has already run, and this STARTUP IPI will
	 * run.  OR the previous INIT IPI was ignored, and this STARTUP IPI
	 * will run.
	 */
	lapic->icr_lo = icr_lo | 0x00000600 | vector;
	while (lapic->icr_lo & APIC_DELSTAT_MASK)
		/* spin */ ;
	u_sleep(200);				/* wait ~200uS */

	/*
	 * Finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run IF
	 * the previous STARTUP IPI was cancelled by a latched INIT IPI.  OR
	 * this STARTUP IPI will be ignored, as only ONE STARTUP IPI is
	 * recognized after hardware RESET or INIT IPI.
	 */
	lapic->icr_lo = icr_lo | 0x00000600 | vector;
	while (lapic->icr_lo & APIC_DELSTAT_MASK)
		/* spin */ ;

	/* Resume normal operation */
	cpu_enable_intr();

	/* wait for it to start, see ap_init() */
	set_apic_timer(5000000);		/* == 5 seconds */
	while (read_apic_timer()) {
		if (smp_startup_mask & CPUMASK(gd->mi.gd_cpuid))
			return 1;	/* return SUCCESS */
	}

	return 0;		/* return FAILURE */
}

/*
 * Detect an SMI by timing a rdtsc loop: returns the remaining APIC
 * timer count when the tsc delta spikes, or 0 if no spike was seen.
 */
static int
smitest(void)
{
	int64_t	ltsc, ntsc, ldelta, ndelta;
	int	count;

	ldelta = 0;
	ndelta = 0;
	while (read_apic_timer()) {
		ltsc = rdtsc();
		for (count = 0; count < 100; ++count)
			ntsc = rdtsc();	/* force loop to occur */
		if (ldelta) {
			ndelta = ntsc - ltsc;
			if (ldelta > ndelta)
				ldelta = ndelta;
			if (ndelta > ldelta * 2)
				break;
		} else {
			ldelta = ntsc - ltsc;
		}
	}
	return(read_apic_timer());
}

/*
 * Synchronously flush the TLB on all other CPU's.  The current cpu's
 * TLB is not flushed.  If the caller wishes to flush the current cpu's
 * TLB the caller must call cpu_invltlb() in addition to smp_invltlb().
 *
 * NOTE: If for some reason we were unable to start all cpus we cannot
 *	 safely use broadcast IPIs.
 */

static cpumask_t smp_invltlb_req;

#define SMP_INVLTLB_DEBUG

void
smp_invltlb(void)
{
	struct mdglobaldata *md = mdcpu;
#ifdef SMP_INVLTLB_DEBUG
	long count = 0;
	long xcount = 0;
#endif

	crit_enter_gd(&md->mi);
	md->gd_invltlb_ret = 0;
	++md->mi.gd_cnt.v_smpinvltlb;
	atomic_set_cpumask(&smp_invltlb_req, md->mi.gd_cpumask);
#ifdef SMP_INVLTLB_DEBUG
again:
#endif
	if (smp_startup_mask == smp_active_mask) {
		all_but_self_ipi(XINVLTLB_OFFSET);
	} else {
		selected_apic_ipi(smp_active_mask & ~md->mi.gd_cpumask,
				  XINVLTLB_OFFSET, APIC_DELMODE_FIXED);
	}

#ifdef SMP_INVLTLB_DEBUG
	if (xcount)
		kprintf("smp_invltlb: ipi sent\n");
#endif
	while ((md->gd_invltlb_ret & smp_active_mask & ~md->mi.gd_cpumask) !=
	       (smp_active_mask & ~md->mi.gd_cpumask)) {
		cpu_lfence();
		cpu_pause();
#ifdef SMP_INVLTLB_DEBUG
		if (++count == 400000000) {
			kprintf("smp_invltlb: endless loop %08lx %08lx, "
				"rflags %016jx retry",
				(long)md->gd_invltlb_ret,
				(long)smp_invltlb_req,
				(intmax_t)read_rflags());
			__asm __volatile ("sti");
			++xcount;
			if (xcount > 2)
				lwkt_process_ipiq();
			if (xcount > 3) {
				int bcpu = BSFCPUMASK(~md->gd_invltlb_ret &
						      ~md->mi.gd_cpumask &
						      smp_active_mask);
				globaldata_t xgd;

				kprintf("bcpu %d\n", bcpu);
				xgd = globaldata_find(bcpu);
				kprintf("thread %p %s\n", xgd->gd_curthread,
					xgd->gd_curthread->td_comm);
			}
			if (xcount > 5)
				Debugger("giving up");
			count = 0;
			goto again;
		}
#endif
	}
	atomic_clear_cpumask(&smp_invltlb_req, md->mi.gd_cpumask);
	crit_exit_gd(&md->mi);
}

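/*
 * Usage sketch (illustrative): a caller changing a kernel-wide mapping
 * must still flush its own TLB separately.
 */
#if 0
static void
example_global_flush(void)
{
	/* ... modify a kernel PTE here ... */
	smp_invltlb();		/* flush every other cpu's TLB */
	cpu_invltlb();		/* smp_invltlb() does not flush our own */
}
#endif
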
/*
 * Called from Xinvltlb assembly with interrupts disabled.  We didn't
 * bother to bump the critical section count or nested interrupt count
 * so only do very low level operations here.
 */
void
smp_invltlb_intr(void)
{
	struct mdglobaldata *md = mdcpu;
	struct mdglobaldata *omd;
	cpumask_t mask;
	int cpu;

	cpu_mfence();
	mask = smp_invltlb_req;
	cpu_invltlb();
	while (mask) {
		cpu = BSFCPUMASK(mask);
		mask &= ~CPUMASK(cpu);
		omd = (struct mdglobaldata *)globaldata_find(cpu);
		atomic_set_cpumask(&omd->gd_invltlb_ret, md->mi.gd_cpumask);
	}
}

/*
 * When called the executing CPU will send an IPI to all other CPUs
 * requesting that they halt execution.
 *
 * Usually (but not necessarily) called with 'other_cpus' as its arg.
 *
 *  - Signals all CPUs in map to stop.
 *  - Waits for each to stop.
 *
 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
 *	      from executing at the same time.
 */
int
stop_cpus(cpumask_t map)
{
	map &= smp_active_mask;

	/* send the Xcpustop IPI to all CPUs in map */
	selected_apic_ipi(map, XCPUSTOP_OFFSET, APIC_DELMODE_FIXED);

	while ((stopped_cpus & map) != map)
		/* spin */ ;

	return 1;
}

/*
 * Called by a CPU to restart stopped CPUs.
 *
 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
 *
 *  - Signals all CPUs in map to restart.
 *  - Waits for each to restart.
 */
int
restart_cpus(cpumask_t map)
{
	/* signal other cpus to restart */
	started_cpus = map & smp_active_mask;

	while ((stopped_cpus & map) != 0)	/* wait for each to clear its bit */
		/* spin */ ;

	return 1;
}

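/*
 * Usage sketch (illustrative): a debugger or panic path freezing the
 * other cpus around a critical inspection.
 */
#if 0
static void
example_freeze_others(void)
{
	stop_cpus(mycpu->gd_other_cpus);
	/* ... inspect or dump state ... */
	restart_cpus(stopped_cpus);
}
#endif
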
/*
 * This is called once the mpboot code has gotten us properly relocated
 * and the MMU turned on, etc.  ap_init() is actually the idle thread,
 * and when it returns the scheduler will call the real cpu_idle() main
 * loop for the idlethread.  Interrupts are disabled on entry and should
 * remain disabled at return.
 */
void
ap_init(void)
{
	u_int	apic_id;

	/*
	 * Adjust smp_startup_mask to signal the BSP that we have started
	 * up successfully.  Note that we do not yet hold the BGL.  The BSP
	 * is waiting for our signal.
	 *
	 * We can't set our bit in smp_active_mask yet because we are holding
	 * interrupts physically disabled and remote cpus could deadlock
	 * trying to send us an IPI.
	 */
	smp_startup_mask |= CPUMASK(mycpu->gd_cpuid);
	cpu_mfence();

	/*
	 * Interlock for LAPIC initialization.  Wait until mp_finish_lapic is
	 * non-zero, then get the MP lock.
	 *
	 * Note: We are in a critical section.
	 *
	 * Note: we are the idle thread, we can only spin.
	 *
	 * Note: The load fence is memory volatile and prevents the compiler
	 * from improperly caching mp_finish_lapic, and the cpu from improperly
	 * caching it.
	 */
	while (mp_finish_lapic == 0)
		cpu_lfence();
	while (try_mplock() == 0)
		/* spin */ ;

	if (cpu_feature & CPUID_TSC) {
		/*
		 * The BSP is constantly updating tsc0_offset, figure out
		 * the relative difference to synchronize ktrdump.
		 */
		tsc_offsets[mycpu->gd_cpuid] = rdtsc() - tsc0_offset;
	}

	/* BSP may have changed PTD while we're waiting for the lock */
	cpu_invltlb();

	/* Build our map of 'other' CPUs. */
	mycpu->gd_other_cpus = smp_startup_mask & ~CPUMASK(mycpu->gd_cpuid);

	/* A quick check from sanity claus */
	apic_id = (apic_id_to_logical[(lapic->id & 0xff000000) >> 24]);
	if (mycpu->gd_cpuid != apic_id) {
		kprintf("SMP: cpuid = %d\n", mycpu->gd_cpuid);
		kprintf("SMP: apic_id = %d lapicid %d\n",
			apic_id, (lapic->id & 0xff000000) >> 24);
#if JGXXX
		kprintf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]);
#endif
		panic("cpuid mismatch! boom!!");
	}

	/* Initialize AP's local APIC for irq's */
	lapic_init(FALSE);

	/* LAPIC initialization is done */
	smp_lapic_mask |= CPUMASK(mycpu->gd_cpuid);
	cpu_mfence();

	/* Let BSP move onto the next initialization stage */
	rel_mplock();

	/*
	 * Interlock for finalization.  Wait until mp_finish is non-zero,
	 * then get the MP lock.
	 *
	 * Note: We are in a critical section.
	 *
	 * Note: we are the idle thread, we can only spin.
	 *
	 * Note: The load fence is memory volatile and prevents the compiler
	 * from improperly caching mp_finish, and the cpu from improperly
	 * caching it.
	 */
	while (mp_finish == 0)
		cpu_lfence();
	while (try_mplock() == 0)
		/* spin */ ;

	/* BSP may have changed PTD while we're waiting for the lock */
	cpu_invltlb();

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	/*
	 * Once we go active we must process any IPIQ messages that may
	 * have been queued, because no actual IPI will occur until we
	 * set our bit in the smp_active_mask.  If we don't the IPI
	 * message interlock could be left set which would also prevent
	 * further IPIs.
	 *
	 * The idle loop doesn't expect the BGL to be held and while
	 * lwkt_switch() normally cleans things up this is a special case
	 * because we are returning almost directly into the idle loop.
	 *
	 * The idle thread is never placed on the runq, make sure
	 * nothing we've done put it there.
	 */
	KKASSERT(get_mplock_count(curthread) == 1);
	smp_active_mask |= CPUMASK(mycpu->gd_cpuid);

	/*
	 * Enable interrupts here.  idle_restore will also do it, but
	 * doing it here lets us clean up any strays that got posted to
	 * the CPU during the AP boot while we are still in a critical
	 * section.
	 */
	__asm __volatile("sti; pause; pause"::);
	bzero(mdcpu->gd_ipending, sizeof(mdcpu->gd_ipending));

	initclocks_pcpu();	/* clock interrupts (via IPIs) */
	lwkt_process_ipiq();

	/*
	 * Releasing the mp lock lets the BSP finish up the SMP init
	 */
	rel_mplock();
	KKASSERT((curthread->td_flags & TDF_RUNQ) == 0);
}

/*
 * Get SMP fully working before we start initializing devices.
 */
static void
ap_finish(void)
{
	mp_finish = 1;
	if (bootverbose)
		kprintf("Finish MP startup\n");
	rel_mplock();

	while (smp_active_mask != smp_startup_mask)
		cpu_lfence();
	while (try_mplock() == 0)
		/* spin */ ;

	if (bootverbose) {
		kprintf("Active CPU Mask: %016jx\n",
			(uintmax_t)smp_active_mask);
	}
}

SYSINIT(finishsmp, SI_BOOT2_FINISH_SMP, SI_ORDER_FIRST, ap_finish, NULL)

void
cpu_send_ipiq(int dcpu)
{
	if (CPUMASK(dcpu) & smp_active_mask)
		single_apic_ipi(dcpu, XIPIQ_OFFSET, APIC_DELMODE_FIXED);
}

#if 0	/* single_apic_ipi_passive() not working yet */
/*
 * Returns 0 on failure, 1 on success
 */
int
cpu_send_ipiq_passive(int dcpu)
{
	int r = 0;

	if (CPUMASK(dcpu) & smp_active_mask) {
		r = single_apic_ipi_passive(dcpu, XIPIQ_OFFSET,
					    APIC_DELMODE_FIXED);
	}
	return r;
}
#endif