/* sys/platform/vkernel64/x86_64/mp.c */

/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cpumask.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/tls.h>
#include <sys/types.h>

#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

#include <sys/mplock2.h>
#include <sys/thread2.h>

#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/globaldata.h>
#include <machine/md_var.h>
#include <machine/pmap.h>
#include <machine/smp.h>
#include <machine/tls.h>
#include <machine/param.h>

#include <unistd.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>

extern pt_entry_t *KPTphys;

volatile cpumask_t stopped_cpus;
/* which cpus are ready for IPIs etc? */
cpumask_t smp_active_mask = CPUMASK_INITIALIZER_ONLYONE;
static int boot_address;
/* which cpus have been started */
static cpumask_t smp_startup_mask = CPUMASK_INITIALIZER_ONLYONE;
static int mp_finish;

/* Local data for detecting CPU TOPOLOGY */
static int core_bits = 0;
static int logical_CPU_bits = 0;

/* function prototypes XXX these should go elsewhere */
void bootstrap_idle(void);
void single_cpu_ipi(int, int, int);
void selected_cpu_ipi(cpumask_t, int, int);
#if 0
void ipi_handler(int);
#endif

pt_entry_t *SMPpt;

/* AP uses this during bootstrap.  Do not staticize. */
char *bootSTK;
static int bootAP;

/* XXX these need to go into the appropriate header file */
static int start_all_aps(u_int);
void init_secondary(void);
void *start_ap(void *);

/*
 * Get SMP fully working before we start initializing devices.
 */
static
void
ap_finish(void)
{
	mp_finish = 1;
	if (bootverbose)
		kprintf("Finish MP startup\n");

	/* build our map of 'other' CPUs */
	mycpu->gd_other_cpus = smp_startup_mask;
	CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	/*
	 * Let the other cpus finish initializing and build their map
	 * of 'other' CPUs.
	 */
	rel_mplock();
	while (CPUMASK_CMPMASKNEQ(smp_active_mask, smp_startup_mask)) {
		DELAY(100000);
		cpu_lfence();
	}

	while (try_mplock() == 0)
		DELAY(100000);
	if (bootverbose)
		kprintf("Active CPU Mask: %08lx\n",
			(long)CPUMASK_LOWMASK(smp_active_mask));
}

SYSINIT(finishsmp, SI_BOOT2_FINISH_SMP, SI_ORDER_FIRST, ap_finish, NULL);

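/*
 * Thread entry point for an AP's pthread (see pthread_create() in
 * start_all_aps() below).  It runs the cpu's one-time initialization
 * and then drops into the idle loop; it never returns.
 */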
void *
start_ap(void *arg __unused)
{
	init_secondary();
	setrealcpu();
	bootstrap_idle();

	return(NULL); /* NOTREACHED */
}

/* storage for AP thread IDs */
pthread_t ap_tids[MAXCPU];

int naps;

void
mp_start(void)
{
	size_t ipiq_size;
	int shift;

	ncpus = optcpus;
	naps = ncpus - 1;

	for (shift = 0; (1 << shift) <= ncpus; ++shift)
		;
	--shift;

	/* ncpus_fit -- ncpus rounded up to the nearest power of 2 */
	if ((1 << shift) < ncpus)
		++shift;
	ncpus_fit = 1 << shift;
	ncpus_fit_mask = ncpus_fit - 1;
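	/*
	 * e.g. optcpus == 6: the for loop exits with shift == 3, the
	 * decrement leaves 2, and since (1 << 2) < 6 the correction
	 * bumps shift back to 3: ncpus_fit == 8, ncpus_fit_mask == 7.
	 */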

	malloc_reinit_ncpus();

	/*
	 * cpu0 initialization
	 */
	ipiq_size = sizeof(struct lwkt_ipiq) * ncpus;
	mycpu->gd_ipiq = (void *)kmem_alloc(kernel_map, ipiq_size,
					    VM_SUBSYS_IPIQ);
	bzero(mycpu->gd_ipiq, ipiq_size);

	/* initialize arc4random. */
	arc4_init_pcpu(0);

	/*
	 * cpu 1-(n-1)
	 */
	start_all_aps(boot_address);
}

void
mp_announce(void)
{
	int x;

	kprintf("DragonFly/MP: Multiprocessor\n");
	kprintf(" cpu0 (BSP)\n");

	for (x = 1; x <= naps; ++x)
		kprintf(" cpu%d (AP)\n", x);
}

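/*
 * Notify a remote cpu that IPIQ messages are pending for it.  On the
 * vkernel an IPI is emulated with pthread_kill(): SIGUSR1 interrupts
 * the target cpu's pthread, which then processes its IPI queue.
 */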
void
cpu_send_ipiq(int dcpu)
{
	if (CPUMASK_TESTBIT(smp_active_mask, dcpu)) {
		if (pthread_kill(ap_tids[dcpu], SIGUSR1) != 0)
			panic("pthread_kill failed in cpu_send_ipiq");
	}
#if 0
	panic("XXX cpu_send_ipiq()");
#endif
}

void
single_cpu_ipi(int cpu, int vector, int delivery_mode)
{
	kprintf("XXX single_cpu_ipi\n");
}

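/*
 * Deliver an IPI to every cpu in the target mask.  This uses the
 * standard cpumask iteration idiom: BSFCPUMASK() finds the lowest set
 * bit and CPUMASK_NANDBIT() clears it, draining the mask one cpu at a
 * time until it tests zero.
 */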
void
selected_cpu_ipi(cpumask_t target, int vector, int delivery_mode)
{
	crit_enter();
	while (CPUMASK_TESTNZERO(target)) {
		int n = BSFCPUMASK(target);
		CPUMASK_NANDBIT(target, n);
		single_cpu_ipi(n, vector, delivery_mode);
	}
	crit_exit();
}

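/*
 * Stop/restart support.  Both directions signal the target cpu's
 * pthread with SIGXCPU; the direction is carried in stopped_cpus,
 * which stop_cpus() sets and restart_cpus() clears for each target
 * cpu before delivering the signal.
 */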
int
stop_cpus(cpumask_t map)
{
	CPUMASK_ANDMASK(map, smp_active_mask);

	crit_enter();
	while (CPUMASK_TESTNZERO(map)) {
		int n = BSFCPUMASK(map);
		CPUMASK_NANDBIT(map, n);
		ATOMIC_CPUMASK_ORBIT(stopped_cpus, n);
		if (pthread_kill(ap_tids[n], SIGXCPU) != 0)
			panic("stop_cpus: pthread_kill failed");
	}
	crit_exit();
#if 0
	panic("XXX stop_cpus()");
#endif

	return(1);
}

int
restart_cpus(cpumask_t map)
{
	CPUMASK_ANDMASK(map, smp_active_mask);

	crit_enter();
	while (CPUMASK_TESTNZERO(map)) {
		int n = BSFCPUMASK(map);
		CPUMASK_NANDBIT(map, n);
		ATOMIC_CPUMASK_NANDBIT(stopped_cpus, n);
		if (pthread_kill(ap_tids[n], SIGXCPU) != 0)
			panic("restart_cpus: pthread_kill failed");
	}
	crit_exit();
#if 0
	panic("XXX restart_cpus()");
#endif

	return(1);
}

void
ap_init(void)
{
	/*
	 * Adjust smp_startup_mask to signal the BSP that we have started
	 * up successfully.  Note that we do not yet hold the BGL.  The BSP
	 * is waiting for our signal.
	 *
	 * We can't set our bit in smp_active_mask yet because we are holding
	 * interrupts physically disabled and remote cpus could deadlock
	 * trying to send us an IPI.
	 */
	ATOMIC_CPUMASK_ORBIT(smp_startup_mask, mycpu->gd_cpuid);
	cpu_mfence();

	/*
	 * Interlock for finalization.  Wait until mp_finish is non-zero,
	 * then get the MP lock.
	 *
	 * Note: We are in a critical section.
	 *
	 * Note: we are the idle thread, we can only spin.
	 *
	 * Note: The load fence is memory-volatile and prevents the compiler
	 * from caching mp_finish in a register, and the cpu from caching a
	 * stale value.
	 */
	while (mp_finish == 0) {
		cpu_lfence();
		DELAY(500000);
	}
	while (try_mplock() == 0)
		DELAY(100000);

	/* BSP may have changed PTD while we're waiting for the lock */
	cpu_invltlb();

	/* Build our map of 'other' CPUs. */
	mycpu->gd_other_cpus = smp_startup_mask;
	CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	kprintf("SMP: AP CPU #%d Launched!\n", mycpu->gd_cpuid);

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	/*
	 * Once we go active we must process any IPIQ messages that may
	 * have been queued, because no actual IPI will occur until we
	 * set our bit in the smp_active_mask.  If we don't, the IPI
	 * message interlock could be left set which would also prevent
	 * further IPIs.
	 *
	 * The idle loop doesn't expect the BGL to be held and while
	 * lwkt_switch() normally cleans things up, this is a special case
	 * because we are returning almost directly into the idle loop.
	 *
	 * The idle thread is never placed on the runq, make sure
	 * nothing we've done put it there.
	 */
	KKASSERT(get_mplock_count(curthread) == 1);
	ATOMIC_CPUMASK_ORBIT(smp_active_mask, mycpu->gd_cpuid);

	mdcpu->gd_fpending = 0;
	mdcpu->gd_ipending = 0;
	initclocks_pcpu();	/* clock interrupts (via IPIs) */

	/*
	 * Since we may have cleaned up the interrupt triggers, manually
	 * process any pending IPIs before exiting our critical section.
	 * Once the critical section has exited, normal interrupt processing
	 * may occur.
	 */
	atomic_swap_int(&mycpu->gd_npoll, 0);
	lwkt_process_ipiq();

	/*
	 * Releasing the mp lock lets the BSP finish up the SMP init
	 */
	rel_mplock();
	KKASSERT((curthread->td_flags & TDF_RUNQ) == 0);
}

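/*
 * Per-cpu setup run in each AP's pthread before it enters the idle
 * loop.  bootAP (set by the BSP in start_all_aps()) identifies this
 * cpu; that is safe only because the BSP launches APs one at a time
 * and waits for each to appear in smp_startup_mask.
 */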
void
init_secondary(void)
{
	int myid = bootAP;
	struct mdglobaldata *md;
	struct privatespace *ps;

	ps = &CPU_prvspace[myid];

	KKASSERT(ps->mdglobaldata.mi.gd_prvspace == ps);

	/*
	 * Setup the %gs for cpu #n.  The mycpu macro works after this
	 * point.  Note that %fs is used by pthreads.
	 */
	tls_set_gs(&CPU_prvspace[myid], sizeof(struct privatespace));

	md = mdcpu;	/* loaded through %gs:0 (mdglobaldata.mi.gd_prvspace)*/

	/* JG */
	md->gd_common_tss.tss_rsp0 = 0;	/* not used until after switch */
	//md->gd_common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	//md->gd_common_tss.tss_ioopt = (sizeof md->gd_common_tss) << 16;

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
}

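/*
 * Start the APs.  On the vkernel each AP is simply another pthread of
 * the vkernel process: for each cpu we wire backing pages under its
 * privatespace (globaldata + idle stack), initialize its globaldata,
 * then create the thread and spin until the new cpu shows up in
 * smp_startup_mask before launching the next one.
 */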
static int
start_all_aps(u_int boot_addr)
{
	int x, i;
	struct mdglobaldata *gd;
	struct privatespace *ps;
	vm_page_t m;
	vm_offset_t va;
	pthread_attr_t attr;
	size_t ipiq_size;
#if 0
	struct lwp_params params;
#endif

	/*
	 * needed for ipis to initial thread
	 * FIXME: rename ap_tids?
	 */
	ap_tids[0] = pthread_self();
	pthread_attr_init(&attr);

	vm_object_hold(kernel_object);
	for (x = 1; x <= naps; ++x) {
		/* Allocate space for the CPU's private space. */
		for (i = 0; i < sizeof(struct mdglobaldata); i += PAGE_SIZE) {
			va = (vm_offset_t)&CPU_prvspace[x].mdglobaldata + i;
			m = vm_page_alloc(kernel_object, va, VM_ALLOC_SYSTEM);
			pmap_kenter_quick(va, m->phys_addr);
		}

		for (i = 0; i < sizeof(CPU_prvspace[x].idlestack); i += PAGE_SIZE) {
			va = (vm_offset_t)&CPU_prvspace[x].idlestack + i;
			m = vm_page_alloc(kernel_object, va, VM_ALLOC_SYSTEM);
			pmap_kenter_quick(va, m->phys_addr);
		}

		gd = &CPU_prvspace[x].mdglobaldata;	/* official location */
		bzero(gd, sizeof(*gd));
		gd->mi.gd_prvspace = ps = &CPU_prvspace[x];

		/* prime data page for it to use */
		mi_gdinit(&gd->mi, x);
		cpu_gdinit(gd, x);

#if 0
		gd->gd_CMAP1 = pmap_kpte((vm_offset_t)CPU_prvspace[x].CPAGE1);
		gd->gd_CMAP2 = pmap_kpte((vm_offset_t)CPU_prvspace[x].CPAGE2);
		gd->gd_CMAP3 = pmap_kpte((vm_offset_t)CPU_prvspace[x].CPAGE3);
		gd->gd_PMAP1 = pmap_kpte((vm_offset_t)CPU_prvspace[x].PPAGE1);
		gd->gd_CADDR1 = ps->CPAGE1;
		gd->gd_CADDR2 = ps->CPAGE2;
		gd->gd_CADDR3 = ps->CPAGE3;
		gd->gd_PADDR1 = (vpte_t *)ps->PPAGE1;
#endif

		ipiq_size = sizeof(struct lwkt_ipiq) * (naps + 1);
		gd->mi.gd_ipiq = (void *)kmem_alloc(kernel_map, ipiq_size,
						    VM_SUBSYS_IPIQ);
		bzero(gd->mi.gd_ipiq, ipiq_size);

		/* initialize arc4random. */
		arc4_init_pcpu(x);

		/*
		 * Setup the AP boot stack
		 */
		bootSTK = &ps->idlestack[UPAGES*PAGE_SIZE/2];
		bootAP = x;

		/*
		 * Setup the AP's lwp, this is the 'cpu'
		 *
		 * We have to make sure our signals are masked or the new LWP
		 * may pick up a signal that it isn't ready for yet.  SMP
		 * startup occurs after SI_BOOT2_LEAVE_CRIT so interrupts
		 * have already been enabled.
		 */
		cpu_disable_intr();

		pthread_create(&ap_tids[x], &attr, start_ap, NULL);
		cpu_enable_intr();

		while (CPUMASK_TESTBIT(smp_startup_mask, x) == 0) {
			cpu_lfence(); /* XXX spin until the AP has started */
			DELAY(1000);
		}
	}
	vm_object_drop(kernel_object);
	pthread_attr_destroy(&attr);

	return(ncpus - 1);
}

/*
 * CPU TOPOLOGY DETECTION FUNCTIONS.
 */
void
detect_cpu_topology(void)
{
	logical_CPU_bits = vkernel_b_arg;
	core_bits = vkernel_B_arg;
}

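/*
 * An APIC ID is decoded as a packed bit-field:
 *
 *	[ chip ID | core (core_bits) | logical cpu (logical_CPU_bits) ]
 *
 * e.g. with logical_CPU_bits == 1 and core_bits == 2, APIC ID 13
 * (0b1101) decodes to chip 1, core 2, logical cpu 1.
 */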
int
get_chip_ID(int cpuid)
{
	return get_apicid_from_cpuid(cpuid) >>
	    (logical_CPU_bits + core_bits);
}

int
get_chip_ID_from_APICID(int apicid)
{
	return apicid >> (logical_CPU_bits + core_bits);
}

int
get_core_number_within_chip(int cpuid)
{
	return ((get_apicid_from_cpuid(cpuid) >> logical_CPU_bits) &
		((1 << core_bits) - 1));
}

int
get_logical_CPU_number_within_core(int cpuid)
{
	return (get_apicid_from_cpuid(cpuid) &
		((1 << logical_CPU_bits) - 1));
}