vkernel{,64} - CPU topology support
[dragonfly.git] sys/platform/vkernel/i386/mp.c

/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/memrange.h>
#include <sys/tls.h>
#include <sys/types.h>

#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

#include <sys/mplock2.h>

#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/globaldata.h>
#include <machine/md_var.h>
#include <machine/pmap.h>
#include <machine/smp.h>
#include <machine/tls.h>

#include <unistd.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>

extern pt_entry_t *KPTphys;

volatile cpumask_t stopped_cpus;
cpumask_t smp_active_mask = 1;		/* which cpus are ready for IPIs etc? */
static int boot_address;
static cpumask_t smp_startup_mask = 1;	/* which cpus have been started */
int mp_naps;				/* # of Application processors */
static int mp_finish;

/* Local data for detecting CPU TOPOLOGY */
static int core_bits = 0;
static int logical_CPU_bits = 0;

/* function prototypes XXX these should go elsewhere */
void bootstrap_idle(void);
void single_cpu_ipi(int, int, int);
void selected_cpu_ipi(cpumask_t, int, int);
#if 0
void ipi_handler(int);
#endif

pt_entry_t *SMPpt;

/* AP uses this during bootstrap.  Do not staticize.  */
char *bootSTK;
static int bootAP;

/* XXX these need to go into the appropriate header file */
static int start_all_aps(u_int);
void init_secondary(void);
void *start_ap(void *);

/*
 * Get SMP fully working before we start initializing devices.
 */
static
void
ap_finish(void)
{
	int i;
	cpumask_t ncpus_mask = 0;

	for (i = 1; i <= ncpus; i++)
		ncpus_mask |= CPUMASK(i);

	mp_finish = 1;
	if (bootverbose)
		kprintf("Finish MP startup\n");

	/* build our map of 'other' CPUs */
	mycpu->gd_other_cpus = smp_startup_mask & ~CPUMASK(mycpu->gd_cpuid);

	/*
	 * Let the other cpus finish initializing and build their map
	 * of 'other' CPUs.
	 */
	rel_mplock();
	while (smp_active_mask != smp_startup_mask) {
		DELAY(100000);
		cpu_lfence();
	}

	while (try_mplock() == 0)
		DELAY(100000);
	if (bootverbose)
		kprintf("Active CPU Mask: %08x\n", smp_active_mask);
}

SYSINIT(finishsmp, SI_BOOT2_FINISH_SMP, SI_ORDER_FIRST, ap_finish, NULL)
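
/*
 * Startup sequencing, as implemented in this file: each AP announces
 * itself by setting its bit in smp_startup_mask from ap_init();
 * ap_finish() on the BSP then releases the MP lock and waits for
 * smp_active_mask to catch up to smp_startup_mask, and an AP only sets
 * its smp_active_mask bit once it holds the MP lock and can safely
 * process IPIs.
 */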

void *
start_ap(void *arg __unused)
{
	init_secondary();
	setrealcpu();
	bootstrap_idle();

	return(NULL);			/* NOTREACHED */
}

/* storage for AP thread IDs */
pthread_t ap_tids[MAXCPU];

void
mp_start(void)
{
	int shift;

	ncpus = optcpus;

	mp_naps = ncpus - 1;

	/* ncpus2 -- ncpus rounded down to the nearest power of 2 */
	for (shift = 0; (1 << shift) <= ncpus; ++shift)
		;
	--shift;
	ncpus2_shift = shift;
	ncpus2 = 1 << shift;
	ncpus2_mask = ncpus2 - 1;

	/* ncpus_fit -- ncpus rounded up to the nearest power of 2 */
	if ((1 << shift) < ncpus)
		++shift;
	ncpus_fit = 1 << shift;
	ncpus_fit_mask = ncpus_fit - 1;

	/*
	 * cpu0 initialization
	 */
	mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map,
					    sizeof(lwkt_ipiq) * ncpus);
	bzero(mycpu->gd_ipiq, sizeof(lwkt_ipiq) * ncpus);

	/*
	 * cpu 1-(n-1)
	 */
	start_all_aps(boot_address);
}
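
/*
 * Worked example of the rounding above: with optcpus = 6 the loop exits
 * at shift = 3, so ncpus2 = 1 << 2 = 4 (rounded down, ncpus2_mask = 3)
 * and ncpus_fit = 1 << 3 = 8 (rounded up, ncpus_fit_mask = 7).  For a
 * power-of-2 cpu count both values come out equal to ncpus.
 */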

void
mp_announce(void)
{
	int x;

	kprintf("DragonFly/MP: Multiprocessor\n");
	kprintf(" cpu0 (BSP)\n");

	for (x = 1; x <= mp_naps; ++x)
		kprintf(" cpu%d (AP)\n", x);
}

void
cpu_send_ipiq(int dcpu)
{
	if (CPUMASK(dcpu) & smp_active_mask) {
		if (pthread_kill(ap_tids[dcpu], SIGUSR1) != 0)
			panic("pthread_kill failed in cpu_send_ipiq");
	}
#if 0
	panic("XXX cpu_send_ipiq()");
#endif
}

void
smp_invltlb(void)
{
#ifdef SMP
#endif
}

void
single_cpu_ipi(int cpu, int vector, int delivery_mode)
{
	kprintf("XXX single_cpu_ipi\n");
}

void
selected_cpu_ipi(cpumask_t target, int vector, int delivery_mode)
{
	crit_enter();
	while (target) {
		int n = BSFCPUMASK(target);
		target &= ~CPUMASK(n);
		single_cpu_ipi(n, vector, delivery_mode);
	}
	crit_exit();
}
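
/*
 * Example of the mask walk above: for target = 0x0b (cpus 0, 1 and 3),
 * BSFCPUMASK() returns the lowest set bit on each pass, so the IPI is
 * sent to cpu 0, then cpu 1, then cpu 3, and the loop exits once the
 * mask is empty.
 */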

int
stop_cpus(cpumask_t map)
{
	map &= smp_active_mask;

	crit_enter();
	while (map) {
		int n = BSFCPUMASK(map);
		map &= ~CPUMASK(n);
		stopped_cpus |= CPUMASK(n);
		if (pthread_kill(ap_tids[n], SIGXCPU) != 0)
			panic("stop_cpus: pthread_kill failed");
	}
	crit_exit();
#if 0
	panic("XXX stop_cpus()");
#endif

	return(1);
}

int
restart_cpus(cpumask_t map)
{
	map &= smp_active_mask;

	crit_enter();
	while (map) {
		int n = BSFCPUMASK(map);
		map &= ~CPUMASK(n);
		stopped_cpus &= ~CPUMASK(n);
		if (pthread_kill(ap_tids[n], SIGXCPU) != 0)
			panic("restart_cpus: pthread_kill failed");
	}
	crit_exit();
#if 0
	panic("XXX restart_cpus()");
#endif

	return(1);
}
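
/*
 * Usage sketch (hypothetical caller, not part of this file): a panic or
 * debugger path would typically freeze all other cpus and later resume
 * exactly the set it froze:
 *
 *	stop_cpus(mycpu->gd_other_cpus);
 *	...
 *	restart_cpus(stopped_cpus);
 *
 * Both routines deliver SIGXCPU to the target AP's pthread; the signal
 * handler is expected to park the cpu while its stopped_cpus bit
 * remains set and release it once the bit is cleared.
 */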

void
ap_init(void)
{
	/*
	 * Adjust smp_startup_mask to signal the BSP that we have started
	 * up successfully.  Note that we do not yet hold the BGL.  The BSP
	 * is waiting for our signal.
	 *
	 * We can't set our bit in smp_active_mask yet because we are still
	 * holding interrupts physically disabled and remote cpus could
	 * deadlock trying to send us an IPI.
	 */
	smp_startup_mask |= CPUMASK(mycpu->gd_cpuid);
	cpu_mfence();

	/*
	 * Interlock for finalization.  Wait until mp_finish is non-zero,
	 * then get the MP lock.
	 *
	 * Note: We are in a critical section.
	 *
	 * Note: we are the idle thread, we can only spin.
	 *
	 * Note: The load fence is memory-volatile and prevents both the
	 * compiler and the cpu from improperly caching mp_finish.
	 */
	while (mp_finish == 0) {
		cpu_lfence();
		DELAY(500000);
	}
	while (try_mplock() == 0)
		DELAY(100000);

	/* BSP may have changed PTD while we're waiting for the lock */
	cpu_invltlb();

	/* Build our map of 'other' CPUs. */
	mycpu->gd_other_cpus = smp_startup_mask & ~CPUMASK(mycpu->gd_cpuid);

	kprintf("SMP: AP CPU #%d Launched!\n", mycpu->gd_cpuid);

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	/*
	 * Once we go active we must process any IPIQ messages that may
	 * have been queued, because no actual IPI will occur until we
	 * set our bit in the smp_active_mask.  If we don't, the IPI
	 * message interlock could be left set which would also prevent
	 * further IPIs.
	 *
	 * The idle loop doesn't expect the BGL to be held and while
	 * lwkt_switch() normally cleans things up this is a special case
	 * because we are returning almost directly into the idle loop.
	 *
	 * The idle thread is never placed on the runq, make sure
	 * nothing we've done has put it there.
	 */
	KKASSERT(get_mplock_count(curthread) == 1);
	smp_active_mask |= CPUMASK(mycpu->gd_cpuid);

	mdcpu->gd_fpending = 0;
	mdcpu->gd_ipending = 0;
	initclocks_pcpu();	/* clock interrupts (via IPIs) */
	lwkt_process_ipiq();

	/*
	 * Releasing the mp lock lets the BSP finish up the SMP init
	 */
	rel_mplock();
	KKASSERT((curthread->td_flags & TDF_RUNQ) == 0);
}

void
init_secondary(void)
{
	int myid = bootAP;
	struct mdglobaldata *md;
	struct privatespace *ps;

	ps = &CPU_prvspace[myid];

	KKASSERT(ps->mdglobaldata.mi.gd_prvspace == ps);

	/*
	 * Setup the %fs for cpu #n.  The mycpu macro works after this
	 * point.  Note that %gs is used by pthreads.
	 */
	tls_set_fs(&CPU_prvspace[myid], sizeof(struct privatespace));

	md = mdcpu;	/* loaded through %fs:0 (mdglobaldata.mi.gd_prvspace) */

	md->gd_common_tss.tss_esp0 = 0;	/* not used until after switch */
	md->gd_common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	md->gd_common_tss.tss_ioopt = (sizeof md->gd_common_tss) << 16;

	/*
	 * Set to a known state:
	 * Set by mpboot.s:	CR0_PG, CR0_PE
	 * Set by cpu_setregs:	CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
}

static int
start_all_aps(u_int boot_addr)
{
	int x, i;
	struct mdglobaldata *gd;
	struct privatespace *ps;
	vm_page_t m;
	vm_offset_t va;
#if 0
	struct lwp_params params;
#endif

	/*
	 * needed for ipis to initial thread
	 * FIXME: rename ap_tids?
	 */
	ap_tids[0] = pthread_self();

	vm_object_hold(&kernel_object);
	for (x = 1; x <= mp_naps; x++) {
		/* Allocate space for the CPU's private space. */
		for (i = 0; i < sizeof(struct mdglobaldata); i += PAGE_SIZE) {
			va = (vm_offset_t)&CPU_prvspace[x].mdglobaldata + i;
			m = vm_page_alloc(&kernel_object, va, VM_ALLOC_SYSTEM);
			pmap_kenter_quick(va, m->phys_addr);
		}

		for (i = 0; i < sizeof(CPU_prvspace[x].idlestack); i += PAGE_SIZE) {
			va = (vm_offset_t)&CPU_prvspace[x].idlestack + i;
			m = vm_page_alloc(&kernel_object, va, VM_ALLOC_SYSTEM);
			pmap_kenter_quick(va, m->phys_addr);
		}

		gd = &CPU_prvspace[x].mdglobaldata;	/* official location */
		bzero(gd, sizeof(*gd));
		gd->mi.gd_prvspace = ps = &CPU_prvspace[x];

		/* prime data page for it to use */
		mi_gdinit(&gd->mi, x);
		cpu_gdinit(gd, x);

#if 0
		gd->gd_CMAP1 = pmap_kpte((vm_offset_t)CPU_prvspace[x].CPAGE1);
		gd->gd_CMAP2 = pmap_kpte((vm_offset_t)CPU_prvspace[x].CPAGE2);
		gd->gd_CMAP3 = pmap_kpte((vm_offset_t)CPU_prvspace[x].CPAGE3);
		gd->gd_PMAP1 = pmap_kpte((vm_offset_t)CPU_prvspace[x].PPAGE1);
		gd->gd_CADDR1 = ps->CPAGE1;
		gd->gd_CADDR2 = ps->CPAGE2;
		gd->gd_CADDR3 = ps->CPAGE3;
		gd->gd_PADDR1 = (vpte_t *)ps->PPAGE1;
#endif

		gd->mi.gd_ipiq = (void *)kmem_alloc(&kernel_map,
					sizeof(lwkt_ipiq) * (mp_naps + 1));
		bzero(gd->mi.gd_ipiq, sizeof(lwkt_ipiq) * (mp_naps + 1));

		/*
		 * Setup the AP boot stack
		 */
		bootSTK = &ps->idlestack[UPAGES*PAGE_SIZE/2];
		bootAP = x;

		/*
		 * Setup the AP's lwp, this is the 'cpu'
		 *
		 * We have to make sure our signals are masked or the new LWP
		 * may pick up a signal that it isn't ready for yet.  SMP
		 * startup occurs after SI_BOOT2_LEAVE_CRIT so interrupts
		 * have already been enabled.
		 */
		cpu_disable_intr();
		pthread_create(&ap_tids[x], NULL, start_ap, NULL);
		cpu_enable_intr();

		while ((smp_startup_mask & CPUMASK(x)) == 0) {
			cpu_lfence();	/* XXX spin until the AP has started */
			DELAY(1000);
		}
	}
	vm_object_drop(&kernel_object);

	return(ncpus - 1);
}
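
/*
 * Per-AP bring-up, as implemented above: pthread_create() launches
 * start_ap(), which runs init_secondary() to establish the cpu's TLS and
 * globaldata, switches to the real cpu via setrealcpu(), and falls into
 * bootstrap_idle().  The creating thread spins on smp_startup_mask
 * before moving on, so APs are started strictly one at a time (bootSTK
 * and bootAP are globals shared with the new thread).
 */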

/*
 * CPU TOPOLOGY DETECTION FUNCTIONS.
 */

void
detect_cpu_topology(void)
{
	logical_CPU_bits = vkernel_b_arg;
	core_bits = vkernel_B_arg;
}
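
/*
 * The synthetic APIC ID is treated as the bit field
 * [ chip ID | core bits | logical cpu bits ], with the widths of the low
 * two fields taken from vkernel_b_arg and vkernel_B_arg (presumably the
 * vkernel's -b and -B command line options).  The accessors below simply
 * shift and mask that field apart.
 */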

int
get_chip_ID(int cpuid)
{
	return get_apicid_from_cpuid(cpuid) >>
	    (logical_CPU_bits + core_bits);
}

int
get_core_number_within_chip(int cpuid)
{
	return (get_apicid_from_cpuid(cpuid) >> logical_CPU_bits) &
	    ((1 << core_bits) - 1);
}

int
get_logical_CPU_number_within_core(int cpuid)
{
	return get_apicid_from_cpuid(cpuid) &
	    ((1 << logical_CPU_bits) - 1);
}
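
/*
 * Worked example: with logical_CPU_bits = 1 and core_bits = 2, an APIC
 * ID of 22 (binary 10110) decomposes as chip ID 22 >> 3 = 2, core number
 * (22 >> 1) & 3 = 3, and logical cpu 22 & 1 = 0.
 */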
493}