kernel - Fix rare IPIQ freezes
[dragonfly.git] / sys / platform / vkernel64 / x86_64 / mp.c
/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/memrange.h>
#include <sys/tls.h>
#include <sys/types.h>

#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

#include <sys/mplock2.h>

#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/globaldata.h>
#include <machine/md_var.h>
#include <machine/pmap.h>
#include <machine/smp.h>
#include <machine/tls.h>

#include <unistd.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>

extern pt_entry_t *KPTphys;

volatile cpumask_t stopped_cpus;
cpumask_t smp_active_mask = 1;		/* which cpus are ready for IPIs etc? */
static int boot_address;
static cpumask_t smp_startup_mask = 1;	/* which cpus have been started */
int mp_naps;				/* # of Application Processors (APs) */
static int mp_finish;

/* function prototypes XXX these should go elsewhere */
void bootstrap_idle(void);
void single_cpu_ipi(int, int, int);
void selected_cpu_ipi(cpumask_t, int, int);
#if 0
void ipi_handler(int);
#endif

pt_entry_t *SMPpt;

/* AP uses this during bootstrap.  Do not staticize.  */
char *bootSTK;
static int bootAP;

/* XXX these need to go into the appropriate header file */
static int start_all_aps(u_int);
void init_secondary(void);
void *start_ap(void *);

/*
 * Get SMP fully working before we start initializing devices.
 */
static
void
ap_finish(void)
{
	int i;
	cpumask_t ncpus_mask = 0;

	for (i = 0; i < ncpus; i++)
		ncpus_mask |= CPUMASK(i);

	mp_finish = 1;
	if (bootverbose)
		kprintf("Finish MP startup\n");

	/* build our map of 'other' CPUs */
	mycpu->gd_other_cpus = smp_startup_mask & ~CPUMASK(mycpu->gd_cpuid);

	/*
	 * Let the other cpus finish initializing and build their map
	 * of 'other' CPUs.
	 */
	rel_mplock();
	while (smp_active_mask != smp_startup_mask) {
		DELAY(100000);
		cpu_lfence();
	}

	while (try_mplock() == 0)
		DELAY(100000);
	if (bootverbose)
		kprintf("Active CPU Mask: %08lx\n", (long)smp_active_mask);
}

SYSINIT(finishsmp, SI_BOOT2_FINISH_SMP, SI_ORDER_FIRST, ap_finish, NULL);
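
/*
 * Entry point for each AP's pthread, created by start_all_aps().
 * Set up the cpu's globaldata and TLS, then drop into the bootstrap
 * idle loop.  Never returns.
 */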
void *
start_ap(void *arg __unused)
{
	init_secondary();
	setrealcpu();
	bootstrap_idle();

	return(NULL); /* NOTREACHED */
}

/* storage for AP thread IDs */
pthread_t ap_tids[MAXCPU];
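
/*
 * Size the system (ncpus comes from the vkernel's optcpus setting),
 * derive the ncpus2/ncpus_fit helper variables, allocate cpu0's IPI
 * queues, and start the APs.
 */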
void
mp_start(void)
{
	int shift;

	ncpus = optcpus;

	mp_naps = ncpus - 1;

	/* ncpus2 -- ncpus rounded down to the nearest power of 2 */
	for (shift = 0; (1 << shift) <= ncpus; ++shift)
		;
	--shift;
	ncpus2_shift = shift;
	ncpus2 = 1 << shift;
	ncpus2_mask = ncpus2 - 1;

	/* ncpus_fit -- ncpus rounded up to the nearest power of 2 */
	if ((1 << shift) < ncpus)
		++shift;
	ncpus_fit = 1 << shift;
	ncpus_fit_mask = ncpus_fit - 1;

	/*
	 * cpu0 initialization
	 */
	mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map,
					    sizeof(lwkt_ipiq) * ncpus);
	bzero(mycpu->gd_ipiq, sizeof(lwkt_ipiq) * ncpus);

	/*
	 * cpu 1-(n-1)
	 */
	start_all_aps(boot_address);
}

void
mp_announce(void)
{
	int x;

	kprintf("DragonFly/MP: Multiprocessor\n");
	kprintf(" cpu0 (BSP)\n");

	for (x = 1; x <= mp_naps; ++x)
		kprintf(" cpu%d (AP)\n", x);
}
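
/*
 * Cause the target cpu's IPI interrupt to fire.  Each vkernel cpu is
 * a pthread, so the IPI is delivered by sending SIGUSR1 to the target
 * cpu's thread.  Cpus not yet in smp_active_mask are skipped; they
 * will process any queued IPIQ messages in ap_init() when they go
 * active.
 */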
void
cpu_send_ipiq(int dcpu)
{
	if (CPUMASK(dcpu) & smp_active_mask) {
		if (pthread_kill(ap_tids[dcpu], SIGUSR1) != 0)
			panic("pthread_kill failed in cpu_send_ipiq");
	}
#if 0
	panic("XXX cpu_send_ipiq()");
#endif
}
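
/*
 * TLB invalidation is handled separately on the vkernel platform, so
 * no shootdown IPI is needed here; this is a no-op stub.
 */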
void
smp_invltlb(void)
{
#ifdef SMP
#endif
}
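
/*
 * Send one IPI vector to a single cpu.  Not yet implemented for the
 * vkernel; the kprintf below flags any caller that reaches it.
 */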
void
single_cpu_ipi(int cpu, int vector, int delivery_mode)
{
	kprintf("XXX single_cpu_ipi\n");
}
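
/*
 * Send an IPI vector to every cpu in the target mask, lowest cpu
 * first, by peeling one bit off the mask per iteration.
 */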
void
selected_cpu_ipi(cpumask_t target, int vector, int delivery_mode)
{
	crit_enter();
	while (target) {
		int n = BSFCPUMASK(target);
		target &= ~CPUMASK(n);
		single_cpu_ipi(n, vector, delivery_mode);
	}
	crit_exit();
}
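
/*
 * Stop the cpus in the given mask: mark each target in stopped_cpus
 * and signal its thread with SIGXCPU, which parks the cpu until
 * restart_cpus() is called.
 */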
int
stop_cpus(cpumask_t map)
{
	map &= smp_active_mask;

	crit_enter();
	while (map) {
		int n = BSFCPUMASK(map);
		map &= ~CPUMASK(n);
		stopped_cpus |= CPUMASK(n);
		if (pthread_kill(ap_tids[n], SIGXCPU) != 0)
			panic("stop_cpus: pthread_kill failed");
	}
	crit_exit();
#if 0
	panic("XXX stop_cpus()");
#endif

	return(1);
}
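
/*
 * Restart previously stopped cpus: clear each target's bit in
 * stopped_cpus and send SIGXCPU again so the stopped thread rechecks
 * the mask and resumes.
 */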
int
restart_cpus(cpumask_t map)
{
	map &= smp_active_mask;

	crit_enter();
	while (map) {
		int n = BSFCPUMASK(map);
		map &= ~CPUMASK(n);
		stopped_cpus &= ~CPUMASK(n);
		if (pthread_kill(ap_tids[n], SIGXCPU) != 0)
			panic("restart_cpus: pthread_kill failed");
	}
	crit_exit();
#if 0
	panic("XXX restart_cpus()");
#endif

	return(1);
}
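
/*
 * Final startup for an AP: interlock with the BSP via
 * smp_startup_mask/mp_finish, acquire the BGL, and mark this cpu in
 * smp_active_mask so other cpus can IPI it.
 */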
void
ap_init(void)
{
	/*
	 * Adjust smp_startup_mask to signal the BSP that we have started
	 * up successfully.  Note that we do not yet hold the BGL.  The BSP
	 * is waiting for our signal.
	 *
	 * We can't set our bit in smp_active_mask yet because we are holding
	 * interrupts physically disabled and remote cpus could deadlock
	 * trying to send us an IPI.
	 */
	smp_startup_mask |= CPUMASK(mycpu->gd_cpuid);
	cpu_mfence();

	/*
	 * Interlock for finalization.  Wait until mp_finish is non-zero,
	 * then get the MP lock.
	 *
	 * Note: We are in a critical section.
	 *
	 * Note: we are the idle thread, we can only spin.
	 *
	 * Note: The load fence is memory-volatile and prevents both the
	 * compiler and the cpu from improperly caching mp_finish.
	 */

	while (mp_finish == 0) {
		cpu_lfence();
		DELAY(500000);
	}
	while (try_mplock() == 0)
		DELAY(100000);

	/* BSP may have changed PTD while we're waiting for the lock */
	cpu_invltlb();

	/* Build our map of 'other' CPUs. */
	mycpu->gd_other_cpus = smp_startup_mask & ~CPUMASK(mycpu->gd_cpuid);

	kprintf("SMP: AP CPU #%d Launched!\n", mycpu->gd_cpuid);

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();
	/*
	 * Once we go active we must process any IPIQ messages that may
	 * have been queued, because no actual IPI will occur until we
	 * set our bit in the smp_active_mask.  If we don't, the IPI
	 * message interlock could be left set, which would also prevent
	 * further IPIs.
	 *
	 * The idle loop doesn't expect the BGL to be held and while
	 * lwkt_switch() normally cleans things up this is a special case
	 * because we are returning almost directly into the idle loop.
	 *
	 * The idle thread is never placed on the runq; make sure
	 * nothing we've done has put it there.
	 */
	KKASSERT(get_mplock_count(curthread) == 1);
	smp_active_mask |= CPUMASK(mycpu->gd_cpuid);

	mdcpu->gd_fpending = 0;
	mdcpu->gd_ipending = 0;
	initclocks_pcpu();	/* clock interrupts (via IPIs) */
	lwkt_process_ipiq();

	/*
	 * Releasing the mp lock lets the BSP finish up the SMP init
	 */
	rel_mplock();
	KKASSERT((curthread->td_flags & TDF_RUNQ) == 0);
}
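
/*
 * Set up this AP's TLS and globaldata pointer so the mycpu/mdcpu
 * macros work; called from start_ap() before the idle loop is entered.
 */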
void
init_secondary(void)
{
	int myid = bootAP;
	struct mdglobaldata *md;
	struct privatespace *ps;

	ps = &CPU_prvspace[myid];

	KKASSERT(ps->mdglobaldata.mi.gd_prvspace == ps);

	/*
	 * Setup the %gs for cpu #n.  The mycpu macro works after this
	 * point.  Note that %fs is used by pthreads.
	 */
	tls_set_gs(&CPU_prvspace[myid], sizeof(struct privatespace));

	md = mdcpu;	/* loaded through %gs:0 (mdglobaldata.mi.gd_prvspace) */

	/* JG */
	md->gd_common_tss.tss_rsp0 = 0;	/* not used until after switch */
	//md->gd_common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	//md->gd_common_tss.tss_ioopt = (sizeof md->gd_common_tss) << 16;

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
}
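
/*
 * Allocate and map each AP's private space (globaldata and idle
 * stack), initialize its globaldata and IPI queues, then create the
 * AP's pthread and spin until it appears in smp_startup_mask.
 */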
static int
start_all_aps(u_int boot_addr)
{
	int x, i;
	struct mdglobaldata *gd;
	struct privatespace *ps;
	vm_page_t m;
	vm_offset_t va;
#if 0
	struct lwp_params params;
#endif

	/*
	 * needed for ipis to initial thread
	 * FIXME: rename ap_tids?
	 */
	ap_tids[0] = pthread_self();

	for (x = 1; x <= mp_naps; x++) {
		/* Allocate space for the CPU's private space. */
		for (i = 0; i < sizeof(struct mdglobaldata); i += PAGE_SIZE) {
			va = (vm_offset_t)&CPU_prvspace[x].mdglobaldata + i;
			m = vm_page_alloc(&kernel_object, va, VM_ALLOC_SYSTEM);
			pmap_kenter_quick(va, m->phys_addr);
		}

		for (i = 0; i < sizeof(CPU_prvspace[x].idlestack); i += PAGE_SIZE) {
			va = (vm_offset_t)&CPU_prvspace[x].idlestack + i;
			m = vm_page_alloc(&kernel_object, va, VM_ALLOC_SYSTEM);
			pmap_kenter_quick(va, m->phys_addr);
		}

		gd = &CPU_prvspace[x].mdglobaldata;	/* official location */
		bzero(gd, sizeof(*gd));
		gd->mi.gd_prvspace = ps = &CPU_prvspace[x];

		/* prime data page for it to use */
		mi_gdinit(&gd->mi, x);
		cpu_gdinit(gd, x);

#if 0
		gd->gd_CMAP1 = pmap_kpte((vm_offset_t)CPU_prvspace[x].CPAGE1);
		gd->gd_CMAP2 = pmap_kpte((vm_offset_t)CPU_prvspace[x].CPAGE2);
		gd->gd_CMAP3 = pmap_kpte((vm_offset_t)CPU_prvspace[x].CPAGE3);
		gd->gd_PMAP1 = pmap_kpte((vm_offset_t)CPU_prvspace[x].PPAGE1);
		gd->gd_CADDR1 = ps->CPAGE1;
		gd->gd_CADDR2 = ps->CPAGE2;
		gd->gd_CADDR3 = ps->CPAGE3;
		gd->gd_PADDR1 = (vpte_t *)ps->PPAGE1;
#endif

		gd->mi.gd_ipiq = (void *)kmem_alloc(&kernel_map,
					sizeof(lwkt_ipiq) * (mp_naps + 1));
		bzero(gd->mi.gd_ipiq, sizeof(lwkt_ipiq) * (mp_naps + 1));

		/*
		 * Setup the AP boot stack
		 */
		bootSTK = &ps->idlestack[UPAGES*PAGE_SIZE/2];
		bootAP = x;

		/*
		 * Setup the AP's lwp, this is the 'cpu'
		 *
		 * We have to make sure our signals are masked or the new LWP
		 * may pick up a signal that it isn't ready for yet.  SMP
		 * startup occurs after SI_BOOT2_LEAVE_CRIT so interrupts
		 * have already been enabled.
		 */
		cpu_disable_intr();
		pthread_create(&ap_tids[x], NULL, start_ap, NULL);
		cpu_enable_intr();

		while ((smp_startup_mask & CPUMASK(x)) == 0) {
			cpu_lfence(); /* XXX spin until the AP has started */
			DELAY(1000);
		}
	}

	return(ncpus - 1);
}