kernel: Make SMP support default (and non-optional).
sys/platform/pc32/i386/vm_machdep.c

/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 * $FreeBSD: src/sys/i386/i386/vm_machdep.c,v 1.132.2.9 2003/01/25 19:02:23 dillon Exp $
 */

#include "use_npx.h"
#include "opt_reset.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/interrupt.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>

#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/smp.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/vm86.h>
#include <machine/segments.h>
#include <machine/globaldata.h>	/* npxthread */

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/user.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>

#include <bus/isa/isa.h>

static void cpu_reset_real (void);
static void cpu_reset_proxy (void);
static u_int cpu_reset_proxyid;
static volatile u_int cpu_reset_proxy_active;
extern int _ucodesel, _udatasel;

/*
 * Finish a fork operation, with lwp lp2 nearly set up.
 * Copy and update the pcb and set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(struct lwp *lp1, struct lwp *lp2, int flags)
{
	struct pcb *pcb2;

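	/*
	 * RFPROC requests creation of a new process/lwp.  When it is
	 * clear, rfork() is only adjusting the current process; if RFMEM
	 * is also clear the address space is being unshared, so a shared
	 * LDT must be unshared as well.
	 */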
	if ((flags & RFPROC) == 0) {
		if ((flags & RFMEM) == 0) {
			/* unshare user LDT */
			struct pcb *pcb1 = lp1->lwp_thread->td_pcb;
			struct pcb_ldt *pcb_ldt = pcb1->pcb_ldt;
			if (pcb_ldt && pcb_ldt->ldt_refcnt > 1) {
				pcb_ldt = user_ldt_alloc(pcb1, pcb_ldt->ldt_len);
				user_ldt_free(pcb1);
				pcb1->pcb_ldt = pcb_ldt;
				set_user_ldt(pcb1);
			}
		}
		return;
	}

#if NNPX > 0
	/* Ensure that lp1's pcb is up to date. */
	if (mdcpu->gd_npxthread == lp1->lwp_thread)
		npxsave(lp1->lwp_thread->td_savefpu);
#endif

	/*
	 * Copy lp1's PCB.  This really only applies to the
	 * debug registers and FP state, but it's faster to just copy the
	 * whole thing.  Because we only save the PCB at switchout time,
	 * the register state may not be current.
	 */
	pcb2 = lp2->lwp_thread->td_pcb;
	*pcb2 = *lp1->lwp_thread->td_pcb;

	/*
	 * Create a new fresh stack for the new process.
	 * Copy the trap frame for the return to user mode as if from a
	 * syscall.  This copies the user mode register values.  The
	 * 16 byte offset saves space for vm86, and must match
	 * common_tss.esp0 (kernel stack pointer on entry from user mode).
	 *
	 * pcb_esp must allocate an additional call-return pointer below
	 * the trap frame which will be restored by cpu_heavy_restore from
	 * PCB_EIP, and the thread's td_sp pointer must allocate an
	 * additional two words below the pcb_esp call-return pointer to
	 * hold the LWKT restore function pointer and eflags.
	 *
	 * The LWKT restore function pointer must be set to
	 * cpu_heavy_restore, which is our standard heavy weight process
	 * switch-in function.
	 * YYY eventually we should shortcut fork_return and fork_trampoline
	 * to use the LWKT restore function directly so we can get rid of
	 * all the extra crap we are setting up.
	 */
	lp2->lwp_md.md_regs = (struct trapframe *)((char *)pcb2 - 16) - 1;
	bcopy(lp1->lwp_md.md_regs, lp2->lwp_md.md_regs,
	    sizeof(*lp2->lwp_md.md_regs));

	/*
	 * Set registers for trampoline to user mode.  Leave space for the
	 * return address on stack.  These are the kernel mode register values.
	 */
	pcb2->pcb_cr3 = vtophys(vmspace_pmap(lp2->lwp_proc->p_vmspace)->pm_pdir);
	pcb2->pcb_edi = 0;
	pcb2->pcb_esi = (int)fork_return;	/* fork_trampoline argument */
	pcb2->pcb_ebp = 0;
	pcb2->pcb_esp = (int)lp2->lwp_md.md_regs - sizeof(void *);
	pcb2->pcb_ebx = (int)lp2;		/* fork_trampoline argument */
	pcb2->pcb_eip = (int)fork_trampoline;
	lp2->lwp_thread->td_sp = (char *)(pcb2->pcb_esp - sizeof(void *));
	*(u_int32_t *)lp2->lwp_thread->td_sp = PSL_USER;
	lp2->lwp_thread->td_sp -= sizeof(void *);
	*(void **)lp2->lwp_thread->td_sp = (void *)cpu_heavy_restore;
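
	/*
	 * Sketch of the kernel stack just constructed, from the pcb down:
	 *
	 *	pcb2			(top of the thread's kernel stack)
	 *	16 byte vm86 hole	(matches common_tss.esp0)
	 *	trapframe		(lp2->lwp_md.md_regs)
	 *	call-return slot	(pcb2->pcb_esp; filled from pcb_eip)
	 *	PSL_USER eflags word
	 *	cpu_heavy_restore	(lp2->lwp_thread->td_sp points here)
	 */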

	/*
	 * pcb2->pcb_ldt:	duplicated below, if necessary.
	 * pcb2->pcb_savefpu:	cloned above.
	 * pcb2->pcb_flags:	cloned above (always 0 here).
	 * pcb2->pcb_onfault:	cloned above (always NULL here).
	 * pcb2->pcb_onfault_sp: cloned above (don't care).
	 */

	/*
	 * XXX don't copy the i/o pages.  this should probably be fixed.
	 */
	pcb2->pcb_ext = NULL;

	/* Copy the LDT, if necessary. */
	if (pcb2->pcb_ldt != NULL) {
		if (flags & RFMEM) {
			pcb2->pcb_ldt->ldt_refcnt++;
		} else {
			pcb2->pcb_ldt = user_ldt_alloc(pcb2,
			    pcb2->pcb_ldt->ldt_len);
		}
	}
	bcopy(&lp1->lwp_thread->td_tls, &lp2->lwp_thread->td_tls,
	    sizeof(lp2->lwp_thread->td_tls));

	/*
	 * Now, cpu_switch() can schedule the new lwp.
	 * pcb_esp is loaded pointing to the cpu_switch() stack frame
	 * containing the return address when exiting cpu_switch().
	 * This will normally be to fork_trampoline(), which will have
	 * %ebx loaded with the new lwp's pointer.  fork_trampoline()
	 * will set up a stack to call fork_return(lp, frame) to complete
	 * the return to user mode.
	 */
}

/*
 * Prepare a new lwp to return to the address specified in params.
 */
int
cpu_prepare_lwp(struct lwp *lp, struct lwp_params *params)
{
	struct trapframe *regs = lp->lwp_md.md_regs;
	void *bad_return = NULL;
	int error;

	regs->tf_eip = (int)params->func;
	regs->tf_esp = (int)params->stack;

	/* Set up the argument for the function call. */
	regs->tf_esp -= sizeof(params->arg);
	error = copyout(&params->arg, (void *)regs->tf_esp,
	    sizeof(params->arg));
	if (error)
		return (error);

	/*
	 * Set up a fake return address.  As the lwp function may never
	 * return, we simply copy out a NULL pointer and force the lwp to
	 * receive a SIGSEGV if it returns anyway.
	 */
	regs->tf_esp -= sizeof(void *);
	error = copyout(&bad_return, (void *)regs->tf_esp, sizeof(bad_return));
	if (error)
		return (error);

	cpu_set_fork_handler(lp,
	    (void (*)(void *, struct trapframe *))generic_lwp_return, lp);
	return (0);
}
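
/*
 * The user stack built above ends up looking like this (top down):
 *
 *	params->stack		(caller-supplied stack top)
 *	params->arg		(single argument for params->func)
 *	NULL			(%esp; fake return address, so a return
 *				 out of params->func faults with SIGSEGV)
 *
 * The lwp thus starts as if params->func(params->arg) had just been called.
 */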

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(struct lwp *lp, void (*func)(void *, struct trapframe *),
    void *arg)
{
	/*
	 * Note that the trap frame follows the args, so the function
	 * is really called like this: func(arg, frame);
	 */
	lp->lwp_thread->td_pcb->pcb_esi = (int)func;	/* function */
	lp->lwp_thread->td_pcb->pcb_ebx = (int)arg;	/* first arg */
}

void
cpu_set_thread_handler(thread_t td, void (*rfunc)(void), void *func, void *arg)
{
	td->td_pcb->pcb_esi = (int)func;
	td->td_pcb->pcb_ebx = (int)arg;
	td->td_switch = cpu_lwkt_switch;
	td->td_sp -= sizeof(void *);
	*(void **)td->td_sp = rfunc;	/* exit function on return */
	td->td_sp -= sizeof(void *);
	*(void **)td->td_sp = cpu_kthread_restore;
}
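
/*
 * After cpu_set_thread_handler() the new thread's stack holds, from
 * td_sp up: cpu_kthread_restore (the LWKT restore function run on the
 * first switch-in) and rfunc (the exit function invoked should func
 * return); func and arg are parked in pcb_esi/pcb_ebx for the restore
 * code to pick up.
 */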

void
cpu_lwp_exit(void)
{
	struct thread *td = curthread;
	struct pcb *pcb;
	struct pcb_ext *ext;

	/*
	 * If we were using a private TSS, do a forced switch to ourselves
	 * to switch back to the common TSS before freeing it.
	 */
	pcb = td->td_pcb;
	if ((ext = pcb->pcb_ext) != NULL) {
		crit_enter();
		pcb->pcb_ext = NULL;
		lwkt_switch_return(td->td_switch(td));
		crit_exit();
		kmem_free(&kernel_map, (vm_offset_t)ext, ctob(IOPAGES + 1));
	}
	user_ldt_free(pcb);
	if (pcb->pcb_flags & PCB_DBREGS) {
		/*
		 * Disable all hardware breakpoints.
		 */
		reset_dbregs();
		pcb->pcb_flags &= ~PCB_DBREGS;
	}
	td->td_gd->gd_cnt.v_swtch++;

	crit_enter_quick(td);
	if (td->td_flags & TDF_TSLEEPQ)
		tsleep_remove(td);
	lwkt_deschedule_self(td);
	lwkt_remove_tdallq(td);
	cpu_thread_exit();
}

/*
 * Terminate the current thread.  The caller must have already acquired
 * the thread's rwlock and placed it on a reap list or otherwise notified
 * a reaper of its existence.  We set a special assembly switch function
 * which releases td_rwlock after it has cleaned up the MMU state and
 * switched out the stack.
 *
 * Must be called from a critical section and with the thread descheduled.
 */
void
cpu_thread_exit(void)
{
#if NNPX > 0
	npxexit();
#endif
	curthread->td_switch = cpu_exit_switch;
	curthread->td_flags |= TDF_EXITING;
	lwkt_switch();
	panic("cpu_exit");
}

#ifdef notyet
static void
setredzone(u_short *pte, caddr_t vaddr)
{
	/*
	 * Eventually do this by setting up an expand-down stack segment
	 * for ss0: selector, allowing stack access down to top of u.
	 * This means though that protection violations need to be handled
	 * through a double fault exception that must do an integral task
	 * switch to a known good context, within which a dump can be
	 * taken.  A sensible scheme might be to save the initial context
	 * used by sched (that has physical memory mapped 1:1 at bottom)
	 * and take the dump while still in mapped mode.
	 */
}
#endif

/*
 * Convert a kernel virtual address to a physical address.
 */
vm_paddr_t
kvtop(void *addr)
{
	vm_paddr_t pa;

	pa = pmap_kextract((vm_offset_t)addr);
	if (pa == 0)
		panic("kvtop: zero page frame");
	return (pa);
}

/*
 * Force reset the processor by invalidating the entire address space!
 */
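
/*
 * cpu_reset_proxy() runs on the restarted BSP and handshakes with the
 * initiating CPU through cpu_reset_proxy_active:
 *
 *	1 - set below; spin until the initiator disables interrupts and
 *	    advances the state to 2
 *	3 - set below after seeing 2; the initiator then re-enables
 *	    interrupts and advances to 4
 *	4 - stop the initiating CPU and perform the real reset
 */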
static void
cpu_reset_proxy(void)
{
	cpu_reset_proxy_active = 1;
	while (cpu_reset_proxy_active == 1)
		;	/* Wait for other cpu to disable interrupts */
	kprintf("cpu_reset_proxy: Grabbed mp lock for BSP\n");
	cpu_reset_proxy_active = 3;
	while (cpu_reset_proxy_active == 3)
		;	/* Wait for other cpu to enable interrupts */
	stop_cpus(CPUMASK(cpu_reset_proxyid));
	kprintf("cpu_reset_proxy: Stopped CPU %d\n", cpu_reset_proxyid);
	DELAY(1000000);
	cpu_reset_real();
}

void
cpu_reset(void)
{
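	/*
	 * smp_active_mask == 1 means only the BSP is running, so we can
	 * reset directly; otherwise the reset is proxied through the BSP.
	 */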
	if (smp_active_mask == 1) {
		cpu_reset_real();
		/* NOTREACHED */
	} else {
		cpumask_t map;
		int cnt;

		kprintf("cpu_reset called on cpu#%d\n", mycpu->gd_cpuid);

		map = mycpu->gd_other_cpus & ~stopped_cpus & smp_active_mask;
		if (map != 0) {
			kprintf("cpu_reset: Stopping other CPUs\n");
			stop_cpus(map);		/* Stop all other CPUs */
		}

		if (mycpu->gd_cpuid == 0) {
			DELAY(1000000);
			cpu_reset_real();
			/* NOTREACHED */
		} else {
			/* We are not the BSP (CPU #0) */
			cpu_reset_proxyid = mycpu->gd_cpuid;
			cpustop_restartfunc = cpu_reset_proxy;
			kprintf("cpu_reset: Restarting BSP\n");
			started_cpus = (1 << 0);	/* Restart CPU #0 */

			cnt = 0;
			while (cpu_reset_proxy_active == 0 && cnt < 10000000)
				cnt++;	/* Wait for BSP to announce restart */
			if (cpu_reset_proxy_active == 0)
				kprintf("cpu_reset: Failed to restart BSP\n");
			__asm __volatile("cli" : : : "memory");
			cpu_reset_proxy_active = 2;
			cnt = 0;
			while (cpu_reset_proxy_active == 2 && cnt < 10000000)
				cnt++;	/* Do nothing */
			if (cpu_reset_proxy_active == 2) {
				kprintf("cpu_reset: BSP did not grab mp lock\n");
				cpu_reset_real();	/* XXX: Bogus ? */
			}
			cpu_reset_proxy_active = 4;
			__asm __volatile("sti" : : : "memory");
			while (1)
				;
			/* NOTREACHED */
		}
	}
}

static void
cpu_reset_real(void)
{
	/*
	 * Attempt to do a CPU reset via the keyboard controller.
	 * Do not turn off GateA20, as any machine that fails
	 * to do the reset here would then end up in no man's land.
	 */
#if !defined(BROKEN_KEYBOARD_RESET)
	outb(IO_KBD + 4, 0xFE);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */
	kprintf("Keyboard reset did not work, attempting CPU shutdown\n");
	DELAY(1000000);	/* wait 1 sec for kprintf to complete */
#endif
	/* Force a shutdown by unmapping the entire address space! */
	bzero((caddr_t)PTD, PAGE_SIZE);

	/* "good night, sweet prince .... <THUNK!>" */
	cpu_invltlb();
	/* NOTREACHED */
	while (1)
		;
}

SYSCTL_DECL(_vm_stats_misc);

static void
swi_vm(void *arg, void *frame)
{
	if (busdma_swi_pending != 0)
		busdma_swi();
}

static void
swi_vm_setup(void *arg)
{
	register_swi(SWI_VM, swi_vm, NULL, "swi_vm", NULL, 0);
}

SYSINIT(vm_setup, SI_BOOT2_MACHDEP, SI_ORDER_ANY, swi_vm_setup, NULL);

/*
 * Platform-specific vmspace initialization (nothing for i386).
 */
void
cpu_vmspace_alloc(struct vmspace *vm __unused)
{
}

void
cpu_vmspace_free(struct vmspace *vm __unused)
{
}

/*
 * Used by /dev/kmem to determine if we can safely read or write
 * the requested KVA range.
 */
int
kvm_access_check(vm_offset_t saddr, vm_offset_t eaddr, int prot)
{
	vm_offset_t addr;

	if (saddr < KvaStart)
		return EFAULT;
	if (eaddr >= KvaEnd)
		return EFAULT;
	for (addr = saddr; addr < eaddr; addr += PAGE_SIZE) {
		if (pmap_extract(&kernel_pmap, addr) == 0)
			return EFAULT;
	}
	if (!kernacc((caddr_t)saddr, eaddr - saddr, prot))
		return EFAULT;
	return 0;
}