AMD64 - Refactor uio_resid and size_t assumptions.
sys/platform/pc64/amd64/vm_machdep.c
/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * Copyright (c) 2008 The DragonFly Project.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 * $FreeBSD: src/sys/i386/i386/vm_machdep.c,v 1.132.2.9 2003/01/25 19:02:23 dillon Exp $
 * $DragonFly: src/sys/platform/pc64/amd64/vm_machdep.c,v 1.3 2008/08/29 17:07:10 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/interrupt.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>

#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/smp.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/segments.h>
#include <machine/globaldata.h>	/* npxthread */

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>

#include <bus/isa/isa.h>

static void cpu_reset_real (void);
/*
 * Finish a fork operation, with lwp lp2 nearly set up.
 * Copy and update the pcb, and set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(struct lwp *lp1, struct lwp *lp2, int flags)
{
	struct pcb *pcb2;

	if ((flags & RFPROC) == 0) {
		if ((flags & RFMEM) == 0) {
			/* unshare user LDT */
			struct pcb *pcb1 = lp1->lwp_thread->td_pcb;
			struct pcb_ldt *pcb_ldt = pcb1->pcb_ldt;
			if (pcb_ldt && pcb_ldt->ldt_refcnt > 1) {
				pcb_ldt = user_ldt_alloc(pcb1, pcb_ldt->ldt_len);
				user_ldt_free(pcb1);
				pcb1->pcb_ldt = pcb_ldt;
				set_user_ldt(pcb1);
			}
		}
		return;
	}

	/* Ensure that lp1's pcb is up to date. */
	if (mdcpu->gd_npxthread == lp1->lwp_thread)
		npxsave(lp1->lwp_thread->td_savefpu);

	/*
	 * Copy lp1's PCB.  This really only applies to the
	 * debug registers and FP state, but it's faster to just copy the
	 * whole thing.  Because we only save the PCB at switchout time,
	 * the register state may not be current.
	 */
	pcb2 = lp2->lwp_thread->td_pcb;
	*pcb2 = *lp1->lwp_thread->td_pcb;

	/*
	 * Create a fresh stack for the new process.
	 * Copy the trap frame for the return to user mode as if from a
	 * syscall.  This copies the user mode register values.
	 *
	 * pcb_rsp must allocate an additional call-return pointer below
	 * the trap frame which will be restored by cpu_heavy_restore from
	 * PCB_RIP, and the thread's td_sp pointer must allocate an
	 * additional two quadwords below the pcb_rsp call-return pointer to
	 * hold the LWKT restore function pointer and rflags.
	 *
	 * The LWKT restore function pointer must be set to cpu_heavy_restore,
	 * which is our standard heavy-weight process switch-in function.
	 * YYY eventually we should shortcut fork_return and fork_trampoline
	 * to use the LWKT restore function directly so we can get rid of
	 * all the extra crap we are setting up.
	 */
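	/*
	 * As a reading aid only (derived from the assignments below, not an
	 * authoritative layout description), the top of the new thread's
	 * kernel stack ends up roughly as:
	 *
	 *	pcb2			(struct pcb at the top of the stack)
	 *	trapframe		<- lp2->lwp_md.md_regs
	 *	call-return slot	<- pcb2->pcb_rsp (restored from PCB_RIP)
	 *	rflags (PSL_USER)
	 *	&cpu_heavy_restore	<- lp2->lwp_thread->td_sp
	 */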
	lp2->lwp_md.md_regs = (struct trapframe *)pcb2 - 1;
	bcopy(lp1->lwp_md.md_regs, lp2->lwp_md.md_regs, sizeof(*lp2->lwp_md.md_regs));

	/*
	 * Set registers for trampoline to user mode.  Leave space for the
	 * return address on stack.  These are the kernel mode register values.
	 */
	pcb2->pcb_cr3 = vtophys(vmspace_pmap(lp2->lwp_proc->p_vmspace)->pm_pml4);
	pcb2->pcb_cr3 |= PG_RW | PG_U | PG_V;
	pcb2->pcb_rbx = (unsigned long)fork_return;	/* fork_trampoline argument */
	pcb2->pcb_rbp = 0;
	pcb2->pcb_rsp = (unsigned long)lp2->lwp_md.md_regs - sizeof(void *);
	pcb2->pcb_r12 = (unsigned long)lp2;		/* fork_trampoline argument */
	pcb2->pcb_r13 = 0;
	pcb2->pcb_r14 = 0;
	pcb2->pcb_r15 = 0;
	pcb2->pcb_rip = (unsigned long)fork_trampoline;
	lp2->lwp_thread->td_sp = (char *)(pcb2->pcb_rsp - sizeof(void *));
	*(u_int64_t *)lp2->lwp_thread->td_sp = PSL_USER;
	lp2->lwp_thread->td_sp -= sizeof(void *);
	*(void **)lp2->lwp_thread->td_sp = (void *)cpu_heavy_restore;

	/*
	 * pcb2->pcb_ldt: duplicated below, if necessary.
	 * pcb2->pcb_savefpu: cloned above.
	 * pcb2->pcb_flags: cloned above (always 0 here?).
	 * pcb2->pcb_onfault: cloned above (always NULL here?).
	 */

	/*
	 * XXX don't copy the i/o pages.  This should probably be fixed.
	 */
	pcb2->pcb_ext = 0;

	/* Copy the LDT, if necessary. */
	if (pcb2->pcb_ldt != 0) {
		if (flags & RFMEM) {
			pcb2->pcb_ldt->ldt_refcnt++;
		} else {
			pcb2->pcb_ldt = user_ldt_alloc(pcb2,
					pcb2->pcb_ldt->ldt_len);
		}
	}
	bcopy(&lp1->lwp_thread->td_tls, &lp2->lwp_thread->td_tls,
	      sizeof(lp2->lwp_thread->td_tls));
	/*
	 * Now, cpu_switch() can schedule the new lwp.
	 * pcb_rsp is loaded pointing to the cpu_switch() stack frame
	 * containing the return address when exiting cpu_switch.
	 * This will normally be to fork_trampoline(), which will have
	 * %r12 loaded with the new lwp's pointer.  fork_trampoline()
	 * will set up a stack to call fork_return(lp, frame); to complete
	 * the return to user-mode.
	 */
}

/*
 * Prepare a new lwp to return to the address specified in params.
 */
int
cpu_prepare_lwp(struct lwp *lp, struct lwp_params *params)
{
	struct trapframe *regs = lp->lwp_md.md_regs;
	void *bad_return = NULL;
	int error;

	regs->tf_rip = (long)params->func;
	regs->tf_rsp = (long)params->stack;
	/* Set up argument for function call */
	regs->tf_rdi = (long)params->arg; /* JG: can this be a userspace address? */
	/*
	 * Set up a fake return address.  As the lwp function may never return,
	 * we simply copy out a NULL pointer and force the lwp to receive
	 * a SIGSEGV if it returns anyway.
	 */
	regs->tf_rsp -= sizeof(void *);
	error = copyout(&bad_return, (void *)regs->tf_rsp, sizeof(bad_return));
	if (error)
		return (error);

	cpu_set_fork_handler(lp,
	    (void (*)(void *, struct trapframe *))generic_lwp_return, lp);
	return (0);
}
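
/*
 * Illustrative sketch only (placeholder names, not part of this file's API):
 * a caller of cpu_prepare_lwp() is expected to have filled in params roughly
 * as
 *
 *	params->func  = start_routine;	(new lwp begins here in user mode)
 *	params->stack = stack_top;	(initial user %rsp)
 *	params->arg   = start_arg;	(passed to the function in %rdi)
 *
 * The new lwp then effectively starts as start_routine(start_arg); if it
 * returns, it hits the NULL return address pushed above and takes a SIGSEGV.
 */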

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(struct lwp *lp, void (*func)(void *, struct trapframe *),
		     void *arg)
{
	/*
	 * Note that the trap frame follows the args, so the function
	 * is really called like this: func(arg, frame);
	 */
	lp->lwp_thread->td_pcb->pcb_rbx = (long)func;	/* function */
	lp->lwp_thread->td_pcb->pcb_r12 = (long)arg;	/* first arg */
}
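
/*
 * For an in-file example of how this hook is used, see cpu_prepare_lwp()
 * above, which points a freshly created lwp at generic_lwp_return so that
 * its first return to user mode goes through the normal lwp return path.
 */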

void
cpu_set_thread_handler(thread_t td, void (*rfunc)(void), void *func, void *arg)
{
	td->td_pcb->pcb_rbx = (long)func;
	td->td_pcb->pcb_r12 = (long)arg;
	td->td_switch = cpu_lwkt_switch;
	td->td_sp -= sizeof(void *);
	*(void **)td->td_sp = rfunc;	/* exit function on return */
	td->td_sp -= sizeof(void *);
	*(void **)td->td_sp = cpu_kthread_restore;
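
	/*
	 * A sketch of the result (derived from the stores above, not an
	 * authoritative description of the switch code): td_sp now points
	 * at cpu_kthread_restore with rfunc in the quadword just above it,
	 * so the thread is switched in via cpu_kthread_restore and, should
	 * func(arg) ever return, it falls into rfunc, the exit function.
	 */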
}

void
cpu_lwp_exit(void)
{
	struct thread *td = curthread;
	struct pcb *pcb;

	npxexit();
	pcb = td->td_pcb;
	KKASSERT(pcb->pcb_ext == NULL); /* Some i386 functionality was dropped */
	if (pcb->pcb_flags & PCB_DBREGS) {
		/*
		 * disable all hardware breakpoints
		 */
		reset_dbregs();
		pcb->pcb_flags &= ~PCB_DBREGS;
	}
	td->td_gd->gd_cnt.v_swtch++;

	crit_enter_quick(td);
	if (td->td_flags & TDF_TSLEEPQ)
		tsleep_remove(td);
	lwkt_deschedule_self(td);
	lwkt_remove_tdallq(td);
	cpu_thread_exit();
}

/*
 * Terminate the current thread.  The caller must have already acquired
 * the thread's rwlock and placed it on a reap list or otherwise notified
 * a reaper of its existence.  We set a special assembly switch function which
 * releases td_rwlock after it has cleaned up the MMU state and switched
 * out the stack.
 *
 * Must be called from a critical section and with the thread descheduled.
 */
void
cpu_thread_exit(void)
{
	curthread->td_switch = cpu_exit_switch;
	curthread->td_flags |= TDF_EXITING;
	lwkt_switch();
	panic("cpu_thread_exit: lwkt_switch() unexpectedly returned");
}

/*
 * Process Reaper.  Called after the caller has acquired the thread's
 * rwlock and removed it from the reap list.
 */
void
cpu_proc_wait(struct proc *p)
{
	/* drop per-process resources */
	pmap_dispose_proc(p);
}

void
cpu_reset(void)
{
	cpu_reset_real();
}

static void
cpu_reset_real(void)
{
	/*
	 * Attempt to do a CPU reset via the keyboard controller,
	 * do not turn off GateA20, as any machine that fails
	 * to do the reset here would then end up in no man's land.
	 */

#if !defined(BROKEN_KEYBOARD_RESET)
	outb(IO_KBD + 4, 0xFE);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */
	kprintf("Keyboard reset did not work, attempting CPU shutdown\n");
	DELAY(1000000);	/* wait 1 sec for kprintf to complete */
#endif
#if JG
	/* force a shutdown by unmapping entire address space ! */
	bzero((caddr_t) PTD, PAGE_SIZE);
#endif

	/* "good night, sweet prince .... <THUNK!>" */
	cpu_invltlb();
	/* NOTREACHED */
	while (1);
}

int
grow_stack(struct proc *p, vm_offset_t sp)
{
	int rv;

	rv = vm_map_growstack (p, sp);
	if (rv != KERN_SUCCESS)
		return (0);

	return (1);
}

/*
 * Tell whether this address is in some physical memory region.
 * Currently used by the kernel coredump code in order to avoid
 * dumping the ``ISA memory hole'' which could cause indefinite hangs,
 * or other unpredictable behaviour.
 */

int
is_physical_memory(vm_offset_t addr)
{
#if NISA > 0
	/* The ISA ``memory hole''. */
	if (addr >= 0xa0000 && addr < 0x100000)
		return 0;
#endif
	/*
	 * stuff other tests for known memory-mapped devices (PCI?)
	 * here
	 */

	return 1;
}

/*
 * platform-specific vmspace initialization (nothing for amd64)
 */
void
cpu_vmspace_alloc(struct vmspace *vm __unused)
{
}

void
cpu_vmspace_free(struct vmspace *vm __unused)
{
}

int
kvm_access_check(vm_offset_t saddr, vm_offset_t eaddr, int prot)
{
	vm_offset_t addr;

	if (saddr < KvaStart)
		return EFAULT;
	if (eaddr >= KvaEnd)
		return EFAULT;
	for (addr = saddr; addr < eaddr; addr += PAGE_SIZE) {
		if (pmap_extract(&kernel_pmap, addr) == 0)
			return EFAULT;
	}
	if (!kernacc((caddr_t)saddr, eaddr - saddr, prot))
		return EFAULT;
	return 0;
}
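
/*
 * A minimal usage sketch (hypothetical caller, not part of this file): a
 * kernel consumer wanting to read a KVA range might do
 *
 *	if (kvm_access_check(saddr, saddr + len, VM_PROT_READ) == 0)
 *		bcopy((void *)saddr, buf, len);
 *	else
 *		error = EFAULT;
 *
 * i.e. 0 means the whole [saddr, eaddr) range lies in mapped kernel virtual
 * memory with the requested access, and EFAULT means it does not.
 */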