/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * Copyright (c) 2008 The DragonFly Project.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 * $FreeBSD: src/sys/i386/i386/vm_machdep.c,v 1.132.2.9 2003/01/25 19:02:23 dillon Exp $
 * $DragonFly: src/sys/platform/pc64/amd64/vm_machdep.c,v 1.3 2008/08/29 17:07:10 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/interrupt.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <sys/dsched.h>

#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/smp.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/segments.h>
#include <machine/globaldata.h>	/* npxthread */

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>

#include <bus/isa/isa.h>

char machine[] = MACHINE;
SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD,
	      machine, 0, "Machine class");

/*
 * Finish a fork operation, with lwp lp2 nearly set up.
 * Copy and update the pcb, and set up the kernel stack so that the
 * child is ready to run and return to user mode.
 */
void
cpu_fork(struct lwp *lp1, struct lwp *lp2, int flags)
{
	struct pcb *pcb2;

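	/*
	 * No new process is being created (rfork(2) without RFPROC);
	 * the only machine-dependent work is giving the current process
	 * a private LDT if its address space is being unshared.
	 */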
	if ((flags & RFPROC) == 0) {
		if ((flags & RFMEM) == 0) {
			/* unshare user LDT */
			struct pcb *pcb1 = lp1->lwp_thread->td_pcb;
			struct pcb_ldt *pcb_ldt = pcb1->pcb_ldt;
			if (pcb_ldt && pcb_ldt->ldt_refcnt > 1) {
				pcb_ldt = user_ldt_alloc(pcb1,
							 pcb_ldt->ldt_len);
				user_ldt_free(pcb1);
				pcb1->pcb_ldt = pcb_ldt;
				set_user_ldt(pcb1);
			}
		}
		return;
	}

	/* Ensure that lp1's pcb is up to date. */
	if (mdcpu->gd_npxthread == lp1->lwp_thread)
		npxsave(lp1->lwp_thread->td_savefpu);
	/*
	 * Copy lp1's PCB.  This really only applies to the
	 * debug registers and FP state, but it's faster to just copy the
	 * whole thing.  Because we only save the PCB at switchout time,
	 * the register state may not be current.
	 */
	pcb2 = lp2->lwp_thread->td_pcb;
	*pcb2 = *lp1->lwp_thread->td_pcb;

	/*
	 * Create a new fresh stack for the new process.
	 * Copy the trap frame for the return to user mode as if from a
	 * syscall.  This copies the user mode register values.
	 *
	 * pcb_rsp must allocate an additional call-return pointer below
	 * the trap frame which will be restored by cpu_heavy_restore from
	 * PCB_RIP, and the thread's td_sp pointer must allocate an
	 * additional two quadwords below the pcb_rsp call-return pointer
	 * to hold the LWKT restore function pointer and rflags.
	 *
	 * The LWKT restore function pointer must be set to
	 * cpu_heavy_restore, which is our standard heavy-weight process
	 * switch-in function.
	 * YYY eventually we should shortcut fork_return and
	 * fork_trampoline to use the LWKT restore function directly so we
	 * can get rid of all the extra crap we are setting up.
	 */
	lp2->lwp_md.md_regs = (struct trapframe *)pcb2 - 1;
	bcopy(lp1->lwp_md.md_regs, lp2->lwp_md.md_regs,
	      sizeof(*lp2->lwp_md.md_regs));

	/*
	 * Set registers for trampoline to user mode.  Leave space for
	 * the return address on the stack.  These are the kernel mode
	 * register values.
	 */
	pcb2->pcb_unused01 = 0;
	pcb2->pcb_rbx = (unsigned long)fork_return; /* fork_trampoline argument */
	pcb2->pcb_rbp = 0;
	pcb2->pcb_rsp = (unsigned long)lp2->lwp_md.md_regs - sizeof(void *);
	pcb2->pcb_r12 = (unsigned long)lp2;	    /* fork_trampoline argument */
	pcb2->pcb_r13 = 0;
	pcb2->pcb_r14 = 0;
	pcb2->pcb_r15 = 0;
	pcb2->pcb_rip = (unsigned long)fork_trampoline;
	lp2->lwp_thread->td_sp = (char *)(pcb2->pcb_rsp - sizeof(void *));
	*(u_int64_t *)lp2->lwp_thread->td_sp = PSL_USER;
	lp2->lwp_thread->td_sp -= sizeof(void *);
	*(void **)lp2->lwp_thread->td_sp = (void *)cpu_heavy_restore;
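
	/*
	 * Illustrative sketch of the resulting lp2 kernel stack, per the
	 * setup above (higher addresses first):
	 *
	 *	struct pcb			(pcb2)
	 *	struct trapframe		(lp2->lwp_md.md_regs)
	 *	call-return pointer slot	(pcb2->pcb_rsp, loaded
	 *					 from PCB_RIP on restore)
	 *	rflags image			(PSL_USER)
	 *	restore function pointer	(td_sp -> cpu_heavy_restore)
	 */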

	/*
	 * pcb2->pcb_ldt:	duplicated below, if necessary.
	 * pcb2->pcb_savefpu:	cloned above.
	 * pcb2->pcb_flags:	cloned above (always 0 here?).
	 * pcb2->pcb_onfault:	cloned above (always NULL here?).
	 */

	/*
	 * XXX don't copy the i/o pages.  this should probably be fixed.
	 */
	pcb2->pcb_ext = NULL;

	/* Copy the LDT, if necessary. */
	if (pcb2->pcb_ldt != NULL) {
		if (flags & RFMEM) {
			pcb2->pcb_ldt->ldt_refcnt++;
		} else {
			pcb2->pcb_ldt = user_ldt_alloc(pcb2,
						       pcb2->pcb_ldt->ldt_len);
		}
	}
	bcopy(&lp1->lwp_thread->td_tls, &lp2->lwp_thread->td_tls,
	      sizeof(lp2->lwp_thread->td_tls));

	/*
	 * Now, cpu_switch() can schedule the new lwp.
	 * pcb_rsp is loaded pointing to the cpu_switch() stack frame
	 * containing the return address when exiting cpu_switch().
	 * This will normally be to fork_trampoline(), which will have
	 * %rbx loaded with the new lwp's pointer.  fork_trampoline()
	 * will set up a stack frame to call fork_return(lp, frame) to
	 * complete the return to user mode.
	 */
}

/*
 * Prepare a new lwp to return to the address specified in params.
 */
int
cpu_prepare_lwp(struct lwp *lp, struct lwp_params *params)
{
	struct trapframe *regs = lp->lwp_md.md_regs;
	void *bad_return = NULL;
	int error;

	regs->tf_rip = (long)params->func;
	regs->tf_rsp = (long)params->stack;
	/* Set up argument for function call */
	regs->tf_rdi = (long)params->arg; /* JG Can this be in userspace addresses? */
	/*
	 * Set up a fake return address.  Since the lwp function may never
	 * return, we simply copy out a NULL pointer and force the lwp to
	 * receive a SIGSEGV if it returns anyway.
	 */
	regs->tf_rsp -= sizeof(void *);
	error = copyout(&bad_return, (void *)regs->tf_rsp,
			sizeof(bad_return));
	if (error)
		return (error);

	cpu_set_fork_handler(lp,
	    (void (*)(void *, struct trapframe *))generic_lwp_return, lp);
	return (0);
}
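
/*
 * Illustrative view of the result (a sketch, not code from this file):
 * the new lwp begins execution as if it had been called as
 *
 *	func(arg);		with %rsp = params->stack
 *
 * and with a NULL return address, so lwp entry functions are expected
 * to terminate the lwp explicitly rather than return.
 */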

/*
 * Intercept the return address from a freshly forked process that has
 * NOT been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(struct lwp *lp, void (*func)(void *, struct trapframe *),
		     void *arg)
{
	/*
	 * Note that the trap frame follows the args, so the function
	 * is really called like this:  func(arg, frame);
	 */
	lp->lwp_thread->td_pcb->pcb_rbx = (long)func;	/* function */
	lp->lwp_thread->td_pcb->pcb_r12 = (long)arg;	/* first arg */
}

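/*
 * Prepare a pure kernel thread (LWKT only; it never returns to user
 * mode).  When the thread is first switched in, cpu_kthread_restore
 * calls func(arg), and rfunc is pushed as the return address so that
 * it runs if func ever returns.  Callers are expected to pass an exit
 * function as rfunc (we believe this is normally lwkt_exit; an
 * assumption, not verifiable from this file alone).
 */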
void
cpu_set_thread_handler(thread_t td, void (*rfunc)(void), void *func, void *arg)
{
	td->td_pcb->pcb_rbx = (long)func;
	td->td_pcb->pcb_r12 = (long)arg;
	td->td_switch = cpu_lwkt_switch;
	td->td_sp -= sizeof(void *);
	*(void **)td->td_sp = rfunc;	/* exit function on return */
	td->td_sp -= sizeof(void *);
	*(void **)td->td_sp = cpu_kthread_restore;
}

void
cpu_lwp_exit(void)
{
	struct thread *td = curthread;
	struct pcb *pcb;

	npxexit();
	pcb = td->td_pcb;
	KKASSERT(pcb->pcb_ext == NULL);	/* Some i386 functionality was dropped */
	if (pcb->pcb_flags & PCB_DBREGS) {
		/*
		 * Disable all hardware breakpoints.
		 */
		reset_dbregs();
		pcb->pcb_flags &= ~PCB_DBREGS;
	}
	td->td_gd->gd_cnt.v_swtch++;

	dsched_exit_thread(td);
	crit_enter_quick(td);
	lwkt_deschedule_self(td);
	lwkt_remove_tdallq(td);
	cpu_thread_exit();
}

/*
 * Terminate the current thread.  The caller must have already acquired
 * the thread's rwlock and placed it on a reap list or otherwise notified
 * a reaper of its existence.  We set a special assembly switch function
 * which releases td_rwlock after it has cleaned up the MMU state and
 * switched out the stack.
 *
 * Must be called from a critical section and with the thread descheduled.
 */
void
cpu_thread_exit(void)
{
	curthread->td_switch = cpu_exit_switch;
	curthread->td_flags |= TDF_EXITING;
	lwkt_switch();
	panic("cpu_thread_exit: lwkt_switch() unexpectedly returned");
}

/*
 * Process Reaper.  Called after the caller has acquired the thread's
 * rwlock and removed it from the reap list.
 */
void
cpu_proc_wait(struct proc *p)
{
	/* drop per-process resources */
	pmap_dispose_proc(p);
}

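/*
 * Grow the user stack of process p to include the address sp.
 * Returns 1 on success, 0 on failure.
 */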
int
grow_stack(struct proc *p, u_long sp)
{
	int rv;

	rv = vm_map_growstack(p, sp);
	if (rv != KERN_SUCCESS)
		return (0);

	return (1);
}

/*
 * Tell whether this address is in some physical memory region.
 * Currently used by the kernel coredump code in order to avoid dumping
 * the ``ISA memory hole'' which could cause indefinite hangs or other
 * unpredictable behaviour.
 */
int
is_physical_memory(vm_offset_t addr)
{
	/* XXX not implemented on pc64; treat all addresses as dumpable */
	return 1;
}

/*
 * Used by /dev/kmem to determine if we can safely read or write
 * the requested KVA range.  Some portions of kernel memory are
 * not governed by our virtual page table.
 */
extern int64_t _end;
extern void _start(void);

int
kvm_access_check(vm_offset_t saddr, vm_offset_t eaddr, int prot)
{
	vm_offset_t addr;

	/* The kernel image itself is always accessible */
	if (saddr >= trunc_page((vm_offset_t)&_start) &&
	    eaddr <= round_page((vm_offset_t)&_end))
		return 0;
	/* Reject anything outside the kernel virtual address range */
	if (saddr < KvaStart)
		return EFAULT;
	if (eaddr >= KvaEnd)
		return EFAULT;
	/* Every page in the range must have a physical backing */
	for (addr = saddr; addr < eaddr; addr += PAGE_SIZE) {
		if (pmap_extract(&kernel_pmap, addr) == 0)
			return EFAULT;
	}
	/* Finally, verify protections against the kernel map */
	if (!kernacc((caddr_t)saddr, eaddr - saddr, prot))
		return EFAULT;
	return 0;
}
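
/*
 * Illustrative use (an assumption modeled on the /dev/kmem read/write
 * path, not code in this file): a caller validates a KVA range before
 * touching it, e.g.
 *
 *	if (kvm_access_check(va, va + len, VM_PROT_READ) != 0)
 *		return (EFAULT);
 */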