/*
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California. All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $
 * $DragonFly: src/sys/platform/vkernel/i386/trap.c,v 1.35 2008/09/09 04:06:19 dillon Exp $
 */

/*
 * AMD64 Trap and System call handling
 */
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pioctl.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/signal2.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vmmeter.h>
#include <sys/malloc.h>
#include <sys/ktrace.h>
#include <sys/upcall.h>
#include <sys/vkernel.h>
#include <sys/sysproto.h>
#include <sys/sysunion.h>
#include <sys/vmspace.h>

#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>

#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <machine/tss.h>
#include <machine/globaldata.h>

#include <sys/msgport2.h>
#include <sys/thread2.h>
#include <sys/mplock2.h>
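/*
 * MAKEMPSAFE() acquires the MP lock (the Big Giant Lock) on demand and
 * records that it did so in 'have_mplock', so the matching rel_mplock()
 * on the way out is only executed when the lock was actually taken.
 */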
#ifdef SMP
#define MAKEMPSAFE(have_mplock)					\
	if (have_mplock == 0) { get_mplock(); have_mplock = 1; }
#else
#define MAKEMPSAFE(have_mplock)
#endif
int (*pmath_emulate) (struct trapframe *);

extern int trapwrite (unsigned addr);

static int trap_pfault (struct trapframe *, int, vm_offset_t);
static void trap_fatal (struct trapframe *, int, vm_offset_t);
void dblfault_handler (void);

extern inthand_t IDTVEC(syscall);
#define MAX_TRAP_MSG		30
static char *trap_msg[] = {
	"",					/* 0 unused */
	"privileged instruction fault",		/* 1 T_PRIVINFLT */
	"",					/* 2 unused */
	"breakpoint instruction fault",		/* 3 T_BPTFLT */
	"",					/* 4 unused */
	"",					/* 5 unused */
	"arithmetic trap",			/* 6 T_ARITHTRAP */
	"system forced exception",		/* 7 T_ASTFLT */
	"",					/* 8 unused */
	"general protection fault",		/* 9 T_PROTFLT */
	"trace trap",				/* 10 T_TRCTRAP */
	"",					/* 11 unused */
	"page fault",				/* 12 T_PAGEFLT */
	"",					/* 13 unused */
	"alignment fault",			/* 14 T_ALIGNFLT */
	"",					/* 15 unused */
	"",					/* 16 unused */
	"",					/* 17 unused */
	"integer divide fault",			/* 18 T_DIVIDE */
	"non-maskable interrupt trap",		/* 19 T_NMI */
	"overflow trap",			/* 20 T_OFLOW */
	"FPU bounds check fault",		/* 21 T_BOUND */
	"FPU device not available",		/* 22 T_DNA */
	"double fault",				/* 23 T_DOUBLEFLT */
	"FPU operand fetch fault",		/* 24 T_FPOPFLT */
	"invalid TSS fault",			/* 25 T_TSSFLT */
	"segment not present fault",		/* 26 T_SEGNPFLT */
	"stack fault",				/* 27 T_STKFLT */
	"machine check trap",			/* 28 T_MCHK */
	"SIMD floating-point exception",	/* 29 T_XMMFLT */
	"reserved (unknown) fault",		/* 30 T_RESERVED */
};
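/*
 * NOTE: trap_msg[] is indexed directly by trap number, so the table must
 * contain an entry for every value up to and including MAX_TRAP_MSG.  The
 * fatal-trap reporting paths below only index it after range-checking the
 * trap type against MAX_TRAP_MSG.
 */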
static int ddb_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, ddb_on_nmi, CTLFLAG_RW,
	&ddb_on_nmi, 0, "Go to DDB on NMI");
static int panic_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
	&panic_on_nmi, 0, "Panic on NMI");
static int fast_release;
SYSCTL_INT(_machdep, OID_AUTO, fast_release, CTLFLAG_RW,
	&fast_release, 0, "Passive Release was optimal");
static int slow_release;
SYSCTL_INT(_machdep, OID_AUTO, slow_release, CTLFLAG_RW,
	&slow_release, 0, "Passive Release was nonoptimal");

static int syscall_mpsafe = 1;
SYSCTL_INT(_kern, OID_AUTO, syscall_mpsafe, CTLFLAG_RW,
	&syscall_mpsafe, 0, "Allow MPSAFE marked syscalls to run without BGL");
TUNABLE_INT("kern.syscall_mpsafe", &syscall_mpsafe);
static int trap_mpsafe = 1;
SYSCTL_INT(_kern, OID_AUTO, trap_mpsafe, CTLFLAG_RW,
	&trap_mpsafe, 0, "Allow traps to mostly run without the BGL");
TUNABLE_INT("kern.trap_mpsafe", &trap_mpsafe);

MALLOC_DEFINE(M_SYSMSG, "sysmsg", "sysmsg structure");
extern int max_sysmsg;
/*
 * Passively intercepts the thread switch function to increase the thread
 * priority from a user priority to a kernel priority, reducing
 * syscall and trap overhead for the case where no switch occurs.
 *
 * Synchronizes td_ucred with p_ucred.  This is used by system calls,
 * signal handling, faults, AST traps, and anything else that enters the
 * kernel from userland and provides the kernel with a stable read-only
 * copy of the process ucred.
 */
static __inline void
userenter(struct thread *curtd, struct proc *curp)
{
	struct ucred *ocred;
	struct ucred *ncred;

	curtd->td_release = lwkt_passive_release;

	if (curtd->td_ucred != curp->p_ucred) {
		ncred = crhold(curp->p_ucred);
		ocred = curtd->td_ucred;
		curtd->td_ucred = ncred;
		if (ocred)
			crfree(ocred);
	}
}
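/*
 * NOTE: the passive release hook installed by userenter() is undone on the
 * way back out in userexit() via lwkt_passive_recover(); if no thread
 * switch occurred in between, the priority was never actually raised.
 */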
/*
 * Handle signals, upcalls, profiling, and other AST's and/or tasks that
 * must be completed before we can return to or try to return to userland.
 *
 * Note that td_sticks is a 64 bit quantity, but there's no point doing 64
 * bit arithmetic on the delta calculation so the absolute tick values are
 * truncated to an integer.
 */
static void
userret(struct lwp *lp, struct trapframe *frame, int sticks)
{
	struct proc *p = lp->lwp_proc;
	int sig;

	/*
	 * Charge system time if profiling.  Note: times are in microseconds.
	 * This may do a copyout and block, so do it first even though it
	 * means some system time will be charged as user time.
	 */
	if (p->p_flag & P_PROFIL) {
		addupc_task(p, frame->tf_rip,
			(u_int)((int)lp->lwp_thread->td_sticks - sticks));

	/*
	 * If the jungle wants us dead, so be it.
	 */
	if (lp->lwp_flag & LWP_WEXIT) {
		rel_mplock(); /* NOT REACHED */

	/*
	 * Block here if we are in a stopped state.
	 */
	if (p->p_stat == SSTOP) {

	/*
	 * Post any pending upcalls
	 */
	if (p->p_flag & P_UPCALLPEND) {
		p->p_flag &= ~P_UPCALLPEND;

	/*
	 * Post any pending signals
	 */
	if ((sig = CURSIG_TRACE(lp)) != 0) {

	/*
	 * block here if we are swapped out, but still process signals
	 * (such as SIGKILL).  proc0 (the swapin scheduler) is already
	 * aware of our situation, we do not have to wake it up.
	 */
	if (p->p_flag & P_SWAPPEDOUT) {
		p->p_flag |= P_SWAPWAIT;
		if (p->p_flag & P_SWAPWAIT)
			tsleep(p, PCATCH, "SWOUT", 0);
		p->p_flag &= ~P_SWAPWAIT;

	/*
	 * Make sure postsig() handled request to restore old signal mask after
	 * running signal handler.
	 */
	KKASSERT((lp->lwp_flag & LWP_OLDMASK) == 0);
}
/*
 * Cleanup from userenter and any passive release that might have occurred.
 * We must reclaim the current-process designation before we can return
 * to usermode.  We also handle both LWKT and USER reschedule requests.
 */
static __inline void
userexit(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;
	/* globaldata_t gd = td->td_gd; */

	/*
	 * Handle stop requests at kernel priority.  Any requests queued
	 * after this loop will generate another AST.
	 */
	while (lp->lwp_proc->p_stat == SSTOP) {

	/*
	 * Reduce our priority in preparation for a return to userland.  If
	 * our passive release function was still in place, our priority was
	 * never raised and does not need to be reduced.
	 */
	lwkt_passive_recover(td);

	/*
	 * Become the current user scheduled process if we aren't already,
	 * and deal with reschedule requests and other factors.
	 */
	lp->lwp_proc->p_usched->acquire_curproc(lp);
	/* WARNING: we may have migrated cpu's */
	/* gd = td->td_gd; */
}
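/*
 * KTR tracepoints for kernel entry/exit events.  Each KTR_INFO() below
 * declares one probe (trap, trap_ret, syscall, syscall_ret, fork_ret);
 * the KTR_LOG() calls in the entry points use them to record the pid,
 * tid and per-event arguments.
 */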
#if !defined(KTR_KERNENTRY)
#define	KTR_KERNENTRY	KTR_ALL
#endif
KTR_INFO_MASTER(kernentry);
KTR_INFO(KTR_KERNENTRY, kernentry, trap, 0, "pid=%d, tid=%d, trapno=%d, eva=%p",
	sizeof(int) + sizeof(int) + sizeof(int) + sizeof(vm_offset_t));
KTR_INFO(KTR_KERNENTRY, kernentry, trap_ret, 0, "pid=%d, tid=%d",
	sizeof(int) + sizeof(int));
KTR_INFO(KTR_KERNENTRY, kernentry, syscall, 0, "pid=%d, tid=%d, call=%d",
	sizeof(int) + sizeof(int) + sizeof(int));
KTR_INFO(KTR_KERNENTRY, kernentry, syscall_ret, 0, "pid=%d, tid=%d, err=%d",
	sizeof(int) + sizeof(int) + sizeof(int));
KTR_INFO(KTR_KERNENTRY, kernentry, fork_ret, 0, "pid=%d, tid=%d",
	sizeof(int) + sizeof(int));
/*
 * Exception, fault, and trap interface to the kernel.
 * This common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed.
 *
 * This function is also called from doreti in an interlock to handle ASTs.
 * For example:  hardwareint->INTROUTINE->(set ast)->doreti->trap
 *
 * NOTE!  We have to retrieve the fault address prior to obtaining the
 * MP lock because get_mplock() may switch out.  YYY cr2 really ought
 * to be retrieved by the assembly code, not here.
 *
 * XXX gd_trap_nesting_level currently prevents lwkt_switch() from panicking
 * if an attempt is made to switch from a fast interrupt or IPI.  This is
 * necessary to properly take fatal kernel traps on SMP machines if
 * get_mplock() has to block.
 */
void
user_trap(struct trapframe *frame)
{
	struct globaldata *gd = mycpu;
	struct thread *td = gd->gd_curthread;
	struct lwp *lp = td->td_lwp;
	int i = 0, ucode = 0, type, code;
	int crit_count = td->td_pri & ~TDPRI_MASK;

	if (frame->tf_trapno == T_PAGEFLT)
		eva = frame->tf_addr;

	kprintf("USER_TRAP AT %08lx xflags %ld trapno %ld eva %08lx\n",
		frame->tf_rip, frame->tf_xflags, frame->tf_trapno, eva);

	/*
	 * Everything coming from user mode runs through user_trap,
	 * including system calls.
	 */
	if (frame->tf_trapno == T_FAST_SYSCALL) {

	KTR_LOG(kernentry_trap, lp->lwp_proc->p_pid, lp->lwp_tid,
		frame->tf_trapno, eva);

	eva = (frame->tf_trapno == T_PAGEFLT ? rcr2() : 0);
	++gd->gd_trap_nesting_level;
	MAKEMPSAFE(have_mplock);
	trap_fatal(frame, TRUE, eva);
	--gd->gd_trap_nesting_level;

	++gd->gd_trap_nesting_level;
	if (trap_mpsafe == 0)
		MAKEMPSAFE(have_mplock);
	--gd->gd_trap_nesting_level;

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
	type = frame->tf_trapno;
	code = frame->tf_err;

	sticks = (int)td->td_sticks;
	lp->lwp_md.md_regs = frame;

	switch (type) {
	case T_PRIVINFLT:	/* privileged instruction fault */

	case T_BPTFLT:		/* bpt instruction fault */
	case T_TRCTRAP:		/* trace trap */
		frame->tf_rflags &= ~PSL_T;

	case T_ARITHTRAP:	/* arithmetic trap */

	case T_ASTFLT:		/* Allow process switch */
		mycpu->gd_cnt.v_soft++;
		if (mycpu->gd_reqflags & RQF_AST_OWEUPC) {
			atomic_clear_int_nonlocked(&mycpu->gd_reqflags,
			addupc_task(p, p->p_prof.pr_addr,

		/*
		 * The following two traps can happen in
		 * vm86 mode, and, if so, we want to handle
		 * them specially.
		 */
	case T_PROTFLT:		/* general protection fault */
	case T_STKFLT:		/* stack fault */
		if (frame->tf_eflags & PSL_VM) {
			i = vm86_emulate((struct vm86frame *)frame);

	case T_SEGNPFLT:	/* segment not present fault */
	case T_TSSFLT:		/* invalid TSS fault */
	case T_DOUBLEFLT:	/* double fault */
		ucode = code + BUS_SEGM_FAULT;

	case T_PAGEFLT:		/* page fault */
		MAKEMPSAFE(have_mplock);
		i = trap_pfault(frame, TRUE, eva);

#if defined(I586_CPU) && !defined(NO_F00F_HACK)

	case T_DIVIDE:		/* integer divide fault */

		MAKEMPSAFE(have_mplock);
		/* machine/parity/power fail/"kitchen sink" faults */
		if (isa_nmi(code) == 0) {
			/*
			 * NMI can be hooked up to a pushbutton
			 * for debugging.
			 */
			kprintf ("NMI ... going to debugger\n");
			kdb_trap (type, 0, frame);
		} else if (panic_on_nmi)
			panic("NMI indicates hardware failure");
#endif /* NISA > 0 */

	case T_OFLOW:		/* integer overflow fault */

	case T_BOUND:		/* bounds check fault */

		/*
		 * Virtual kernel intercept - pass the DNA exception
		 * to the (emulated) virtual kernel if it asked to handle
		 * it.  This occurs when the virtual kernel is holding
		 * onto the FP context for a different emulated
		 * process than the one currently running.
		 *
		 * We must still call npxdna() since we may have
		 * saved FP state that the (emulated) virtual kernel
		 * needs to hand over to a different emulated process.
		 */
		if (lp->lwp_vkernel && lp->lwp_vkernel->ve &&
		    (td->td_pcb->pcb_flags & FP_VIRTFP)

		/*
		 * The kernel may have switched out the FP unit's
		 * state, causing the user process to take a fault
		 * when it tries to use the FP unit.  Restore the
		 * state here
		 */
		if (!pmath_emulate) {
			ucode = FPE_FPU_NP_TRAP;
		i = (*pmath_emulate)(frame);
		if (!(frame->tf_rflags & PSL_T))
			frame->tf_rflags &= ~PSL_T;
		/* else ucode = emulator_only_knows() XXX */

	case T_FPOPFLT:		/* FPU operand fetch fault */

	case T_XMMFLT:		/* SIMD floating-point exception */

	/*
	 * Virtual kernel intercept - if the fault is directly related to a
	 * VM context managed by a virtual kernel then let the virtual kernel
	 * handle it.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		vkernel_trap(lp, frame);

	/*
	 * Translate fault for emulators (e.g. Linux)
	 */
	if (*p->p_sysent->sv_transtrap)
		i = (*p->p_sysent->sv_transtrap)(i, type);

	MAKEMPSAFE(have_mplock);
	trapsignal(lp, i, ucode);

	if (type <= MAX_TRAP_MSG) {
		uprintf("fatal process exception: %s",
		if ((type == T_PAGEFLT) || (type == T_PROTFLT))
			uprintf(", fault VA = 0x%lx", (u_long)eva);

	KASSERT(td->td_mpcount == have_mplock, ("badmpcount trap/end from %p", (void *)frame->tf_rip));

	userret(lp, frame, sticks);

	KTR_LOG(kernentry_trap_ret, lp->lwp_proc->p_pid, lp->lwp_tid);

	KASSERT(crit_count == (td->td_pri & ~TDPRI_MASK),
		("syscall: critical section count mismatch! %d/%d",
		crit_count / TDPRI_CRIT, td->td_pri / TDPRI_CRIT));
}
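/*
 * Traps taken while the emulated kernel itself is running, as opposed to
 * user_trap() above which handles everything coming from the emulated user
 * process.  Page faults on kernel addresses, protection/segment-not-present
 * faults on the way back out to usermode, and trace traps end up here.
 */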
void
kern_trap(struct trapframe *frame)
{
	struct globaldata *gd = mycpu;
	struct thread *td = gd->gd_curthread;
	int i = 0, ucode = 0, type, code;
	int crit_count = td->td_pri & ~TDPRI_MASK;

	if (frame->tf_trapno == T_PAGEFLT)
		eva = frame->tf_addr;

	++gd->gd_trap_nesting_level;
	MAKEMPSAFE(have_mplock);
	trap_fatal(frame, FALSE, eva);
	--gd->gd_trap_nesting_level;

	++gd->gd_trap_nesting_level;

	if (trap_mpsafe == 0)
		MAKEMPSAFE(have_mplock);

	--gd->gd_trap_nesting_level;

	type = frame->tf_trapno;
	code = frame->tf_err;

	switch (type) {
	case T_PAGEFLT:		/* page fault */
		MAKEMPSAFE(have_mplock);
		trap_pfault(frame, FALSE, eva);

		/*
		 * The kernel may be using npx for copying or other
		 * purposes.
		 */
		panic("kernel NPX should not happen");

	case T_PROTFLT:		/* general protection fault */
	case T_SEGNPFLT:	/* segment not present fault */
		/*
		 * Invalid segment selectors and out of bounds
		 * %eip's and %esp's can be set up in user mode.
		 * This causes a fault in kernel mode when the
		 * kernel tries to return to user mode.  We want
		 * to get this fault so that we can fix the
		 * problem here and not have to check all the
		 * selectors and pointers when the user changes
		 * them.
		 */
		if (mycpu->gd_intr_nesting_level == 0) {
			if (td->td_pcb->pcb_onfault) {
				frame->tf_rip = (register_t)td->td_pcb->pcb_onfault;

		/*
		 * PSL_NT can be set in user mode and isn't cleared
		 * automatically when the kernel is entered.  This
		 * causes a TSS fault when the kernel attempts to
		 * `iret' because the TSS link is uninitialized.  We
		 * want to get this fault so that we can fix the
		 * problem here and not every time the kernel is
		 * entered.
		 */
		if (frame->tf_rflags & PSL_NT) {
			frame->tf_rflags &= ~PSL_NT;

	case T_TRCTRAP:		/* trace trap */
		if (frame->tf_rip == (register_t)IDTVEC(syscall)) {
			/*
			 * We've just entered system mode via the
			 * syscall lcall.  Continue single stepping
			 * silently until the syscall handler has
			 * saved the flags.
			 */
		if (frame->tf_rip == (register_t)IDTVEC(syscall) + 1) {
			/*
			 * The syscall handler has now saved the
			 * flags.  Stop single stepping it.
			 */
			frame->tf_rflags &= ~PSL_T;

		/*
		 * Ignore debug register trace traps due to
		 * accesses in the user's address space, which
		 * can happen under several conditions such as
		 * if a user sets a watchpoint on a buffer and
		 * then passes that buffer to a system call.
		 * We still want to get TRCTRAPS for addresses
		 * in kernel space because that is useful when
		 * debugging the kernel.
		 */
		if (user_dbreg_trap()) {
			/*
			 * Reset breakpoint bits because the
			 * processor doesn't
			 */
			load_dr6(rdr6() & 0xfffffff0);

		/*
		 * Fall through (TRCTRAP kernel mode, kernel address)
		 */

		/*
		 * If DDB is enabled, let it handle the debugger trap.
		 * Otherwise, debugger traps "can't happen".
		 */
		MAKEMPSAFE(have_mplock);
		if (kdb_trap (type, 0, frame))

		MAKEMPSAFE(have_mplock);
		trap_fatal(frame, FALSE, eva);

		MAKEMPSAFE(have_mplock);
		trap_fatal(frame, FALSE, eva);

	/*
	 * Ignore this trap generated from a spurious SIGTRAP.
	 *
	 * single stepping in / syscalls leads to spurious / SIGTRAP
	 *	so ignore
	 *
	 * Haiku (c) 2007 Simon 'corecode' Schubert
	 */

	/*
	 * Translate fault for emulators (e.g. Linux)
	 */
	if (*p->p_sysent->sv_transtrap)
		i = (*p->p_sysent->sv_transtrap)(i, type);

	MAKEMPSAFE(have_mplock);
	trapsignal(lp, i, ucode);

	if (type <= MAX_TRAP_MSG) {
		uprintf("fatal process exception: %s",
		if ((type == T_PAGEFLT) || (type == T_PROTFLT))
			uprintf(", fault VA = 0x%lx", (u_long)eva);

	KASSERT(crit_count == (td->td_pri & ~TDPRI_MASK),
		("syscall: critical section count mismatch! %d/%d",
		crit_count / TDPRI_CRIT, td->td_pri / TDPRI_CRIT));
}
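/*
 * trap_pfault() resolves a page fault on behalf of user_trap() and
 * kern_trap().  It runs the fault through vm_fault() on either the kernel
 * map or the faulting lwp's vmspace, returning 0 on success or the signal
 * (SIGSEGV/SIGBUS) that should be delivered on failure.
 */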
static int
trap_pfault(struct trapframe *frame, int usermode, vm_offset_t eva)
{
	struct vmspace *vm = NULL;
	thread_t td = curthread;
	struct lwp *lp = td->td_lwp;

	va = trunc_page(eva);
	if (usermode == FALSE) {
		/*
		 * This is a fault on kernel virtual memory.
		 */
		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL. If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */
		vm = lp->lwp_vmspace;

	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_READ | VM_PROT_WRITE;
	else
		ftype = VM_PROT_READ;

	if (map != &kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */

		/*
		 * Grow the stack if necessary
		 */
		/* grow_stack returns false only if va falls into
		 * a growable stack region and the stack growth
		 * fails.  It returns true if va was not within
		 * a growable stack region, or if the stack
		 * growth succeeded.
		 */
		if (!grow_stack (lp->lwp_proc, va)) {

		/* Fault in the user page: */
		rv = vm_fault(map, va, ftype,
			      (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
						      : VM_FAULT_NORMAL);

		/*
		 * Don't have to worry about process locking or stacks in the kernel.
		 */
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);

	if (rv == KERN_SUCCESS)
		return (0);

	if (td->td_gd->gd_intr_nesting_level == 0 &&
	    td->td_pcb->pcb_onfault) {
		frame->tf_rip = (register_t)td->td_pcb->pcb_onfault;

	trap_fatal(frame, usermode, eva);

	/*
	 * NOTE: on amd64 we have a tf_addr field in the trapframe, no
	 * kludge is needed to pass the fault address to signal handlers.
	 */
	struct proc *p = td->td_proc;
	kprintf("seg-fault accessing address %p rip=%p pid=%d p_comm=%s\n",
		(void *)va, (void *)frame->tf_rip, p->p_pid, p->p_comm);
	/* Debugger("seg-fault"); */

	return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}
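/*
 * trap_fatal() dumps the machine state (fault address and code, registers,
 * flags, current process and thread) to the console and then either drops
 * into the kernel debugger or panics with the trap message.
 */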
static void
trap_fatal(struct trapframe *frame, int usermode, vm_offset_t eva)
{
	code = frame->tf_xflags;
	type = frame->tf_trapno;

	if (type <= MAX_TRAP_MSG) {
		kprintf("\n\nFatal trap %d: %s while in %s mode\n",
			type, trap_msg[type],
			(usermode ? "user" : "kernel"));

	/* two separate prints in case of a trap on an unmapped page */
	kprintf("mp_lock = %08x; ", mp_lock);
	kprintf("cpuid = %d\n", mycpu->gd_cpuid);

	if (type == T_PAGEFLT) {
		kprintf("fault virtual address = %p\n", (void *)eva);
		kprintf("fault code = %s %s, %s\n",
			usermode ? "user" : "supervisor",
			code & PGEX_W ? "write" : "read",
			code & PGEX_P ? "protection violation" : "page not present");

	kprintf("instruction pointer = 0x%lx:0x%lx\n",
		frame->tf_cs & 0xffff, frame->tf_rip);

	ss = frame->tf_ss & 0xffff;
	ss = GSEL(GDATA_SEL, SEL_KPL);
	rsp = (long)&frame->tf_rsp;

	kprintf("stack pointer = 0x%x:0x%lx\n", ss, rsp);
	kprintf("frame pointer = 0x%x:0x%lx\n", ss, frame->tf_rbp);
	kprintf("processor eflags = ");
	if (frame->tf_rflags & PSL_T)
		kprintf("trace trap, ");
	if (frame->tf_rflags & PSL_I)
		kprintf("interrupt enabled, ");
	if (frame->tf_rflags & PSL_NT)
		kprintf("nested task, ");
	if (frame->tf_rflags & PSL_RF)
		kprintf("resume, ");
	if (frame->tf_rflags & PSL_VM)
		kprintf("vm86, ");
	kprintf("IOPL = %jd\n", (intmax_t)((frame->tf_rflags & PSL_IOPL) >> 12));
	kprintf("current process = ");
	kprintf("%lu (%s)\n",
	    (u_long)curproc->p_pid, curproc->p_comm ?
	    curproc->p_comm : "");

	kprintf("current thread = pri %d ", curthread->td_pri);
	if (curthread->td_pri >= TDPRI_CRIT)
		kprintf("(CRIT)");
	/*
	 *	we probably SHOULD have stopped the other CPUs before now!
	 *	another CPU COULD have been touching cpl at this moment...
	 */
	kprintf(" <- SMP: XXX");

	if ((debugger_on_panic || db_active) && kdb_trap(type, code, frame))

	kprintf("trap number = %d\n", type);
	if (type <= MAX_TRAP_MSG)
		panic("%s", trap_msg[type]);
	else
		panic("unknown/reserved trap");
}
/*
 * Double fault handler.  Called when a fault occurs while writing
 * a frame for a trap/exception onto the stack.  This usually occurs
 * when the stack overflows (such is the case with infinite recursion,
 * for example).
 *
 * XXX Note that the current PTD gets replaced by IdlePTD when the
 * task switch occurs.  This means that the stack that was active at
 * the time of the double fault is not available at <kstack> unless
 * the machine was idle when the double fault occurred.  The downside
 * of this is that "trace <ebp>" in ddb won't work.
 */
void
dblfault_handler(void)
{
	struct mdglobaldata *gd = mdcpu;

	kprintf("\nFatal double fault:\n");
	kprintf("rip = 0x%lx\n", gd->gd_common_tss.tss_rip);
	kprintf("rsp = 0x%lx\n", gd->gd_common_tss.tss_rsp);
	kprintf("rbp = 0x%lx\n", gd->gd_common_tss.tss_rbp);
	/* two separate prints in case of a trap on an unmapped page */
	kprintf("mp_lock = %08x; ", mp_lock);
	kprintf("cpuid = %d\n", mycpu->gd_cpuid);
	panic("double fault");
}
/*
 * Compensate for 386 brain damage (missing URKR).
 * This is a little simpler than the pagefault handler in trap() because
 * the page tables have already been faulted in and high addresses
 * are thrown out early for other reasons.
 */
int
trapwrite(unsigned addr)
{
	va = trunc_page((vm_offset_t)addr);
	/*
	 * XXX - MAX is END.  Changed > to >= for temp. fix.
	 */
	if (va >= VM_MAX_USER_ADDRESS)
		return (1);

	lp = curthread->td_lwp;
	vm = lp->lwp_vmspace;

	PHOLD(lp->lwp_proc);

	if (!grow_stack (lp->lwp_proc, va)) {
		PRELE(lp->lwp_proc);
		return (1);
	}

	/*
	 * fault the data page
	 */
	rv = vm_fault(&vm->vm_map, va, VM_PROT_WRITE, VM_FAULT_DIRTY);

	PRELE(lp->lwp_proc);

	if (rv != KERN_SUCCESS)
		return 1;

	return (0);
}
/*
 * syscall2 -	MP aware system call request C handler
 *
 * A system call is essentially treated as a trap except that the
 * MP lock is not held on entry or return.  We are responsible for
 * obtaining the MP lock if necessary and for handling ASTs
 * (e.g. a task switch) prior to return.
 *
 * In general, only simple access and manipulation of curproc and
 * the current stack is allowed without having to hold MP lock.
 *
 * MPSAFE - note that large sections of this routine are run without
 *	    the MP lock.
 */
void
syscall2(struct trapframe *frame)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;
	struct sysent *callp;
	register_t orig_tf_rflags;
	int crit_count = td->td_pri & ~TDPRI_MASK;
	int have_mplock = 0;
	union sysunion args;
	register_t *argsdst;

	mycpu->gd_cnt.v_syscall++;

	KTR_LOG(kernentry_syscall, lp->lwp_proc->p_pid, lp->lwp_tid,
		frame->tf_rax);

	KASSERT(td->td_mpcount == 0, ("badmpcount syscall2 from %p", (void *)frame->tf_rip));
	if (syscall_mpsafe == 0)
		MAKEMPSAFE(have_mplock);

	userenter(td, p);	/* lazy raise our priority */

	sticks = (int)td->td_sticks;
	orig_tf_rflags = frame->tf_rflags;

	/*
	 * Virtual kernel intercept - if a VM context managed by a virtual
	 * kernel issues a system call the virtual kernel handles it, not us.
	 * Restore the virtual kernel context and return from its system
	 * call.  The current frame is copied out to the virtual kernel.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		vkernel_trap(lp, frame);
		error = EJUSTRETURN;

	/*
	 * Get the system call parameters and account for time
	 */
	lp->lwp_md.md_regs = frame;
	params = (caddr_t)frame->tf_rsp + sizeof(register_t);
	code = frame->tf_rax;

	if (p->p_sysent->sv_prepsyscall) {
		(*p->p_sysent->sv_prepsyscall)(
			frame, (int *)(&args.nosys.sysmsg + 1),

		if (code == SYS_syscall || code == SYS___syscall) {
			code = frame->tf_rdi;

	if (p->p_sysent->sv_mask)
		code &= p->p_sysent->sv_mask;

	if (code >= p->p_sysent->sv_size)
		callp = &p->p_sysent->sv_table[0];
	else
		callp = &p->p_sysent->sv_table[code];

	narg = callp->sy_narg & SYF_ARGMASK;

	/*
	 * On amd64 we get up to six arguments in registers.  The rest are
	 * on the stack.  The first six members of 'struct trapframe' happen
	 * to be the registers used to pass arguments, in exactly the right
	 * order.
	 */
	argp = &frame->tf_rdi;
	argsdst = (register_t *)(&args.nosys.sysmsg + 1);
	/*
	 * JG can we overflow the space pointed to by 'argsdst'
	 * either with 'bcopy' or with 'copyin'?
	 */
	bcopy(argp, argsdst, sizeof(register_t) * regcnt);
	/*
	 * copyin is MP aware, but the tracing code is not
	 */
	if (narg > regcnt) {
		KASSERT(params != NULL, ("copyin args with no params!"));
		error = copyin(params, &argsdst[regcnt],
			       (narg - regcnt) * sizeof(register_t));
			if (KTRPOINT(td, KTR_SYSCALL)) {
				MAKEMPSAFE(have_mplock);
				ktrsyscall(lp, code, narg,
					(void *)(&args.nosys.sysmsg + 1));

	if (KTRPOINT(td, KTR_SYSCALL)) {
		MAKEMPSAFE(have_mplock);
		ktrsyscall(lp, code, narg, (void *)(&args.nosys.sysmsg + 1));

	/*
	 * Default return value is 0 (will be copied to %rax).  Double-value
	 * returns use %rax and %rdx.  %rdx is left unchanged for system
	 * calls which return only one result.
	 */
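	/*
	 * Example (sketch, not code from this file): a two-value system
	 * call such as pipe() fills in both args.sysmsg_fds[0] and
	 * args.sysmsg_fds[1]; they are copied back out to the user's
	 * %rax and %rdx below.
	 */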
	args.sysmsg_fds[0] = 0;
	args.sysmsg_fds[1] = frame->tf_rdx;

	/*
	 * The syscall might manipulate the trap frame.  If it does it
	 * will probably return EJUSTRETURN.
	 */
	args.sysmsg_frame = frame;

	STOPEVENT(p, S_SCE, narg);	/* MP aware */

	/*
	 * NOTE: All system calls run MPSAFE now.  The system call itself
	 *	 is responsible for getting the MP lock.
	 */
	error = (*callp->sy_call)(&args);

	kprintf("system call %d returned %d\n", code, error);

	/*
	 * MP SAFE (we may or may not have the MP lock at this point)
	 */

		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		lp = curthread->td_lwp;
		frame->tf_rax = args.sysmsg_fds[0];
		frame->tf_rdx = args.sysmsg_fds[1];
		frame->tf_rflags &= ~PSL_C;

		/*
		 * Reconstruct pc, we know that 'syscall' is 2 bytes.
		 * We have to do a full context restore so that %r10
		 * (which was holding the value of %rcx) is restored for
		 * the next iteration.
		 */
		frame->tf_rip -= frame->tf_err;
		frame->tf_r10 = frame->tf_rcx;

		panic("Unexpected EASYNC return value (for now)");

		if (p->p_sysent->sv_errsize) {
			if (error >= p->p_sysent->sv_errsize)
				error = -1;	/* XXX */
			else
				error = p->p_sysent->sv_errtbl[error];
		frame->tf_rax = error;
		frame->tf_rflags |= PSL_C;

	/*
	 * Traced syscall.  trapsignal() is not MP aware.
	 */
	if (orig_tf_rflags & PSL_T) {
		MAKEMPSAFE(have_mplock);
		frame->tf_rflags &= ~PSL_T;
		trapsignal(lp, SIGTRAP, 0);

	/*
	 * Handle reschedule and other end-of-syscall issues
	 */
	userret(lp, frame, sticks);

	if (KTRPOINT(td, KTR_SYSRET)) {
		MAKEMPSAFE(have_mplock);
		ktrsysret(lp, code, error, args.sysmsg_result);

	/*
	 * This works because errno is findable through the
	 * register set.  If we ever support an emulation where this
	 * is not the case, this code will need to be revisited.
	 */
	STOPEVENT(p, S_SCX, code);

	/*
	 * Release the MP lock if we had to get it
	 */
	KASSERT(td->td_mpcount == have_mplock,
		("badmpcount syscall2/end from %p", (void *)frame->tf_rip));

	KTR_LOG(kernentry_syscall_ret, lp->lwp_proc->p_pid, lp->lwp_tid, error);

	KASSERT(crit_count == (td->td_pri & ~TDPRI_MASK),
		("syscall: critical section count mismatch! %d/%d",
		crit_count / TDPRI_CRIT, td->td_pri / TDPRI_CRIT));
}
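/*
 * fork_return() is the first thing a newly created lwp runs on its way
 * back to userland: the child reports success (%rax = 0, carry clear)
 * and then funnels through generic_lwp_return() below.
 */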
void
fork_return(struct lwp *lp, struct trapframe *frame)
{
	frame->tf_rax = 0;		/* Child returns zero */
	frame->tf_rflags &= ~PSL_C;	/* success */
	frame->tf_rdx = 1;

	generic_lwp_return(lp, frame);
	KTR_LOG(kernentry_fork_ret, lp->lwp_proc->p_pid, lp->lwp_tid);
}
/*
 * Simplified back end of syscall(), used when returning from fork()
 * or lwp_create() directly into user mode.  MP lock is held on entry and
 * should be released on return.  This code will return back into the fork
 * trampoline code which then runs doreti.
 */
void
generic_lwp_return(struct lwp *lp, struct trapframe *frame)
{
	struct proc *p = lp->lwp_proc;

	/*
	 * Newly forked processes are given a kernel priority.  We have to
	 * adjust the priority to a normal user priority and fake entry
	 * into the kernel (call userenter()) to install a passive release
	 * function just in case userret() decides to stop the process.  This
	 * can occur when ^Z races a fork.  If we do not install the passive
	 * release function the current process designation will not be
	 * released when the thread goes to sleep.
	 */
	lwkt_setpri_self(TDPRI_USER_NORM);
	userenter(lp->lwp_thread, p);
	userret(lp, frame, 0);
	if (KTRPOINT(lp->lwp_thread, KTR_SYSRET))
		ktrsysret(lp, SYS_fork, 0, 0);
	p->p_flag |= P_PASSIVE_ACQ;
	userexit(lp);
	p->p_flag &= ~P_PASSIVE_ACQ;

	KKASSERT(lp->lwp_thread->td_mpcount == 1);
	rel_mplock();
}
/*
 * doreti has turned into this.  The frame is directly on the stack.  We
 * pull everything else we need (fpu and tls context) from the current
 * thread.
 *
 * Note on fpu interactions: In a virtual kernel, the fpu context for
 * an emulated user mode process is not shared with the virtual kernel's
 * fpu context, so we only have to 'stack' fpu contexts within the virtual
 * kernel itself, and not even then since the signal() contexts that we care
 * about save and restore the FPU state (I think anyhow).
 *
 * vmspace_ctl() returns an error only if it had problems installing the
 * context we supplied or problems copying data to/from our VM space.
 */
void
go_user(struct intrframe *frame)
{
	struct trapframe *tf = (void *)&frame->if_rdi;

	/*
	 * Interrupts may be disabled on entry, make sure all signals
	 * can be received before beginning our loop.
	 */

	/*
	 * Switch to the current simulated user process, then call
	 * user_trap() when we break out of it (usually due to a signal).
	 */
		/*
		 * Tell the real kernel whether it is ok to use the FP
		 * unit or not.
		 */
		if (mdcpu->gd_npxthread == curthread) {
			tf->tf_xflags &= ~PGEX_FPFAULT;
		} else {
			tf->tf_xflags |= PGEX_FPFAULT;
		}

		/*
		 * Run emulated user process context.  This call interlocks
		 * with new mailbox signals.
		 *
		 * Set PGEX_U unconditionally, indicating a user frame (the
		 * bit is normally set only by T_PAGEFLT).
		 */
		r = vmspace_ctl(&curproc->p_vmspace->vm_pmap, VMSPACE_CTL_RUN,
				tf, &curthread->td_savevext);
		frame->if_xflags |= PGEX_U;

		kprintf("GO USER %d trap %ld EVA %08lx RIP %08lx RSP %08lx XFLAGS %02lx/%02lx\n",
			r, tf->tf_trapno, tf->tf_addr, tf->tf_rip, tf->tf_rsp,
			tf->tf_xflags, frame->if_xflags);

				panic("vmspace_ctl failed error %d", errno);

		if (tf->tf_trapno) {
		if (mycpu->gd_reqflags & RQF_AST_MASK) {
			tf->tf_trapno = T_ASTFLT;
/*
 * If PGEX_FPFAULT is set then set FP_VIRTFP in the PCB to force a T_DNA
 * fault (which is then passed back to the virtual kernel) if an attempt is
 * made to use the FP unit.
 *
 * XXX this is a fairly big hack.
 */
void
set_vkernel_fp(struct trapframe *frame)
{
	struct thread *td = curthread;

	if (frame->tf_xflags & PGEX_FPFAULT) {
		td->td_pcb->pcb_flags |= FP_VIRTFP;
		if (mdcpu->gd_npxthread == td)
			npxexit();
	} else {
		td->td_pcb->pcb_flags &= ~FP_VIRTFP;
	}
}
/*
 * Called from vkernel_trap() to fixup the vkernel's syscall
 * frame for vmspace_ctl() return.
 */
void
cpu_vkernel_trap(struct trapframe *frame, int error)
{
	frame->tf_rax = error;
	if (error)
		frame->tf_rflags |= PSL_C;
	else
		frame->tf_rflags &= ~PSL_C;
}