/*
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $
 * $DragonFly: src/sys/platform/vkernel/i386/trap.c,v 1.35 2008/09/09 04:06:19 dillon Exp $
 */
/*
 * 386 Trap and System call handling
 */
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pioctl.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/signal2.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vmmeter.h>
#include <sys/malloc.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#include <sys/upcall.h>
#include <sys/vkernel.h>
#include <sys/sysproto.h>
#include <sys/sysunion.h>
#include <sys/vmspace.h>

#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>

#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <machine/tss.h>
#include <machine/globaldata.h>

#include <machine/vm86.h>

#include <sys/msgport2.h>
#include <sys/thread2.h>
#include <sys/mplock2.h>
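
/*
 * NOTE: MAKEMPSAFE() below lazily acquires the giant MP lock the first
 * time a trap or syscall path actually needs it; 'have_mplock' is then
 * checked on the way out (rel_mplock()) so paths that never needed the
 * lock do not pay for it.
 */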
#ifdef SMP
#define MAKEMPSAFE(have_mplock)			\
	if (have_mplock == 0) {			\
		get_mplock();			\
		have_mplock = 1;		\
	}
#else
#define MAKEMPSAFE(have_mplock)
#endif
int (*pmath_emulate) (struct trapframe *);

static int trap_pfault (struct trapframe *, int, vm_offset_t);
static void trap_fatal (struct trapframe *, int, vm_offset_t);
void dblfault_handler (void);

extern inthand_t IDTVEC(syscall);
#define MAX_TRAP_MSG		28
static char *trap_msg[] = {
	"",					/*  0 unused */
	"privileged instruction fault",		/*  1 T_PRIVINFLT */
	"",					/*  2 unused */
	"breakpoint instruction fault",		/*  3 T_BPTFLT */
	"",					/*  4 unused */
	"",					/*  5 unused */
	"arithmetic trap",			/*  6 T_ARITHTRAP */
	"system forced exception",		/*  7 T_ASTFLT */
	"",					/*  8 unused */
	"general protection fault",		/*  9 T_PROTFLT */
	"trace trap",				/* 10 T_TRCTRAP */
	"",					/* 11 unused */
	"page fault",				/* 12 T_PAGEFLT */
	"",					/* 13 unused */
	"alignment fault",			/* 14 T_ALIGNFLT */
	"",					/* 15 unused */
	"",					/* 16 unused */
	"",					/* 17 unused */
	"integer divide fault",			/* 18 T_DIVIDE */
	"non-maskable interrupt trap",		/* 19 T_NMI */
	"overflow trap",			/* 20 T_OFLOW */
	"FPU bounds check fault",		/* 21 T_BOUND */
	"FPU device not available",		/* 22 T_DNA */
	"double fault",				/* 23 T_DOUBLEFLT */
	"FPU operand fetch fault",		/* 24 T_FPOPFLT */
	"invalid TSS fault",			/* 25 T_TSSFLT */
	"segment not present fault",		/* 26 T_SEGNPFLT */
	"stack fault",				/* 27 T_STKFLT */
	"machine check trap",			/* 28 T_MCHK */
};
#ifdef DDB
static int ddb_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, ddb_on_nmi, CTLFLAG_RW,
	&ddb_on_nmi, 0, "Go to DDB on NMI");
#endif
static int panic_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
	&panic_on_nmi, 0, "Panic on NMI");
static int fast_release;
SYSCTL_INT(_machdep, OID_AUTO, fast_release, CTLFLAG_RW,
	&fast_release, 0, "Passive Release was optimal");
static int slow_release;
SYSCTL_INT(_machdep, OID_AUTO, slow_release, CTLFLAG_RW,
	&slow_release, 0, "Passive Release was nonoptimal");

MALLOC_DEFINE(M_SYSMSG, "sysmsg", "sysmsg structure");
extern int max_sysmsg;
/*
 * Passively intercepts the thread switch function to increase
 * the thread priority from a user priority to a kernel priority, reducing
 * syscall and trap overhead for the case where no switch occurs.
 *
 * Synchronizes td_ucred with p_ucred.  This is used by system calls,
 * signal handling, faults, AST traps, and anything else that enters the
 * kernel from userland and provides the kernel with a stable read-only
 * copy of the process ucred.
 */
static __inline void
userenter(struct thread *curtd, struct proc *curp)
{
	struct ucred *ocred;
	struct ucred *ncred;

	curtd->td_release = lwkt_passive_release;

	if (curtd->td_ucred != curp->p_ucred) {
		ncred = crhold(curp->p_ucred);
		ocred = curtd->td_ucred;
		curtd->td_ucred = ncred;
		if (ocred)
			crfree(ocred);
	}
}
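
/*
 * (Sketch) The passive release installed above is undone by
 * lwkt_passive_recover() in userexit() below; when no thread switch
 * happens in between, the priority raise/lower pair is elided entirely
 * (the fast_release/slow_release sysctls above count the two outcomes).
 */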
/*
 * Handle signals, upcalls, profiling, and other AST's and/or tasks that
 * must be completed before we can return to or try to return to userland.
 *
 * Note that td_sticks is a 64 bit quantity, but there's no point doing
 * 64 bit arithmetic on the delta calculation so the absolute tick values
 * are truncated to an integer.
 */
static __inline void
userret(struct lwp *lp, struct trapframe *frame, int sticks)
{
	struct proc *p = lp->lwp_proc;
	int sig;

	/*
	 * Charge system time if profiling.  Note: times are in microseconds.
	 * This may do a copyout and block, so do it first even though it
	 * means some system time will be charged as user time.
	 */
	if (p->p_flags & P_PROFIL) {
		addupc_task(p, frame->tf_eip,
			(u_int)((int)lp->lwp_thread->td_sticks - sticks));
	}
	/*
	 * If the jungle wants us dead, so be it.
	 */
	if (lp->lwp_mpflags & LWP_MP_WEXIT) {
		lwkt_gettoken(&p->p_token);
		lwp_exit(0);
		lwkt_reltoken(&p->p_token);	/* NOT REACHED */
	}
	/*
	 * Block here if we are in a stopped state.
	 */
	if (p->p_stat == SSTOP) {
		lwkt_gettoken(&p->p_token);
		tstop();
		lwkt_reltoken(&p->p_token);
	}
	/*
	 * Post any pending upcalls.  If running a virtual kernel be sure
	 * to restore the virtual kernel's vmspace before posting the upcall.
	 */
	if (p->p_flags & (P_SIGVTALRM | P_SIGPROF | P_UPCALLPEND)) {
		lwkt_gettoken(&p->p_token);
		if (p->p_flags & P_SIGVTALRM) {
			p->p_flags &= ~P_SIGVTALRM;
			ksignal(p, SIGVTALRM);
		}
		if (p->p_flags & P_SIGPROF) {
			p->p_flags &= ~P_SIGPROF;
			ksignal(p, SIGPROF);
		}
		if (p->p_flags & P_UPCALLPEND) {
			p->p_flags &= ~P_UPCALLPEND;
			postupcall(lp);
		}
		lwkt_reltoken(&p->p_token);
	}
	/*
	 * Post any pending signals
	 *
	 * WARNING!  postsig() can exit and not return.
	 */
	if ((sig = CURSIG_TRACE(lp)) != 0) {
		lwkt_gettoken(&p->p_token);
		postsig(sig);
		lwkt_reltoken(&p->p_token);
	}
	/*
	 * Block here if we are swapped out, but still process signals
	 * (such as SIGKILL).  proc0 (the swapin scheduler) is already
	 * aware of our situation, we do not have to wake it up.
	 */
	if (p->p_flags & P_SWAPPEDOUT) {
		lwkt_gettoken(&p->p_token);
		get_mplock();
		p->p_flags |= P_SWAPWAIT;
		swapin_request();
		if (p->p_flags & P_SWAPWAIT)
			tsleep(p, PCATCH, "SWOUT", 0);
		p->p_flags &= ~P_SWAPWAIT;
		rel_mplock();
		lwkt_reltoken(&p->p_token);
	}
	/*
	 * Make sure postsig() handled the request to restore the old
	 * signal mask after running the signal handler.
	 */
	KKASSERT((lp->lwp_flags & LWP_OLDMASK) == 0);
}
/*
 * Cleanup from userenter() and any passive release that might have
 * occurred.  We must reclaim the current-process designation before we
 * can return to usermode.  We also handle both LWKT and USER reschedule
 * requests.
 */
static __inline void
userexit(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;
	/* globaldata_t gd = td->td_gd; */

	/*
	 * Handle stop requests at kernel priority.  Any requests queued
	 * after this loop will generate another AST.
	 */
	while (lp->lwp_proc->p_stat == SSTOP) {
		lwkt_gettoken(&lp->lwp_proc->p_token);
		tstop();
		lwkt_reltoken(&lp->lwp_proc->p_token);
	}

	/*
	 * Reduce our priority in preparation for a return to userland.  If
	 * our passive release function was still in place, our priority was
	 * never raised and does not need to be reduced.
	 */
	lwkt_passive_recover(td);

	/*
	 * Become the current user scheduled process if we aren't already,
	 * and deal with reschedule requests and other factors.
	 */
	lp->lwp_proc->p_usched->acquire_curproc(lp);
	/* WARNING: we may have migrated cpu's */
	/* gd = td->td_gd; */
}
#if !defined(KTR_KERNENTRY)
#define	KTR_KERNENTRY	KTR_ALL
#endif
KTR_INFO_MASTER(kernentry);
KTR_INFO(KTR_KERNENTRY, kernentry, trap, 0,
	 "TRAP(pid %d, tid %d, trapno %d, eva %lu)",
	 pid_t pid, lwpid_t tid, register_t trapno, vm_offset_t eva);
KTR_INFO(KTR_KERNENTRY, kernentry, trap_ret, 0, "TRAP_RET(pid %d, tid %d)",
	 pid_t pid, lwpid_t tid);
KTR_INFO(KTR_KERNENTRY, kernentry, syscall, 0, "SYSC(pid %d, tid %d, nr %d)",
	 pid_t pid, lwpid_t tid, register_t trapno);
KTR_INFO(KTR_KERNENTRY, kernentry, syscall_ret, 0, "SYSRET(pid %d, tid %d, err %d)",
	 pid_t pid, lwpid_t tid, int err);
KTR_INFO(KTR_KERNENTRY, kernentry, fork_ret, 0, "FORKRET(pid %d, tid %d)",
	 pid_t pid, lwpid_t tid);
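
/*
 * These probes are fired via KTR_LOG() with the argument lists declared
 * above, e.g. (as done in user_trap() below):
 *
 *	KTR_LOG(kernentry_trap, lp->lwp_proc->p_pid, lp->lwp_tid,
 *		frame->tf_trapno, eva);
 */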
/*
 * Exception, fault, and trap interface to the kernel.
 * This common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed.
 *
 * This function is also called from doreti in an interlock to handle ASTs.
 * For example:  hardwareint->INTROUTINE->(set ast)->doreti->trap
 *
 * NOTE!  We have to retrieve the fault address prior to obtaining the
 * MP lock because get_mplock() may switch out.  YYY cr2 really ought
 * to be retrieved by the assembly code, not here.
 *
 * XXX gd_trap_nesting_level currently prevents lwkt_switch() from panicking
 * if an attempt is made to switch from a fast interrupt or IPI.  This is
 * necessary to properly take fatal kernel traps on SMP machines if
 * get_mplock() has to block.
 */
void
user_trap(struct trapframe *frame)
{
	struct globaldata *gd = mycpu;
	struct thread *td = gd->gd_curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p;
	int sticks = 0;
	int i = 0, ucode = 0, type, code;
#ifdef SMP
	int have_mplock = 0;
#endif
#ifdef INVARIANTS
	int crit_count = td->td_critcount;
	lwkt_tokref_t curstop = td->td_toks_stop;
#endif
	vm_offset_t eva;

	p = td->td_proc;
	/*
	 * This is a bad kludge to avoid changing the various trapframe
	 * structures.  Because we are enabled as a virtual kernel,
	 * the original tf_err field will be passed to us shifted 16
	 * over in the tf_trapno field for T_PAGEFLT.
	 */
	if (frame->tf_trapno == T_PAGEFLT)
		eva = frame->tf_err;
	else
		eva = 0;
#if 0
	kprintf("USER_TRAP AT %08x xflags %d trapno %d eva %08x\n",
		frame->tf_eip, frame->tf_xflags, frame->tf_trapno, eva);
#endif
	/*
	 * Everything coming from user mode runs through user_trap,
	 * including system calls.
	 */
	if (frame->tf_trapno == T_SYSCALL80) {
		syscall2(frame);
		return;
	}

	KTR_LOG(kernentry_trap, lp->lwp_proc->p_pid, lp->lwp_tid,
		frame->tf_trapno, eva);
#ifdef DDB
	if (db_active) {
		eva = (frame->tf_trapno == T_PAGEFLT ? rcr2() : 0);
		++gd->gd_trap_nesting_level;
		MAKEMPSAFE(have_mplock);
		trap_fatal(frame, TRUE, eva);
		--gd->gd_trap_nesting_level;
		goto out2;
	}
#endif
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
restart:
#endif
	type = frame->tf_trapno;
	code = frame->tf_err;

	userenter(td, p);

	sticks = (int)td->td_sticks;
	lp->lwp_md.md_regs = frame;
	switch (type) {
	case T_PRIVINFLT:	/* privileged instruction fault */
		ucode = ILL_PRVOPC;
		i = SIGILL;
		break;

	case T_BPTFLT:		/* bpt instruction fault */
	case T_TRCTRAP:		/* trace trap */
		frame->tf_eflags &= ~PSL_T;
		i = SIGTRAP;
		ucode = TRAP_TRACE;
		break;

	case T_ARITHTRAP:	/* arithmetic trap */
		ucode = code;
		i = SIGFPE;
		break;

	case T_ASTFLT:		/* Allow process switch */
		mycpu->gd_cnt.v_soft++;
		if (mycpu->gd_reqflags & RQF_AST_OWEUPC) {
			atomic_clear_int(&mycpu->gd_reqflags,
					 RQF_AST_OWEUPC);
			addupc_task(p, p->p_prof.pr_addr,
				    p->p_prof.pr_ticks);
		}
		goto out;

		/*
		 * The following two traps can happen in
		 * vm86 mode, and, if so, we want to handle
		 * them specially.
		 */
	case T_PROTFLT:		/* general protection fault */
	case T_STKFLT:		/* stack fault */
#if 0
		if (frame->tf_eflags & PSL_VM) {
			i = vm86_emulate((struct vm86frame *)frame);
			if (i == 0)
				goto out;
			break;
		}
#endif
		i = SIGBUS;
		ucode = (type == T_PROTFLT) ? BUS_OBJERR : BUS_ADRERR;
		break;

	case T_SEGNPFLT:	/* segment not present fault */
		i = SIGBUS;
		ucode = BUS_ADRERR;
		break;

	case T_TSSFLT:		/* invalid TSS fault */
	case T_DOUBLEFLT:	/* double fault */
	default:
		i = SIGBUS;
		ucode = code + BUS_SEGM_FAULT;	/* XXX: ??? */
		break;

	case T_PAGEFLT:		/* page fault */
		MAKEMPSAFE(have_mplock);
		i = trap_pfault(frame, TRUE, eva);
		if (i == -1)
			goto out;
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
		if (i == -2)
			goto restart;
#endif
		if (i == 0)
			goto out;

		ucode = T_PAGEFLT;
		break;
	case T_DIVIDE:		/* integer divide fault */
		ucode = FPE_INTDIV;
		i = SIGFPE;
		break;

#if NISA > 0
	case T_NMI:
		MAKEMPSAFE(have_mplock);
		/* machine/parity/power fail/"kitchen sink" faults */
		if (isa_nmi(code) == 0) {
#ifdef DDB
			/*
			 * NMI can be hooked up to a pushbutton
			 * for debugging.
			 */
			if (ddb_on_nmi) {
				kprintf ("NMI ... going to debugger\n");
				kdb_trap (type, 0, frame);
			}
#endif /* DDB */
			goto out2;
		} else if (panic_on_nmi)
			panic("NMI indicates hardware failure");
		break;
#endif /* NISA > 0 */

	case T_OFLOW:		/* integer overflow fault */
		ucode = FPE_INTOVF;
		i = SIGFPE;
		break;

	case T_BOUND:		/* bounds check fault */
		ucode = FPE_FLTSUB;
		i = SIGFPE;
		break;

	case T_DNA:
		/*
		 * Virtual kernel intercept - pass the DNA exception
		 * to the (emulated) virtual kernel if it asked to handle
		 * it.  This occurs when the virtual kernel is holding
		 * onto the FP context for a different emulated
		 * process than the one currently running.
		 *
		 * We must still call npxdna() since we may have
		 * saved FP state that the (emulated) virtual kernel
		 * needs to hand over to a different emulated process.
		 */
		if (lp->lwp_vkernel && lp->lwp_vkernel->ve &&
		    (td->td_pcb->pcb_flags & FP_VIRTFP)
		) {
			npxdna(frame);
			break;
		}

		/*
		 * The kernel may have switched out the FP unit's
		 * state, causing the user process to take a fault
		 * when it tries to use the FP unit.  Restore the
		 * state here.
		 */
		if (npxdna(frame))
			goto out;
		if (!pmath_emulate) {
			i = SIGFPE;
			ucode = FPE_FPU_NP_TRAP;
			break;
		}
		i = (*pmath_emulate)(frame);
		if (i == 0) {
			if (!(frame->tf_eflags & PSL_T))
				goto out2;
			frame->tf_eflags &= ~PSL_T;
			i = SIGTRAP;
		}
		/* else ucode = emulator_only_knows() XXX */
		break;

	case T_FPOPFLT:		/* FPU operand fetch fault */
		ucode = T_FPOPFLT;
		i = SIGILL;
		break;

	case T_XMMFLT:		/* SIMD floating-point exception */
		ucode = 0;	/* XXX */
		i = SIGFPE;
		break;
	}
	/*
	 * Virtual kernel intercept - if the fault is directly related to a
	 * VM context managed by a virtual kernel then let the virtual kernel
	 * handle it.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		vkernel_trap(lp, frame);
		goto out;
	}

	/*
	 * Translate fault for emulators (e.g. Linux)
	 */
	if (*p->p_sysent->sv_transtrap)
		i = (*p->p_sysent->sv_transtrap)(i, type);
	MAKEMPSAFE(have_mplock);
	trapsignal(lp, i, ucode);

#ifdef DEBUG
	if (type <= MAX_TRAP_MSG) {
		uprintf("fatal process exception: %s",
			trap_msg[type]);
		if ((type == T_PAGEFLT) || (type == T_PROTFLT))
			uprintf(", fault VA = 0x%lx", (u_long)eva);
		uprintf("\n");
	}
#endif
out:
	userret(lp, frame, sticks);
	userexit(lp);
out2:
	;
#ifdef SMP
	if (have_mplock)
		rel_mplock();
#endif
	KTR_LOG(kernentry_trap_ret, lp->lwp_proc->p_pid, lp->lwp_tid);
#ifdef INVARIANTS
	KASSERT(crit_count == td->td_critcount,
		("trap: critical section count mismatch! %d/%d",
		crit_count, td->td_critcount));
	KASSERT(curstop == td->td_toks_stop,
		("trap: extra tokens held after trap! %zd/%zd",
		curstop - &td->td_toks_base,
		td->td_toks_stop - &td->td_toks_base));
#endif
}
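
/*
 * (Sketch) Typical delivery path through user_trap(): a usermode NULL
 * pointer dereference arrives as T_PAGEFLT, trap_pfault() fails the VM
 * fault and returns SIGSEGV, and trapsignal() posts the signal before
 * userret() delivers it on the way back to usermode.
 */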
void
kern_trap(struct trapframe *frame)
{
	struct globaldata *gd = mycpu;
	struct thread *td = gd->gd_curthread;
	struct lwp *lp;
	struct proc *p;
	int i = 0, ucode = 0, type, code;
#ifdef SMP
	int have_mplock = 0;
#endif
#ifdef INVARIANTS
	int crit_count = td->td_critcount;
	lwkt_tokref_t curstop = td->td_toks_stop;
#endif
	vm_offset_t eva;

	lp = td->td_lwp;
	p = td->td_proc;

	if (frame->tf_trapno == T_PAGEFLT)
		eva = frame->tf_err;
	else
		eva = 0;

#ifdef DDB
	if (db_active) {
		++gd->gd_trap_nesting_level;
		MAKEMPSAFE(have_mplock);
		trap_fatal(frame, FALSE, eva);
		--gd->gd_trap_nesting_level;
		goto out2;
	}
#endif

	type = frame->tf_trapno;
	code = frame->tf_err;
	switch (type) {
	case T_PAGEFLT:		/* page fault */
		MAKEMPSAFE(have_mplock);
		trap_pfault(frame, FALSE, eva);
		goto out2;

	case T_DNA:
		/*
		 * The kernel may be using npx for copying or other
		 * purposes.
		 */
		panic("kernel NPX should not happen");
		break;
	case T_PROTFLT:		/* general protection fault */
	case T_SEGNPFLT:	/* segment not present fault */
		/*
		 * Invalid segment selectors and out of bounds
		 * %eip's and %esp's can be set up in user mode.
		 * This causes a fault in kernel mode when the
		 * kernel tries to return to user mode.  We want
		 * to get this fault so that we can fix the
		 * problem here and not have to check all the
		 * selectors and pointers when the user changes
		 * them.
		 */
		if (mycpu->gd_intr_nesting_level == 0) {
			if (td->td_pcb->pcb_onfault) {
				frame->tf_eip =
				    (register_t)td->td_pcb->pcb_onfault;
				goto out2;
			}
		}
		break;

	case T_TSSFLT:
		/*
		 * PSL_NT can be set in user mode and isn't cleared
		 * automatically when the kernel is entered.  This
		 * causes a TSS fault when the kernel attempts to
		 * `iret' because the TSS link is uninitialized.  We
		 * want to get this fault so that we can fix the
		 * problem here and not every time the kernel is
		 * entered.
		 */
		if (frame->tf_eflags & PSL_NT) {
			frame->tf_eflags &= ~PSL_NT;
			goto out2;
		}
		break;
	case T_TRCTRAP:		/* trace trap */
#if 0
		if (frame->tf_eip == (int)IDTVEC(syscall)) {
			/*
			 * We've just entered system mode via the
			 * syscall lcall.  Continue single stepping
			 * silently until the syscall handler has
			 * saved the flags.
			 */
			goto out2;
		}
		if (frame->tf_eip == (int)IDTVEC(syscall) + 1) {
			/*
			 * The syscall handler has now saved the
			 * flags.  Stop single stepping it.
			 */
			frame->tf_eflags &= ~PSL_T;
			goto out2;
		}
#endif
		/*
		 * Ignore debug register trace traps due to
		 * accesses in the user's address space, which
		 * can happen under several conditions such as
		 * if a user sets a watchpoint on a buffer and
		 * then passes that buffer to a system call.
		 * We still want to get TRCTRAPS for addresses
		 * in kernel space because that is useful when
		 * debugging the kernel.
		 */
		if (user_dbreg_trap()) {
			/*
			 * Reset breakpoint bits because the
			 * processor doesn't.
			 */
			load_dr6(rdr6() & 0xfffffff0);
			goto out2;
		}
		/*
		 * Fall through (TRCTRAP kernel mode, kernel address)
		 */
	case T_BPTFLT:
		/*
		 * If DDB is enabled, let it handle the debugger trap.
		 * Otherwise, debugger traps "can't happen".
		 */
#ifdef DDB
		MAKEMPSAFE(have_mplock);
		if (kdb_trap (type, 0, frame))
			goto out2;
#endif
		break;
	case T_DIVIDE:
		MAKEMPSAFE(have_mplock);
		trap_fatal(frame, FALSE, eva);
		goto out2;

	case T_NMI:
		MAKEMPSAFE(have_mplock);
		trap_fatal(frame, FALSE, eva);
		goto out2;

	case T_SYSCALL80:
		/*
		 * Ignore this trap generated from a spurious SIGTRAP:
		 * single stepping over syscalls leads to spurious
		 * SIGTRAPs, so ignore them.
		 *
		 * Haiku (c) 2007 Simon 'corecode' Schubert
		 */
		goto out2;
	}
	/*
	 * Translate fault for emulators (e.g. Linux)
	 */
	if (*p->p_sysent->sv_transtrap)
		i = (*p->p_sysent->sv_transtrap)(i, type);

	MAKEMPSAFE(have_mplock);
	trapsignal(lp, i, ucode);

#ifdef DEBUG
	if (type <= MAX_TRAP_MSG) {
		uprintf("fatal process exception: %s",
			trap_msg[type]);
		if ((type == T_PAGEFLT) || (type == T_PROTFLT))
			uprintf(", fault VA = 0x%lx", (u_long)eva);
		uprintf("\n");
	}
#endif
out2:
	;
#ifdef SMP
	if (have_mplock)
		rel_mplock();
#endif
#ifdef INVARIANTS
	KASSERT(crit_count == td->td_critcount,
		("trap: critical section count mismatch! %d/%d",
		crit_count, td->td_critcount));
	KASSERT(curstop == td->td_toks_stop,
		("trap: extra tokens held after trap! %zd/%zd",
		curstop - &td->td_toks_base,
		td->td_toks_stop - &td->td_toks_base));
#endif
}
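
/*
 * NOTE: trap_pfault() below returns 0 if the fault was resolved, -1 if
 * it was fatal and trap_fatal() was already invoked, or a signal number
 * (SIGBUS/SIGSEGV) for the caller to deliver (this contract is inferred
 * from the callers above).
 */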
static int
trap_pfault(struct trapframe *frame, int usermode, vm_offset_t eva)
{
	vm_offset_t va;
	struct vmspace *vm = NULL;
	vm_map_t map = 0;
	int rv = 0;
	int fault_flags;
	vm_prot_t ftype;
	thread_t td = curthread;
	struct lwp *lp = td->td_lwp;

	va = trunc_page(eva);
	if (usermode == FALSE) {
		/*
		 * This is a fault on kernel virtual memory.
		 */
		map = &kernel_map;
	} else {
		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL. If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */
		if (lp != NULL)
			vm = lp->lwp_vmspace;

		if (vm == NULL)
			goto nogo;

		map = &vm->vm_map;
	}

	if (frame->tf_xflags & PGEX_W)
		ftype = VM_PROT_READ | VM_PROT_WRITE;
	else
		ftype = VM_PROT_READ;

	if (map != &kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		PHOLD(lp->lwp_proc);

		/*
		 * Issue fault
		 */
		fault_flags = 0;
		if (usermode)
			fault_flags |= VM_FAULT_BURST;
		if (ftype & VM_PROT_WRITE)
			fault_flags |= VM_FAULT_DIRTY;
		else
			fault_flags |= VM_FAULT_NORMAL;
		rv = vm_fault(map, va, ftype, fault_flags);

		PRELE(lp->lwp_proc);
	} else {
		/*
		 * Don't have to worry about process locking or stacks
		 * in the kernel.
		 */
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	}

	if (rv == KERN_SUCCESS)
		return (0);
nogo:
	if (!usermode) {
		if (td->td_gd->gd_intr_nesting_level == 0 &&
		    td->td_pcb->pcb_onfault) {
			frame->tf_eip = (register_t)td->td_pcb->pcb_onfault;
			return (0);
		}
		trap_fatal(frame, usermode, eva);
		return (-1);
	}
	return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}
static void
trap_fatal(struct trapframe *frame, int usermode, vm_offset_t eva)
{
	int code, type, ss, esp;

	code = frame->tf_xflags;
	type = frame->tf_trapno;

	if (type <= MAX_TRAP_MSG) {
		kprintf("\n\nFatal trap %d: %s while in %s mode\n",
			type, trap_msg[type],
			(usermode ? "user" : "kernel"));
	}
	/* two separate prints in case of a trap on an unmapped page */
	kprintf("cpuid = %d\n", mycpu->gd_cpuid);
	if (type == T_PAGEFLT) {
		kprintf("fault virtual address	= %p\n", (void *)eva);
		kprintf("fault code		= %s %s, %s\n",
			usermode ? "user" : "supervisor",
			code & PGEX_W ? "write" : "read",
			code & PGEX_P ? "protection violation" : "page not present");
	}
	kprintf("instruction pointer	= 0x%x:0x%x\n",
		frame->tf_cs & 0xffff, frame->tf_eip);
	if (usermode) {
		ss = frame->tf_ss & 0xffff;
		esp = frame->tf_esp;
	} else {
		ss = GSEL(GDATA_SEL, SEL_KPL);
		esp = (int)&frame->tf_esp;
	}
	kprintf("stack pointer		= 0x%x:0x%x\n", ss, esp);
	kprintf("frame pointer		= 0x%x:0x%x\n", ss, frame->tf_ebp);
	kprintf("processor eflags	= ");
	if (frame->tf_eflags & PSL_T)
		kprintf("trace trap, ");
	if (frame->tf_eflags & PSL_I)
		kprintf("interrupt enabled, ");
	if (frame->tf_eflags & PSL_NT)
		kprintf("nested task, ");
	if (frame->tf_eflags & PSL_RF)
		kprintf("resume, ");
	if (frame->tf_eflags & PSL_VM)
		kprintf("vm86, ");
	kprintf("IOPL = %d\n", (frame->tf_eflags & PSL_IOPL) >> 12);
	kprintf("current process		= ");
	if (curproc) {
		kprintf("%lu (%s)\n",
		    (u_long)curproc->p_pid, curproc->p_comm ?
		    curproc->p_comm : "");
	} else {
		kprintf("Idle\n");
	}
	kprintf("current thread		= pri %d ", curthread->td_pri);
	if (curthread->td_critcount)
		kprintf("(CRIT)");
	kprintf("\n");
#ifdef SMP
	/**
	 *  XXX FIXME:
	 *	we probably SHOULD have stopped the other CPUs before now!
	 *	another CPU COULD have been touching cpl at this moment...
	 */
	kprintf(" <- SMP: XXX");
#endif
	kprintf("\n");

#ifdef DDB
	if ((debugger_on_panic || db_active) && kdb_trap(type, code, frame))
		return;
#endif
	kprintf("trap number		= %d\n", type);
	if (type <= MAX_TRAP_MSG)
		panic("%s", trap_msg[type]);
	else
		panic("unknown/reserved trap");
}
/*
 * Double fault handler.  Called when a fault occurs while writing
 * a frame for a trap/exception onto the stack.  This usually occurs
 * when the stack overflows (such is the case with infinite recursion,
 * for example).
 *
 * XXX Note that the current PTD gets replaced by IdlePTD when the
 * task switch occurs.  This means that the stack that was active at
 * the time of the double fault is not available at <kstack> unless
 * the machine was idle when the double fault occurred.  The downside
 * of this is that "trace <ebp>" in ddb won't work.
 */
void
dblfault_handler(void)
{
	struct mdglobaldata *gd = mdcpu;

	kprintf("\nFatal double fault:\n");
	kprintf("eip = 0x%x\n", gd->gd_common_tss.tss_eip);
	kprintf("esp = 0x%x\n", gd->gd_common_tss.tss_esp);
	kprintf("ebp = 0x%x\n", gd->gd_common_tss.tss_ebp);
#ifdef SMP
	/* two separate prints in case of a trap on an unmapped page */
	kprintf("cpuid = %d\n", mycpu->gd_cpuid);
#endif
	panic("double fault");
}
/*
 * syscall2 -	MP aware system call request C handler
 *
 * A system call is essentially treated as a trap except that the
 * MP lock is not held on entry or return.  We are responsible for
 * obtaining the MP lock if necessary and for handling ASTs
 * (e.g. a task switch) prior to return.
 *
 * MPSAFE
 */
void
syscall2(struct trapframe *frame)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;
	caddr_t params;
	struct sysent *callp;
	register_t orig_tf_eflags;
	int sticks;
	int error;
	int narg;
#ifdef INVARIANTS
	int crit_count = td->td_critcount;
#endif
#ifdef SMP
	int have_mplock = 0;
#endif
	u_int code;
	union sysunion args;

	KTR_LOG(kernentry_syscall, lp->lwp_proc->p_pid, lp->lwp_tid,
		frame->tf_eax);

	userenter(td, p);	/* lazy raise our priority */
	sticks = (int)td->td_sticks;
	orig_tf_eflags = frame->tf_eflags;

	/*
	 * Virtual kernel intercept - if a VM context managed by a virtual
	 * kernel issues a system call the virtual kernel handles it, not us.
	 * Restore the virtual kernel context and return from its system
	 * call.  The current frame is copied out to the virtual kernel.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		vkernel_trap(lp, frame);
		error = EJUSTRETURN;
		goto out;
	}

	/*
	 * Get the system call parameters and account for time
	 */
	lp->lwp_md.md_regs = frame;
	params = (caddr_t)frame->tf_esp + sizeof(int);
	code = frame->tf_eax;
	if (p->p_sysent->sv_prepsyscall) {
		(*p->p_sysent->sv_prepsyscall)(
			frame, (int *)(&args.nosys.sysmsg + 1),
			&code, &params);
	} else {
		/*
		 * Need to check if this is a 32 bit or 64 bit syscall.
		 * fuword is MP aware.
		 */
		if (code == SYS_syscall) {
			/*
			 * Code is first argument, followed by actual args.
			 */
			code = fuword(params);
			params += sizeof(int);
		} else if (code == SYS___syscall) {
			/*
			 * Like syscall, but code is a quad, so as to maintain
			 * quad alignment for the rest of the arguments.
			 */
			code = fuword(params);
			params += sizeof(quad_t);
		}
	}

	if (p->p_sysent->sv_mask)
		code &= p->p_sysent->sv_mask;
	if (code >= p->p_sysent->sv_size)
		callp = &p->p_sysent->sv_table[0];
	else
		callp = &p->p_sysent->sv_table[code];
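
	/*
	 * Example (illustrative): a userland syscall(SYS_getpid) arrives
	 * with tf_eax == SYS_syscall; the real code (SYS_getpid) is then
	 * pulled off the user stack by fuword() above and dispatched
	 * through sv_table[] like any direct system call.
	 */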
	narg = callp->sy_narg & SYF_ARGMASK;

	/*
	 * copyin is MP aware, but the tracing code is not
	 */
	if (narg && params) {
		error = copyin(params, (caddr_t)(&args.nosys.sysmsg + 1),
				narg * sizeof(register_t));
		if (error) {
#ifdef KTRACE
			if (KTRPOINT(td, KTR_SYSCALL)) {
				MAKEMPSAFE(have_mplock);

				ktrsyscall(lp, code, narg,
					(void *)(&args.nosys.sysmsg + 1));
			}
#endif
			goto bad;
		}
	}

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSCALL)) {
		MAKEMPSAFE(have_mplock);
		ktrsyscall(lp, code, narg, (void *)(&args.nosys.sysmsg + 1));
	}
#endif
	/*
	 * For traditional syscall code edx is left untouched when 32 bit
	 * results are returned.  Since edx is loaded from fds[1] when the
	 * system call returns we pre-set it here.
	 */
	args.sysmsg_fds[0] = 0;
	args.sysmsg_fds[1] = frame->tf_edx;

	/*
	 * The syscall might manipulate the trap frame.  If it does it
	 * will probably return EJUSTRETURN.
	 */
	args.sysmsg_frame = frame;

	STOPEVENT(p, S_SCE, narg);	/* MP aware */

	/*
	 * NOTE: All system calls run MPSAFE now.  The system call itself
	 *	 is responsible for getting the MP lock.
	 */
	error = (*callp->sy_call)(&args);
#if 0
	kprintf("system call %d returned %d\n", code, error);
#endif
out:
	/*
	 * MP SAFE (we may or may not have the MP lock at this point)
	 */
	switch (error) {
	case 0:
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		lp = curthread->td_lwp;
		frame->tf_eax = args.sysmsg_fds[0];
		frame->tf_edx = args.sysmsg_fds[1];
		frame->tf_eflags &= ~PSL_C;
		break;
	case ERESTART:
		/*
		 * Reconstruct pc, assuming lcall $X,y is 7 bytes,
		 * int 0x80 is 2 bytes.  We saved this in tf_err.
		 */
		frame->tf_eip -= frame->tf_err;
		break;
	case EJUSTRETURN:
		break;
	case EASYNC:
		panic("Unexpected EASYNC return value (for now)");
	default:
bad:
		if (p->p_sysent->sv_errsize) {
			if (error >= p->p_sysent->sv_errsize)
				error = -1;	/* XXX */
			else
				error = p->p_sysent->sv_errtbl[error];
		}
		frame->tf_eax = error;
		frame->tf_eflags |= PSL_C;
		break;
	}
	/*
	 * Traced syscall.  trapsignal() is not MP aware.
	 */
	if ((orig_tf_eflags & PSL_T) /*&& !(orig_tf_eflags & PSL_VM)*/) {
		MAKEMPSAFE(have_mplock);
		frame->tf_eflags &= ~PSL_T;
		trapsignal(lp, SIGTRAP, TRAP_TRACE);
	}

	/*
	 * Handle reschedule and other end-of-syscall issues
	 */
	userret(lp, frame, sticks);
#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSRET)) {
		MAKEMPSAFE(have_mplock);
		ktrsysret(lp, code, error, args.sysmsg_result);
	}
#endif

	/*
	 * This works because errno is findable through the
	 * register set.  If we ever support an emulation where this
	 * is not the case, this code will need to be revisited.
	 */
	STOPEVENT(p, S_SCX, code);

	userexit(lp);

#ifdef SMP
	/*
	 * Release the MP lock if we had to get it
	 */
	if (have_mplock)
		rel_mplock();
#endif
	KTR_LOG(kernentry_syscall_ret, lp->lwp_proc->p_pid, lp->lwp_tid, error);
#ifdef INVARIANTS
	KASSERT(crit_count == td->td_critcount,
		("syscall: critical section count mismatch! %d/%d",
		crit_count, td->td_critcount));
	KASSERT(&td->td_toks_base == td->td_toks_stop,
		("syscall: extra tokens held after trap! %zd",
		td->td_toks_stop - &td->td_toks_base));
#endif
}
/*
 * NOTE: mplock not held at any point
 */
void
fork_return(struct lwp *lp, struct trapframe *frame)
{
	frame->tf_eax = 0;		/* Child returns zero */
	frame->tf_eflags &= ~PSL_C;	/* success */
	frame->tf_edx = 1;

	generic_lwp_return(lp, frame);
	KTR_LOG(kernentry_fork_ret, lp->lwp_proc->p_pid, lp->lwp_tid);
}
/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.
 *
 * This code will return back into the fork trampoline code which then
 * jumps into the user process.
 *
 * NOTE: The mplock is not held at any point.
 */
void
generic_lwp_return(struct lwp *lp, struct trapframe *frame)
{
	struct proc *p = lp->lwp_proc;

	/*
	 * Newly forked processes are given a kernel priority.  We have to
	 * adjust the priority to a normal user priority and fake entry
	 * into the kernel (call userenter()) to install a passive release
	 * function just in case userret() decides to stop the process.  This
	 * can occur when ^Z races a fork.  If we do not install the passive
	 * release function the current process designation will not be
	 * released when the thread goes to sleep.
	 */
	lwkt_setpri_self(TDPRI_USER_NORM);
	userenter(lp->lwp_thread, p);
	userret(lp, frame, 0);
#ifdef KTRACE
	if (KTRPOINT(lp->lwp_thread, KTR_SYSRET))
		ktrsysret(lp, SYS_fork, 0, 0);
#endif
	lp->lwp_flags |= LWP_PASSIVE_ACQ;
	userexit(lp);
	lp->lwp_flags &= ~LWP_PASSIVE_ACQ;
}
/*
 * doreti has turned into this.  The frame is directly on the stack.  We
 * pull everything else we need (fpu and tls context) from the current
 * thread.
 *
 * Note on fpu interactions: In a virtual kernel, the fpu context for
 * an emulated user mode process is not shared with the virtual kernel's
 * fpu context, so we only have to 'stack' fpu contexts within the virtual
 * kernel itself, and not even then since the signal() contexts that we care
 * about save and restore the FPU state (I think anyhow).
 *
 * vmspace_ctl() returns an error only if it had problems installing the
 * context we supplied or problems copying data to/from our VM space.
 */
void
go_user(struct intrframe *frame)
{
	struct trapframe *tf = (void *)&frame->if_gs;
	int r;
	/*
	 * Interrupts may be disabled on entry, make sure all signals
	 * can be received before beginning our loop.
	 */
	sigsetmask(0);

	/*
	 * Switch to the current simulated user process, then call
	 * user_trap() when we break out of it (usually due to a signal).
	 */
	for (;;) {
		/*
		 * Tell the real kernel whether it is ok to use the FP
		 * unit or not.
		 *
		 * The critical section is required to prevent an interrupt
		 * from causing a preemptive task switch and changing
		 * the FP state.
		 */
		crit_enter();
		if (mdcpu->gd_npxthread == curthread) {
			tf->tf_xflags &= ~PGEX_FPFAULT;
		} else {
			tf->tf_xflags |= PGEX_FPFAULT;
		}

		/*
		 * Run emulated user process context.  This call interlocks
		 * with new mailbox signals.
		 *
		 * Set PGEX_U unconditionally, indicating a user frame (the
		 * bit is normally set only by T_PAGEFLT).
		 */
		r = vmspace_ctl(&curproc->p_vmspace->vm_pmap, VMSPACE_CTL_RUN,
				tf, &curthread->td_savevext);
		crit_exit();
		frame->if_xflags |= PGEX_U;
#if 0
		kprintf("GO USER %d trap %d EVA %08x EIP %08x ESP %08x XFLAGS %02x/%02x\n",
			r, tf->tf_trapno, tf->tf_err, tf->tf_eip, tf->tf_esp,
			tf->tf_xflags, frame->if_xflags);
#endif
		if (r < 0) {
			if (errno != EINTR)
				panic("vmspace_ctl failed error %d", errno);
		} else {
			if (tf->tf_trapno) {
				user_trap(tf);
			}
		}
		if (mycpu->gd_reqflags & RQF_AST_MASK) {
			tf->tf_trapno = T_ASTFLT;
			user_trap(tf);
		}
		tf->tf_trapno = 0;
	}
}
/*
 * If PGEX_FPFAULT is set then set FP_VIRTFP in the PCB to force a T_DNA
 * fault (which is then passed back to the virtual kernel) if an attempt is
 * made to use the FP unit.
 *
 * XXX this is a fairly big hack.
 */
void
set_vkernel_fp(struct trapframe *frame)
{
	struct thread *td = curthread;

	if (frame->tf_xflags & PGEX_FPFAULT) {
		td->td_pcb->pcb_flags |= FP_VIRTFP;
		if (mdcpu->gd_npxthread == td)
			npxexit();
	} else {
		td->td_pcb->pcb_flags &= ~FP_VIRTFP;
	}
}
/*
 * Called from vkernel_trap() to fixup the vkernel's syscall
 * frame for vmspace_ctl() return.
 */
void
cpu_vkernel_trap(struct trapframe *frame, int error)
{
	frame->tf_eax = error;
	if (error)
		frame->tf_eflags |= PSL_C;
	else
		frame->tf_eflags &= ~PSL_C;
}
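
/*
 * NOTE: this mirrors the syscall return convention used by syscall2()
 * above: the error number is placed in %eax and PSL_C is set on failure,
 * cleared on success.
 */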