/*
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1990, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $
 */

/*
 * 386 Trap and System call handling
 */
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pioctl.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/signal2.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vmmeter.h>
#include <sys/malloc.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#include <sys/ktr.h>
#include <sys/upcall.h>
#include <sys/vkernel.h>
#include <sys/sysproto.h>
#include <sys/sysunion.h>
#include <sys/vmspace.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>

#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <machine/tss.h>
#include <machine/globaldata.h>

#include <machine/vm86.h>

#include <ddb/ddb.h>

#include <sys/msgport2.h>
#include <sys/thread2.h>
#include <sys/mplock2.h>
#define MAKEMPSAFE(have_mplock)                 \
        if (have_mplock == 0) {                 \
                get_mplock();                   \
                have_mplock = 1;                \
        }
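/*
 * Usage note (added commentary, not from the original source): paths below
 * that may need the MP lock invoke MAKEMPSAFE(have_mplock) before the
 * operation and pair it with "if (have_mplock) rel_mplock();" at the common
 * exit, so the lock is acquired at most once per trap or syscall.
 */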
int (*pmath_emulate) (struct trapframe *);

static int trap_pfault (struct trapframe *, int, vm_offset_t);
static void trap_fatal (struct trapframe *, int, vm_offset_t);
void dblfault_handler (void);

extern inthand_t IDTVEC(syscall);
#define MAX_TRAP_MSG	28
static char *trap_msg[] = {
        "",                                     /*  0 unused */
        "privileged instruction fault",         /*  1 T_PRIVINFLT */
        "",                                     /*  2 unused */
        "breakpoint instruction fault",         /*  3 T_BPTFLT */
        "",                                     /*  4 unused */
        "",                                     /*  5 unused */
        "arithmetic trap",                      /*  6 T_ARITHTRAP */
        "system forced exception",              /*  7 T_ASTFLT */
        "",                                     /*  8 unused */
        "general protection fault",             /*  9 T_PROTFLT */
        "trace trap",                           /* 10 T_TRCTRAP */
        "",                                     /* 11 unused */
        "page fault",                           /* 12 T_PAGEFLT */
        "",                                     /* 13 unused */
        "alignment fault",                      /* 14 T_ALIGNFLT */
        "",                                     /* 15 unused */
        "",                                     /* 16 unused */
        "",                                     /* 17 unused */
        "integer divide fault",                 /* 18 T_DIVIDE */
        "non-maskable interrupt trap",          /* 19 T_NMI */
        "overflow trap",                        /* 20 T_OFLOW */
        "FPU bounds check fault",               /* 21 T_BOUND */
        "FPU device not available",             /* 22 T_DNA */
        "double fault",                         /* 23 T_DOUBLEFLT */
        "FPU operand fetch fault",              /* 24 T_FPOPFLT */
        "invalid TSS fault",                    /* 25 T_TSSFLT */
        "segment not present fault",            /* 26 T_SEGNPFLT */
        "stack fault",                          /* 27 T_STKFLT */
        "machine check trap",                   /* 28 T_MCHK */
};
static int ddb_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, ddb_on_nmi, CTLFLAG_RW,
        &ddb_on_nmi, 0, "Go to DDB on NMI");

static int panic_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
        &panic_on_nmi, 0, "Panic on NMI");

static int fast_release;
SYSCTL_INT(_machdep, OID_AUTO, fast_release, CTLFLAG_RW,
        &fast_release, 0, "Passive Release was optimal");

static int slow_release;
SYSCTL_INT(_machdep, OID_AUTO, slow_release, CTLFLAG_RW,
        &slow_release, 0, "Passive Release was nonoptimal");

MALLOC_DEFINE(M_SYSMSG, "sysmsg", "sysmsg structure");
extern int max_sysmsg;
/*
 * Passively intercepts the thread switch function to increase the thread
 * priority from a user priority to a kernel priority, reducing
 * syscall and trap overhead for the case where no switch occurs.
 *
 * Synchronizes td_ucred with p_ucred.  This is used by system calls,
 * signal handling, faults, AST traps, and anything else that enters the
 * kernel from userland and provides the kernel with a stable read-only
 * copy of the process ucred.
 */
static __inline void
userenter(struct thread *curtd, struct proc *curp)
{
        struct ucred *ocred;
        struct ucred *ncred;

        curtd->td_release = lwkt_passive_release;

        if (curtd->td_ucred != curp->p_ucred) {
                ncred = crhold(curp->p_ucred);
                ocred = curtd->td_ucred;
                curtd->td_ucred = ncred;
                if (ocred)
                        crfree(ocred);
        }
}
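/*
 * Call-pairing sketch (added commentary, not from the original source):
 * every userland entry point in this file follows the same pattern, e.g.
 *
 *      userenter(td, p);               lazily raise to kernel priority
 *      ... dispatch the trap/syscall ...
 *      userret(lp, frame, sticks);     signals, stops, profiling
 *      userexit(lp);                   reclaim current-process designation
 */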
/*
 * Handle signals, upcalls, profiling, and other AST's and/or tasks that
 * must be completed before we can return to or try to return to userland.
 *
 * Note that td_sticks is a 64 bit quantity, but there's no point doing
 * 64 bit arithmetic on the delta calculation so the absolute tick values
 * are truncated to an integer.
 */
static void
userret(struct lwp *lp, struct trapframe *frame, int sticks)
{
        struct proc *p = lp->lwp_proc;
        int sig;

        /*
         * Charge system time if profiling.  Note: times are in microseconds.
         * This may do a copyout and block, so do it first even though it
         * means some system time will be charged as user time.
         */
        if (p->p_flags & P_PROFIL) {
                addupc_task(p, frame->tf_eip,
                        (u_int)((int)lp->lwp_thread->td_sticks - sticks));
        }

        /*
         * Specific on-return-to-usermode checks (LWP_MP_WEXIT,
         * LWP_MP_VNLRU, etc).
         */
        if (lp->lwp_mpflags & LWP_MP_URETMASK)
                lwp_userret(lp);

        /*
         * Block here if we are in a stopped state.
         */
        if (p->p_stat == SSTOP) {
                lwkt_gettoken(&p->p_token);
                tstop();
                lwkt_reltoken(&p->p_token);
        }

        /*
         * Post any pending upcalls.  If running a virtual kernel be sure
         * to restore the virtual kernel's vmspace before posting the upcall.
         */
        if (p->p_flags & (P_SIGVTALRM | P_SIGPROF | P_UPCALLPEND)) {
                lwkt_gettoken(&p->p_token);
                if (p->p_flags & P_SIGVTALRM) {
                        p->p_flags &= ~P_SIGVTALRM;
                        ksignal(p, SIGVTALRM);
                }
                if (p->p_flags & P_SIGPROF) {
                        p->p_flags &= ~P_SIGPROF;
                        ksignal(p, SIGPROF);
                }
                if (p->p_flags & P_UPCALLPEND) {
                        p->p_flags &= ~P_UPCALLPEND;
                        postupcall(lp);
                }
                lwkt_reltoken(&p->p_token);
        }

        /*
         * Post any pending signals
         *
         * WARNING!  postsig() can exit and not return.
         */
        if ((sig = CURSIG_TRACE(lp)) != 0) {
                lwkt_gettoken(&p->p_token);
                postsig(sig);
                lwkt_reltoken(&p->p_token);
        }

        /*
         * block here if we are swapped out, but still process signals
         * (such as SIGKILL).  proc0 (the swapin scheduler) is already
         * aware of our situation, we do not have to wake it up.
         */
        if (p->p_flags & P_SWAPPEDOUT) {
                lwkt_gettoken(&p->p_token);
                get_mplock();
                p->p_flags |= P_SWAPWAIT;
                swapin_request();
                if (p->p_flags & P_SWAPWAIT)
                        tsleep(p, PCATCH, "SWOUT", 0);
                p->p_flags &= ~P_SWAPWAIT;
                rel_mplock();
                lwkt_reltoken(&p->p_token);
        }

        /*
         * In a multi-threaded program it is possible for a thread to change
         * signal state during a system call which temporarily changes the
         * signal mask.  In this case postsig() might not be run and we
         * have to restore the mask ourselves.
         */
        if (lp->lwp_flags & LWP_OLDMASK) {
                lp->lwp_flags &= ~LWP_OLDMASK;
                lp->lwp_sigmask = lp->lwp_oldsigmask;
        }
}
/*
 * Cleanup from userenter and any passive release that might have occurred.
 * We must reclaim the current-process designation before we can return
 * to usermode.  We also handle both LWKT and USER reschedule requests.
 */
static __inline void
userexit(struct lwp *lp)
{
        struct thread *td = lp->lwp_thread;
        /* globaldata_t gd = td->td_gd; */

        /*
         * Handle stop requests at kernel priority.  Any requests queued
         * after this loop will generate another AST.
         */
        while (lp->lwp_proc->p_stat == SSTOP) {
                lwkt_gettoken(&lp->lwp_proc->p_token);
                tstop();
                lwkt_reltoken(&lp->lwp_proc->p_token);
        }

        /*
         * Become the current user scheduled process if we aren't already,
         * and deal with reschedule requests and other factors.
         */
        lp->lwp_proc->p_usched->acquire_curproc(lp);
        /* WARNING: we may have migrated cpu's */
        /* gd = td->td_gd; */

        /*
         * Reduce our priority in preparation for a return to userland.  If
         * our passive release function was still in place, our priority was
         * never raised and does not need to be reduced.
         */
        lwkt_passive_recover(td);
}
#if !defined(KTR_KERNENTRY)
#define KTR_KERNENTRY	KTR_ALL
#endif
KTR_INFO_MASTER(kernentry);
KTR_INFO(KTR_KERNENTRY, kernentry, trap, 0,
        "TRAP(pid %d, tid %d, trapno %d, eva %lu)",
        pid_t pid, lwpid_t tid, register_t trapno, vm_offset_t eva);
KTR_INFO(KTR_KERNENTRY, kernentry, trap_ret, 0, "TRAP_RET(pid %d, tid %d)",
        pid_t pid, lwpid_t tid);
KTR_INFO(KTR_KERNENTRY, kernentry, syscall, 0, "SYSC(pid %d, tid %d, nr %d)",
        pid_t pid, lwpid_t tid, register_t trapno);
KTR_INFO(KTR_KERNENTRY, kernentry, syscall_ret, 0, "SYSRET(pid %d, tid %d, err %d)",
        pid_t pid, lwpid_t tid, int err);
KTR_INFO(KTR_KERNENTRY, kernentry, fork_ret, 0, "FORKRET(pid %d, tid %d)",
        pid_t pid, lwpid_t tid);
/*
 * Exception, fault, and trap interface to the kernel.
 * This common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed.
 *
 * This function is also called from doreti in an interlock to handle ASTs.
 * For example:  hardwareint->INTROUTINE->(set ast)->doreti->trap
 *
 * NOTE!  We have to retrieve the fault address prior to obtaining the
 * MP lock because get_mplock() may switch out.  YYY cr2 really ought
 * to be retrieved by the assembly code, not here.
 *
 * XXX gd_trap_nesting_level currently prevents lwkt_switch() from panicking
 * if an attempt is made to switch from a fast interrupt or IPI.  This is
 * necessary to properly take fatal kernel traps on SMP machines if
 * get_mplock() has to block.
 */
void
user_trap(struct trapframe *frame)
{
        struct globaldata *gd = mycpu;
        struct thread *td = gd->gd_curthread;
        struct lwp *lp = td->td_lwp;
        struct proc *p;
        int sticks = 0;
        int i = 0, ucode = 0, type, code;
        int have_mplock = 0;
        int crit_count = td->td_critcount;
        lwkt_tokref_t curstop = td->td_toks_stop;
        vm_offset_t eva;

        p = td->td_proc;

        /*
         * This is a bad kludge to avoid changing the various trapframe
         * structures.  Because we are enabled as a virtual kernel,
         * the original tf_err field will be passed to us shifted 16
         * over in the tf_trapno field for T_PAGEFLT.
         */
        if (frame->tf_trapno == T_PAGEFLT)
                eva = frame->tf_err;
        else
                eva = 0;
#if 0
        kprintf("USER_TRAP AT %08x xflags %d trapno %d eva %08x\n",
                frame->tf_eip, frame->tf_xflags, frame->tf_trapno, eva);
#endif

        /*
         * Everything coming from user mode runs through user_trap,
         * including system calls.
         */
        if (frame->tf_trapno == T_SYSCALL80) {
                syscall2(frame);
                return;
        }

        KTR_LOG(kernentry_trap, lp->lwp_proc->p_pid, lp->lwp_tid,
                frame->tf_trapno, eva);

#ifdef DDB
        if (db_active) {
                eva = (frame->tf_trapno == T_PAGEFLT ? rcr2() : 0);
                ++gd->gd_trap_nesting_level;
                MAKEMPSAFE(have_mplock);
                trap_fatal(frame, TRUE, eva);
                --gd->gd_trap_nesting_level;
                goto out2;
        }
#endif

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
restart:
#endif

        type = frame->tf_trapno;
        code = frame->tf_err;

        userenter(td, p);

        sticks = (int)td->td_sticks;
        lp->lwp_md.md_regs = frame;
        switch (type) {
        case T_PRIVINFLT:       /* privileged instruction fault */
                ucode = ILL_PRVOPC;
                i = SIGILL;
                break;

        case T_BPTFLT:          /* bpt instruction fault */
        case T_TRCTRAP:         /* trace trap */
                frame->tf_eflags &= ~PSL_T;
                i = SIGTRAP;
                ucode = (type == T_TRCTRAP ? TRAP_TRACE : TRAP_BRKPT);
                break;

        case T_ARITHTRAP:       /* arithmetic trap */
                ucode = code;
                i = SIGFPE;
                break;

        case T_ASTFLT:          /* Allow process switch */
                mycpu->gd_cnt.v_soft++;
                if (mycpu->gd_reqflags & RQF_AST_OWEUPC) {
                        atomic_clear_int(&mycpu->gd_reqflags,
                                         RQF_AST_OWEUPC);
                        addupc_task(p, p->p_prof.pr_addr,
                                    p->p_prof.pr_ticks);
                }
                goto out;

                /*
                 * The following two traps can happen in
                 * vm86 mode, and, if so, we want to handle
                 * them specially.
                 */
        case T_PROTFLT:         /* general protection fault */
        case T_STKFLT:          /* stack fault */
#if 0
                if (frame->tf_eflags & PSL_VM) {
                        i = vm86_emulate((struct vm86frame *)frame);
                        if (i == 0)
                                goto out;
                        break;
                }
#endif
                i = SIGBUS;
                ucode = (type == T_PROTFLT) ? BUS_OBJERR : BUS_ADRERR;
                break;

        case T_SEGNPFLT:        /* segment not present fault */
                i = SIGBUS;
                ucode = BUS_ADRERR;
                break;

        case T_TSSFLT:          /* invalid TSS fault */
        case T_DOUBLEFLT:       /* double fault */
        default:
                i = SIGBUS;
                ucode = BUS_OBJERR;
                break;

        case T_PAGEFLT:         /* page fault */
                MAKEMPSAFE(have_mplock);
                i = trap_pfault(frame, TRUE, eva);
                if (i == -1)
                        goto out;
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
                if (i == -2)
                        goto restart;
#endif
                if (i == 0)
                        goto out;

                if (i == SIGSEGV) {
                        ucode = SEGV_MAPERR;
                } else {
                        i = SIGSEGV;
                        ucode = SEGV_ACCERR;
                }
                break;
        case T_DIVIDE:          /* integer divide fault */
                ucode = FPE_INTDIV;
                i = SIGFPE;
                break;

#if NISA > 0
        case T_NMI:
                MAKEMPSAFE(have_mplock);
                /* machine/parity/power fail/"kitchen sink" faults */
                if (isa_nmi(code) == 0) {
#ifdef DDB
                        /*
                         * NMI can be hooked up to a pushbutton
                         * for debugging.
                         */
                        if (ddb_on_nmi) {
                                kprintf ("NMI ... going to debugger\n");
                                kdb_trap (type, 0, frame);
                        }
#endif /* DDB */
                        goto out2;
                } else if (panic_on_nmi)
                        panic("NMI indicates hardware failure");
                break;
#endif /* NISA > 0 */

        case T_OFLOW:           /* integer overflow fault */
                ucode = FPE_INTOVF;
                i = SIGFPE;
                break;

        case T_BOUND:           /* bounds check fault */
                ucode = FPE_FLTSUB;
                i = SIGFPE;
                break;

        case T_DNA:
                /*
                 * Virtual kernel intercept - pass the DNA exception
                 * to the (emulated) virtual kernel if it asked to handle
                 * it.  This occurs when the virtual kernel is holding
                 * onto the FP context for a different emulated
                 * process than the one currently running.
                 *
                 * We must still call npxdna() since we may have
                 * saved FP state that the (emulated) virtual kernel
                 * needs to hand over to a different emulated process.
                 */
                if (lp->lwp_vkernel && lp->lwp_vkernel->ve &&
                    (td->td_pcb->pcb_flags & FP_VIRTFP)
                ) {
                        vkernel_trap(lp, frame);
                        break;
                }
                /*
                 * The kernel may have switched out the FP unit's
                 * state, causing the user process to take a fault
                 * when it tries to use the FP unit.  Restore the
                 * state here.
                 */
                if (npxdna(frame))
                        goto out;
                if (!pmath_emulate) {
                        i = SIGFPE;
                        ucode = FPE_FPU_NP_TRAP;
                        break;
                }
                i = (*pmath_emulate)(frame);
                if (i == 0) {
                        if (!(frame->tf_eflags & PSL_T))
                                goto out2;
                        frame->tf_eflags &= ~PSL_T;
                        i = SIGTRAP;
                }
                /* else ucode = emulator_only_knows() XXX */
                break;

        case T_FPOPFLT:         /* FPU operand fetch fault */
                ucode = ILL_COPROC;
                i = SIGILL;
                break;

        case T_XMMFLT:          /* SIMD floating-point exception */
                ucode = 0;      /* XXX */
                i = SIGFPE;
                break;
        }
        /*
         * Virtual kernel intercept - if the fault is directly related to a
         * VM context managed by a virtual kernel then let the virtual kernel
         * handle it.
         */
        if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
                vkernel_trap(lp, frame);
                goto out;
        }

        /*
         * Translate fault for emulators (e.g. Linux)
         */
        if (*p->p_sysent->sv_transtrap)
                i = (*p->p_sysent->sv_transtrap)(i, type);

        MAKEMPSAFE(have_mplock);
        trapsignal(lp, i, ucode);

#ifdef DEBUG
        if (type <= MAX_TRAP_MSG) {
                uprintf("fatal process exception: %s",
                        trap_msg[type]);
                if ((type == T_PAGEFLT) || (type == T_PROTFLT))
                        uprintf(", fault VA = 0x%lx", (u_long)eva);
                uprintf("\n");
        }
#endif

out:
        userret(lp, frame, sticks);
        userexit(lp);
out2:
        ;
        if (have_mplock)
                rel_mplock();
        KTR_LOG(kernentry_trap_ret, lp->lwp_proc->p_pid, lp->lwp_tid);
#ifdef INVARIANTS
        KASSERT(crit_count == td->td_critcount,
                ("trap: critical section count mismatch! %d/%d",
                crit_count, td->td_pri));
        KASSERT(curstop == td->td_toks_stop,
                ("trap: extra tokens held after trap! %zd/%zd",
                curstop - &td->td_toks_base,
                td->td_toks_stop - &td->td_toks_base));
#endif
}
void
kern_trap(struct trapframe *frame)
{
        struct globaldata *gd = mycpu;
        struct thread *td = gd->gd_curthread;
        struct lwp *lp;
        struct proc *p;
        int i = 0, ucode = 0, type, code;
        int have_mplock = 0;
        int crit_count = td->td_critcount;
        lwkt_tokref_t curstop = td->td_toks_stop;
        vm_offset_t eva;

        lp = td->td_lwp;
        p = td->td_proc;

        if (frame->tf_trapno == T_PAGEFLT)
                eva = frame->tf_err;
        else
                eva = 0;

#ifdef DDB
        if (db_active) {
                ++gd->gd_trap_nesting_level;
                MAKEMPSAFE(have_mplock);
                trap_fatal(frame, FALSE, eva);
                --gd->gd_trap_nesting_level;
                goto out2;
        }
#endif

        type = frame->tf_trapno;
        code = frame->tf_err;
        switch (type) {
        case T_PAGEFLT:         /* page fault */
                MAKEMPSAFE(have_mplock);
                trap_pfault(frame, FALSE, eva);
                goto out2;

        case T_DNA:
                /*
                 * The kernel may be using npx for copying or other
                 * purposes.
                 */
                panic("kernel NPX should not happen");
                break;

        case T_PROTFLT:         /* general protection fault */
        case T_SEGNPFLT:        /* segment not present fault */
                /*
                 * Invalid segment selectors and out of bounds
                 * %eip's and %esp's can be set up in user mode.
                 * This causes a fault in kernel mode when the
                 * kernel tries to return to user mode.  We want
                 * to get this fault so that we can fix the
                 * problem here and not have to check all the
                 * selectors and pointers when the user changes
                 * them.
                 */
                if (mycpu->gd_intr_nesting_level == 0) {
                        if (td->td_pcb->pcb_onfault) {
                                frame->tf_eip =
                                    (register_t)td->td_pcb->pcb_onfault;
                                goto out2;
                        }
                }
                break;
        case T_TSSFLT:
                /*
                 * PSL_NT can be set in user mode and isn't cleared
                 * automatically when the kernel is entered.  This
                 * causes a TSS fault when the kernel attempts to
                 * `iret' because the TSS link is uninitialized.  We
                 * want to get this fault so that we can fix the
                 * problem here and not every time the kernel is
                 * entered.
                 */
                if (frame->tf_eflags & PSL_NT) {
                        frame->tf_eflags &= ~PSL_NT;
                        goto out2;
                }
                break;

        case T_TRCTRAP:  /* trace trap */
                if (frame->tf_eip == (int)IDTVEC(syscall)) {
                        /*
                         * We've just entered system mode via the
                         * syscall lcall.  Continue single stepping
                         * silently until the syscall handler has
                         * saved the flags.
                         */
                        goto out2;
                }
                if (frame->tf_eip == (int)IDTVEC(syscall) + 1) {
                        /*
                         * The syscall handler has now saved the
                         * flags.  Stop single stepping it.
                         */
                        frame->tf_eflags &= ~PSL_T;
                        goto out2;
                }
                /*
                 * Ignore debug register trace traps due to
                 * accesses in the user's address space, which
                 * can happen under several conditions such as
                 * if a user sets a watchpoint on a buffer and
                 * then passes that buffer to a system call.
                 * We still want to get TRCTRAPS for addresses
                 * in kernel space because that is useful when
                 * debugging the kernel.
                 */
                if (user_dbreg_trap()) {
                        /*
                         * Reset breakpoint bits because the
                         * processor doesn't.
                         */
                        load_dr6(rdr6() & 0xfffffff0);
                        goto out2;
                }
                /*
                 * FALLTHROUGH (TRCTRAP kernel mode, kernel address)
                 */
        case T_BPTFLT:
                /*
                 * If DDB is enabled, let it handle the debugger trap.
                 * Otherwise, debugger traps "can't happen".
                 */
#ifdef DDB
                MAKEMPSAFE(have_mplock);
                if (kdb_trap (type, 0, frame))
                        goto out2;
#endif
                MAKEMPSAFE(have_mplock);
                trap_fatal(frame, FALSE, eva);
                goto out2;

        default:
                MAKEMPSAFE(have_mplock);
                trap_fatal(frame, FALSE, eva);
                goto out2;
        }
        /*
         * Ignore this trap generated from a spurious SIGTRAP.
         *
         * single stepping in / syscalls leads to spurious / SIGTRAP
         *      so ignore
         *
         * Haiku (c) 2007 Simon 'corecode' Schubert
         */
        /*
         * Translate fault for emulators (e.g. Linux)
         */
        if (*p->p_sysent->sv_transtrap)
                i = (*p->p_sysent->sv_transtrap)(i, type);

        MAKEMPSAFE(have_mplock);
        trapsignal(lp, i, ucode);

#ifdef DEBUG
        if (type <= MAX_TRAP_MSG) {
                uprintf("fatal process exception: %s",
                        trap_msg[type]);
                if ((type == T_PAGEFLT) || (type == T_PROTFLT))
                        uprintf(", fault VA = 0x%lx", (u_long)eva);
                uprintf("\n");
        }
#endif

out2:
        ;
        if (have_mplock)
                rel_mplock();
#ifdef INVARIANTS
        KASSERT(crit_count == td->td_critcount,
                ("trap: critical section count mismatch! %d/%d",
                crit_count, td->td_pri));
        KASSERT(curstop == td->td_toks_stop,
                ("trap: extra tokens held after trap! %zd/%zd",
                curstop - &td->td_toks_base,
                td->td_toks_stop - &td->td_toks_base));
#endif
}
static int
trap_pfault(struct trapframe *frame, int usermode, vm_offset_t eva)
{
        vm_offset_t va;
        struct vmspace *vm = NULL;
        vm_map_t map = 0;
        int fault_flags;
        int rv = 0;
        vm_prot_t ftype;
        thread_t td = curthread;
        struct lwp *lp = td->td_lwp;

        va = trunc_page(eva);
        if (usermode == FALSE) {
                /*
                 * This is a fault on kernel virtual memory.
                 */
                map = &kernel_map;
        } else {
                /*
                 * This is a fault on non-kernel virtual memory.
                 * vm is initialized above to NULL. If curproc is NULL
                 * or curproc->p_vmspace is NULL the fault is fatal.
                 */
                if (lp != NULL)
                        vm = lp->lwp_vmspace;
                if (vm == NULL)
                        goto nogo;
                map = &vm->vm_map;
        }

        if (frame->tf_xflags & PGEX_W)
                ftype = VM_PROT_READ | VM_PROT_WRITE;
        else
                ftype = VM_PROT_READ;

        if (map != &kernel_map) {
                /*
                 * Keep swapout from messing with us during this
                 * critical time.
                 */
                PHOLD(lp->lwp_proc);

                /*
                 * Issue fault
                 */
                fault_flags = 0;
                if (usermode)
                        fault_flags |= VM_FAULT_BURST;
                if (ftype & VM_PROT_WRITE)
                        fault_flags |= VM_FAULT_DIRTY;
                else
                        fault_flags |= VM_FAULT_NORMAL;
                rv = vm_fault(map, va, ftype, fault_flags);

                PRELE(lp->lwp_proc);
        } else {
                /*
                 * Don't have to worry about process locking or stacks
                 * in the kernel.
                 */
                rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
        }

        if (rv == KERN_SUCCESS)
                return (0);
nogo:
        if (!usermode) {
                if (td->td_gd->gd_intr_nesting_level == 0 &&
                    td->td_pcb->pcb_onfault) {
                        frame->tf_eip = (register_t)td->td_pcb->pcb_onfault;
                        return (0);
                }
                trap_fatal(frame, usermode, eva);
                return (-1);
        }
        return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}
static void
trap_fatal(struct trapframe *frame, int usermode, vm_offset_t eva)
{
        int code, type, ss, esp;

        code = frame->tf_xflags;
        type = frame->tf_trapno;

        if (type <= MAX_TRAP_MSG) {
                kprintf("\n\nFatal trap %d: %s while in %s mode\n",
                        type, trap_msg[type],
                        (usermode ? "user" : "kernel"));
        }
        /* two separate prints in case of a trap on an unmapped page */
        kprintf("cpuid = %d\n", mycpu->gd_cpuid);
        if (type == T_PAGEFLT) {
                kprintf("fault virtual address	= %p\n", (void *)eva);
                kprintf("fault code		= %s %s, %s\n",
                        usermode ? "user" : "supervisor",
                        code & PGEX_W ? "write" : "read",
                        code & PGEX_P ? "protection violation" :
                                        "page not present");
        }
        kprintf("instruction pointer	= 0x%x:0x%x\n",
                frame->tf_cs & 0xffff, frame->tf_eip);
        if ((ISPL(frame->tf_cs) == SEL_UPL) || (frame->tf_eflags & PSL_VM)) {
                ss = frame->tf_ss & 0xffff;
                esp = frame->tf_esp;
        } else {
                ss = GSEL(GDATA_SEL, SEL_KPL);
                esp = (int)&frame->tf_esp;
        }
        kprintf("stack pointer		= 0x%x:0x%x\n", ss, esp);
        kprintf("frame pointer		= 0x%x:0x%x\n", ss, frame->tf_ebp);
        kprintf("processor eflags	= ");
        if (frame->tf_eflags & PSL_T)
                kprintf("trace trap, ");
        if (frame->tf_eflags & PSL_I)
                kprintf("interrupt enabled, ");
        if (frame->tf_eflags & PSL_NT)
                kprintf("nested task, ");
        if (frame->tf_eflags & PSL_RF)
                kprintf("resume, ");
        if (frame->tf_eflags & PSL_VM)
                kprintf("vm86, ");
        kprintf("IOPL = %d\n", (frame->tf_eflags & PSL_IOPL) >> 12);
        kprintf("current process		= ");
        if (curproc) {
                kprintf("%lu (%s)\n",
                    (u_long)curproc->p_pid, curproc->p_comm ?
                    curproc->p_comm : "");
        } else {
                kprintf("Idle\n");
        }
        kprintf("current thread          = pri %d ", curthread->td_pri);
        if (curthread->td_critcount)
                kprintf("(CRIT)");
        kprintf("\n");
        /**
         *  XXX FIXME:
         *      we probably SHOULD have stopped the other CPUs before now!
         *      another CPU COULD have been touching cpl at this moment...
         */
        kprintf(" <- SMP: XXX");
        kprintf("\n");

#ifdef DDB
        if ((debugger_on_panic || db_active) && kdb_trap(type, code, frame))
                return;
#endif
        kprintf("trap number		= %d\n", type);
        if (type <= MAX_TRAP_MSG)
                panic("%s", trap_msg[type]);
        else
                panic("unknown/reserved trap");
}
/*
 * Double fault handler. Called when a fault occurs while writing
 * a frame for a trap/exception onto the stack. This usually occurs
 * when the stack overflows (such is the case with infinite recursion,
 * for example).
 *
 * XXX Note that the current PTD gets replaced by IdlePTD when the
 * task switch occurs. This means that the stack that was active at
 * the time of the double fault is not available at <kstack> unless
 * the machine was idle when the double fault occurred. The downside
 * of this is that "trace <ebp>" in ddb won't work.
 */
void
dblfault_handler(void)
{
        struct mdglobaldata *gd = mdcpu;

        kprintf("\nFatal double fault:\n");
        kprintf("eip = 0x%x\n", gd->gd_common_tss.tss_eip);
        kprintf("esp = 0x%x\n", gd->gd_common_tss.tss_esp);
        kprintf("ebp = 0x%x\n", gd->gd_common_tss.tss_ebp);
        /* two separate prints in case of a trap on an unmapped page */
        kprintf("cpuid = %d\n", mycpu->gd_cpuid);
        panic("double fault");
}
/*
 * syscall2 -	MP aware system call request C handler
 *
 * A system call is essentially treated as a trap except that the
 * MP lock is not held on entry or return.  We are responsible for
 * obtaining the MP lock if necessary and for handling ASTs
 * (e.g. a task switch) prior to return.
 *
 * MPSAFE
 */
void
syscall2(struct trapframe *frame)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        struct lwp *lp = td->td_lwp;
        caddr_t params;
        struct sysent *callp;
        register_t orig_tf_eflags;
        int sticks;
        int error;
        int narg;
#ifdef INVARIANTS
        int crit_count = td->td_critcount;
#endif
        int have_mplock = 0;
        u_int code;
        union sysunion args;
        KTR_LOG(kernentry_syscall, lp->lwp_proc->p_pid, lp->lwp_tid,
                frame->tf_eax);

        userenter(td, p);       /* lazy raise our priority */

        /*
         * Misc
         */
        sticks = (int)td->td_sticks;
        orig_tf_eflags = frame->tf_eflags;

        /*
         * Virtual kernel intercept - if a VM context managed by a virtual
         * kernel issues a system call the virtual kernel handles it, not us.
         * Restore the virtual kernel context and return from its system
         * call.  The current frame is copied out to the virtual kernel.
         */
        if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
                vkernel_trap(lp, frame);
                error = EJUSTRETURN;
                goto out;
        }

        /*
         * Get the system call parameters and account for time
         */
        lp->lwp_md.md_regs = frame;
        params = (caddr_t)frame->tf_esp + sizeof(int);
        code = frame->tf_eax;
        if (p->p_sysent->sv_prepsyscall) {
                (*p->p_sysent->sv_prepsyscall)(
                        frame, (int *)(&args.nosys.sysmsg + 1),
                        &code, &params);
        } else {
                /*
                 * Need to check if this is a 32 bit or 64 bit syscall.
                 * fuword is MP aware.
                 */
                if (code == SYS_syscall) {
                        /*
                         * Code is first argument, followed by actual args.
                         */
                        code = fuword(params);
                        params += sizeof(int);
                } else if (code == SYS___syscall) {
                        /*
                         * Like syscall, but code is a quad, so as to maintain
                         * quad alignment for the rest of the arguments.
                         */
                        code = fuword(params);
                        params += sizeof(quad_t);
                }
        }
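        /*
         * Illustrative layout for an indirect syscall(2) invocation such
         * as syscall(SYS_getpid) (added commentary, not from the original
         * source):
         *
         *      tf_eax : SYS_syscall            (the indirect entry)
         *      esp+0  : return address         (skipped via sizeof(int))
         *      esp+4  : SYS_getpid             (fetched by fuword() above)
         *      esp+8  : arguments, if any, for the real system call
         */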
        code &= p->p_sysent->sv_mask;
        if (code >= p->p_sysent->sv_size)
                callp = &p->p_sysent->sv_table[0];
        else
                callp = &p->p_sysent->sv_table[code];

        narg = callp->sy_narg & SYF_ARGMASK;

        /*
         * copyin is MP aware, but the tracing code is not
         */
        if (narg && params) {
                error = copyin(params, (caddr_t)(&args.nosys.sysmsg + 1),
                                narg * sizeof(register_t));
                if (error) {
#ifdef KTRACE
                        if (KTRPOINT(td, KTR_SYSCALL)) {
                                MAKEMPSAFE(have_mplock);
                                ktrsyscall(lp, code, narg,
                                        (void *)(&args.nosys.sysmsg + 1));
                        }
#endif
                        goto bad;
                }
        }

#ifdef KTRACE
        if (KTRPOINT(td, KTR_SYSCALL)) {
                MAKEMPSAFE(have_mplock);
                ktrsyscall(lp, code, narg, (void *)(&args.nosys.sysmsg + 1));
        }
#endif
        /*
         * For traditional syscall code edx is left untouched when 32 bit
         * results are returned.  Since edx is loaded from fds[1] when the
         * system call returns we pre-set it here.
         */
        args.sysmsg_fds[0] = 0;
        args.sysmsg_fds[1] = frame->tf_edx;
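        /*
         * Example (added commentary, not from the original source): a
         * syscall such as lseek(2), which returns a 64 bit off_t, fills
         * in both sysmsg_fds[0] and sysmsg_fds[1]; those land in eax:edx
         * on return below.  A 32 bit result only writes sysmsg_fds[0],
         * so edx keeps the caller's original value as pre-set here.
         */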
        /*
         * The syscall might manipulate the trap frame. If it does it
         * will probably return EJUSTRETURN.
         */
        args.sysmsg_frame = frame;

        STOPEVENT(p, S_SCE, narg);      /* MP aware */

        /*
         * NOTE: All system calls run MPSAFE now.  The system call itself
         *	 is responsible for getting the MP lock.
         */
        error = (*callp->sy_call)(&args);
#if 0
        kprintf("system call %d returned %d\n", code, error);
#endif

out:
        /*
         * MP SAFE (we may or may not have the MP lock at this point)
         */
        switch (error) {
        case 0:
                /*
                 * Reinitialize proc pointer `p' as it may be different
                 * if this is a child returning from fork syscall.
                 */
                p = curproc;
                lp = curthread->td_lwp;
                frame->tf_eax = args.sysmsg_fds[0];
                frame->tf_edx = args.sysmsg_fds[1];
                frame->tf_eflags &= ~PSL_C;
                break;
        case ERESTART:
                /*
                 * Reconstruct pc, assuming lcall $X,y is 7 bytes,
                 * int 0x80 is 2 bytes.  We saved this in tf_err.
                 */
                frame->tf_eip -= frame->tf_err;
                break;
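                /*
                 * For example (added commentary, not from the original
                 * source): a syscall entered via "int 0x80" that returns
                 * ERESTART backs eip up by the 2 byte instruction length
                 * stashed in tf_err, so the instruction re-executes once
                 * the pending work (e.g. a signal) has been handled.
                 */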
        case EJUSTRETURN:
                break;
        case EASYNC:
                panic("Unexpected EASYNC return value (for now)");
        default:
bad:
                if (p->p_sysent->sv_errsize) {
                        if (error >= p->p_sysent->sv_errsize)
                                error = -1;     /* XXX */
                        else
                                error = p->p_sysent->sv_errtbl[error];
                }
                frame->tf_eax = error;
                frame->tf_eflags |= PSL_C;
                break;
        }
        /*
         * Traced syscall.  trapsignal() is not MP aware.
         */
        if ((orig_tf_eflags & PSL_T) /*&& !(orig_tf_eflags & PSL_VM)*/) {
                MAKEMPSAFE(have_mplock);
                frame->tf_eflags &= ~PSL_T;
                trapsignal(lp, SIGTRAP, TRAP_TRACE);
        }

        /*
         * Handle reschedule and other end-of-syscall issues
         */
        userret(lp, frame, sticks);

#ifdef KTRACE
        if (KTRPOINT(td, KTR_SYSRET)) {
                MAKEMPSAFE(have_mplock);
                ktrsysret(lp, code, error, args.sysmsg_result);
        }
#endif

        /*
         * This works because errno is findable through the
         * register set.  If we ever support an emulation where this
         * is not the case, this code will need to be revisited.
         */
        STOPEVENT(p, S_SCX, code);

        userexit(lp);
        /*
         * Release the MP lock if we had to get it
         */
        if (have_mplock)
                rel_mplock();
        KTR_LOG(kernentry_syscall_ret, lp->lwp_proc->p_pid, lp->lwp_tid, error);
#ifdef INVARIANTS
        KASSERT(crit_count == td->td_critcount,
                ("syscall: critical section count mismatch! %d/%d",
                crit_count, td->td_pri));
        KASSERT(&td->td_toks_base == td->td_toks_stop,
                ("syscall: extra tokens held after trap! %zd",
                td->td_toks_stop - &td->td_toks_base));
#endif
}
/*
 * NOTE: mplock not held at any point
 */
void
fork_return(struct lwp *lp, struct trapframe *frame)
{
        frame->tf_eax = 0;              /* Child returns zero */
        frame->tf_eflags &= ~PSL_C;     /* success */
        frame->tf_edx = 1;

        generic_lwp_return(lp, frame);
        KTR_LOG(kernentry_fork_ret, lp->lwp_proc->p_pid, lp->lwp_tid);
}
/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.
 *
 * This code will return back into the fork trampoline code which then
 * runs doreti.
 *
 * NOTE: The mplock is not held at any point.
 */
void
generic_lwp_return(struct lwp *lp, struct trapframe *frame)
{
        struct proc *p = lp->lwp_proc;
        /*
         * Newly forked processes are given a kernel priority.  We have to
         * adjust the priority to a normal user priority and fake entry
         * into the kernel (call userenter()) to install a passive release
         * function just in case userret() decides to stop the process.  This
         * can occur when ^Z races a fork.  If we do not install the passive
         * release function the current process designation will not be
         * released when the thread goes to sleep.
         */
        lwkt_setpri_self(TDPRI_USER_NORM);
        userenter(lp->lwp_thread, p);
        userret(lp, frame, 0);
#ifdef KTRACE
        if (KTRPOINT(lp->lwp_thread, KTR_SYSRET))
                ktrsysret(lp, SYS_fork, 0, 0);
#endif
        lp->lwp_flags |= LWP_PASSIVE_ACQ;
        userexit(lp);
        lp->lwp_flags &= ~LWP_PASSIVE_ACQ;
}
/*
 * doreti has turned into this.  The frame is directly on the stack.  We
 * pull everything else we need (fpu and tls context) from the current
 * thread.
 *
 * Note on fpu interactions: In a virtual kernel, the fpu context for
 * an emulated user mode process is not shared with the virtual kernel's
 * fpu context, so we only have to 'stack' fpu contexts within the virtual
 * kernel itself, and not even then since the signal() contexts that we care
 * about save and restore the FPU state (I think anyhow).
 *
 * vmspace_ctl() returns an error only if it had problems installing the
 * context we supplied or problems copying data to/from our VM space.
 */
void
go_user(struct intrframe *frame)
{
        struct trapframe *tf = (void *)&frame->if_gs;
        int r;
        /*
         * Interrupts may be disabled on entry, make sure all signals
         * can be received before beginning our loop.
         */
        sigsetmask(0);

        /*
         * Switch to the current simulated user process, then call
         * user_trap() when we break out of it (usually due to a signal).
         */
        for (;;) {
                /*
                 * Tell the real kernel whether it is ok to use the FP
                 * unit or not.
                 *
                 * The critical section is required to prevent an interrupt
                 * from causing a preemptive task switch and changing
                 * the FP state.
                 */
                crit_enter();
                if (mdcpu->gd_npxthread == curthread) {
                        tf->tf_xflags &= ~PGEX_FPFAULT;
                } else {
                        tf->tf_xflags |= PGEX_FPFAULT;
                }

                /*
                 * Run emulated user process context.  This call interlocks
                 * with new mailbox signals.
                 *
                 * Set PGEX_U unconditionally, indicating a user frame (the
                 * bit is normally set only by T_PAGEFLT).
                 */
                r = vmspace_ctl(&curproc->p_vmspace->vm_pmap, VMSPACE_CTL_RUN,
                                tf, &curthread->td_savevext);
                crit_exit();
                frame->if_xflags |= PGEX_U;
#if 0
                kprintf("GO USER %d trap %d EVA %08x EIP %08x ESP %08x XFLAGS %02x/%02x\n",
                        r, tf->tf_trapno, tf->tf_err, tf->tf_eip, tf->tf_esp,
                        tf->tf_xflags, frame->if_xflags);
#endif
                if (r < 0) {
                        if (errno != EINTR)
                                panic("vmspace_ctl failed error %d", errno);
                } else {
                        if (tf->tf_trapno) {
                                user_trap(tf);
                        }
                }
                if (mycpu->gd_reqflags & RQF_AST_MASK) {
                        tf->tf_trapno = T_ASTFLT;
                        user_trap(tf);
                }
                tf->tf_trapno = 0;
        }
}
/*
 * If PGEX_FPFAULT is set then set FP_VIRTFP in the PCB to force a T_DNA
 * fault (which is then passed back to the virtual kernel) if an attempt is
 * made to use the FP unit.
 *
 * XXX this is a fairly big hack.
 */
void
set_vkernel_fp(struct trapframe *frame)
{
        struct thread *td = curthread;

        if (frame->tf_xflags & PGEX_FPFAULT) {
                td->td_pcb->pcb_flags |= FP_VIRTFP;
                if (mdcpu->gd_npxthread == td)
                        npxexit();
        } else {
                td->td_pcb->pcb_flags &= ~FP_VIRTFP;
        }
}

/*
 * Called from vkernel_trap() to fixup the vkernel's syscall
 * frame for vmspace_ctl() return.
 */
void
cpu_vkernel_trap(struct trapframe *frame, int error)
{
        frame->tf_eax = error;
        if (error)
                frame->tf_eflags |= PSL_C;
        else
                frame->tf_eflags &= ~PSL_C;
}