/*
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $
 * $DragonFly: src/sys/platform/vkernel/i386/trap.c,v 1.35 2008/09/09 04:06:19 dillon Exp $
 */

/*
 * 386 Trap and System call handling
 */
#include "opt_ddb.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pioctl.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/signal2.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vmmeter.h>
#include <sys/malloc.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#include <sys/ktr.h>
#include <sys/upcall.h>
#include <sys/vkernel.h>
#include <sys/sysproto.h>
#include <sys/sysunion.h>
#include <sys/vmspace.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>

#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <machine/tss.h>
#include <machine/globaldata.h>

#include <machine/vm86.h>

#include <ddb/ddb.h>

#include <sys/msgport2.h>
#include <sys/thread2.h>
#include <sys/mplock2.h>
#ifdef SMP
#define MAKEMPSAFE(have_mplock)			\
	if (have_mplock == 0) {			\
		get_mplock();			\
		have_mplock = 1;		\
	}
#else
#define MAKEMPSAFE(have_mplock)
#endif
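
/*
 * Illustrative usage sketch (not additional code): a handler entered
 * without the MP lock calls MAKEMPSAFE(have_mplock) before reaching
 * MP-unsafe code, and the exit path releases the lock only if it was
 * actually acquired:
 *
 *	int have_mplock = 0;
 *	...
 *	MAKEMPSAFE(have_mplock);	(idempotent; grabs the lock once)
 *	...
 *	if (have_mplock)
 *		rel_mplock();
 */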
int (*pmath_emulate) (struct trapframe *);

static int trap_pfault (struct trapframe *, int, vm_offset_t);
static void trap_fatal (struct trapframe *, int, vm_offset_t);
void dblfault_handler (void);

extern inthand_t IDTVEC(syscall);

#define MAX_TRAP_MSG		28
static char *trap_msg[] = {
	"",					/*  0 unused */
	"privileged instruction fault",		/*  1 T_PRIVINFLT */
	"",					/*  2 unused */
	"breakpoint instruction fault",		/*  3 T_BPTFLT */
	"",					/*  4 unused */
	"",					/*  5 unused */
	"arithmetic trap",			/*  6 T_ARITHTRAP */
	"system forced exception",		/*  7 T_ASTFLT */
	"",					/*  8 unused */
	"general protection fault",		/*  9 T_PROTFLT */
	"trace trap",				/* 10 T_TRCTRAP */
	"",					/* 11 unused */
	"page fault",				/* 12 T_PAGEFLT */
	"",					/* 13 unused */
	"alignment fault",			/* 14 T_ALIGNFLT */
	"",					/* 15 unused */
	"",					/* 16 unused */
	"",					/* 17 unused */
	"integer divide fault",			/* 18 T_DIVIDE */
	"non-maskable interrupt trap",		/* 19 T_NMI */
	"overflow trap",			/* 20 T_OFLOW */
	"FPU bounds check fault",		/* 21 T_BOUND */
	"FPU device not available",		/* 22 T_DNA */
	"double fault",				/* 23 T_DOUBLEFLT */
	"FPU operand fetch fault",		/* 24 T_FPOPFLT */
	"invalid TSS fault",			/* 25 T_TSSFLT */
	"segment not present fault",		/* 26 T_SEGNPFLT */
	"stack fault",				/* 27 T_STKFLT */
	"machine check trap",			/* 28 T_MCHK */
};
#ifdef DDB
static int ddb_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, ddb_on_nmi, CTLFLAG_RW,
	&ddb_on_nmi, 0, "Go to DDB on NMI");
#endif
static int panic_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
	&panic_on_nmi, 0, "Panic on NMI");
static int fast_release;
SYSCTL_INT(_machdep, OID_AUTO, fast_release, CTLFLAG_RW,
	&fast_release, 0, "Passive Release was optimal");
static int slow_release;
SYSCTL_INT(_machdep, OID_AUTO, slow_release, CTLFLAG_RW,
	&slow_release, 0, "Passive Release was nonoptimal");
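
/*
 * Example (illustrative): these knobs are runtime-tunable with sysctl(8)
 * from the vkernel's userland, e.g.
 *
 *	sysctl machdep.ddb_on_nmi=0	# don't drop into DDB on NMI
 *	sysctl machdep.panic_on_nmi	# query the current setting
 */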
MALLOC_DEFINE(M_SYSMSG, "sysmsg", "sysmsg structure");
extern int max_sysmsg;

/*
 * Passively intercepts the thread switch function to increase
 * the thread priority from a user priority to a kernel priority, reducing
 * syscall and trap overhead for the case where no switch occurs.
 *
 * Synchronizes td_ucred with p_ucred.  This is used by system calls,
 * signal handling, faults, AST traps, and anything else that enters the
 * kernel from userland and provides the kernel with a stable read-only
 * copy of the process ucred.
 */
static __inline void
userenter(struct thread *curtd, struct proc *curp)
{
	struct ucred *ocred;
	struct ucred *ncred;

	curtd->td_release = lwkt_passive_release;

	if (curtd->td_ucred != curp->p_ucred) {
		ncred = crhold(curp->p_ucred);
		ocred = curtd->td_ucred;
		curtd->td_ucred = ncred;
		if (ocred)
			crfree(ocred);
	}
}
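
/*
 * Illustrative consumer (sketch, not part of this file): because
 * userenter() synchronized td_ucred, kernel code running on behalf of
 * the thread can make lockless credential checks such as
 *
 *	if (curthread->td_ucred->cr_uid != 0)
 *		return (EPERM);
 *
 * without dereferencing p->p_ucred under a lock.
 */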
/*
 * Handle signals, upcalls, profiling, and other AST's and/or tasks that
 * must be completed before we can return to or try to return to userland.
 *
 * Note that td_sticks is a 64 bit quantity, but there's no point doing
 * 64 bit arithmetic on the delta calculation so the absolute tick values
 * are truncated to an integer.
 */
static void
userret(struct lwp *lp, struct trapframe *frame, int sticks)
{
	struct proc *p = lp->lwp_proc;
	int sig;

	/*
	 * Charge system time if profiling.  Note: times are in microseconds.
	 * This may do a copyout and block, so do it first even though it
	 * means some system time will be charged as user time.
	 */
	if (p->p_flag & P_PROFIL) {
		addupc_task(p, frame->tf_eip,
			(u_int)((int)lp->lwp_thread->td_sticks - sticks));
	}

recheck:
	/*
	 * If the jungle wants us dead, so be it.
	 */
	if (lp->lwp_flag & LWP_WEXIT) {
		get_mplock();
		lwp_exit(0);
		rel_mplock();	/* NOT REACHED */
	}

	/*
	 * Block here if we are in a stopped state.
	 */
	if (p->p_stat == SSTOP) {
		get_mplock();
		tstop();
		rel_mplock();
		goto recheck;
	}

	/*
	 * Post any pending upcalls
	 */
	if (p->p_flag & P_UPCALLPEND) {
		get_mplock();
		p->p_flag &= ~P_UPCALLPEND;
		postupcall(lp);
		rel_mplock();
		goto recheck;
	}

	/*
	 * Post any pending signals
	 *
	 * WARNING!  postsig() can exit and not return.
	 */
	if ((sig = CURSIG_TRACE(lp)) != 0) {
		get_mplock();
		postsig(sig);
		rel_mplock();
		goto recheck;
	}

	/*
	 * block here if we are swapped out, but still process signals
	 * (such as SIGKILL).  proc0 (the swapin scheduler) is already
	 * aware of our situation, we do not have to wake it up.
	 */
	if (p->p_flag & P_SWAPPEDOUT) {
		get_mplock();
		p->p_flag |= P_SWAPWAIT;
		swapin_request();
		if (p->p_flag & P_SWAPWAIT)
			tsleep(p, PCATCH, "SWOUT", 0);
		p->p_flag &= ~P_SWAPWAIT;
		rel_mplock();
		goto recheck;
	}

	/*
	 * Make sure postsig() handled request to restore old signal mask
	 * after running signal handler.
	 */
	KKASSERT((lp->lwp_flag & LWP_OLDMASK) == 0);
}
/*
 * Cleanup from userenter and any passive release that might have occurred.
 * We must reclaim the current-process designation before we can return
 * to usermode.  We also handle both LWKT and USER reschedule requests.
 */
static __inline void
userexit(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;
	/* globaldata_t gd = td->td_gd; */

	/*
	 * Handle stop requests at kernel priority.  Any requests queued
	 * after this loop will generate another AST.
	 */
	while (lp->lwp_proc->p_stat == SSTOP) {
		get_mplock();
		tstop();
		rel_mplock();
	}

	/*
	 * Reduce our priority in preparation for a return to userland.  If
	 * our passive release function was still in place, our priority was
	 * never raised and does not need to be reduced.
	 */
	lwkt_passive_recover(td);

	/*
	 * Become the current user scheduled process if we aren't already,
	 * and deal with reschedule requests and other factors.
	 */
	lp->lwp_proc->p_usched->acquire_curproc(lp);
	/* WARNING: we may have migrated cpu's */
	/* gd = td->td_gd; */
}
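
/*
 * Illustrative bracket (sketch): the handlers below pair these helpers
 * around every entry from usermode, roughly
 *
 *	userenter(td, p);		(passive release + td_ucred sync)
 *	... dispatch the trap or system call ...
 *	userret(lp, frame, sticks);	(signals, stops, profiling)
 *	userexit(lp);			(reclaim curproc, drop priority)
 */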
#if !defined(KTR_KERNENTRY)
#define	KTR_KERNENTRY	KTR_ALL
#endif
KTR_INFO_MASTER(kernentry);
KTR_INFO(KTR_KERNENTRY, kernentry, trap, 0, "pid=%d, tid=%d, trapno=%d, eva=%p",
	 sizeof(int) + sizeof(int) + sizeof(int) + sizeof(vm_offset_t));
KTR_INFO(KTR_KERNENTRY, kernentry, trap_ret, 0, "pid=%d, tid=%d",
	 sizeof(int) + sizeof(int));
KTR_INFO(KTR_KERNENTRY, kernentry, syscall, 0, "pid=%d, tid=%d, call=%d",
	 sizeof(int) + sizeof(int) + sizeof(int));
KTR_INFO(KTR_KERNENTRY, kernentry, syscall_ret, 0, "pid=%d, tid=%d, err=%d",
	 sizeof(int) + sizeof(int) + sizeof(int));
KTR_INFO(KTR_KERNENTRY, kernentry, fork_ret, 0, "pid=%d, tid=%d",
	 sizeof(int) + sizeof(int));
/*
 * Exception, fault, and trap interface to the kernel.
 * This common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed.
 *
 * This function is also called from doreti in an interlock to handle ASTs.
 * For example:  hardwareint->INTROUTINE->(set ast)->doreti->trap
 *
 * NOTE!  We have to retrieve the fault address prior to obtaining the
 * MP lock because get_mplock() may switch out.  YYY cr2 really ought
 * to be retrieved by the assembly code, not here.
 *
 * XXX gd_trap_nesting_level currently prevents lwkt_switch() from panicking
 * if an attempt is made to switch from a fast interrupt or IPI.  This is
 * necessary to properly take fatal kernel traps on SMP machines if
 * get_mplock() has to block.
 */
void
user_trap(struct trapframe *frame)
{
	struct globaldata *gd = mycpu;
	struct thread *td = gd->gd_curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p;
	int sticks = 0;
	int i = 0, ucode = 0, type, code;
#ifdef SMP
	int have_mplock = 0;
#endif
#ifdef INVARIANTS
	int crit_count = td->td_critcount;
	lwkt_tokref_t curstop = td->td_toks_stop;
#endif
	vm_offset_t eva;

	p = td->td_proc;

	/*
	 * This is a bad kludge to avoid changing the various trapframe
	 * structures.  Because we are enabled as a virtual kernel,
	 * the original tf_err field will be passed to us shifted 16
	 * over in the tf_trapno field for T_PAGEFLT.
	 */
	if (frame->tf_trapno == T_PAGEFLT)
		eva = frame->tf_err;
	else
		eva = 0;
#if 0
	kprintf("USER_TRAP AT %08x xflags %d trapno %d eva %08x\n",
		frame->tf_eip, frame->tf_xflags, frame->tf_trapno, eva);
#endif

	/*
	 * Everything coming from user mode runs through user_trap,
	 * including system calls.
	 */
	if (frame->tf_trapno == T_SYSCALL80) {
		syscall2(frame);
		return;
	}

	KTR_LOG(kernentry_trap, lp->lwp_proc->p_pid, lp->lwp_tid,
		frame->tf_trapno, eva);

#ifdef DDB
	if (db_active) {
		eva = (frame->tf_trapno == T_PAGEFLT ? rcr2() : 0);
		++gd->gd_trap_nesting_level;
		MAKEMPSAFE(have_mplock);
		trap_fatal(frame, TRUE, eva);
		--gd->gd_trap_nesting_level;
		goto out2;
	}
#endif

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
restart:
#endif

	type = frame->tf_trapno;
	code = frame->tf_err;

	userenter(td, p);

	sticks = (int)td->td_sticks;
	lp->lwp_md.md_regs = frame;

	switch (type) {
	case T_PRIVINFLT:	/* privileged instruction fault */
		ucode = ILL_PRVOPC;
		i = SIGILL;
		break;

	case T_BPTFLT:		/* bpt instruction fault */
	case T_TRCTRAP:		/* trace trap */
		frame->tf_eflags &= ~PSL_T;
		ucode = TRAP_TRACE;
		i = SIGTRAP;
		break;

	case T_ARITHTRAP:	/* arithmetic trap */
		ucode = code;
		i = SIGFPE;
		break;

	case T_ASTFLT:		/* Allow process switch */
		mycpu->gd_cnt.v_soft++;
		if (mycpu->gd_reqflags & RQF_AST_OWEUPC) {
			atomic_clear_int(&mycpu->gd_reqflags,
					 RQF_AST_OWEUPC);
			addupc_task(p, p->p_prof.pr_addr,
				    p->p_prof.pr_ticks);
		}
		goto out;
		/*
		 * The following two traps can happen in
		 * vm86 mode, and, if so, we want to handle
		 * them specially.
		 */
	case T_PROTFLT:		/* general protection fault */
	case T_STKFLT:		/* stack fault */
#if 0
		if (frame->tf_eflags & PSL_VM) {
			i = vm86_emulate((struct vm86frame *)frame);
			if (i == 0)
				goto out;
			break;
		}
#endif
		i = SIGBUS;
		ucode = (type == T_PROTFLT) ? BUS_OBJERR : BUS_ADRERR;
		break;

	case T_SEGNPFLT:	/* segment not present fault */
		i = SIGBUS;
		ucode = BUS_ADRERR;
		break;

	case T_TSSFLT:		/* invalid TSS fault */
	case T_DOUBLEFLT:	/* double fault */
	default:		/* unknown trap */
		i = SIGBUS;
		ucode = code + BUS_SEGM_FAULT ;	/* XXX: ???*/
		break;
	case T_PAGEFLT:		/* page fault */
		MAKEMPSAFE(have_mplock);
		i = trap_pfault(frame, TRUE, eva);
		if (i == -1)
			goto out;
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
		if (i == -2)
			goto restart;
#endif
		if (i == 0)
			goto out;

		ucode = T_PAGEFLT;
		break;

	case T_DIVIDE:		/* integer divide fault */
		ucode = FPE_INTDIV;
		i = SIGFPE;
		break;

#if NISA > 0
	case T_NMI:
		MAKEMPSAFE(have_mplock);
		/* machine/parity/power fail/"kitchen sink" faults */
		if (isa_nmi(code) == 0) {
#ifdef DDB
			/*
			 * NMI can be hooked up to a pushbutton
			 * for debugging.
			 */
			if (ddb_on_nmi) {
				kprintf ("NMI ... going to debugger\n");
				kdb_trap (type, 0, frame);
			}
#endif /* DDB */
			goto out2;
		} else if (panic_on_nmi)
			panic("NMI indicates hardware failure");
		break;
#endif /* NISA > 0 */
	case T_OFLOW:		/* integer overflow fault */
		ucode = FPE_INTOVF;
		i = SIGFPE;
		break;

	case T_BOUND:		/* bounds check fault */
		ucode = FPE_FLTSUB;
		i = SIGFPE;
		break;

	case T_DNA:
		/*
		 * Virtual kernel intercept - pass the DNA exception
		 * to the (emulated) virtual kernel if it asked to handle
		 * it.  This occurs when the virtual kernel is holding
		 * onto the FP context for a different emulated
		 * process than the one currently running.
		 *
		 * We must still call npxdna() since we may have
		 * saved FP state that the (emulated) virtual kernel
		 * needs to hand over to a different emulated process.
		 */
		if (lp->lwp_vkernel && lp->lwp_vkernel->ve &&
		    (td->td_pcb->pcb_flags & FP_VIRTFP)
		) {
			npxdna(frame);
			break;
		}

		/*
		 * The kernel may have switched out the FP unit's
		 * state, causing the user process to take a fault
		 * when it tries to use the FP unit.  Restore the
		 * state here.
		 */
		if (npxdna(frame))
			goto out;
		if (!pmath_emulate) {
			i = SIGFPE;
			ucode = FPE_FPU_NP_TRAP;
			break;
		}
		i = (*pmath_emulate)(frame);
		if (i == 0) {
			if (!(frame->tf_eflags & PSL_T))
				goto out2;
			frame->tf_eflags &= ~PSL_T;
			i = SIGTRAP;
		}
		/* else ucode = emulator_only_knows() XXX */
		break;

	case T_FPOPFLT:		/* FPU operand fetch fault */
		ucode = ILL_COPROC;
		i = SIGILL;
		break;

	case T_XMMFLT:		/* SIMD floating-point exception */
		ucode = 0; /* XXX */
		i = SIGFPE;
		break;
	}
	/*
	 * Virtual kernel intercept - if the fault is directly related to a
	 * VM context managed by a virtual kernel then let the virtual kernel
	 * handle it.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		vkernel_trap(lp, frame);
		goto out;
	}

	/*
	 * Translate fault for emulators (e.g. Linux)
	 */
	if (*p->p_sysent->sv_transtrap)
		i = (*p->p_sysent->sv_transtrap)(i, type);

	MAKEMPSAFE(have_mplock);
	trapsignal(lp, i, ucode);

#ifdef DEBUG
	if (type <= MAX_TRAP_MSG) {
		uprintf("fatal process exception: %s",
			trap_msg[type]);
		if ((type == T_PAGEFLT) || (type == T_PROTFLT))
			uprintf(", fault VA = 0x%lx", (u_long)eva);
		uprintf("\n");
	}
#endif
out:
	userret(lp, frame, sticks);
	userexit(lp);
out2:
	;
#ifdef SMP
	if (have_mplock)
		rel_mplock();
#endif
	KTR_LOG(kernentry_trap_ret, lp->lwp_proc->p_pid, lp->lwp_tid);
#ifdef INVARIANTS
	KASSERT(crit_count == td->td_critcount,
		("trap: critical section count mismatch! %d/%d",
		crit_count, td->td_pri));
	KASSERT(curstop == td->td_toks_stop,
		("trap: extra tokens held after trap! %zd/%zd",
		curstop - &td->td_toks_base,
		td->td_toks_stop - &td->td_toks_base));
#endif
}
void
kern_trap(struct trapframe *frame)
{
	struct globaldata *gd = mycpu;
	struct thread *td = gd->gd_curthread;
	struct lwp *lp;
	struct proc *p;
	int i = 0, ucode = 0, type, code;
#ifdef SMP
	int have_mplock = 0;
#endif
#ifdef INVARIANTS
	int crit_count = td->td_critcount;
	lwkt_tokref_t curstop = td->td_toks_stop;
#endif
	vm_offset_t eva;

	lp = td->td_lwp;
	p = td->td_proc;

	if (frame->tf_trapno == T_PAGEFLT)
		eva = frame->tf_err;
	else
		eva = 0;

#ifdef DDB
	if (db_active) {
		++gd->gd_trap_nesting_level;
		MAKEMPSAFE(have_mplock);
		trap_fatal(frame, FALSE, eva);
		--gd->gd_trap_nesting_level;
		goto out2;
	}
#endif

	type = frame->tf_trapno;
	code = frame->tf_err;

	switch (type) {
	case T_PAGEFLT:		/* page fault */
		MAKEMPSAFE(have_mplock);
		trap_pfault(frame, FALSE, eva);
		goto out2;

	case T_DNA:
		/*
		 * The kernel may be using npx for copying or other
		 * purposes.
		 */
		panic("kernel NPX should not happen");
		break;

	case T_PROTFLT:		/* general protection fault */
	case T_SEGNPFLT:	/* segment not present fault */
		/*
		 * Invalid segment selectors and out of bounds
		 * %eip's and %esp's can be set up in user mode.
		 * This causes a fault in kernel mode when the
		 * kernel tries to return to user mode.  We want
		 * to get this fault so that we can fix the
		 * problem here and not have to check all the
		 * selectors and pointers when the user changes
		 * them.
		 */
		if (mycpu->gd_intr_nesting_level == 0) {
			if (td->td_pcb->pcb_onfault) {
				frame->tf_eip =
				    (register_t)td->td_pcb->pcb_onfault;
				goto out2;
			}
		}
		break;

	case T_TSSFLT:
		/*
		 * PSL_NT can be set in user mode and isn't cleared
		 * automatically when the kernel is entered.  This
		 * causes a TSS fault when the kernel attempts to
		 * `iret' because the TSS link is uninitialized.  We
		 * want to get this fault so that we can fix the
		 * problem here and not every time the kernel is
		 * entered.
		 */
		if (frame->tf_eflags & PSL_NT) {
			frame->tf_eflags &= ~PSL_NT;
			goto out2;
		}
		break;
	case T_TRCTRAP:	 /* trace trap */
#if 0
		if (frame->tf_eip == (int)IDTVEC(syscall)) {
			/*
			 * We've just entered system mode via the
			 * syscall lcall.  Continue single stepping
			 * silently until the syscall handler has
			 * saved the flags.
			 */
			goto out2;
		}
		if (frame->tf_eip == (int)IDTVEC(syscall) + 1) {
			/*
			 * The syscall handler has now saved the
			 * flags.  Stop single stepping it.
			 */
			frame->tf_eflags &= ~PSL_T;
			goto out2;
		}
#endif
#if 0
		/*
		 * Ignore debug register trace traps due to
		 * accesses in the user's address space, which
		 * can happen under several conditions such as
		 * if a user sets a watchpoint on a buffer and
		 * then passes that buffer to a system call.
		 * We still want to get TRCTRAPS for addresses
		 * in kernel space because that is useful when
		 * debugging the kernel.
		 */
		if (user_dbreg_trap()) {
			/*
			 * Reset breakpoint bits because the
			 * processor doesn't.
			 */
			load_dr6(rdr6() & 0xfffffff0);
			goto out2;
		}
#endif
		/*
		 * Fall through (TRCTRAP kernel mode, kernel address)
		 */
	case T_BPTFLT:
		/*
		 * If DDB is enabled, let it handle the debugger trap.
		 * Otherwise, debugger traps "can't happen".
		 */
#ifdef DDB
		MAKEMPSAFE(have_mplock);
		if (kdb_trap (type, 0, frame))
			goto out2;
#endif
		break;

	case T_NMI:
		MAKEMPSAFE(have_mplock);
		trap_fatal(frame, FALSE, eva);
		goto out2;

	default:
		MAKEMPSAFE(have_mplock);
		trap_fatal(frame, FALSE, eva);
		goto out2;
	}
	/*
	 * Ignore this trap generated from a spurious SIGTRAP.
	 *
	 * single stepping in / syscalls leads to spurious / SIGTRAP
	 *	so ignore
	 *
	 * Haiku (c) 2007 Simon 'corecode' Schubert
	 */
	goto out2;

	/*
	 * Translate fault for emulators (e.g. Linux)
	 */
	if (*p->p_sysent->sv_transtrap)
		i = (*p->p_sysent->sv_transtrap)(i, type);

	MAKEMPSAFE(have_mplock);
	trapsignal(lp, i, ucode);

#ifdef DEBUG
	if (type <= MAX_TRAP_MSG) {
		uprintf("fatal process exception: %s",
			trap_msg[type]);
		if ((type == T_PAGEFLT) || (type == T_PROTFLT))
			uprintf(", fault VA = 0x%lx", (u_long)eva);
		uprintf("\n");
	}
#endif
out2:
	;
#ifdef SMP
	if (have_mplock)
		rel_mplock();
#endif
#ifdef INVARIANTS
	KASSERT(crit_count == td->td_critcount,
		("trap: critical section count mismatch! %d/%d",
		crit_count, td->td_pri));
	KASSERT(curstop == td->td_toks_stop,
		("trap: extra tokens held after trap! %zd/%zd",
		curstop - &td->td_toks_base,
		td->td_toks_stop - &td->td_toks_base));
#endif
}
int
trap_pfault(struct trapframe *frame, int usermode, vm_offset_t eva)
{
	vm_offset_t va;
	struct vmspace *vm = NULL;
	vm_map_t map = 0;
	int rv = 0;
	int fault_flags;
	vm_prot_t ftype;
	thread_t td = curthread;
	struct lwp *lp = td->td_lwp;

	va = trunc_page(eva);
	if (usermode == FALSE) {
		/*
		 * This is a fault on kernel virtual memory.
		 */
		map = &kernel_map;
	} else {
		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL. If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */
		if (lp != NULL)
			vm = lp->lwp_vmspace;
		if (vm == NULL)
			goto nogo;
		map = &vm->vm_map;
	}

	if (frame->tf_xflags & PGEX_W)
		ftype = VM_PROT_READ | VM_PROT_WRITE;
	else
		ftype = VM_PROT_READ;

	if (map != &kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		PHOLD(lp->lwp_proc);

		/*
		 * Issue fault
		 */
		fault_flags = 0;
		if (usermode)
			fault_flags |= VM_FAULT_BURST;
		if (ftype & VM_PROT_WRITE)
			fault_flags |= VM_FAULT_DIRTY;
		else
			fault_flags |= VM_FAULT_NORMAL;
		rv = vm_fault(map, va, ftype, fault_flags);

		PRELE(lp->lwp_proc);
	} else {
		/*
		 * Don't have to worry about process locking or stacks
		 * in the kernel.
		 */
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	}

	if (rv == KERN_SUCCESS)
		return (0);
nogo:
	if (!usermode) {
		if (td->td_gd->gd_intr_nesting_level == 0 &&
		    td->td_pcb->pcb_onfault) {
			frame->tf_eip = (register_t)td->td_pcb->pcb_onfault;
			return (0);
		}
		trap_fatal(frame, usermode, eva);
		return (-1);
	}
	return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}
static void
trap_fatal(struct trapframe *frame, int usermode, vm_offset_t eva)
{
	int code, type, ss, esp;

	code = frame->tf_xflags;
	type = frame->tf_trapno;

	if (type <= MAX_TRAP_MSG) {
		kprintf("\n\nFatal trap %d: %s while in %s mode\n",
			type, trap_msg[type],
			(usermode ? "user" : "kernel"));
	}
#ifdef SMP
	/* two separate prints in case of a trap on an unmapped page */
	kprintf("cpuid = %d\n", mycpu->gd_cpuid);
#endif
	if (type == T_PAGEFLT) {
		kprintf("fault virtual address	= %p\n", (void *)eva);
		kprintf("fault code		= %s %s, %s\n",
			usermode ? "user" : "supervisor",
			code & PGEX_W ? "write" : "read",
			code & PGEX_P ? "protection violation" : "page not present");
	}
	kprintf("instruction pointer	= 0x%x:0x%x\n",
		frame->tf_cs & 0xffff, frame->tf_eip);
	if (usermode) {
		ss = frame->tf_ss & 0xffff;
		esp = frame->tf_esp;
	} else {
		ss = GSEL(GDATA_SEL, SEL_KPL);
		esp = (int)&frame->tf_esp;
	}
	kprintf("stack pointer	        = 0x%x:0x%x\n", ss, esp);
	kprintf("frame pointer	        = 0x%x:0x%x\n", ss, frame->tf_ebp);
	kprintf("processor eflags	= ");
	if (frame->tf_eflags & PSL_T)
		kprintf("trace trap, ");
	if (frame->tf_eflags & PSL_I)
		kprintf("interrupt enabled, ");
	if (frame->tf_eflags & PSL_NT)
		kprintf("nested task, ");
	if (frame->tf_eflags & PSL_RF)
		kprintf("resume, ");
	if (frame->tf_eflags & PSL_VM)
		kprintf("vm86, ");
	kprintf("IOPL = %d\n", (frame->tf_eflags & PSL_IOPL) >> 12);
	kprintf("current process		= ");
	if (curproc) {
		kprintf("%lu (%s)\n",
		    (u_long)curproc->p_pid, curproc->p_comm ?
		    curproc->p_comm : "");
	} else {
		kprintf("Idle\n");
	}
	kprintf("current thread          = pri %d ", curthread->td_pri);
	if (curthread->td_critcount)
		kprintf("(CRIT)");
	kprintf("\n");
#ifdef SMP
/**
 *  XXX FIXME:
 *	we probably SHOULD have stopped the other CPUs before now!
 *	another CPU COULD have been touching cpl at this moment...
 */
	kprintf(" <- SMP: XXX");
#endif
	kprintf("\n");

#ifdef DDB
	if ((debugger_on_panic || db_active) && kdb_trap(type, code, frame))
		return;
#endif
	kprintf("trap number		= %d\n", type);
	if (type <= MAX_TRAP_MSG)
		panic("%s", trap_msg[type]);
	else
		panic("unknown/reserved trap");
}
/*
 * Double fault handler. Called when a fault occurs while writing
 * a frame for a trap/exception onto the stack. This usually occurs
 * when the stack overflows (such is the case with infinite recursion,
 * for example).
 *
 * XXX Note that the current PTD gets replaced by IdlePTD when the
 * task switch occurs. This means that the stack that was active at
 * the time of the double fault is not available at <kstack> unless
 * the machine was idle when the double fault occurred. The downside
 * of this is that "trace <ebp>" in ddb won't work.
 */
void
dblfault_handler(void)
{
	struct mdglobaldata *gd = mdcpu;

	kprintf("\nFatal double fault:\n");
	kprintf("eip = 0x%x\n", gd->gd_common_tss.tss_eip);
	kprintf("esp = 0x%x\n", gd->gd_common_tss.tss_esp);
	kprintf("ebp = 0x%x\n", gd->gd_common_tss.tss_ebp);
#ifdef SMP
	/* two separate prints in case of a trap on an unmapped page */
	kprintf("cpuid = %d\n", mycpu->gd_cpuid);
#endif
	panic("double fault");
}
/*
 *	syscall2 -	MP aware system call request C handler
 *
 *	A system call is essentially treated as a trap except that the
 *	MP lock is not held on entry or return.  We are responsible for
 *	obtaining the MP lock if necessary and for handling ASTs
 *	(e.g. a task switch) prior to return.
 *
 *	MPSAFE
 */
void
syscall2(struct trapframe *frame)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;
	caddr_t params;
	struct sysent *callp;
	register_t orig_tf_eflags;
	int sticks;
	int error;
	int narg;
#ifdef INVARIANTS
	int crit_count = td->td_critcount;
#endif
#ifdef SMP
	int have_mplock = 0;
#endif
	u_int code;
	union sysunion args;

	KTR_LOG(kernentry_syscall, lp->lwp_proc->p_pid, lp->lwp_tid,
		frame->tf_eax);

	userenter(td, p);	/* lazy raise our priority */

	/*
	 * Misc
	 */
	sticks = (int)td->td_sticks;
	orig_tf_eflags = frame->tf_eflags;
	/*
	 * Virtual kernel intercept - if a VM context managed by a virtual
	 * kernel issues a system call the virtual kernel handles it, not us.
	 * Restore the virtual kernel context and return from its system
	 * call.  The current frame is copied out to the virtual kernel.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		vkernel_trap(lp, frame);
		error = EJUSTRETURN;
		goto out;
	}

	/*
	 * Get the system call parameters and account for time
	 */
	lp->lwp_md.md_regs = frame;
	params = (caddr_t)frame->tf_esp + sizeof(int);
	code = frame->tf_eax;
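
	/*
	 * Illustrative user stack layout at syscall entry (int $0x80 via
	 * the usual libc stub; sketch, offsets assume 32 bit arguments):
	 *
	 *	tf_esp + 0:	return address into the libc stub
	 *	tf_esp + 4:	argument 1	<-- 'params' points here
	 *	tf_esp + 8:	argument 2
	 *	...
	 */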
	if (p->p_sysent->sv_prepsyscall) {
		(*p->p_sysent->sv_prepsyscall)(
			frame, (int *)(&args.nosys.sysmsg + 1),
			&code, &params);
	} else {
		/*
		 * Need to check if this is a 32 bit or 64 bit syscall.
		 * fuword is MP aware.
		 */
		if (code == SYS_syscall) {
			/*
			 * Code is first argument, followed by actual args.
			 */
			code = fuword(params);
			params += sizeof(int);
		} else if (code == SYS___syscall) {
			/*
			 * Like syscall, but code is a quad, so as to maintain
			 * quad alignment for the rest of the arguments.
			 */
			code = fuword(params);
			params += sizeof(quad_t);
		}
	}

	code &= p->p_sysent->sv_mask;
	if (code >= p->p_sysent->sv_size)
		callp = &p->p_sysent->sv_table[0];
	else
		callp = &p->p_sysent->sv_table[code];
	narg = callp->sy_narg & SYF_ARGMASK;

	/*
	 * copyin is MP aware, but the tracing code is not
	 */
	if (narg && params) {
		error = copyin(params, (caddr_t)(&args.nosys.sysmsg + 1),
				narg * sizeof(register_t));
		if (error) {
#ifdef KTRACE
			if (KTRPOINT(td, KTR_SYSCALL)) {
				MAKEMPSAFE(have_mplock);

				ktrsyscall(lp, code, narg,
					(void *)(&args.nosys.sysmsg + 1));
			}
#endif
			goto bad;
		}
	}

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSCALL)) {
		MAKEMPSAFE(have_mplock);
		ktrsyscall(lp, code, narg, (void *)(&args.nosys.sysmsg + 1));
	}
#endif
	/*
	 * For traditional syscall code edx is left untouched when 32 bit
	 * results are returned.  Since edx is loaded from fds[1] when the
	 * system call returns we pre-set it here.
	 */
	args.sysmsg_fds[0] = 0;
	args.sysmsg_fds[1] = frame->tf_edx;
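
	/*
	 * Example (illustrative): a call returning a 64 bit result, such
	 * as lseek(2)'s off_t, fills both halves; the low 32 bits return
	 * to userland in %eax from sysmsg_fds[0] and the high 32 bits in
	 * %edx from sysmsg_fds[1].
	 */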
	/*
	 * The syscall might manipulate the trap frame. If it does it
	 * will probably return EJUSTRETURN.
	 */
	args.sysmsg_frame = frame;

	STOPEVENT(p, S_SCE, narg);	/* MP aware */

	/*
	 * NOTE: All system calls run MPSAFE now.  The system call itself
	 *	 is responsible for getting the MP lock.
	 */
	error = (*callp->sy_call)(&args);

#if 0
	kprintf("system call %d returned %d\n", code, error);
#endif
out:
	/*
	 * MP SAFE (we may or may not have the MP lock at this point)
	 */
	switch (error) {
	case 0:
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		lp = curthread->td_lwp;
		frame->tf_eax = args.sysmsg_fds[0];
		frame->tf_edx = args.sysmsg_fds[1];
		frame->tf_eflags &= ~PSL_C;
		break;
	case ERESTART:
		/*
		 * Reconstruct pc, assuming lcall $X,y is 7 bytes,
		 * int 0x80 is 2 bytes. We saved this in tf_err.
		 */
		frame->tf_eip -= frame->tf_err;
		break;
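		/*
		 * Example (illustrative): for an "int $0x80" entry tf_err
		 * holds 2, so tf_eip -= 2 backs the pc up over the two byte
		 * instruction and the syscall is re-executed on return to
		 * userland.
		 */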
	case EJUSTRETURN:
		break;
	case EASYNC:
		panic("Unexpected EASYNC return value (for now)");
	default:
bad:
		if (p->p_sysent->sv_errsize) {
			if (error >= p->p_sysent->sv_errsize)
				error = -1;	/* XXX */
			else
				error = p->p_sysent->sv_errtbl[error];
		}
		frame->tf_eax = error;
		frame->tf_eflags |= PSL_C;
		break;
	}
	/*
	 * Traced syscall.  trapsignal() is not MP aware.
	 */
	if ((orig_tf_eflags & PSL_T) /*&& !(orig_tf_eflags & PSL_VM)*/) {
		MAKEMPSAFE(have_mplock);
		frame->tf_eflags &= ~PSL_T;
		trapsignal(lp, SIGTRAP, TRAP_TRACE);
	}

	/*
	 * Handle reschedule and other end-of-syscall issues
	 */
	userret(lp, frame, sticks);

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSRET)) {
		MAKEMPSAFE(have_mplock);
		ktrsysret(lp, code, error, args.sysmsg_result);
	}
#endif

	/*
	 * This works because errno is findable through the
	 * register set.  If we ever support an emulation where this
	 * is not the case, this code will need to be revisited.
	 */
	STOPEVENT(p, S_SCX, code);

	userexit(lp);
#ifdef SMP
	/*
	 * Release the MP lock if we had to get it
	 */
	if (have_mplock)
		rel_mplock();
#endif
	KTR_LOG(kernentry_syscall_ret, lp->lwp_proc->p_pid, lp->lwp_tid, error);
#ifdef INVARIANTS
	KASSERT(crit_count == td->td_critcount,
		("syscall: critical section count mismatch! %d/%d",
		crit_count, td->td_pri));
	KASSERT(&td->td_toks_base == td->td_toks_stop,
		("syscall: extra tokens held after trap! %zd",
		td->td_toks_stop - &td->td_toks_base));
#endif
}
/*
 * NOTE: mplock not held at any point
 */
void
fork_return(struct lwp *lp, struct trapframe *frame)
{
	frame->tf_eax = 0;		/* Child returns zero */
	frame->tf_eflags &= ~PSL_C;	/* success */
	frame->tf_edx = 1;

	generic_lwp_return(lp, frame);
	KTR_LOG(kernentry_fork_ret, lp->lwp_proc->p_pid, lp->lwp_tid);
}
/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.
 *
 * This code will return back into the fork trampoline code which then
 * runs doreti.
 *
 * NOTE: The mplock is not held at any point.
 */
void
generic_lwp_return(struct lwp *lp, struct trapframe *frame)
{
	struct proc *p = lp->lwp_proc;
	/*
	 * Newly forked processes are given a kernel priority.  We have to
	 * adjust the priority to a normal user priority and fake entry
	 * into the kernel (call userenter()) to install a passive release
	 * function just in case userret() decides to stop the process.  This
	 * can occur when ^Z races a fork.  If we do not install the passive
	 * release function the current process designation will not be
	 * released when the thread goes to sleep.
	 */
	lwkt_setpri_self(TDPRI_USER_NORM);
	userenter(lp->lwp_thread, p);
	userret(lp, frame, 0);
#ifdef KTRACE
	if (KTRPOINT(lp->lwp_thread, KTR_SYSRET))
		ktrsysret(lp, SYS_fork, 0, 0);
#endif
	p->p_flag |= P_PASSIVE_ACQ;
	userexit(lp);
	p->p_flag &= ~P_PASSIVE_ACQ;
}
/*
 * doreti has turned into this.  The frame is directly on the stack.  We
 * pull everything else we need (fpu and tls context) from the current
 * thread.
 *
 * Note on fpu interactions: In a virtual kernel, the fpu context for
 * an emulated user mode process is not shared with the virtual kernel's
 * fpu context, so we only have to 'stack' fpu contexts within the virtual
 * kernel itself, and not even then since the signal() contexts that we care
 * about save and restore the FPU state (I think anyhow).
 *
 * vmspace_ctl() returns an error only if it had problems installing the
 * context we supplied or problems copying data to/from our VM space.
 */
void
go_user(struct intrframe *frame)
{
	struct trapframe *tf = (void *)&frame->if_gs;
	int r;

	/*
	 * Interrupts may be disabled on entry, make sure all signals
	 * can be received before beginning our loop.
	 */
	sigsetmask(0);

	/*
	 * Switch to the current simulated user process, then call
	 * user_trap() when we break out of it (usually due to a signal).
	 */
	for (;;) {
		/*
		 * Tell the real kernel whether it is ok to use the FP
		 * unit or not.
		 *
		 * The critical section is required to prevent an interrupt
		 * from causing a preemptive task switch and changing
		 * the FP state.
		 */
		crit_enter();
		if (mdcpu->gd_npxthread == curthread) {
			tf->tf_xflags &= ~PGEX_FPFAULT;
		} else {
			tf->tf_xflags |= PGEX_FPFAULT;
		}

		/*
		 * Run emulated user process context.  This call interlocks
		 * with new mailbox signals.
		 *
		 * Set PGEX_U unconditionally, indicating a user frame (the
		 * bit is normally set only by T_PAGEFLT).
		 */
		r = vmspace_ctl(&curproc->p_vmspace->vm_pmap, VMSPACE_CTL_RUN,
				tf, &curthread->td_savevext);
		crit_exit();
		frame->if_xflags |= PGEX_U;
#if 0
		kprintf("GO USER %d trap %d EVA %08x EIP %08x ESP %08x XFLAGS %02x/%02x\n",
			r, tf->tf_trapno, tf->tf_err, tf->tf_eip, tf->tf_esp,
			tf->tf_xflags, frame->if_xflags);
#endif
		if (r < 0) {
			if (errno != EINTR)
				panic("vmspace_ctl failed error %d", errno);
		} else {
			if (tf->tf_trapno) {
				user_trap(tf);
			}
		}
		if (mycpu->gd_reqflags & RQF_AST_MASK) {
			tf->tf_trapno = T_ASTFLT;
			user_trap(tf);
		}
		tf->tf_trapno = 0;
	}
}
/*
 * If PGEX_FPFAULT is set then set FP_VIRTFP in the PCB to force a T_DNA
 * fault (which is then passed back to the virtual kernel) if an attempt is
 * made to use the FP unit.
 *
 * XXX this is a fairly big hack.
 */
void
set_vkernel_fp(struct trapframe *frame)
{
	struct thread *td = curthread;

	if (frame->tf_xflags & PGEX_FPFAULT) {
		td->td_pcb->pcb_flags |= FP_VIRTFP;
		if (mdcpu->gd_npxthread == td)
			npxexit();
	} else {
		td->td_pcb->pcb_flags &= ~FP_VIRTFP;
	}
}
/*
 * Called from vkernel_trap() to fixup the vkernel's syscall
 * frame for vmspace_ctl() return.
 */
void
cpu_vkernel_trap(struct trapframe *frame, int error)
{
	frame->tf_eax = error;
	if (error)
		frame->tf_eflags |= PSL_C;
	else
		frame->tf_eflags &= ~PSL_C;
}