/*
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $
 * $DragonFly: src/sys/platform/vkernel/i386/trap.c,v 1.17 2007/02/16 23:11:40 corecode Exp $
 */

/*
 * 386 Trap and System call handling
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pioctl.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vmmeter.h>
#include <sys/malloc.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#include <sys/upcall.h>
#include <sys/vkernel.h>
#include <sys/sysproto.h>
#include <sys/sysunion.h>
#include <sys/vmspace.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>

#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <machine/tss.h>
#include <machine/globaldata.h>

#include <machine/vm86.h>

#include <sys/msgport2.h>
#include <sys/thread2.h>

#ifdef SMP
#define MAKEMPSAFE(have_mplock)			\
	if (have_mplock == 0) {			\
		get_mplock();			\
		have_mplock = 1;		\
	}
#else
#define MAKEMPSAFE(have_mplock)
#endif
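
/*
 * Usage sketch (illustrative, not additional source): a handler declares
 * "int have_mplock = 0;", invokes MAKEMPSAFE(have_mplock) on any path
 * that needs the Big Giant Lock, and on exit releases the lock only if
 * it was actually taken:
 *
 *	MAKEMPSAFE(have_mplock);	idempotent acquire
 *	...
 *	if (have_mplock)
 *		rel_mplock();
 */
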
int (*pmath_emulate) (struct trapframe *);

extern int trapwrite (unsigned addr);

static int trap_pfault (struct trapframe *, int, vm_offset_t);
static void trap_fatal (struct trapframe *, int, vm_offset_t);
void dblfault_handler (void);

extern inthand_t IDTVEC(syscall);

#define MAX_TRAP_MSG		28

static char *trap_msg[] = {
	"",					/*  0 unused */
	"privileged instruction fault",		/*  1 T_PRIVINFLT */
	"",					/*  2 unused */
	"breakpoint instruction fault",		/*  3 T_BPTFLT */
	"",					/*  4 unused */
	"",					/*  5 unused */
	"arithmetic trap",			/*  6 T_ARITHTRAP */
	"system forced exception",		/*  7 T_ASTFLT */
	"",					/*  8 unused */
	"general protection fault",		/*  9 T_PROTFLT */
	"trace trap",				/* 10 T_TRCTRAP */
	"",					/* 11 unused */
	"page fault",				/* 12 T_PAGEFLT */
	"",					/* 13 unused */
	"alignment fault",			/* 14 T_ALIGNFLT */
	"",					/* 15 unused */
	"",					/* 16 unused */
	"",					/* 17 unused */
	"integer divide fault",			/* 18 T_DIVIDE */
	"non-maskable interrupt trap",		/* 19 T_NMI */
	"overflow trap",			/* 20 T_OFLOW */
	"FPU bounds check fault",		/* 21 T_BOUND */
	"FPU device not available",		/* 22 T_DNA */
	"double fault",				/* 23 T_DOUBLEFLT */
	"FPU operand fetch fault",		/* 24 T_FPOPFLT */
	"invalid TSS fault",			/* 25 T_TSSFLT */
	"segment not present fault",		/* 26 T_SEGNPFLT */
	"stack fault",				/* 27 T_STKFLT */
	"machine check trap",			/* 28 T_MCHK */
};

#ifdef DDB
static int ddb_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, ddb_on_nmi, CTLFLAG_RW,
	&ddb_on_nmi, 0, "Go to DDB on NMI");
#endif
static int panic_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
	&panic_on_nmi, 0, "Panic on NMI");
static int fast_release;
SYSCTL_INT(_machdep, OID_AUTO, fast_release, CTLFLAG_RW,
	&fast_release, 0, "Passive Release was optimal");
static int slow_release;
SYSCTL_INT(_machdep, OID_AUTO, slow_release, CTLFLAG_RW,
	&slow_release, 0, "Passive Release was nonoptimal");
static int syscall_mpsafe = 0;
SYSCTL_INT(_kern, OID_AUTO, syscall_mpsafe, CTLFLAG_RW,
	&syscall_mpsafe, 0, "Allow MPSAFE marked syscalls to run without BGL");
TUNABLE_INT("kern.syscall_mpsafe", &syscall_mpsafe);
static int trap_mpsafe = 0;
SYSCTL_INT(_kern, OID_AUTO, trap_mpsafe, CTLFLAG_RW,
	&trap_mpsafe, 0, "Allow traps to mostly run without the BGL");
TUNABLE_INT("kern.trap_mpsafe", &trap_mpsafe);
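
/*
 * Example (hypothetical session, not part of this file): the knobs above
 * can be flipped at runtime with sysctl(8), and the TUNABLE_INT() entries
 * can additionally be preset from the boot loader:
 *
 *	sysctl kern.trap_mpsafe=1
 *	echo 'kern.syscall_mpsafe="1"' >> /boot/loader.conf
 */
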
MALLOC_DEFINE(M_SYSMSG, "sysmsg", "sysmsg structure");
extern int max_sysmsg;

/*
 * Passive USER->KERNEL transition.  This only occurs if we block in the
 * kernel while still holding our userland priority.  We have to fixup our
 * priority in order to avoid potential deadlocks before we allow the system
 * to switch us to another thread.
 */
static void
passive_release(struct thread *td)
{
	struct lwp *lp = td->td_lwp;

	td->td_release = NULL;
	lwkt_setpri_self(TDPRI_KERN_USER);
	lp->lwp_proc->p_usched->release_curproc(lp);
}

/*
 * userenter() passively intercepts the thread switch function to increase
 * the thread priority from a user priority to a kernel priority, reducing
 * syscall and trap overhead for the case where no switch occurs.
 */
static __inline void
userenter(struct thread *curtd)
{
	curtd->td_release = passive_release;
}
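
/*
 * A minimal sketch (illustrative only) of how userenter(), userret() and
 * userexit() pair up in the kernel entry paths below (user_trap() and
 * syscall2()):
 *
 *	userenter(td);			   arm the passive release
 *	...service the trap or syscall...
 *	userret(lp, frame, sticks);	   signals, stops, upcalls
 *	userexit(lp);			   reacquire curproc, drop priority
 */
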
/*
 * Handle signals, upcalls, profiling, and other AST's and/or tasks that
 * must be completed before we can return to or try to return to userland.
 *
 * Note that td_sticks is a 64 bit quantity, but there's no point doing
 * 64 bit arithmetic on the delta calculation so the absolute tick values
 * are truncated to an integer.
 */
static void
userret(struct lwp *lp, struct trapframe *frame, int sticks)
{
	struct proc *p = lp->lwp_proc;
	int sig;

	/*
	 * Charge system time if profiling.  Note: times are in microseconds.
	 * This may do a copyout and block, so do it first even though it
	 * means some system time will be charged as user time.
	 */
	if (p->p_flag & P_PROFIL) {
		addupc_task(p, frame->tf_eip,
			(u_int)((int)lp->lwp_thread->td_sticks - sticks));
	}

	/*
	 * Block here if we are in a stopped state.
	 */
	if (p->p_stat == SSTOP) {
		get_mplock();
		tstop(p);
		rel_mplock();
	}

	/*
	 * Post any pending upcalls
	 */
	if (p->p_flag & P_UPCALLPEND) {
		get_mplock();
		p->p_flag &= ~P_UPCALLPEND;
		postupcall(lp);
		rel_mplock();
	}

	/*
	 * Post any pending signals
	 */
	if ((sig = CURSIG(lp)) != 0) {
		get_mplock();
		postsig(sig);
		rel_mplock();
	}

	/*
	 * block here if we are swapped out, but still process signals
	 * (such as SIGKILL).  proc0 (the swapin scheduler) is already
	 * aware of our situation, we do not have to wake it up.
	 */
	if (p->p_flag & P_SWAPPEDOUT) {
		get_mplock();
		p->p_flag |= P_SWAPWAIT;
		swapin_request();
		if (p->p_flag & P_SWAPWAIT)
			tsleep(p, PCATCH, "SWOUT", 0);
		p->p_flag &= ~P_SWAPWAIT;
		rel_mplock();
	}
}

/*
 * Cleanup from userenter and any passive release that might have occurred.
 * We must reclaim the current-process designation before we can return
 * to usermode.  We also handle both LWKT and USER reschedule requests.
 */
static __inline void
userexit(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;
	globaldata_t gd = td->td_gd;

	/*
	 * If a user reschedule is requested force a new process to be
	 * chosen by releasing the current process.  Our process will only
	 * be chosen again if it has a considerably better priority.
	 */
	if (user_resched_wanted())
		lp->lwp_proc->p_usched->release_curproc(lp);

	/*
	 * Handle a LWKT reschedule request first.  Since our passive release
	 * is still in place we do not have to do anything special.
	 */
	if (lwkt_resched_wanted())
		lwkt_switch();

	/*
	 * Acquire the current process designation for this user scheduler
	 * on this cpu.  This will also handle any user-reschedule requests.
	 */
	lp->lwp_proc->p_usched->acquire_curproc(lp);
	/* We may have switched cpus on acquisition */
	gd = td->td_gd;

	/*
	 * Reduce our priority in preparation for a return to userland.  If
	 * our passive release function was still in place, our priority was
	 * never raised and does not need to be reduced.
	 */
	if (td->td_release == NULL)
		lwkt_setpri_self(TDPRI_USER_NORM);
	td->td_release = NULL;

	/*
	 * After reducing our priority there might be other kernel-level
	 * LWKTs that now have a greater priority.  Run them as necessary.
	 * We don't have to worry about losing cpu to userland because
	 * we still control the current-process designation and we no longer
	 * have a passive release function installed.
	 */
	if (lwkt_checkpri_self())
		lwkt_switch();
}

/*
 * Exception, fault, and trap interface to the kernel.
 * This common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed.
 *
 * This function is also called from doreti in an interlock to handle ASTs.
 * For example:  hardwareint->INTROUTINE->(set ast)->doreti->trap
 *
 * NOTE!  We have to retrieve the fault address prior to obtaining the
 * MP lock because get_mplock() may switch out.  YYY cr2 really ought
 * to be retrieved by the assembly code, not here.
 *
 * XXX gd_trap_nesting_level currently prevents lwkt_switch() from panicking
 * if an attempt is made to switch from a fast interrupt or IPI.  This is
 * necessary to properly take fatal kernel traps on SMP machines if
 * get_mplock() has to block.
 */
void
user_trap(struct trapframe *frame)
{
	struct globaldata *gd = mycpu;
	struct thread *td = gd->gd_curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p;
	int sticks = 0;
	int i = 0, ucode = 0, type, code;
#ifdef SMP
	int have_mplock = 0;
#endif
#ifdef INVARIANTS
	int crit_count = td->td_pri & ~TDPRI_MASK;
#endif
	vm_offset_t eva;

	p = td->td_proc;

	/*
	 * This is a bad kludge to avoid changing the various trapframe
	 * structures.  Because we are enabled as a virtual kernel,
	 * the original tf_err field will be passed to us shifted 16
	 * over in the tf_trapno field for T_PAGEFLT.
	 */
	if (frame->tf_trapno == T_PAGEFLT)
		eva = frame->tf_err;
	else
		eva = 0;
#if 0
	kprintf("USER_TRAP AT %08x xflags %d trapno %d eva %08x\n",
		frame->tf_eip, frame->tf_xflags, frame->tf_trapno, eva);
#endif

	/*
	 * Everything coming from user mode runs through user_trap,
	 * including system calls.
	 */
	if (frame->tf_trapno == T_SYSCALL80) {
		syscall2(frame);
		return;
	}

#ifdef DDB
	if (db_active) {
		eva = (frame->tf_trapno == T_PAGEFLT ? rcr2() : 0);
		++gd->gd_trap_nesting_level;
		MAKEMPSAFE(have_mplock);
		trap_fatal(frame, TRUE, eva);
		--gd->gd_trap_nesting_level;
		goto out2;
	}
#endif

	++gd->gd_trap_nesting_level;
#ifdef SMP
	if (trap_mpsafe == 0)
		MAKEMPSAFE(have_mplock);
#endif
	--gd->gd_trap_nesting_level;

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
restart:
#endif
	type = frame->tf_trapno;
	code = frame->tf_err;

	userenter(td);

	sticks = (int)td->td_sticks;
	lp->lwp_md.md_regs = frame;

	switch (type) {
	case T_PRIVINFLT:	/* privileged instruction fault */
		ucode = type;
		i = SIGILL;
		break;

	case T_BPTFLT:		/* bpt instruction fault */
	case T_TRCTRAP:		/* trace trap */
		frame->tf_eflags &= ~PSL_T;
		i = SIGTRAP;
		break;

	case T_ARITHTRAP:	/* arithmetic trap */
		ucode = code;
		i = SIGFPE;
		break;

	case T_ASTFLT:		/* Allow process switch */
		mycpu->gd_cnt.v_soft++;
		if (mycpu->gd_reqflags & RQF_AST_OWEUPC) {
			atomic_clear_int_nonlocked(&mycpu->gd_reqflags,
					RQF_AST_OWEUPC);
			addupc_task(p, p->p_prof.pr_addr,
					p->p_prof.pr_ticks);
		}
		goto out;

		/*
		 * The following two traps can happen in
		 * vm86 mode, and, if so, we want to handle
		 * them specially.
		 */
	case T_PROTFLT:		/* general protection fault */
	case T_STKFLT:		/* stack fault */
		if (frame->tf_eflags & PSL_VM) {
			i = vm86_emulate((struct vm86frame *)frame);
			if (i == 0)
				goto out;
			break;
		}
		/* FALL THROUGH */

	case T_SEGNPFLT:	/* segment not present fault */
	case T_TSSFLT:		/* invalid TSS fault */
	case T_DOUBLEFLT:	/* double fault */
	default:
		ucode = code + BUS_SEGM_FAULT;
		i = SIGBUS;
		break;

	case T_PAGEFLT:		/* page fault */
		MAKEMPSAFE(have_mplock);
		i = trap_pfault(frame, TRUE, eva);
		if (i == -1)
			goto out;
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
		if (i == -2)
			goto restart;
#endif
		if (i == 0)
			goto out;

		ucode = T_PAGEFLT;
		break;
	case T_DIVIDE:		/* integer divide fault */
		ucode = FPE_INTDIV;
		i = SIGFPE;
		break;

#if NISA > 0
	case T_NMI:
		MAKEMPSAFE(have_mplock);
		/* machine/parity/power fail/"kitchen sink" faults */
		if (isa_nmi(code) == 0) {
#ifdef DDB
			/*
			 * NMI can be hooked up to a pushbutton
			 * for debugging.
			 */
			if (ddb_on_nmi) {
				kprintf ("NMI ... going to debugger\n");
				kdb_trap (type, 0, frame);
			}
#endif /* DDB */
			goto out2;
		} else if (panic_on_nmi)
			panic("NMI indicates hardware failure");
		break;
#endif /* NISA > 0 */

	case T_OFLOW:		/* integer overflow fault */
		ucode = FPE_INTOVF;
		i = SIGFPE;
		break;

	case T_BOUND:		/* bounds check fault */
		ucode = FPE_FLTSUB;
		i = SIGFPE;
		break;
	case T_DNA:
		/*
		 * Virtual kernel intercept - pass the DNA exception
		 * to the (emulated) virtual kernel if it asked to handle
		 * it.  This occurs when the virtual kernel is holding
		 * onto the FP context for a different emulated
		 * process than the one currently running.
		 *
		 * We must still call npxdna() since we may have
		 * saved FP state that the (emulated) virtual kernel
		 * needs to hand over to a different emulated process.
		 */
		if (p->p_vkernel && p->p_vkernel->vk_current &&
		    (td->td_pcb->pcb_flags & FP_VIRTFP)
		) {
			vkernel_trap(p, frame);
			break;
		}

		/*
		 * The kernel may have switched out the FP unit's
		 * state, causing the user process to take a fault
		 * when it tries to use the FP unit.  Restore the
		 * state here.
		 */
		if (npxdna(frame))
			goto out;
		if (!pmath_emulate) {
			i = SIGFPE;
			ucode = FPE_FPU_NP_TRAP;
			break;
		}
		i = (*pmath_emulate)(frame);
		if (i == 0) {
			if (!(frame->tf_eflags & PSL_T))
				goto out2;
			frame->tf_eflags &= ~PSL_T;
			i = SIGTRAP;
		}
		/* else ucode = emulator_only_knows() XXX */
		break;

	case T_FPOPFLT:		/* FPU operand fetch fault */
		ucode = T_FPOPFLT;
		i = SIGILL;
		break;

	case T_XMMFLT:		/* SIMD floating-point exception */
		ucode = 0; /* XXX */
		i = SIGFPE;
		break;
	}

	/*
	 * Virtual kernel intercept - if the fault is directly related to a
	 * VM context managed by a virtual kernel then let the virtual kernel
	 * handle it.
	 */
	if (p->p_vkernel && p->p_vkernel->vk_current) {
		vkernel_trap(p, frame);
		goto out;
	}

	/*
	 * Translate fault for emulators (e.g. Linux)
	 */
	if (*p->p_sysent->sv_transtrap)
		i = (*p->p_sysent->sv_transtrap)(i, type);

	MAKEMPSAFE(have_mplock);
	trapsignal(lp, i, ucode);

#ifdef DEBUG
	if (type <= MAX_TRAP_MSG) {
		uprintf("fatal process exception: %s",
			trap_msg[type]);
		if ((type == T_PAGEFLT) || (type == T_PROTFLT))
			uprintf(", fault VA = 0x%lx", (u_long)eva);
		uprintf("\n");
	}
#endif

out:
#ifdef SMP
	KASSERT(td->td_mpcount == have_mplock,
		("badmpcount trap/end from %p", (void *)frame->tf_eip));
#endif
	userret(lp, frame, sticks);
	userexit(lp);
out2:
	;
#ifdef SMP
	if (have_mplock)
		rel_mplock();
#endif
#ifdef INVARIANTS
	KASSERT(crit_count == (td->td_pri & ~TDPRI_MASK),
		("syscall: critical section count mismatch! %d/%d",
		crit_count / TDPRI_CRIT, td->td_pri / TDPRI_CRIT));
#endif
}

void
kern_trap(struct trapframe *frame)
{
	struct globaldata *gd = mycpu;
	struct thread *td = gd->gd_curthread;
	struct lwp *lp;
	struct proc *p;
	int i = 0, ucode = 0, type, code;
#ifdef SMP
	int have_mplock = 0;
#endif
#ifdef INVARIANTS
	int crit_count = td->td_pri & ~TDPRI_MASK;
#endif
	vm_offset_t eva;

	lp = td->td_lwp;
	p = td->td_proc;

	if (frame->tf_trapno == T_PAGEFLT)
		eva = frame->tf_err;
	else
		eva = 0;

#ifdef DDB
	if (db_active) {
		++gd->gd_trap_nesting_level;
		MAKEMPSAFE(have_mplock);
		trap_fatal(frame, FALSE, eva);
		--gd->gd_trap_nesting_level;
		goto out2;
	}
#endif

	++gd->gd_trap_nesting_level;
#ifdef SMP
	if (trap_mpsafe == 0)
		MAKEMPSAFE(have_mplock);
#endif
	--gd->gd_trap_nesting_level;

	type = frame->tf_trapno;
	code = frame->tf_err;

	switch (type) {
	case T_PAGEFLT:		/* page fault */
		MAKEMPSAFE(have_mplock);
		trap_pfault(frame, FALSE, eva);
		goto out2;

	case T_DNA:
		/*
		 * The kernel may be using npx for copying or other
		 * purposes.
		 */
		panic("kernel NPX should not happen");
		break;

	case T_PROTFLT:		/* general protection fault */
	case T_SEGNPFLT:	/* segment not present fault */
		/*
		 * Invalid segment selectors and out of bounds
		 * %eip's and %esp's can be set up in user mode.
		 * This causes a fault in kernel mode when the
		 * kernel tries to return to user mode.  We want
		 * to get this fault so that we can fix the
		 * problem here and not have to check all the
		 * selectors and pointers when the user changes
		 * them.
		 */
		if (mycpu->gd_intr_nesting_level == 0) {
			if (td->td_pcb->pcb_onfault) {
				frame->tf_eip =
				    (register_t)td->td_pcb->pcb_onfault;
				goto out2;
			}
		}
		break;

	case T_TSSFLT:
		/*
		 * PSL_NT can be set in user mode and isn't cleared
		 * automatically when the kernel is entered.  This
		 * causes a TSS fault when the kernel attempts to
		 * `iret' because the TSS link is uninitialized.  We
		 * want to get this fault so that we can fix the
		 * problem here and not every time the kernel is
		 * entered.
		 */
		if (frame->tf_eflags & PSL_NT) {
			frame->tf_eflags &= ~PSL_NT;
			goto out2;
		}
		break;
	case T_TRCTRAP:	 /* trace trap */
		if (frame->tf_eip == (int)IDTVEC(syscall)) {
			/*
			 * We've just entered system mode via the
			 * syscall lcall.  Continue single stepping
			 * silently until the syscall handler has
			 * saved the flags.
			 */
			goto out2;
		}
		if (frame->tf_eip == (int)IDTVEC(syscall) + 1) {
			/*
			 * The syscall handler has now saved the
			 * flags.  Stop single stepping it.
			 */
			frame->tf_eflags &= ~PSL_T;
			goto out2;
		}

		/*
		 * Ignore debug register trace traps due to
		 * accesses in the user's address space, which
		 * can happen under several conditions such as
		 * if a user sets a watchpoint on a buffer and
		 * then passes that buffer to a system call.
		 * We still want to get TRCTRAPS for addresses
		 * in kernel space because that is useful when
		 * debugging the kernel.
		 */
		if (user_dbreg_trap()) {
			/*
			 * Reset breakpoint bits because the
			 * processor doesn't
			 */
			load_dr6(rdr6() & 0xfffffff0);
			goto out2;
		}
		/*
		 * Fall through (TRCTRAP kernel mode, kernel address)
		 */
	case T_BPTFLT:
		/*
		 * If DDB is enabled, let it handle the debugger trap.
		 * Otherwise, debugger traps "can't happen".
		 */
#ifdef DDB
		MAKEMPSAFE(have_mplock);
		if (kdb_trap (type, 0, frame))
			goto out2;
#endif
		break;

	default:
		MAKEMPSAFE(have_mplock);
		trap_fatal(frame, FALSE, eva);
		goto out2;
	}

	/*
	 * Ignore this trap generated from a spurious SIGTRAP.
	 *
	 * single stepping in / syscalls leads to spurious / SIGTRAP
	 *	so ignore
	 *
	 * Haiku (c) 2007 Simon 'corecode' Schubert
	 */
	goto out2;

	/*
	 * Translate fault for emulators (e.g. Linux)
	 */
	if (*p->p_sysent->sv_transtrap)
		i = (*p->p_sysent->sv_transtrap)(i, type);

	MAKEMPSAFE(have_mplock);
	trapsignal(lp, i, ucode);

#ifdef DEBUG
	if (type <= MAX_TRAP_MSG) {
		uprintf("fatal process exception: %s",
			trap_msg[type]);
		if ((type == T_PAGEFLT) || (type == T_PROTFLT))
			uprintf(", fault VA = 0x%lx", (u_long)eva);
		uprintf("\n");
	}
#endif

out2:
	;
#ifdef SMP
	if (have_mplock)
		rel_mplock();
#endif
#ifdef INVARIANTS
	KASSERT(crit_count == (td->td_pri & ~TDPRI_MASK),
		("syscall: critical section count mismatch! %d/%d",
		crit_count / TDPRI_CRIT, td->td_pri / TDPRI_CRIT));
#endif
}

static int
trap_pfault(struct trapframe *frame, int usermode, vm_offset_t eva)
{
	vm_offset_t va;
	struct vmspace *vm = NULL;
	vm_map_t map = 0;
	int rv = 0;
	vm_prot_t ftype;
	thread_t td = curthread;
	struct proc *p = td->td_proc;

	va = trunc_page(eva);
	if (usermode == FALSE) {
		/*
		 * This is a fault on kernel virtual memory.
		 */
		map = &kernel_map;
	} else {
		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL. If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */
		if (p != NULL)
			vm = p->p_vmspace;

		if (vm == NULL)
			goto nogo;

		map = &vm->vm_map;
	}

	if (frame->tf_xflags & PGEX_W)
		ftype = VM_PROT_READ | VM_PROT_WRITE;
	else
		ftype = VM_PROT_READ;

	if (map != &kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		PHOLD(p);

		/*
		 * Grow the stack if necessary
		 */
		/* grow_stack returns false only if va falls into
		 * a growable stack region and the stack growth
		 * fails.  It returns true if va was not within
		 * a growable stack region, or if the stack
		 * growth succeeded.
		 */
		if (!grow_stack (p, va)) {
			rv = KERN_FAILURE;
			PRELE(p);
			goto nogo;
		}

		/* Fault in the user page: */
		rv = vm_fault(map, va, ftype,
			      (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
						      : VM_FAULT_NORMAL);

		PRELE(p);
	} else {
		/*
		 * Don't have to worry about process locking or stacks
		 * in the kernel.
		 */
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	}

	if (rv == KERN_SUCCESS)
		return (0);
nogo:
	if (!usermode) {
		if (td->td_gd->gd_intr_nesting_level == 0 &&
		    td->td_pcb->pcb_onfault) {
			frame->tf_eip = (register_t)td->td_pcb->pcb_onfault;
			return (0);
		}
		trap_fatal(frame, usermode, eva);
		return (-1);
	}
	return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}
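
/*
 * Note on the pcb_onfault recovery used above (a sketch of the existing
 * convention, not a new mechanism): routines such as copyin()/copyout()
 * record a recovery address in td_pcb->pcb_onfault before touching user
 * memory.  If the access faults and cannot be resolved, trap_pfault()
 * points tf_eip at that recovery code instead of panicking, and the copy
 * routine returns EFAULT to its caller.
 */
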
static void
trap_fatal(struct trapframe *frame, int usermode, vm_offset_t eva)
{
	int code, type, ss, esp;

	code = frame->tf_xflags;
	type = frame->tf_trapno;

	if (type <= MAX_TRAP_MSG) {
		kprintf("\n\nFatal trap %d: %s while in %s mode\n",
			type, trap_msg[type],
			(usermode ? "user" : "kernel"));
	}
#ifdef SMP
	/* three separate prints in case of a trap on an unmapped page */
	kprintf("mp_lock = %08x; ", mp_lock);
	kprintf("cpuid = %d; ", mycpu->gd_cpuid);
	kprintf("lapic.id = %08x\n", lapic.id);
#endif
	if (type == T_PAGEFLT) {
		kprintf("fault virtual address	= 0x%x\n", eva);
		kprintf("fault code		= %s %s, %s\n",
			usermode ? "user" : "supervisor",
			code & PGEX_W ? "write" : "read",
			code & PGEX_P ? "protection violation" : "page not present");
	}
	kprintf("instruction pointer	= 0x%x:0x%x\n",
		frame->tf_cs & 0xffff, frame->tf_eip);
	if (usermode) {
		ss = frame->tf_ss & 0xffff;
		esp = frame->tf_esp;
	} else {
		ss = GSEL(GDATA_SEL, SEL_KPL);
		esp = (int)&frame->tf_esp;
	}
	kprintf("stack pointer	        = 0x%x:0x%x\n", ss, esp);
	kprintf("frame pointer	        = 0x%x:0x%x\n", ss, frame->tf_ebp);
	kprintf("processor eflags	= ");
	if (frame->tf_eflags & PSL_T)
		kprintf("trace trap, ");
	if (frame->tf_eflags & PSL_I)
		kprintf("interrupt enabled, ");
	if (frame->tf_eflags & PSL_NT)
		kprintf("nested task, ");
	if (frame->tf_eflags & PSL_RF)
		kprintf("resume, ");
	if (frame->tf_eflags & PSL_VM)
		kprintf("vm86, ");
	kprintf("IOPL = %d\n", (frame->tf_eflags & PSL_IOPL) >> 12);
	kprintf("current process		= ");
	if (curproc) {
		kprintf("%lu (%s)\n",
		    (u_long)curproc->p_pid, curproc->p_comm ?
		    curproc->p_comm : "");
	} else {
		kprintf("Idle\n");
	}
	kprintf("current thread          = pri %d ", curthread->td_pri);
	if (curthread->td_pri >= TDPRI_CRIT)
		kprintf("(CRIT)");
	kprintf("\n");
#ifdef SMP
/**
 *  XXX FIXME:
 *	we probably SHOULD have stopped the other CPUs before now!
 *	another CPU COULD have been touching cpl at this moment...
 */
	kprintf(" <- SMP: XXX");
#endif
	kprintf("\n");

#ifdef DDB
	if ((debugger_on_panic || db_active) && kdb_trap(type, code, frame))
		return;
#endif
	kprintf("trap number		= %d\n", type);
	if (type <= MAX_TRAP_MSG)
		panic("%s", trap_msg[type]);
	else
		panic("unknown/reserved trap");
}

/*
 * Double fault handler. Called when a fault occurs while writing
 * a frame for a trap/exception onto the stack. This usually occurs
 * when the stack overflows (such is the case with infinite recursion,
 * for example).
 *
 * XXX Note that the current PTD gets replaced by IdlePTD when the
 * task switch occurs. This means that the stack that was active at
 * the time of the double fault is not available at <kstack> unless
 * the machine was idle when the double fault occurred.  The downside
 * of this is that "trace <ebp>" in ddb won't work.
 */
void
dblfault_handler(void)
{
	struct mdglobaldata *gd = mdcpu;

	kprintf("\nFatal double fault:\n");
	kprintf("eip = 0x%x\n", gd->gd_common_tss.tss_eip);
	kprintf("esp = 0x%x\n", gd->gd_common_tss.tss_esp);
	kprintf("ebp = 0x%x\n", gd->gd_common_tss.tss_ebp);
#ifdef SMP
	/* three separate prints in case of a trap on an unmapped page */
	kprintf("mp_lock = %08x; ", mp_lock);
	kprintf("cpuid = %d; ", mycpu->gd_cpuid);
	kprintf("lapic.id = %08x\n", lapic.id);
#endif
	panic("double fault");
}

/*
 * Compensate for 386 brain damage (missing URKR).
 * This is a little simpler than the pagefault handler in trap() because
 * the page tables have already been faulted in and high addresses
 * are thrown out early for other reasons.
 */
int
trapwrite(unsigned addr)
{
	struct proc *p;
	vm_offset_t va;
	struct vmspace *vm;
	int rv;

	va = trunc_page((vm_offset_t)addr);
	/*
	 * XXX - MAX is END.  Changed > to >= for temp. fix.
	 */
	if (va >= VM_MAX_USER_ADDRESS)
		return (1);

	p = curproc;
	vm = p->p_vmspace;

	PHOLD(p);

	if (!grow_stack (p, va)) {
		PRELE(p);
		return (1);
	}

	/*
	 * fault the data page
	 */
	rv = vm_fault(&vm->vm_map, va, VM_PROT_WRITE, VM_FAULT_DIRTY);

	PRELE(p);

	if (rv != KERN_SUCCESS)
		return 1;

	return (0);
}

/*
 * syscall2 -	MP aware system call request C handler
 *
 * A system call is essentially treated as a trap except that the
 * MP lock is not held on entry or return.  We are responsible for
 * obtaining the MP lock if necessary and for handling ASTs
 * (e.g. a task switch) prior to return.
 *
 * In general, only simple access and manipulation of curproc and
 * the current stack is allowed without having to hold MP lock.
 *
 * MPSAFE - note that large sections of this routine are run without
 *	    the MP lock.
 */
void
syscall2(struct trapframe *frame)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;
	caddr_t params;
	struct sysent *callp;
	register_t orig_tf_eflags;
	int sticks;
	int error;
	int narg;
#ifdef INVARIANTS
	int crit_count = td->td_pri & ~TDPRI_MASK;
#endif
#ifdef SMP
	int have_mplock = 0;
#endif
	u_int code;
	union sysunion args;

#ifdef SMP
	KASSERT(td->td_mpcount == 0,
		("badmpcount syscall2 from %p", (void *)frame->tf_eip));
	if (syscall_mpsafe == 0)
		MAKEMPSAFE(have_mplock);
#endif
	userenter(td);		/* lazy raise our priority */

	/*
	 * Misc
	 */
	sticks = (int)td->td_sticks;
	orig_tf_eflags = frame->tf_eflags;

	/*
	 * Virtual kernel intercept - if a VM context managed by a virtual
	 * kernel issues a system call the virtual kernel handles it, not us.
	 * Restore the virtual kernel context and return from its system
	 * call.  The current frame is copied out to the virtual kernel.
	 */
	if (p->p_vkernel && p->p_vkernel->vk_current) {
		error = vkernel_trap(p, frame);
		frame->tf_eax = error;
		if (error)
			frame->tf_eflags |= PSL_C;
		error = EJUSTRETURN;
		goto out;
	}

	/*
	 * Get the system call parameters and account for time
	 */
	lp->lwp_md.md_regs = frame;
	params = (caddr_t)frame->tf_esp + sizeof(int);
	code = frame->tf_eax;

	if (p->p_sysent->sv_prepsyscall) {
		/*
		 * The prep code is MP aware.
		 */
		(*p->p_sysent->sv_prepsyscall)(
			frame, (int *)(&args.nosys.sysmsg + 1),
			&code, &params);
	} else {
		/*
		 * Need to check if this is a 32 bit or 64 bit syscall.
		 * fuword is MP aware.
		 */
		if (code == SYS_syscall) {
			/*
			 * Code is first argument, followed by actual args.
			 */
			code = fuword(params);
			params += sizeof(int);
		} else if (code == SYS___syscall) {
			/*
			 * Like syscall, but code is a quad, so as to maintain
			 * quad alignment for the rest of the arguments.
			 */
			code = fuword(params);
			params += sizeof(quad_t);
		}
	}
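
	/*
	 * Illustration (userland view, not kernel code): the two indirect
	 * forms handled above correspond to calls such as
	 *
	 *	syscall(SYS_getpid);		   int-sized code word
	 *	__syscall(SYS_lseek, ...);	   quad-aligned code word
	 *
	 * where the real syscall number is fetched from the head of the
	 * argument list and params is advanced past it.
	 */
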
	if (p->p_sysent->sv_mask)
		code &= p->p_sysent->sv_mask;
	if (code >= p->p_sysent->sv_size)
		callp = &p->p_sysent->sv_table[0];
	else
		callp = &p->p_sysent->sv_table[code];

	narg = callp->sy_narg & SYF_ARGMASK;

	/*
	 * copyin is MP aware, but the tracing code is not
	 */
	if (narg && params) {
		error = copyin(params, (caddr_t)(&args.nosys.sysmsg + 1),
				narg * sizeof(register_t));
		if (error) {
#ifdef KTRACE
			if (KTRPOINT(td, KTR_SYSCALL)) {
				MAKEMPSAFE(have_mplock);

				ktrsyscall(p, code, narg,
					(void *)(&args.nosys.sysmsg + 1));
			}
#endif
			goto bad;
		}
	}

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSCALL)) {
		MAKEMPSAFE(have_mplock);
		ktrsyscall(p, code, narg, (void *)(&args.nosys.sysmsg + 1));
	}
#endif

	/*
	 * For traditional syscall code edx is left untouched when 32 bit
	 * results are returned.  Since edx is loaded from fds[1] when the
	 * system call returns we pre-set it here.
	 */
	args.sysmsg_fds[0] = 0;
	args.sysmsg_fds[1] = frame->tf_edx;

	/*
	 * The syscall might manipulate the trap frame. If it does it
	 * will probably return EJUSTRETURN.
	 */
	args.sysmsg_frame = frame;

	STOPEVENT(p, S_SCE, narg);	/* MP aware */

	/*
	 * Try to run the syscall without the MP lock if the syscall
	 * is MP safe.  We have to obtain the MP lock no matter what if
	 * the syscall is not MP safe.
	 */
#ifdef SMP
	if ((callp->sy_narg & SYF_MPSAFE) == 0)
		MAKEMPSAFE(have_mplock);
#endif

	error = (*callp->sy_call)(&args);

#if 0
	kprintf("system call %d returned %d\n", code, error);
#endif

out:
	/*
	 * MP SAFE (we may or may not have the MP lock at this point)
	 */
	switch (error) {
	case 0:
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		lp = curthread->td_lwp;
		frame->tf_eax = args.sysmsg_fds[0];
		frame->tf_edx = args.sysmsg_fds[1];
		frame->tf_eflags &= ~PSL_C;
		break;
	case ERESTART:
		/*
		 * Reconstruct pc, assuming lcall $X,y is 7 bytes,
		 * int 0x80 is 2 bytes.  We saved this in tf_err.
		 */
		frame->tf_eip -= frame->tf_err;
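		/*
		 * Example: for an "int $0x80" system call the entry stub
		 * saved 2 (the instruction length) in tf_err, so the
		 * subtraction above backs eip up to re-execute the
		 * trapping instruction after the restart.
		 */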
		break;
	case EJUSTRETURN:
		break;
	case EASYNC:
		panic("Unexpected EASYNC return value (for now)");
	default:
bad:
		if (p->p_sysent->sv_errsize) {
			if (error >= p->p_sysent->sv_errsize)
				error = -1;	/* XXX */
			else
				error = p->p_sysent->sv_errtbl[error];
		}
		frame->tf_eax = error;
		frame->tf_eflags |= PSL_C;
		break;
	}
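
	/*
	 * Note on sv_errtbl[] above (a sketch of the convention, not new
	 * mechanism): an emulation ABI can supply an errno translation
	 * table so native error numbers are remapped before being returned
	 * in %eax; for example EDEADLK is 11 on BSD but 35 in the Linux
	 * errno space.
	 */
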
	/*
	 * Traced syscall.  trapsignal() is not MP aware.
	 */
	if ((orig_tf_eflags & PSL_T) /*&& !(orig_tf_eflags & PSL_VM)*/) {
		MAKEMPSAFE(have_mplock);
		frame->tf_eflags &= ~PSL_T;
		trapsignal(lp, SIGTRAP, 0);
	}

	/*
	 * Handle reschedule and other end-of-syscall issues
	 */
	userret(lp, frame, sticks);

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSRET)) {
		MAKEMPSAFE(have_mplock);
		ktrsysret(p, code, error, args.sysmsg_result);
	}
#endif

	/*
	 * This works because errno is findable through the
	 * register set.  If we ever support an emulation where this
	 * is not the case, this code will need to be revisited.
	 */
	STOPEVENT(p, S_SCX, code);

	userexit(lp);

#ifdef SMP
	/*
	 * Release the MP lock if we had to get it
	 */
	KASSERT(td->td_mpcount == have_mplock,
		("badmpcount syscall2/end from %p", (void *)frame->tf_eip));
	if (have_mplock)
		rel_mplock();
#endif
#ifdef INVARIANTS
	KASSERT(crit_count == (td->td_pri & ~TDPRI_MASK),
		("syscall: critical section count mismatch! %d/%d",
		crit_count / TDPRI_CRIT, td->td_pri / TDPRI_CRIT));
#endif
}

/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.  MP lock is held on entry and should be
 * released on return.  This code will return back into the fork
 * trampoline code which then runs doreti.
 */
void
fork_return(struct lwp *lp, struct trapframe *frame)
{
	struct proc *p = lp->lwp_proc;

	frame->tf_eax = 0;		/* Child returns zero */
	frame->tf_eflags &= ~PSL_C;	/* success */
	frame->tf_edx = 1;

	/*
	 * Newly forked processes are given a kernel priority.  We have to
	 * adjust the priority to a normal user priority and fake entry
	 * into the kernel (call userenter()) to install a passive release
	 * function just in case userret() decides to stop the process.  This
	 * can occur when ^Z races a fork.  If we do not install the passive
	 * release function the current process designation will not be
	 * released when the thread goes to sleep.
	 */
	lwkt_setpri_self(TDPRI_USER_NORM);
	userenter(lp->lwp_thread);
	userret(lp, frame, 0);
#ifdef KTRACE
	if (KTRPOINT(lp->lwp_thread, KTR_SYSRET))
		ktrsysret(p, SYS_fork, 0, 0);
#endif
	p->p_flag |= P_PASSIVE_ACQ;
	userexit(lp);
	p->p_flag &= ~P_PASSIVE_ACQ;
#ifdef SMP
	KKASSERT(lp->lwp_thread->td_mpcount == 1);
	rel_mplock();
#endif
}

/*
 * doreti has turned into this.  The frame is directly on the stack.  We
 * pull everything else we need (fpu and tls context) from the current
 * thread.
 *
 * Note on fpu interactions: In a virtual kernel, the fpu context for
 * an emulated user mode process is not shared with the virtual kernel's
 * fpu context, so we only have to 'stack' fpu contexts within the virtual
 * kernel itself, and not even then since the signal() contexts that we care
 * about save and restore the FPU state (I think anyhow).
 *
 * vmspace_ctl() returns an error only if it had problems installing the
 * context we supplied or problems copying data to/from our VM space.
 */
void
go_user(struct intrframe *frame)
{
	struct trapframe *tf = (void *)&frame->if_gs;
	int r;

	/*
	 * Interrupts may be disabled on entry, make sure all signals
	 * can be received before beginning our loop.
	 */
	sigsetmask(0);

	/*
	 * Switch to the current simulated user process, then call
	 * user_trap() when we break out of it (usually due to a signal).
	 */
	for (;;) {
		/*
		 * Tell the real kernel whether it is ok to use the FP
		 * unit or not.
		 */
		if (mdcpu->gd_npxthread == curthread) {
			tf->tf_xflags &= ~PGEX_FPFAULT;
		} else {
			tf->tf_xflags |= PGEX_FPFAULT;
		}

		/*
		 * We must poll the mailbox prior to making the system call
		 * to properly interlock new mailbox signals against the
		 * vmspace_ctl() call below.
		 *
		 * Passing a NULL frame causes the interrupt code to assume
		 * the supervisor context.
		 */
		if (mdcpu->gd_mailbox)
			signalmailbox(NULL);

		/*
		 * Run emulated user process context.  This call interlocks
		 * with new mailbox signals.
		 *
		 * Set PGEX_U unconditionally, indicating a user frame (the
		 * bit is normally set only by T_PAGEFLT).
		 */
		r = vmspace_ctl(&curproc->p_vmspace->vm_pmap, VMSPACE_CTL_RUN,
				tf, &curthread->td_savevext);
		frame->if_xflags |= PGEX_U;
#if 0
		kprintf("GO USER %d trap %d EVA %08x EIP %08x ESP %08x XFLAGS %02x/%02x\n",
			r, tf->tf_trapno, tf->tf_err, tf->tf_eip, tf->tf_esp,
			tf->tf_xflags, frame->if_xflags);
#endif
		if (r < 0) {
			if (errno == EINTR)
				signalmailbox(frame);
			else
				panic("vmspace_ctl failed");
		} else {
			signalmailbox(frame);
			if (tf->tf_trapno) {
				user_trap(tf);
			} else if (mycpu->gd_reqflags & RQF_AST_MASK) {
				tf->tf_trapno = T_ASTFLT;
				user_trap(tf);
			}
			tf->tf_trapno = 0;
		}
	}
}

/*
 * If PGEX_FPFAULT is set then set FP_VIRTFP in the PCB to force a T_DNA
 * fault (which is then passed back to the virtual kernel) if an attempt is
 * made to use the FP unit.
 *
 * XXX this is a fairly big hack.
 */
void
set_vkernel_fp(struct trapframe *frame)
{
	struct thread *td = curthread;

	if (frame->tf_xflags & PGEX_FPFAULT) {
		td->td_pcb->pcb_flags |= FP_VIRTFP;
		if (mdcpu->gd_npxthread == td)
			npxexit();
	} else {
		td->td_pcb->pcb_flags &= ~FP_VIRTFP;
	}
}