2 * Copyright (C) 1994, David Greenman
3 * Copyright (c) 1990, 1993
4 * The Regents of the University of California. All rights reserved.
6 * This code is derived from software contributed to Berkeley by
7 * the University of Utah, and William Jolitz.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by the University of
20 * California, Berkeley and its contributors.
21 * 4. Neither the name of the University nor the names of its contributors
22 * may be used to endorse or promote products derived from this software
23 * without specific prior written permission.
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * from: @(#)trap.c 7.4 (Berkeley) 5/13/91
38 * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $
42 * x86_64 Trap and System call handling
48 #include "opt_ktrace.h"
50 #include <sys/param.h>
51 #include <sys/systm.h>
53 #include <sys/pioctl.h>
54 #include <sys/kernel.h>
55 #include <sys/resourcevar.h>
56 #include <sys/signalvar.h>
57 #include <sys/signal2.h>
58 #include <sys/syscall.h>
59 #include <sys/sysctl.h>
60 #include <sys/sysent.h>
62 #include <sys/vmmeter.h>
63 #include <sys/malloc.h>
65 #include <sys/ktrace.h>
68 #include <sys/upcall.h>
69 #include <sys/vkernel.h>
70 #include <sys/sysproto.h>
71 #include <sys/sysunion.h>
72 #include <sys/vmspace.h>
75 #include <vm/vm_param.h>
78 #include <vm/vm_kern.h>
79 #include <vm/vm_map.h>
80 #include <vm/vm_page.h>
81 #include <vm/vm_extern.h>
83 #include <machine/cpu.h>
84 #include <machine/md_var.h>
85 #include <machine/pcb.h>
86 #include <machine/smp.h>
87 #include <machine/tss.h>
88 #include <machine/globaldata.h>
92 #include <sys/msgport2.h>
93 #include <sys/thread2.h>
94 #include <sys/mplock2.h>
98 #define MAKEMPSAFE(have_mplock) \
99 if (have_mplock == 0) { \
106 #define MAKEMPSAFE(have_mplock)
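/*
 * The SMP body is abbreviated above; a sketch of the expected expansion
 * (an assumption based on the have_mplock accounting used below, not a
 * verbatim copy of the elided macro):
 *
 *	if (have_mplock == 0) {
 *		get_mplock();
 *		have_mplock = 1;
 *	}
 *
 * so that the rel_mplock() calls in the return paths, and the
 * KASSERT(td->td_mpcount == have_mplock) checks, only see the MP lock
 * when a handler actually had to take it.
 */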
110 int (*pmath_emulate) (struct trapframe *);
112 extern int trapwrite (unsigned addr);
114 static int trap_pfault (struct trapframe *, int, vm_offset_t);
115 static void trap_fatal (struct trapframe *, int, vm_offset_t);
116 void dblfault_handler (void);
119 extern inthand_t IDTVEC(syscall);
122 #define MAX_TRAP_MSG 30
123 static char *trap_msg[] = {
125 "privileged instruction fault", /* 1 T_PRIVINFLT */
127 "breakpoint instruction fault", /* 3 T_BPTFLT */
130 "arithmetic trap", /* 6 T_ARITHTRAP */
131 "system forced exception", /* 7 T_ASTFLT */
133 "general protection fault", /* 9 T_PROTFLT */
134 "trace trap", /* 10 T_TRCTRAP */
136 "page fault", /* 12 T_PAGEFLT */
138 "alignment fault", /* 14 T_ALIGNFLT */
142 "integer divide fault", /* 18 T_DIVIDE */
143 "non-maskable interrupt trap", /* 19 T_NMI */
144 "overflow trap", /* 20 T_OFLOW */
145 "FPU bounds check fault", /* 21 T_BOUND */
146 "FPU device not available", /* 22 T_DNA */
147 "double fault", /* 23 T_DOUBLEFLT */
148 "FPU operand fetch fault", /* 24 T_FPOPFLT */
149 "invalid TSS fault", /* 25 T_TSSFLT */
150 "segment not present fault", /* 26 T_SEGNPFLT */
151 "stack fault", /* 27 T_STKFLT */
152 "machine check trap", /* 28 T_MCHK */
153 "SIMD floating-point exception", /* 29 T_XMMFLT */
154 "reserved (unknown) fault", /* 30 T_RESERVED */
158 static int ddb_on_nmi = 1;
159 SYSCTL_INT(_machdep, OID_AUTO, ddb_on_nmi, CTLFLAG_RW,
160 &ddb_on_nmi, 0, "Go to DDB on NMI");
162 static int panic_on_nmi = 1;
163 SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
164 &panic_on_nmi, 0, "Panic on NMI");
165 static int fast_release;
166 SYSCTL_INT(_machdep, OID_AUTO, fast_release, CTLFLAG_RW,
167 &fast_release, 0, "Passive Release was optimal");
168 static int slow_release;
169 SYSCTL_INT(_machdep, OID_AUTO, slow_release, CTLFLAG_RW,
170 &slow_release, 0, "Passive Release was nonoptimal");
172 MALLOC_DEFINE(M_SYSMSG, "sysmsg", "sysmsg structure");
173 extern int max_sysmsg;
176 * Passively intercepts the thread switch function to increase the thread
177 * priority from a user priority to a kernel priority, reducing
178 * syscall and trap overhead for the case where no switch occurs.
180 * Synchronizes td_ucred with p_ucred. This is used by system calls,
181 * signal handling, faults, AST traps, and anything else that enters the
182 * kernel from userland and provides the kernel with a stable read-only
183 * copy of the process ucred.
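/*
 * Typical pairing, as used by user_trap() and syscall2() below (a sketch
 * of the flow, not additional code):
 *
 *	userenter(td, p);		raise priority lazily on kernel entry
 *	... handle the trap or system call ...
 *	userret(lp, frame, sticks);	signals, stops, profiling
 *	userexit(lp);			reacquire the current-process
 *					designation before returning
 */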
186 userenter(struct thread *curtd, struct proc *curp)
191 curtd->td_release = lwkt_passive_release;
193 if (curtd->td_ucred != curp->p_ucred) {
194 ncred = crhold(curp->p_ucred);
195 ocred = curtd->td_ucred;
196 curtd->td_ucred = ncred;
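/*
 * The previous credential reference (ocred) is presumably released with
 * crfree() once the new hold is installed, so td_ucred always owns exactly
 * one reference to the credential it publishes.
 */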
203 * Handle signals, upcalls, profiling, and other AST's and/or tasks that
204 * must be completed before we can return to or try to return to userland.
206 * Note that td_sticks is a 64 bit quantity, but there's no point doing 64
207 * bit arithmetic on the delta calculation so the absolute tick values are
208 * truncated to an integer.
211 userret(struct lwp *lp, struct trapframe *frame, int sticks)
213 struct proc *p = lp->lwp_proc;
217 * Charge system time if profiling. Note: times are in microseconds.
218 * This may do a copyout and block, so do it first even though it
219 * means some system time will be charged as user time.
221 if (p->p_flag & P_PROFIL) {
222 addupc_task(p, frame->tf_rip,
223 (u_int)((int)lp->lwp_thread->td_sticks - sticks));
228 * If the jungle wants us dead, so be it.
230 if (lp->lwp_flag & LWP_WEXIT) {
233 rel_mplock(); /* NOT REACHED */
237 * Block here if we are in a stopped state.
239 if (p->p_stat == SSTOP) {
247 * Post any pending upcalls
249 if (p->p_flag & P_UPCALLPEND) {
251 p->p_flag &= ~P_UPCALLPEND;
258 * Post any pending signals
260 * WARNING! postsig() can exit and not return.
262 if ((sig = CURSIG_TRACE(lp)) != 0) {
270 * block here if we are swapped out, but still process signals
271 * (such as SIGKILL). proc0 (the swapin scheduler) is already
272 * aware of our situation, we do not have to wake it up.
274 if (p->p_flag & P_SWAPPEDOUT) {
276 p->p_flag |= P_SWAPWAIT;
278 if (p->p_flag & P_SWAPWAIT)
279 tsleep(p, PCATCH, "SWOUT", 0);
280 p->p_flag &= ~P_SWAPWAIT;
286 * Make sure postsig() handled request to restore old signal mask after
287 * running signal handler.
289 KKASSERT((lp->lwp_flag & LWP_OLDMASK) == 0);
293 * Cleanup from userenter and any passive release that might have occurred.
294 * We must reclaim the current-process designation before we can return
295 * to usermode. We also handle both LWKT and USER reschedule requests.
298 userexit(struct lwp *lp)
300 struct thread *td = lp->lwp_thread;
301 /* globaldata_t gd = td->td_gd; */
304 * Handle stop requests at kernel priority. Any requests queued
305 * after this loop will generate another AST.
307 while (lp->lwp_proc->p_stat == SSTOP) {
314 * Reduce our priority in preparation for a return to userland. If
315 * our passive release function was still in place, our priority was
316 * never raised and does not need to be reduced.
318 lwkt_passive_recover(td);
321 * Become the current user scheduled process if we aren't already,
322 * and deal with reschedule requests and other factors.
324 lp->lwp_proc->p_usched->acquire_curproc(lp);
325 /* WARNING: we may have migrated cpu's */
326 /* gd = td->td_gd; */
329 #if !defined(KTR_KERNENTRY)
330 #define KTR_KERNENTRY KTR_ALL
332 KTR_INFO_MASTER(kernentry);
333 KTR_INFO(KTR_KERNENTRY, kernentry, trap, 0, "pid=%d, tid=%d, trapno=%d, eva=%p",
334 sizeof(int) + sizeof(int) + sizeof(int) + sizeof(vm_offset_t));
335 KTR_INFO(KTR_KERNENTRY, kernentry, trap_ret, 0, "pid=%d, tid=%d",
336 sizeof(int) + sizeof(int));
337 KTR_INFO(KTR_KERNENTRY, kernentry, syscall, 0, "pid=%d, tid=%d, call=%d",
338 sizeof(int) + sizeof(int) + sizeof(int));
339 KTR_INFO(KTR_KERNENTRY, kernentry, syscall_ret, 0, "pid=%d, tid=%d, err=%d",
340 sizeof(int) + sizeof(int) + sizeof(int));
341 KTR_INFO(KTR_KERNENTRY, kernentry, fork_ret, 0, "pid=%d, tid=%d",
342 sizeof(int) + sizeof(int));
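/*
 * These trace points are emitted from the entry paths below, for example
 * (matching the "call=%d" format above):
 *
 *	KTR_LOG(kernentry_syscall, lp->lwp_proc->p_pid, lp->lwp_tid, code);
 */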
345 * Exception, fault, and trap interface to the kernel.
346 * This common code is called from assembly language IDT gate entry
347 * routines that prepare a suitable stack frame, and restore this
348 * frame after the exception has been processed.
350 * This function is also called from doreti in an interlock to handle ASTs.
351 * For example: hardwareint->INTROUTINE->(set ast)->doreti->trap
353 * NOTE! We have to retrieve the fault address prior to obtaining the
354 * MP lock because get_mplock() may switch out. YYY cr2 really ought
355 * to be retrieved by the assembly code, not here.
357 * XXX gd_trap_nesting_level currently prevents lwkt_switch() from panicking
358 * if an attempt is made to switch from a fast interrupt or IPI. This is
359 * necessary to properly take fatal kernel traps on SMP machines if
360 * get_mplock() has to block.
364 user_trap(struct trapframe *frame)
366 struct globaldata *gd = mycpu;
367 struct thread *td = gd->gd_curthread;
368 struct lwp *lp = td->td_lwp;
371 int i = 0, ucode = 0, type, code;
376 int crit_count = td->td_critcount;
377 lwkt_tokref_t curstop = td->td_toks_stop;
383 if (frame->tf_trapno == T_PAGEFLT)
384 eva = frame->tf_addr;
388 kprintf("USER_TRAP AT %08lx xflags %ld trapno %ld eva %08lx\n",
389 frame->tf_rip, frame->tf_xflags, frame->tf_trapno, eva);
393 * Everything coming from user mode runs through user_trap,
394 * including system calls.
396 if (frame->tf_trapno == T_FAST_SYSCALL) {
401 KTR_LOG(kernentry_trap, lp->lwp_proc->p_pid, lp->lwp_tid,
402 frame->tf_trapno, eva);
406 eva = (frame->tf_trapno == T_PAGEFLT ? rcr2() : 0);
407 ++gd->gd_trap_nesting_level;
408 MAKEMPSAFE(have_mplock);
409 trap_fatal(frame, TRUE, eva);
410 --gd->gd_trap_nesting_level;
415 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
418 type = frame->tf_trapno;
419 code = frame->tf_err;
423 sticks = (int)td->td_sticks;
424 lp->lwp_md.md_regs = frame;
427 case T_PRIVINFLT: /* privileged instruction fault */
432 case T_BPTFLT: /* bpt instruction fault */
433 case T_TRCTRAP: /* trace trap */
434 frame->tf_rflags &= ~PSL_T;
438 case T_ARITHTRAP: /* arithmetic trap */
443 case T_ASTFLT: /* Allow process switch */
444 mycpu->gd_cnt.v_soft++;
445 if (mycpu->gd_reqflags & RQF_AST_OWEUPC) {
446 atomic_clear_int_nonlocked(&mycpu->gd_reqflags,
448 addupc_task(p, p->p_prof.pr_addr,
454 * The following two traps can happen in
455 * vm86 mode, and, if so, we want to handle
458 case T_PROTFLT: /* general protection fault */
459 case T_STKFLT: /* stack fault */
461 if (frame->tf_eflags & PSL_VM) {
462 i = vm86_emulate((struct vm86frame *)frame);
470 case T_SEGNPFLT: /* segment not present fault */
471 case T_TSSFLT: /* invalid TSS fault */
472 case T_DOUBLEFLT: /* double fault */
474 ucode = code + BUS_SEGM_FAULT;
478 case T_PAGEFLT: /* page fault */
479 MAKEMPSAFE(have_mplock);
480 i = trap_pfault(frame, TRUE, eva);
483 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
493 case T_DIVIDE: /* integer divide fault */
500 MAKEMPSAFE(have_mplock);
501 /* machine/parity/power fail/"kitchen sink" faults */
502 if (isa_nmi(code) == 0) {
505 * NMI can be hooked up to a pushbutton
509 kprintf ("NMI ... going to debugger\n");
510 kdb_trap (type, 0, frame);
514 } else if (panic_on_nmi)
515 panic("NMI indicates hardware failure");
517 #endif /* NISA > 0 */
519 case T_OFLOW: /* integer overflow fault */
524 case T_BOUND: /* bounds check fault */
531 * Virtual kernel intercept - pass the DNA exception
532 * to the (emulated) virtual kernel if it asked to handle
533 * it. This occurs when the virtual kernel is holding
534 * onto the FP context for a different emulated
535 * process than the one currently running.
537 * We must still call npxdna() since we may have
538 * saved FP state that the (emulated) virtual kernel
539 * needs to hand over to a different emulated process.
541 if (lp->lwp_vkernel && lp->lwp_vkernel->ve &&
542 (td->td_pcb->pcb_flags & FP_VIRTFP)
548 * The kernel may have switched out the FP unit's
549 * state, causing the user process to take a fault
550 * when it tries to use the FP unit. Restore the
555 if (!pmath_emulate) {
557 ucode = FPE_FPU_NP_TRAP;
560 i = (*pmath_emulate)(frame);
562 if (!(frame->tf_rflags & PSL_T))
564 frame->tf_rflags &= ~PSL_T;
567 /* else ucode = emulator_only_knows() XXX */
570 case T_FPOPFLT: /* FPU operand fetch fault */
575 case T_XMMFLT: /* SIMD floating-point exception */
582 * Virtual kernel intercept - if the fault is directly related to a
583 * VM context managed by a virtual kernel then let the virtual kernel
586 if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
587 vkernel_trap(lp, frame);
592 * Translate fault for emulators (e.g. Linux)
594 if (*p->p_sysent->sv_transtrap)
595 i = (*p->p_sysent->sv_transtrap)(i, type);
597 MAKEMPSAFE(have_mplock);
598 trapsignal(lp, i, ucode);
601 if (type <= MAX_TRAP_MSG) {
602 uprintf("fatal process exception: %s",
604 if ((type == T_PAGEFLT) || (type == T_PROTFLT))
605 uprintf(", fault VA = 0x%lx", (u_long)eva);
612 KASSERT(td->td_mpcount == have_mplock,
613 ("badmpcount trap/end from %p", (void *)frame->tf_rip));
615 userret(lp, frame, sticks);
622 KTR_LOG(kernentry_trap_ret, lp->lwp_proc->p_pid, lp->lwp_tid);
624 KASSERT(crit_count == td->td_critcount,
625 ("trap: critical section count mismatch! %d/%d",
626 crit_count, td->td_pri));
627 KASSERT(curstop == td->td_toks_stop,
628 ("trap: extra tokens held after trap! %ld/%ld",
629 curstop - &td->td_toks_base,
630 td->td_toks_stop - &td->td_toks_base));
635 kern_trap(struct trapframe *frame)
637 struct globaldata *gd = mycpu;
638 struct thread *td = gd->gd_curthread;
641 int i = 0, ucode = 0, type, code;
646 int crit_count = td->td_critcount;
647 lwkt_tokref_t curstop = td->td_toks_stop;
654 if (frame->tf_trapno == T_PAGEFLT)
655 eva = frame->tf_addr;
661 ++gd->gd_trap_nesting_level;
662 MAKEMPSAFE(have_mplock);
663 trap_fatal(frame, FALSE, eva);
664 --gd->gd_trap_nesting_level;
669 type = frame->tf_trapno;
670 code = frame->tf_err;
678 case T_PAGEFLT: /* page fault */
679 MAKEMPSAFE(have_mplock);
680 trap_pfault(frame, FALSE, eva);
685 * The kernel may be using npx for copying or other
688 panic("kernel NPX should not happen");
693 case T_PROTFLT: /* general protection fault */
694 case T_SEGNPFLT: /* segment not present fault */
696 * Invalid segment selectors and out of bounds
697 * %eip's and %esp's can be set up in user mode.
698 * This causes a fault in kernel mode when the
699 * kernel tries to return to user mode. We want
700 * to get this fault so that we can fix the
701 * problem here and not have to check all the
702 * selectors and pointers when the user changes
705 if (mycpu->gd_intr_nesting_level == 0) {
706 if (td->td_pcb->pcb_onfault) {
708 (register_t)td->td_pcb->pcb_onfault;
716 * PSL_NT can be set in user mode and isn't cleared
717 * automatically when the kernel is entered. This
718 * causes a TSS fault when the kernel attempts to
719 * `iret' because the TSS link is uninitialized. We
720 * want to get this fault so that we can fix the
721 * problem here and not every time the kernel is
724 if (frame->tf_rflags & PSL_NT) {
725 frame->tf_rflags &= ~PSL_NT;
730 case T_TRCTRAP: /* trace trap */
732 if (frame->tf_eip == (int)IDTVEC(syscall)) {
734 * We've just entered system mode via the
735 * syscall lcall. Continue single stepping
736 * silently until the syscall handler has
741 if (frame->tf_eip == (int)IDTVEC(syscall) + 1) {
743 * The syscall handler has now saved the
744 * flags. Stop single stepping it.
746 frame->tf_eflags &= ~PSL_T;
752 * Ignore debug register trace traps due to
753 * accesses in the user's address space, which
754 * can happen under several conditions such as
755 * if a user sets a watchpoint on a buffer and
756 * then passes that buffer to a system call.
757 * We still want to get TRCTRAPS for addresses
758 * in kernel space because that is useful when
759 * debugging the kernel.
761 if (user_dbreg_trap()) {
763 * Reset breakpoint bits because the
766 load_dr6(rdr6() & 0xfffffff0);
771 * Fall through (TRCTRAP kernel mode, kernel address)
775 * If DDB is enabled, let it handle the debugger trap.
776 * Otherwise, debugger traps "can't happen".
779 MAKEMPSAFE(have_mplock);
780 if (kdb_trap (type, 0, frame))
785 MAKEMPSAFE(have_mplock);
786 trap_fatal(frame, FALSE, eva);
789 MAKEMPSAFE(have_mplock);
790 trap_fatal(frame, FALSE, eva);
795 * Ignore this trap generated from a spurious SIGTRAP.
797 * single stepping in / syscalls leads to spurious / SIGTRAP
800 * Haiku (c) 2007 Simon 'corecode' Schubert
806 * Translate fault for emulators (e.g. Linux)
808 if (*p->p_sysent->sv_transtrap)
809 i = (*p->p_sysent->sv_transtrap)(i, type);
811 MAKEMPSAFE(have_mplock);
812 trapsignal(lp, i, ucode);
815 if (type <= MAX_TRAP_MSG) {
816 uprintf("fatal process exception: %s",
818 if ((type == T_PAGEFLT) || (type == T_PROTFLT))
819 uprintf(", fault VA = 0x%lx", (u_long)eva);
831 KASSERT(crit_count == td->td_critcount,
832 ("trap: critical section count mismatch! %d/%d",
833 crit_count, td->td_pri));
834 KASSERT(curstop == td->td_toks_stop,
835 ("trap: extra tokens held after trap! %ld/%ld",
836 curstop - &td->td_toks_base,
837 td->td_toks_stop - &td->td_toks_base));
842 trap_pfault(struct trapframe *frame, int usermode, vm_offset_t eva)
845 struct vmspace *vm = NULL;
849 thread_t td = curthread;
850 struct lwp *lp = td->td_lwp;
852 va = trunc_page(eva);
853 if (usermode == FALSE) {
855 * This is a fault on kernel virtual memory.
860 * This is a fault on non-kernel virtual memory.
861 * vm is initialized above to NULL. If curproc is NULL
862 * or curproc->p_vmspace is NULL the fault is fatal.
865 vm = lp->lwp_vmspace;
873 if (frame->tf_err & PGEX_W)
874 ftype = VM_PROT_READ | VM_PROT_WRITE;
876 ftype = VM_PROT_READ;
878 if (map != &kernel_map) {
880 * Keep swapout from messing with us during this
886 * Grow the stack if necessary
888 /* grow_stack returns false only if va falls into
889 * a growable stack region and the stack growth
890 * fails. It returns true if va was not within
891 * a growable stack region, or if the stack
894 if (!grow_stack (lp->lwp_proc, va)) {
900 /* Fault in the user page: */
901 rv = vm_fault(map, va, ftype,
902 (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
908 * Don't have to worry about process locking or stacks in the kernel.
910 rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
913 if (rv == KERN_SUCCESS)
917 if (td->td_gd->gd_intr_nesting_level == 0 &&
918 td->td_pcb->pcb_onfault) {
919 frame->tf_rip = (register_t)td->td_pcb->pcb_onfault;
922 trap_fatal(frame, usermode, eva);
927 * NOTE: on x86_64 we have a tf_addr field in the trapframe, no
928 * kludge is needed to pass the fault address to signal handlers.
930 struct proc *p = td->td_proc;
931 kprintf("seg-fault accessing address %p rip=%p pid=%d p_comm=%s\n",
932 (void *)va, (void *)frame->tf_rip, p->p_pid, p->p_comm);
933 /* Debugger("seg-fault"); */
935 return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
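/*
 * As noted above, tf_addr lets the fault address reach userland through
 * the normal POSIX siginfo path (si_addr).  A minimal userland sketch,
 * shown only for illustration and not part of this file:
 *
 *	static void
 *	segv_handler(int sig, siginfo_t *si, void *uap)
 *	{
 *		void *fault_va = si->si_addr;
 *
 *		(void)fault_va;
 *		_exit(1);
 *	}
 *
 *	struct sigaction sa;
 *	bzero(&sa, sizeof(sa));
 *	sa.sa_sigaction = segv_handler;
 *	sa.sa_flags = SA_SIGINFO;
 *	sigaction(SIGSEGV, &sa, NULL);
 */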
939 trap_fatal(struct trapframe *frame, int usermode, vm_offset_t eva)
944 code = frame->tf_xflags;
945 type = frame->tf_trapno;
947 if (type <= MAX_TRAP_MSG) {
948 kprintf("\n\nFatal trap %d: %s while in %s mode\n",
949 type, trap_msg[type],
950 (usermode ? "user" : "kernel"));
953 /* two separate prints in case of a trap on an unmapped page */
954 kprintf("mp_lock = %08x; ", mp_lock);
955 kprintf("cpuid = %d\n", mycpu->gd_cpuid);
957 if (type == T_PAGEFLT) {
958 kprintf("fault virtual address = %p\n", (void *)eva);
959 kprintf("fault code = %s %s, %s\n",
960 usermode ? "user" : "supervisor",
961 code & PGEX_W ? "write" : "read",
962 code & PGEX_P ? "protection violation" : "page not present");
964 kprintf("instruction pointer = 0x%lx:0x%lx\n",
965 frame->tf_cs & 0xffff, frame->tf_rip);
967 ss = frame->tf_ss & 0xffff;
970 ss = GSEL(GDATA_SEL, SEL_KPL);
971 rsp = (long)&frame->tf_rsp;
973 kprintf("stack pointer = 0x%x:0x%lx\n", ss, rsp);
974 kprintf("frame pointer = 0x%x:0x%lx\n", ss, frame->tf_rbp);
975 kprintf("processor eflags = ");
976 if (frame->tf_rflags & PSL_T)
977 kprintf("trace trap, ");
978 if (frame->tf_rflags & PSL_I)
979 kprintf("interrupt enabled, ");
980 if (frame->tf_rflags & PSL_NT)
981 kprintf("nested task, ");
982 if (frame->tf_rflags & PSL_RF)
985 if (frame->tf_eflags & PSL_VM)
988 kprintf("IOPL = %jd\n", (intmax_t)((frame->tf_rflags & PSL_IOPL) >> 12));
989 kprintf("current process = ");
991 kprintf("%lu (%s)\n",
992 (u_long)curproc->p_pid, curproc->p_comm ?
993 curproc->p_comm : "");
997 kprintf("current thread = pri %d ", curthread->td_pri);
998 if (curthread->td_critcount)
1004 * we probably SHOULD have stopped the other CPUs before now!
1005 * another CPU COULD have been touching cpl at this moment...
1007 kprintf(" <- SMP: XXX");
1016 if ((debugger_on_panic || db_active) && kdb_trap(type, code, frame))
1019 kprintf("trap number = %d\n", type);
1020 if (type <= MAX_TRAP_MSG)
1021 panic("%s", trap_msg[type]);
1023 panic("unknown/reserved trap");
1027 * Double fault handler. Called when a fault occurs while writing
1028 * a frame for a trap/exception onto the stack. This usually occurs
1029 * when the stack overflows (such is the case with infinite recursion,
1032 * XXX Note that the current PTD gets replaced by IdlePTD when the
1033 * task switch occurs. This means that the stack that was active at
1034 * the time of the double fault is not available at <kstack> unless
1035 * the machine was idle when the double fault occurred. The downside
1036 * of this is that "trace <ebp>" in ddb won't work.
1039 dblfault_handler(void)
1042 struct mdglobaldata *gd = mdcpu;
1045 kprintf("\nFatal double fault:\n");
1047 kprintf("rip = 0x%lx\n", gd->gd_common_tss.tss_rip);
1048 kprintf("rsp = 0x%lx\n", gd->gd_common_tss.tss_rsp);
1049 kprintf("rbp = 0x%lx\n", gd->gd_common_tss.tss_rbp);
1052 /* two separate prints in case of a trap on an unmapped page */
1053 kprintf("mp_lock = %08x; ", mp_lock);
1054 kprintf("cpuid = %d\n", mycpu->gd_cpuid);
1056 panic("double fault");
1060 * Compensate for 386 brain damage (missing URKR).
1061 * This is a little simpler than the pagefault handler in trap() because
1062 * the page tables have already been faulted in and high addresses
1063 * are thrown out early for other reasons.
1066 trapwrite(unsigned addr)
1073 va = trunc_page((vm_offset_t)addr);
1075 * XXX - MAX is END. Changed > to >= for temp. fix.
1077 if (va >= VM_MAX_USER_ADDRESS)
1080 lp = curthread->td_lwp;
1081 vm = lp->lwp_vmspace;
1083 PHOLD(lp->lwp_proc);
1085 if (!grow_stack (lp->lwp_proc, va)) {
1086 PRELE(lp->lwp_proc);
1091 * fault the data page
1093 rv = vm_fault(&vm->vm_map, va, VM_PROT_WRITE, VM_FAULT_DIRTY);
1095 PRELE(lp->lwp_proc);
1097 if (rv != KERN_SUCCESS)
1104 * syscall2 - MP aware system call request C handler
1106 * A system call is essentially treated as a trap except that the
1107 * MP lock is not held on entry or return. We are responsible for
1108 * obtaining the MP lock if necessary and for handling ASTs
1109 * (e.g. a task switch) prior to return.
1111 * In general, only simple access and manipulation of curproc and
1112 * the current stack is allowed without having to hold MP lock.
1114 * MPSAFE - note that large sections of this routine are run without
1118 syscall2(struct trapframe *frame)
1120 struct thread *td = curthread;
1121 struct proc *p = td->td_proc;
1122 struct lwp *lp = td->td_lwp;
1124 struct sysent *callp;
1125 register_t orig_tf_rflags;
1130 int crit_count = td->td_critcount;
1131 lwkt_tokref_t curstop = td->td_toks_stop;
1134 int have_mplock = 0;
1139 union sysunion args;
1140 register_t *argsdst;
1142 mycpu->gd_cnt.v_syscall++;
1144 KTR_LOG(kernentry_syscall, lp->lwp_proc->p_pid, lp->lwp_tid,
1148 KASSERT(td->td_mpcount == 0,
1149 ("badmpcount syscall2 from %p", (void *)frame->tf_rip));
1151 userenter(td, p); /* lazy raise our priority */
1158 sticks = (int)td->td_sticks;
1159 orig_tf_rflags = frame->tf_rflags;
1162 * Virtual kernel intercept - if a VM context managed by a virtual
1163 * kernel issues a system call the virtual kernel handles it, not us.
1164 * Restore the virtual kernel context and return from its system
1165 * call. The current frame is copied out to the virtual kernel.
1167 if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
1168 vkernel_trap(lp, frame);
1169 error = EJUSTRETURN;
1174 * Get the system call parameters and account for time
1176 lp->lwp_md.md_regs = frame;
1177 params = (caddr_t)frame->tf_rsp + sizeof(register_t);
1178 code = frame->tf_rax;
1180 if (p->p_sysent->sv_prepsyscall) {
1181 (*p->p_sysent->sv_prepsyscall)(
1182 frame, (int *)(&args.nosys.sysmsg + 1),
1185 if (code == SYS_syscall || code == SYS___syscall) {
1186 code = frame->tf_rdi;
1192 if (p->p_sysent->sv_mask)
1193 code &= p->p_sysent->sv_mask;
1195 if (code >= p->p_sysent->sv_size)
1196 callp = &p->p_sysent->sv_table[0];
1198 callp = &p->p_sysent->sv_table[code];
1200 narg = callp->sy_narg & SYF_ARGMASK;
1203 * On x86_64 we get up to six arguments in registers. The rest are
1204 * on the stack. The first six members of 'struct trapframe' happen
1205 * to be the registers used to pass arguments, in exactly the right
1208 argp = &frame->tf_rdi;
1210 argsdst = (register_t *)(&args.nosys.sysmsg + 1);
1212 * JG can we overflow the space pointed to by 'argsdst'
1213 * either with 'bcopy' or with 'copyin'?
1215 bcopy(argp, argsdst, sizeof(register_t) * regcnt);
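/*
 * The register order relied on here is the SysV AMD64 convention:
 * %rdi, %rsi, %rdx, %rcx, %r8, %r9.  For the syscall instruction itself
 * userland supplies the fourth argument in %r10 (because %rcx is clobbered
 * by syscall); the entry path is assumed to have stashed it in tf_rcx,
 * which is why the restart fixup below copies tf_rcx back into tf_r10.
 */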
1217 * copyin is MP aware, but the tracing code is not
1219 if (narg > regcnt) {
1220 KASSERT(params != NULL, ("copyin args with no params!"));
1221 error = copyin(params, &argsdst[regcnt],
1222 (narg - regcnt) * sizeof(register_t));
1225 if (KTRPOINT(td, KTR_SYSCALL)) {
1226 MAKEMPSAFE(have_mplock);
1228 ktrsyscall(lp, code, narg,
1229 (void *)(&args.nosys.sysmsg + 1));
1237 if (KTRPOINT(td, KTR_SYSCALL)) {
1238 MAKEMPSAFE(have_mplock);
1239 ktrsyscall(lp, code, narg, (void *)(&args.nosys.sysmsg + 1));
1244 * Default return value is 0 (will be copied to %rax). Double-value
1245 * returns use %rax and %rdx. %rdx is left unchanged for system
1246 * calls which return only one result.
1248 args.sysmsg_fds[0] = 0;
1249 args.sysmsg_fds[1] = frame->tf_rdx;
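/*
 * For example (an illustration of the convention, assuming the classic
 * BSD behaviour): a pipe(2)-style call fills both sysmsg_fds[0] and
 * sysmsg_fds[1] so the two descriptors come back in %rax and %rdx, while
 * single-result calls only set sysmsg_fds[0] and %rdx keeps the value
 * preloaded from the frame above.
 */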
1252 * The syscall might manipulate the trap frame. If it does it
1253 * will probably return EJUSTRETURN.
1255 args.sysmsg_frame = frame;
1257 STOPEVENT(p, S_SCE, narg); /* MP aware */
1260 * NOTE: All system calls run MPSAFE now. The system call itself
1261 * is responsible for getting the MP lock.
1263 error = (*callp->sy_call)(&args);
1266 kprintf("system call %d returned %d\n", code, error);
1271 * MP SAFE (we may or may not have the MP lock at this point)
1276 * Reinitialize proc pointer `p' as it may be different
1277 * if this is a child returning from fork syscall.
1280 lp = curthread->td_lwp;
1281 frame->tf_rax = args.sysmsg_fds[0];
1282 frame->tf_rdx = args.sysmsg_fds[1];
1283 frame->tf_rflags &= ~PSL_C;
1287 * Reconstruct pc, we know that 'syscall' is 2 bytes.
1288 * We have to do a full context restore so that %r10
1289 * (which was holding the value of %rcx) is restored for
1290 * the next iteration.
1292 frame->tf_rip -= frame->tf_err;
1293 frame->tf_r10 = frame->tf_rcx;
1298 panic("Unexpected EASYNC return value (for now)");
1301 if (p->p_sysent->sv_errsize) {
1302 if (error >= p->p_sysent->sv_errsize)
1303 error = -1; /* XXX */
1305 error = p->p_sysent->sv_errtbl[error];
1307 frame->tf_rax = error;
1308 frame->tf_rflags |= PSL_C;
1313 * Traced syscall. trapsignal() is not MP aware.
1315 if (orig_tf_rflags & PSL_T) {
1316 MAKEMPSAFE(have_mplock);
1317 frame->tf_rflags &= ~PSL_T;
1318 trapsignal(lp, SIGTRAP, 0);
1322 * Handle reschedule and other end-of-syscall issues
1324 userret(lp, frame, sticks);
1327 if (KTRPOINT(td, KTR_SYSRET)) {
1328 MAKEMPSAFE(have_mplock);
1329 ktrsysret(lp, code, error, args.sysmsg_result);
1334 * This works because errno is findable through the
1335 * register set. If we ever support an emulation where this
1336 * is not the case, this code will need to be revisited.
1338 STOPEVENT(p, S_SCX, code);
1343 * Release the MP lock if we had to get it
1345 KASSERT(td->td_mpcount == have_mplock,
1346 ("badmpcount syscall2/end from %p", (void *)frame->tf_rip));
1350 KTR_LOG(kernentry_syscall_ret, lp->lwp_proc->p_pid, lp->lwp_tid, error);
1352 KASSERT(&td->td_toks_base == td->td_toks_stop,
1353 ("syscall: critical section count mismatch! %d/%d",
1354 crit_count, td->td_pri));
1355 KASSERT(curstop == td->td_toks_stop,
1356 ("syscall: extra tokens held after trap! %ld",
1357 td->td_toks_stop - &td->td_toks_base));
1362 * NOTE: mplock not held at any point
1365 fork_return(struct lwp *lp, struct trapframe *frame)
1367 frame->tf_rax = 0; /* Child returns zero */
1368 frame->tf_rflags &= ~PSL_C; /* success */
1371 generic_lwp_return(lp, frame);
1372 KTR_LOG(kernentry_fork_ret, lp->lwp_proc->p_pid, lp->lwp_tid);
1376 * Simplified back end of syscall(), used when returning from fork()
1377 * directly into user mode.
1379 * This code will return back into the fork trampoline code which then
1382 * NOTE: The mplock is not held at any point.
1385 generic_lwp_return(struct lwp *lp, struct trapframe *frame)
1387 struct proc *p = lp->lwp_proc;
1390 * Newly forked processes are given a kernel priority. We have to
1391 * adjust the priority to a normal user priority and fake entry
1392 * into the kernel (call userenter()) to install a passive release
1393 * function just in case userret() decides to stop the process. This
1394 * can occur when ^Z races a fork. If we do not install the passive
1395 * release function the current process designation will not be
1396 * released when the thread goes to sleep.
1398 lwkt_setpri_self(TDPRI_USER_NORM);
1399 userenter(lp->lwp_thread, p);
1400 userret(lp, frame, 0);
1402 if (KTRPOINT(lp->lwp_thread, KTR_SYSRET))
1403 ktrsysret(lp, SYS_fork, 0, 0);
1405 p->p_flag |= P_PASSIVE_ACQ;
1407 p->p_flag &= ~P_PASSIVE_ACQ;
1411 * doreti has turned into this. The frame is directly on the stack. We
1412 * pull everything else we need (fpu and tls context) from the current
1415 * Note on fpu interactions: In a virtual kernel, the fpu context for
1416 * an emulated user mode process is not shared with the virtual kernel's
1417 * fpu context, so we only have to 'stack' fpu contexts within the virtual
1418 * kernel itself, and not even then since the signal() contexts that we care
1419 * about save and restore the FPU state (I think anyhow).
1421 * vmspace_ctl() returns an error only if it had problems installing the
1422 * context we supplied or problems copying data to/from our VM space.
1425 go_user(struct intrframe *frame)
1427 struct trapframe *tf = (void *)&frame->if_rdi;
1431 * Interrupts may be disabled on entry, make sure all signals
1432 * can be received before beginning our loop.
1437 * Switch to the current simulated user process, then call
1438 * user_trap() when we break out of it (usually due to a signal).
1442 * Tell the real kernel whether it is ok to use the FP
1445 if (mdcpu->gd_npxthread == curthread) {
1446 tf->tf_xflags &= ~PGEX_FPFAULT;
1448 tf->tf_xflags |= PGEX_FPFAULT;
1452 * Run emulated user process context. This call interlocks
1453 * with new mailbox signals.
1455 * Set PGEX_U unconditionally, indicating a user frame (the
1456 * bit is normally set only by T_PAGEFLT).
1458 r = vmspace_ctl(&curproc->p_vmspace->vm_pmap, VMSPACE_CTL_RUN,
1459 tf, &curthread->td_savevext);
1460 frame->if_xflags |= PGEX_U;
1462 kprintf("GO USER %d trap %ld EVA %08lx RIP %08lx RSP %08lx XFLAGS %02lx/%02lx\n",
1463 r, tf->tf_trapno, tf->tf_addr, tf->tf_rip, tf->tf_rsp,
1464 tf->tf_xflags, frame->if_xflags);
1468 panic("vmspace_ctl failed error %d", errno);
1470 if (tf->tf_trapno) {
1474 if (mycpu->gd_reqflags & RQF_AST_MASK) {
1475 tf->tf_trapno = T_ASTFLT;
1483 * If PGEX_FPFAULT is set then set FP_VIRTFP in the PCB to force a T_DNA
1484 * fault (which is then passed back to the virtual kernel) if an attempt is
1485 * made to use the FP unit.
1487 * XXX this is a fairly big hack.
1490 set_vkernel_fp(struct trapframe *frame)
1492 struct thread *td = curthread;
1494 if (frame->tf_xflags & PGEX_FPFAULT) {
1495 td->td_pcb->pcb_flags |= FP_VIRTFP;
1496 if (mdcpu->gd_npxthread == td)
1499 td->td_pcb->pcb_flags &= ~FP_VIRTFP;
1504 * Called from vkernel_trap() to fixup the vkernel's syscall
1505 * frame for vmspace_ctl() return.
1508 cpu_vkernel_trap(struct trapframe *frame, int error)
1510 frame->tf_rax = error;
1512 frame->tf_rflags |= PSL_C;
1514 frame->tf_rflags &= ~PSL_C;
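/*
 * This mirrors the native syscall2() return path above: on success
 * (error == 0) the carry flag (PSL_C) is cleared, on failure it is set and
 * %rax holds the error number, which is the traditional BSD convention
 * that userland syscall stubs test to decide whether to set errno.
 */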