2 * Copyright (c) 1990, 1993
3 * The Regents of the University of California. All rights reserved.
4 * Copyright (C) 1994, David Greenman
5 * Copyright (c) 2008 The DragonFly Project.
6 * Copyright (c) 2008 Jordan Gordeev.
8 * This code is derived from software contributed to Berkeley by
9 * the University of Utah, and William Jolitz.
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the University of
22 * California, Berkeley and its contributors.
23 * 4. Neither the name of the University nor the names of its contributors
24 * may be used to endorse or promote products derived from this software
25 * without specific prior written permission.
27 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
28 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
29 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
30 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
31 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 * from: @(#)trap.c 7.4 (Berkeley) 5/13/91
40 * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $
44 * x86_64 Trap and System call handling
50 #include "opt_ktrace.h"
52 #include <machine/frame.h>
53 #include <sys/param.h>
54 #include <sys/systm.h>
55 #include <sys/kernel.h>
56 #include <sys/kerneldump.h>
58 #include <sys/pioctl.h>
59 #include <sys/types.h>
60 #include <sys/signal2.h>
61 #include <sys/syscall.h>
62 #include <sys/sysctl.h>
63 #include <sys/sysent.h>
65 #include <sys/ktrace.h>
68 #include <sys/sysmsg.h>
69 #include <sys/sysproto.h>
70 #include <sys/sysunion.h>
74 #include <vm/vm_extern.h>
75 #include <vm/vm_kern.h>
76 #include <vm/vm_param.h>
77 #include <machine/cpu.h>
78 #include <machine/pcb.h>
79 #include <machine/smp.h>
80 #include <machine/thread.h>
81 #include <machine/clock.h>
82 #include <machine/vmparam.h>
83 #include <machine/md_var.h>
84 #include <machine_base/isa/isa_intr.h>
85 #include <machine_base/apic/lapic.h>
89 #include <sys/thread2.h>
90 #include <sys/mplock2.h>
91 #include <sys/spinlock2.h>
93 #define MAKEMPSAFE(have_mplock) \
94 if (have_mplock == 0) { \
99 extern void trap(struct trapframe *frame);
101 static int trap_pfault(struct trapframe *, int);
102 static void trap_fatal(struct trapframe *, vm_offset_t);
103 void dblfault_handler(struct trapframe *frame);
105 #define MAX_TRAP_MSG 30
106 static char *trap_msg[] = {
108 "privileged instruction fault", /* 1 T_PRIVINFLT */
110 "breakpoint instruction fault", /* 3 T_BPTFLT */
113 "arithmetic trap", /* 6 T_ARITHTRAP */
114 "system forced exception", /* 7 T_ASTFLT */
116 "general protection fault", /* 9 T_PROTFLT */
117 "trace trap", /* 10 T_TRCTRAP */
119 "page fault", /* 12 T_PAGEFLT */
121 "alignment fault", /* 14 T_ALIGNFLT */
125 "integer divide fault", /* 18 T_DIVIDE */
126 "non-maskable interrupt trap", /* 19 T_NMI */
127 "overflow trap", /* 20 T_OFLOW */
128 "FPU bounds check fault", /* 21 T_BOUND */
129 "FPU device not available", /* 22 T_DNA */
130 "double fault", /* 23 T_DOUBLEFLT */
131 "FPU operand fetch fault", /* 24 T_FPOPFLT */
132 "invalid TSS fault", /* 25 T_TSSFLT */
133 "segment not present fault", /* 26 T_SEGNPFLT */
134 "stack fault", /* 27 T_STKFLT */
135 "machine check trap", /* 28 T_MCHK */
136 "SIMD floating-point exception", /* 29 T_XMMFLT */
137 "reserved (unknown) fault", /* 30 T_RESERVED */
141 static int ddb_on_nmi = 1;
142 SYSCTL_INT(_machdep, OID_AUTO, ddb_on_nmi, CTLFLAG_RW,
143 &ddb_on_nmi, 0, "Go to DDB on NMI");
144 static int ddb_on_seg_fault = 0;
145 SYSCTL_INT(_machdep, OID_AUTO, ddb_on_seg_fault, CTLFLAG_RW,
146 &ddb_on_seg_fault, 0, "Go to DDB on user seg-fault");
147 static int freeze_on_seg_fault = 0;
148 SYSCTL_INT(_machdep, OID_AUTO, freeze_on_seg_fault, CTLFLAG_RW,
149 &freeze_on_seg_fault, 0, "Go to DDB on user seg-fault");
151 static int panic_on_nmi = 1;
152 SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
153 &panic_on_nmi, 0, "Panic on NMI");
154 static int fast_release;
155 SYSCTL_INT(_machdep, OID_AUTO, fast_release, CTLFLAG_RW,
156 &fast_release, 0, "Passive Release was optimal");
157 static int slow_release;
158 SYSCTL_INT(_machdep, OID_AUTO, slow_release, CTLFLAG_RW,
159 &slow_release, 0, "Passive Release was nonoptimal");
162 * System call debugging records the worst-case system call
163 * overhead (inclusive of blocking), but may be inaccurate.
165 /*#define SYSCALL_DEBUG*/
167 uint64_t SysCallsWorstCase[SYS_MAXSYSCALL];
171 * Passively intercepts the thread switch function to increase
172 * the thread priority from a user priority to a kernel priority, reducing
173 * syscall and trap overhead for the case where no switch occurs.
175 * Synchronizes td_ucred with p_ucred. This is used by system calls,
176 * signal handling, faults, AST traps, and anything else that enters the
177 * kernel from userland and provides the kernel with a stable read-only
178 * copy of the process ucred.
180 * To avoid races with another thread updating p_ucred we obtain p_spin.
181 * The other thread doing the update will obtain both p_token and p_spin.
182 * In the case where the cached cred pointer matches, we will already have
183 * the ref and we don't have to do one blessed thing.
186 userenter(struct thread *curtd, struct proc *curp)
191 curtd->td_release = lwkt_passive_release;
193 if (curtd->td_ucred != curp->p_ucred) {
194 spin_lock(&curp->p_spin);
195 ncred = crhold(curp->p_ucred);
196 spin_unlock(&curp->p_spin);
197 ocred = curtd->td_ucred;
198 curtd->td_ucred = ncred;
205 * Debugging, remove top two user stack pages to catch kernel faults
207 if (freeze_on_seg_fault > 1 && curtd->td_lwp) {
208 pmap_remove(vmspace_pmap(curtd->td_lwp->lwp_vmspace),
209 0x00007FFFFFFFD000LU,
210 0x0000800000000000LU);
216 * Handle signals, upcalls, profiling, and other AST's and/or tasks that
217 * must be completed before we can return to or try to return to userland.
219 * Note that td_sticks is a 64 bit quantity, but there's no point doing 64
220 * arithmatic on the delta calculation so the absolute tick values are
221 * truncated to an integer.
224 userret(struct lwp *lp, struct trapframe *frame, int sticks)
226 struct proc *p = lp->lwp_proc;
230 * Charge system time if profiling. Note: times are in microseconds.
231 * This may do a copyout and block, so do it first even though it
232 * means some system time will be charged as user time.
234 if (p->p_flags & P_PROFIL) {
235 addupc_task(p, frame->tf_rip,
236 (u_int)((int)lp->lwp_thread->td_sticks - sticks));
241 * Specific on-return-to-usermode checks (LWP_MP_WEXIT,
242 * LWP_MP_VNLRU, etc).
244 if (lp->lwp_mpflags & LWP_MP_URETMASK)
248 * Block here if we are in a stopped state.
250 if (p->p_stat == SSTOP || dump_stop_usertds) {
251 lwkt_gettoken(&p->p_token);
253 lwkt_reltoken(&p->p_token);
258 * Post any pending upcalls. If running a virtual kernel be sure
259 * to restore the virtual kernel's vmspace before posting the upcall.
261 if (p->p_flags & (P_SIGVTALRM | P_SIGPROF)) {
262 lwkt_gettoken(&p->p_token);
263 if (p->p_flags & P_SIGVTALRM) {
264 p->p_flags &= ~P_SIGVTALRM;
265 ksignal(p, SIGVTALRM);
267 if (p->p_flags & P_SIGPROF) {
268 p->p_flags &= ~P_SIGPROF;
271 lwkt_reltoken(&p->p_token);
276 * Post any pending signals. If running a virtual kernel be sure
277 * to restore the virtual kernel's vmspace before posting the signal.
279 * WARNING! postsig() can exit and not return.
281 if ((sig = CURSIG_TRACE(lp)) != 0) {
282 lwkt_gettoken(&p->p_token);
284 lwkt_reltoken(&p->p_token);
289 * block here if we are swapped out, but still process signals
290 * (such as SIGKILL). proc0 (the swapin scheduler) is already
291 * aware of our situation, we do not have to wake it up.
293 if (p->p_flags & P_SWAPPEDOUT) {
294 lwkt_gettoken(&p->p_token);
296 p->p_flags |= P_SWAPWAIT;
298 if (p->p_flags & P_SWAPWAIT)
299 tsleep(p, PCATCH, "SWOUT", 0);
300 p->p_flags &= ~P_SWAPWAIT;
302 lwkt_reltoken(&p->p_token);
307 * In a multi-threaded program it is possible for a thread to change
308 * signal state during a system call which temporarily changes the
309 * signal mask. In this case postsig() might not be run and we
310 * have to restore the mask ourselves.
312 if (lp->lwp_flags & LWP_OLDMASK) {
313 lp->lwp_flags &= ~LWP_OLDMASK;
314 lp->lwp_sigmask = lp->lwp_oldsigmask;
320 * Cleanup from userenter and any passive release that might have occured.
321 * We must reclaim the current-process designation before we can return
322 * to usermode. We also handle both LWKT and USER reschedule requests.
325 userexit(struct lwp *lp)
327 struct thread *td = lp->lwp_thread;
328 /* globaldata_t gd = td->td_gd; */
331 * Handle stop requests at kernel priority. Any requests queued
332 * after this loop will generate another AST.
334 while (lp->lwp_proc->p_stat == SSTOP) {
335 lwkt_gettoken(&lp->lwp_proc->p_token);
337 lwkt_reltoken(&lp->lwp_proc->p_token);
341 * Reduce our priority in preparation for a return to userland. If
342 * our passive release function was still in place, our priority was
343 * never raised and does not need to be reduced.
345 lwkt_passive_recover(td);
347 /* WARNING: we may have migrated cpu's */
348 /* gd = td->td_gd; */
351 * Become the current user scheduled process if we aren't already,
352 * and deal with reschedule requests and other factors.
354 lp->lwp_proc->p_usched->acquire_curproc(lp);
357 #if !defined(KTR_KERNENTRY)
358 #define KTR_KERNENTRY KTR_ALL
360 KTR_INFO_MASTER(kernentry);
361 KTR_INFO(KTR_KERNENTRY, kernentry, trap, 0,
362 "TRAP(pid %d, tid %d, trapno %ld, eva %lu)",
363 pid_t pid, lwpid_t tid, register_t trapno, vm_offset_t eva);
364 KTR_INFO(KTR_KERNENTRY, kernentry, trap_ret, 0, "TRAP_RET(pid %d, tid %d)",
365 pid_t pid, lwpid_t tid);
366 KTR_INFO(KTR_KERNENTRY, kernentry, syscall, 0, "SYSC(pid %d, tid %d, nr %ld)",
367 pid_t pid, lwpid_t tid, register_t trapno);
368 KTR_INFO(KTR_KERNENTRY, kernentry, syscall_ret, 0, "SYSRET(pid %d, tid %d, err %d)",
369 pid_t pid, lwpid_t tid, int err);
370 KTR_INFO(KTR_KERNENTRY, kernentry, fork_ret, 0, "FORKRET(pid %d, tid %d)",
371 pid_t pid, lwpid_t tid);
374 * Exception, fault, and trap interface to the kernel.
375 * This common code is called from assembly language IDT gate entry
376 * routines that prepare a suitable stack frame, and restore this
377 * frame after the exception has been processed.
379 * This function is also called from doreti in an interlock to handle ASTs.
380 * For example: hardwareint->INTROUTINE->(set ast)->doreti->trap
382 * NOTE! We have to retrieve the fault address prior to obtaining the
383 * MP lock because get_mplock() may switch out. YYY cr2 really ought
384 * to be retrieved by the assembly code, not here.
386 * XXX gd_trap_nesting_level currently prevents lwkt_switch() from panicing
387 * if an attempt is made to switch from a fast interrupt or IPI. This is
388 * necessary to properly take fatal kernel traps on SMP machines if
389 * get_mplock() has to block.
393 trap(struct trapframe *frame)
395 struct globaldata *gd = mycpu;
396 struct thread *td = gd->gd_curthread;
397 struct lwp *lp = td->td_lwp;
400 int i = 0, ucode = 0, type, code;
403 int crit_count = td->td_critcount;
404 lwkt_tokref_t curstop = td->td_toks_stop;
413 * We need to allow T_DNA faults when the debugger is active since
414 * some dumping paths do large bcopy() which use the floating
415 * point registers for faster copying.
417 if (db_active && frame->tf_trapno != T_DNA) {
418 eva = (frame->tf_trapno == T_PAGEFLT ? frame->tf_addr : 0);
419 ++gd->gd_trap_nesting_level;
420 MAKEMPSAFE(have_mplock);
421 trap_fatal(frame, eva);
422 --gd->gd_trap_nesting_level;
429 if ((frame->tf_rflags & PSL_I) == 0) {
431 * Buggy application or kernel code has disabled interrupts
432 * and then trapped. Enabling interrupts now is wrong, but
433 * it is better than running with interrupts disabled until
434 * they are accidentally enabled later.
436 type = frame->tf_trapno;
437 if (ISPL(frame->tf_cs) == SEL_UPL) {
438 MAKEMPSAFE(have_mplock);
439 /* JG curproc can be NULL */
441 "pid %ld (%s): trap %d with interrupts disabled\n",
442 (long)curproc->p_pid, curproc->p_comm, type);
443 } else if (type != T_NMI && type != T_BPTFLT &&
446 * XXX not quite right, since this may be for a
447 * multiple fault in user mode.
449 MAKEMPSAFE(have_mplock);
450 kprintf("kernel trap %d with interrupts disabled\n",
456 type = frame->tf_trapno;
457 code = frame->tf_err;
459 if (ISPL(frame->tf_cs) == SEL_UPL) {
462 KTR_LOG(kernentry_trap, p->p_pid, lp->lwp_tid,
463 frame->tf_trapno, eva);
467 sticks = (int)td->td_sticks;
468 KASSERT(lp->lwp_md.md_regs == frame,
469 ("Frame mismatch %p %p", lp->lwp_md.md_regs, frame));
472 case T_PRIVINFLT: /* privileged instruction fault */
477 case T_BPTFLT: /* bpt instruction fault */
478 case T_TRCTRAP: /* trace trap */
479 frame->tf_rflags &= ~PSL_T;
481 ucode = (type == T_TRCTRAP ? TRAP_TRACE : TRAP_BRKPT);
484 case T_ARITHTRAP: /* arithmetic trap */
489 case T_ASTFLT: /* Allow process switch */
490 mycpu->gd_cnt.v_soft++;
491 if (mycpu->gd_reqflags & RQF_AST_OWEUPC) {
492 atomic_clear_int(&mycpu->gd_reqflags,
494 addupc_task(p, p->p_prof.pr_addr,
499 case T_PROTFLT: /* general protection fault */
503 case T_STKFLT: /* stack fault */
504 case T_SEGNPFLT: /* segment not present fault */
508 case T_TSSFLT: /* invalid TSS fault */
509 case T_DOUBLEFLT: /* double fault */
515 case T_PAGEFLT: /* page fault */
516 i = trap_pfault(frame, TRUE);
517 if (frame->tf_rip == 0) {
518 kprintf("T_PAGEFLT: Warning %%rip == 0!\n");
520 while (freeze_on_seg_fault)
521 tsleep(p, 0, "freeze", hz * 20);
524 if (i == -1 || i == 0)
536 case T_DIVIDE: /* integer divide fault */
543 MAKEMPSAFE(have_mplock);
544 /* machine/parity/power fail/"kitchen sink" faults */
545 if (isa_nmi(code) == 0) {
548 * NMI can be hooked up to a pushbutton
552 kprintf ("NMI ... going to debugger\n");
553 kdb_trap(type, 0, frame);
557 } else if (panic_on_nmi)
558 panic("NMI indicates hardware failure");
560 #endif /* NISA > 0 */
562 case T_OFLOW: /* integer overflow fault */
567 case T_BOUND: /* bounds check fault */
574 * Virtual kernel intercept - pass the DNA exception
575 * to the virtual kernel if it asked to handle it.
576 * This occurs when the virtual kernel is holding
577 * onto the FP context for a different emulated
578 * process then the one currently running.
580 * We must still call npxdna() since we may have
581 * saved FP state that the virtual kernel needs
582 * to hand over to a different emulated process.
584 if (lp->lwp_vkernel && lp->lwp_vkernel->ve &&
585 (td->td_pcb->pcb_flags & FP_VIRTFP)
592 * The kernel may have switched out the FP unit's
593 * state, causing the user process to take a fault
594 * when it tries to use the FP unit. Restore the
600 ucode = FPE_FPU_NP_TRAP;
603 case T_FPOPFLT: /* FPU operand fetch fault */
608 case T_XMMFLT: /* SIMD floating-point exception */
617 case T_PAGEFLT: /* page fault */
618 trap_pfault(frame, FALSE);
623 * The kernel is apparently using fpu for copying.
624 * XXX this should be fatal unless the kernel has
625 * registered such use.
631 case T_STKFLT: /* stack fault */
634 case T_PROTFLT: /* general protection fault */
635 case T_SEGNPFLT: /* segment not present fault */
637 * Invalid segment selectors and out of bounds
638 * %rip's and %rsp's can be set up in user mode.
639 * This causes a fault in kernel mode when the
640 * kernel tries to return to user mode. We want
641 * to get this fault so that we can fix the
642 * problem here and not have to check all the
643 * selectors and pointers when the user changes
646 if (mycpu->gd_intr_nesting_level == 0) {
648 * NOTE: in 64-bit mode traps push rsp/ss
649 * even if no ring change occurs.
651 if (td->td_pcb->pcb_onfault &&
652 td->td_pcb->pcb_onfault_sp ==
654 frame->tf_rip = (register_t)
655 td->td_pcb->pcb_onfault;
658 if (frame->tf_rip == (long)doreti_iret) {
659 frame->tf_rip = (long)doreti_iret_fault;
667 * PSL_NT can be set in user mode and isn't cleared
668 * automatically when the kernel is entered. This
669 * causes a TSS fault when the kernel attempts to
670 * `iret' because the TSS link is uninitialized. We
671 * want to get this fault so that we can fix the
672 * problem here and not every time the kernel is
675 if (frame->tf_rflags & PSL_NT) {
676 frame->tf_rflags &= ~PSL_NT;
681 case T_TRCTRAP: /* trace trap */
683 if (frame->tf_rip == (int)IDTVEC(syscall)) {
685 * We've just entered system mode via the
686 * syscall lcall. Continue single stepping
687 * silently until the syscall handler has
692 if (frame->tf_rip == (int)IDTVEC(syscall) + 1) {
694 * The syscall handler has now saved the
695 * flags. Stop single stepping it.
697 frame->tf_rflags &= ~PSL_T;
703 * Ignore debug register trace traps due to
704 * accesses in the user's address space, which
705 * can happen under several conditions such as
706 * if a user sets a watchpoint on a buffer and
707 * then passes that buffer to a system call.
708 * We still want to get TRCTRAPS for addresses
709 * in kernel space because that is useful when
710 * debugging the kernel.
713 if (user_dbreg_trap()) {
715 * Reset breakpoint bits because the
718 /* XXX check upper bits here */
719 load_dr6(rdr6() & 0xfffffff0);
724 * FALLTHROUGH (TRCTRAP kernel mode, kernel address)
728 * If DDB is enabled, let it handle the debugger trap.
729 * Otherwise, debugger traps "can't happen".
733 MAKEMPSAFE(have_mplock);
734 if (kdb_trap(type, 0, frame))
741 MAKEMPSAFE(have_mplock);
742 /* machine/parity/power fail/"kitchen sink" faults */
743 if (isa_nmi(code) == 0) {
746 * NMI can be hooked up to a pushbutton
750 kprintf ("NMI ... going to debugger\n");
751 kdb_trap(type, 0, frame);
755 } else if (panic_on_nmi == 0)
758 #endif /* NISA > 0 */
760 MAKEMPSAFE(have_mplock);
761 trap_fatal(frame, 0);
766 * Virtual kernel intercept - if the fault is directly related to a
767 * VM context managed by a virtual kernel then let the virtual kernel
770 if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
771 vkernel_trap(lp, frame);
775 /* Translate fault for emulators (e.g. Linux) */
776 if (*p->p_sysent->sv_transtrap)
777 i = (*p->p_sysent->sv_transtrap)(i, type);
779 MAKEMPSAFE(have_mplock);
780 trapsignal(lp, i, ucode);
783 if (type <= MAX_TRAP_MSG) {
784 uprintf("fatal process exception: %s",
786 if ((type == T_PAGEFLT) || (type == T_PROTFLT))
787 uprintf(", fault VA = 0x%lx", frame->tf_addr);
793 userret(lp, frame, sticks);
798 if (p != NULL && lp != NULL)
799 KTR_LOG(kernentry_trap_ret, p->p_pid, lp->lwp_tid);
801 KASSERT(crit_count == td->td_critcount,
802 ("trap: critical section count mismatch! %d/%d",
803 crit_count, td->td_pri));
804 KASSERT(curstop == td->td_toks_stop,
805 ("trap: extra tokens held after trap! %ld/%ld",
806 curstop - &td->td_toks_base,
807 td->td_toks_stop - &td->td_toks_base));
812 trap_handle_userenter(struct thread *td)
814 userenter(td, td->td_proc);
818 trap_handle_userexit(struct trapframe *frame, int sticks)
820 struct lwp *lp = curthread->td_lwp;
823 userret(lp, frame, sticks);
829 trap_pfault(struct trapframe *frame, int usermode)
832 struct vmspace *vm = NULL;
837 thread_t td = curthread;
838 struct lwp *lp = td->td_lwp;
841 va = trunc_page(frame->tf_addr);
842 if (va >= VM_MIN_KERNEL_ADDRESS) {
844 * Don't allow user-mode faults in kernel address space.
855 * This is a fault on non-kernel virtual memory.
856 * vm is initialized above to NULL. If curproc is NULL
857 * or curproc->p_vmspace is NULL the fault is fatal.
860 vm = lp->lwp_vmspace;
869 * Debugging, try to catch kernel faults on the user address
870 * space when not inside on onfault (e.g. copyin/copyout)
873 if (usermode == 0 && (td->td_pcb == NULL ||
874 td->td_pcb->pcb_onfault == NULL)) {
876 if (freeze_on_seg_fault) {
877 kprintf("trap_pfault: user address fault from kernel mode "
878 "%016lx\n", (long)frame->tf_addr);
879 while (freeze_on_seg_fault)
880 tsleep(&freeze_on_seg_fault, 0, "frzseg", hz * 20);
888 * PGEX_I is defined only if the execute disable bit capability is
889 * supported and enabled.
891 if (frame->tf_err & PGEX_W)
892 ftype = VM_PROT_WRITE;
894 else if ((frame->tf_err & PGEX_I) && pg_nx != 0)
895 ftype = VM_PROT_EXECUTE;
898 ftype = VM_PROT_READ;
900 if (map != &kernel_map) {
902 * Keep swapout from messing with us during this
912 fault_flags |= VM_FAULT_BURST;
913 if (ftype & VM_PROT_WRITE)
914 fault_flags |= VM_FAULT_DIRTY;
916 fault_flags |= VM_FAULT_NORMAL;
917 rv = vm_fault(map, va, ftype, fault_flags);
922 * Don't have to worry about process locking or stacks in the
925 fault_flags = VM_FAULT_NORMAL;
926 rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
928 if (rv == KERN_SUCCESS)
933 * NOTE: in 64-bit mode traps push rsp/ss
934 * even if no ring change occurs.
936 if (td->td_pcb->pcb_onfault &&
937 td->td_pcb->pcb_onfault_sp == frame->tf_rsp &&
938 td->td_gd->gd_intr_nesting_level == 0) {
939 frame->tf_rip = (register_t)td->td_pcb->pcb_onfault;
942 trap_fatal(frame, frame->tf_addr);
947 * NOTE: on x86_64 we have a tf_addr field in the trapframe, no
948 * kludge is needed to pass the fault address to signal handlers.
952 if (td->td_lwp->lwp_vkernel == NULL) {
953 while (freeze_on_seg_fault) {
954 tsleep(p, 0, "freeze", hz * 20);
956 if (ddb_on_seg_fault)
957 Debugger("ddb_on_seg_fault");
961 return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
965 trap_fatal(struct trapframe *frame, vm_offset_t eva)
970 struct soft_segment_descriptor softseg;
973 code = frame->tf_err;
974 type = frame->tf_trapno;
975 sdtossd(&gdt[IDXSEL(frame->tf_cs & 0xffff)], &softseg);
977 if (type <= MAX_TRAP_MSG)
978 msg = trap_msg[type];
981 kprintf("\n\nFatal trap %d: %s while in %s mode\n", type, msg,
982 ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
983 /* three separate prints in case of a trap on an unmapped page */
984 kprintf("cpuid = %d; ", mycpu->gd_cpuid);
985 kprintf("lapic->id = %08x\n", lapic->id);
986 if (type == T_PAGEFLT) {
987 kprintf("fault virtual address = 0x%lx\n", eva);
988 kprintf("fault code = %s %s %s, %s\n",
989 code & PGEX_U ? "user" : "supervisor",
990 code & PGEX_W ? "write" : "read",
991 code & PGEX_I ? "instruction" : "data",
992 code & PGEX_P ? "protection violation" : "page not present");
994 kprintf("instruction pointer = 0x%lx:0x%lx\n",
995 frame->tf_cs & 0xffff, frame->tf_rip);
996 if (ISPL(frame->tf_cs) == SEL_UPL) {
997 ss = frame->tf_ss & 0xffff;
1001 * NOTE: in 64-bit mode traps push rsp/ss even if no ring
1004 ss = GSEL(GDATA_SEL, SEL_KPL);
1005 rsp = frame->tf_rsp;
1007 kprintf("stack pointer = 0x%x:0x%lx\n", ss, rsp);
1008 kprintf("frame pointer = 0x%x:0x%lx\n", ss, frame->tf_rbp);
1009 kprintf("code segment = base 0x%lx, limit 0x%lx, type 0x%x\n",
1010 softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
1011 kprintf(" = DPL %d, pres %d, long %d, def32 %d, gran %d\n",
1012 softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_long, softseg.ssd_def32,
1014 kprintf("processor eflags = ");
1015 if (frame->tf_rflags & PSL_T)
1016 kprintf("trace trap, ");
1017 if (frame->tf_rflags & PSL_I)
1018 kprintf("interrupt enabled, ");
1019 if (frame->tf_rflags & PSL_NT)
1020 kprintf("nested task, ");
1021 if (frame->tf_rflags & PSL_RF)
1022 kprintf("resume, ");
1023 kprintf("IOPL = %ld\n", (frame->tf_rflags & PSL_IOPL) >> 12);
1024 kprintf("current process = ");
1027 (u_long)curproc->p_pid);
1031 kprintf("current thread = pri %d ", curthread->td_pri);
1032 if (curthread->td_critcount)
1037 if ((debugger_on_panic || db_active) && kdb_trap(type, code, frame))
1040 kprintf("trap number = %d\n", type);
1041 if (type <= MAX_TRAP_MSG)
1042 panic("%s", trap_msg[type]);
1044 panic("unknown/reserved trap");
1048 * Double fault handler. Called when a fault occurs while writing
1049 * a frame for a trap/exception onto the stack. This usually occurs
1050 * when the stack overflows (such is the case with infinite recursion,
1055 in_kstack_guard(register_t rptr)
1057 thread_t td = curthread;
1059 if ((char *)rptr >= td->td_kstack &&
1060 (char *)rptr < td->td_kstack + PAGE_SIZE) {
1067 dblfault_handler(struct trapframe *frame)
1069 thread_t td = curthread;
1071 if (in_kstack_guard(frame->tf_rsp) || in_kstack_guard(frame->tf_rbp)) {
1072 kprintf("DOUBLE FAULT - KERNEL STACK GUARD HIT!\n");
1073 if (in_kstack_guard(frame->tf_rsp))
1074 frame->tf_rsp = (register_t)(td->td_kstack + PAGE_SIZE);
1075 if (in_kstack_guard(frame->tf_rbp))
1076 frame->tf_rbp = (register_t)(td->td_kstack + PAGE_SIZE);
1078 kprintf("DOUBLE FAULT\n");
1080 kprintf("\nFatal double fault\n");
1081 kprintf("rip = 0x%lx\n", frame->tf_rip);
1082 kprintf("rsp = 0x%lx\n", frame->tf_rsp);
1083 kprintf("rbp = 0x%lx\n", frame->tf_rbp);
1084 /* three separate prints in case of a trap on an unmapped page */
1085 kprintf("cpuid = %d; ", mycpu->gd_cpuid);
1086 kprintf("lapic->id = %08x\n", lapic->id);
1087 panic("double fault");
1091 * syscall2 - MP aware system call request C handler
1093 * A system call is essentially treated as a trap except that the
1094 * MP lock is not held on entry or return. We are responsible for
1095 * obtaining the MP lock if necessary and for handling ASTs
1096 * (e.g. a task switch) prior to return.
1101 syscall2(struct trapframe *frame)
1103 struct thread *td = curthread;
1104 struct proc *p = td->td_proc;
1105 struct lwp *lp = td->td_lwp;
1107 struct sysent *callp;
1108 register_t orig_tf_rflags;
1113 int crit_count = td->td_critcount;
1115 int have_mplock = 0;
1119 union sysunion args;
1120 register_t *argsdst;
1122 mycpu->gd_cnt.v_syscall++;
1125 if (ISPL(frame->tf_cs) != SEL_UPL) {
1132 KTR_LOG(kernentry_syscall, p->p_pid, lp->lwp_tid,
1135 userenter(td, p); /* lazy raise our priority */
1142 sticks = (int)td->td_sticks;
1143 orig_tf_rflags = frame->tf_rflags;
1146 * Virtual kernel intercept - if a VM context managed by a virtual
1147 * kernel issues a system call the virtual kernel handles it, not us.
1148 * Restore the virtual kernel context and return from its system
1149 * call. The current frame is copied out to the virtual kernel.
1151 if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
1152 vkernel_trap(lp, frame);
1153 error = EJUSTRETURN;
1158 * Get the system call parameters and account for time
1160 KASSERT(lp->lwp_md.md_regs == frame,
1161 ("Frame mismatch %p %p", lp->lwp_md.md_regs, frame));
1162 params = (caddr_t)frame->tf_rsp + sizeof(register_t);
1163 code = frame->tf_rax;
1165 if (p->p_sysent->sv_prepsyscall) {
1166 (*p->p_sysent->sv_prepsyscall)(
1167 frame, (int *)(&args.nosys.sysmsg + 1),
1170 if (code == SYS_syscall || code == SYS___syscall) {
1171 code = frame->tf_rdi;
1177 if (p->p_sysent->sv_mask)
1178 code &= p->p_sysent->sv_mask;
1180 if (code >= p->p_sysent->sv_size)
1181 callp = &p->p_sysent->sv_table[0];
1183 callp = &p->p_sysent->sv_table[code];
1185 narg = callp->sy_narg & SYF_ARGMASK;
1188 * On x86_64 we get up to six arguments in registers. The rest are
1189 * on the stack. The first six members of 'struct trapframe' happen
1190 * to be the registers used to pass arguments, in exactly the right
1193 argp = &frame->tf_rdi;
1195 argsdst = (register_t *)(&args.nosys.sysmsg + 1);
1197 * JG can we overflow the space pointed to by 'argsdst'
1198 * either with 'bcopy' or with 'copyin'?
1200 bcopy(argp, argsdst, sizeof(register_t) * regcnt);
1202 * copyin is MP aware, but the tracing code is not
1204 if (narg > regcnt) {
1205 KASSERT(params != NULL, ("copyin args with no params!"));
1206 error = copyin(params, &argsdst[regcnt],
1207 (narg - regcnt) * sizeof(register_t));
1210 if (KTRPOINT(td, KTR_SYSCALL)) {
1211 MAKEMPSAFE(have_mplock);
1213 ktrsyscall(lp, code, narg,
1214 (void *)(&args.nosys.sysmsg + 1));
1222 if (KTRPOINT(td, KTR_SYSCALL)) {
1223 MAKEMPSAFE(have_mplock);
1224 ktrsyscall(lp, code, narg, (void *)(&args.nosys.sysmsg + 1));
1229 * Default return value is 0 (will be copied to %rax). Double-value
1230 * returns use %rax and %rdx. %rdx is left unchanged for system
1231 * calls which return only one result.
1233 args.sysmsg_fds[0] = 0;
1234 args.sysmsg_fds[1] = frame->tf_rdx;
1237 * The syscall might manipulate the trap frame. If it does it
1238 * will probably return EJUSTRETURN.
1240 args.sysmsg_frame = frame;
1242 STOPEVENT(p, S_SCE, narg); /* MP aware */
1245 * NOTE: All system calls run MPSAFE now. The system call itself
1246 * is responsible for getting the MP lock.
1248 #ifdef SYSCALL_DEBUG
1249 uint64_t tscval = rdtsc();
1251 error = (*callp->sy_call)(&args);
1252 #ifdef SYSCALL_DEBUG
1253 tscval = rdtsc() - tscval;
1254 tscval = tscval * 1000000 / tsc_frequency;
1255 if (SysCallsWorstCase[code] < tscval)
1256 SysCallsWorstCase[code] = tscval;
1261 * MP SAFE (we may or may not have the MP lock at this point)
1263 //kprintf("SYSMSG %d ", error);
1267 * Reinitialize proc pointer `p' as it may be different
1268 * if this is a child returning from fork syscall.
1271 lp = curthread->td_lwp;
1272 frame->tf_rax = args.sysmsg_fds[0];
1273 frame->tf_rdx = args.sysmsg_fds[1];
1274 frame->tf_rflags &= ~PSL_C;
1278 * Reconstruct pc, we know that 'syscall' is 2 bytes.
1279 * We have to do a full context restore so that %r10
1280 * (which was holding the value of %rcx) is restored for
1281 * the next iteration.
1283 if (frame->tf_err != 0 && frame->tf_err != 2)
1284 kprintf("lp %s:%d frame->tf_err is weird %ld\n",
1285 td->td_comm, lp->lwp_proc->p_pid, frame->tf_err);
1286 frame->tf_rip -= frame->tf_err;
1287 frame->tf_r10 = frame->tf_rcx;
1292 panic("Unexpected EASYNC return value (for now)");
1295 if (p->p_sysent->sv_errsize) {
1296 if (error >= p->p_sysent->sv_errsize)
1297 error = -1; /* XXX */
1299 error = p->p_sysent->sv_errtbl[error];
1301 frame->tf_rax = error;
1302 frame->tf_rflags |= PSL_C;
1307 * Traced syscall. trapsignal() is not MP aware.
1309 if (orig_tf_rflags & PSL_T) {
1310 MAKEMPSAFE(have_mplock);
1311 frame->tf_rflags &= ~PSL_T;
1312 trapsignal(lp, SIGTRAP, TRAP_TRACE);
1316 * Handle reschedule and other end-of-syscall issues
1318 userret(lp, frame, sticks);
1321 if (KTRPOINT(td, KTR_SYSRET)) {
1322 MAKEMPSAFE(have_mplock);
1323 ktrsysret(lp, code, error, args.sysmsg_result);
1328 * This works because errno is findable through the
1329 * register set. If we ever support an emulation where this
1330 * is not the case, this code will need to be revisited.
1332 STOPEVENT(p, S_SCX, code);
1336 * Release the MP lock if we had to get it
1340 KTR_LOG(kernentry_syscall_ret, p->p_pid, lp->lwp_tid, error);
1342 KASSERT(crit_count == td->td_critcount,
1343 ("syscall: critical section count mismatch! %d/%d",
1344 crit_count, td->td_pri));
1345 KASSERT(&td->td_toks_base == td->td_toks_stop,
1346 ("syscall: extra tokens held after trap! %ld",
1347 td->td_toks_stop - &td->td_toks_base));
1352 * NOTE: mplock not held at any point
1355 fork_return(struct lwp *lp, struct trapframe *frame)
1357 frame->tf_rax = 0; /* Child returns zero */
1358 frame->tf_rflags &= ~PSL_C; /* success */
1361 generic_lwp_return(lp, frame);
1362 KTR_LOG(kernentry_fork_ret, lp->lwp_proc->p_pid, lp->lwp_tid);
1366 * Simplified back end of syscall(), used when returning from fork()
1367 * directly into user mode.
1369 * This code will return back into the fork trampoline code which then
1372 * NOTE: The mplock is not held at any point.
1375 generic_lwp_return(struct lwp *lp, struct trapframe *frame)
1377 struct proc *p = lp->lwp_proc;
1380 * Newly forked processes are given a kernel priority. We have to
1381 * adjust the priority to a normal user priority and fake entry
1382 * into the kernel (call userenter()) to install a passive release
1383 * function just in case userret() decides to stop the process. This
1384 * can occur when ^Z races a fork. If we do not install the passive
1385 * release function the current process designation will not be
1386 * released when the thread goes to sleep.
1388 lwkt_setpri_self(TDPRI_USER_NORM);
1389 userenter(lp->lwp_thread, p);
1390 userret(lp, frame, 0);
1392 if (KTRPOINT(lp->lwp_thread, KTR_SYSRET))
1393 ktrsysret(lp, SYS_fork, 0, 0);
1395 lp->lwp_flags |= LWP_PASSIVE_ACQ;
1397 lp->lwp_flags &= ~LWP_PASSIVE_ACQ;
1401 * If PGEX_FPFAULT is set then set FP_VIRTFP in the PCB to force a T_DNA
1402 * fault (which is then passed back to the virtual kernel) if an attempt is
1403 * made to use the FP unit.
1405 * XXX this is a fairly big hack.
1408 set_vkernel_fp(struct trapframe *frame)
1410 struct thread *td = curthread;
1412 if (frame->tf_xflags & PGEX_FPFAULT) {
1413 td->td_pcb->pcb_flags |= FP_VIRTFP;
1414 if (mdcpu->gd_npxthread == td)
1417 td->td_pcb->pcb_flags &= ~FP_VIRTFP;
1422 * Called from vkernel_trap() to fixup the vkernel's syscall
1423 * frame for vmspace_ctl() return.
1426 cpu_vkernel_trap(struct trapframe *frame, int error)
1428 frame->tf_rax = error;
1430 frame->tf_rflags |= PSL_C;
1432 frame->tf_rflags &= ~PSL_C;