/*
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 2008 The DragonFly Project.
 * Copyright (c) 2008 Jordan Gordeev.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $
 * $DragonFly: src/sys/platform/pc64/amd64/trap.c,v 1.2 2008/08/29 17:07:10 dillon Exp $
 */

/*
 * AMD64 Trap and System call handling
 */

#include "use_isa.h"
#include "opt_ddb.h"
#include "opt_ktrace.h"

#include <machine/frame.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/pioctl.h>
#include <sys/types.h>
#include <sys/signal2.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/upcall.h>
#include <sys/vkernel.h>
#include <sys/ktr.h>
#include <sys/ktrace.h>
#include <sys/sysmsg.h>
#include <sys/sysproto.h>
#include <sys/sysunion.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>

#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/thread.h>
#include <machine/vmparam.h>
#include <machine/md_var.h>

#include <ddb/ddb.h>

#ifdef SMP

#define MAKEMPSAFE(have_mplock)			\
	if (have_mplock == 0) {			\
		get_mplock();			\
		have_mplock = 1;		\
	}

#else

#define MAKEMPSAFE(have_mplock)

#endif
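
/*
 * Usage sketch (illustrative): callers start with have_mplock = 0,
 * invoke MAKEMPSAFE(have_mplock) on any path that needs the Big Giant
 * Lock, and then do a single
 *
 *	if (have_mplock)
 *		rel_mplock();
 *
 * on the way out (see the ends of trap() and syscall2() below), so the
 * lock is acquired at most once per trap or system call.
 */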

extern void trap(struct trapframe *frame);
extern void syscall2(struct trapframe *frame);

static int trap_pfault(struct trapframe *, int);
static void trap_fatal(struct trapframe *, vm_offset_t);
void dblfault_handler(struct trapframe *frame);

#define PCPU_GET(member) ((mycpu)->gd_##member)
#define PCPU_INC(member) ((mycpu)->gd_##member)++
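
/*
 * Illustrative expansion (straight token pasting of the macros above):
 * PCPU_GET(cpuid) becomes ((mycpu)->gd_cpuid) and
 * PCPU_INC(cnt.v_syscall) becomes ((mycpu)->gd_cnt.v_syscall)++,
 * mapping the FreeBSD-style accessors onto DragonFly's per-cpu
 * globaldata structure.
 */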

#define MAX_TRAP_MSG		30
static char *trap_msg[] = {
	"",					/*  0 unused */
	"privileged instruction fault",		/*  1 T_PRIVINFLT */
	"",					/*  2 unused */
	"breakpoint instruction fault",		/*  3 T_BPTFLT */
	"",					/*  4 unused */
	"",					/*  5 unused */
	"arithmetic trap",			/*  6 T_ARITHTRAP */
	"system forced exception",		/*  7 T_ASTFLT */
	"",					/*  8 unused */
	"general protection fault",		/*  9 T_PROTFLT */
	"trace trap",				/* 10 T_TRCTRAP */
	"",					/* 11 unused */
	"page fault",				/* 12 T_PAGEFLT */
	"",					/* 13 unused */
	"alignment fault",			/* 14 T_ALIGNFLT */
	"",					/* 15 unused */
	"",					/* 16 unused */
	"",					/* 17 unused */
	"integer divide fault",			/* 18 T_DIVIDE */
	"non-maskable interrupt trap",		/* 19 T_NMI */
	"overflow trap",			/* 20 T_OFLOW */
	"FPU bounds check fault",		/* 21 T_BOUND */
	"FPU device not available",		/* 22 T_DNA */
	"double fault",				/* 23 T_DOUBLEFLT */
	"FPU operand fetch fault",		/* 24 T_FPOPFLT */
	"invalid TSS fault",			/* 25 T_TSSFLT */
	"segment not present fault",		/* 26 T_SEGNPFLT */
	"stack fault",				/* 27 T_STKFLT */
	"machine check trap",			/* 28 T_MCHK */
	"SIMD floating-point exception",	/* 29 T_XMMFLT */
	"reserved (unknown) fault",		/* 30 T_RESERVED */
};

#ifdef DDB
static int ddb_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, ddb_on_nmi, CTLFLAG_RW,
	&ddb_on_nmi, 0, "Go to DDB on NMI");
#endif
static int panic_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
	&panic_on_nmi, 0, "Panic on NMI");
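
/*
 * Example (illustrative): both knobs are runtime-tunable, e.g.
 *
 *	sysctl machdep.panic_on_nmi=0
 *
 * lets a debug-button NMI drop into DDB (when machdep.ddb_on_nmi is
 * set) instead of panicking the machine.
 */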

static int fast_release;
SYSCTL_INT(_machdep, OID_AUTO, fast_release, CTLFLAG_RW,
	&fast_release, 0, "Passive Release was optimal");
static int slow_release;
SYSCTL_INT(_machdep, OID_AUTO, slow_release, CTLFLAG_RW,
	&slow_release, 0, "Passive Release was nonoptimal");

static int syscall_mpsafe = 1;
SYSCTL_INT(_kern, OID_AUTO, syscall_mpsafe, CTLFLAG_RW,
	&syscall_mpsafe, 0, "Allow MPSAFE marked syscalls to run without BGL");
TUNABLE_INT("kern.syscall_mpsafe", &syscall_mpsafe);
static int trap_mpsafe = 1;
SYSCTL_INT(_kern, OID_AUTO, trap_mpsafe, CTLFLAG_RW,
	&trap_mpsafe, 0, "Allow traps to mostly run without the BGL");
TUNABLE_INT("kern.trap_mpsafe", &trap_mpsafe);
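
/*
 * Example (illustrative): TUNABLE_INT also makes these settable as
 * boot-time tunables, e.g. kern.syscall_mpsafe="0" in loader.conf,
 * which seeds the value before sysctl is available.
 */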

/*
 * Passive USER->KERNEL transition.  This only occurs if we block in the
 * kernel while still holding our userland priority.  We have to fixup our
 * priority in order to avoid potential deadlocks before we allow the system
 * to switch us to another thread.
 */
static void
passive_release(struct thread *td)
{
	struct lwp *lp = td->td_lwp;

	td->td_release = NULL;
	lwkt_setpri_self(TDPRI_KERN_USER);
	lp->lwp_proc->p_usched->release_curproc(lp);
}

/*
 * userenter() passively intercepts the thread switch function to increase
 * the thread priority from a user priority to a kernel priority, reducing
 * syscall and trap overhead for the case where no switch occurs.
 */
static __inline void
userenter(struct thread *curtd)
{
	curtd->td_release = passive_release;
}
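
/*
 * Illustrative pairing (a sketch of the fast path the two functions
 * optimize):
 *
 *	userenter(td);		install passive_release as td_release
 *	... syscall/trap ...	if we never block, the hook never fires
 *	userexit(lp);		td_release still set -> priority was
 *				never raised, nothing to undo
 *
 * Only when the thread actually switches does passive_release() run
 * and pay the cost of the priority fixup and curproc release.
 */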

/*
 * Handle signals, upcalls, profiling, and other AST's and/or tasks that
 * must be completed before we can return to or try to return to userland.
 *
 * Note that td_sticks is a 64 bit quantity, but there's no point doing
 * 64 bit arithmetic on the delta calculation so the absolute tick values
 * are truncated to an integer.
 */
static void
userret(struct lwp *lp, struct trapframe *frame, int sticks)
{
	struct proc *p = lp->lwp_proc;
	int sig;

	/*
	 * Charge system time if profiling.  Note: times are in microseconds.
	 * This may do a copyout and block, so do it first even though it
	 * means some system time will be charged as user time.
	 */
	if (p->p_flag & P_PROFIL) {
		addupc_task(p, frame->tf_rip,
			(u_int)((int)lp->lwp_thread->td_sticks - sticks));
	}

recheck:
	/*
	 * If the jungle wants us dead, so be it.
	 */
	if (lp->lwp_flag & LWP_WEXIT) {
		get_mplock();
		lwp_exit(0);
		rel_mplock();	/* NOT REACHED */
	}

	/*
	 * Block here if we are in a stopped state.
	 */
	if (p->p_stat == SSTOP) {
		get_mplock();
		tstop();
		rel_mplock();
		goto recheck;
	}

	/*
	 * Post any pending upcalls.  If running a virtual kernel be sure
	 * to restore the virtual kernel's vmspace before posting the upcall.
	 */
	if (p->p_flag & P_UPCALLPEND) {
		p->p_flag &= ~P_UPCALLPEND;
		get_mplock();
		postupcall(lp);
		rel_mplock();
		goto recheck;
	}

	/*
	 * Post any pending signals.  If running a virtual kernel be sure
	 * to restore the virtual kernel's vmspace before posting the signal.
	 */
	if ((sig = CURSIG(lp)) != 0) {
		get_mplock();
		postsig(sig);
		rel_mplock();
		goto recheck;
	}

	/*
	 * Block here if we are swapped out, but still process signals
	 * (such as SIGKILL).  proc0 (the swapin scheduler) is already
	 * aware of our situation, we do not have to wake it up.
	 */
	if (p->p_flag & P_SWAPPEDOUT) {
		get_mplock();
		p->p_flag |= P_SWAPWAIT;
		swapin_request();
		if (p->p_flag & P_SWAPWAIT)
			tsleep(p, PCATCH, "SWOUT", 0);
		p->p_flag &= ~P_SWAPWAIT;
		rel_mplock();
		goto recheck;
	}

	/*
	 * Make sure postsig() handled the request to restore the old
	 * signal mask after running the signal handler.
	 */
	KKASSERT((lp->lwp_flag & LWP_OLDMASK) == 0);
}

/*
 * Cleanup from userenter and any passive release that might have occurred.
 * We must reclaim the current-process designation before we can return
 * to usermode.  We also handle both LWKT and USER reschedule requests.
 */
static __inline void
userexit(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;
	globaldata_t gd = td->td_gd;

	/*
	 * If a user reschedule is requested force a new process to be
	 * chosen by releasing the current process.  Our process will only
	 * be chosen again if it has a considerably better priority.
	 */
	if (user_resched_wanted())
		lp->lwp_proc->p_usched->release_curproc(lp);

	/*
	 * Handle a LWKT reschedule request first.  Since our passive release
	 * is still in place we do not have to do anything special.
	 */
	while (lwkt_resched_wanted()) {
		lwkt_switch();

		/*
		 * The thread that preempted us may have stopped our process.
		 */
		while (lp->lwp_proc->p_stat == SSTOP) {
			get_mplock();
			tstop();
			rel_mplock();
		}
	}

	/*
	 * Acquire the current process designation for this user scheduler
	 * on this cpu.  This will also handle any user-reschedule requests.
	 */
	lp->lwp_proc->p_usched->acquire_curproc(lp);
	/* We may have switched cpus on acquisition */
	gd = td->td_gd;

	/*
	 * Reduce our priority in preparation for a return to userland.  If
	 * our passive release function was still in place, our priority was
	 * never raised and does not need to be reduced.
	 */
	if (td->td_release == NULL)
		lwkt_setpri_self(TDPRI_USER_NORM);
	td->td_release = NULL;

	/*
	 * After reducing our priority there might be other kernel-level
	 * LWKTs that now have a greater priority.  Run them as necessary.
	 * We don't have to worry about losing cpu to userland because
	 * we still control the current-process designation and we no longer
	 * have a passive release function installed.
	 */
	if (lwkt_checkpri_self())
		lwkt_switch();
}

/*
 * Exception, fault, and trap interface to the kernel.
 * This common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed.
 *
 * This function is also called from doreti in an interlock to handle ASTs.
 * For example:  hardwareint->INTROUTINE->(set ast)->doreti->trap
 *
 * NOTE!  We have to retrieve the fault address prior to obtaining the
 * MP lock because get_mplock() may switch out.  YYY cr2 really ought
 * to be retrieved by the assembly code, not here.
 *
 * XXX gd_trap_nesting_level currently prevents lwkt_switch() from panicking
 * if an attempt is made to switch from a fast interrupt or IPI.  This is
 * necessary to properly take fatal kernel traps on SMP machines if
 * get_mplock() has to block.
 */
void
trap(struct trapframe *frame)
{
	struct globaldata *gd = mycpu;
	struct thread *td = gd->gd_curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p;
	int sticks = 0;
	int i = 0, ucode = 0, type, code;
#ifdef SMP
	int have_mplock = 0;
#endif
#ifdef INVARIANTS
	int crit_count = td->td_pri & ~TDPRI_MASK;
#endif
	vm_offset_t eva;

	p = td->td_proc;
	kprintf0("\"%s\" type=%ld\n",
		trap_msg[frame->tf_trapno], frame->tf_trapno);
	kprintf0(" rip=%lx rsp=%lx\n", frame->tf_rip, frame->tf_rsp);
	kprintf0(" err=%lx addr=%lx\n", frame->tf_err, frame->tf_addr);
	kprintf0(" cs=%lx ss=%lx rflags=%lx\n",
		(unsigned long)frame->tf_cs, (unsigned long)frame->tf_ss,
		frame->tf_rflags);

	++gd->gd_trap_nesting_level;
	MAKEMPSAFE(have_mplock);
	trap_fatal(frame, frame->tf_addr);
	--gd->gd_trap_nesting_level;

#ifdef DDB
	if (db_active) {
		eva = (frame->tf_trapno == T_PAGEFLT ? frame->tf_addr : 0);
		++gd->gd_trap_nesting_level;
		MAKEMPSAFE(have_mplock);
		trap_fatal(frame, eva);
		--gd->gd_trap_nesting_level;
		goto out2;
	}
#endif

	if (trap_mpsafe == 0) {
		++gd->gd_trap_nesting_level;
		MAKEMPSAFE(have_mplock);
		--gd->gd_trap_nesting_level;
	}

	if ((frame->tf_rflags & PSL_I) == 0) {
		/*
		 * Buggy application or kernel code has disabled interrupts
		 * and then trapped.  Enabling interrupts now is wrong, but
		 * it is better than running with interrupts disabled until
		 * they are accidentally enabled later.
		 */
		type = frame->tf_trapno;
		if (ISPL(frame->tf_cs) == SEL_UPL) {
			MAKEMPSAFE(have_mplock);
			/* JG curproc can be NULL */
			kprintf(
			    "pid %ld (%s): trap %d with interrupts disabled\n",
			    (long)curproc->p_pid, curproc->p_comm, type);
		} else if (type != T_NMI && type != T_BPTFLT &&
			   type != T_TRCTRAP) {
			/*
			 * XXX not quite right, since this may be for a
			 * multiple fault in user mode.
			 */
			MAKEMPSAFE(have_mplock);
			kprintf("kernel trap %d with interrupts disabled\n",
			    type);
		}
		cpu_enable_intr();
	}

	type = frame->tf_trapno;
	code = frame->tf_err;

	if (ISPL(frame->tf_cs) == SEL_UPL) {
		/* user trap */

		KTR_LOG(kernentry_trap, p->p_pid, lp->lwp_tid,
			frame->tf_trapno, eva);

		userenter(td);

		sticks = (int)td->td_sticks;
		lp->lwp_md.md_regs = frame;

		switch (type) {
		case T_PRIVINFLT:	/* privileged instruction fault */
			ucode = type;
			i = SIGILL;
			break;

		case T_BPTFLT:		/* bpt instruction fault */
		case T_TRCTRAP:		/* trace trap */
			frame->tf_rflags &= ~PSL_T;
			i = SIGTRAP;
			break;

		case T_ARITHTRAP:	/* arithmetic trap */
			ucode = code;
			i = SIGFPE;
			break;

		case T_ASTFLT:		/* Allow process switch */
			mycpu->gd_cnt.v_soft++;
			if (mycpu->gd_reqflags & RQF_AST_OWEUPC) {
				atomic_clear_int_nonlocked(&mycpu->gd_reqflags,
					RQF_AST_OWEUPC);
				addupc_task(p, p->p_prof.pr_addr,
					    p->p_prof.pr_ticks);
			}
			goto out;

		case T_PROTFLT:		/* general protection fault */
		case T_SEGNPFLT:	/* segment not present fault */
		case T_TSSFLT:		/* invalid TSS fault */
		case T_DOUBLEFLT:	/* double fault */
		default:
			ucode = code + BUS_SEGM_FAULT;
			i = SIGBUS;
			break;

		case T_PAGEFLT:		/* page fault */
			MAKEMPSAFE(have_mplock);
			i = trap_pfault(frame, TRUE);
			kprintf("TRAP_PFAULT %d\n", i);
			if (frame->tf_rip == 0)
				kprintf("rip == 0\n");
			if (i == -1)
				goto out;
			if (i == 0)
				goto out;

			ucode = T_PAGEFLT;
			i = SIGSEGV;
			break;

		case T_DIVIDE:		/* integer divide fault */
			ucode = FPE_INTDIV;
			i = SIGFPE;
			break;
#if NISA > 0
		case T_NMI:
			MAKEMPSAFE(have_mplock);
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					kprintf("NMI ... going to debugger\n");
					kdb_trap(type, 0, frame);
				}
#endif /* DDB */
				goto out2;
			} else if (panic_on_nmi)
				panic("NMI indicates hardware failure");
			break;
#endif

		case T_OFLOW:		/* integer overflow fault */
			ucode = FPE_INTOVF;
			i = SIGFPE;
			break;

		case T_BOUND:		/* bounds check fault */
			ucode = FPE_FLTSUB;
			i = SIGFPE;
			break;

		case T_DNA:
			/*
			 * Virtual kernel intercept - pass the DNA exception
			 * to the virtual kernel if it asked to handle it.
			 * This occurs when the virtual kernel is holding
			 * onto the FP context for a different emulated
			 * process than the one currently running.
			 *
			 * We must still call npxdna() since we may have
			 * saved FP state that the virtual kernel needs
			 * to hand over to a different emulated process.
			 */
			if (lp->lwp_vkernel && lp->lwp_vkernel->ve &&
			    (td->td_pcb->pcb_flags & FP_VIRTFP)) {
				npxdna();
				break;
			}

			/*
			 * The kernel may have switched out the FP unit's
			 * state, causing the user process to take a fault
			 * when it tries to use the FP unit.  Restore the
			 * state here.
			 */
			if (npxdna())
				goto out;
			i = SIGFPE;
			ucode = FPE_FPU_NP_TRAP;
			break;

		case T_FPOPFLT:		/* FPU operand fetch fault */
			ucode = T_FPOPFLT;
			i = SIGILL;
			break;

		case T_XMMFLT:		/* SIMD floating-point exception */
			ucode = 0; /* XXX */
			i = SIGFPE;
			break;
		}
	} else {
		/* kernel trap */

		switch (type) {
		case T_PAGEFLT:		/* page fault */
			MAKEMPSAFE(have_mplock);
			trap_pfault(frame, FALSE);
			goto out2;

		case T_DNA:
			/*
			 * The kernel is apparently using fpu for copying.
			 * XXX this should be fatal unless the kernel has
			 * registered such use.
			 */
			if (npxdna())
				goto out2;
			break;

		case T_STKFLT:		/* stack fault */
			break;

		case T_PROTFLT:		/* general protection fault */
		case T_SEGNPFLT:	/* segment not present fault */
			/*
			 * Invalid segment selectors and out of bounds
			 * %rip's and %rsp's can be set up in user mode.
			 * This causes a fault in kernel mode when the
			 * kernel tries to return to user mode.  We want
			 * to get this fault so that we can fix the
			 * problem here and not have to check all the
			 * selectors and pointers when the user changes
			 * them.
			 */
			kprintf0("trap.c line %d\n", __LINE__);
			if (mycpu->gd_intr_nesting_level == 0) {
				if (td->td_pcb->pcb_onfault) {
					frame->tf_rip = (register_t)
						td->td_pcb->pcb_onfault;
					goto out2;
				}
			}
			break;

		case T_TSSFLT:
			/*
			 * PSL_NT can be set in user mode and isn't cleared
			 * automatically when the kernel is entered.  This
			 * causes a TSS fault when the kernel attempts to
			 * `iret' because the TSS link is uninitialized.  We
			 * want to get this fault so that we can fix the
			 * problem here and not every time the kernel is
			 * entered.
			 */
			if (frame->tf_rflags & PSL_NT) {
				frame->tf_rflags &= ~PSL_NT;
				goto out2;
			}
			break;

		case T_TRCTRAP:		/* trace trap */
			if (frame->tf_rip == (register_t)IDTVEC(syscall)) {
				/*
				 * We've just entered system mode via the
				 * syscall lcall.  Continue single stepping
				 * silently until the syscall handler has
				 * saved the flags.
				 */
				goto out2;
			}
			if (frame->tf_rip == (register_t)IDTVEC(syscall) + 1) {
				/*
				 * The syscall handler has now saved the
				 * flags.  Stop single stepping it.
				 */
				frame->tf_rflags &= ~PSL_T;
				goto out2;
			}

			/*
			 * Ignore debug register trace traps due to
			 * accesses in the user's address space, which
			 * can happen under several conditions such as
			 * if a user sets a watchpoint on a buffer and
			 * then passes that buffer to a system call.
			 * We still want to get TRCTRAPS for addresses
			 * in kernel space because that is useful when
			 * debugging the kernel.
			 */
			if (user_dbreg_trap()) {
				/*
				 * Reset breakpoint bits because the
				 * processor doesn't.
				 */
				/* XXX check upper bits here */
				load_dr6(rdr6() & 0xfffffff0);
				goto out2;
			}

			/*
			 * FALLTHROUGH (TRCTRAP kernel mode, kernel address)
			 */
		case T_BPTFLT:
			/*
			 * If DDB is enabled, let it handle the debugger trap.
			 * Otherwise, debugger traps "can't happen".
			 */
#ifdef DDB
			MAKEMPSAFE(have_mplock);
			if (kdb_trap(type, 0, frame))
				goto out2;
#endif
			break;
#if NISA > 0
		case T_NMI:
			MAKEMPSAFE(have_mplock);
			/* machine/parity/power fail/"kitchen sink" faults */

			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					kprintf("NMI ... going to debugger\n");
					kdb_trap(type, 0, frame);
				}
#endif /* DDB */
				goto out2;
			} else if (panic_on_nmi == 0)
				goto out2;
			/* FALL THROUGH */
#endif /* NISA > 0 */

		default:
			MAKEMPSAFE(have_mplock);
			trap_fatal(frame, 0);
			goto out2;
		}
	}

	/*
	 * Virtual kernel intercept - if the fault is directly related to a
	 * VM context managed by a virtual kernel then let the virtual kernel
	 * handle it.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		vkernel_trap(lp, frame);
		goto out;
	}

	/*
	 * Translate fault for emulators (e.g. Linux)
	 */
	if (*p->p_sysent->sv_transtrap)
		i = (*p->p_sysent->sv_transtrap)(i, type);

	MAKEMPSAFE(have_mplock);
	trapsignal(lp, i, ucode);

#ifdef DEBUG
	if (type <= MAX_TRAP_MSG) {
		uprintf("fatal process exception: %s",
			trap_msg[type]);
		if ((type == T_PAGEFLT) || (type == T_PROTFLT))
			uprintf(", fault VA = 0x%lx", frame->tf_addr);
		uprintf("\n");
	}
#endif

out:
#ifdef SMP
	if (ISPL(frame->tf_cs) == SEL_UPL) {
		KASSERT(td->td_mpcount == have_mplock,
			("badmpcount trap/end from %p", (void *)frame->tf_rip));
	}
#endif
	userret(lp, frame, sticks);
	userexit(lp);
out2:	;
#ifdef SMP
	if (have_mplock)
		rel_mplock();
#endif
	if (p != NULL && lp != NULL)
		KTR_LOG(kernentry_trap_ret, p->p_pid, lp->lwp_tid);
#ifdef INVARIANTS
	KASSERT(crit_count == (td->td_pri & ~TDPRI_MASK),
		("trap: critical section count mismatch! %d/%d",
		crit_count / TDPRI_CRIT, td->td_pri / TDPRI_CRIT));
#endif
}

static int
trap_pfault(struct trapframe *frame, int usermode)
{
	vm_offset_t va;
	struct vmspace *vm = NULL;
	vm_map_t map;
	int rv = 0;
	vm_prot_t ftype;
	thread_t td = curthread;
	struct lwp *lp = td->td_lwp;

	va = trunc_page(frame->tf_addr);
	if (va >= VM_MIN_KERNEL_ADDRESS) {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 */
		if (usermode)
			goto nogo;

		map = &kernel_map;
	} else {
		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL. If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */
		if (lp != NULL)
			vm = lp->lwp_vmspace;

		if (vm == NULL)
			goto nogo;

		map = &vm->vm_map;
	}

	/*
	 * PGEX_I is defined only if the execute disable bit capability is
	 * supported and enabled.
	 */
	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_WRITE;
	else if ((frame->tf_err & PGEX_I) && pg_nx != 0)
		ftype = VM_PROT_EXECUTE;
	else
		ftype = VM_PROT_READ;

	if (map != &kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		PHOLD(lp->lwp_proc);

		/*
		 * Grow the stack if necessary
		 */
		/* grow_stack returns false only if va falls into
		 * a growable stack region and the stack growth
		 * fails.  It returns true if va was not within
		 * a growable stack region, or if the stack
		 * growth succeeded.
		 */
		if (!grow_stack(lp->lwp_proc, va)) {
			rv = KERN_FAILURE;
			PRELE(lp->lwp_proc);
			goto nogo;
		}

		/* Fault in the user page: */
		rv = vm_fault(map, va, ftype,
			      (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
						      : VM_FAULT_NORMAL);

		PRELE(lp->lwp_proc);
	} else {
		/*
		 * Don't have to worry about process locking or stacks
		 * in the kernel.
		 */
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	}

	if (rv == KERN_SUCCESS)
		return (0);
nogo:
	if (!usermode) {
		if (td->td_gd->gd_intr_nesting_level == 0 &&
		    td->td_pcb->pcb_onfault) {
			frame->tf_rip = (register_t)td->td_pcb->pcb_onfault;
			return (0);
		}
		trap_fatal(frame, frame->tf_addr);
		return (-1);
	}

	/*
	 * NOTE: on amd64 we have a tf_addr field in the trapframe, no
	 * kludge is needed to pass the fault address to signal handlers.
	 */

	return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}

static void
trap_fatal(struct trapframe *frame, vm_offset_t eva)
{
	int code, type, ss;
	long rsp;
	struct soft_segment_descriptor softseg;
	char *msg;

	code = frame->tf_err;
	type = frame->tf_trapno;
	sdtossd(&gdt[IDXSEL(frame->tf_cs & 0xffff)], &softseg);

	if (type <= MAX_TRAP_MSG)
		msg = trap_msg[type];
	else
		msg = "UNKNOWN";
	kprintf("\n\nFatal trap %d: %s while in %s mode\n", type, msg,
	    ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
#ifdef SMP
	/* two separate prints in case of a trap on an unmapped page */
	kprintf("cpuid = %d; ", PCPU_GET(cpuid));
	kprintf("apic id = %02x\n", PCPU_GET(apic_id));
#endif
	if (type == T_PAGEFLT) {
		kprintf("fault virtual address = 0x%lx\n", eva);
		kprintf("fault code = %s %s %s, %s\n",
			code & PGEX_U ? "user" : "supervisor",
			code & PGEX_W ? "write" : "read",
			code & PGEX_I ? "instruction" : "data",
			code & PGEX_P ? "protection violation" : "page not present");
	}
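	/*
	 * Example (illustrative): a kernel store to an unmapped page
	 * would print
	 *
	 *	fault code = supervisor write data, page not present
	 */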
	kprintf("instruction pointer = 0x%lx:0x%lx\n",
		frame->tf_cs & 0xffff, frame->tf_rip);
	if (ISPL(frame->tf_cs) == SEL_UPL) {
		ss = frame->tf_ss & 0xffff;
		rsp = frame->tf_rsp;
	} else {
		ss = GSEL(GDATA_SEL, SEL_KPL);
		rsp = (long)&frame->tf_rsp;
	}
	kprintf("stack pointer = 0x%x:0x%lx\n", ss, rsp);
	kprintf("frame pointer = 0x%x:0x%lx\n", ss, frame->tf_rbp);
	kprintf("code segment = base 0x%lx, limit 0x%lx, type 0x%x\n",
		softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
	kprintf(" = DPL %d, pres %d, long %d, def32 %d, gran %d\n",
		softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_long,
		softseg.ssd_def32, softseg.ssd_gran);
	kprintf("processor eflags = ");
	if (frame->tf_rflags & PSL_T)
		kprintf("trace trap, ");
	if (frame->tf_rflags & PSL_I)
		kprintf("interrupt enabled, ");
	if (frame->tf_rflags & PSL_NT)
		kprintf("nested task, ");
	if (frame->tf_rflags & PSL_RF)
		kprintf("resume, ");
	kprintf("IOPL = %ld\n", (frame->tf_rflags & PSL_IOPL) >> 12);
	kprintf("current process = ");
	if (curproc) {
		kprintf("%lu\n",
		    (u_long)curproc->p_pid);
	} else {
		kprintf("Idle\n");
	}
	kprintf("current thread = pri %d ", curthread->td_pri);
	if (curthread->td_pri >= TDPRI_CRIT)
		kprintf("(CRIT)");
	kprintf("\n");

#ifdef DDB
	if ((debugger_on_panic || db_active) && kdb_trap(type, code, frame))
		return;
#endif
	kprintf("trap number = %d\n", type);
	if (type <= MAX_TRAP_MSG)
		panic("%s", trap_msg[type]);
	else
		panic("unknown/reserved trap");
}

/*
 * Double fault handler. Called when a fault occurs while writing
 * a frame for a trap/exception onto the stack. This usually occurs
 * when the stack overflows (such is the case with infinite recursion,
 * for example).
 */
void
dblfault_handler(struct trapframe *frame)
{
	kprintf0("DOUBLE FAULT\n");
	kprintf("\nFatal double fault\n");
	kprintf("rip = 0x%lx\n", frame->tf_rip);
	kprintf("rsp = 0x%lx\n", frame->tf_rsp);
	kprintf("rbp = 0x%lx\n", frame->tf_rbp);
#ifdef SMP
	/* two separate prints in case of a trap on an unmapped page */
	kprintf("cpuid = %d; ", PCPU_GET(cpuid));
	kprintf("apic id = %02x\n", PCPU_GET(apic_id));
#endif
	panic("double fault");
}

/*
 * syscall2 -	MP aware system call request C handler
 *
 * A system call is essentially treated as a trap except that the
 * MP lock is not held on entry or return.  We are responsible for
 * obtaining the MP lock if necessary and for handling ASTs
 * (e.g. a task switch) prior to return.
 *
 * In general, only simple access and manipulation of curproc and
 * the current stack is allowed without having to hold MP lock.
 *
 * MPSAFE - note that large sections of this routine are run without
 *	    the MP lock.
 */
void
syscall2(struct trapframe *frame)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;
	caddr_t params;
	struct sysent *callp;
	register_t orig_tf_rflags;
	int sticks;
	int error;
	int narg;
#ifdef INVARIANTS
	int crit_count = td->td_pri & ~TDPRI_MASK;
#endif
#ifdef SMP
	int have_mplock = 0;
#endif
	u_int code;
	int reg, regcnt;
	register_t *argp;
	union sysunion args;
	register_t *argsdst;

	kprintf0("SYSCALL rip = %016lx\n", frame->tf_rip);

	PCPU_INC(cnt.v_syscall);

	kprintf0("\033[31mSYSCALL %ld\033[39m\n", frame->tf_rax);

	if (ISPL(frame->tf_cs) != SEL_UPL) {
		panic("syscall from kernel mode");
	}

	KTR_LOG(kernentry_syscall, p->p_pid, lp->lwp_tid,
		frame->tf_rax);

#ifdef SMP
	KASSERT(td->td_mpcount == 0,
		("badmpcount syscall2 from %p", (void *)frame->tf_rip));
	if (syscall_mpsafe == 0)
		MAKEMPSAFE(have_mplock);
#endif

	userenter(td);	/* lazy raise our priority */

	reg = 0;
	regcnt = 6;

	sticks = (int)td->td_sticks;
	orig_tf_rflags = frame->tf_rflags;

	/*
	 * Virtual kernel intercept - if a VM context managed by a virtual
	 * kernel issues a system call the virtual kernel handles it, not us.
	 * Restore the virtual kernel context and return from its system
	 * call.  The current frame is copied out to the virtual kernel.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		error = vkernel_trap(lp, frame);
		frame->tf_rax = error;
		if (error)
			frame->tf_rflags |= PSL_C;
		error = EJUSTRETURN;
		goto out;
	}

	/*
	 * Get the system call parameters and account for time
	 */
	lp->lwp_md.md_regs = frame;
	params = (caddr_t)frame->tf_rsp + sizeof(register_t);
	code = frame->tf_rax;
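	/*
	 * Illustrative note (assumes the usual libc syscall stub): tf_rsp
	 * points at the return address the stub pushed, so 'params' lands
	 * on the seventh and subsequent arguments, which the ABI spills to
	 * the user stack; the copyin() below fetches them when
	 * narg > regcnt.
	 */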

	if (p->p_sysent->sv_prepsyscall) {
		(*p->p_sysent->sv_prepsyscall)(
			frame, (int *)(&args.nosys.sysmsg + 1),
			&code, &params);
	} else {
		if (code == SYS_syscall || code == SYS___syscall) {
			code = frame->tf_rdi;
			reg++;
			regcnt--;
		}
	}

	if (p->p_sysent->sv_mask)
		code &= p->p_sysent->sv_mask;

	if (code >= p->p_sysent->sv_size)
		callp = &p->p_sysent->sv_table[0];
	else
		callp = &p->p_sysent->sv_table[code];

	narg = callp->sy_narg & SYF_ARGMASK;

	/*
	 * On amd64 we get up to six arguments in registers.  The rest are
	 * on the stack.  The first six members of 'struct trapframe' happen
	 * to be the registers used to pass arguments, in exactly the right
	 * order.
	 */
	argp = &frame->tf_rdi;
	argp += reg;
	argsdst = (register_t *)(&args.nosys.sysmsg + 1);
	/*
	 * JG can we overflow the space pointed to by 'argsdst'
	 * either with 'bcopy' or with 'copyin'?
	 */
	bcopy(argp, argsdst, sizeof(register_t) * regcnt);
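	/*
	 * Illustrative layout (follows from the comment above plus the
	 * SysV AMD64 convention): args 1-6 arrive in tf_rdi, tf_rsi,
	 * tf_rdx, tf_rcx, tf_r8 and tf_r9.  Userland passes arg 4 in %r10
	 * because SYSCALL clobbers %rcx; the assumption here is that the
	 * syscall entry stub stores %r10 into the tf_rcx slot, which is
	 * why a straight bcopy() from &frame->tf_rdi works.
	 */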

	/*
	 * copyin is MP aware, but the tracing code is not
	 */
	if (narg > regcnt) {
		KASSERT(params != NULL, ("copyin args with no params!"));
		error = copyin(params, &argsdst[regcnt],
			       (narg - regcnt) * sizeof(register_t));
		if (error) {
#ifdef KTRACE
			if (KTRPOINT(td, KTR_SYSCALL)) {
				MAKEMPSAFE(have_mplock);

				ktrsyscall(lp, code, narg,
					(void *)(&args.nosys.sysmsg + 1));
			}
#endif
			goto bad;
		}
	}

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSCALL)) {
		MAKEMPSAFE(have_mplock);
		ktrsyscall(lp, code, narg, (void *)(&args.nosys.sysmsg + 1));
	}
#endif

	/*
	 * Default return value is 0 (will be copied to %rax).  Double-value
	 * returns use %rax and %rdx.  %rdx is left unchanged for system
	 * calls which return only one result.
	 */
	args.sysmsg_fds[0] = 0;
	args.sysmsg_fds[1] = frame->tf_rdx;
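	/*
	 * Example (illustrative): write(2) returns a single value in %rax
	 * and leaves %rdx alone, while a double-value call such as the
	 * historical register-returning pipe(2) fills both sysmsg_fds[0]
	 * and sysmsg_fds[1].
	 */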

	/*
	 * The syscall might manipulate the trap frame.  If it does it
	 * will probably return EJUSTRETURN.
	 */
	args.sysmsg_frame = frame;

	STOPEVENT(p, S_SCE, narg);	/* MP aware */

	/*
	 * Try to run the syscall without the MP lock if the syscall
	 * is MP safe.  We have to obtain the MP lock no matter what if
	 * the syscall is not MP safe.
	 */
	if ((callp->sy_narg & SYF_MPSAFE) == 0)
		MAKEMPSAFE(have_mplock);

	error = (*callp->sy_call)(&args);

out:
	/*
	 * MP SAFE (we may or may not have the MP lock at this point)
	 */
	kprintf("SYSMSG %d ", error);
	switch (error) {
	case 0:
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		lp = curthread->td_lwp;
		frame->tf_rax = args.sysmsg_fds[0];
		frame->tf_rdx = args.sysmsg_fds[1];
		kprintf0("RESULT %ld %ld\n", frame->tf_rax, frame->tf_rdx);
		frame->tf_rflags &= ~PSL_C;
		break;
	case ERESTART:
		/*
		 * Reconstruct pc, we know that 'syscall' is 2 bytes.
		 * We have to do a full context restore so that %r10
		 * (which was holding the value of %rcx) is restored for
		 * the next iteration.
		 */
		frame->tf_rip -= frame->tf_err;
		frame->tf_r10 = frame->tf_rcx;
		td->td_pcb->pcb_flags |= PCB_FULLCTX;
		break;
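	/*
	 * Illustrative note (assumption: the syscall entry path stores the
	 * instruction length in tf_err): SYSCALL is a 2-byte opcode, so
	 * "tf_rip -= tf_err" backs rip up to re-execute it, and %rcx must
	 * be refetched from %r10 because SYSCALL itself overwrites %rcx
	 * with the user return rip.
	 */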
	case EJUSTRETURN:
		break;
	case EASYNC:
		panic("Unexpected EASYNC return value (for now)");
	default:
bad:
		if (p->p_sysent->sv_errsize) {
			if (error >= p->p_sysent->sv_errsize)
				error = -1;	/* XXX */
			else
				error = p->p_sysent->sv_errtbl[error];
		}
		kprintf0("ERROR %d\n", error);
		frame->tf_rax = error;
		frame->tf_rflags |= PSL_C;
		break;
	}

	/*
	 * Traced syscall.  trapsignal() is not MP aware.
	 */
	if (orig_tf_rflags & PSL_T) {
		MAKEMPSAFE(have_mplock);
		frame->tf_rflags &= ~PSL_T;
		trapsignal(lp, SIGTRAP, 0);
	}

	/*
	 * Handle reschedule and other end-of-syscall issues
	 */
	userret(lp, frame, sticks);

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSRET)) {
		MAKEMPSAFE(have_mplock);
		ktrsysret(lp, code, error, args.sysmsg_result);
	}
#endif

	/*
	 * This works because errno is findable through the
	 * register set.  If we ever support an emulation where this
	 * is not the case, this code will need to be revisited.
	 */
	STOPEVENT(p, S_SCX, code);

	userexit(lp);
#ifdef SMP
	/*
	 * Release the MP lock if we had to get it
	 */
	KASSERT(td->td_mpcount == have_mplock,
		("badmpcount syscall2/end from %p", (void *)frame->tf_rip));
	if (have_mplock)
		rel_mplock();
#endif
	KTR_LOG(kernentry_syscall_ret, p->p_pid, lp->lwp_tid, error);
#ifdef INVARIANTS
	KASSERT(crit_count == (td->td_pri & ~TDPRI_MASK),
		("syscall: critical section count mismatch! %d/%d",
		crit_count / TDPRI_CRIT, td->td_pri / TDPRI_CRIT));
#endif
}

void
fork_return(struct lwp *lp, struct trapframe *frame)
{
	kprintf0("fork return\n");
	frame->tf_rax = 0;		/* Child returns zero */
	frame->tf_rflags &= ~PSL_C;	/* success */
	frame->tf_rdx = 1;

	generic_lwp_return(lp, frame);
	KTR_LOG(kernentry_fork_ret, lp->lwp_proc->p_pid, lp->lwp_tid);
}

/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.  MP lock is held on entry and should be
 * released on return.  This code will return back into the fork
 * trampoline code which then runs doreti.
 */
void
generic_lwp_return(struct lwp *lp, struct trapframe *frame)
{
	struct proc *p = lp->lwp_proc;

	kprintf0("generic_lwp_return\n");

	/*
	 * Newly forked processes are given a kernel priority.  We have to
	 * adjust the priority to a normal user priority and fake entry
	 * into the kernel (call userenter()) to install a passive release
	 * function just in case userret() decides to stop the process.  This
	 * can occur when ^Z races a fork.  If we do not install the passive
	 * release function the current process designation will not be
	 * released when the thread goes to sleep.
	 */
	lwkt_setpri_self(TDPRI_USER_NORM);
	userenter(lp->lwp_thread);
	userret(lp, frame, 0);
#ifdef KTRACE
	if (KTRPOINT(lp->lwp_thread, KTR_SYSRET))
		ktrsysret(lp, SYS_fork, 0, 0);
#endif
	p->p_flag |= P_PASSIVE_ACQ;
	userexit(lp);
	p->p_flag &= ~P_PASSIVE_ACQ;
#ifdef SMP
	KKASSERT(lp->lwp_thread->td_mpcount == 1);
	rel_mplock();
#endif
}

/*
 * If PGEX_FPFAULT is set then set FP_VIRTFP in the PCB to force a T_DNA
 * fault (which is then passed back to the virtual kernel) if an attempt
 * is made to use the FP unit.
 *
 * XXX this is a fairly big hack.
 */
void
set_vkernel_fp(struct trapframe *frame)
{
	/* JG */
}