/*
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $
 * $DragonFly: src/sys/platform/pc32/i386/trap.c,v 1.115 2008/09/09 04:06:17 dillon Exp $
 */

/*
 * 386 Trap and System call handling
 */
#include "opt_ktrace.h"
#include "opt_clock.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pioctl.h>
#include <sys/kernel.h>
#include <sys/kerneldump.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/signal2.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vmmeter.h>
#include <sys/malloc.h>
#include <sys/ktrace.h>
#include <sys/upcall.h>
#include <sys/vkernel.h>
#include <sys/sysproto.h>
#include <sys/sysunion.h>

#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>

#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <machine/tss.h>
#include <machine/specialreg.h>
#include <machine/globaldata.h>

#include <machine_base/isa/intr_machdep.h>

#include <sys/syslog.h>
#include <machine/clock.h>
#include <machine/vm86.h>

#include <sys/msgport2.h>
#include <sys/thread2.h>
#include <sys/mplock2.h>
#ifdef SMP
#define MAKEMPSAFE(have_mplock)			\
	if (have_mplock == 0) {			\
		get_mplock();			\
		have_mplock = 1;		\
	}
#else
#define MAKEMPSAFE(have_mplock)
#endif
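
/*
 * Note on the have_mplock convention used below: each handler tracks
 * whether it had to take the MP lock so that the lock can be released
 * exactly once on the way out.  An illustrative (not verbatim) usage
 * pattern is:
 *
 *	int have_mplock = 0;
 *	...
 *	MAKEMPSAFE(have_mplock);	(take the MP lock if not yet held)
 *	... do MP-unsafe work ...
 *	if (have_mplock)
 *		rel_mplock();		(drop it exactly once at the end)
 */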
int (*pmath_emulate) (struct trapframe *);

extern void trap (struct trapframe *frame);
extern void syscall2 (struct trapframe *frame);

static int trap_pfault (struct trapframe *, int, vm_offset_t);
static void trap_fatal (struct trapframe *, vm_offset_t);
void dblfault_handler (void);

extern inthand_t IDTVEC(syscall);
#define MAX_TRAP_MSG		28
static char *trap_msg[] = {
	"",					/*  0 unused */
	"privileged instruction fault",		/*  1 T_PRIVINFLT */
	"",					/*  2 unused */
	"breakpoint instruction fault",		/*  3 T_BPTFLT */
	"",					/*  4 unused */
	"",					/*  5 unused */
	"arithmetic trap",			/*  6 T_ARITHTRAP */
	"system forced exception",		/*  7 T_ASTFLT */
	"",					/*  8 unused */
	"general protection fault",		/*  9 T_PROTFLT */
	"trace trap",				/* 10 T_TRCTRAP */
	"",					/* 11 unused */
	"page fault",				/* 12 T_PAGEFLT */
	"",					/* 13 unused */
	"alignment fault",			/* 14 T_ALIGNFLT */
	"",					/* 15 unused */
	"",					/* 16 unused */
	"",					/* 17 unused */
	"integer divide fault",			/* 18 T_DIVIDE */
	"non-maskable interrupt trap",		/* 19 T_NMI */
	"overflow trap",			/* 20 T_OFLOW */
	"FPU bounds check fault",		/* 21 T_BOUND */
	"FPU device not available",		/* 22 T_DNA */
	"double fault",				/* 23 T_DOUBLEFLT */
	"FPU operand fetch fault",		/* 24 T_FPOPFLT */
	"invalid TSS fault",			/* 25 T_TSSFLT */
	"segment not present fault",		/* 26 T_SEGNPFLT */
	"stack fault",				/* 27 T_STKFLT */
	"machine check trap",			/* 28 T_MCHK */
};
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
extern int has_f00f_bug;
#endif

static int ddb_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, ddb_on_nmi, CTLFLAG_RW,
	&ddb_on_nmi, 0, "Go to DDB on NMI");
static int panic_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
	&panic_on_nmi, 0, "Panic on NMI");
static int fast_release;
SYSCTL_INT(_machdep, OID_AUTO, fast_release, CTLFLAG_RW,
	&fast_release, 0, "Passive Release was optimal");
static int slow_release;
SYSCTL_INT(_machdep, OID_AUTO, slow_release, CTLFLAG_RW,
	&slow_release, 0, "Passive Release was nonoptimal");
MALLOC_DEFINE(M_SYSMSG, "sysmsg", "sysmsg structure");
extern int max_sysmsg;
/*
 * Passively intercepts the thread switch function to increase the thread
 * priority from a user priority to a kernel priority, reducing
 * syscall and trap overhead for the case where no switch occurs.
 *
 * Synchronizes td_ucred with p_ucred.  This is used by system calls,
 * signal handling, faults, AST traps, and anything else that enters the
 * kernel from userland and provides the kernel with a stable read-only
 * copy of the process ucred.
 */
static __inline void
userenter(struct thread *curtd, struct proc *curp)
{
	struct ucred *ocred;
	struct ucred *ncred;

	curtd->td_release = lwkt_passive_release;

	if (curtd->td_ucred != curp->p_ucred) {
		ncred = crhold(curp->p_ucred);
		ocred = curtd->td_ucred;
		curtd->td_ucred = ncred;
		if (ocred)
			crfree(ocred);
	}
}
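
/*
 * Note on the credential handling in userenter(): td_ucred is a
 * per-thread reference, so crhold() takes a new reference on the process
 * credential before it is installed and the previous reference (ocred)
 * is dropped with crfree().  The thread therefore always owns exactly
 * one credential reference that cannot change underneath it while it is
 * running in the kernel.
 */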
/*
 * Handle signals, upcalls, profiling, and other AST's and/or tasks that
 * must be completed before we can return to or try to return to userland.
 *
 * Note that td_sticks is a 64 bit quantity, but there's no point doing
 * 64 bit arithmetic on the delta calculation so the absolute tick values
 * are truncated to an integer.
 */
static __inline void
userret(struct lwp *lp, struct trapframe *frame, int sticks)
{
	struct proc *p = lp->lwp_proc;
	int sig;

	if (p->p_userret != NULL) {

	/*
	 * Charge system time if profiling.  Note: times are in microseconds.
	 * This may do a copyout and block, so do it first even though it
	 * means some system time will be charged as user time.
	 */
	if (p->p_flag & P_PROFIL) {
		addupc_task(p, frame->tf_eip,
			(u_int)((int)lp->lwp_thread->td_sticks - sticks));
	}

	/*
	 * If the jungle wants us dead, so be it.
	 */
	if (lp->lwp_flag & LWP_WEXIT) {
		rel_mplock();	/* NOT REACHED */

	/*
	 * Block here if we are in a stopped state.
	 */
	if (p->p_stat == SSTOP || dump_stop_usertds) {

	/*
	 * Post any pending upcalls.  If running a virtual kernel be sure
	 * to restore the virtual kernel's vmspace before posting the upcall.
	 */
	if (p->p_flag & P_UPCALLPEND) {
		p->p_flag &= ~P_UPCALLPEND;

	/*
	 * Post any pending signals.  If running a virtual kernel be sure
	 * to restore the virtual kernel's vmspace before posting the signal.
	 *
	 * WARNING!  postsig() can exit and not return.
	 */
	if ((sig = CURSIG_TRACE(lp)) != 0) {

	/*
	 * block here if we are swapped out, but still process signals
	 * (such as SIGKILL).  proc0 (the swapin scheduler) is already
	 * aware of our situation, we do not have to wake it up.
	 */
	if (p->p_flag & P_SWAPPEDOUT) {
		p->p_flag |= P_SWAPWAIT;
		if (p->p_flag & P_SWAPWAIT)
			tsleep(p, PCATCH, "SWOUT", 0);
		p->p_flag &= ~P_SWAPWAIT;

	/*
	 * Make sure postsig() handled request to restore old signal mask after
	 * running signal handler.
	 */
	KKASSERT((lp->lwp_flag & LWP_OLDMASK) == 0);
/*
 * Cleanup from userenter and any passive release that might have occurred.
 * We must reclaim the current-process designation before we can return
 * to usermode.  We also handle both LWKT and USER reschedule requests.
 */
static __inline void
userexit(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;
	/* globaldata_t gd = td->td_gd; */

	/*
	 * Handle stop requests at kernel priority.  Any requests queued
	 * after this loop will generate another AST.
	 */
	while (lp->lwp_proc->p_stat == SSTOP) {

	/*
	 * Reduce our priority in preparation for a return to userland.  If
	 * our passive release function was still in place, our priority was
	 * never raised and does not need to be reduced.
	 */
	lwkt_passive_recover(td);

	/*
	 * Become the current user scheduled process if we aren't already,
	 * and deal with reschedule requests and other factors.
	 */
	lp->lwp_proc->p_usched->acquire_curproc(lp);
	/* WARNING: we may have migrated cpu's */
	/* gd = td->td_gd; */
#if !defined(KTR_KERNENTRY)
#define	KTR_KERNENTRY	KTR_ALL
#endif
KTR_INFO_MASTER(kernentry);
KTR_INFO(KTR_KERNENTRY, kernentry, trap, 0, "pid=%d, tid=%d, trapno=%d, eva=%p",
	 sizeof(int) + sizeof(int) + sizeof(int) + sizeof(vm_offset_t));
KTR_INFO(KTR_KERNENTRY, kernentry, trap_ret, 0, "pid=%d, tid=%d",
	 sizeof(int) + sizeof(int));
KTR_INFO(KTR_KERNENTRY, kernentry, syscall, 0, "pid=%d, tid=%d, call=%d",
	 sizeof(int) + sizeof(int) + sizeof(int));
KTR_INFO(KTR_KERNENTRY, kernentry, syscall_ret, 0, "pid=%d, tid=%d, err=%d",
	 sizeof(int) + sizeof(int) + sizeof(int));
KTR_INFO(KTR_KERNENTRY, kernentry, fork_ret, 0, "pid=%d, tid=%d",
	 sizeof(int) + sizeof(int));
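
/*
 * The probes above bracket every kernel entry/exit path in this file
 * (traps, system calls and fork returns).  They only generate code when
 * the kernel is built with KTR support; when it is, the recorded events
 * can be inspected from DDB or dumped with the ktrdump(8) utility
 * (assuming it is installed).
 */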
/*
 * Exception, fault, and trap interface to the kernel.
 * This common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed.
 *
 * This function is also called from doreti in an interlock to handle ASTs.
 * For example:  hardwareint->INTROUTINE->(set ast)->doreti->trap
 *
 * NOTE!  We have to retrieve the fault address prior to obtaining the
 * MP lock because get_mplock() may switch out.  YYY cr2 really ought
 * to be retrieved by the assembly code, not here.
 *
 * XXX gd_trap_nesting_level currently prevents lwkt_switch() from panicking
 * if an attempt is made to switch from a fast interrupt or IPI.  This is
 * necessary to properly take fatal kernel traps on SMP machines if
 * get_mplock() has to block.
 */
void
trap(struct trapframe *frame)
{
	struct globaldata *gd = mycpu;
	struct thread *td = gd->gd_curthread;
	struct lwp *lp = td->td_lwp;
	int i = 0, ucode = 0, type, code;
	int crit_count = td->td_critcount;
	lwkt_tokref_t curstop = td->td_toks_stop;

	/*
	 * We need to allow T_DNA faults when the debugger is active since
	 * some dumping paths do large bcopy() which use the floating
	 * point registers for faster copying.
	 */
	if (db_active && frame->tf_trapno != T_DNA) {
		eva = (frame->tf_trapno == T_PAGEFLT ? rcr2() : 0);
		++gd->gd_trap_nesting_level;
		MAKEMPSAFE(have_mplock);
		trap_fatal(frame, eva);
		--gd->gd_trap_nesting_level;
	++gd->gd_trap_nesting_level;
	if (frame->tf_trapno == T_PAGEFLT) {
		/*
		 * For some Cyrix CPUs, %cr2 is clobbered by interrupts.
		 * This problem is worked around by using an interrupt
		 * gate for the pagefault handler.  We are finally ready
		 * to read %cr2 and then must reenable interrupts.
		 *
		 * XXX this should be in the switch statement, but the
		 * NO_F00F_HACK and VM86 goto and ifdefs obfuscate the
		 * flow of control too much for this to be obviously
		 * correct.
		 */
	--gd->gd_trap_nesting_level;

	if (!(frame->tf_eflags & PSL_I)) {
		/*
		 * Buggy application or kernel code has disabled interrupts
		 * and then trapped.  Enabling interrupts now is wrong, but
		 * it is better than running with interrupts disabled until
		 * they are accidentally enabled later.
		 */
		type = frame->tf_trapno;
		if (ISPL(frame->tf_cs) == SEL_UPL || (frame->tf_eflags & PSL_VM)) {
			MAKEMPSAFE(have_mplock);
			kprintf(
			    "pid %ld (%s): trap %d with interrupts disabled\n",
			    (long)curproc->p_pid, curproc->p_comm, type);
		} else if (type != T_BPTFLT && type != T_TRCTRAP) {
			/*
			 * XXX not quite right, since this may be for a
			 * multiple fault in user mode.
			 */
			MAKEMPSAFE(have_mplock);
			kprintf("kernel trap %d with interrupts disabled\n",
			    type);
#if defined(I586_CPU) && !defined(NO_F00F_HACK)

	type = frame->tf_trapno;
	code = frame->tf_err;

	if (frame->tf_eflags & PSL_VM &&
	    (type == T_PROTFLT || type == T_STKFLT)) {
		KKASSERT(td->td_mpcount > 0);
		i = vm86_emulate((struct vm86frame *)frame);
		KKASSERT(td->td_mpcount > 0);

			/*
			 * returns to original process
			 */
			vm86_trap((struct vm86frame *)frame,
			vm86_trap((struct vm86frame *)frame, 0);
			KKASSERT(0); /* NOT REACHED */
		/*
		 * these traps want either a process context, or
		 * assume a normal userspace trap.
		 */
			trap_fatal(frame, eva);

			type = T_BPTFLT;	/* kernel breakpoint */

		goto kernel_trap;	/* normal kernel trap handling */

	if ((ISPL(frame->tf_cs) == SEL_UPL) || (frame->tf_eflags & PSL_VM)) {
		/* user trap */

		KTR_LOG(kernentry_trap, p->p_pid, lp->lwp_tid,
			frame->tf_trapno, eva);

		userenter(td, p);

		sticks = (int)td->td_sticks;
		lp->lwp_md.md_regs = frame;
		switch (type) {
		case T_PRIVINFLT:	/* privileged instruction fault */

		case T_BPTFLT:		/* bpt instruction fault */
		case T_TRCTRAP:		/* trace trap */
			frame->tf_eflags &= ~PSL_T;

		case T_ARITHTRAP:	/* arithmetic trap */

		case T_ASTFLT:		/* Allow process switch */
			mycpu->gd_cnt.v_soft++;
			if (mycpu->gd_reqflags & RQF_AST_OWEUPC) {
				atomic_clear_int_nonlocked(&mycpu->gd_reqflags,
						RQF_AST_OWEUPC);
				addupc_task(p, p->p_prof.pr_addr,
					    p->p_prof.pr_ticks);
			}
		/*
		 * The following two traps can happen in
		 * vm86 mode, and, if so, we want to handle
		 * them specially.
		 */
		case T_PROTFLT:		/* general protection fault */
		case T_STKFLT:		/* stack fault */
			if (frame->tf_eflags & PSL_VM) {
				i = vm86_emulate((struct vm86frame *)frame);

			ucode = (type == T_PROTFLT) ? BUS_OBJERR : BUS_ADRERR;

		case T_SEGNPFLT:	/* segment not present fault */

		case T_TSSFLT:		/* invalid TSS fault */
		case T_DOUBLEFLT:	/* double fault */

			ucode = code + BUS_SEGM_FAULT;	/* XXX: ??? */
		case T_PAGEFLT:		/* page fault */
			MAKEMPSAFE(have_mplock);
			i = trap_pfault(frame, TRUE, eva);

#if defined(I586_CPU) && !defined(NO_F00F_HACK)

			ucode = BUS_ADRERR;	/* XXX */

		case T_DIVIDE:		/* integer divide fault */

#if NISA > 0
		case T_NMI:
			MAKEMPSAFE(have_mplock);
#ifdef POWERFAIL_NMI
			goto handle_powerfail;
#else /* !POWERFAIL_NMI */
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				kprintf ("NMI ... going to debugger\n");
				kdb_trap (type, 0, frame);
			} else if (panic_on_nmi)
				panic("NMI indicates hardware failure");
			break;
#endif /* POWERFAIL_NMI */
#endif /* NISA > 0 */

		case T_OFLOW:		/* integer overflow fault */

		case T_BOUND:		/* bounds check fault */
		case T_DNA:
			/*
			 * Virtual kernel intercept - pass the DNA exception
			 * to the virtual kernel if it asked to handle it.
			 * This occurs when the virtual kernel is holding
			 * onto the FP context for a different emulated
			 * process than the one currently running.
			 *
			 * We must still call npxdna() since we may have
			 * saved FP state that the virtual kernel needs
			 * to hand over to a different emulated process.
			 */
			if (lp->lwp_vkernel && lp->lwp_vkernel->ve &&
			    (td->td_pcb->pcb_flags & FP_VIRTFP)
			) {

			/*
			 * The kernel may have switched out the FP unit's
			 * state, causing the user process to take a fault
			 * when it tries to use the FP unit.  Restore the
			 * state here.
			 */

			if (!pmath_emulate) {

				ucode = FPE_FPU_NP_TRAP;

			i = (*pmath_emulate)(frame);

			if (!(frame->tf_eflags & PSL_T))

			frame->tf_eflags &= ~PSL_T;

			/* else ucode = emulator_only_knows() XXX */

		case T_FPOPFLT:		/* FPU operand fetch fault */

		case T_XMMFLT:		/* SIMD floating-point exception */
	} else {
kernel_trap:
		switch (type) {
		case T_PAGEFLT:		/* page fault */
			MAKEMPSAFE(have_mplock);
			trap_pfault(frame, FALSE, eva);

		case T_DNA:
			/*
			 * The kernel may be using npx for copying or other
			 * purposes.
			 */

		case T_PROTFLT:		/* general protection fault */
		case T_SEGNPFLT:	/* segment not present fault */
			/*
			 * Invalid segment selectors and out of bounds
			 * %eip's and %esp's can be set up in user mode.
			 * This causes a fault in kernel mode when the
			 * kernel tries to return to user mode.  We want
			 * to get this fault so that we can fix the
			 * problem here and not have to check all the
			 * selectors and pointers when the user changes
			 * them.
			 */
#define MAYBE_DORETI_FAULT(where, whereto)				\
			if (frame->tf_eip == (int)where) {		\
				frame->tf_eip = (int)whereto;		\
			if (mycpu->gd_intr_nesting_level == 0) {
				/*
				 * Invalid %fs's and %gs's can be created using
				 * procfs or PT_SETREGS or by invalidating the
				 * underlying LDT entry.  This causes a fault
				 * in kernel mode when the kernel attempts to
				 * switch contexts.  Lose the bad context
				 * (XXX) so that we can continue, and generate
				 * a signal.
				 */
				MAYBE_DORETI_FAULT(doreti_iret,
						   doreti_iret_fault);
				MAYBE_DORETI_FAULT(doreti_popl_ds,
						   doreti_popl_ds_fault);
				MAYBE_DORETI_FAULT(doreti_popl_es,
						   doreti_popl_es_fault);
				MAYBE_DORETI_FAULT(doreti_popl_fs,
						   doreti_popl_fs_fault);
				MAYBE_DORETI_FAULT(doreti_popl_gs,
						   doreti_popl_gs_fault);
				if (td->td_pcb->pcb_onfault) {
					frame->tf_eip =
					    (register_t)td->td_pcb->pcb_onfault;

		case T_TSSFLT:		/* invalid TSS fault */
			/*
			 * PSL_NT can be set in user mode and isn't cleared
			 * automatically when the kernel is entered.  This
			 * causes a TSS fault when the kernel attempts to
			 * `iret' because the TSS link is uninitialized.  We
			 * want to get this fault so that we can fix the
			 * problem here and not every time the kernel is
			 * entered.
			 */
			if (frame->tf_eflags & PSL_NT) {
				frame->tf_eflags &= ~PSL_NT;
		case T_TRCTRAP:		/* trace trap */
			if (frame->tf_eip == (int)IDTVEC(syscall)) {
				/*
				 * We've just entered system mode via the
				 * syscall lcall.  Continue single stepping
				 * silently until the syscall handler has
				 * saved the flags.
				 */

			if (frame->tf_eip == (int)IDTVEC(syscall) + 1) {
				/*
				 * The syscall handler has now saved the
				 * flags.  Stop single stepping it.
				 */
				frame->tf_eflags &= ~PSL_T;

			/*
			 * Ignore debug register trace traps due to
			 * accesses in the user's address space, which
			 * can happen under several conditions such as
			 * if a user sets a watchpoint on a buffer and
			 * then passes that buffer to a system call.
			 * We still want to get TRCTRAPS for addresses
			 * in kernel space because that is useful when
			 * debugging the kernel.
			 */
			if (user_dbreg_trap()) {
				/*
				 * Reset breakpoint bits because the
				 * processor does not clear them itself.
				 */
				load_dr6(rdr6() & 0xfffffff0);
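				/*
				 * The masking above clears the sticky B0-B3
				 * breakpoint-condition bits in %dr6; the
				 * processor latches them and does not clear
				 * them itself, so leaving them set would
				 * keep reporting a stale breakpoint hit.
				 */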
			/*
			 * Fall through (TRCTRAP kernel mode, kernel address)
			 */
		case T_BPTFLT:
			/*
			 * If DDB is enabled, let it handle the debugger trap.
			 * Otherwise, debugger traps "can't happen".
			 */
			MAKEMPSAFE(have_mplock);
			if (kdb_trap (type, 0, frame))

#if NISA > 0
		case T_NMI:
			MAKEMPSAFE(have_mplock);
#ifdef POWERFAIL_NMI
#ifndef TIMER_FREQ
#  define TIMER_FREQ 1193182
#endif
handle_powerfail:
		{
			static unsigned lastalert = 0;

			if (time_second - lastalert > 10) {
				log(LOG_WARNING, "NMI: power fail\n");
				sysbeep(TIMER_FREQ/880, hz);
				lastalert = time_second;
			}
		}
#else /* !POWERFAIL_NMI */
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				kprintf ("NMI ... going to debugger\n");
				kdb_trap (type, 0, frame);
			} else if (panic_on_nmi == 0)
				break;
#endif /* POWERFAIL_NMI */
#endif /* NISA > 0 */

			MAKEMPSAFE(have_mplock);
			trap_fatal(frame, eva);
	/*
	 * Virtual kernel intercept - if the fault is directly related to a
	 * VM context managed by a virtual kernel then let the virtual kernel
	 * handle it.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		vkernel_trap(lp, frame);

	/*
	 * Translate fault for emulators (e.g. Linux)
	 */
	if (*p->p_sysent->sv_transtrap)
		i = (*p->p_sysent->sv_transtrap)(i, type);

	MAKEMPSAFE(have_mplock);
	trapsignal(lp, i, ucode);

	if (type <= MAX_TRAP_MSG) {
		uprintf("fatal process exception: %s",
			trap_msg[type]);
		if ((type == T_PAGEFLT) || (type == T_PROTFLT))
			uprintf(", fault VA = 0x%lx", (u_long)eva);
	}

	if (ISPL(frame->tf_cs) == SEL_UPL) {
		KASSERT(td->td_mpcount == have_mplock,
			("badmpcount trap/end from %p", (void *)frame->tf_eip));
		userret(lp, frame, sticks);
	}

	if (p != NULL && lp != NULL)
		KTR_LOG(kernentry_trap_ret, p->p_pid, lp->lwp_tid);

	KASSERT(crit_count == td->td_critcount,
		("trap: critical section count mismatch! %d/%d",
		crit_count, td->td_pri));
	KASSERT(curstop == td->td_toks_stop,
		("trap: extra tokens held after trap! %zd/%zd",
		curstop - &td->td_toks_base,
		td->td_toks_stop - &td->td_toks_base));
}
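
/*
 * The two KASSERTs at the end of trap() act as leak detectors: a mismatch
 * in the critical-section count or in the LWKT token stop pointer means a
 * handler above returned while still holding a critical section or a
 * token, which would otherwise corrupt scheduling state long after the
 * trap had returned.
 */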
static int
trap_pfault(struct trapframe *frame, int usermode, vm_offset_t eva)
{
	struct vmspace *vm = NULL;

	thread_t td = curthread;
	struct lwp *lp = td->td_lwp;

	va = trunc_page(eva);
	if (va >= KERNBASE) {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 * An exception: if the faulting address is the invalid
		 * instruction entry in the IDT, then the Intel Pentium
		 * F00F bug workaround was triggered, and we need to
		 * treat it as an illegal instruction, and not a page
		 * fault.
		 */
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
		if ((eva == (unsigned int)&idt[6]) && has_f00f_bug) {
			frame->tf_trapno = T_PRIVINFLT;

		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL.  If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */
		vm = lp->lwp_vmspace;

	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_WRITE;
	else
		ftype = VM_PROT_READ;

	if (map != &kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		PHOLD(lp->lwp_proc);

		fault_flags |= VM_FAULT_BURST;
		if (ftype & VM_PROT_WRITE)
			fault_flags |= VM_FAULT_DIRTY;
		else
			fault_flags |= VM_FAULT_NORMAL;
		rv = vm_fault(map, va, ftype, fault_flags);

		PRELE(lp->lwp_proc);
	} else {
		/*
		 * Don't have to worry about process locking or stacks
		 * in the kernel.
		 */
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	}

	if (rv == KERN_SUCCESS)

	if (td->td_gd->gd_intr_nesting_level == 0 &&
	    td->td_pcb->pcb_onfault) {
		frame->tf_eip = (register_t)td->td_pcb->pcb_onfault;

	trap_fatal(frame, eva);

	/* kludge to pass faulting virtual address to sendsig */
	frame->tf_xflags = frame->tf_err;
	frame->tf_err = eva;

	return ((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}
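
/*
 * Signal selection in trap_pfault(): a vm_fault() result of
 * KERN_PROTECTION_FAILURE (the address exists but the access is not
 * permitted) is reported as SIGBUS, while any other failure - typically
 * an unmapped address - is reported as SIGSEGV.  trap() passes this
 * return value directly to trapsignal().
 */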
static void
trap_fatal(struct trapframe *frame, vm_offset_t eva)
{
	int code, type, ss, esp;
	struct soft_segment_descriptor softseg;

	code = frame->tf_err;
	type = frame->tf_trapno;
	sdtossd(&gdt[mycpu->gd_cpuid * NGDT + IDXSEL(frame->tf_cs & 0xffff)].sd, &softseg);

	if (type <= MAX_TRAP_MSG)
		kprintf("\n\nFatal trap %d: %s while in %s mode\n",
			type, trap_msg[type],
			frame->tf_eflags & PSL_VM ? "vm86" :
			ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");

	/* three separate prints in case of a trap on an unmapped page */
	kprintf("mp_lock = %08x; ", mp_lock);
	kprintf("cpuid = %d; ", mycpu->gd_cpuid);
	kprintf("lapic.id = %08x\n", lapic.id);

	if (type == T_PAGEFLT) {
		kprintf("fault virtual address = %p\n", (void *)eva);
		kprintf("fault code = %s %s, %s\n",
			code & PGEX_U ? "user" : "supervisor",
			code & PGEX_W ? "write" : "read",
			code & PGEX_P ? "protection violation" : "page not present");
	}
	kprintf("instruction pointer = 0x%x:0x%x\n",
		frame->tf_cs & 0xffff, frame->tf_eip);
	if ((ISPL(frame->tf_cs) == SEL_UPL) || (frame->tf_eflags & PSL_VM)) {
		ss = frame->tf_ss & 0xffff;
		esp = frame->tf_esp;
	} else {
		ss = GSEL(GDATA_SEL, SEL_KPL);
		esp = (int)&frame->tf_esp;
	}
	kprintf("stack pointer = 0x%x:0x%x\n", ss, esp);
	kprintf("frame pointer = 0x%x:0x%x\n", ss, frame->tf_ebp);
	kprintf("code segment = base 0x%x, limit 0x%x, type 0x%x\n",
		softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
	kprintf("             = DPL %d, pres %d, def32 %d, gran %d\n",
		softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_def32,
		softseg.ssd_gran);
	kprintf("processor eflags = ");
	if (frame->tf_eflags & PSL_T)
		kprintf("trace trap, ");
	if (frame->tf_eflags & PSL_I)
		kprintf("interrupt enabled, ");
	if (frame->tf_eflags & PSL_NT)
		kprintf("nested task, ");
	if (frame->tf_eflags & PSL_RF)
		kprintf("resume, ");
	if (frame->tf_eflags & PSL_VM)
		kprintf("vm86, ");
	kprintf("IOPL = %d\n", (frame->tf_eflags & PSL_IOPL) >> 12);
	kprintf("current process = ");
	kprintf("%lu (%s)\n",
		(u_long)curproc->p_pid, curproc->p_comm ?
		curproc->p_comm : "");
	kprintf("current thread = pri %d ", curthread->td_pri);
	if (curthread->td_critcount)

	/*
	 * we probably SHOULD have stopped the other CPUs before now!
	 * another CPU COULD have been touching cpl at this moment...
	 */
	kprintf(" <- SMP: XXX");

	if ((debugger_on_panic || db_active) && kdb_trap(type, code, frame))
		return;

	kprintf("trap number = %d\n", type);
	if (type <= MAX_TRAP_MSG)
		panic("%s", trap_msg[type]);
	else
		panic("unknown/reserved trap");
}
/*
 * Double fault handler.  Called when a fault occurs while writing
 * a frame for a trap/exception onto the stack.  This usually occurs
 * when the stack overflows (as is the case with infinite recursion,
 * for example).
 *
 * XXX Note that the current PTD gets replaced by IdlePTD when the
 * task switch occurs.  This means that the stack that was active at
 * the time of the double fault is not available at <kstack> unless
 * the machine was idle when the double fault occurred.  The downside
 * of this is that "trace <ebp>" in ddb won't work.
 */
static int
in_kstack_guard(register_t rptr)
{
	thread_t td = curthread;

	if ((char *)rptr >= td->td_kstack &&
	    (char *)rptr < td->td_kstack + PAGE_SIZE) {
		return (1);
	}
	return (0);
}

void
dblfault_handler(void)
{
	struct mdglobaldata *gd = mdcpu;

	if (in_kstack_guard(gd->gd_common_tss.tss_esp) ||
	    in_kstack_guard(gd->gd_common_tss.tss_ebp)) {
		kprintf("DOUBLE FAULT - KERNEL STACK GUARD HIT!\n");
	} else {
		kprintf("DOUBLE FAULT:\n");
	}
	kprintf("eip = 0x%x\n", gd->gd_common_tss.tss_eip);
	kprintf("esp = 0x%x\n", gd->gd_common_tss.tss_esp);
	kprintf("ebp = 0x%x\n", gd->gd_common_tss.tss_ebp);

	/* three separate prints in case of a trap on an unmapped page */
	kprintf("mp_lock = %08x; ", mp_lock);
	kprintf("cpuid = %d; ", gd->mi.gd_cpuid);
	kprintf("lapic.id = %08x\n", lapic.id);

	panic("double fault");
}
/*
 * syscall2 -	MP aware system call request C handler
 *
 * A system call is essentially treated as a trap.  The MP lock is not
 * held on entry or return.  We are responsible for handling ASTs
 * (e.g. a task switch) prior to return.
 */
void
syscall2(struct trapframe *frame)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;
	struct sysent *callp;
	register_t orig_tf_eflags;
	int crit_count = td->td_critcount;
	int have_mplock = 0;
	union sysunion args;

	if (ISPL(frame->tf_cs) != SEL_UPL) {

	KTR_LOG(kernentry_syscall, p->p_pid, lp->lwp_tid,
		frame->tf_eax);

	KASSERT(td->td_mpcount == 0,
		("badmpcount syscall2 from %p", (void *)frame->tf_eip));

	userenter(td, p);	/* lazy raise our priority */

	sticks = (int)td->td_sticks;
	orig_tf_eflags = frame->tf_eflags;
	/*
	 * Virtual kernel intercept - if a VM context managed by a virtual
	 * kernel issues a system call the virtual kernel handles it, not us.
	 * Restore the virtual kernel context and return from its system
	 * call.  The current frame is copied out to the virtual kernel.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		vkernel_trap(lp, frame);
		error = EJUSTRETURN;

	/*
	 * Get the system call parameters and account for time
	 */
	lp->lwp_md.md_regs = frame;
	params = (caddr_t)frame->tf_esp + sizeof(int);
	code = frame->tf_eax;

	if (p->p_sysent->sv_prepsyscall) {
		(*p->p_sysent->sv_prepsyscall)(
			frame, (int *)(&args.nosys.sysmsg + 1),
			&code, &params);
	} else {
		/*
		 * Need to check if this is a 32 bit or 64 bit syscall.
		 * fuword is MP aware.
		 */
		if (code == SYS_syscall) {
			/*
			 * Code is first argument, followed by actual args.
			 */
			code = fuword(params);
			params += sizeof(int);
		} else if (code == SYS___syscall) {
			/*
			 * Like syscall, but code is a quad, so as to maintain
			 * quad alignment for the rest of the arguments.
			 */
			code = fuword(params);
			params += sizeof(quad_t);
		}
	}

	if (p->p_sysent->sv_mask)
		code &= p->p_sysent->sv_mask;

	if (code >= p->p_sysent->sv_size)
		callp = &p->p_sysent->sv_table[0];
	else
		callp = &p->p_sysent->sv_table[code];
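
	/*
	 * Note that an out-of-range syscall number is not rejected here;
	 * it simply falls back to entry 0 of the sysent table, which is
	 * conventionally the nosys() stub, and that stub fails the call
	 * for us.
	 */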
	narg = callp->sy_narg & SYF_ARGMASK;

	if (p->p_sysent->sv_name[0] == 'L')
		kprintf("Linux syscall, code = %d\n", code);

	/*
	 * copyin is MP aware, but the tracing code is not
	 */
	if (narg && params) {
		error = copyin(params, (caddr_t)(&args.nosys.sysmsg + 1),
				narg * sizeof(register_t));

		if (KTRPOINT(td, KTR_SYSCALL)) {
			MAKEMPSAFE(have_mplock);
			ktrsyscall(lp, code, narg,
				(void *)(&args.nosys.sysmsg + 1));
		}
	}

	if (KTRPOINT(td, KTR_SYSCALL)) {
		MAKEMPSAFE(have_mplock);
		ktrsyscall(lp, code, narg, (void *)(&args.nosys.sysmsg + 1));
	}

	/*
	 * For traditional syscall code edx is left untouched when 32 bit
	 * results are returned.  Since edx is loaded from fds[1] when the
	 * system call returns we pre-set it here.
	 */
	args.sysmsg_fds[0] = 0;
	args.sysmsg_fds[1] = frame->tf_edx;

	/*
	 * The syscall might manipulate the trap frame.  If it does it
	 * will probably return EJUSTRETURN.
	 */
	args.sysmsg_frame = frame;

	STOPEVENT(p, S_SCE, narg);	/* MP aware */

	/*
	 * NOTE: All system calls run MPSAFE now.  The system call itself
	 *	 is responsible for getting the MP lock.
	 */
	error = (*callp->sy_call)(&args);
	/*
	 * MP SAFE (we may or may not have the MP lock at this point)
	 */
	switch (error) {
	case 0:
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		lp = curthread->td_lwp;
		frame->tf_eax = args.sysmsg_fds[0];
		frame->tf_edx = args.sysmsg_fds[1];
		frame->tf_eflags &= ~PSL_C;
		break;
	case ERESTART:
		/*
		 * Reconstruct pc, assuming lcall $X,y is 7 bytes,
		 * int 0x80 is 2 bytes.  We saved this in tf_err.
		 */
		frame->tf_eip -= frame->tf_err;
		break;
	case EASYNC:
		panic("Unexpected EASYNC return value (for now)");
	default:
		if (p->p_sysent->sv_errsize) {
			if (error >= p->p_sysent->sv_errsize)
				error = -1;	/* XXX */
			else
				error = p->p_sysent->sv_errtbl[error];
		}
		frame->tf_eax = error;
		frame->tf_eflags |= PSL_C;
		break;
	}
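
	/*
	 * The default (error) case above implements the usual i386 BSD
	 * syscall return convention: the errno value is handed back in
	 * %eax with the carry flag set, and the userland syscall stub
	 * tests carry to distinguish an error from a legitimate negative
	 * return value before copying %eax into errno.
	 */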
	/*
	 * Traced syscall.  trapsignal() is not MP aware.
	 */
	if ((orig_tf_eflags & PSL_T) && !(orig_tf_eflags & PSL_VM)) {
		MAKEMPSAFE(have_mplock);
		frame->tf_eflags &= ~PSL_T;
		trapsignal(lp, SIGTRAP, TRAP_TRACE);
	}

	/*
	 * Handle reschedule and other end-of-syscall issues
	 */
	userret(lp, frame, sticks);

	if (KTRPOINT(td, KTR_SYSRET)) {
		MAKEMPSAFE(have_mplock);
		ktrsysret(lp, code, error, args.sysmsg_result);
	}

	/*
	 * This works because errno is findable through the
	 * register set.  If we ever support an emulation where this
	 * is not the case, this code will need to be revisited.
	 */
	STOPEVENT(p, S_SCX, code);

	/*
	 * Release the MP lock if we had to get it
	 */
	KASSERT(td->td_mpcount == have_mplock,
		("badmpcount syscall2/end from %p callp %p",
		(void *)frame->tf_eip, callp));
	if (have_mplock)
		rel_mplock();

	KTR_LOG(kernentry_syscall_ret, p->p_pid, lp->lwp_tid, error);

	KASSERT(crit_count == td->td_critcount,
		("syscall: critical section count mismatch! %d/%d",
		crit_count, td->td_pri));
	KASSERT(&td->td_toks_base == td->td_toks_stop,
		("syscall: extra tokens held after trap! %zd",
		td->td_toks_stop - &td->td_toks_base));
}
/*
 * NOTE: MP lock not held at any point.
 */
void
fork_return(struct lwp *lp, struct trapframe *frame)
{
	frame->tf_eax = 0;		/* Child returns zero */
	frame->tf_eflags &= ~PSL_C;	/* success */

	generic_lwp_return(lp, frame);
	KTR_LOG(kernentry_fork_ret, lp->lwp_proc->p_pid, lp->lwp_tid);
}

/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.
 *
 * This code will return back into the fork trampoline code which then
 * runs doreti.
 *
 * NOTE: The mplock is not held at any point.
 */
void
generic_lwp_return(struct lwp *lp, struct trapframe *frame)
{
	struct proc *p = lp->lwp_proc;

	/*
	 * Newly forked processes are given a kernel priority.  We have to
	 * adjust the priority to a normal user priority and fake entry
	 * into the kernel (call userenter()) to install a passive release
	 * function just in case userret() decides to stop the process.  This
	 * can occur when ^Z races a fork.  If we do not install the passive
	 * release function the current process designation will not be
	 * released when the thread goes to sleep.
	 */
	lwkt_setpri_self(TDPRI_USER_NORM);
	userenter(lp->lwp_thread, p);
	userret(lp, frame, 0);

	if (KTRPOINT(lp->lwp_thread, KTR_SYSRET))
		ktrsysret(lp, SYS_fork, 0, 0);

	p->p_flag |= P_PASSIVE_ACQ;
	userexit(lp);
	p->p_flag &= ~P_PASSIVE_ACQ;
}
/*
 * If PGEX_FPFAULT is set then set FP_VIRTFP in the PCB to force a T_DNA
 * fault (which is then passed back to the virtual kernel) if an attempt is
 * made to use the FP unit.
 *
 * XXX this is a fairly big hack.
 */
void
set_vkernel_fp(struct trapframe *frame)
{
	struct thread *td = curthread;

	if (frame->tf_xflags & PGEX_FPFAULT) {
		td->td_pcb->pcb_flags |= FP_VIRTFP;
		if (mdcpu->gd_npxthread == td)
			npxexit();
	} else {
		td->td_pcb->pcb_flags &= ~FP_VIRTFP;
	}
}
/*
 * Called from vkernel_trap() to fixup the vkernel's syscall
 * frame for vmspace_ctl() return.
 */
void
cpu_vkernel_trap(struct trapframe *frame, int error)
{
	frame->tf_eax = error;
	if (error)
		frame->tf_eflags |= PSL_C;
	else
		frame->tf_eflags &= ~PSL_C;
}