/*
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $
 * $DragonFly: src/sys/i386/i386/Attic/trap.c,v 1.80 2006/09/13 18:45:12 swildner Exp $
 */

/*
 * 386 Trap and System call handling
 */
#include "opt_ktrace.h"
#include "opt_clock.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pioctl.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vmmeter.h>
#include <sys/malloc.h>
#include <sys/ktrace.h>
#include <sys/upcall.h>
#include <sys/sysproto.h>
#include <sys/sysunion.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>

#include <machine/cpu.h>
#include <machine/ipl.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <machine/tss.h>
#include <machine/globaldata.h>

#include <i386/isa/intr_machdep.h>

#include <sys/syslog.h>
#include <machine/clock.h>
#include <machine/vm86.h>

#include <sys/msgport2.h>
#include <sys/thread2.h>
#ifdef SMP
#define MAKEMPSAFE(have_mplock)			\
	if (have_mplock == 0) {			\
		get_mplock();			\
		have_mplock = 1;		\
	}
#else
#define MAKEMPSAFE(have_mplock)
#endif

int (*pmath_emulate) (struct trapframe *);

extern void trap (struct trapframe frame);
extern int trapwrite (unsigned addr);
extern void syscall2 (struct trapframe frame);

static int trap_pfault (struct trapframe *, int, vm_offset_t);
static void trap_fatal (struct trapframe *, vm_offset_t);
void dblfault_handler (void);

extern inthand_t IDTVEC(syscall);

#define MAX_TRAP_MSG		28
static char *trap_msg[] = {
	"",					/*  0 unused */
	"privileged instruction fault",		/*  1 T_PRIVINFLT */
	"",					/*  2 unused */
	"breakpoint instruction fault",		/*  3 T_BPTFLT */
	"",					/*  4 unused */
	"",					/*  5 unused */
	"arithmetic trap",			/*  6 T_ARITHTRAP */
	"system forced exception",		/*  7 T_ASTFLT */
	"",					/*  8 unused */
	"general protection fault",		/*  9 T_PROTFLT */
	"trace trap",				/* 10 T_TRCTRAP */
	"",					/* 11 unused */
	"page fault",				/* 12 T_PAGEFLT */
	"",					/* 13 unused */
	"alignment fault",			/* 14 T_ALIGNFLT */
	"",					/* 15 unused */
	"",					/* 16 unused */
	"",					/* 17 unused */
	"integer divide fault",			/* 18 T_DIVIDE */
	"non-maskable interrupt trap",		/* 19 T_NMI */
	"overflow trap",			/* 20 T_OFLOW */
	"FPU bounds check fault",		/* 21 T_BOUND */
	"FPU device not available",		/* 22 T_DNA */
	"double fault",				/* 23 T_DOUBLEFLT */
	"FPU operand fetch fault",		/* 24 T_FPOPFLT */
	"invalid TSS fault",			/* 25 T_TSSFLT */
	"segment not present fault",		/* 26 T_SEGNPFLT */
	"stack fault",				/* 27 T_STKFLT */
	"machine check trap",			/* 28 T_MCHK */
};

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
extern int has_f00f_bug;
#endif

static int ddb_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, ddb_on_nmi, CTLFLAG_RW,
	&ddb_on_nmi, 0, "Go to DDB on NMI");
static int panic_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
	&panic_on_nmi, 0, "Panic on NMI");
static int fast_release;
SYSCTL_INT(_machdep, OID_AUTO, fast_release, CTLFLAG_RW,
	&fast_release, 0, "Passive Release was optimal");
static int slow_release;
SYSCTL_INT(_machdep, OID_AUTO, slow_release, CTLFLAG_RW,
	&slow_release, 0, "Passive Release was nonoptimal");
static int syscall_mpsafe = 0;
SYSCTL_INT(_kern, OID_AUTO, syscall_mpsafe, CTLFLAG_RW,
	&syscall_mpsafe, 0, "Allow MPSAFE marked syscalls to run without BGL");
TUNABLE_INT("kern.syscall_mpsafe", &syscall_mpsafe);
static int trap_mpsafe = 0;
SYSCTL_INT(_kern, OID_AUTO, trap_mpsafe, CTLFLAG_RW,
	&trap_mpsafe, 0, "Allow traps to mostly run without the BGL");
TUNABLE_INT("kern.trap_mpsafe", &trap_mpsafe);

MALLOC_DEFINE(M_SYSMSG, "sysmsg", "sysmsg structure");
extern int max_sysmsg;

/*
 * Passive USER->KERNEL transition.  This only occurs if we block in the
 * kernel while still holding our userland priority.  We have to fixup our
 * priority in order to avoid potential deadlocks before we allow the system
 * to switch us to another thread.
 */
static void
passive_release(struct thread *td)
{
	struct lwp *lp = td->td_lwp;

	td->td_release = NULL;
	lwkt_setpri_self(TDPRI_KERN_USER);
	lp->lwp_proc->p_usched->release_curproc(lp);
}

/*
 * userenter() passively intercepts the thread switch function to increase
 * the thread priority from a user priority to a kernel priority, reducing
 * syscall and trap overhead for the case where no switch occurs.
 */
static __inline void
userenter(struct thread *curtd)
{
	curtd->td_release = passive_release;
}
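
/*
 * Illustrative sketch (an assumption, not code from this file): the LWKT
 * switch code is expected to invoke td_release just before switching
 * away, roughly:
 *
 *	if (td->td_release)
 *		td->td_release(td);
 *
 * so a syscall or trap that never blocks pays only the single pointer
 * assignment above and never takes a priority round trip.
 */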

/*
 * Handle signals, upcalls, profiling, and other AST's and/or tasks that
 * must be completed before we can return to or try to return to userland.
 *
 * Note that td_sticks is a 64 bit quantity, but there's no point doing 64
 * bit arithmetic on the delta calculation so the absolute tick values are
 * truncated to an integer.
 */
static void
userret(struct lwp *lp, struct trapframe *frame, int sticks)
{
	struct proc *p = lp->lwp_proc;
	int sig;

	/*
	 * Charge system time if profiling.  Note: times are in microseconds.
	 * This may do a copyout and block, so do it first even though it
	 * means some system time will be charged as user time.
	 */
	if (p->p_flag & P_PROFIL) {
		addupc_task(p, frame->tf_eip,
			(u_int)((int)p->p_thread->td_sticks - sticks));
	}

	/*
	 * Block here if we are in a stopped state.
	 */
	if (p->p_flag & P_STOPPED) {
		get_mplock();
		tstop(p);
		rel_mplock();
	}

	/*
	 * Post any pending upcalls
	 */
	if (p->p_flag & P_UPCALLPEND) {
		p->p_flag &= ~P_UPCALLPEND;
		get_mplock();
		postupcall(lp);
		rel_mplock();
	}

	/*
	 * Post any pending signals
	 */
	if ((sig = CURSIG(p)) != 0) {
		get_mplock();
		postsig(sig);
		rel_mplock();
	}

	/*
	 * block here if we are swapped out, but still process signals
	 * (such as SIGKILL).  proc0 (the swapin scheduler) is already
	 * aware of our situation, we do not have to wake it up.
	 */
	if (p->p_flag & P_SWAPPEDOUT) {
		p->p_flag |= P_SWAPWAIT;
		swapin_request();
		if (p->p_flag & P_SWAPWAIT)
			tsleep(p, PCATCH, "SWOUT", 0);
		p->p_flag &= ~P_SWAPWAIT;
	}
}
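
/*
 * Worked example of the truncation note above (illustrative): td_sticks
 * deltas survive the 64->32 bit truncation because subtraction is
 * modular, e.g.
 *
 *	(int)0x100000005LL - (int)0x100000002LL == 5 - 2 == 3
 *
 * which equals the true delta whenever that delta fits in an int.
 */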

/*
 * Cleanup from userenter and any passive release that might have occurred.
 * We must reclaim the current-process designation before we can return
 * to usermode.  We also handle both LWKT and USER reschedule requests.
 */
static __inline void
userexit(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;
	globaldata_t gd = td->td_gd;

	/*
	 * If a user reschedule is requested force a new process to be
	 * chosen by releasing the current process.  Our process will only
	 * be chosen again if it has a considerably better priority.
	 */
	if (user_resched_wanted())
		lp->lwp_proc->p_usched->release_curproc(lp);

	/*
	 * Handle a LWKT reschedule request first.  Since our passive release
	 * is still in place we do not have to do anything special.
	 */
	if (lwkt_resched_wanted())
		lwkt_switch();

	/*
	 * Acquire the current process designation for this user scheduler
	 * on this cpu.  This will also handle any user-reschedule requests.
	 */
	lp->lwp_proc->p_usched->acquire_curproc(lp);
	/* We may have switched cpus on acquisition */
	gd = td->td_gd;

	/*
	 * Reduce our priority in preparation for a return to userland.  If
	 * our passive release function was still in place, our priority was
	 * never raised and does not need to be reduced.
	 */
	if (td->td_release == NULL)
		lwkt_setpri_self(TDPRI_USER_NORM);
	td->td_release = NULL;

	/*
	 * After reducing our priority there might be other kernel-level
	 * LWKTs that now have a greater priority.  Run them as necessary.
	 * We don't have to worry about losing cpu to userland because
	 * we still control the current-process designation and we no longer
	 * have a passive release function installed.
	 */
	if (lwkt_checkpri_self())
		lwkt_switch();
}
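
/*
 * Usage sketch, as seen in trap(), syscall2() and fork_return() below:
 *
 *	userenter(td);			install the passive release function
 *	...handle the trap or syscall...
 *	userret(lp, &frame, sticks);	signals, upcalls, profiling
 *	userexit(lp);			reacquire curproc, restore priority
 */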

/*
 * Exception, fault, and trap interface to the kernel.
 * This common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed.
 *
 * This function is also called from doreti in an interlock to handle ASTs.
 * For example:  hardwareint->INTROUTINE->(set ast)->doreti->trap
 *
 * NOTE!  We have to retrieve the fault address prior to obtaining the
 * MP lock because get_mplock() may switch out.  YYY cr2 really ought
 * to be retrieved by the assembly code, not here.
 *
 * XXX gd_trap_nesting_level currently prevents lwkt_switch() from panicking
 * if an attempt is made to switch from a fast interrupt or IPI.  This is
 * necessary to properly take fatal kernel traps on SMP machines if
 * get_mplock() has to block.
 */
void
trap(struct trapframe frame)
{
	struct globaldata *gd = mycpu;
	struct thread *td = gd->gd_curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p = td->td_proc;
	int sticks = 0;
	int i = 0, ucode = 0, type, code;
#ifdef SMP
	int have_mplock = 0;
#endif
	int crit_count = td->td_pri & ~TDPRI_MASK;
	vm_offset_t eva;

#ifdef DDB
	if (db_active) {
		eva = (frame.tf_trapno == T_PAGEFLT ? rcr2() : 0);
		++gd->gd_trap_nesting_level;
		MAKEMPSAFE(have_mplock);
		trap_fatal(&frame, eva);
		--gd->gd_trap_nesting_level;
		goto out2;
	}
#endif

	eva = 0;
	++gd->gd_trap_nesting_level;
	if (frame.tf_trapno == T_PAGEFLT) {
		/*
		 * For some Cyrix CPUs, %cr2 is clobbered by interrupts.
		 * This problem is worked around by using an interrupt
		 * gate for the pagefault handler.  We are finally ready
		 * to read %cr2 and then must reenable interrupts.
		 *
		 * XXX this should be in the switch statement, but the
		 * NO_F00F_HACK and VM86 goto and ifdefs obfuscate the
		 * flow of control too much for this to be obviously
		 * correct.
		 */
		eva = rcr2();
		cpu_enable_intr();
	}
#ifdef SMP
	if (trap_mpsafe == 0)
		MAKEMPSAFE(have_mplock);
#endif
	--gd->gd_trap_nesting_level;

	if (!(frame.tf_eflags & PSL_I)) {
		/*
		 * Buggy application or kernel code has disabled interrupts
		 * and then trapped.  Enabling interrupts now is wrong, but
		 * it is better than running with interrupts disabled until
		 * they are accidentally enabled later.
		 */
		type = frame.tf_trapno;
		if (ISPL(frame.tf_cs) == SEL_UPL ||
		    (frame.tf_eflags & PSL_VM)) {
			MAKEMPSAFE(have_mplock);
			printf(
			    "pid %ld (%s): trap %d with interrupts disabled\n",
			    (long)curproc->p_pid, curproc->p_comm, type);
		} else if (type != T_BPTFLT && type != T_TRCTRAP) {
			/*
			 * XXX not quite right, since this may be for a
			 * multiple fault in user mode.
			 */
			MAKEMPSAFE(have_mplock);
			printf("kernel trap %d with interrupts disabled\n",
			    type);
		}
		cpu_enable_intr();
	}

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
restart:
#endif
	type = frame.tf_trapno;
	code = frame.tf_err;

	if (in_vm86call) {
		ASSERT_MP_LOCK_HELD(curthread);
		if (frame.tf_eflags & PSL_VM &&
		    (type == T_PROTFLT || type == T_STKFLT)) {
#ifdef SMP
			KKASSERT(td->td_mpcount > 0);
#endif
			i = vm86_emulate((struct vm86frame *)&frame);
#ifdef SMP
			KKASSERT(td->td_mpcount > 0);
#endif
			if (i != 0) {
				/*
				 * returns to original process
				 */
#ifdef SMP
				vm86_trap((struct vm86frame *)&frame,
					  have_mplock);
#else
				vm86_trap((struct vm86frame *)&frame, 0);
#endif
			} else {
				vm86_trap((struct vm86frame *)&frame, 0);
			}
			KKASSERT(0); /* NOT REACHED */
		}
		switch (type) {
		/*
		 * these traps want either a process context, or
		 * assume a normal userspace trap.
		 */
		case T_PROTFLT:
		case T_SEGNPFLT:
			trap_fatal(&frame, eva);
			goto out2;
		case T_TRCTRAP:
			type = T_BPTFLT;	/* kernel breakpoint */
			/* FALL THROUGH */
		}
		goto kernel_trap;	/* normal kernel trap handling */
	}

	if ((ISPL(frame.tf_cs) == SEL_UPL) || (frame.tf_eflags & PSL_VM)) {
		/* user trap */

		userenter(td);

		sticks = (int)td->td_sticks;
		lp->lwp_md.md_regs = &frame;

		switch (type) {
		case T_PRIVINFLT:	/* privileged instruction fault */
			ucode = type;
			i = SIGILL;
			break;

		case T_BPTFLT:		/* bpt instruction fault */
		case T_TRCTRAP:		/* trace trap */
			frame.tf_eflags &= ~PSL_T;
			i = SIGTRAP;
			break;

		case T_ARITHTRAP:	/* arithmetic trap */
			ucode = code;
			i = SIGFPE;
			break;

		case T_ASTFLT:		/* Allow process switch */
			mycpu->gd_cnt.v_soft++;
			if (mycpu->gd_reqflags & RQF_AST_OWEUPC) {
				atomic_clear_int_nonlocked(&mycpu->gd_reqflags,
					    RQF_AST_OWEUPC);
				addupc_task(p, p->p_prof.pr_addr,
					    p->p_prof.pr_ticks);
			}
			goto out;

			/*
			 * The following two traps can happen in
			 * vm86 mode, and, if so, we want to handle
			 * them specially.
			 */
		case T_PROTFLT:		/* general protection fault */
		case T_STKFLT:		/* stack fault */
			if (frame.tf_eflags & PSL_VM) {
				i = vm86_emulate((struct vm86frame *)&frame);
				if (i == 0)
					goto out;
				break;
			}
			/* FALL THROUGH */

		case T_SEGNPFLT:	/* segment not present fault */
		case T_TSSFLT:		/* invalid TSS fault */
		case T_DOUBLEFLT:	/* double fault */
		default:
			ucode = code + BUS_SEGM_FAULT;
			i = SIGBUS;
			break;

		case T_PAGEFLT:		/* page fault */
			MAKEMPSAFE(have_mplock);
			i = trap_pfault(&frame, TRUE, eva);
			if (i == -1)
				goto out;
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
			if (i == -2)
				goto restart;
#endif
			if (i == 0)
				goto out;

			ucode = T_PAGEFLT;
			break;

		case T_DIVIDE:		/* integer divide fault */
			ucode = FPE_INTDIV_TRAP;
			i = SIGFPE;
			break;

#if NISA > 0
		case T_NMI:
			MAKEMPSAFE(have_mplock);
#ifdef POWERFAIL_NMI
			goto handle_powerfail;
#else /* !POWERFAIL_NMI */
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					printf ("NMI ... going to debugger\n");
					kdb_trap (type, 0, &frame);
				}
#endif /* DDB */
				goto out2;
			} else if (panic_on_nmi)
				panic("NMI indicates hardware failure");
			break;
#endif /* POWERFAIL_NMI */
#endif /* NISA > 0 */

		case T_OFLOW:		/* integer overflow fault */
			ucode = FPE_INTOVF_TRAP;
			i = SIGFPE;
			break;

		case T_BOUND:		/* bounds check fault */
			ucode = FPE_SUBRNG_TRAP;
			i = SIGFPE;
			break;

		case T_DNA:
			/*
			 * The kernel may have switched out the FP unit's
			 * state, causing the user process to take a fault
			 * when it tries to use the FP unit.  Restore the
			 * state here if we have a valid saved state.
			 */
			if (npxdna())
				goto out;
			if (!pmath_emulate) {
				i = SIGFPE;
				ucode = FPE_FPU_NP_TRAP;
				break;
			}
			i = (*pmath_emulate)(&frame);
			if (i == 0) {
				if (!(frame.tf_eflags & PSL_T))
					goto out2;
				frame.tf_eflags &= ~PSL_T;
				i = SIGTRAP;
			}
			/* else ucode = emulator_only_knows() XXX */
			break;

		case T_FPOPFLT:		/* FPU operand fetch fault */
			ucode = type;
			i = SIGILL;
			break;

		case T_XMMFLT:		/* SIMD floating-point exception */
			ucode = 0;	/* XXX */
			i = SIGFPE;
			break;
		}
	} else {
kernel_trap:
		/* kernel trap */

		switch (type) {
		case T_PAGEFLT:			/* page fault */
			MAKEMPSAFE(have_mplock);
			trap_pfault(&frame, FALSE, eva);
			goto out2;

		case T_DNA:
			/*
			 * The kernel may be using npx for copying or other
			 * purposes.
			 */
			if (npxdna())
				goto out2;
			break;

		case T_PROTFLT:		/* general protection fault */
		case T_SEGNPFLT:	/* segment not present fault */
			/*
			 * Invalid segment selectors and out of bounds
			 * %eip's and %esp's can be set up in user mode.
			 * This causes a fault in kernel mode when the
			 * kernel tries to return to user mode.  We want
			 * to get this fault so that we can fix the
			 * problem here and not have to check all the
			 * selectors and pointers when the user changes
			 * them.
			 */
#define MAYBE_DORETI_FAULT(where, whereto)				\
	do {								\
		if (frame.tf_eip == (int)where) {			\
			frame.tf_eip = (int)whereto;			\
			goto out2;					\
		}							\
	} while (0)
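
/*
 * For example (illustrative expansion, assuming the do/while wrapper
 * above), MAYBE_DORETI_FAULT(doreti_iret, doreti_iret_fault) turns into:
 *
 *	if (frame.tf_eip == (int)doreti_iret) {
 *		frame.tf_eip = (int)doreti_iret_fault;
 *		goto out2;
 *	}
 */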
			/*
			 * Since we don't save %gs across an interrupt
			 * frame this check must occur outside the intr
			 * nesting level check.
			 */
			if (frame.tf_eip == (int)cpu_switch_load_gs) {
				td->td_pcb->pcb_gs = 0;
				MAKEMPSAFE(have_mplock);
				psignal(p, SIGBUS);
				goto out2;
			}
			if (mycpu->gd_intr_nesting_level == 0) {
				/*
				 * Invalid %fs's and %gs's can be created using
				 * procfs or PT_SETREGS or by invalidating the
				 * underlying LDT entry.  This causes a fault
				 * in kernel mode when the kernel attempts to
				 * switch contexts.  Lose the bad context
				 * (XXX) so that we can continue, and generate
				 * a signal.
				 */
				MAYBE_DORETI_FAULT(doreti_iret,
						   doreti_iret_fault);
				MAYBE_DORETI_FAULT(doreti_popl_ds,
						   doreti_popl_ds_fault);
				MAYBE_DORETI_FAULT(doreti_popl_es,
						   doreti_popl_es_fault);
				MAYBE_DORETI_FAULT(doreti_popl_fs,
						   doreti_popl_fs_fault);
				if (td->td_pcb->pcb_onfault) {
					frame.tf_eip =
					    (register_t)td->td_pcb->pcb_onfault;
					goto out2;
				}
			}
			break;

		case T_TSSFLT:
			/*
			 * PSL_NT can be set in user mode and isn't cleared
			 * automatically when the kernel is entered.  This
			 * causes a TSS fault when the kernel attempts to
			 * `iret' because the TSS link is uninitialized.  We
			 * want to get this fault so that we can fix the
			 * problem here and not every time the kernel is
			 * entered.
			 */
			if (frame.tf_eflags & PSL_NT) {
				frame.tf_eflags &= ~PSL_NT;
				goto out2;
			}
			break;
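
		/*
		 * Illustrative example (not from this file): user code can
		 * set PSL_NT with an ordinary flags sequence such as
		 *
		 *	pushfl
		 *	orl	$PSL_NT,(%esp)
		 *	popfl
		 *
		 * which is why it must be defensively cleared here.
		 */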

		case T_TRCTRAP:	 /* trace trap */
			if (frame.tf_eip == (int)IDTVEC(syscall)) {
				/*
				 * We've just entered system mode via the
				 * syscall lcall.  Continue single stepping
				 * silently until the syscall handler has
				 * saved the flags.
				 */
				goto out2;
			}
			if (frame.tf_eip == (int)IDTVEC(syscall) + 1) {
				/*
				 * The syscall handler has now saved the
				 * flags.  Stop single stepping it.
				 */
				frame.tf_eflags &= ~PSL_T;
				goto out2;
			}
			/*
			 * Ignore debug register trace traps due to
			 * accesses in the user's address space, which
			 * can happen under several conditions such as
			 * if a user sets a watchpoint on a buffer and
			 * then passes that buffer to a system call.
			 * We still want to get TRCTRAPS for addresses
			 * in kernel space because that is useful when
			 * debugging the kernel.
			 */
			if (user_dbreg_trap()) {
				/*
				 * Reset breakpoint bits because the
				 * processor doesn't
				 */
				load_dr6(rdr6() & 0xfffffff0);
				goto out2;
			}
			/*
			 * Fall through (TRCTRAP kernel mode, kernel address)
			 */
		case T_BPTFLT:
			/*
			 * If DDB is enabled, let it handle the debugger trap.
			 * Otherwise, debugger traps "can't happen".
			 */
#ifdef DDB
			MAKEMPSAFE(have_mplock);
			if (kdb_trap (type, 0, &frame))
				goto out2;
#endif
			break;

#if NISA > 0
		case T_NMI:
			MAKEMPSAFE(have_mplock);
#ifdef POWERFAIL_NMI
#ifndef TIMER_FREQ
#  define TIMER_FREQ 1193182
#endif
	handle_powerfail:
		{
		  static unsigned lastalert = 0;

		  if (time_second - lastalert > 10) {
			log(LOG_WARNING, "NMI: power fail\n");
			sysbeep(TIMER_FREQ/880, hz);
			lastalert = time_second;
		  }
		  goto out2;
		}
#else /* !POWERFAIL_NMI */
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					printf ("NMI ... going to debugger\n");
					kdb_trap (type, 0, &frame);
				}
#endif /* DDB */
				goto out2;
			} else if (panic_on_nmi == 0)
				goto out2;
			/* FALL THROUGH */
#endif /* POWERFAIL_NMI */
#endif /* NISA > 0 */

		default:
			MAKEMPSAFE(have_mplock);
			trap_fatal(&frame, eva);
			goto out2;
		}
		goto out2;
	}

	/* Translate fault for emulators (e.g. Linux) */
	if (*p->p_sysent->sv_transtrap)
		i = (*p->p_sysent->sv_transtrap)(i, type);

	MAKEMPSAFE(have_mplock);
	trapsignal(p, i, ucode);

#ifdef DEBUG
	if (type <= MAX_TRAP_MSG) {
		uprintf("fatal process exception: %s",
			trap_msg[type]);
		if ((type == T_PAGEFLT) || (type == T_PROTFLT))
			uprintf(", fault VA = 0x%lx", (u_long)eva);
		uprintf("\n");
	}
#endif

out:
#ifdef SMP
	if (ISPL(frame.tf_cs) == SEL_UPL)
		KASSERT(td->td_mpcount == have_mplock, ("badmpcount trap/end from %p", (void *)frame.tf_eip));
#endif
	userret(lp, &frame, sticks);
	userexit(lp);
out2:
	;
#ifdef SMP
	if (have_mplock)
		rel_mplock();
#endif
	KASSERT(crit_count == (td->td_pri & ~TDPRI_MASK),
		("syscall: critical section count mismatch! %d/%d",
		crit_count / TDPRI_CRIT, td->td_pri / TDPRI_CRIT));
}

#ifdef I386_CPU
/*
 * This version doesn't allow a page fault to user space while
 * in the kernel. The rest of the kernel needs to be made "safe"
 * before this can be used. I think the only things remaining
 * to be made safe are the process tracing/debugging code.
 */
static int
trap_pfault(struct trapframe *frame, int usermode, vm_offset_t eva)
{
	vm_offset_t va;
	struct vmspace *vm = NULL;
	vm_map_t map = 0;
	int rv = 0;
	vm_prot_t ftype;
	thread_t td = curthread;
	struct proc *p = td->td_proc;	/* may be NULL */

	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_WRITE;
	else
		ftype = VM_PROT_READ;

	va = trunc_page(eva);
	if (va < VM_MIN_KERNEL_ADDRESS) {
		if (p == NULL ||
		    (!usermode && va < VM_MAXUSER_ADDRESS &&
		     (td->td_gd->gd_intr_nesting_level != 0 ||
		      td->td_pcb->pcb_onfault == NULL))) {
			trap_fatal(frame, eva);
			return (-1);
		}

		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL. If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */
		vm = p->p_vmspace;
		if (vm == NULL)
			goto nogo;

		map = &vm->vm_map;

		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		PHOLD(p);

		/*
		 * Grow the stack if necessary
		 */
		/* grow_stack returns false only if va falls into
		 * a growable stack region and the stack growth
		 * fails.  It returns true if va was not within
		 * a growable stack region, or if the stack
		 * growth succeeded.
		 */
		if (!grow_stack (p, va)) {
			rv = KERN_FAILURE;
			PRELE(p);
			goto nogo;
		}

		/* Fault in the user page: */
		rv = vm_fault(map, va, ftype,
			      (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
						      : VM_FAULT_NORMAL);

		PRELE(p);
	} else {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 */
		if (usermode)
			goto nogo;

		/*
		 * Since we know that kernel virtual address addresses
		 * always have pte pages mapped, we just have to fault
		 * the page.
		 */
		rv = vm_fault(kernel_map, va, ftype, VM_FAULT_NORMAL);
	}

	if (rv == KERN_SUCCESS)
		return (0);
nogo:
	if (!usermode) {
		if (td->td_gd->gd_intr_nesting_level == 0 &&
		    td->td_pcb->pcb_onfault) {
			frame->tf_eip = (register_t)td->td_pcb->pcb_onfault;
			return (0);
		}
		trap_fatal(frame, eva);
		return (-1);
	}

	/* kludge to pass faulting virtual address to sendsig */
	frame->tf_err = eva;

	return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}
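
/*
 * Illustrative sketch of the pcb_onfault convention used above (an
 * assumption about the user-access side, not code from this file):
 * a copyin/copyout style routine arms pcb_onfault with a recovery
 * address before touching user memory, e.g.
 *
 *	curthread->td_pcb->pcb_onfault = recovery_label;
 *	...touch user memory...
 *	curthread->td_pcb->pcb_onfault = NULL;
 *
 * so that a fault resumes at the recovery code instead of being fatal.
 */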
#else /* !I386_CPU */

static int
trap_pfault(struct trapframe *frame, int usermode, vm_offset_t eva)
{
	vm_offset_t va;
	struct vmspace *vm = NULL;
	vm_map_t map = 0;
	int rv = 0;
	vm_prot_t ftype;
	thread_t td = curthread;
	struct proc *p = td->td_proc;

	va = trunc_page(eva);
	if (va >= KERNBASE) {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 * An exception:  if the faulting address is the invalid
		 * instruction entry in the IDT, then the Intel Pentium
		 * F00F bug workaround was triggered, and we need to
		 * treat it as an illegal instruction, and not a page
		 * fault.
		 */
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
		if ((eva == (unsigned int)&idt[6]) && has_f00f_bug) {
			frame->tf_trapno = T_PRIVINFLT;
			return (-2);
		}
#endif
		if (usermode)
			goto nogo;

		map = kernel_map;
	} else {
		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL. If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */
		vm = p->p_vmspace;
		if (vm == NULL)
			goto nogo;

		map = &vm->vm_map;
	}

	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_WRITE;
	else
		ftype = VM_PROT_READ;

	if (map != kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		PHOLD(p);

		/*
		 * Grow the stack if necessary
		 */
		/* grow_stack returns false only if va falls into
		 * a growable stack region and the stack growth
		 * fails.  It returns true if va was not within
		 * a growable stack region, or if the stack
		 * growth succeeded.
		 */
		if (!grow_stack (p, va)) {
			rv = KERN_FAILURE;
			PRELE(p);
			goto nogo;
		}

		/* Fault in the user page: */
		rv = vm_fault(map, va, ftype,
			      (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
						      : VM_FAULT_NORMAL);

		PRELE(p);
	} else {
		/*
		 * Don't have to worry about process locking or stacks in the
		 * kernel.
		 */
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	}

	if (rv == KERN_SUCCESS)
		return (0);
nogo:
	if (!usermode) {
		if (td->td_gd->gd_intr_nesting_level == 0 &&
		    td->td_pcb->pcb_onfault) {
			frame->tf_eip = (register_t)td->td_pcb->pcb_onfault;
			return (0);
		}
		trap_fatal(frame, eva);
		return (-1);
	}

	/* kludge to pass faulting virtual address to sendsig */
	frame->tf_err = eva;

	return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}
#endif /* I386_CPU */

static void
trap_fatal(struct trapframe *frame, vm_offset_t eva)
{
	int code, type, ss, esp;
	struct soft_segment_descriptor softseg;

	code = frame->tf_err;
	type = frame->tf_trapno;
	sdtossd(&gdt[mycpu->gd_cpuid * NGDT + IDXSEL(frame->tf_cs & 0xffff)].sd, &softseg);

	if (type <= MAX_TRAP_MSG)
		printf("\n\nFatal trap %d: %s while in %s mode\n",
			type, trap_msg[type],
			frame->tf_eflags & PSL_VM ? "vm86" :
			ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
#ifdef SMP
	/* three separate prints in case of a trap on an unmapped page */
	printf("mp_lock = %08x; ", mp_lock);
	printf("cpuid = %d; ", mycpu->gd_cpuid);
	printf("lapic.id = %08x\n", lapic.id);
#endif
	if (type == T_PAGEFLT) {
		printf("fault virtual address	= 0x%x\n", eva);
		printf("fault code		= %s %s, %s\n",
			code & PGEX_U ? "user" : "supervisor",
			code & PGEX_W ? "write" : "read",
			code & PGEX_P ? "protection violation" : "page not present");
	}
	printf("instruction pointer	= 0x%x:0x%x\n",
	       frame->tf_cs & 0xffff, frame->tf_eip);
	if ((ISPL(frame->tf_cs) == SEL_UPL) || (frame->tf_eflags & PSL_VM)) {
		ss = frame->tf_ss & 0xffff;
		esp = frame->tf_esp;
	} else {
		ss = GSEL(GDATA_SEL, SEL_KPL);
		esp = (int)&frame->tf_esp;
	}
	printf("stack pointer		= 0x%x:0x%x\n", ss, esp);
	printf("frame pointer		= 0x%x:0x%x\n", ss, frame->tf_ebp);
	printf("code segment		= base 0x%x, limit 0x%x, type 0x%x\n",
	       softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
	printf("			= DPL %d, pres %d, def32 %d, gran %d\n",
	       softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_def32,
	       softseg.ssd_gran);
	printf("processor eflags	= ");
	if (frame->tf_eflags & PSL_T)
		printf("trace trap, ");
	if (frame->tf_eflags & PSL_I)
		printf("interrupt enabled, ");
	if (frame->tf_eflags & PSL_NT)
		printf("nested task, ");
	if (frame->tf_eflags & PSL_RF)
		printf("resume, ");
	if (frame->tf_eflags & PSL_VM)
		printf("vm86, ");
	printf("IOPL = %d\n", (frame->tf_eflags & PSL_IOPL) >> 12);
	printf("current process		= ");
	if (curproc) {
		printf("%lu (%s)\n",
		    (u_long)curproc->p_pid, curproc->p_comm ?
		    curproc->p_comm : "");
	} else {
		printf("Idle\n");
	}
	printf("current thread		= pri %d ", curthread->td_pri);
	if (curthread->td_pri >= TDPRI_CRIT)
		printf("(CRIT)");
	printf("\n");
#ifdef SMP
/**
 *  XXX FIXME:
 *	we probably SHOULD have stopped the other CPUs before now!
 *	another CPU COULD have been touching cpl at this moment...
 */
	printf(" <- SMP: XXX");
#endif
	printf("\n");

#ifdef DDB
	if ((debugger_on_panic || db_active) && kdb_trap(type, code, frame))
		return;
#endif
	printf("trap number		= %d\n", type);
	if (type <= MAX_TRAP_MSG)
		panic("%s", trap_msg[type]);
	else
		panic("unknown/reserved trap");
}

/*
 * Double fault handler. Called when a fault occurs while writing
 * a frame for a trap/exception onto the stack. This usually occurs
 * when the stack overflows (such is the case with infinite recursion,
 * for example).
 *
 * XXX Note that the current PTD gets replaced by IdlePTD when the
 * task switch occurs. This means that the stack that was active at
 * the time of the double fault is not available at <kstack> unless
 * the machine was idle when the double fault occurred. The downside
 * of this is that "trace <ebp>" in ddb won't work.
 */
void
dblfault_handler(void)
{
	struct mdglobaldata *gd = mdcpu;

	printf("\nFatal double fault:\n");
	printf("eip = 0x%x\n", gd->gd_common_tss.tss_eip);
	printf("esp = 0x%x\n", gd->gd_common_tss.tss_esp);
	printf("ebp = 0x%x\n", gd->gd_common_tss.tss_ebp);
#ifdef SMP
	/* three separate prints in case of a trap on an unmapped page */
	printf("mp_lock = %08x; ", mp_lock);
	printf("cpuid = %d; ", mycpu->gd_cpuid);
	printf("lapic.id = %08x\n", lapic.id);
#endif
	panic("double fault");
}

/*
 * Compensate for 386 brain damage (missing URKR).
 * This is a little simpler than the pagefault handler in trap() because
 * the page tables have already been faulted in and high addresses
 * are thrown out early for other reasons.
 */
int
trapwrite(unsigned addr)
{
	struct proc *p;
	vm_offset_t va;
	struct vmspace *vm;
	int rv;

	va = trunc_page((vm_offset_t)addr);
	/*
	 * XXX - MAX is END.  Changed > to >= for temp. fix.
	 */
	if (va >= VM_MAXUSER_ADDRESS)
		return (1);

	p = curproc;
	vm = p->p_vmspace;

	PHOLD(p);

	if (!grow_stack (p, va)) {
		PRELE(p);
		return (1);
	}

	/*
	 * fault the data page
	 */
	rv = vm_fault(&vm->vm_map, va, VM_PROT_WRITE, VM_FAULT_DIRTY);

	PRELE(p);

	if (rv != KERN_SUCCESS)
		return (1);

	return (0);
}
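
/*
 * Illustrative caller sketch (an assumption, not code from this file):
 * a real 386 ignores page-level write protection in supervisor mode, so
 * a copyout-style routine cannot rely on taking a fault and instead
 * verifies writability by hand:
 *
 *	if (trapwrite(addr) != 0)
 *		return (EFAULT);	(page could not be made writable)
 */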

/*
 * syscall2 -	MP aware system call request C handler
 *
 * A system call is essentially treated as a trap except that the
 * MP lock is not held on entry or return.  We are responsible for
 * obtaining the MP lock if necessary and for handling ASTs
 * (e.g. a task switch) prior to return.
 *
 * In general, only simple access and manipulation of curproc and
 * the current stack is allowed without having to hold MP lock.
 *
 * MPSAFE - note that large sections of this routine are run without
 *	    the MP lock.
 */
void
syscall2(struct trapframe frame)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;
	caddr_t params;
	struct sysent *callp;
	register_t orig_tf_eflags;
	int sticks;
	int error;
	int narg;
	int crit_count = td->td_pri & ~TDPRI_MASK;
#ifdef SMP
	int have_mplock = 0;
#endif
	u_int code;
	union sysunion args;

#ifdef DIAGNOSTIC
	if (ISPL(frame.tf_cs) != SEL_UPL) {
		get_mplock();
		panic("syscall");
		/* NOT REACHED */
	}
#endif

#ifdef SMP
	KASSERT(td->td_mpcount == 0, ("badmpcount syscall2 from %p", (void *)frame.tf_eip));
	if (syscall_mpsafe == 0)
		MAKEMPSAFE(have_mplock);
#endif
	userenter(td);	/* lazy raise our priority */

	sticks = (int)td->td_sticks;

	lp->lwp_md.md_regs = &frame;
	params = (caddr_t)frame.tf_esp + sizeof(int);
	code = frame.tf_eax;
	orig_tf_eflags = frame.tf_eflags;

	if (p->p_sysent->sv_prepsyscall) {
		(*p->p_sysent->sv_prepsyscall)(
			&frame, (int *)(&args.nosys.sysmsg + 1),
			&code, &params);
	} else {
		/*
		 * Need to check if this is a 32 bit or 64 bit syscall.
		 * fuword is MP aware.
		 */
		if (code == SYS_syscall) {
			/*
			 * Code is first argument, followed by actual args.
			 */
			code = fuword(params);
			params += sizeof(int);
		} else if (code == SYS___syscall) {
			/*
			 * Like syscall, but code is a quad, so as to maintain
			 * quad alignment for the rest of the arguments.
			 */
			code = fuword(params);
			params += sizeof(quad_t);
		}
	}
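
	/*
	 * Userland example of the indirection above (illustrative, not part
	 * of this file): syscall(SYS_getpid) traps with SYS_syscall in %eax
	 * and the real code, SYS_getpid, as the first stack argument;
	 * __syscall() does the same with the code widened to a quad.
	 */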

	code &= p->p_sysent->sv_mask;
	if (code >= p->p_sysent->sv_size)
		callp = &p->p_sysent->sv_table[0];
	else
		callp = &p->p_sysent->sv_table[code];

	narg = callp->sy_narg & SYF_ARGMASK;

	/*
	 * copyin is MP aware, but the tracing code is not
	 */
	if (narg && params) {
		error = copyin(params, (caddr_t)(&args.nosys.sysmsg + 1),
				narg * sizeof(register_t));
		if (error) {
#ifdef KTRACE
			if (KTRPOINT(td, KTR_SYSCALL)) {
				MAKEMPSAFE(have_mplock);
				ktrsyscall(p, code, narg,
					(void *)(&args.nosys.sysmsg + 1));
			}
#endif
			goto bad;
		}
	}

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSCALL)) {
		MAKEMPSAFE(have_mplock);
		ktrsyscall(p, code, narg, (void *)(&args.nosys.sysmsg + 1));
	}
#endif

	/*
	 * For traditional syscall code edx is left untouched when 32 bit
	 * results are returned.  Since edx is loaded from fds[1] when the
	 * system call returns we pre-set it here.
	 */
	args.sysmsg_fds[0] = 0;
	args.sysmsg_fds[1] = frame.tf_edx;
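
	/*
	 * (Illustrative note: pipe(2) is the classic two-register case,
	 * returning fds[0] in %eax and fds[1] in %edx; for plain 32 bit
	 * results %edx simply passes through unchanged.)
	 */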

	STOPEVENT(p, S_SCE, narg);	/* MP aware */

	/*
	 * Try to run the syscall without the MP lock if the syscall
	 * is MP safe.  We have to obtain the MP lock no matter what if
	 * the syscall is not MPSAFE.
	 */
	if ((callp->sy_narg & SYF_MPSAFE) == 0)
		MAKEMPSAFE(have_mplock);

	error = (*callp->sy_call)(&args);

	/*
	 * MP SAFE (we may or may not have the MP lock at this point)
	 */
	if (error == 0) {
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		lp = curthread->td_lwp;
		frame.tf_eax = args.sysmsg_fds[0];
		frame.tf_edx = args.sysmsg_fds[1];
		frame.tf_eflags &= ~PSL_C;
	} else if (error == ERESTART) {
		/*
		 * Reconstruct pc, assuming lcall $X,y is 7 bytes,
		 * int 0x80 is 2 bytes.  We saved this in tf_err.
		 */
		frame.tf_eip -= frame.tf_err;
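
		/*
		 * Worked example (illustrative): for "int $0x80" the entry
		 * code saved 2 in tf_err, so tf_eip -= 2 backs up over the
		 * two byte instruction and the syscall is re-issued after
		 * the signal, if any, is handled.
		 */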
	} else if (error == EJUSTRETURN) {
		/* do nothing */
	} else if (error == EASYNC) {
		panic("Unexpected EASYNC return value (for now)");
	} else {
bad:
		if (p->p_sysent->sv_errsize) {
			if (error >= p->p_sysent->sv_errsize)
				error = -1;	/* XXX */
			else
				error = p->p_sysent->sv_errtbl[error];
		}
		frame.tf_eax = error;
		frame.tf_eflags |= PSL_C;
	}

	/*
	 * Traced syscall.  trapsignal() is not MP aware.
	 */
	if ((orig_tf_eflags & PSL_T) && !(orig_tf_eflags & PSL_VM)) {
		MAKEMPSAFE(have_mplock);
		frame.tf_eflags &= ~PSL_T;
		trapsignal(p, SIGTRAP, 0);
	}

	/*
	 * Handle reschedule and other end-of-syscall issues
	 */
	userret(lp, &frame, sticks);

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSRET)) {
		MAKEMPSAFE(have_mplock);
		ktrsysret(p, code, error, args.sysmsg_result);
	}
#endif

	/*
	 * This works because errno is findable through the
	 * register set.  If we ever support an emulation where this
	 * is not the case, this code will need to be revisited.
	 */
	STOPEVENT(p, S_SCX, code);

	userexit(lp);
#ifdef SMP
	/*
	 * Release the MP lock if we had to get it
	 */
	KASSERT(td->td_mpcount == have_mplock,
		("badmpcount syscall2/end from %p", (void *)frame.tf_eip));
	if (have_mplock)
		rel_mplock();
#endif
	KASSERT(crit_count == (td->td_pri & ~TDPRI_MASK),
		("syscall: critical section count mismatch! %d/%d",
		crit_count / TDPRI_CRIT, td->td_pri / TDPRI_CRIT));
}

/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.  MP lock is held on entry and should be
 * released on return.  This code will return back into the fork
 * trampoline code which then runs doreti.
 */
void
fork_return(struct proc *p, struct trapframe frame)
{
	struct lwp *lp;

	KKASSERT(p->p_nthreads == 1);
	lp = LIST_FIRST(&p->p_lwps);

	frame.tf_eax = 0;		/* Child returns zero */
	frame.tf_eflags &= ~PSL_C;	/* success */

	/*
	 * Newly forked processes are given a kernel priority.  We have to
	 * adjust the priority to a normal user priority and fake entry
	 * into the kernel (call userenter()) to install a passive release
	 * function just in case userret() decides to stop the process.  This
	 * can occur when ^Z races a fork.  If we do not install the passive
	 * release function the current process designation will not be
	 * released when the thread goes to sleep.
	 */
	lwkt_setpri_self(TDPRI_USER_NORM);
	userenter(lp->lwp_thread);
	userret(lp, &frame, 0);
#ifdef KTRACE
	if (KTRPOINT(lp->lwp_thread, KTR_SYSRET))
		ktrsysret(p, SYS_fork, 0, 0);
#endif
	p->p_flag |= P_PASSIVE_ACQ;
	userexit(lp);
	p->p_flag &= ~P_PASSIVE_ACQ;
#ifdef SMP
	KKASSERT(lp->lwp_thread->td_mpcount == 1);
	rel_mplock();
#endif
}