/*
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $
 * $DragonFly: src/sys/platform/pc32/i386/trap.c,v 1.26 2003/07/24 01:41:16 dillon Exp $
 */

/*
 * 386 Trap and System call handling
 */
#include "opt_ktrace.h"
#include "opt_clock.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pioctl.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vmmeter.h>
#include <sys/ktrace.h>
#include <sys/sysproto.h>
#include <sys/sysunion.h>

#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>

#include <machine/cpu.h>
#include <machine/ipl.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <machine/tss.h>
#include <machine/globaldata.h>

#include <i386/isa/intr_machdep.h>

#include <sys/syslog.h>
#include <machine/clock.h>

#include <machine/vm86.h>

#include <sys/thread2.h>
int (*pmath_emulate) __P((struct trapframe *));

extern void trap __P((struct trapframe frame));
extern int trapwrite __P((unsigned addr));
extern void syscall2 __P((struct trapframe frame));
extern void sendsys2 __P((struct trapframe frame));

static int trap_pfault __P((struct trapframe *, int, vm_offset_t));
static void trap_fatal __P((struct trapframe *, vm_offset_t));
void dblfault_handler __P((void));

extern inthand_t IDTVEC(syscall);

#define MAX_TRAP_MSG		28
static char *trap_msg[] = {
	"privileged instruction fault",		/*  1 T_PRIVINFLT */
	"breakpoint instruction fault",		/*  3 T_BPTFLT */
	"arithmetic trap",			/*  6 T_ARITHTRAP */
	"system forced exception",		/*  7 T_ASTFLT */
	"general protection fault",		/*  9 T_PROTFLT */
	"trace trap",				/* 10 T_TRCTRAP */
	"page fault",				/* 12 T_PAGEFLT */
	"alignment fault",			/* 14 T_ALIGNFLT */
	"integer divide fault",			/* 18 T_DIVIDE */
	"non-maskable interrupt trap",		/* 19 T_NMI */
	"overflow trap",			/* 20 T_OFLOW */
	"FPU bounds check fault",		/* 21 T_BOUND */
	"FPU device not available",		/* 22 T_DNA */
	"double fault",				/* 23 T_DOUBLEFLT */
	"FPU operand fetch fault",		/* 24 T_FPOPFLT */
	"invalid TSS fault",			/* 25 T_TSSFLT */
	"segment not present fault",		/* 26 T_SEGNPFLT */
	"stack fault",				/* 27 T_STKFLT */
	"machine check trap",			/* 28 T_MCHK */
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
extern int has_f00f_bug;

static int ddb_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, ddb_on_nmi, CTLFLAG_RW,
	&ddb_on_nmi, 0, "Go to DDB on NMI");
static int panic_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
	&panic_on_nmi, 0, "Panic on NMI");
static int fast_release;
SYSCTL_INT(_machdep, OID_AUTO, fast_release, CTLFLAG_RW,
	&fast_release, 0, "Passive Release was optimal");
static int slow_release;
SYSCTL_INT(_machdep, OID_AUTO, slow_release, CTLFLAG_RW,
	&slow_release, 0, "Passive Release was nonoptimal");
/*
 * USER->KERNEL transition.  Do not transition us out of userland from the
 * point of view of the userland scheduler unless we actually have to
 *
 * usertdsw is called from within a critical section and the BGL will still
 * be held.  This function is NOT called for preemptions, only for switchouts.
 */
passive_release(struct thread *td)
	struct proc *p = td->td_proc;

	td->td_release = NULL;
	lwkt_setpri_self(TDPRI_KERN_USER);
	if (p->p_flag & P_CURPROC) {

/*
 * userenter() passively intercepts the thread switch function to increase
 * the thread priority from a user priority to a kernel priority, reducing
 * syscall and trap overhead for the case where no switch occurs.
 */
	struct thread *td = curthread;

	td->td_release = passive_release;
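	/*
	 * The release hook installed here is consumed in one of two ways:
	 * passive_release() runs (and clears it) if the scheduler switches
	 * us out while in the kernel, otherwise userexit() clears it on the
	 * way back to user mode.
	 */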
userexit(struct proc *p)
	struct thread *td = p->p_thread;

	/*
	 * If we did not have to release we should already be P_CURPROC.  If
	 * we did have to release we must acquire P_CURPROC again and then
	 * restore our priority for user return.
	 *
	 * Lowering our priority may make other higher priority threads
	 * runnable.  lwkt_setpri_self() does not switch away, so call
	 * lwkt_maybe_switch() to deal with it.
	 */
	if (td->td_release) {
		td->td_release = NULL;
		KKASSERT(p->p_flag & P_CURPROC);

	switch(p->p_rtprio.type) {
		lwkt_setpri_self(TDPRI_USER_IDLE);
	case RTP_PRIO_REALTIME:
		lwkt_setpri_self(TDPRI_USER_REAL);
		lwkt_setpri_self(TDPRI_USER_NORM);
userret(struct proc *p, struct trapframe *frame, u_quad_t oticks)

	/*
	 * Post any pending signals
	 */
	while ((sig = CURSIG(p)) != 0) {

	/*
	 * If a reschedule has been requested then the easiest solution
	 * is to run our passive release function which will possibly
	 * shift our P_CURPROC designation to another user process.
	 * We don't actually switch here because that would be a waste
	 * of cycles (the newly scheduled user process would just switch
	 * back to us since we might be running at a kernel priority).
	 * Instead we fall through and will switch away when we attempt
	 * to reacquire our P_CURPROC designation.
	 */
	if (resched_wanted()) {
		if (curthread->td_release)
			passive_release(curthread);

	/*
	 * Charge system time if profiling.  Note: times are in microseconds.
	 */
	if (p->p_flag & P_PROFIL) {
		addupc_task(p, frame->tf_eip,
		    (u_int)(curthread->td_sticks - oticks));

	/*
	 * Post any pending signals XXX
	 */
	while ((sig = CURSIG(p)) != 0)

#ifdef DEVICE_POLLING
extern u_int32_t poll_in_trap;
extern int ether_poll __P((int count));
#endif /* DEVICE_POLLING */
/*
 * Exception, fault, and trap interface to the FreeBSD kernel.
 * This common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed.
 *
 * This function is also called from doreti in an interlock to handle ASTs.
 * For example:  hardwareint->INTROUTINE->(set ast)->doreti->trap
 *
 * NOTE!  We have to retrieve the fault address prior to obtaining the
 * MP lock because get_mplock() may switch out.  YYY cr2 really ought
 * to be retrieved by the assembly code, not here.
 */
	struct trapframe frame;
	struct proc *p = curproc;
	int i = 0, ucode = 0, type, code;

		eva = (frame.tf_trapno == T_PAGEFLT ? rcr2() : 0);
		trap_fatal(&frame, eva);

	if (frame.tf_trapno == T_PAGEFLT) {
		/*
		 * For some Cyrix CPUs, %cr2 is clobbered by interrupts.
		 * This problem is worked around by using an interrupt
		 * gate for the pagefault handler.  We are finally ready
		 * to read %cr2 and then must reenable interrupts.
		 *
		 * XXX this should be in the switch statement, but the
		 * NO_F00F_HACK and VM86 goto and ifdefs obfuscate the
		 * flow of control too much for this to be obviously
		 * correct.
		 */

	/*
	 * MP lock is held at this point
	 */
	if (!(frame.tf_eflags & PSL_I)) {
		/*
		 * Buggy application or kernel code has disabled interrupts
		 * and then trapped.  Enabling interrupts now is wrong, but
		 * it is better than running with interrupts disabled until
		 * they are accidentally enabled later.
		 */
		type = frame.tf_trapno;
		if (ISPL(frame.tf_cs) == SEL_UPL || (frame.tf_eflags & PSL_VM)) {
			    "pid %ld (%s): trap %d with interrupts disabled\n",
			    (long)curproc->p_pid, curproc->p_comm, type);
		} else if (type != T_BPTFLT && type != T_TRCTRAP) {
			/*
			 * XXX not quite right, since this may be for a
			 * multiple fault in user mode.
			 */
			printf("kernel trap %d with interrupts disabled\n",

#ifdef DEVICE_POLLING
		ether_poll(poll_in_trap);
#endif /* DEVICE_POLLING */
#if defined(I586_CPU) && !defined(NO_F00F_HACK)

	type = frame.tf_trapno;

	if (frame.tf_eflags & PSL_VM &&
	    (type == T_PROTFLT || type == T_STKFLT)) {
		KKASSERT(curthread->td_mpcount > 0);
		i = vm86_emulate((struct vm86frame *)&frame);
		KKASSERT(curthread->td_mpcount > 0);
			/*
			 * returns to original process
			 */
			vm86_trap((struct vm86frame *)&frame);

		/*
		 * these traps want either a process context, or
		 * assume a normal userspace trap.
		 */
			trap_fatal(&frame, eva);
		type = T_BPTFLT;	/* kernel breakpoint */
		goto kernel_trap;	/* normal kernel trap handling */

	if ((ISPL(frame.tf_cs) == SEL_UPL) || (frame.tf_eflags & PSL_VM)) {
		sticks = curthread->td_sticks;
		p->p_md.md_regs = &frame;

		case T_PRIVINFLT:	/* privileged instruction fault */

		case T_BPTFLT:		/* bpt instruction fault */
		case T_TRCTRAP:		/* trace trap */
			frame.tf_eflags &= ~PSL_T;

		case T_ARITHTRAP:	/* arithmetic trap */

		case T_ASTFLT:		/* Allow process switch */
			mycpu->gd_cnt.v_soft++;
			if (mycpu->gd_reqflags & RQF_AST_OWEUPC) {
				atomic_clear_int_nonlocked(&mycpu->gd_reqflags,
				addupc_task(p, p->p_stats->p_prof.pr_addr,
					    p->p_stats->p_prof.pr_ticks);
			/*
			 * The following two traps can happen in
			 * vm86 mode, and, if so, we want to handle
			 */
		case T_PROTFLT:		/* general protection fault */
		case T_STKFLT:		/* stack fault */
			if (frame.tf_eflags & PSL_VM) {
				i = vm86_emulate((struct vm86frame *)&frame);

		case T_SEGNPFLT:	/* segment not present fault */
		case T_TSSFLT:		/* invalid TSS fault */
		case T_DOUBLEFLT:	/* double fault */
			ucode = code + BUS_SEGM_FAULT;

		case T_PAGEFLT:		/* page fault */
			i = trap_pfault(&frame, TRUE, eva);

#if defined(I586_CPU) && !defined(NO_F00F_HACK)

		case T_DIVIDE:		/* integer divide fault */

			goto handle_powerfail;
#else /* !POWERFAIL_NMI */
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
				/*
				 * NMI can be hooked up to a pushbutton
				 */
				printf("NMI ... going to debugger\n");
				kdb_trap(type, 0, &frame);
			} else if (panic_on_nmi)
				panic("NMI indicates hardware failure");
#endif /* POWERFAIL_NMI */
#endif /* NISA > 0 */

		case T_OFLOW:		/* integer overflow fault */

		case T_BOUND:		/* bounds check fault */

			/* if a transparent fault (due to context switch "late") */
			if (!pmath_emulate) {
				ucode = FPE_FPU_NP_TRAP;
			i = (*pmath_emulate)(&frame);
				if (!(frame.tf_eflags & PSL_T))
				frame.tf_eflags &= ~PSL_T;
			/* else ucode = emulator_only_knows() XXX */

		case T_FPOPFLT:		/* FPU operand fetch fault */

		case T_XMMFLT:		/* SIMD floating-point exception */

		case T_PAGEFLT:		/* page fault */
			(void) trap_pfault(&frame, FALSE, eva);

			/*
			 * The kernel is apparently using npx for copying.
			 * XXX this should be fatal unless the kernel has
			 * registered such use.
			 */

		case T_PROTFLT:		/* general protection fault */
		case T_SEGNPFLT:	/* segment not present fault */
			/*
			 * Invalid segment selectors and out of bounds
			 * %eip's and %esp's can be set up in user mode.
			 * This causes a fault in kernel mode when the
			 * kernel tries to return to user mode.  We want
			 * to get this fault so that we can fix the
			 * problem here and not have to check all the
			 * selectors and pointers when the user changes
			 * them.
			 */
#define MAYBE_DORETI_FAULT(where, whereto)				\
			if (frame.tf_eip == (int)where) {		\
				frame.tf_eip = (int)whereto;		\
			if (mycpu->gd_intr_nesting_level == 0) {
				/*
				 * Invalid %fs's and %gs's can be created using
				 * procfs or PT_SETREGS or by invalidating the
				 * underlying LDT entry.  This causes a fault
				 * in kernel mode when the kernel attempts to
				 * switch contexts.  Lose the bad context
				 * (XXX) so that we can continue, and generate
				 * a signal.
				 */
				if (frame.tf_eip == (int)cpu_switch_load_gs) {
					curthread->td_pcb->pcb_gs = 0;

				MAYBE_DORETI_FAULT(doreti_iret,
				MAYBE_DORETI_FAULT(doreti_popl_ds,
						   doreti_popl_ds_fault);
				MAYBE_DORETI_FAULT(doreti_popl_es,
						   doreti_popl_es_fault);
				MAYBE_DORETI_FAULT(doreti_popl_fs,
						   doreti_popl_fs_fault);
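				/*
				 * Per the comment above, each
				 * MAYBE_DORETI_FAULT() use redirects a fault
				 * taken at a known doreti instruction (the
				 * final iret or a segment register pop) to
				 * its matching *_fault recovery label, so the
				 * bad user context is backed out instead of
				 * panicking the kernel.
				 */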
				if (curthread->td_pcb->pcb_onfault) {
					frame.tf_eip = (int)curthread->td_pcb->pcb_onfault;

			/*
			 * PSL_NT can be set in user mode and isn't cleared
			 * automatically when the kernel is entered.  This
			 * causes a TSS fault when the kernel attempts to
			 * `iret' because the TSS link is uninitialized.  We
			 * want to get this fault so that we can fix the
			 * problem here and not every time the kernel is
			 * entered.
			 */
			if (frame.tf_eflags & PSL_NT) {
				frame.tf_eflags &= ~PSL_NT;

		case T_TRCTRAP:		/* trace trap */
			if (frame.tf_eip == (int)IDTVEC(syscall)) {
				/*
				 * We've just entered system mode via the
				 * syscall lcall.  Continue single stepping
				 * silently until the syscall handler has
				 * saved the flags.
				 */
			if (frame.tf_eip == (int)IDTVEC(syscall) + 1) {
				/*
				 * The syscall handler has now saved the
				 * flags.  Stop single stepping it.
				 */
				frame.tf_eflags &= ~PSL_T;

			/*
			 * Ignore debug register trace traps due to
			 * accesses in the user's address space, which
			 * can happen under several conditions such as
			 * if a user sets a watchpoint on a buffer and
			 * then passes that buffer to a system call.
			 * We still want to get TRCTRAPS for addresses
			 * in kernel space because that is useful when
			 * debugging the kernel.
			 */
			if (user_dbreg_trap()) {
				/*
				 * Reset breakpoint bits because the
				 */
				load_dr6(rdr6() & 0xfffffff0);
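				/*
				 * The mask clears %dr6's low four bits
				 * (B0-B3, the breakpoint-hit status bits),
				 * which the processor does not reset by
				 * itself, so the breakpoint does not
				 * immediately re-trigger.
				 */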
			/*
			 * Fall through (TRCTRAP kernel mode, kernel address)
			 */

			/*
			 * If DDB is enabled, let it handle the debugger trap.
			 * Otherwise, debugger traps "can't happen".
			 */
			if (kdb_trap(type, 0, &frame))

# define TIMER_FREQ	1193182
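/* 1193182 Hz is the i8254 timer's input clock, used as the sysbeep() pitch reference below. */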
	static unsigned lastalert = 0;

	if (time_second - lastalert > 10)
	    log(LOG_WARNING, "NMI: power fail\n");
	    sysbeep(TIMER_FREQ/880, hz);
	    lastalert = time_second;

#else /* !POWERFAIL_NMI */
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
				/*
				 * NMI can be hooked up to a pushbutton
				 */
				printf("NMI ... going to debugger\n");
				kdb_trap(type, 0, &frame);
			} else if (panic_on_nmi == 0)
#endif /* POWERFAIL_NMI */
#endif /* NISA > 0 */

			trap_fatal(&frame, eva);

	/* Translate fault for emulators (e.g. Linux) */
	if (*p->p_sysent->sv_transtrap)
		i = (*p->p_sysent->sv_transtrap)(i, type);

	trapsignal(p, i, ucode);

	if (type <= MAX_TRAP_MSG) {
		uprintf("fatal process exception: %s",
		if ((type == T_PAGEFLT) || (type == T_PROTFLT))
			uprintf(", fault VA = 0x%lx", (u_long)eva);

	if (ISPL(frame.tf_cs) == SEL_UPL)
		KASSERT(curthread->td_mpcount == 1, ("badmpcount trap from %p", (void *)frame.tf_eip));
	userret(p, &frame, sticks);

	KKASSERT(curthread->td_mpcount > 0);

/*
 * This version doesn't allow a page fault to user space while
 * in the kernel.  The rest of the kernel needs to be made "safe"
 * before this can be used.  I think the only things remaining
 * to be made safe are the iBCS2 code and the process tracing/
 * debugging code.
 */
trap_pfault(frame, usermode, eva)
	struct trapframe *frame;

	struct vmspace *vm = NULL;
	struct proc *p = curproc;

	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_WRITE;
		ftype = VM_PROT_READ;
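	/*
	 * PGEX_W in the page-fault error code means the faulting access was
	 * a write; anything else is treated as a read for vm_fault().
	 */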
	va = trunc_page(eva);
	if (va < VM_MIN_KERNEL_ADDRESS) {
	    (!usermode && va < VM_MAXUSER_ADDRESS &&
	     (mycpu->gd_intr_nesting_level != 0 ||
	      curthread->td_pcb->pcb_onfault == NULL))) {
			trap_fatal(frame, eva);

		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL.  If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */

			/*
			 * Keep swapout from messing with us during this
			 * critical time.
			 */

			/*
			 * Grow the stack if necessary
			 */
			/* grow_stack returns false only if va falls into
			 * a growable stack region and the stack growth
			 * fails.  It returns true if va was not within
			 * a growable stack region, or if the stack
			 * growth succeeded.
			 */
			if (!grow_stack(p, va)) {

			/* Fault in the user page: */
			rv = vm_fault(map, va, ftype,
				      (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY

		/*
		 * Don't allow user-mode faults in kernel address space.
		 */

		/*
		 * Since we know that kernel virtual address addresses
		 * always have pte pages mapped, we just have to fault
		 * the page.
		 */
		rv = vm_fault(kernel_map, va, ftype, VM_FAULT_NORMAL);

	if (rv == KERN_SUCCESS)

		if (mycpu->gd_intr_nesting_level == 0 && curthread->td_pcb->pcb_onfault) {
			frame->tf_eip = (int)curthread->td_pcb->pcb_onfault;
		trap_fatal(frame, eva);

	/* kludge to pass faulting virtual address to sendsig */
	return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
trap_pfault(frame, usermode, eva)
	struct trapframe *frame;

	struct vmspace *vm = NULL;
	struct proc *p = curproc;

	va = trunc_page(eva);
	if (va >= KERNBASE) {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 * An exception:  if the faulting address is the invalid
		 * instruction entry in the IDT, then the Intel Pentium
		 * F00F bug workaround was triggered, and we need to
		 * treat it as an illegal instruction, and not a page
		 * fault.
		 */
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
		if ((eva == (unsigned int)&idt[6]) && has_f00f_bug) {
			frame->tf_trapno = T_PRIVINFLT;

		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL.  If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */

	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_WRITE;
		ftype = VM_PROT_READ;

	if (map != kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */

		/*
		 * Grow the stack if necessary
		 */
		/* grow_stack returns false only if va falls into
		 * a growable stack region and the stack growth
		 * fails.  It returns true if va was not within
		 * a growable stack region, or if the stack
		 * growth succeeded.
		 */
		if (!grow_stack(p, va)) {

		/* Fault in the user page: */
		rv = vm_fault(map, va, ftype,
			      (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY

		/*
		 * Don't have to worry about process locking or stacks in the kernel.
		 */
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);

	if (rv == KERN_SUCCESS)

		if (mycpu->gd_intr_nesting_level == 0 && curthread->td_pcb->pcb_onfault) {
			frame->tf_eip = (int)curthread->td_pcb->pcb_onfault;
		trap_fatal(frame, eva);

	/* kludge to pass faulting virtual address to sendsig */
	return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
trap_fatal(frame, eva)
	struct trapframe *frame;

	int code, type, ss, esp;
	struct soft_segment_descriptor softseg;

	code = frame->tf_err;
	type = frame->tf_trapno;
	sdtossd(&gdt[IDXSEL(frame->tf_cs & 0xffff)].sd, &softseg);
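	/*
	 * Decode the faulting code segment's GDT descriptor into softseg so
	 * its base, limit, type, and DPL can be printed below.
	 */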
	if (type <= MAX_TRAP_MSG)
		printf("\n\nFatal trap %d: %s while in %s mode\n",
			type, trap_msg[type],
			frame->tf_eflags & PSL_VM ? "vm86" :
			ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");

	/* three separate prints in case of a trap on an unmapped page */
	printf("mp_lock = %08x; ", mp_lock);
	printf("cpuid = %d; ", mycpu->gd_cpuid);
	printf("lapic.id = %08x\n", lapic.id);

	if (type == T_PAGEFLT) {
		printf("fault virtual address = 0x%x\n", eva);
		printf("fault code = %s %s, %s\n",
			code & PGEX_U ? "user" : "supervisor",
			code & PGEX_W ? "write" : "read",
			code & PGEX_P ? "protection violation" : "page not present");

	printf("instruction pointer = 0x%x:0x%x\n",
	       frame->tf_cs & 0xffff, frame->tf_eip);
	if ((ISPL(frame->tf_cs) == SEL_UPL) || (frame->tf_eflags & PSL_VM)) {
		ss = frame->tf_ss & 0xffff;
		esp = frame->tf_esp;
		ss = GSEL(GDATA_SEL, SEL_KPL);
		esp = (int)&frame->tf_esp;
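		/*
		 * For kernel-mode traps the CPU does not push %ss/%esp onto
		 * the trap frame, so the address of tf_esp itself is the
		 * closest approximation of the stack pointer at trap time.
		 */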
	printf("stack pointer = 0x%x:0x%x\n", ss, esp);
	printf("frame pointer = 0x%x:0x%x\n", ss, frame->tf_ebp);
	printf("code segment = base 0x%x, limit 0x%x, type 0x%x\n",
	       softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
	printf(" = DPL %d, pres %d, def32 %d, gran %d\n",
	       softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_def32,
	printf("processor eflags = ");
	if (frame->tf_eflags & PSL_T)
		printf("trace trap, ");
	if (frame->tf_eflags & PSL_I)
		printf("interrupt enabled, ");
	if (frame->tf_eflags & PSL_NT)
		printf("nested task, ");
	if (frame->tf_eflags & PSL_RF)
	if (frame->tf_eflags & PSL_VM)
	printf("IOPL = %d\n", (frame->tf_eflags & PSL_IOPL) >> 12);
	printf("current process = ");
		printf("%lu (%s)\n",
		    (u_long)curproc->p_pid, curproc->p_comm ?
		    curproc->p_comm : "");

	printf("current thread = pri %d ", curthread->td_pri);
	if (curthread->td_pri >= TDPRI_CRIT)
	printf("interrupt mask = ");
	if ((curthread->td_cpl & net_imask) == net_imask)
	if ((curthread->td_cpl & tty_imask) == tty_imask)
	if ((curthread->td_cpl & bio_imask) == bio_imask)
	if ((curthread->td_cpl & cam_imask) == cam_imask)
	if (curthread->td_cpl == 0)

	/*
	 * we probably SHOULD have stopped the other CPUs before now!
	 * another CPU COULD have been touching cpl at this moment...
	 */
	printf(" <- SMP: XXX");

	if ((debugger_on_panic || db_active) && kdb_trap(type, 0, frame))

	printf("trap number = %d\n", type);
	if (type <= MAX_TRAP_MSG)
		panic("%s", trap_msg[type]);
	else
		panic("unknown/reserved trap");
/*
 * Double fault handler.  Called when a fault occurs while writing
 * a frame for a trap/exception onto the stack.  This usually occurs
 * when the stack overflows (such is the case with infinite recursion,
 * for example).
 *
 * XXX Note that the current PTD gets replaced by IdlePTD when the
 * task switch occurs.  This means that the stack that was active at
 * the time of the double fault is not available at <kstack> unless
 * the machine was idle when the double fault occurred.  The downside
 * of this is that "trace <ebp>" in ddb won't work.
 */
	struct mdglobaldata *gd = mdcpu;

	printf("\nFatal double fault:\n");
	printf("eip = 0x%x\n", gd->gd_common_tss.tss_eip);
	printf("esp = 0x%x\n", gd->gd_common_tss.tss_esp);
	printf("ebp = 0x%x\n", gd->gd_common_tss.tss_ebp);

	/* three separate prints in case of a trap on an unmapped page */
	printf("mp_lock = %08x; ", mp_lock);
	printf("cpuid = %d; ", mycpu->gd_cpuid);
	printf("lapic.id = %08x\n", lapic.id);

	panic("double fault");

/*
 * Compensate for 386 brain damage (missing URKR).
 * This is a little simpler than the pagefault handler in trap() because
 * the page tables have already been faulted in and high addresses
 * are thrown out early for other reasons.
 */
	va = trunc_page((vm_offset_t)addr);
	/*
	 * XXX - MAX is END.  Changed > to >= for temp. fix.
	 */
	if (va >= VM_MAXUSER_ADDRESS)

	if (!grow_stack(p, va)) {

	/*
	 * fault the data page
	 */
	rv = vm_fault(&vm->vm_map, va, VM_PROT_WRITE, VM_FAULT_DIRTY);
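	/*
	 * VM_PROT_WRITE with VM_FAULT_DIRTY faults the page in and marks it
	 * dirty up front, since the caller is about to write to it directly.
	 */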
	if (rv != KERN_SUCCESS)

/*
 * syscall2 -	MP aware system call request C handler
 *
 * A system call is essentially treated as a trap except that the
 * MP lock is not held on entry or return.  We are responsible for
 * obtaining the MP lock if necessary and for handling ASTs
 * (e.g. a task switch) prior to return.
 *
 * In general, only simple access and manipulation of curproc and
 * the current stack is allowed without having to hold MP lock.
 */
syscall2(struct trapframe frame)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct sysent *callp;
	register_t orig_tf_eflags;
	union sysunion args;

	if (ISPL(frame.tf_cs) != SEL_UPL) {

	KASSERT(curthread->td_mpcount == 0, ("badmpcount syscall from %p", (void *)frame.tf_eip));

	/*
	 * access non-atomic field from critical section.  p_sticks is
	 * updated by the clock interrupt.  Also use this opportunity
	 * to lazy-raise our LWKT priority.
	 */
	sticks = curthread->td_sticks;

	p->p_md.md_regs = &frame;
	params = (caddr_t)frame.tf_esp + sizeof(int);
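	/*
	 * The sizeof(int) skips what is presumably the return address pushed
	 * by the user-mode syscall stub; the system call arguments start one
	 * int above the saved user %esp.
	 */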
	code = frame.tf_eax;
	orig_tf_eflags = frame.tf_eflags;

	if (p->p_sysent->sv_prepsyscall) {
		/*
		 * The prep code is not MP aware.
		 */
		(*p->p_sysent->sv_prepsyscall)(&frame, (int *)(&args.lmsg + 1), &code, &params);
		/*
		 * Need to check if this is a 32 bit or 64 bit syscall.
		 * fuword is MP aware.
		 */
		if (code == SYS_syscall) {
			/*
			 * Code is first argument, followed by actual args.
			 */
			code = fuword(params);
			params += sizeof(int);
		} else if (code == SYS___syscall) {
			/*
			 * Like syscall, but code is a quad, so as to maintain
			 * quad alignment for the rest of the arguments.
			 */
			code = fuword(params);
			params += sizeof(quad_t);

	if (p->p_sysent->sv_mask)
		code &= p->p_sysent->sv_mask;

	if (code >= p->p_sysent->sv_size)
		callp = &p->p_sysent->sv_table[0];
		callp = &p->p_sysent->sv_table[code];

	narg = callp->sy_narg & SYF_ARGMASK;

	/*
	 * copyin is MP aware, but the tracing code is not
	 */
	if (params && (i = narg * sizeof(int)) &&
	    (error = copyin(params, (caddr_t)(&args.lmsg + 1), (u_int)i))) {
		if (KTRPOINT(td, KTR_SYSCALL))
			ktrsyscall(p->p_tracep, code, narg, (void *)(&args.lmsg + 1));

	/*
	 * Try to run the syscall without the MP lock if the syscall
	 * is MP safe.  We have to obtain the MP lock no matter what if
	 */
	if ((callp->sy_narg & SYF_MPSAFE) == 0) {

	if (KTRPOINT(td, KTR_SYSCALL)) {
		ktrsyscall(p->p_tracep, code, narg, (void *)(&args.lmsg + 1));

	p->p_retval[1] = frame.tf_edx;

	STOPEVENT(p, S_SCE, narg);	/* MP aware */

	error = (*callp->sy_call)(&args);

	/*
	 * MP SAFE (we may or may not have the MP lock at this point)
	 */
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		frame.tf_eax = p->p_retval[0];
		frame.tf_edx = p->p_retval[1];
		frame.tf_eflags &= ~PSL_C;

		/*
		 * Reconstruct pc, assuming lcall $X,y is 7 bytes,
		 * int 0x80 is 2 bytes.  We saved this in tf_err.
		 */
		frame.tf_eip -= frame.tf_err;

		if (p->p_sysent->sv_errsize) {
			if (error >= p->p_sysent->sv_errsize)
				error = -1;	/* XXX */
			else
				error = p->p_sysent->sv_errtbl[error];
		frame.tf_eax = error;
		frame.tf_eflags |= PSL_C;

	/*
	 * Traced syscall.  trapsignal() is not MP aware.
	 */
	if ((orig_tf_eflags & PSL_T) && !(orig_tf_eflags & PSL_VM)) {
		frame.tf_eflags &= ~PSL_T;
		trapsignal(p, SIGTRAP, 0);

	/*
	 * Handle reschedule and other end-of-syscall issues
	 */
	userret(p, &frame, sticks);

	if (KTRPOINT(td, KTR_SYSRET)) {
		ktrsysret(p->p_tracep, code, error, p->p_retval[0]);

	/*
	 * This works because errno is findable through the
	 * register set.  If we ever support an emulation where this
	 * is not the case, this code will need to be revisited.
	 */
	STOPEVENT(p, S_SCX, code);

	/*
	 * Release the MP lock if we had to get it
	 */
	KASSERT(curthread->td_mpcount == 1, ("badmpcount syscall from %p", (void *)frame.tf_eip));
#if 0 /* work in progress */

/*
 * sendsys2 -	MP aware system message request C handler
 */
sendsys2(struct trapframe frame)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct sysent *callp;

	if (ISPL(frame.tf_cs) != SEL_UPL) {

	KASSERT(curthread->td_mpcount == 0, ("badmpcount syscall from %p", (void *)frame.tf_eip));

	/*
	 * access non-atomic field from critical section.  p_sticks is
	 * updated by the clock interrupt.  Also use this opportunity
	 * to lazy-raise our LWKT priority.
	 */
	sticks = curthread->td_sticks;

	p->p_md.md_regs = &frame;

	/*
	 * Extract the system call message.  If msgsize is zero we are
	 * blocking on a message and/or message port.
	 */
	if ((msgsize = frame.tf_edx) == 0) {
		... handle waiting ...

	if (msgsize < 0 || msgsize > sizeof(*sysmsg)) {

	/*
	 * Obtain a sysunion structure from our per-cpu cache or allocate
	 * one.  This per-cpu cache may be accessed by interrupts returning
	 */
	if ((sysmsg = TAILQ_FIRST(&mycpu->gd_sysmsgq)) != NULL) {
		TAILQ_REMOVE(&mycpu->gd_sysmsgq, sysmsg, lmsg.ms_node);
		sysmsg = malloc(sizeof(*sysmsg), M_SYSMSG, M_WAITOK);

	umsg = (void *)frame.tf_ecx;
	if ((error = copyin(umsg, sysmsg, msgsize)) != 0)

	code = sysmsg->lmsg.ms_cmd;

	if (code >= p->p_sysent->sv_size) {

	callp = &p->p_sysent->sv_table[code];

	if (KTRPOINT(td, KTR_SYSCALL)) {
		ktrsyscall(p->p_tracep, code, narg, (void *)(&args + 1));

	STOPEVENT(p, S_SCE, narg);	/* MP aware */

	/*
	 * Make the system call.  An error code is always returned, results
	 * are copied back via ms_result32 or ms_result64.
	 *
	 * NOTE!  XXX if this is a child returning from a fork curproc
	 * might be different.
	 */
	error = (*callp->sy_call)(sysmsg);

	/*
	 * If a synchronous return copy p_retval to ms_result64.
	 */
	if (error != EASYNC) {
		error = copyout(p->p_retval, &umsg->ms_result64, sizeof(umsg->ms_result64));
	frame.tf_eax = error;

	/*
	 * Traced syscall.  trapsignal() is not MP aware.
	 */
	if ((orig_tf_eflags & PSL_T) && !(orig_tf_eflags & PSL_VM)) {
		frame.tf_eflags &= ~PSL_T;
		trapsignal(p, SIGTRAP, 0);

	/*
	 * Handle reschedule and other end-of-syscall issues
	 */
	userret(p, &frame, sticks);

	if (KTRPOINT(td, KTR_SYSRET)) {
		ktrsysret(p->p_tracep, code, error, p->p_retval[0]);

	/*
	 * This works because errno is findable through the
	 * register set.  If we ever support an emulation where this
	 * is not the case, this code will need to be revisited.
	 */
	STOPEVENT(p, S_SCX, code);

	/*
	 * Release the MP lock if we had to get it
	 */
	KASSERT(curthread->td_mpcount == 1, ("badmpcount syscall from %p", (void *)frame.tf_eip));
/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.  MP lock is held on entry and should be
 * released on return.  This code will return back into the fork
 * trampoline code which then runs doreti.
 */
fork_return(p, frame)
	struct trapframe frame;

	frame.tf_eax = 0;		/* Child returns zero */
	frame.tf_eflags &= ~PSL_C;	/* success */

	userret(p, &frame, 0);

	if (KTRPOINT(p->p_thread, KTR_SYSRET))
		ktrsysret(p->p_tracep, SYS_fork, 0, 0);

	p->p_flag |= P_PASSIVE_ACQ;
	p->p_flag &= ~P_PASSIVE_ACQ;

	KKASSERT(curthread->td_mpcount == 1);