/*
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $
 * $DragonFly: src/sys/i386/i386/Attic/trap.c,v 1.19 2003/07/08 06:27:26 dillon Exp $
 */

/*
 * 386 Trap and System call handling
 */
#include "opt_ktrace.h"
#include "opt_clock.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pioctl.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vmmeter.h>
#include <sys/ktrace.h>

#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>

#include <machine/cpu.h>
#include <machine/ipl.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <machine/tss.h>
#include <machine/globaldata.h>

#include <i386/isa/intr_machdep.h>

#include <sys/syslog.h>
#include <machine/clock.h>

#include <machine/vm86.h>

#include <sys/thread2.h>
int (*pmath_emulate) __P((struct trapframe *));

extern void trap __P((struct trapframe frame));
extern int trapwrite __P((unsigned addr));
extern void syscall2 __P((struct trapframe frame));

static int trap_pfault __P((struct trapframe *, int, vm_offset_t));
static void trap_fatal __P((struct trapframe *, vm_offset_t));
void dblfault_handler __P((void));

extern inthand_t IDTVEC(syscall);
#define MAX_TRAP_MSG	28
static char *trap_msg[] = {
	"",					/*  0 unused */
	"privileged instruction fault",		/*  1 T_PRIVINFLT */
	"",					/*  2 unused */
	"breakpoint instruction fault",		/*  3 T_BPTFLT */
	"",					/*  4 unused */
	"",					/*  5 unused */
	"arithmetic trap",			/*  6 T_ARITHTRAP */
	"system forced exception",		/*  7 T_ASTFLT */
	"",					/*  8 unused */
	"general protection fault",		/*  9 T_PROTFLT */
	"trace trap",				/* 10 T_TRCTRAP */
	"",					/* 11 unused */
	"page fault",				/* 12 T_PAGEFLT */
	"",					/* 13 unused */
	"alignment fault",			/* 14 T_ALIGNFLT */
	"",					/* 15 unused */
	"",					/* 16 unused */
	"",					/* 17 unused */
	"integer divide fault",			/* 18 T_DIVIDE */
	"non-maskable interrupt trap",		/* 19 T_NMI */
	"overflow trap",			/* 20 T_OFLOW */
	"FPU bounds check fault",		/* 21 T_BOUND */
	"FPU device not available",		/* 22 T_DNA */
	"double fault",				/* 23 T_DOUBLEFLT */
	"FPU operand fetch fault",		/* 24 T_FPOPFLT */
	"invalid TSS fault",			/* 25 T_TSSFLT */
	"segment not present fault",		/* 26 T_SEGNPFLT */
	"stack fault",				/* 27 T_STKFLT */
	"machine check trap",			/* 28 T_MCHK */
};
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
extern int has_f00f_bug;
#endif

static int ddb_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, ddb_on_nmi, CTLFLAG_RW,
	&ddb_on_nmi, 0, "Go to DDB on NMI");
static int panic_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
	&panic_on_nmi, 0, "Panic on NMI");
161 * USER->KERNEL transition. Do not transition us out of userland from the
162 * point of view of the userland scheduler unless we actually have to
165 * usertdsw is called from within a critical section, but the BGL will
166 * have already been released by lwkt_switch() so only call MP safe functions
167 * that don't block and don't require the BGL!
170 usertdsw(struct thread *ntd)
172 struct thread *td = curthread;
174 td->td_switch = cpu_heavy_switch;
175 lwkt_setpri_self(TDPRI_KERN_USER);
178 * This is where we might want to catch the P_CURPROC designation
179 * and fix it for *any* switchout rather then just an mi_switch()
180 * switchout (move from mi_switch()?) YYY
182 if (p->p_flag & P_CURPROC) {
/*
 * userenter() passively intercepts the thread switch function to increase
 * the thread priority from a user priority to a kernel priority, reducing
 * syscall and trap overhead for the case where no switch occurs.
 */
	KASSERT(td->td_switch == cpu_heavy_switch,
	    ("userenter: bad td_switch = %p", td->td_switch));

	KASSERT(td->td_switch == cpu_heavy_switch || td->td_switch == usertdsw,
	    ("userenter: bad td_switch = %p", td->td_switch));

	td->td_switch = usertdsw;
userret(struct proc *p, struct trapframe *frame, u_quad_t oticks)
	struct thread *td = curthread;

	/*
	 * Post any pending signals
	 */
	while ((sig = CURSIG(p)) != 0) {

	/*
	 * Set our priority properly and restore our switch function.  If
	 * we did not hit our lazy switch function in the first place we
	 * do not need to restore anything.
	 */
	if (td->td_switch == cpu_heavy_switch) {
		switch(p->p_rtprio.type) {
			lwkt_setpri_self(TDPRI_USER_IDLE);
		case RTP_PRIO_REALTIME:
			lwkt_setpri_self(TDPRI_USER_REAL);
			lwkt_setpri_self(TDPRI_USER_NORM);
		KKASSERT(td->td_switch == usertdsw);
		td->td_switch = cpu_heavy_switch;

	/*
	 * If a reschedule has been requested we call chooseproc() to locate
	 * the next runnable process.  When we wakeup from that we check
	 * for pending signals again.
	 */
	if (resched_wanted()) {
		while ((sig = CURSIG(p)) != 0)

	/*
	 * Charge system time if profiling.
	 */
	if (p->p_flag & P_PROFIL) {
		addupc_task(p, frame->tf_eip,
		    (u_int)(curthread->td_sticks - oticks) * psratio);

	/*
	 * In order to return to userland we need to be the designated
	 * current (user) process on this cpu.  We have to wait for
	 * the userland scheduler to schedule us as P_CURPROC.
	 */
	while ((p->p_flag & P_CURPROC) == 0) {
		p->p_stats->p_ru.ru_nivcsw++;
		lwkt_deschedule_self();
	KKASSERT(mycpu->gd_uprocscheduled == 1);
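
/*
 * Taken together, userenter(), usertdsw() and userret() implement the lazy
 * priority handoff described above.  A sketch of the protocol as seen from
 * trap() and syscall2() (details elided here live in the callers):
 *
 *	userenter(curthread);		hook td_switch -> usertdsw
 *	... handle the trap or system call ...
 *	userret(p, &frame, sticks);	undo the hook, or repair the priority
 *					if usertdsw actually fired
 *
 * If no involuntary switch occurs between the two calls the only cost is
 * the pointer swap; usertdsw() itself runs only when a switch really
 * happens.
 */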
#ifdef DEVICE_POLLING
extern u_int32_t poll_in_trap;
extern int ether_poll __P((int count));
#endif /* DEVICE_POLLING */

/*
 * Exception, fault, and trap interface to the FreeBSD kernel.
 * This common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed.
 */
	struct trapframe frame;
	struct proc *p = curproc;
	int i = 0, ucode = 0, type, code;

	if (panicstr == NULL)
		KASSERT(curthread->td_mpcount >= 0, ("BADX1 AT %08x %08x", frame.tf_eip, frame.tf_esp));

	if (panicstr == NULL)
		KKASSERT(curthread->td_mpcount > 0);

		eva = (frame.tf_trapno == T_PAGEFLT ? rcr2() : 0);
		trap_fatal(&frame, eva);

	if (!(frame.tf_eflags & PSL_I)) {
		/*
		 * Buggy application or kernel code has disabled interrupts
		 * and then trapped.  Enabling interrupts now is wrong, but
		 * it is better than running with interrupts disabled until
		 * they are accidentally enabled later.
		 */
		type = frame.tf_trapno;
		if (ISPL(frame.tf_cs) == SEL_UPL || (frame.tf_eflags & PSL_VM))
			    "pid %ld (%s): trap %d with interrupts disabled\n",
			    (long)curproc->p_pid, curproc->p_comm, type);
		else if (type != T_BPTFLT && type != T_TRCTRAP)
			/*
			 * XXX not quite right, since this may be for a
			 * multiple fault in user mode.
			 */
			printf("kernel trap %d with interrupts disabled\n",
	if (frame.tf_trapno == T_PAGEFLT) {
		/*
		 * For some Cyrix CPUs, %cr2 is clobbered by interrupts.
		 * This problem is worked around by using an interrupt
		 * gate for the pagefault handler.  We are finally ready
		 * to read %cr2 and then must reenable interrupts.
		 *
		 * XXX this should be in the switch statement, but the
		 * NO_F00F_HACK and VM86 goto and ifdefs obfuscate the
		 * flow of control too much for this to be obviously
		 * correct.
		 */

#ifdef DEVICE_POLLING
		ether_poll(poll_in_trap);
#endif /* DEVICE_POLLING */

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
	type = frame.tf_trapno;

	if (frame.tf_eflags & PSL_VM &&
	    (type == T_PROTFLT || type == T_STKFLT)) {
		KKASSERT(curthread->td_mpcount > 0);
		i = vm86_emulate((struct vm86frame *)&frame);
		KKASSERT(curthread->td_mpcount > 0);
			 * returns to original process
			vm86_trap((struct vm86frame *)&frame);
	 * these traps want either a process context, or
	 * assume a normal userspace trap.
		trap_fatal(&frame, eva);
		type = T_BPTFLT;	/* kernel breakpoint */
	goto kernel_trap;	/* normal kernel trap handling */

	if ((ISPL(frame.tf_cs) == SEL_UPL) || (frame.tf_eflags & PSL_VM)) {
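		/*
		 * The saved %cs selector tells us where the trap came from:
		 * a requestor privilege level of SEL_UPL means user mode,
		 * and PSL_VM in the saved eflags means vm86 mode.  Both are
		 * handled as user traps (delivered as signals) below;
		 * everything else falls through to the kernel trap cases.
		 */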
		sticks = curthread->td_sticks;
		p->p_md.md_regs = &frame;

		case T_PRIVINFLT:	/* privileged instruction fault */

		case T_BPTFLT:		/* bpt instruction fault */
		case T_TRCTRAP:		/* trace trap */
			frame.tf_eflags &= ~PSL_T;

		case T_ARITHTRAP:	/* arithmetic trap */

		case T_ASTFLT:		/* Allow process switch */
			mycpu->gd_cnt.v_soft++;
			if (p->p_flag & P_OWEUPC) {
				p->p_flag &= ~P_OWEUPC;
				addupc_task(p, p->p_stats->p_prof.pr_addr,
					    p->p_stats->p_prof.pr_ticks);

		/*
		 * The following two traps can happen in
		 * vm86 mode, and, if so, we want to handle
		 * them specially.
		 */
		case T_PROTFLT:		/* general protection fault */
		case T_STKFLT:		/* stack fault */
			if (frame.tf_eflags & PSL_VM) {
				i = vm86_emulate((struct vm86frame *)&frame);

		case T_SEGNPFLT:	/* segment not present fault */
		case T_TSSFLT:		/* invalid TSS fault */
		case T_DOUBLEFLT:	/* double fault */
			ucode = code + BUS_SEGM_FAULT;

		case T_PAGEFLT:		/* page fault */
			i = trap_pfault(&frame, TRUE, eva);
#if defined(I586_CPU) && !defined(NO_F00F_HACK)

		case T_DIVIDE:		/* integer divide fault */

			goto handle_powerfail;
#else /* !POWERFAIL_NMI */
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
				 * NMI can be hooked up to a pushbutton
				printf ("NMI ... going to debugger\n");
				kdb_trap (type, 0, &frame);
			} else if (panic_on_nmi)
				panic("NMI indicates hardware failure");
#endif /* POWERFAIL_NMI */
#endif /* NISA > 0 */

		case T_OFLOW:		/* integer overflow fault */

		case T_BOUND:		/* bounds check fault */

			/* if a transparent fault (due to context switch "late") */

			if (!pmath_emulate) {
				ucode = FPE_FPU_NP_TRAP;
			i = (*pmath_emulate)(&frame);
			if (!(frame.tf_eflags & PSL_T))
				frame.tf_eflags &= ~PSL_T;
			/* else ucode = emulator_only_knows() XXX */

		case T_FPOPFLT:		/* FPU operand fetch fault */

		case T_XMMFLT:		/* SIMD floating-point exception */

		case T_PAGEFLT:		/* page fault */
			(void) trap_pfault(&frame, FALSE, eva);
			/*
			 * The kernel is apparently using npx for copying.
			 * XXX this should be fatal unless the kernel has
			 * registered such use.
			 */

		case T_PROTFLT:		/* general protection fault */
		case T_SEGNPFLT:	/* segment not present fault */
			/*
			 * Invalid segment selectors and out of bounds
			 * %eip's and %esp's can be set up in user mode.
			 * This causes a fault in kernel mode when the
			 * kernel tries to return to user mode.  We want
			 * to get this fault so that we can fix the
			 * problem here and not have to check all the
			 * selectors and pointers when the user changes
			 * them.
			 */
#define MAYBE_DORETI_FAULT(where, whereto)				\
		if (frame.tf_eip == (int)where) {			\
			frame.tf_eip = (int)whereto;			\
			if (mycpu->gd_intr_nesting_level == 0) {
				/*
				 * Invalid %fs's and %gs's can be created using
				 * procfs or PT_SETREGS or by invalidating the
				 * underlying LDT entry.  This causes a fault
				 * in kernel mode when the kernel attempts to
				 * switch contexts.  Lose the bad context
				 * (XXX) so that we can continue, and generate
				 * a signal.
				 */
				if (frame.tf_eip == (int)cpu_switch_load_gs) {
					curthread->td_pcb->pcb_gs = 0;
				MAYBE_DORETI_FAULT(doreti_iret,
						   doreti_iret_fault);
				MAYBE_DORETI_FAULT(doreti_popl_ds,
						   doreti_popl_ds_fault);
				MAYBE_DORETI_FAULT(doreti_popl_es,
						   doreti_popl_es_fault);
				MAYBE_DORETI_FAULT(doreti_popl_fs,
						   doreti_popl_fs_fault);
				if (curthread->td_pcb->pcb_onfault) {
					frame.tf_eip = (int)curthread->td_pcb->pcb_onfault;

			/*
			 * PSL_NT can be set in user mode and isn't cleared
			 * automatically when the kernel is entered.  This
			 * causes a TSS fault when the kernel attempts to
			 * `iret' because the TSS link is uninitialized.  We
			 * want to get this fault so that we can fix the
			 * problem here and not every time the kernel is
			 * entered.
			 */
			if (frame.tf_eflags & PSL_NT) {
				frame.tf_eflags &= ~PSL_NT;

		case T_TRCTRAP:	 /* trace trap */
			if (frame.tf_eip == (int)IDTVEC(syscall)) {
				/*
				 * We've just entered system mode via the
				 * syscall lcall.  Continue single stepping
				 * silently until the syscall handler has
				 * saved the flags.
				 */
			if (frame.tf_eip == (int)IDTVEC(syscall) + 1) {
				/*
				 * The syscall handler has now saved the
				 * flags.  Stop single stepping it.
				 */
				frame.tf_eflags &= ~PSL_T;

			/*
			 * Ignore debug register trace traps due to
			 * accesses in the user's address space, which
			 * can happen under several conditions such as
			 * if a user sets a watchpoint on a buffer and
			 * then passes that buffer to a system call.
			 * We still want to get TRCTRAPS for addresses
			 * in kernel space because that is useful when
			 * debugging the kernel.
			 */
			if (user_dbreg_trap()) {
				 * Reset breakpoint bits because the
				load_dr6(rdr6() & 0xfffffff0);

			 * Fall through (TRCTRAP kernel mode, kernel address)

			/*
			 * If DDB is enabled, let it handle the debugger trap.
			 * Otherwise, debugger traps "can't happen".
			 */
			if (kdb_trap (type, 0, &frame))
# define TIMER_FREQ 1193182

		    static unsigned lastalert = 0;

		    if (time_second - lastalert > 10)
			log(LOG_WARNING, "NMI: power fail\n");
			sysbeep(TIMER_FREQ/880, hz);
			lastalert = time_second;
#else /* !POWERFAIL_NMI */
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
				 * NMI can be hooked up to a pushbutton
				printf ("NMI ... going to debugger\n");
				kdb_trap (type, 0, &frame);
			} else if (panic_on_nmi == 0)
#endif /* POWERFAIL_NMI */
#endif /* NISA > 0 */

		trap_fatal(&frame, eva);

		/* Translate fault for emulators (e.g. Linux) */
		if (*p->p_sysent->sv_transtrap)
			i = (*p->p_sysent->sv_transtrap)(i, type);

		trapsignal(p, i, ucode);

		if (type <= MAX_TRAP_MSG) {
			uprintf("fatal process exception: %s",
			if ((type == T_PAGEFLT) || (type == T_PROTFLT))
				uprintf(", fault VA = 0x%lx", (u_long)eva);

	if (ISPL(frame.tf_cs) == SEL_UPL)
		KASSERT(curthread->td_mpcount == 1, ("badmpcount trap from %p", (void *)frame.tf_eip));

	userret(p, &frame, sticks);

	KKASSERT(curthread->td_mpcount > 0);
/*
 * This version doesn't allow a page fault to user space while
 * in the kernel. The rest of the kernel needs to be made "safe"
 * before this can be used. I think the only things remaining
 * to be made safe are the iBCS2 code and the process tracing/
 * debugging code.
 */
trap_pfault(frame, usermode, eva)
	struct trapframe *frame;
	struct vmspace *vm = NULL;
	struct proc *p = curproc;

	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_WRITE;
	else
		ftype = VM_PROT_READ;

	va = trunc_page(eva);
	if (va < VM_MIN_KERNEL_ADDRESS) {
		    (!usermode && va < VM_MAXUSER_ADDRESS &&
		     (mycpu->gd_intr_nesting_level != 0 ||
		      curthread->td_pcb->pcb_onfault == NULL))) {
			trap_fatal(frame, eva);

		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL. If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */

			/*
			 * Keep swapout from messing with us during this
			 * critical time.
			 */

			/*
			 * Grow the stack if necessary
			 */
			/* grow_stack returns false only if va falls into
			 * a growable stack region and the stack growth
			 * fails.  It returns true if va was not within
			 * a growable stack region, or if the stack
			 * growth succeeded.
			 */
			if (!grow_stack (p, va)) {

			/* Fault in the user page: */
			rv = vm_fault(map, va, ftype,
				      (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
							      : VM_FAULT_NORMAL);

		/*
		 * Don't allow user-mode faults in kernel address space.
		 */

		/*
		 * Since we know that kernel virtual addresses
		 * always have pte pages mapped, we just have to fault
		 * the page.
		 */
		rv = vm_fault(kernel_map, va, ftype, VM_FAULT_NORMAL);

	if (rv == KERN_SUCCESS)

		if (mycpu->gd_intr_nesting_level == 0 && curthread->td_pcb->pcb_onfault) {
			frame->tf_eip = (int)curthread->td_pcb->pcb_onfault;
		trap_fatal(frame, eva);

	/* kludge to pass faulting virtual address to sendsig */

	return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
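
/*
 * Note on the return value above: KERN_PROTECTION_FAILURE (the page is
 * mapped but the requested access is not permitted) is reported to the
 * process as SIGBUS, while any other VM fault failure (typically an
 * unmapped address) becomes SIGSEGV.  The second version of trap_pfault()
 * below uses the same convention.
 */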
trap_pfault(frame, usermode, eva)
	struct trapframe *frame;
	struct vmspace *vm = NULL;
	struct proc *p = curproc;

	va = trunc_page(eva);
	if (va >= KERNBASE) {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 * An exception: if the faulting address is the invalid
		 * instruction entry in the IDT, then the Intel Pentium
		 * F00F bug workaround was triggered, and we need to
		 * treat it as an illegal instruction, and not a page
		 * fault.
		 */
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
		if ((eva == (unsigned int)&idt[6]) && has_f00f_bug) {
			frame->tf_trapno = T_PRIVINFLT;

		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL. If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */

	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_WRITE;
	else
		ftype = VM_PROT_READ;

	if (map != kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */

		/*
		 * Grow the stack if necessary
		 */
		/* grow_stack returns false only if va falls into
		 * a growable stack region and the stack growth
		 * fails.  It returns true if va was not within
		 * a growable stack region, or if the stack
		 * growth succeeded.
		 */
		if (!grow_stack (p, va)) {

		/* Fault in the user page: */
		rv = vm_fault(map, va, ftype,
			      (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
						      : VM_FAULT_NORMAL);

		/*
		 * Don't have to worry about process locking or stacks in the kernel.
		 */
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);

	if (rv == KERN_SUCCESS)

		if (mycpu->gd_intr_nesting_level == 0 && curthread->td_pcb->pcb_onfault) {
			frame->tf_eip = (int)curthread->td_pcb->pcb_onfault;
		trap_fatal(frame, eva);

	/* kludge to pass faulting virtual address to sendsig */

	return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
trap_fatal(frame, eva)
	struct trapframe *frame;
	int code, type, ss, esp;
	struct soft_segment_descriptor softseg;

	code = frame->tf_err;
	type = frame->tf_trapno;
	sdtossd(&gdt[IDXSEL(frame->tf_cs & 0xffff)].sd, &softseg);

	if (type <= MAX_TRAP_MSG)
		printf("\n\nFatal trap %d: %s while in %s mode\n",
			type, trap_msg[type],
			frame->tf_eflags & PSL_VM ? "vm86" :
			ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");

	/* three separate prints in case of a trap on an unmapped page */
	printf("mp_lock = %08x; ", mp_lock);
	printf("cpuid = %d; ", mycpu->gd_cpuid);
	printf("lapic.id = %08x\n", lapic.id);

	if (type == T_PAGEFLT) {
		printf("fault virtual address = 0x%x\n", eva);
		printf("fault code = %s %s, %s\n",
			code & PGEX_U ? "user" : "supervisor",
			code & PGEX_W ? "write" : "read",
			code & PGEX_P ? "protection violation" : "page not present");

	printf("instruction pointer = 0x%x:0x%x\n",
	       frame->tf_cs & 0xffff, frame->tf_eip);
	if ((ISPL(frame->tf_cs) == SEL_UPL) || (frame->tf_eflags & PSL_VM)) {
		ss = frame->tf_ss & 0xffff;
		esp = frame->tf_esp;
		ss = GSEL(GDATA_SEL, SEL_KPL);
		esp = (int)&frame->tf_esp;
	printf("stack pointer = 0x%x:0x%x\n", ss, esp);
	printf("frame pointer = 0x%x:0x%x\n", ss, frame->tf_ebp);
	printf("code segment = base 0x%x, limit 0x%x, type 0x%x\n",
	       softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
	printf(" = DPL %d, pres %d, def32 %d, gran %d\n",
	       softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_def32,
	       softseg.ssd_gran);
1023 printf("processor eflags = ");
1024 if (frame->tf_eflags & PSL_T)
1025 printf("trace trap, ");
1026 if (frame->tf_eflags & PSL_I)
1027 printf("interrupt enabled, ");
1028 if (frame->tf_eflags & PSL_NT)
1029 printf("nested task, ");
1030 if (frame->tf_eflags & PSL_RF)
1032 if (frame->tf_eflags & PSL_VM)
1034 printf("IOPL = %d\n", (frame->tf_eflags & PSL_IOPL) >> 12);
1035 printf("current process = ");
1037 printf("%lu (%s)\n",
1038 (u_long)curproc->p_pid, curproc->p_comm ?
1039 curproc->p_comm : "");
1043 printf("current thread = pri %d ", curthread->td_pri);
1044 if (curthread->td_pri >= TDPRI_CRIT)
1047 printf("interrupt mask = ");
1048 if ((curthread->td_cpl & net_imask) == net_imask)
1050 if ((curthread->td_cpl & tty_imask) == tty_imask)
1052 if ((curthread->td_cpl & bio_imask) == bio_imask)
1054 if ((curthread->td_cpl & cam_imask) == cam_imask)
1056 if (curthread->td_cpl == 0)
1061 * we probably SHOULD have stopped the other CPUs before now!
1062 * another CPU COULD have been touching cpl at this moment...
1064 printf(" <- SMP: XXX");
1073 if ((debugger_on_panic || db_active) && kdb_trap(type, 0, frame))
1076 printf("trap number = %d\n", type);
1077 if (type <= MAX_TRAP_MSG)
1078 panic("%s", trap_msg[type]);
1080 panic("unknown/reserved trap");
/*
 * Double fault handler. Called when a fault occurs while writing
 * a frame for a trap/exception onto the stack. This usually occurs
 * when the stack overflows (such is the case with infinite recursion,
 * for example).
 *
 * XXX Note that the current PTD gets replaced by IdlePTD when the
 * task switch occurs. This means that the stack that was active at
 * the time of the double fault is not available at <kstack> unless
 * the machine was idle when the double fault occurred. The downside
 * of this is that "trace <ebp>" in ddb won't work.
 */
	struct mdglobaldata *gd = mdcpu;

	printf("\nFatal double fault:\n");
	printf("eip = 0x%x\n", gd->gd_common_tss.tss_eip);
	printf("esp = 0x%x\n", gd->gd_common_tss.tss_esp);
	printf("ebp = 0x%x\n", gd->gd_common_tss.tss_ebp);

	/* three separate prints in case of a trap on an unmapped page */
	printf("mp_lock = %08x; ", mp_lock);
	printf("cpuid = %d; ", mycpu->gd_cpuid);
	printf("lapic.id = %08x\n", lapic.id);

	panic("double fault");
/*
 * Compensate for 386 brain damage (missing URKR).
 * This is a little simpler than the pagefault handler in trap() because
 * the page tables have already been faulted in and high addresses
 * are thrown out early for other reasons.
 */
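/*
 * Background, as a hedged note: the original 80386 ignores page-level write
 * protection while running in supervisor mode (the CR0 WP bit that makes
 * the kernel honor read-only user pages only appeared with the i486).  A
 * plain kernel write into a read-only user page therefore would not fault
 * on a 386, so trapwrite() is used to push the write-permission check
 * through vm_fault() explicitly before the kernel touches the page.
 */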
	va = trunc_page((vm_offset_t)addr);
	/*
	 * XXX - MAX is END.  Changed > to >= for temp. fix.
	 */
	if (va >= VM_MAXUSER_ADDRESS)

	if (!grow_stack (p, va)) {

	/*
	 * fault the data page
	 */
	rv = vm_fault(&vm->vm_map, va, VM_PROT_WRITE, VM_FAULT_DIRTY);

	if (rv != KERN_SUCCESS)
/*
 * syscall2 -	MP aware system call request C handler
 *
 * A system call is essentially treated as a trap except that the
 * MP lock is not held on entry or return.  We are responsible for
 * obtaining the MP lock if necessary and for handling ASTs
 * (e.g. a task switch) prior to return.
 *
 * In general, only simple access and manipulation of curproc and
 * the current stack is allowed without having to hold MP lock.
 */
	struct trapframe frame;
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct sysent *callp;
	register_t orig_tf_eflags;

	if (ISPL(frame.tf_cs) != SEL_UPL) {

	KASSERT(curthread->td_mpcount == 0, ("badmpcount syscall from %p", (void *)frame.tf_eip));

	/*
	 * access non-atomic field from critical section.  p_sticks is
	 * updated by the clock interrupt.  Also use this opportunity
	 * to lazy-raise our LWKT priority.
	 */
	sticks = curthread->td_sticks;

	p->p_md.md_regs = &frame;
	params = (caddr_t)frame.tf_esp + sizeof(int);
	code = frame.tf_eax;
	orig_tf_eflags = frame.tf_eflags;
1211 if (p->p_sysent->sv_prepsyscall) {
1213 * The prep code is not MP aware.
1215 (*p->p_sysent->sv_prepsyscall)(&frame, args, &code, ¶ms);
1218 * Need to check if this is a 32 bit or 64 bit syscall.
1219 * fuword is MP aware.
1221 if (code == SYS_syscall) {
1223 * Code is first argument, followed by actual args.
1225 code = fuword(params);
1226 params += sizeof(int);
1227 } else if (code == SYS___syscall) {
1229 * Like syscall, but code is a quad, so as to maintain
1230 * quad alignment for the rest of the arguments.
1232 code = fuword(params);
1233 params += sizeof(quad_t);
1237 if (p->p_sysent->sv_mask)
1238 code &= p->p_sysent->sv_mask;
1240 if (code >= p->p_sysent->sv_size)
1241 callp = &p->p_sysent->sv_table[0];
1243 callp = &p->p_sysent->sv_table[code];
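
	/*
	 * At this point the dispatch is resolved: the syscall number arrived
	 * in %eax, the arguments start just above the return address on the
	 * user stack (params), and the indirect forms SYS_syscall /
	 * SYS___syscall have been unwrapped so that "code" holds the real
	 * syscall number (fetched from the first int- or quad-sized argument
	 * respectively).  Out-of-range numbers deliberately map to entry 0
	 * of sv_table, conventionally the "no such syscall" handler.
	 */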
	narg = callp->sy_narg & SYF_ARGMASK;

	/*
	 * copyin is MP aware, but the tracing code is not
	 */
	if (params && (i = narg * sizeof(int)) &&
	    (error = copyin(params, (caddr_t)args, (u_int)i))) {
		if (KTRPOINT(td, KTR_SYSCALL))
			ktrsyscall(p->p_tracep, code, narg, args);

	/*
	 * Try to run the syscall without the MP lock if the syscall
	 * is MP safe.  We have to obtain the MP lock no matter what if
	 * the syscall is not MP safe.
	 */
	if ((callp->sy_narg & SYF_MPSAFE) == 0) {

	if (KTRPOINT(td, KTR_SYSCALL)) {
		ktrsyscall(p->p_tracep, code, narg, args);

	p->p_retval[1] = frame.tf_edx;

	STOPEVENT(p, S_SCE, narg);	/* MP aware */

	error = (*callp->sy_call)(args);

	/*
	 * MP SAFE (we may or may not have the MP lock at this point)
	 */

		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		frame.tf_eax = p->p_retval[0];
		frame.tf_edx = p->p_retval[1];
		frame.tf_eflags &= ~PSL_C;

		/*
		 * Reconstruct pc, assuming lcall $X,y is 7 bytes,
		 * int 0x80 is 2 bytes.  We saved this in tf_err.
		 */
		frame.tf_eip -= frame.tf_err;
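		/*
		 * Concrete example of the fixup above: a syscall entered
		 * with "int $0x80" saved 2 in tf_err, so subtracting it
		 * moves %eip back onto the int instruction and the restarted
		 * process simply re-issues the system call; the 7-byte
		 * "lcall" gate works the same way.
		 */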
1311 if (p->p_sysent->sv_errsize) {
1312 if (error >= p->p_sysent->sv_errsize)
1313 error = -1; /* XXX */
1315 error = p->p_sysent->sv_errtbl[error];
1317 frame.tf_eax = error;
1318 frame.tf_eflags |= PSL_C;
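		/*
		 * Setting PSL_C in the returned eflags is how the error is
		 * signalled to user space: the libc syscall stub tests the
		 * carry flag on return and, when it is set, copies %eax
		 * (the errno value placed in tf_eax above) into errno and
		 * returns -1 to the caller.
		 */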
	/*
	 * Traced syscall.  trapsignal() is not MP aware.
	 */
	if ((orig_tf_eflags & PSL_T) && !(orig_tf_eflags & PSL_VM)) {
		frame.tf_eflags &= ~PSL_T;
		trapsignal(p, SIGTRAP, 0);

	/*
	 * Handle reschedule and other end-of-syscall issues
	 */
	userret(p, &frame, sticks);

	if (KTRPOINT(td, KTR_SYSRET)) {
		ktrsysret(p->p_tracep, code, error, p->p_retval[0]);

	/*
	 * This works because errno is findable through the
	 * register set.  If we ever support an emulation where this
	 * is not the case, this code will need to be revisited.
	 */
	STOPEVENT(p, S_SCX, code);

	/*
	 * Release the MP lock if we had to get it
	 */
	KASSERT(curthread->td_mpcount == 1, ("badmpcount syscall from %p", (void *)frame.tf_eip));
/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.  MP lock is held on entry and should be
 * released on return.  This code will return back into the fork
 * trampoline code which then runs doreti.
 */
fork_return(p, frame)
	struct trapframe frame;
	frame.tf_eax = 0;		/* Child returns zero */
	frame.tf_eflags &= ~PSL_C;	/* success */

	userret(p, &frame, 0);

	if (KTRPOINT(p->p_thread, KTR_SYSRET))
		ktrsysret(p->p_tracep, SYS_fork, 0, 0);

	KKASSERT(curthread->td_mpcount == 1);
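
/*
 * Note: the child gets here by way of the fork trampoline rather than by
 * "returning" from fork (as the comment above describes): cpu_fork()
 * arranges the new thread's kernel stack so that it starts in the
 * trampoline, which calls fork_return() with the copied trapframe.
 * Clearing tf_eax and PSL_C above is what makes fork() appear to return 0
 * (and succeed) in the child.
 */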