2 * Copyright (C) 1994, David Greenman
3 * Copyright (c) 1990, 1993
4 * The Regents of the University of California. All rights reserved.
6 * This code is derived from software contributed to Berkeley by
7 * the University of Utah, and William Jolitz.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by the University of
20 * California, Berkeley and its contributors.
21 * 4. Neither the name of the University nor the names of its contributors
22 * may be used to endorse or promote products derived from this software
23 * without specific prior written permission.
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * from: @(#)trap.c 7.4 (Berkeley) 5/13/91
38 * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $
39 * $DragonFly: src/sys/i386/i386/Attic/trap.c,v 1.51 2004/05/05 19:26:38 dillon Exp $
43 * 386 Trap and System call handling
51 #include "opt_ktrace.h"
52 #include "opt_clock.h"
55 #include <sys/param.h>
56 #include <sys/systm.h>
58 #include <sys/pioctl.h>
59 #include <sys/kernel.h>
60 #include <sys/resourcevar.h>
61 #include <sys/signalvar.h>
62 #include <sys/syscall.h>
63 #include <sys/sysctl.h>
64 #include <sys/sysent.h>
66 #include <sys/vmmeter.h>
67 #include <sys/malloc.h>
69 #include <sys/ktrace.h>
71 #include <sys/upcall.h>
72 #include <sys/sysproto.h>
73 #include <sys/sysunion.h>
76 #include <vm/vm_param.h>
79 #include <vm/vm_kern.h>
80 #include <vm/vm_map.h>
81 #include <vm/vm_page.h>
82 #include <vm/vm_extern.h>
84 #include <machine/cpu.h>
85 #include <machine/ipl.h>
86 #include <machine/md_var.h>
87 #include <machine/pcb.h>
89 #include <machine/smp.h>
91 #include <machine/tss.h>
92 #include <machine/globaldata.h>
94 #include <i386/isa/intr_machdep.h>
97 #include <sys/syslog.h>
98 #include <machine/clock.h>
101 #include <machine/vm86.h>
104 #include <sys/msgport2.h>
105 #include <sys/thread2.h>
107 int (*pmath_emulate) (struct trapframe *);
109 extern void trap (struct trapframe frame);
110 extern int trapwrite (unsigned addr);
111 extern void syscall2 (struct trapframe frame);
112 extern void sendsys2 (struct trapframe frame);
114 static int trap_pfault (struct trapframe *, int, vm_offset_t);
115 static void trap_fatal (struct trapframe *, vm_offset_t);
116 void dblfault_handler (void);
118 extern inthand_t IDTVEC(syscall);
120 #define MAX_TRAP_MSG 28
121 static char *trap_msg[] = {
123 "privileged instruction fault", /* 1 T_PRIVINFLT */
125 "breakpoint instruction fault", /* 3 T_BPTFLT */
128 "arithmetic trap", /* 6 T_ARITHTRAP */
129 "system forced exception", /* 7 T_ASTFLT */
131 "general protection fault", /* 9 T_PROTFLT */
132 "trace trap", /* 10 T_TRCTRAP */
134 "page fault", /* 12 T_PAGEFLT */
136 "alignment fault", /* 14 T_ALIGNFLT */
140 "integer divide fault", /* 18 T_DIVIDE */
141 "non-maskable interrupt trap", /* 19 T_NMI */
142 "overflow trap", /* 20 T_OFLOW */
143 "FPU bounds check fault", /* 21 T_BOUND */
144 "FPU device not available", /* 22 T_DNA */
145 "double fault", /* 23 T_DOUBLEFLT */
146 "FPU operand fetch fault", /* 24 T_FPOPFLT */
147 "invalid TSS fault", /* 25 T_TSSFLT */
148 "segment not present fault", /* 26 T_SEGNPFLT */
149 "stack fault", /* 27 T_STKFLT */
150 "machine check trap", /* 28 T_MCHK */
153 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
154 extern int has_f00f_bug;
158 static int ddb_on_nmi = 1;
159 SYSCTL_INT(_machdep, OID_AUTO, ddb_on_nmi, CTLFLAG_RW,
160 &ddb_on_nmi, 0, "Go to DDB on NMI");
162 static int panic_on_nmi = 1;
163 SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
164 &panic_on_nmi, 0, "Panic on NMI");
165 static int fast_release;
166 SYSCTL_INT(_machdep, OID_AUTO, fast_release, CTLFLAG_RW,
167 &fast_release, 0, "Passive Release was optimal");
168 static int slow_release;
169 SYSCTL_INT(_machdep, OID_AUTO, slow_release, CTLFLAG_RW,
170 &slow_release, 0, "Passive Release was nonoptimal");
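/*
 * Editorial illustration (not part of the original source): the
 * SYSCTL_INT() declarations above publish these knobs under the
 * machdep tree, so from userland they are reachable by name, e.g.
 * "machdep.ddb_on_nmi" and "machdep.panic_on_nmi".  A minimal sketch
 * using only the standard sysctlbyname(3) interface:
 */
#if 0	/* illustration only, never compiled into the kernel */
#include <sys/types.h>
#include <sys/sysctl.h>

static int
get_panic_on_nmi(void)
{
	int val;
	size_t len = sizeof(val);

	/* read machdep.panic_on_nmi; returns -1 on error */
	if (sysctlbyname("machdep.panic_on_nmi", &val, &len, NULL, 0) < 0)
		return (-1);
	return (val);
}
#endif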
172 MALLOC_DEFINE(M_SYSMSG, "sysmsg", "sysmsg structure");
175 * USER->KERNEL transition. Do not transition us out of userland from the
176 * point of view of the userland scheduler unless we actually have to
177 * switch. Switching typically occurs when a process blocks in the kernel.
179 * passive_release is called from within a critical section and the BGL will
180 * still be held. This function is NOT called for preemptions, only for
181 * switchouts. Note that other elements of the system (uio_yield()) assume
182 * that the user cruft will be released when lwkt_switch() is called.
185 passive_release(struct thread *td)
187 struct proc *p = td->td_proc;
189 td->td_release = NULL;
194 * userenter() passively intercepts the thread switch function to increase
195 * the thread priority from a user priority to a kernel priority, reducing
196 * syscall and trap overhead for the case where no switch occurs.
200 userenter(struct thread *curtd)
202 curtd->td_release = passive_release;
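/*
 * Editorial sketch of how userenter()/passive_release()/userexit()
 * cooperate across one kernel entry.  The names are the ones used in
 * this file; the flow shown is an illustration, not additional code:
 *
 *	trap()/syscall2()
 *	    userenter(td);		arm td_release = passive_release
 *	    ... service the trap or syscall ...
 *	    (fast path) nothing blocked: td_release is still armed and
 *		the userland scheduler state was never touched
 *	    (slow path) lwkt_switch() ran: it invoked td->td_release,
 *		giving up the current-process designation while we slept
 *	    userexit(p);		reacquire the designation if lost
 *	    userret(p, &frame, sticks);	signals, upcalls, profiling
 */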
206 * Reacquire our current process designation. This will not return until
207 * we have it. Our LWKT priority will be adjusted for our return to
208 * userland. acquire_curproc() also handles cleaning up P_CP_RELEASED.
210 * This is always the last step before returning to user mode.
213 userexit(struct proc *p)
215 struct thread *td = p->p_thread;
217 td->td_release = NULL;
218 if (p->p_flag & P_CP_RELEASED)
226 * userret() handles signals, upcalls, and deals with system profiling
227 * charges. Note that td_sticks is a 64 bit quantity, but there's no
229 * point doing 64 bit arithmetic on the delta calculation so the absolute
229 * tick values are truncated to an integer.
232 userret(struct proc *p, struct trapframe *frame, int sticks)
237 * Post any pending upcalls
239 if (p->p_flag & P_UPCALLPEND) {
240 p->p_flag &= ~P_UPCALLPEND;
245 * Post any pending signals
247 while ((sig = CURSIG(p)) != 0) {
252 * If a reschedule has been requested then we release the current
253 * process in order to shift the current process designation to
254 * another user process and/or to switch to a higher priority
255 * kernel thread at userexit() time.
257 if (any_resched_wanted()) {
258 p->p_thread->td_release = NULL;
263 * Charge system time if profiling. Note: times are in microseconds.
265 if (p->p_flag & P_PROFIL) {
266 addupc_task(p, frame->tf_eip,
267 (u_int)((int)p->p_thread->td_sticks - sticks));
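/*
 * Editorial note on the truncation above: with two's complement
 * arithmetic the int-sized subtraction
 *
 *	(int)td_sticks_now - (int)td_sticks_at_entry
 *
 * still yields the true delta modulo 2^32, and a single trip through
 * the kernel never accumulates anywhere near 2^31 ticks, so discarding
 * the upper 32 bits of td_sticks is harmless here.
 */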
271 * Post any pending signals XXX
273 while ((sig = CURSIG(p)) != 0)
277 #ifdef DEVICE_POLLING
278 extern u_int32_t poll_in_trap;
279 extern int ether_poll (int count);
280 #endif /* DEVICE_POLLING */
283 * Exception, fault, and trap interface to the FreeBSD kernel.
284 * This common code is called from assembly language IDT gate entry
285 * routines that prepare a suitable stack frame, and restore this
286 * frame after the exception has been processed.
288 * This function is also called from doreti in an interlock to handle ASTs.
289 * For example: hardwareint->INTROUTINE->(set ast)->doreti->trap
291 * NOTE! We have to retrieve the fault address prior to obtaining the
292 * MP lock because get_mplock() may switch out. YYY cr2 really ought
293 * to be retrieved by the assembly code, not here.
297 struct trapframe frame;
299 struct thread *td = curthread;
302 int i = 0, ucode = 0, type, code;
308 eva = (frame.tf_trapno == T_PAGEFLT ? rcr2() : 0);
310 trap_fatal(&frame, eva);
316 if (frame.tf_trapno == T_PAGEFLT) {
318 * For some Cyrix CPUs, %cr2 is clobbered by interrupts.
319 * This problem is worked around by using an interrupt
320 * gate for the pagefault handler. We are finally ready
321 * to read %cr2 and then must reenable interrupts.
323 * XXX this should be in the switch statement, but the
324 * NO_F00F_HACK and VM86 goto and ifdefs obfuscate the
325 * flow of control too much for this to be obviously
335 * MP lock is held at this point
338 if (!(frame.tf_eflags & PSL_I)) {
340 * Buggy application or kernel code has disabled interrupts
341 * and then trapped. Enabling interrupts now is wrong, but
342 * it is better than running with interrupts disabled until
343 * they are accidentally enabled later.
345 type = frame.tf_trapno;
346 if (ISPL(frame.tf_cs)==SEL_UPL || (frame.tf_eflags & PSL_VM)) {
348 "pid %ld (%s): trap %d with interrupts disabled\n",
349 (long)curproc->p_pid, curproc->p_comm, type);
350 } else if (type != T_BPTFLT && type != T_TRCTRAP) {
352 * XXX not quite right, since this may be for a
353 * multiple fault in user mode.
355 printf("kernel trap %d with interrupts disabled\n",
362 #ifdef DEVICE_POLLING
364 ether_poll(poll_in_trap);
365 #endif /* DEVICE_POLLING */
367 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
370 type = frame.tf_trapno;
374 if (frame.tf_eflags & PSL_VM &&
375 (type == T_PROTFLT || type == T_STKFLT)) {
377 KKASSERT(td->td_mpcount > 0);
379 i = vm86_emulate((struct vm86frame *)&frame);
381 KKASSERT(td->td_mpcount > 0);
385 * returns to original process
387 vm86_trap((struct vm86frame *)&frame);
394 * these traps want either a process context, or
395 * assume a normal userspace trap.
399 trap_fatal(&frame, eva);
402 type = T_BPTFLT; /* kernel breakpoint */
405 goto kernel_trap; /* normal kernel trap handling */
408 if ((ISPL(frame.tf_cs) == SEL_UPL) || (frame.tf_eflags & PSL_VM)) {
413 sticks = (int)td->td_sticks;
414 p->p_md.md_regs = &frame;
417 case T_PRIVINFLT: /* privileged instruction fault */
422 case T_BPTFLT: /* bpt instruction fault */
423 case T_TRCTRAP: /* trace trap */
424 frame.tf_eflags &= ~PSL_T;
428 case T_ARITHTRAP: /* arithmetic trap */
433 case T_ASTFLT: /* Allow process switch */
434 mycpu->gd_cnt.v_soft++;
435 if (mycpu->gd_reqflags & RQF_AST_OWEUPC) {
436 atomic_clear_int_nonlocked(&mycpu->gd_reqflags,
438 addupc_task(p, p->p_stats->p_prof.pr_addr,
439 p->p_stats->p_prof.pr_ticks);
444 * The following two traps can happen in
445 * vm86 mode, and, if so, we want to handle
448 case T_PROTFLT: /* general protection fault */
449 case T_STKFLT: /* stack fault */
450 if (frame.tf_eflags & PSL_VM) {
451 i = vm86_emulate((struct vm86frame *)&frame);
458 case T_SEGNPFLT: /* segment not present fault */
459 case T_TSSFLT: /* invalid TSS fault */
460 case T_DOUBLEFLT: /* double fault */
462 ucode = code + BUS_SEGM_FAULT;
466 case T_PAGEFLT: /* page fault */
467 i = trap_pfault(&frame, TRUE, eva);
470 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
480 case T_DIVIDE: /* integer divide fault */
488 goto handle_powerfail;
489 #else /* !POWERFAIL_NMI */
490 /* machine/parity/power fail/"kitchen sink" faults */
491 if (isa_nmi(code) == 0) {
494 * NMI can be hooked up to a pushbutton
498 printf ("NMI ... going to debugger\n");
499 kdb_trap (type, 0, &frame);
503 } else if (panic_on_nmi)
504 panic("NMI indicates hardware failure");
506 #endif /* POWERFAIL_NMI */
507 #endif /* NISA > 0 */
509 case T_OFLOW: /* integer overflow fault */
514 case T_BOUND: /* bounds check fault */
522 * The kernel may have switched out the FP unit's
523 * state, causing the user process to take a fault
524 * when it tries to use the FP unit. Restore the
530 if (!pmath_emulate) {
532 ucode = FPE_FPU_NP_TRAP;
535 i = (*pmath_emulate)(&frame);
537 if (!(frame.tf_eflags & PSL_T))
539 frame.tf_eflags &= ~PSL_T;
542 /* else ucode = emulator_only_knows() XXX */
545 case T_FPOPFLT: /* FPU operand fetch fault */
550 case T_XMMFLT: /* SIMD floating-point exception */
560 case T_PAGEFLT: /* page fault */
561 (void) trap_pfault(&frame, FALSE, eva);
567 * The kernel may be using npx for copying or other
575 case T_PROTFLT: /* general protection fault */
576 case T_SEGNPFLT: /* segment not present fault */
578 * Invalid segment selectors and out of bounds
579 * %eip's and %esp's can be set up in user mode.
580 * This causes a fault in kernel mode when the
581 * kernel tries to return to user mode. We want
582 * to get this fault so that we can fix the
583 * problem here and not have to check all the
584 * selectors and pointers when the user changes
587 #define MAYBE_DORETI_FAULT(where, whereto) \
589 if (frame.tf_eip == (int)where) { \
590 frame.tf_eip = (int)whereto; \
595 * Since we don't save %gs across an interrupt
596 * frame this check must occur outside the intr
597 * nesting level check.
599 if (frame.tf_eip == (int)cpu_switch_load_gs) {
600 td->td_pcb->pcb_gs = 0;
604 if (mycpu->gd_intr_nesting_level == 0) {
606 * Invalid %fs's and %gs's can be created using
607 * procfs or PT_SETREGS or by invalidating the
608 * underlying LDT entry. This causes a fault
609 * in kernel mode when the kernel attempts to
610 * switch contexts. Lose the bad context
611 * (XXX) so that we can continue, and generate
614 MAYBE_DORETI_FAULT(doreti_iret,
616 MAYBE_DORETI_FAULT(doreti_popl_ds,
617 doreti_popl_ds_fault);
618 MAYBE_DORETI_FAULT(doreti_popl_es,
619 doreti_popl_es_fault);
620 MAYBE_DORETI_FAULT(doreti_popl_fs,
621 doreti_popl_fs_fault);
622 if (td->td_pcb->pcb_onfault) {
624 (register_t)td->td_pcb->pcb_onfault;
632 * PSL_NT can be set in user mode and isn't cleared
633 * automatically when the kernel is entered. This
634 * causes a TSS fault when the kernel attempts to
635 * `iret' because the TSS link is uninitialized. We
636 * want to get this fault so that we can fix the
637 * problem here and not every time the kernel is
640 if (frame.tf_eflags & PSL_NT) {
641 frame.tf_eflags &= ~PSL_NT;
646 case T_TRCTRAP: /* trace trap */
647 if (frame.tf_eip == (int)IDTVEC(syscall)) {
649 * We've just entered system mode via the
650 * syscall lcall. Continue single stepping
651 * silently until the syscall handler has
656 if (frame.tf_eip == (int)IDTVEC(syscall) + 1) {
658 * The syscall handler has now saved the
659 * flags. Stop single stepping it.
661 frame.tf_eflags &= ~PSL_T;
665 * Ignore debug register trace traps due to
666 * accesses in the user's address space, which
667 * can happen under several conditions such as
668 * if a user sets a watchpoint on a buffer and
669 * then passes that buffer to a system call.
670 * We still want to get TRCTRAPS for addresses
671 * in kernel space because that is useful when
672 * debugging the kernel.
674 if (user_dbreg_trap()) {
676 * Reset breakpoint bits because the
679 load_dr6(rdr6() & 0xfffffff0);
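/*
 * Editorial note: DR6 bits 0-3 (B0-B3) latch which hardware breakpoint
 * fired; masking with 0xfffffff0 above clears only those hit flags and
 * leaves the remaining debug status bits alone.
 */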
683 * Fall through (TRCTRAP kernel mode, kernel address)
687 * If DDB is enabled, let it handle the debugger trap.
688 * Otherwise, debugger traps "can't happen".
691 if (kdb_trap (type, 0, &frame))
700 # define TIMER_FREQ 1193182
704 static unsigned lastalert = 0;
706 if(time_second - lastalert > 10)
708 log(LOG_WARNING, "NMI: power fail\n");
709 sysbeep(TIMER_FREQ/880, hz);
710 lastalert = time_second;
715 #else /* !POWERFAIL_NMI */
716 /* machine/parity/power fail/"kitchen sink" faults */
717 if (isa_nmi(code) == 0) {
720 * NMI can be hooked up to a pushbutton
724 printf ("NMI ... going to debugger\n");
725 kdb_trap (type, 0, &frame);
729 } else if (panic_on_nmi == 0)
732 #endif /* POWERFAIL_NMI */
733 #endif /* NISA > 0 */
736 trap_fatal(&frame, eva);
740 /* Translate fault for emulators (e.g. Linux) */
741 if (*p->p_sysent->sv_transtrap)
742 i = (*p->p_sysent->sv_transtrap)(i, type);
744 trapsignal(p, i, ucode);
747 if (type <= MAX_TRAP_MSG) {
748 uprintf("fatal process exception: %s",
750 if ((type == T_PAGEFLT) || (type == T_PROTFLT))
751 uprintf(", fault VA = 0x%lx", (u_long)eva);
758 if (ISPL(frame.tf_cs) == SEL_UPL)
759 KASSERT(td->td_mpcount == 1, ("badmpcount trap from %p", (void *)frame.tf_eip));
761 userret(p, &frame, sticks);
765 KKASSERT(td->td_mpcount > 0);
772 * This version doesn't allow a page fault to user space while
773 * in the kernel. The rest of the kernel needs to be made "safe"
774 * before this can be used. I think the only things remaining
775 * to be made safe are the iBCS2 code and the process tracing/
779 trap_pfault(frame, usermode, eva)
780 struct trapframe *frame;
785 struct vmspace *vm = NULL;
789 thread_t td = curthread;
790 struct proc *p = td->td_proc; /* may be NULL */
792 if (frame->tf_err & PGEX_W)
793 ftype = VM_PROT_WRITE;
795 ftype = VM_PROT_READ;
797 va = trunc_page(eva);
798 if (va < VM_MIN_KERNEL_ADDRESS) {
803 (!usermode && va < VM_MAXUSER_ADDRESS &&
804 (td->td_gd->gd_intr_nesting_level != 0 ||
805 td->td_pcb->pcb_onfault == NULL))) {
806 trap_fatal(frame, eva);
811 * This is a fault on non-kernel virtual memory.
812 * vm is initialized above to NULL. If curproc is NULL
813 * or curproc->p_vmspace is NULL the fault is fatal.
822 * Keep swapout from messing with us during this
828 * Grow the stack if necessary
830 /* grow_stack returns false only if va falls into
831 * a growable stack region and the stack growth
832 * fails. It returns true if va was not within
833 * a growable stack region, or if the stack
836 if (!grow_stack (p, va)) {
842 /* Fault in the user page: */
843 rv = vm_fault(map, va, ftype,
844 (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
850 * Don't allow user-mode faults in kernel address space.
856 * Since we know that kernel virtual addresses
857 * always have pte pages mapped, we just have to fault
860 rv = vm_fault(kernel_map, va, ftype, VM_FAULT_NORMAL);
863 if (rv == KERN_SUCCESS)
867 if (td->td_gd->gd_intr_nesting_level == 0 &&
868 td->td_pcb->pcb_onfault) {
869 frame->tf_eip = (register_t)td->td_pcb->pcb_onfault;
872 trap_fatal(frame, eva);
876 /* kludge to pass faulting virtual address to sendsig */
879 return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
884 trap_pfault(frame, usermode, eva)
885 struct trapframe *frame;
890 struct vmspace *vm = NULL;
894 thread_t td = curthread;
895 struct proc *p = td->td_proc;
897 va = trunc_page(eva);
898 if (va >= KERNBASE) {
900 * Don't allow user-mode faults in kernel address space.
901 * An exception: if the faulting address is the invalid
902 * instruction entry in the IDT, then the Intel Pentium
903 * F00F bug workaround was triggered, and we need to
904 * treat it as an illegal instruction, and not a page
907 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
908 if ((eva == (unsigned int)&idt[6]) && has_f00f_bug) {
909 frame->tf_trapno = T_PRIVINFLT;
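/*
 * Editorial note (how the workaround is believed to operate): the f00f
 * hack elsewhere in the kernel arranges the IDT so that fetching the
 * descriptor for vector 6 faults instead of letting the buggy locked
 * cmpxchg8b hang the CPU; converting that page fault back into
 * T_PRIVINFLT here delivers the illegal-instruction signal the process
 * would have received on an unaffected CPU.
 */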
919 * This is a fault on non-kernel virtual memory.
920 * vm is initialized above to NULL. If curproc is NULL
921 * or curproc->p_vmspace is NULL the fault is fatal.
932 if (frame->tf_err & PGEX_W)
933 ftype = VM_PROT_WRITE;
935 ftype = VM_PROT_READ;
937 if (map != kernel_map) {
939 * Keep swapout from messing with us during this
945 * Grow the stack if necessary
947 /* grow_stack returns false only if va falls into
948 * a growable stack region and the stack growth
949 * fails. It returns true if va was not within
950 * a growable stack region, or if the stack
953 if (!grow_stack (p, va)) {
959 /* Fault in the user page: */
960 rv = vm_fault(map, va, ftype,
961 (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
967 * Don't have to worry about process locking or stacks in the kernel.
969 rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
972 if (rv == KERN_SUCCESS)
976 if (td->td_gd->gd_intr_nesting_level == 0 &&
977 td->td_pcb->pcb_onfault) {
978 frame->tf_eip = (register_t)td->td_pcb->pcb_onfault;
981 trap_fatal(frame, eva);
985 /* kludge to pass faulting virtual address to sendsig */
988 return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
992 trap_fatal(frame, eva)
993 struct trapframe *frame;
996 int code, type, ss, esp;
997 struct soft_segment_descriptor softseg;
999 code = frame->tf_err;
1000 type = frame->tf_trapno;
1001 sdtossd(&gdt[mycpu->gd_cpuid * NGDT + IDXSEL(frame->tf_cs & 0xffff)].sd, &softseg);
1003 if (type <= MAX_TRAP_MSG)
1004 printf("\n\nFatal trap %d: %s while in %s mode\n",
1005 type, trap_msg[type],
1006 frame->tf_eflags & PSL_VM ? "vm86" :
1007 ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
1009 /* three separate prints in case of a trap on an unmapped page */
1010 printf("mp_lock = %08x; ", mp_lock);
1011 printf("cpuid = %d; ", mycpu->gd_cpuid);
1012 printf("lapic.id = %08x\n", lapic.id);
1014 if (type == T_PAGEFLT) {
1015 printf("fault virtual address = 0x%x\n", eva);
1016 printf("fault code = %s %s, %s\n",
1017 code & PGEX_U ? "user" : "supervisor",
1018 code & PGEX_W ? "write" : "read",
1019 code & PGEX_P ? "protection violation" : "page not present");
1021 printf("instruction pointer = 0x%x:0x%x\n",
1022 frame->tf_cs & 0xffff, frame->tf_eip);
1023 if ((ISPL(frame->tf_cs) == SEL_UPL) || (frame->tf_eflags & PSL_VM)) {
1024 ss = frame->tf_ss & 0xffff;
1025 esp = frame->tf_esp;
1027 ss = GSEL(GDATA_SEL, SEL_KPL);
1028 esp = (int)&frame->tf_esp;
1030 printf("stack pointer = 0x%x:0x%x\n", ss, esp);
1031 printf("frame pointer = 0x%x:0x%x\n", ss, frame->tf_ebp);
1032 printf("code segment = base 0x%x, limit 0x%x, type 0x%x\n",
1033 softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
1034 printf(" = DPL %d, pres %d, def32 %d, gran %d\n",
1035 softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_def32,
1037 printf("processor eflags = ");
1038 if (frame->tf_eflags & PSL_T)
1039 printf("trace trap, ");
1040 if (frame->tf_eflags & PSL_I)
1041 printf("interrupt enabled, ");
1042 if (frame->tf_eflags & PSL_NT)
1043 printf("nested task, ");
1044 if (frame->tf_eflags & PSL_RF)
1046 if (frame->tf_eflags & PSL_VM)
1048 printf("IOPL = %d\n", (frame->tf_eflags & PSL_IOPL) >> 12);
1049 printf("current process = ");
1051 printf("%lu (%s)\n",
1052 (u_long)curproc->p_pid, curproc->p_comm ?
1053 curproc->p_comm : "");
1057 printf("current thread = pri %d ", curthread->td_pri);
1058 if (curthread->td_pri >= TDPRI_CRIT)
1061 printf("interrupt mask = ");
1062 if ((curthread->td_cpl & net_imask) == net_imask)
1064 if ((curthread->td_cpl & tty_imask) == tty_imask)
1066 if ((curthread->td_cpl & bio_imask) == bio_imask)
1068 if ((curthread->td_cpl & cam_imask) == cam_imask)
1070 if (curthread->td_cpl == 0)
1075 * we probably SHOULD have stopped the other CPUs before now!
1076 * another CPU COULD have been touching cpl at this moment...
1078 printf(" <- SMP: XXX");
1087 if ((debugger_on_panic || db_active) && kdb_trap(type, code, frame))
1090 printf("trap number = %d\n", type);
1091 if (type <= MAX_TRAP_MSG)
1092 panic("%s", trap_msg[type]);
1094 panic("unknown/reserved trap");
1098 * Double fault handler. Called when a fault occurs while writing
1099 * a frame for a trap/exception onto the stack. This usually occurs
1100 * when the stack overflows (such is the case with infinite recursion,
1103 * XXX Note that the current PTD gets replaced by IdlePTD when the
1104 * task switch occurs. This means that the stack that was active at
1105 * the time of the double fault is not available at <kstack> unless
1106 * the machine was idle when the double fault occurred. The downside
1107 * of this is that "trace <ebp>" in ddb won't work.
1112 struct mdglobaldata *gd = mdcpu;
1114 printf("\nFatal double fault:\n");
1115 printf("eip = 0x%x\n", gd->gd_common_tss.tss_eip);
1116 printf("esp = 0x%x\n", gd->gd_common_tss.tss_esp);
1117 printf("ebp = 0x%x\n", gd->gd_common_tss.tss_ebp);
1119 /* three separate prints in case of a trap on an unmapped page */
1120 printf("mp_lock = %08x; ", mp_lock);
1121 printf("cpuid = %d; ", mycpu->gd_cpuid);
1122 printf("lapic.id = %08x\n", lapic.id);
1124 panic("double fault");
1128 * Compensate for 386 brain damage (missing URKR).
1129 * This is a little simpler than the pagefault handler in trap() because
1130 * the page tables have already been faulted in and high addresses
1131 * are thrown out early for other reasons.
1141 va = trunc_page((vm_offset_t)addr);
1143 * XXX - MAX is END. Changed > to >= for temp. fix.
1145 if (va >= VM_MAXUSER_ADDRESS)
1153 if (!grow_stack (p, va)) {
1159 * fault the data page
1161 rv = vm_fault(&vm->vm_map, va, VM_PROT_WRITE, VM_FAULT_DIRTY);
1165 if (rv != KERN_SUCCESS)
1172 * syscall2 - MP aware system call request C handler
1174 * A system call is essentially treated as a trap except that the
1175 * MP lock is not held on entry or return. We are responsible for
1176 * obtaining the MP lock if necessary and for handling ASTs
1177 * (e.g. a task switch) prior to return.
1179 * In general, only simple access and manipulation of curproc and
1180 * the current stack is allowed without having to hold MP lock.
1183 syscall2(struct trapframe frame)
1185 struct thread *td = curthread;
1186 struct proc *p = td->td_proc;
1189 struct sysent *callp;
1190 register_t orig_tf_eflags;
1195 union sysunion args;
1198 if (ISPL(frame.tf_cs) != SEL_UPL) {
1206 KASSERT(td->td_mpcount == 0, ("badmpcount syscall from %p", (void *)frame.tf_eip));
1209 userenter(td); /* lazy raise our priority */
1211 sticks = (int)td->td_sticks;
1213 p->p_md.md_regs = &frame;
1214 params = (caddr_t)frame.tf_esp + sizeof(int);
1215 code = frame.tf_eax;
1216 orig_tf_eflags = frame.tf_eflags;
1218 if (p->p_sysent->sv_prepsyscall) {
1220 * The prep code is not MP aware.
1222 (*p->p_sysent->sv_prepsyscall)(&frame, (int *)(&args.nosys.usrmsg + 1), &code, &params);
1225 * Need to check if this is a 32 bit or 64 bit syscall.
1226 * fuword is MP aware.
1228 if (code == SYS_syscall) {
1230 * Code is first argument, followed by actual args.
1232 code = fuword(params);
1233 params += sizeof(int);
1234 } else if (code == SYS___syscall) {
1236 * Like syscall, but code is a quad, so as to maintain
1237 * quad alignment for the rest of the arguments.
1239 code = fuword(params);
1240 params += sizeof(quad_t);
1244 if (p->p_sysent->sv_mask)
1245 code &= p->p_sysent->sv_mask;
1247 if (code >= p->p_sysent->sv_size)
1248 callp = &p->p_sysent->sv_table[0];
1250 callp = &p->p_sysent->sv_table[code];
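/*
 * Editorial sketch of the i386 convention this decoding assumes (an
 * illustration drawn from the code above, not a specification):
 *
 *	%eax		system call number
 *	0(%esp)		return address pushed by the libc stub
 *	4(%esp)...	arguments, copied in below with copyin()
 *
 * For the two indirect entries the real number travels as the first
 * argument instead:
 *
 *	syscall(realcode, a0, a1, ...)	  re-read an int, skip 4 bytes
 *	__syscall(realcode, a0, a1, ...)  re-read a quad, skip 8 bytes
 *
 * which is why `params' is advanced past sizeof(int) or sizeof(quad_t)
 * above before the argument copyin.
 */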
1252 narg = callp->sy_narg & SYF_ARGMASK;
1255 * copyin is MP aware, but the tracing code is not
1257 if (params && (i = narg * sizeof(register_t)) &&
1258 (error = copyin(params, (caddr_t)(&args.nosys.usrmsg + 1), (u_int)i))) {
1260 if (KTRPOINT(td, KTR_SYSCALL))
1261 ktrsyscall(p->p_tracep, code, narg, (void *)(&args.nosys.usrmsg + 1));
1268 * Try to run the syscall without the MP lock if the syscall
1269 * is MP safe. We have to obtain the MP lock no matter what if
1272 if ((callp->sy_narg & SYF_MPSAFE) == 0) {
1279 if (KTRPOINT(td, KTR_SYSCALL)) {
1280 ktrsyscall(p->p_tracep, code, narg, (void *)(&args.nosys.usrmsg + 1));
1285 * For traditional syscall code edx is left untouched when 32 bit
1286 * results are returned. Since edx is loaded from fds[1] when the
1287 * system call returns we pre-set it here.
1289 lwkt_initmsg(&args.lmsg, &td->td_msgport, 0,
1290 lwkt_cmd_op(code), lwkt_cmd_op_none);
1291 args.sysmsg_copyout = NULL;
1292 args.sysmsg_fds[0] = 0;
1293 args.sysmsg_fds[1] = frame.tf_edx;
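/*
 * Editorial example: a system call returning a 64 bit quantity (e.g.
 * lseek(2) returning an off_t) fills both sysmsg_fds[0] and fds[1] and
 * thus loads %eax:%edx, while a plain int result only touches fds[0];
 * pre-setting fds[1] from tf_edx above keeps the user-visible %edx
 * unchanged in that common case, as the comment above describes.
 */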
1295 STOPEVENT(p, S_SCE, narg); /* MP aware */
1297 error = (*callp->sy_call)(&args);
1300 * MP SAFE (we may or may not have the MP lock at this point)
1305 * Reinitialize proc pointer `p' as it may be different
1306 * if this is a child returning from fork syscall.
1309 frame.tf_eax = args.sysmsg_fds[0];
1310 frame.tf_edx = args.sysmsg_fds[1];
1311 frame.tf_eflags &= ~PSL_C;
1315 * Reconstruct pc, assuming lcall $X,y is 7 bytes,
1316 * int 0x80 is 2 bytes. We saved this in tf_err.
1318 frame.tf_eip -= frame.tf_err;
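/*
 * Editorial worked example of the restart above: for `int 0x80' the
 * saved tf_eip points just past the 2-byte opcode and the entry stub
 * stashed that length (2, or 7 for the lcall gate) in tf_err, so
 * tf_eip -= tf_err rewinds the pc onto the trapping instruction and
 * the system call is re-issued when the process resumes.
 */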
1323 panic("Unexpected EASYNC return value (for now)");
1326 if (p->p_sysent->sv_errsize) {
1327 if (error >= p->p_sysent->sv_errsize)
1328 error = -1; /* XXX */
1330 error = p->p_sysent->sv_errtbl[error];
1332 frame.tf_eax = error;
1333 frame.tf_eflags |= PSL_C;
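/*
 * Editorial note: sv_errtbl lets an emulation ABI remap native errno
 * values into its own numbering; a native process has sv_errsize == 0
 * and the error passes through unchanged, while out-of-range errors
 * collapse to the -1 catch-all above.
 */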
1338 * Traced syscall. trapsignal() is not MP aware.
1340 if ((orig_tf_eflags & PSL_T) && !(orig_tf_eflags & PSL_VM)) {
1341 frame.tf_eflags &= ~PSL_T;
1342 trapsignal(p, SIGTRAP, 0);
1346 * Handle reschedule and other end-of-syscall issues
1348 userret(p, &frame, sticks);
1351 if (KTRPOINT(td, KTR_SYSRET)) {
1352 ktrsysret(p->p_tracep, code, error, args.sysmsg_result);
1357 * This works because errno is findable through the
1358 * register set. If we ever support an emulation where this
1359 * is not the case, this code will need to be revisited.
1361 STOPEVENT(p, S_SCX, code);
1366 * Release the MP lock if we had to get it
1368 KASSERT(td->td_mpcount == 1, ("badmpcount syscall from %p", (void *)frame.tf_eip));
1374 * sendsys2 - MP aware system message request C handler
1377 sendsys2(struct trapframe frame)
1379 struct globaldata *gd;
1380 struct thread *td = curthread;
1381 struct proc *p = td->td_proc;
1382 register_t orig_tf_eflags;
1383 struct sysent *callp;
1384 union sysunion *sysun;
1394 if (ISPL(frame.tf_cs) != SEL_UPL) {
1402 KASSERT(td->td_mpcount == 0, ("badmpcount syscall from %p", (void *)frame.tf_eip));
1406 * access non-atomic field from critical section. p_sticks is
1407 * updated by the clock interrupt. Also use this opportunity
1408 * to lazy-raise our LWKT priority.
1411 sticks = td->td_sticks;
1413 p->p_md.md_regs = &frame;
1414 orig_tf_eflags = frame.tf_eflags;
1418 * Handle the waitport/waitmsg/checkport/checkmsg case
1420 * YYY MOVE THIS TO INT 0x82! We don't really need to combine it
1423 if ((msgsize = frame.tf_edx) <= 0) {
1425 printf("waitmsg/checkmsg not yet supported: %08x\n",
1431 printf("waitport/checkport only the default port is supported at the moment\n");
1438 * Wait on port for message
1440 sysun = lwkt_getport(&td->td_msgport);
1445 * Test port for message
1447 sysun = lwkt_getport(&td->td_msgport);
1455 umsg = sysun->lmsg.opaque.ms_umsg;
1456 frame.tf_eax = (register_t)umsg;
1457 if (sysun->sysmsg_copyout)
1458 sysun->sysmsg_copyout(sysun);
1459 atomic_add_int_nonlocked(&td->td_msgport.mp_refs, -1);
1460 sysun->nosys.usrmsg.umsg.u.ms_fds[0] = sysun->lmsg.u.ms_fds[0];
1461 sysun->nosys.usrmsg.umsg.u.ms_fds[1] = sysun->lmsg.u.ms_fds[1];
1462 sysun->nosys.usrmsg.umsg.ms_error = sysun->lmsg.ms_error;
1463 error = sysun->lmsg.ms_error;
1464 result = sysun->lmsg.u.ms_fds[0]; /* for ktrace */
1465 if (error != 0 || code != SYS_execve) {
1467 &sysun->nosys.usrmsg.umsg.ms_copyout_start,
1468 &umsg->ms_copyout_start,
1471 crit_enter_quick(td);
1472 sysun->lmsg.opaque.ms_sysunnext = gd->gd_freesysun;
1473 gd->gd_freesysun = sysun;
1474 crit_exit_quick(td);
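/*
 * Editorial sketch of the register convention sendsys2() decodes, as an
 * illustration pieced together from the code in this function (not a
 * published ABI description):
 *
 *	%edx >  0	send: %ecx points at a userland sysmsg of %edx bytes
 *	%edx == 0	wait: block on the thread's default message port
 *	%edx == -1	poll: test for a completed message
 *
 * On completion the original userland message pointer is handed back in
 * %eax and the result fields are copied out to it.
 */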
1485 * Extract the system call message. If msgsize is zero we are
1486 * blocking on a message and/or message port. If msgsize is -1
1487 * we are testing a message for completion or a message port for
1490 * The userland system call message size includes the size of the
1491 * userland lwkt_msg plus arguments. We load it into the userland
1492 * portion of our sysunion structure then we initialize the kerneland
1499 if (msgsize < sizeof(struct lwkt_msg) ||
1500 msgsize > sizeof(union sysunion) - sizeof(struct sysmsg)
1507 * Obtain a sysun from our per-cpu cache or allocate a new one. Use
1508 * the opaque field to store the original (user) message pointer.
1509 * A critical section is necessary to interlock against interrupts
1510 * returning system messages to the thread cache.
1513 crit_enter_quick(td);
1514 if ((sysun = gd->gd_freesysun) != NULL) {
1515 gd->gd_freesysun = sysun->lmsg.opaque.ms_sysunnext;
1516 crit_exit_quick(td);
1518 crit_exit_quick(td);
1519 sysun = malloc(sizeof(union sysunion), M_SYSMSG, M_WAITOK);
1521 atomic_add_int_nonlocked(&td->td_msgport.mp_refs, 1);
1524 * Copy the user request into the kernel copy of the user request.
1526 umsg = (void *)frame.tf_ecx;
1527 error = copyin(umsg, &sysun->nosys.usrmsg, msgsize);
1530 if ((sysun->nosys.usrmsg.umsg.ms_flags & MSGF_ASYNC) &&
1531 (error = suser(td)) != 0
1537 * Initialize the kernel message from the copied-in data and
1538 * pull in appropriate flags from the userland message.
1540 lwkt_initmsg(&sysun->lmsg, &td->td_msgport, 0,
1541 sysun->nosys.usrmsg.umsg.ms_cmd,
1543 sysun->sysmsg_copyout = NULL;
1544 sysun->lmsg.opaque.ms_umsg = umsg;
1545 sysun->lmsg.ms_flags |= sysun->nosys.usrmsg.umsg.ms_flags & MSGF_ASYNC;
1548 * Extract the system call number, lookup the system call, and
1549 * set the default return value.
1551 code = (u_int)sysun->lmsg.ms_cmd.cm_op;
1552 if (code >= p->p_sysent->sv_size) {
1557 callp = &p->p_sysent->sv_table[code];
1559 narg = (msgsize - sizeof(struct lwkt_msg)) / sizeof(register_t);
1562 if (KTRPOINT(td, KTR_SYSCALL)) {
1563 ktrsyscall(p->p_tracep, code, narg, (void *)(&sysun->nosys.usrmsg + 1));
1566 sysun->lmsg.u.ms_fds[0] = 0;
1567 sysun->lmsg.u.ms_fds[1] = 0;
1569 STOPEVENT(p, S_SCE, narg); /* MP aware */
1572 * Make the system call. An error code is always returned, results
1573 * are copied back via ms_result32 or ms_result64. YYY temporary
1574 * stage copy p_retval[] into ms_result32/64
1576 * NOTE! XXX if this is a child returning from a fork curproc
1577 * might be different. YYY huh? a child returning from a fork
1578 * should never 'return' from this call, it should go right to the
1579 * fork_trampoline function.
1581 error = (*callp->sy_call)(sysun);
1582 gd = td->td_gd; /* RELOAD, might have switched cpus */
1586 * If a synchronous return copy p_retval to ms_result64 and return
1587 * the sysmsg to the free pool.
1589 * YYY Don't writeback message if execve() YYY
1591 if (error != EASYNC) {
1592 atomic_add_int_nonlocked(&td->td_msgport.mp_refs, -1);
1593 sysun->nosys.usrmsg.umsg.u.ms_fds[0] = sysun->lmsg.u.ms_fds[0];
1594 sysun->nosys.usrmsg.umsg.u.ms_fds[1] = sysun->lmsg.u.ms_fds[1];
1595 result = sysun->nosys.usrmsg.umsg.u.ms_fds[0]; /* for ktrace */
1596 if (error != 0 || code != SYS_execve) {
1598 error2 = copyout(&sysun->nosys.usrmsg.umsg.ms_copyout_start,
1599 &umsg->ms_copyout_start,
1604 crit_enter_quick(td);
1605 sysun->lmsg.opaque.ms_sysunnext = gd->gd_freesysun;
1606 gd->gd_freesysun = sysun;
1607 crit_exit_quick(td);
1610 frame.tf_eax = error;
1614 * Traced syscall. trapsignal() is not MP aware.
1616 if ((orig_tf_eflags & PSL_T) && !(orig_tf_eflags & PSL_VM)) {
1617 frame.tf_eflags &= ~PSL_T;
1618 trapsignal(p, SIGTRAP, 0);
1622 * Handle reschedule and other end-of-syscall issues
1624 userret(p, &frame, sticks);
1627 if (KTRPOINT(td, KTR_SYSRET)) {
1628 ktrsysret(p->p_tracep, code, error, result);
1633 * This works because errno is findable through the
1634 * register set. If we ever support an emulation where this
1635 * is not the case, this code will need to be revisited.
1637 STOPEVENT(p, S_SCX, code);
1642 * Release the MP lock if we had to get it
1644 KASSERT(td->td_mpcount == 1, ("badmpcount syscall from %p", (void *)frame.tf_eip));
1650 * Simplified back end of syscall(), used when returning from fork()
1651 * directly into user mode. MP lock is held on entry and should be
1652 * released on return. This code will return back into the fork
1653 * trampoline code which then runs doreti.
1656 fork_return(p, frame)
1658 struct trapframe frame;
1660 frame.tf_eax = 0; /* Child returns zero */
1661 frame.tf_eflags &= ~PSL_C; /* success */
1664 userret(p, &frame, 0);
1666 if (KTRPOINT(p->p_thread, KTR_SYSRET))
1667 ktrsysret(p->p_tracep, SYS_fork, 0, 0);
1669 p->p_flag |= P_PASSIVE_ACQ;
1671 p->p_flag &= ~P_PASSIVE_ACQ;
1673 KKASSERT(p->p_thread->td_mpcount == 1);