/*
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $
 * $DragonFly: src/sys/i386/i386/Attic/trap.c,v 1.45 2004/01/08 18:39:18 asmodai Exp $
 */

/*
 * 386 Trap and System call handling
 */
#include "opt_ktrace.h"
#include "opt_clock.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pioctl.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vmmeter.h>
#include <sys/malloc.h>
#include <sys/ktrace.h>
#include <sys/upcall.h>
#include <sys/sysproto.h>
#include <sys/sysunion.h>

#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>

#include <machine/cpu.h>
#include <machine/ipl.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <machine/tss.h>
#include <machine/globaldata.h>

#include <i386/isa/intr_machdep.h>

#include <sys/syslog.h>
#include <machine/clock.h>

#include <machine/vm86.h>

#include <sys/msgport2.h>
#include <sys/thread2.h>
int (*pmath_emulate) (struct trapframe *);

extern void trap (struct trapframe frame);
extern int trapwrite (unsigned addr);
extern void syscall2 (struct trapframe frame);
extern void sendsys2 (struct trapframe frame);

static int trap_pfault (struct trapframe *, int, vm_offset_t);
static void trap_fatal (struct trapframe *, vm_offset_t);
void dblfault_handler (void);

extern inthand_t IDTVEC(syscall);

#define MAX_TRAP_MSG		28
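/*
 * Human-readable trap messages, indexed by T_* trap number.  Unused
 * trap numbers keep empty placeholder entries so that the array index
 * and the trap number stay in sync; MAX_TRAP_MSG bounds valid lookups.
 */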
static char *trap_msg[] = {
	"",					/*  0 unused */
	"privileged instruction fault",		/*  1 T_PRIVINFLT */
	"",					/*  2 unused */
	"breakpoint instruction fault",		/*  3 T_BPTFLT */
	"",					/*  4 unused */
	"",					/*  5 unused */
	"arithmetic trap",			/*  6 T_ARITHTRAP */
	"system forced exception",		/*  7 T_ASTFLT */
	"",					/*  8 unused */
	"general protection fault",		/*  9 T_PROTFLT */
	"trace trap",				/* 10 T_TRCTRAP */
	"",					/* 11 unused */
	"page fault",				/* 12 T_PAGEFLT */
	"",					/* 13 unused */
	"alignment fault",			/* 14 T_ALIGNFLT */
	"",					/* 15 unused */
	"",					/* 16 unused */
	"",					/* 17 unused */
	"integer divide fault",			/* 18 T_DIVIDE */
	"non-maskable interrupt trap",		/* 19 T_NMI */
	"overflow trap",			/* 20 T_OFLOW */
	"FPU bounds check fault",		/* 21 T_BOUND */
	"FPU device not available",		/* 22 T_DNA */
	"double fault",				/* 23 T_DOUBLEFLT */
	"FPU operand fetch fault",		/* 24 T_FPOPFLT */
	"invalid TSS fault",			/* 25 T_TSSFLT */
	"segment not present fault",		/* 26 T_SEGNPFLT */
	"stack fault",				/* 27 T_STKFLT */
	"machine check trap",			/* 28 T_MCHK */
};
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
extern int has_f00f_bug;
#endif

#ifdef DDB
static int ddb_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, ddb_on_nmi, CTLFLAG_RW,
	&ddb_on_nmi, 0, "Go to DDB on NMI");
#endif
static int panic_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
	&panic_on_nmi, 0, "Panic on NMI");
static int fast_release;
SYSCTL_INT(_machdep, OID_AUTO, fast_release, CTLFLAG_RW,
	&fast_release, 0, "Passive Release was optimal");
static int slow_release;
SYSCTL_INT(_machdep, OID_AUTO, slow_release, CTLFLAG_RW,
	&slow_release, 0, "Passive Release was nonoptimal");
static int pass_release;
SYSCTL_INT(_machdep, OID_AUTO, pass_release, CTLFLAG_RW,
	&pass_release, 0, "Passive Release on switch");

MALLOC_DEFINE(M_SYSMSG, "sysmsg", "sysmsg structure");
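/*
 * (M_SYSMSG backs the sysunion structures allocated in sendsys2() and
 * cached per-cpu via gd_freesysun.)
 */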
/*
 * USER->KERNEL transition.  Do not transition us out of userland from the
 * point of view of the userland scheduler unless we actually have to
 * switch.  Switching typically occurs when a process blocks in the kernel.
 *
 * passive_release is called from within a critical section and the BGL will
 * still be held.  This function is NOT called for preemptions, only for
 * switchouts.  Note that other elements of the system (uio_yield()) assume
 * that the user cruft will be released when lwkt_switch() is called.
 */
static void
passive_release(struct thread *td)
{
	struct proc *p = td->td_proc;

	td->td_release = NULL;

	/*
	 * P_CP_RELEASED prevents the userland scheduler from messing with
	 * this proc.
	 */
	if ((p->p_flag & P_CP_RELEASED) == 0) {
		p->p_flag |= P_CP_RELEASED;
		lwkt_setpri_self(TDPRI_KERN_USER);
	}

	/*
	 * Only one process will have a P_CURPROC designation for each cpu
	 * in the system.  Releasing it allows another userland process to
	 * be scheduled in case our thread blocks in the kernel.
	 */
	if (p->p_flag & P_CURPROC) {
		p->p_flag &= ~P_CURPROC;
	}
}
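/*
 * (The td_release hook is armed by userenter() below; because it clears
 * itself, and userret()/userexit() also clear it, it fires at most once
 * per kernel entry.)
 */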
/*
 * userenter() passively intercepts the thread switch function to increase
 * the thread priority from a user priority to a kernel priority, reducing
 * syscall and trap overhead for the case where no switch occurs.
 */
static __inline void
userenter(struct thread *curtd)
{
	curtd->td_release = passive_release;
}
static void
userexit(struct proc *p)
{
	struct thread *td = p->p_thread;

	/*
	 * Reacquire our P_CURPROC status and adjust the LWKT priority
	 * for our return to userland.  We can fast path the case where
	 * td_release was not called by checking particular proc flags.
	 * Otherwise we do it the slow way.
	 *
	 * Lowering our priority may make other higher priority threads
	 * runnable.  lwkt_setpri_self() does not switch away, so call
	 * lwkt_maybe_switch() to deal with it.
	 *
	 * WARNING!  Once our priority is lowered to a user level priority
	 * it is possible, once we return to user mode (or if we were to
	 * block) for a cpu-bound user process to prevent us from getting cpu
	 * again.  This is always the last step.
	 */
	td->td_release = NULL;
	if ((p->p_flag & (P_CP_RELEASED|P_CURPROC)) == P_CURPROC) {
		++fast_release;
	} else {
		++slow_release;
		acquire_curproc(p);
	}
	switch(p->p_rtprio.type) {
	case RTP_PRIO_IDLE:
		lwkt_setpri_self(TDPRI_USER_IDLE);
		break;
	case RTP_PRIO_REALTIME:
	case RTP_PRIO_FIFO:
		lwkt_setpri_self(TDPRI_USER_REAL);
		break;
	default:
		lwkt_setpri_self(TDPRI_USER_NORM);
		break;
	}
	lwkt_maybe_switch();
}
static void
userret(struct proc *p, struct trapframe *frame, u_quad_t oticks)
{
	int sig;

	/* Post any pending upcalls */
	if (p->p_flag & P_UPCALLPEND) {
		p->p_flag &= ~P_UPCALLPEND;
		postupcall(p);
	}

	/* Post any pending signals */
	while ((sig = CURSIG(p)) != 0) {
		postsig(sig);
	}

	/*
	 * If a reschedule has been requested then we release the current
	 * process in order to shift our P_CURPROC designation to another
	 * user process.  userexit() will reacquire P_CURPROC and block
	 * there.
	 */
	if (resched_wanted()) {
		p->p_thread->td_release = NULL;
		if ((p->p_flag & P_CP_RELEASED) == 0) {
			p->p_flag |= P_CP_RELEASED;
			lwkt_setpri_self(TDPRI_KERN_USER);
		}
		if (p->p_flag & P_CURPROC) {
			p->p_flag &= ~P_CURPROC;
		}
	}

	/* Charge system time if profiling.  Note: times are in microseconds. */
	if (p->p_flag & P_PROFIL) {
		addupc_task(p, frame->tf_eip,
		    (u_int)(curthread->td_sticks - oticks));
	}

	/* Post any pending signals XXX */
	while ((sig = CURSIG(p)) != 0)
		postsig(sig);
}
#ifdef DEVICE_POLLING
extern u_int32_t poll_in_trap;
extern int ether_poll (int count);
#endif /* DEVICE_POLLING */
/*
 * Exception, fault, and trap interface to the FreeBSD kernel.
 * This common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed.
 *
 * This function is also called from doreti in an interlock to handle ASTs.
 * For example:  hardwareint->INTROUTINE->(set ast)->doreti->trap
 *
 * NOTE!  We have to retrieve the fault address prior to obtaining the
 * MP lock because get_mplock() may switch out.  YYY cr2 really ought
 * to be retrieved by the assembly code, not here.
 */
void
trap(frame)
	struct trapframe frame;
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	u_quad_t sticks = 0;
	int i = 0, ucode = 0, type, code;
	vm_offset_t eva;
#ifdef DDB
	if (db_active) {
		eva = (frame.tf_trapno == T_PAGEFLT ? rcr2() : 0);
		get_mplock();
		trap_fatal(&frame, eva);
		goto out2;
	}
#endif

	eva = 0;
	if (frame.tf_trapno == T_PAGEFLT) {
		/*
		 * For some Cyrix CPUs, %cr2 is clobbered by interrupts.
		 * This problem is worked around by using an interrupt
		 * gate for the pagefault handler.  We are finally ready
		 * to read %cr2 and then must reenable interrupts.
		 *
		 * XXX this should be in the switch statement, but the
		 * NO_F00F_HACK and VM86 goto and ifdefs obfuscate the
		 * flow of control too much for this to be obviously
		 * correct.
		 */
		eva = rcr2();
		get_mplock();
		enable_intr();
	} else {
		get_mplock();
	}
	/*
	 * MP lock is held at this point
	 */
	if (!(frame.tf_eflags & PSL_I)) {
		/*
		 * Buggy application or kernel code has disabled interrupts
		 * and then trapped.  Enabling interrupts now is wrong, but
		 * it is better than running with interrupts disabled until
		 * they are accidentally enabled later.
		 */
		type = frame.tf_trapno;
		if (ISPL(frame.tf_cs) == SEL_UPL || (frame.tf_eflags & PSL_VM)) {
			printf(
			    "pid %ld (%s): trap %d with interrupts disabled\n",
			    (long)curproc->p_pid, curproc->p_comm, type);
		} else if (type != T_BPTFLT && type != T_TRCTRAP) {
			/*
			 * XXX not quite right, since this may be for a
			 * multiple fault in user mode.
			 */
			printf("kernel trap %d with interrupts disabled\n",
			    type);
		}
		enable_intr();
	}

#ifdef DEVICE_POLLING
	if (poll_in_trap)
		ether_poll(poll_in_trap);
#endif /* DEVICE_POLLING */
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
restart:
#endif
	type = frame.tf_trapno;
	code = frame.tf_err;

	if (in_vm86call) {
		if (frame.tf_eflags & PSL_VM &&
		    (type == T_PROTFLT || type == T_STKFLT)) {
			KKASSERT(curthread->td_mpcount > 0);
			i = vm86_emulate((struct vm86frame *)&frame);
			KKASSERT(curthread->td_mpcount > 0);
			if (i != 0) {
				/*
				 * returns to original process
				 */
				vm86_trap((struct vm86frame *)&frame);
			}
			goto out2;
		}
		switch (type) {
			/*
			 * these traps want either a process context, or
			 * assume a normal userspace trap.
			 */
		case T_PROTFLT:
		case T_SEGNPFLT:
			trap_fatal(&frame, eva);
			goto out2;
		case T_TRCTRAP:
			type = T_BPTFLT;	/* kernel breakpoint */
			/* FALL THROUGH */
		}
		goto kernel_trap;	/* normal kernel trap handling */
	}
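/*
 * (Faults taken while the kernel itself is running in vm86 mode, e.g.
 * during BIOS calls set up through the vm86 layer, are filtered above
 * before the normal user/kernel split below.)
 */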
	if ((ISPL(frame.tf_cs) == SEL_UPL) || (frame.tf_eflags & PSL_VM)) {
		/* user trap */

		userenter(td);

		sticks = curthread->td_sticks;
		p->p_md.md_regs = &frame;

		switch (type) {
		case T_PRIVINFLT:	/* privileged instruction fault */
			ucode = type;
			i = SIGILL;
			break;

		case T_BPTFLT:		/* bpt instruction fault */
		case T_TRCTRAP:		/* trace trap */
			frame.tf_eflags &= ~PSL_T;
			i = SIGTRAP;
			break;

		case T_ARITHTRAP:	/* arithmetic trap */
			ucode = code;
			i = SIGFPE;
			break;

		case T_ASTFLT:		/* Allow process switch */
			mycpu->gd_cnt.v_soft++;
			if (mycpu->gd_reqflags & RQF_AST_OWEUPC) {
				atomic_clear_int_nonlocked(&mycpu->gd_reqflags,
				    RQF_AST_OWEUPC);
				addupc_task(p, p->p_stats->p_prof.pr_addr,
				    p->p_stats->p_prof.pr_ticks);
			}
			goto out;
			/*
			 * The following two traps can happen in
			 * vm86 mode, and, if so, we want to handle
			 * them specially.
			 */
		case T_PROTFLT:		/* general protection fault */
		case T_STKFLT:		/* stack fault */
			if (frame.tf_eflags & PSL_VM) {
				i = vm86_emulate((struct vm86frame *)&frame);
				if (i == 0)
					goto out;
				break;
			}
			/* FALL THROUGH */

		case T_SEGNPFLT:	/* segment not present fault */
		case T_TSSFLT:		/* invalid TSS fault */
		case T_DOUBLEFLT:	/* double fault */
		default:
			ucode = code + BUS_SEGM_FAULT;
			i = SIGBUS;
			break;

		case T_PAGEFLT:		/* page fault */
			i = trap_pfault(&frame, TRUE, eva);
			if (i == -1)
				goto out;
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
			if (i == -2)
				goto restart;
#endif
			if (i == 0)
				goto out;

			ucode = T_PAGEFLT;
			break;

		case T_DIVIDE:		/* integer divide fault */
			ucode = FPE_INTDIV_TRAP;
			i = SIGFPE;
			break;

#if NISA > 0
		case T_NMI:
#ifdef POWERFAIL_NMI
			goto handle_powerfail;
#else /* !POWERFAIL_NMI */
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					printf ("NMI ... going to debugger\n");
					kdb_trap (type, 0, &frame);
				}
#endif /* DDB */
				goto out2;
			} else if (panic_on_nmi)
				panic("NMI indicates hardware failure");
			break;
#endif /* POWERFAIL_NMI */
#endif /* NISA > 0 */

		case T_OFLOW:		/* integer overflow fault */
			ucode = FPE_INTOVF_TRAP;
			i = SIGFPE;
			break;

		case T_BOUND:		/* bounds check fault */
			ucode = FPE_SUBRNG_TRAP;
			i = SIGFPE;
			break;
		case T_DNA:
#if NNPX > 0
			/* if a transparent fault (due to context switch "late") */
			if (npxdna())
				goto out;
#endif
			if (!pmath_emulate) {
				i = SIGFPE;
				ucode = FPE_FPU_NP_TRAP;
				break;
			}
			i = (*pmath_emulate)(&frame);
			if (i == 0) {
				if (!(frame.tf_eflags & PSL_T))
					goto out;
				frame.tf_eflags &= ~PSL_T;
				i = SIGTRAP;
			}
			/* else ucode = emulator_only_knows() XXX */
			break;

		case T_FPOPFLT:		/* FPU operand fetch fault */
			ucode = T_FPOPFLT;
			i = SIGILL;
			break;

		case T_XMMFLT:		/* SIMD floating-point exception */
			ucode = 0;	/* XXX */
			i = SIGFPE;
			break;
		}
	} else {
kernel_trap:
		/* kernel trap */

		switch (type) {
		case T_PAGEFLT:			/* page fault */
			(void) trap_pfault(&frame, FALSE, eva);
			goto out2;

		case T_DNA:
#if NNPX > 0
			/*
			 * The kernel is apparently using npx for copying.
			 * XXX this should be fatal unless the kernel has
			 * registered such use.
			 */
			if (npxdna())
				goto out2;
#endif
			break;
		case T_PROTFLT:		/* general protection fault */
		case T_SEGNPFLT:	/* segment not present fault */
			/*
			 * Invalid segment selectors and out of bounds
			 * %eip's and %esp's can be set up in user mode.
			 * This causes a fault in kernel mode when the
			 * kernel tries to return to user mode.  We want
			 * to get this fault so that we can fix the
			 * problem here and not have to check all the
			 * selectors and pointers when the user changes
			 * them.
			 */
#define	MAYBE_DORETI_FAULT(where, whereto)				\
	do {								\
		if (frame.tf_eip == (int)where) {			\
			frame.tf_eip = (int)whereto;			\
			goto out2;					\
		}							\
	} while (0)
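	/*
	 * (doreti_iret and the doreti_popl_* symbols referenced below are
	 * labels in the i386 doreti assembly path; each has a matching
	 * *_fault recovery stub that this macro substitutes for the
	 * faulting %eip.)
	 */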
			/*
			 * Since we don't save %gs across an interrupt
			 * frame this check must occur outside the intr
			 * nesting level check.
			 */
			if (frame.tf_eip == (int)cpu_switch_load_gs) {
				curthread->td_pcb->pcb_gs = 0;
				psignal(p, SIGBUS);
				goto out2;
			}
			if (mycpu->gd_intr_nesting_level == 0) {
				/*
				 * Invalid %fs's and %gs's can be created using
				 * procfs or PT_SETREGS or by invalidating the
				 * underlying LDT entry.  This causes a fault
				 * in kernel mode when the kernel attempts to
				 * switch contexts.  Lose the bad context
				 * (XXX) so that we can continue, and generate
				 * a signal.
				 */
				MAYBE_DORETI_FAULT(doreti_iret,
				    doreti_iret_fault);
				MAYBE_DORETI_FAULT(doreti_popl_ds,
				    doreti_popl_ds_fault);
				MAYBE_DORETI_FAULT(doreti_popl_es,
				    doreti_popl_es_fault);
				MAYBE_DORETI_FAULT(doreti_popl_fs,
				    doreti_popl_fs_fault);
				if (curthread->td_pcb->pcb_onfault) {
					frame.tf_eip =
					    (int)curthread->td_pcb->pcb_onfault;
					goto out2;
				}
			}
			break;

		case T_TSSFLT:
			/*
			 * PSL_NT can be set in user mode and isn't cleared
			 * automatically when the kernel is entered.  This
			 * causes a TSS fault when the kernel attempts to
			 * `iret' because the TSS link is uninitialized.  We
			 * want to get this fault so that we can fix the
			 * problem here and not every time the kernel is
			 * entered.
			 */
			if (frame.tf_eflags & PSL_NT) {
				frame.tf_eflags &= ~PSL_NT;
				goto out2;
			}
			break;
		case T_TRCTRAP:	 /* trace trap */
			if (frame.tf_eip == (int)IDTVEC(syscall)) {
				/*
				 * We've just entered system mode via the
				 * syscall lcall.  Continue single stepping
				 * silently until the syscall handler has
				 * saved the flags.
				 */
				goto out2;
			}
			if (frame.tf_eip == (int)IDTVEC(syscall) + 1) {
				/*
				 * The syscall handler has now saved the
				 * flags.  Stop single stepping it.
				 */
				frame.tf_eflags &= ~PSL_T;
				goto out2;
			}
			/*
			 * Ignore debug register trace traps due to
			 * accesses in the user's address space, which
			 * can happen under several conditions such as
			 * if a user sets a watchpoint on a buffer and
			 * then passes that buffer to a system call.
			 * We still want to get TRCTRAPS for addresses
			 * in kernel space because that is useful when
			 * debugging the kernel.
			 */
			if (user_dbreg_trap()) {
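				/*
				 * (%dr6's low four bits, B0-B3, record which
				 * hardware breakpoint fired; the mask below
				 * clears exactly those bits.)
				 */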
				/*
				 * Reset breakpoint bits because the
				 * processor doesn't
				 */
				load_dr6(rdr6() & 0xfffffff0);
				goto out2;
			}
			/*
			 * Fall through (TRCTRAP kernel mode, kernel address)
			 */
		case T_BPTFLT:
			/*
			 * If DDB is enabled, let it handle the debugger trap.
			 * Otherwise, debugger traps "can't happen".
			 */
#ifdef DDB
			if (kdb_trap (type, 0, &frame))
				goto out2;
#endif
			break;
#if NISA > 0
		case T_NMI:
#ifdef POWERFAIL_NMI
#ifndef TIMER_FREQ
#  define TIMER_FREQ 1193182
#endif
	handle_powerfail:
		{
			static unsigned lastalert = 0;

			if (time_second - lastalert > 10) {
				log(LOG_WARNING, "NMI: power fail\n");
				sysbeep(TIMER_FREQ/880, hz);
				lastalert = time_second;
			}
			goto out2;
		}
#else /* !POWERFAIL_NMI */
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					printf ("NMI ... going to debugger\n");
					kdb_trap (type, 0, &frame);
				}
#endif /* DDB */
				goto out2;
			} else if (panic_on_nmi == 0)
				goto out2;
			/* FALL THROUGH */
#endif /* POWERFAIL_NMI */
#endif /* NISA > 0 */
		default:
			trap_fatal(&frame, eva);
			goto out2;
		}
	}

	/* Translate fault for emulators (e.g. Linux) */
	if (*p->p_sysent->sv_transtrap)
		i = (*p->p_sysent->sv_transtrap)(i, type);

	trapsignal(p, i, ucode);

#ifdef DEBUG
	if (type <= MAX_TRAP_MSG) {
		uprintf("fatal process exception: %s",
			trap_msg[type]);
		if ((type == T_PAGEFLT) || (type == T_PROTFLT))
			uprintf(", fault VA = 0x%lx", (u_long)eva);
		uprintf("\n");
	}
#endif

out:
	if (ISPL(frame.tf_cs) == SEL_UPL)
		KASSERT(curthread->td_mpcount == 1, ("badmpcount trap from %p", (void *)frame.tf_eip));
	userret(p, &frame, sticks);
out2:
	KKASSERT(curthread->td_mpcount > 0);
	rel_mplock();
}
#ifdef notyet
/*
 * This version doesn't allow a page fault to user space while
 * in the kernel. The rest of the kernel needs to be made "safe"
 * before this can be used. I think the only things remaining
 * to be made safe are the iBCS2 code and the process tracing/
 * debugging code.
 */
static int
trap_pfault(frame, usermode, eva)
	struct trapframe *frame;
	int usermode;
	vm_offset_t eva;
{
	vm_offset_t va;
	struct vmspace *vm = NULL;
	vm_map_t map = 0;
	int rv = 0;
	vm_prot_t ftype;
	struct proc *p = curproc;

	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_WRITE;
	else
		ftype = VM_PROT_READ;

	va = trunc_page(eva);
	if (va < VM_MIN_KERNEL_ADDRESS) {
		if (p == NULL ||
		    (!usermode && va < VM_MAXUSER_ADDRESS &&
		     (mycpu->gd_intr_nesting_level != 0 ||
		      curthread->td_pcb->pcb_onfault == NULL))) {
			trap_fatal(frame, eva);
			return (-1);
		}

		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL. If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */
		vm = p->p_vmspace;
		if (vm == NULL)
			goto nogo;

		map = &vm->vm_map;

		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		++p->p_lock;

		/*
		 * Grow the stack if necessary
		 */
		/*
		 * grow_stack returns false only if va falls into
		 * a growable stack region and the stack growth
		 * fails.  It returns true if va was not within
		 * a growable stack region, or if the stack
		 * growth succeeded.
		 */
		if (!grow_stack (p, va)) {
			rv = KERN_FAILURE;
			--p->p_lock;
			goto nogo;
		}

		/* Fault in the user page: */
		rv = vm_fault(map, va, ftype,
		    (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
					    : VM_FAULT_NORMAL);

		--p->p_lock;
	} else {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 */
		if (usermode)
			goto nogo;

		/*
		 * Since we know that kernel virtual address addresses
		 * always have pte pages mapped, we just have to fault
		 * the page.
		 */
		rv = vm_fault(kernel_map, va, ftype, VM_FAULT_NORMAL);
	}

	if (rv == KERN_SUCCESS)
		return (0);
nogo:
	if (!usermode) {
		if (mycpu->gd_intr_nesting_level == 0 && curthread->td_pcb->pcb_onfault) {
			frame->tf_eip = (int)curthread->td_pcb->pcb_onfault;
			return (0);
		}
		trap_fatal(frame, eva);
		return (-1);
	}

	/* kludge to pass faulting virtual address to sendsig */
	frame->tf_err = eva;

	return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}
#endif
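/*
 * (The active trap_pfault() below implements the same algorithm but
 * additionally carries the Pentium F00F workaround check and tolerates
 * kernel-mode faults on user addresses, recovering via pcb_onfault.)
 */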
static int
trap_pfault(frame, usermode, eva)
	struct trapframe *frame;
	int usermode;
	vm_offset_t eva;
{
	vm_offset_t va;
	struct vmspace *vm = NULL;
	vm_map_t map = 0;
	int rv = 0;
	vm_prot_t ftype;
	struct proc *p = curproc;

	va = trunc_page(eva);
	if (va >= KERNBASE) {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 * An exception:  if the faulting address is the invalid
		 * instruction entry in the IDT, then the Intel Pentium
		 * F00F bug workaround was triggered, and we need to
		 * treat it as an illegal instruction, and not a page
		 * fault.
		 */
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
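		/*
		 * (The F00F workaround maps the IDT so that fetching the
		 * invalid-opcode gate, idt[6], page faults; a fault whose
		 * address matches that gate is therefore really a locked
		 * cmpxchg8b decode and is rethrown as an illegal
		 * instruction trap.)
		 */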
		if ((eva == (unsigned int)&idt[6]) && has_f00f_bug) {
			frame->tf_trapno = T_PRIVINFLT;
			return -2;
		}
#endif
		if (usermode)
			goto nogo;

		map = kernel_map;
	} else {
		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL. If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */
		if (p != NULL)
			vm = p->p_vmspace;

		if (vm == NULL)
			goto nogo;

		map = &vm->vm_map;
	}

	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_WRITE;
	else
		ftype = VM_PROT_READ;

	if (map != kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		++p->p_lock;

		/*
		 * Grow the stack if necessary
		 */
		/*
		 * grow_stack returns false only if va falls into
		 * a growable stack region and the stack growth
		 * fails.  It returns true if va was not within
		 * a growable stack region, or if the stack
		 * growth succeeded.
		 */
		if (!grow_stack (p, va)) {
			rv = KERN_FAILURE;
			--p->p_lock;
			goto nogo;
		}

		/* Fault in the user page: */
		rv = vm_fault(map, va, ftype,
		    (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
					    : VM_FAULT_NORMAL);

		--p->p_lock;
	} else {
		/*
		 * Don't have to worry about process locking or stacks in the kernel.
		 */
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	}

	if (rv == KERN_SUCCESS)
		return (0);
nogo:
	if (!usermode) {
		if (mycpu->gd_intr_nesting_level == 0 && curthread->td_pcb->pcb_onfault) {
			frame->tf_eip = (int)curthread->td_pcb->pcb_onfault;
			return (0);
		}
		trap_fatal(frame, eva);
		return (-1);
	}

	/* kludge to pass faulting virtual address to sendsig */
	frame->tf_err = eva;

	return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}
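/*
 * (Return protocol consumed by trap(): 0 on success, -1 when the fault
 * was resolved or reported via trap_fatal()/pcb_onfault, -2 to restart
 * the instruction for the F00F workaround, otherwise the signal number,
 * SIGBUS or SIGSEGV, to deliver.)
 */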
static void
trap_fatal(frame, eva)
	struct trapframe *frame;
	vm_offset_t eva;
{
	int code, type, ss, esp;
	struct soft_segment_descriptor softseg;

	code = frame->tf_err;
	type = frame->tf_trapno;
	sdtossd(&gdt[mycpu->gd_cpuid * NGDT + IDXSEL(frame->tf_cs & 0xffff)].sd, &softseg);

	if (type <= MAX_TRAP_MSG)
		printf("\n\nFatal trap %d: %s while in %s mode\n",
		    type, trap_msg[type],
		    frame->tf_eflags & PSL_VM ? "vm86" :
		    ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
#ifdef SMP
	/* three separate prints in case of a trap on an unmapped page */
	printf("mp_lock = %08x; ", mp_lock);
	printf("cpuid = %d; ", mycpu->gd_cpuid);
	printf("lapic.id = %08x\n", lapic.id);
#endif
	if (type == T_PAGEFLT) {
		printf("fault virtual address = 0x%x\n", eva);
		printf("fault code = %s %s, %s\n",
		    code & PGEX_U ? "user" : "supervisor",
		    code & PGEX_W ? "write" : "read",
		    code & PGEX_P ? "protection violation" : "page not present");
	}
	printf("instruction pointer = 0x%x:0x%x\n",
	    frame->tf_cs & 0xffff, frame->tf_eip);
	if ((ISPL(frame->tf_cs) == SEL_UPL) || (frame->tf_eflags & PSL_VM)) {
		ss = frame->tf_ss & 0xffff;
		esp = frame->tf_esp;
	} else {
		ss = GSEL(GDATA_SEL, SEL_KPL);
		esp = (int)&frame->tf_esp;
	}
	printf("stack pointer = 0x%x:0x%x\n", ss, esp);
	printf("frame pointer = 0x%x:0x%x\n", ss, frame->tf_ebp);
	printf("code segment = base 0x%x, limit 0x%x, type 0x%x\n",
	    softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
	printf(" = DPL %d, pres %d, def32 %d, gran %d\n",
	    softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_def32,
	    softseg.ssd_gran);
	printf("processor eflags = ");
	if (frame->tf_eflags & PSL_T)
		printf("trace trap, ");
	if (frame->tf_eflags & PSL_I)
		printf("interrupt enabled, ");
	if (frame->tf_eflags & PSL_NT)
		printf("nested task, ");
	if (frame->tf_eflags & PSL_RF)
		printf("resume, ");
	if (frame->tf_eflags & PSL_VM)
		printf("vm86, ");
	printf("IOPL = %d\n", (frame->tf_eflags & PSL_IOPL) >> 12);
	printf("current process = ");
	if (curproc) {
		printf("%lu (%s)\n",
		    (u_long)curproc->p_pid, curproc->p_comm ?
		    curproc->p_comm : "");
	} else {
		printf("Idle\n");
	}
	printf("current thread = pri %d ", curthread->td_pri);
	if (curthread->td_pri >= TDPRI_CRIT)
		printf("(CRIT)");
	printf("\n");
	printf("interrupt mask = ");
	if ((curthread->td_cpl & net_imask) == net_imask)
		printf("net ");
	if ((curthread->td_cpl & tty_imask) == tty_imask)
		printf("tty ");
	if ((curthread->td_cpl & bio_imask) == bio_imask)
		printf("bio ");
	if ((curthread->td_cpl & cam_imask) == cam_imask)
		printf("cam ");
	if (curthread->td_cpl == 0)
		printf("none");
#ifdef SMP
/**
 *  XXX FIXME:
 *	we probably SHOULD have stopped the other CPUs before now!
 *	another CPU COULD have been touching cpl at this moment...
 */
	printf(" <- SMP: XXX");
#endif
	printf("\n");

#ifdef DDB
	if ((debugger_on_panic || db_active) && kdb_trap(type, code, frame))
		return;
#endif
	printf("trap number = %d\n", type);
	if (type <= MAX_TRAP_MSG)
		panic("%s", trap_msg[type]);
	else
		panic("unknown/reserved trap");
}
/*
 * Double fault handler. Called when a fault occurs while writing
 * a frame for a trap/exception onto the stack. This usually occurs
 * when the stack overflows (such is the case with infinite recursion,
 * for example).
 *
 * XXX Note that the current PTD gets replaced by IdlePTD when the
 * task switch occurs. This means that the stack that was active at
 * the time of the double fault is not available at <kstack> unless
 * the machine was idle when the double fault occurred. The downside
 * of this is that "trace <ebp>" in ddb won't work.
 */
void
dblfault_handler(void)
{
	struct mdglobaldata *gd = mdcpu;

	printf("\nFatal double fault:\n");
	printf("eip = 0x%x\n", gd->gd_common_tss.tss_eip);
	printf("esp = 0x%x\n", gd->gd_common_tss.tss_esp);
	printf("ebp = 0x%x\n", gd->gd_common_tss.tss_ebp);
#ifdef SMP
	/* three separate prints in case of a trap on an unmapped page */
	printf("mp_lock = %08x; ", mp_lock);
	printf("cpuid = %d; ", mycpu->gd_cpuid);
	printf("lapic.id = %08x\n", lapic.id);
#endif
	panic("double fault");
}
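/*
 * (The double fault arrives through a TSS task gate, which is why the
 * faulting register state is recovered from gd_common_tss rather than
 * from a trapframe.)
 */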
/*
 * Compensate for 386 brain damage (missing URKR).
 * This is a little simpler than the pagefault handler in trap() because
 * the page tables have already been faulted in and high addresses
 * are thrown out early for other reasons.
 */
int
trapwrite(addr)
	unsigned addr;
{
	struct proc *p;
	vm_offset_t va;
	struct vmspace *vm;
	int rv;

	va = trunc_page((vm_offset_t)addr);
	/*
	 * XXX - MAX is END.  Changed > to >= for temp. fix.
	 */
	if (va >= VM_MAXUSER_ADDRESS)
		return (1);

	p = curproc;
	vm = p->p_vmspace;

	++p->p_lock;

	if (!grow_stack (p, va)) {
		--p->p_lock;
		return (1);
	}

	/*
	 * fault the data page
	 */
	rv = vm_fault(&vm->vm_map, va, VM_PROT_WRITE, VM_FAULT_DIRTY);

	--p->p_lock;

	if (rv != KERN_SUCCESS)
		return 1;

	return (0);
}
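/*
 * (Background: the 80386 ignores page-level write protection while in
 * supervisor mode, so copy-to-user paths call trapwrite() to force the
 * fault and copy-on-write processing that later CPUs perform in
 * hardware.)
 */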
/*
 *	syscall2 -	MP aware system call request C handler
 *
 *	A system call is essentially treated as a trap except that the
 *	MP lock is not held on entry or return.  We are responsible for
 *	obtaining the MP lock if necessary and for handling ASTs
 *	(e.g. a task switch) prior to return.
 *
 *	In general, only simple access and manipulation of curproc and
 *	the current stack is allowed without having to hold MP lock.
 */
void
syscall2(struct trapframe frame)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	caddr_t params;
	int i;
	struct sysent *callp;
	register_t orig_tf_eflags;
	u_quad_t sticks;
	int error;
	int narg;
	u_int code;
	union sysunion args;

#ifdef DIAGNOSTIC
	if (ISPL(frame.tf_cs) != SEL_UPL) {
		get_mplock();
		panic("syscall");
		/* NOT REACHED */
	}
#endif

	KASSERT(curthread->td_mpcount == 0, ("badmpcount syscall from %p", (void *)frame.tf_eip));

	/*
	 * access non-atomic field from critical section.  p_sticks is
	 * updated by the clock interrupt.  Also use this opportunity
	 * to lazy-raise our LWKT priority.
	 */
	userenter(td);
	crit_enter_quick(td);
	sticks = curthread->td_sticks;
	crit_exit_quick(td);

	p->p_md.md_regs = &frame;
	params = (caddr_t)frame.tf_esp + sizeof(int);
	code = frame.tf_eax;
	orig_tf_eflags = frame.tf_eflags;
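	/*
	 * (Calling convention as used here: the syscall number arrives in
	 * %eax and the arguments sit on the user stack just above the
	 * return address, hence params = tf_esp + sizeof(int).)
	 */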
	if (p->p_sysent->sv_prepsyscall) {
		/*
		 * The prep code is not MP aware.
		 */
		(*p->p_sysent->sv_prepsyscall)(&frame, (int *)(&args.nosys.usrmsg + 1), &code, &params);
	} else {
		/*
		 * Need to check if this is a 32 bit or 64 bit syscall.
		 * fuword is MP aware.
		 */
		if (code == SYS_syscall) {
			/*
			 * Code is first argument, followed by actual args.
			 */
			code = fuword(params);
			params += sizeof(int);
		} else if (code == SYS___syscall) {
			/*
			 * Like syscall, but code is a quad, so as to maintain
			 * quad alignment for the rest of the arguments.
			 */
			code = fuword(params);
			params += sizeof(quad_t);
		}
	}

	if (p->p_sysent->sv_mask)
		code &= p->p_sysent->sv_mask;

	if (code >= p->p_sysent->sv_size)
		callp = &p->p_sysent->sv_table[0];
	else
		callp = &p->p_sysent->sv_table[code];
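	/*
	 * (An out-of-range syscall number dispatches to sv_table[0], the
	 * nosys entry, rather than faulting outright.)
	 */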
	narg = callp->sy_narg & SYF_ARGMASK;

	/*
	 * copyin is MP aware, but the tracing code is not
	 */
	if (params && (i = narg * sizeof(register_t)) &&
	    (error = copyin(params, (caddr_t)(&args.nosys.usrmsg + 1), (u_int)i))) {
		get_mplock();
		if (KTRPOINT(td, KTR_SYSCALL))
			ktrsyscall(p->p_tracep, code, narg, (void *)(&args.nosys.usrmsg + 1));
		goto bad;
	}

	/*
	 * Try to run the syscall without the MP lock if the syscall
	 * is MP safe.  We have to obtain the MP lock no matter what if
	 * we are ktracing
	 */
	if ((callp->sy_narg & SYF_MPSAFE) == 0) {
		get_mplock();
	}

	if (KTRPOINT(td, KTR_SYSCALL)) {
		ktrsyscall(p->p_tracep, code, narg, (void *)(&args.nosys.usrmsg + 1));
	}

	/*
	 * For traditional syscall code edx is left untouched when 32 bit
	 * results are returned.  Since edx is loaded from fds[1] when the
	 * system call returns we pre-set it here.
	 */
	lwkt_initmsg(&args.lmsg, &td->td_msgport, code);
	args.sysmsg_copyout = NULL;
	args.sysmsg_fds[0] = 0;
	args.sysmsg_fds[1] = frame.tf_edx;

	STOPEVENT(p, S_SCE, narg);	/* MP aware */

	error = (*callp->sy_call)(&args);
	/*
	 * MP SAFE (we may or may not have the MP lock at this point)
	 */
	switch (error) {
	case 0:
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		frame.tf_eax = args.sysmsg_fds[0];
		frame.tf_edx = args.sysmsg_fds[1];
		frame.tf_eflags &= ~PSL_C;
		break;
	case ERESTART:
		/*
		 * Reconstruct pc, assuming lcall $X,y is 7 bytes,
		 * int 0x80 is 2 bytes. We saved this in tf_err.
		 */
		frame.tf_eip -= frame.tf_err;
		break;
	case EJUSTRETURN:
		break;
1366 panic("Unexpected EASYNC return value (for now)");
1369 if (p->p_sysent->sv_errsize) {
1370 if (error >= p->p_sysent->sv_errsize)
1371 error = -1; /* XXX */
1373 error = p->p_sysent->sv_errtbl[error];
1375 frame.tf_eax = error;
1376 frame.tf_eflags |= PSL_C;
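		/*
		 * (Userland libc syscall stubs test the carry flag on
		 * return to decide whether %eax holds a result or an
		 * errno value.)
		 */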
		break;
	}

	/*
	 * Traced syscall.  trapsignal() is not MP aware.
	 */
	if ((orig_tf_eflags & PSL_T) && !(orig_tf_eflags & PSL_VM)) {
		frame.tf_eflags &= ~PSL_T;
		trapsignal(p, SIGTRAP, 0);
	}

	/*
	 * Handle reschedule and other end-of-syscall issues
	 */
	userret(p, &frame, sticks);

	if (KTRPOINT(td, KTR_SYSRET)) {
		ktrsysret(p->p_tracep, code, error, args.sysmsg_result);
	}

	/*
	 * This works because errno is findable through the
	 * register set.  If we ever support an emulation where this
	 * is not the case, this code will need to be revisited.
	 */
	STOPEVENT(p, S_SCX, code);

	userexit(p);

	/*
	 * Release the MP lock if we had to get it
	 */
	KASSERT(curthread->td_mpcount == 1, ("badmpcount syscall from %p", (void *)frame.tf_eip));
	rel_mplock();
}
/*
 *	sendsys2 -	MP aware system message request C handler
 */
void
sendsys2(struct trapframe frame)
{
	struct globaldata *gd;
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	register_t orig_tf_eflags;
	struct sysent *callp;
	union sysunion *sysun;
	lwkt_msg_t umsg;
	u_quad_t sticks;
	int error;
	int narg;
	u_int code = 0;
	int msgsize;
	int result;

#ifdef DIAGNOSTIC
	if (ISPL(frame.tf_cs) != SEL_UPL) {
		get_mplock();
		panic("sendsys");
		/* NOT REACHED */
	}
#endif

	KASSERT(curthread->td_mpcount == 0, ("badmpcount syscall from %p", (void *)frame.tf_eip));

	/*
	 * access non-atomic field from critical section.  p_sticks is
	 * updated by the clock interrupt.  Also use this opportunity
	 * to lazy-raise our LWKT priority.
	 */
	userenter(td);
	crit_enter_quick(td);
	sticks = curthread->td_sticks;
	crit_exit_quick(td);

	p->p_md.md_regs = &frame;
	orig_tf_eflags = frame.tf_eflags;
	result = 0;

	/*
	 * Handle the waitport/waitmsg/checkport/checkmsg case
	 *
	 * YYY MOVE THIS TO INT 0x82!  We don't really need to combine it
	 * with sendsys().
	 */
	if ((msgsize = frame.tf_edx) <= 0) {
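		/*
		 * (User protocol as implemented here: %ecx carries the user
		 * message pointer and %edx its size; a size of 0 means wait
		 * for a message, -1 means poll for one.)
		 */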
1470 printf("waitmsg/checkmsg not yet supported: %08x\n",
1476 printf("waitport/checkport only the default port is supported at the moment\n");
1483 * Wait on port for message
1485 sysun = lwkt_getport(&td->td_msgport);
1490 * Test port for message
1492 sysun = lwkt_getport(&td->td_msgport);
1500 umsg = sysun->lmsg.opaque.ms_umsg;
1501 frame.tf_eax = (register_t)umsg;
1502 if (sysun->sysmsg_copyout)
1503 sysun->sysmsg_copyout(sysun);
1504 atomic_add_int_nonlocked(&td->td_msgport.mp_refs, -1);
1505 sysun->nosys.usrmsg.umsg.u.ms_fds[0] = sysun->lmsg.u.ms_fds[0];
1506 sysun->nosys.usrmsg.umsg.u.ms_fds[1] = sysun->lmsg.u.ms_fds[1];
1507 sysun->nosys.usrmsg.umsg.ms_error = sysun->lmsg.ms_error;
1508 error = sysun->lmsg.ms_error;
1509 result = sysun->lmsg.u.ms_fds[0]; /* for ktrace */
1510 if (error != 0 || code != SYS_execve) {
1512 &sysun->nosys.usrmsg.umsg.ms_copyout_start,
1513 &umsg->ms_copyout_start,
1516 crit_enter_quick(td);
1517 sysun->lmsg.opaque.ms_sysunnext = gd->gd_freesysun;
1518 gd->gd_freesysun = sysun;
1519 crit_exit_quick(td);
	/*
	 * Extract the system call message.  If msgsize is zero we are
	 * blocking on a message and/or message port.  If msgsize is -1
	 * we are testing a message for completion or a message port for
	 * activity.
	 *
	 * The userland system call message size includes the size of the
	 * userland lwkt_msg plus arguments.  We load it into the userland
	 * portion of our sysunion structure then we initialize the kerneland
	 * portion and go.
	 */
	if (msgsize < sizeof(struct lwkt_msg) ||
	    msgsize > sizeof(union sysunion) - sizeof(struct sysmsg)) {
		error = ENOSYS;
		goto bad2;
	}

	/*
	 * Obtain a sysun from our per-cpu cache or allocate a new one.  Use
	 * the opaque field to store the original (user) message pointer.
	 * A critical section is necessary to interlock against interrupts
	 * returning system messages to the thread cache.
	 */
	gd = td->td_gd;
	crit_enter_quick(td);
	if ((sysun = gd->gd_freesysun) != NULL) {
		gd->gd_freesysun = sysun->lmsg.opaque.ms_sysunnext;
		crit_exit_quick(td);
	} else {
		crit_exit_quick(td);
		sysun = malloc(sizeof(union sysunion), M_SYSMSG, M_WAITOK);
	}
	atomic_add_int_nonlocked(&td->td_msgport.mp_refs, 1);

	/*
	 * Copy the user request into the kernel copy of the user request.
	 */
	umsg = (void *)frame.tf_ecx;
	error = copyin(umsg, &sysun->nosys.usrmsg, msgsize);
	if (error)
		goto bad1;
	if ((sysun->nosys.usrmsg.umsg.ms_flags & MSGF_ASYNC) &&
	    (error = suser(td)) != 0) {
		goto bad1;
	}

	/*
	 * Initialize the kernel message from the copied-in data and
	 * pull in appropriate flags from the userland message.
	 */
	lwkt_initmsg(&sysun->lmsg, &td->td_msgport,
	    sysun->nosys.usrmsg.umsg.ms_cmd);
	sysun->sysmsg_copyout = NULL;
	sysun->lmsg.opaque.ms_umsg = umsg;
	sysun->lmsg.ms_flags |= sysun->nosys.usrmsg.umsg.ms_flags & MSGF_ASYNC;

	/*
	 * Extract the system call number, lookup the system call, and
	 * set the default return value.
	 */
	code = (u_int)sysun->lmsg.ms_cmd;
	if (code >= p->p_sysent->sv_size) {
		error = ENOSYS;
		goto bad1;
	}

	callp = &p->p_sysent->sv_table[code];

	narg = (msgsize - sizeof(struct lwkt_msg)) / sizeof(register_t);
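	/*
	 * (The argument words follow the embedded userland lwkt_msg, so
	 * narg derives from the copied-in message size rather than from
	 * the sysent table.)
	 */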
	if (KTRPOINT(td, KTR_SYSCALL)) {
		ktrsyscall(p->p_tracep, code, narg, (void *)(&sysun->nosys.usrmsg + 1));
	}

	sysun->lmsg.u.ms_fds[0] = 0;
	sysun->lmsg.u.ms_fds[1] = 0;

	STOPEVENT(p, S_SCE, narg);	/* MP aware */

	/*
	 * Make the system call.  An error code is always returned, results
	 * are copied back via ms_result32 or ms_result64.  YYY temporary
	 * stage copy p_retval[] into ms_result32/64
	 *
	 * NOTE!  XXX if this is a child returning from a fork curproc
	 * might be different.  YYY huh? a child returning from a fork
	 * should never 'return' from this call, it should go right to the
	 * fork_trampoline function.
	 */
	error = (*callp->sy_call)(sysun);
	gd = td->td_gd;	/* RELOAD, might have switched cpus */

bad1:
	/*
	 * If a synchronous return copy p_retval to ms_result64 and return
	 * the sysmsg to the free pool.
	 *
	 * YYY Don't writeback message if execve() YYY
	 */
	if (error != EASYNC) {
		atomic_add_int_nonlocked(&td->td_msgport.mp_refs, -1);
		sysun->nosys.usrmsg.umsg.u.ms_fds[0] = sysun->lmsg.u.ms_fds[0];
		sysun->nosys.usrmsg.umsg.u.ms_fds[1] = sysun->lmsg.u.ms_fds[1];
		result = sysun->nosys.usrmsg.umsg.u.ms_fds[0]; /* for ktrace */
		if (error != 0 || code != SYS_execve) {
			int error2;

			error2 = copyout(&sysun->nosys.usrmsg.umsg.ms_copyout_start,
			    &umsg->ms_copyout_start, ms_copyout_size);
			if (error2 != 0)
				error = error2;
		}
		crit_enter_quick(td);
		sysun->lmsg.opaque.ms_sysunnext = gd->gd_freesysun;
		gd->gd_freesysun = sysun;
		crit_exit_quick(td);
	}
bad2:
	frame.tf_eax = error;
good:

	/*
	 * Traced syscall.  trapsignal() is not MP aware.
	 */
	if ((orig_tf_eflags & PSL_T) && !(orig_tf_eflags & PSL_VM)) {
		frame.tf_eflags &= ~PSL_T;
		trapsignal(p, SIGTRAP, 0);
	}

	/*
	 * Handle reschedule and other end-of-syscall issues
	 */
	userret(p, &frame, sticks);

	if (KTRPOINT(td, KTR_SYSRET)) {
		ktrsysret(p->p_tracep, code, error, result);
	}

	/*
	 * This works because errno is findable through the
	 * register set.  If we ever support an emulation where this
	 * is not the case, this code will need to be revisited.
	 */
	STOPEVENT(p, S_SCX, code);

	userexit(p);

	/*
	 * Release the MP lock if we had to get it
	 */
	KASSERT(curthread->td_mpcount == 1, ("badmpcount syscall from %p", (void *)frame.tf_eip));
	rel_mplock();
}
/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.  MP lock is held on entry and should be
 * released on return.  This code will return back into the fork
 * trampoline code which then runs doreti.
 */
void
fork_return(p, frame)
	struct proc *p;
	struct trapframe frame;
{
	frame.tf_eax = 0;		/* Child returns zero */
	frame.tf_eflags &= ~PSL_C;	/* success */
	frame.tf_edx = 1;

	userret(p, &frame, 0);
#ifdef KTRACE
	if (KTRPOINT(p->p_thread, KTR_SYSRET))
		ktrsysret(p->p_tracep, SYS_fork, 0, 0);
#endif
	p->p_flag |= P_PASSIVE_ACQ;
	userexit(p);
	p->p_flag &= ~P_PASSIVE_ACQ;
	KKASSERT(curthread->td_mpcount == 1);
	rel_mplock();
}