Finish migrating the cpl into the thread structure.
sys/platform/pc32/i386/trap.c
/*-
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $
 * $DragonFly: src/sys/platform/pc32/i386/trap.c,v 1.5 2003/06/22 08:54:18 dillon Exp $
 */

/*
 * 386 Trap and System call handling
 */

#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_ktrace.h"
#include "opt_clock.h"
#include "opt_trap.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pioctl.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>

#include <machine/cpu.h>
#include <machine/ipl.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#ifdef SMP
#include <machine/smp.h>
#endif
#include <machine/tss.h>

#include <i386/isa/intr_machdep.h>

#ifdef POWERFAIL_NMI
#include <sys/syslog.h>
#include <machine/clock.h>
#endif

#include <machine/vm86.h>

#include <ddb/ddb.h>

#include "isa.h"
#include "npx.h"

int (*pmath_emulate) __P((struct trapframe *));

extern void trap __P((struct trapframe frame));
extern int trapwrite __P((unsigned addr));
extern void syscall2 __P((struct trapframe frame));

static int trap_pfault __P((struct trapframe *, int, vm_offset_t));
static void trap_fatal __P((struct trapframe *, vm_offset_t));
void dblfault_handler __P((void));

extern inthand_t IDTVEC(syscall);

#define MAX_TRAP_MSG		28
static char *trap_msg[] = {
	"",					/*  0 unused */
	"privileged instruction fault",		/*  1 T_PRIVINFLT */
	"",					/*  2 unused */
	"breakpoint instruction fault",		/*  3 T_BPTFLT */
	"",					/*  4 unused */
	"",					/*  5 unused */
	"arithmetic trap",			/*  6 T_ARITHTRAP */
	"system forced exception",		/*  7 T_ASTFLT */
	"",					/*  8 unused */
	"general protection fault",		/*  9 T_PROTFLT */
	"trace trap",				/* 10 T_TRCTRAP */
	"",					/* 11 unused */
	"page fault",				/* 12 T_PAGEFLT */
	"",					/* 13 unused */
	"alignment fault",			/* 14 T_ALIGNFLT */
	"",					/* 15 unused */
	"",					/* 16 unused */
	"",					/* 17 unused */
	"integer divide fault",			/* 18 T_DIVIDE */
	"non-maskable interrupt trap",		/* 19 T_NMI */
	"overflow trap",			/* 20 T_OFLOW */
	"FPU bounds check fault",		/* 21 T_BOUND */
	"FPU device not available",		/* 22 T_DNA */
	"double fault",				/* 23 T_DOUBLEFLT */
	"FPU operand fetch fault",		/* 24 T_FPOPFLT */
	"invalid TSS fault",			/* 25 T_TSSFLT */
	"segment not present fault",		/* 26 T_SEGNPFLT */
	"stack fault",				/* 27 T_STKFLT */
	"machine check trap",			/* 28 T_MCHK */
};
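
/*
 * Note: trap_msg is indexed by the T_* trap numbers from <machine/trap.h>;
 * trap_fatal() and the DEBUG diagnostics near the end of trap() use it to
 * translate a trap number into a human-readable console/panic message.
 */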

static __inline int userret __P((struct proc *p, struct trapframe *frame,
				  u_quad_t oticks, int have_mplock));

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
extern int has_f00f_bug;
#endif

#ifdef DDB
static int ddb_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, ddb_on_nmi, CTLFLAG_RW,
	&ddb_on_nmi, 0, "Go to DDB on NMI");
#endif
static int panic_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
	&panic_on_nmi, 0, "Panic on NMI");

static __inline int
userret(p, frame, oticks, have_mplock)
	struct proc *p;
	struct trapframe *frame;
	u_quad_t oticks;
	int have_mplock;
{
	int sig, s;

	while ((sig = CURSIG(p)) != 0) {
		if (have_mplock == 0) {
			get_mplock();
			have_mplock = 1;
		}
		postsig(sig);
	}

	p->p_priority = p->p_usrpri;
	if (resched_wanted()) {
		/*
		 * Since we are curproc, a clock interrupt will normally just
		 * change our priority without moving us from one run queue
		 * to another (the running process is not on a queue).  If
		 * that happened after we called setrunqueue() on ourselves
		 * but before we mi_switch()'ed, we might not be on the queue
		 * indicated by our priority.
		 */
		if (have_mplock == 0) {
			get_mplock();
			have_mplock = 1;
		}
		s = splhigh();
		setrunqueue(p);
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		splx(s);
		while ((sig = CURSIG(p)) != 0)
			postsig(sig);
	}
	/*
	 * Charge system time if profiling.
	 */
	if (p->p_flag & P_PROFIL) {
		if (have_mplock == 0) {
			get_mplock();
			have_mplock = 1;
		}
		addupc_task(p, frame->tf_eip,
			    (u_int)(p->p_sticks - oticks) * psratio);
	}
	curpriority = p->p_priority;
	return(have_mplock);
}
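
/*
 * MP lock protocol for userret(): the caller passes in whether it already
 * holds the MP lock (have_mplock).  userret() acquires the lock itself if
 * it has to post a signal, reschedule, or charge profiling ticks, and it
 * returns the updated ownership state so the caller knows whether it must
 * eventually rel_mplock().  See syscall2() below for a caller that uses
 * the return value this way; trap() always holds the lock and passes 1.
 */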

#ifdef DEVICE_POLLING
extern u_int32_t poll_in_trap;
extern int ether_poll __P((int count));
#endif /* DEVICE_POLLING */

/*
 * Exception, fault, and trap interface to the FreeBSD kernel.
 * This common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed.
 */

void
trap(frame)
	struct trapframe frame;
{
	struct proc *p = curproc;
	u_quad_t sticks = 0;
	int i = 0, ucode = 0, type, code;
	vm_offset_t eva;

#ifdef DDB
	if (db_active) {
		eva = (frame.tf_trapno == T_PAGEFLT ? rcr2() : 0);
		trap_fatal(&frame, eva);
		return;
	}
#endif

	if (!(frame.tf_eflags & PSL_I)) {
		/*
		 * Buggy application or kernel code has disabled interrupts
		 * and then trapped.  Enabling interrupts now is wrong, but
		 * it is better than running with interrupts disabled until
		 * they are accidentally enabled later.
		 */
		type = frame.tf_trapno;
		if (ISPL(frame.tf_cs) == SEL_UPL || (frame.tf_eflags & PSL_VM))
			printf(
			    "pid %ld (%s): trap %d with interrupts disabled\n",
			    (long)curproc->p_pid, curproc->p_comm, type);
		else if (type != T_BPTFLT && type != T_TRCTRAP)
			/*
			 * XXX not quite right, since this may be for a
			 * multiple fault in user mode.
			 */
			printf("kernel trap %d with interrupts disabled\n",
			    type);
		enable_intr();
	}

	eva = 0;
	if (frame.tf_trapno == T_PAGEFLT) {
		/*
		 * For some Cyrix CPUs, %cr2 is clobbered by interrupts.
		 * This problem is worked around by using an interrupt
		 * gate for the pagefault handler.  We are finally ready
		 * to read %cr2 and then must reenable interrupts.
		 *
		 * XXX this should be in the switch statement, but the
		 * NO_F00F_HACK and VM86 goto and ifdefs obfuscate the
		 * flow of control too much for this to be obviously
		 * correct.
		 */
		eva = rcr2();
		enable_intr();
	}

#ifdef DEVICE_POLLING
	if (poll_in_trap)
		ether_poll(poll_in_trap);
#endif /* DEVICE_POLLING */

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
restart:
#endif
	type = frame.tf_trapno;
	code = frame.tf_err;

	if (in_vm86call) {
		if (frame.tf_eflags & PSL_VM &&
		    (type == T_PROTFLT || type == T_STKFLT)) {
			i = vm86_emulate((struct vm86frame *)&frame);
			if (i != 0)
				/*
				 * returns to original process
				 */
				vm86_trap((struct vm86frame *)&frame);
			return;
		}
		switch (type) {
			/*
			 * these traps want either a process context, or
			 * assume a normal userspace trap.
			 */
		case T_PROTFLT:
		case T_SEGNPFLT:
			trap_fatal(&frame, eva);
			return;
		case T_TRCTRAP:
			type = T_BPTFLT;	/* kernel breakpoint */
			/* FALL THROUGH */
		}
		goto kernel_trap;	/* normal kernel trap handling */
	}

	if ((ISPL(frame.tf_cs) == SEL_UPL) || (frame.tf_eflags & PSL_VM)) {
		/* user trap */

		sticks = p->p_sticks;
		p->p_md.md_regs = &frame;

		switch (type) {
		case T_PRIVINFLT:	/* privileged instruction fault */
			ucode = type;
			i = SIGILL;
			break;

		case T_BPTFLT:		/* bpt instruction fault */
		case T_TRCTRAP:		/* trace trap */
			frame.tf_eflags &= ~PSL_T;
			i = SIGTRAP;
			break;

		case T_ARITHTRAP:	/* arithmetic trap */
			ucode = code;
			i = SIGFPE;
			break;

		case T_ASTFLT:		/* Allow process switch */
			astoff();
			cnt.v_soft++;
			if (p->p_flag & P_OWEUPC) {
				p->p_flag &= ~P_OWEUPC;
				addupc_task(p, p->p_stats->p_prof.pr_addr,
					    p->p_stats->p_prof.pr_ticks);
			}
			goto out;

			/*
			 * The following two traps can happen in
			 * vm86 mode, and, if so, we want to handle
			 * them specially.
			 */
		case T_PROTFLT:		/* general protection fault */
		case T_STKFLT:		/* stack fault */
			if (frame.tf_eflags & PSL_VM) {
				i = vm86_emulate((struct vm86frame *)&frame);
				if (i == 0)
					goto out;
				break;
			}
			/* FALL THROUGH */

		case T_SEGNPFLT:	/* segment not present fault */
		case T_TSSFLT:		/* invalid TSS fault */
		case T_DOUBLEFLT:	/* double fault */
		default:
			ucode = code + BUS_SEGM_FAULT;
			i = SIGBUS;
			break;

		case T_PAGEFLT:		/* page fault */
			i = trap_pfault(&frame, TRUE, eva);
			if (i == -1)
				return;
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
			if (i == -2)
				goto restart;
#endif
			if (i == 0)
				goto out;

			ucode = T_PAGEFLT;
			break;

		case T_DIVIDE:		/* integer divide fault */
			ucode = FPE_INTDIV;
			i = SIGFPE;
			break;

#if NISA > 0
		case T_NMI:
#ifdef POWERFAIL_NMI
			goto handle_powerfail;
#else /* !POWERFAIL_NMI */
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					printf ("NMI ... going to debugger\n");
					kdb_trap (type, 0, &frame);
				}
#endif /* DDB */
				return;
			} else if (panic_on_nmi)
				panic("NMI indicates hardware failure");
			break;
#endif /* POWERFAIL_NMI */
#endif /* NISA > 0 */

		case T_OFLOW:		/* integer overflow fault */
			ucode = FPE_INTOVF;
			i = SIGFPE;
			break;

		case T_BOUND:		/* bounds check fault */
			ucode = FPE_FLTSUB;
			i = SIGFPE;
			break;

		case T_DNA:
#if NNPX > 0
			/* if a transparent fault (due to context switch "late") */
			if (npxdna())
				return;
#endif
			if (!pmath_emulate) {
				i = SIGFPE;
				ucode = FPE_FPU_NP_TRAP;
				break;
			}
			i = (*pmath_emulate)(&frame);
			if (i == 0) {
				if (!(frame.tf_eflags & PSL_T))
					return;
				frame.tf_eflags &= ~PSL_T;
				i = SIGTRAP;
			}
			/* else ucode = emulator_only_knows() XXX */
			break;

		case T_FPOPFLT:		/* FPU operand fetch fault */
			ucode = T_FPOPFLT;
			i = SIGILL;
			break;

		case T_XMMFLT:		/* SIMD floating-point exception */
			ucode = 0;	/* XXX */
			i = SIGFPE;
			break;
		}
	} else {
kernel_trap:
		/* kernel trap */

		switch (type) {
		case T_PAGEFLT:			/* page fault */
			(void) trap_pfault(&frame, FALSE, eva);
			return;

		case T_DNA:
#if NNPX > 0
			/*
			 * The kernel is apparently using npx for copying.
			 * XXX this should be fatal unless the kernel has
			 * registered such use.
			 */
			if (npxdna())
				return;
#endif
			break;

		case T_PROTFLT:		/* general protection fault */
		case T_SEGNPFLT:	/* segment not present fault */
			/*
			 * Invalid segment selectors and out of bounds
			 * %eip's and %esp's can be set up in user mode.
			 * This causes a fault in kernel mode when the
			 * kernel tries to return to user mode.  We want
			 * to get this fault so that we can fix the
			 * problem here and not have to check all the
			 * selectors and pointers when the user changes
			 * them.
			 */
#define	MAYBE_DORETI_FAULT(where, whereto)				\
	do {								\
		if (frame.tf_eip == (int)where) {			\
			frame.tf_eip = (int)whereto;			\
			return;						\
		}							\
	} while (0)
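
			/*
			 * For example, if the iret in doreti faults because
			 * user mode loaded a bogus %cs or %eip, the faulting
			 * kernel %eip equals doreti_iret and the macro above
			 * rewrites it to doreti_iret_fault, a recovery stub
			 * that turns the failed return to user mode into a
			 * signal instead of a panic.  The same applies to
			 * the segment register pops handled below.
			 */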

			if (intr_nesting_level == 0) {
				/*
				 * Invalid %fs's and %gs's can be created using
				 * procfs or PT_SETREGS or by invalidating the
				 * underlying LDT entry.  This causes a fault
				 * in kernel mode when the kernel attempts to
				 * switch contexts.  Lose the bad context
				 * (XXX) so that we can continue, and generate
				 * a signal.
				 */
				if (frame.tf_eip == (int)cpu_switch_load_gs) {
					curthread->td_pcb->pcb_gs = 0;
					psignal(p, SIGBUS);
					return;
				}
				MAYBE_DORETI_FAULT(doreti_iret,
						   doreti_iret_fault);
				MAYBE_DORETI_FAULT(doreti_popl_ds,
						   doreti_popl_ds_fault);
				MAYBE_DORETI_FAULT(doreti_popl_es,
						   doreti_popl_es_fault);
				MAYBE_DORETI_FAULT(doreti_popl_fs,
						   doreti_popl_fs_fault);
				if (curthread->td_pcb->pcb_onfault) {
					frame.tf_eip =
					    (int)curthread->td_pcb->pcb_onfault;
					return;
				}
			}
			break;

		case T_TSSFLT:
			/*
			 * PSL_NT can be set in user mode and isn't cleared
			 * automatically when the kernel is entered.  This
			 * causes a TSS fault when the kernel attempts to
			 * `iret' because the TSS link is uninitialized.  We
			 * want to get this fault so that we can fix the
			 * problem here and not every time the kernel is
			 * entered.
			 */
			if (frame.tf_eflags & PSL_NT) {
				frame.tf_eflags &= ~PSL_NT;
				return;
			}
			break;

		case T_TRCTRAP:	 /* trace trap */
			if (frame.tf_eip == (int)IDTVEC(syscall)) {
				/*
				 * We've just entered system mode via the
				 * syscall lcall.  Continue single stepping
				 * silently until the syscall handler has
				 * saved the flags.
				 */
				return;
			}
			if (frame.tf_eip == (int)IDTVEC(syscall) + 1) {
				/*
				 * The syscall handler has now saved the
				 * flags.  Stop single stepping it.
				 */
				frame.tf_eflags &= ~PSL_T;
				return;
			}
			/*
			 * Ignore debug register trace traps due to
			 * accesses in the user's address space, which
			 * can happen under several conditions such as
			 * if a user sets a watchpoint on a buffer and
			 * then passes that buffer to a system call.
			 * We still want to get TRCTRAPS for addresses
			 * in kernel space because that is useful when
			 * debugging the kernel.
			 */
			if (user_dbreg_trap()) {
				/*
				 * Reset breakpoint bits because the
				 * processor doesn't
				 */
				load_dr6(rdr6() & 0xfffffff0);
				return;
			}
			/*
			 * Fall through (TRCTRAP kernel mode, kernel address)
			 */
		case T_BPTFLT:
			/*
			 * If DDB is enabled, let it handle the debugger trap.
			 * Otherwise, debugger traps "can't happen".
			 */
#ifdef DDB
			if (kdb_trap (type, 0, &frame))
				return;
#endif
			break;

#if NISA > 0
		case T_NMI:
#ifdef POWERFAIL_NMI
#ifndef TIMER_FREQ
#define TIMER_FREQ	1193182
#endif
	handle_powerfail:
		{
			static unsigned lastalert = 0;

			if (time_second - lastalert > 10) {
				log(LOG_WARNING, "NMI: power fail\n");
				sysbeep(TIMER_FREQ/880, hz);
				lastalert = time_second;
			}
			return;
		}
#else /* !POWERFAIL_NMI */
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					printf ("NMI ... going to debugger\n");
					kdb_trap (type, 0, &frame);
				}
#endif /* DDB */
				return;
			} else if (panic_on_nmi == 0)
				return;
			/* FALL THROUGH */
#endif /* POWERFAIL_NMI */
#endif /* NISA > 0 */
		}

		trap_fatal(&frame, eva);
		return;
	}

	/* Translate fault for emulators (e.g. Linux) */
	if (*p->p_sysent->sv_transtrap)
		i = (*p->p_sysent->sv_transtrap)(i, type);

	trapsignal(p, i, ucode);

#ifdef DEBUG
	if (type <= MAX_TRAP_MSG) {
		uprintf("fatal process exception: %s",
			trap_msg[type]);
		if ((type == T_PAGEFLT) || (type == T_PROTFLT))
			uprintf(", fault VA = 0x%lx", (u_long)eva);
		uprintf("\n");
	}
#endif

out:
	userret(p, &frame, sticks, 1);
}
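
/*
 * Summary of the flow above: user-mode traps fall through to the signal
 * delivery code below the switch (trapsignal() plus userret()), while
 * kernel-mode traps either recover in place (pcb_onfault, the doreti
 * fixups, PSL_NT clearing, DDB) or end in trap_fatal().
 */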

#ifdef notyet
/*
 * This version doesn't allow a page fault to user space while
 * in the kernel.  The rest of the kernel needs to be made "safe"
 * before this can be used.  I think the only things remaining
 * to be made safe are the iBCS2 code and the process tracing/
 * debugging code.
 */
static int
trap_pfault(frame, usermode, eva)
	struct trapframe *frame;
	int usermode;
	vm_offset_t eva;
{
	vm_offset_t va;
	struct vmspace *vm = NULL;
	vm_map_t map = 0;
	int rv = 0;
	vm_prot_t ftype;
	struct proc *p = curproc;

	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_WRITE;
	else
		ftype = VM_PROT_READ;

	va = trunc_page(eva);
	if (va < VM_MIN_KERNEL_ADDRESS) {
		vm_offset_t v;
		vm_page_t mpte;

		if (p == NULL ||
		    (!usermode && va < VM_MAXUSER_ADDRESS &&
		     (intr_nesting_level != 0 ||
		      curthread->td_pcb->pcb_onfault == NULL))) {
			trap_fatal(frame, eva);
			return (-1);
		}

		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL.  If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */
		vm = p->p_vmspace;
		if (vm == NULL)
			goto nogo;

		map = &vm->vm_map;

		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		++p->p_lock;

		/*
		 * Grow the stack if necessary.  grow_stack() returns false
		 * only if va falls into a growable stack region and the
		 * stack growth fails.  It returns true if va was not within
		 * a growable stack region, or if the stack growth succeeded.
		 */
		if (!grow_stack (p, va)) {
			rv = KERN_FAILURE;
			--p->p_lock;
			goto nogo;
		}

		/* Fault in the user page: */
		rv = vm_fault(map, va, ftype,
			      (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
						      : VM_FAULT_NORMAL);

		--p->p_lock;
	} else {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 */
		if (usermode)
			goto nogo;

		/*
		 * Since we know that kernel virtual addresses always
		 * have pte pages mapped, we just have to fault
		 * the page.
		 */
		rv = vm_fault(kernel_map, va, ftype, VM_FAULT_NORMAL);
	}

	if (rv == KERN_SUCCESS)
		return (0);
nogo:
	if (!usermode) {
		if (intr_nesting_level == 0 && curthread->td_pcb->pcb_onfault) {
			frame->tf_eip = (int)curthread->td_pcb->pcb_onfault;
			return (0);
		}
		trap_fatal(frame, eva);
		return (-1);
	}

	/* kludge to pass faulting virtual address to sendsig */
	frame->tf_err = eva;

	return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}
#endif

int
trap_pfault(frame, usermode, eva)
	struct trapframe *frame;
	int usermode;
	vm_offset_t eva;
{
	vm_offset_t va;
	struct vmspace *vm = NULL;
	vm_map_t map = 0;
	int rv = 0;
	vm_prot_t ftype;
	struct proc *p = curproc;

	va = trunc_page(eva);
	if (va >= KERNBASE) {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 * An exception: if the faulting address is the invalid
		 * instruction entry in the IDT, then the Intel Pentium
		 * F00F bug workaround was triggered, and we need to
		 * treat it as an illegal instruction, and not a page
		 * fault.
		 */
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
		if ((eva == (unsigned int)&idt[6]) && has_f00f_bug) {
			frame->tf_trapno = T_PRIVINFLT;
			return -2;
		}
#endif
		if (usermode)
			goto nogo;

		map = kernel_map;
	} else {
		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL.  If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */
		if (p != NULL)
			vm = p->p_vmspace;

		if (vm == NULL)
			goto nogo;

		map = &vm->vm_map;
	}

	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_WRITE;
	else
		ftype = VM_PROT_READ;

	if (map != kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		++p->p_lock;

		/*
		 * Grow the stack if necessary.  grow_stack() returns false
		 * only if va falls into a growable stack region and the
		 * stack growth fails.  It returns true if va was not within
		 * a growable stack region, or if the stack growth succeeded.
		 */
		if (!grow_stack (p, va)) {
			rv = KERN_FAILURE;
			--p->p_lock;
			goto nogo;
		}

		/* Fault in the user page: */
		rv = vm_fault(map, va, ftype,
			      (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
						      : VM_FAULT_NORMAL);

		--p->p_lock;
	} else {
		/*
		 * Don't have to worry about process locking or stacks
		 * in the kernel.
		 */
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	}

	if (rv == KERN_SUCCESS)
		return (0);
nogo:
	if (!usermode) {
		if (intr_nesting_level == 0 && curthread->td_pcb->pcb_onfault) {
			frame->tf_eip = (int)curthread->td_pcb->pcb_onfault;
			return (0);
		}
		trap_fatal(frame, eva);
		return (-1);
	}

	/* kludge to pass faulting virtual address to sendsig */
	frame->tf_err = eva;

	return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}
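
/*
 * trap_pfault() return contract, as used by trap() above: 0 means the
 * fault was resolved, -1 means trap_fatal() has already run, -2 (the F00F
 * workaround) asks the caller to restart dispatch with the rewritten trap
 * number, and any other value is the signal number to deliver.
 *
 * A minimal sketch (hypothetical, not code from this file) of how a kernel
 * copy routine cooperates with the pcb_onfault recovery in nogo above: it
 * plants a recovery address before touching user memory, so a fault
 * resumes at the recovery label instead of panicking:
 *
 *	copy_fault_sketch(void *udata)			// hypothetical
 *	{
 *		curthread->td_pcb->pcb_onfault = (caddr_t)recovery_label;
 *		... touch udata; a fault resumes at recovery_label ...
 *		curthread->td_pcb->pcb_onfault = NULL;
 *	}
 */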

static void
trap_fatal(frame, eva)
	struct trapframe *frame;
	vm_offset_t eva;
{
	int code, type, ss, esp;
	struct soft_segment_descriptor softseg;

	code = frame->tf_err;
	type = frame->tf_trapno;
	sdtossd(&gdt[IDXSEL(frame->tf_cs & 0xffff)].sd, &softseg);

	if (type <= MAX_TRAP_MSG)
		printf("\n\nFatal trap %d: %s while in %s mode\n",
			type, trap_msg[type],
			frame->tf_eflags & PSL_VM ? "vm86" :
			ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
#ifdef SMP
	/* three separate prints in case of a trap on an unmapped page */
	printf("mp_lock = %08x; ", mp_lock);
	printf("cpuid = %d; ", cpuid);
	printf("lapic.id = %08x\n", lapic.id);
#endif
	if (type == T_PAGEFLT) {
		printf("fault virtual address	= 0x%x\n", eva);
		printf("fault code		= %s %s, %s\n",
			code & PGEX_U ? "user" : "supervisor",
			code & PGEX_W ? "write" : "read",
			code & PGEX_P ? "protection violation" : "page not present");
	}
	printf("instruction pointer	= 0x%x:0x%x\n",
	       frame->tf_cs & 0xffff, frame->tf_eip);
	if ((ISPL(frame->tf_cs) == SEL_UPL) || (frame->tf_eflags & PSL_VM)) {
		ss = frame->tf_ss & 0xffff;
		esp = frame->tf_esp;
	} else {
		ss = GSEL(GDATA_SEL, SEL_KPL);
		esp = (int)&frame->tf_esp;
	}
	printf("stack pointer		= 0x%x:0x%x\n", ss, esp);
	printf("frame pointer		= 0x%x:0x%x\n", ss, frame->tf_ebp);
	printf("code segment		= base 0x%x, limit 0x%x, type 0x%x\n",
	       softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
	printf("			= DPL %d, pres %d, def32 %d, gran %d\n",
	       softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_def32,
	       softseg.ssd_gran);
	printf("processor eflags	= ");
	if (frame->tf_eflags & PSL_T)
		printf("trace trap, ");
	if (frame->tf_eflags & PSL_I)
		printf("interrupt enabled, ");
	if (frame->tf_eflags & PSL_NT)
		printf("nested task, ");
	if (frame->tf_eflags & PSL_RF)
		printf("resume, ");
	if (frame->tf_eflags & PSL_VM)
		printf("vm86, ");
	printf("IOPL = %d\n", (frame->tf_eflags & PSL_IOPL) >> 12);
	printf("current process		= ");
	if (curproc) {
		printf("%lu (%s)\n",
		    (u_long)curproc->p_pid, curproc->p_comm ?
		    curproc->p_comm : "");
	} else {
		printf("Idle\n");
	}
	printf("current thread		= pri %d ", curthread->td_pri);
	if (curthread->td_pri >= TDPRI_CRIT)
		printf("(CRIT)");
	printf("\n");
	printf("interrupt mask		= ");
	if ((curthread->td_cpl & net_imask) == net_imask)
		printf("net ");
	if ((curthread->td_cpl & tty_imask) == tty_imask)
		printf("tty ");
	if ((curthread->td_cpl & bio_imask) == bio_imask)
		printf("bio ");
	if ((curthread->td_cpl & cam_imask) == cam_imask)
		printf("cam ");
	if (curthread->td_cpl == 0)
		printf("none");
#ifdef SMP
	/*
	 * XXX FIXME:
	 * we probably SHOULD have stopped the other CPUs before now!
	 * another CPU COULD have been touching cpl at this moment...
	 */
	printf(" <- SMP: XXX");
#endif
	printf("\n");

#ifdef KDB
	if (kdb_trap(&psl))
		return;
#endif
#ifdef DDB
	if ((debugger_on_panic || db_active) && kdb_trap(type, 0, frame))
		return;
#endif
	printf("trap number		= %d\n", type);
	if (type <= MAX_TRAP_MSG)
		panic("%s", trap_msg[type]);
	else
		panic("unknown/reserved trap");
}

/*
 * Double fault handler.  Called when a fault occurs while writing
 * a frame for a trap/exception onto the stack.  This usually occurs
 * when the stack overflows (such is the case with infinite recursion,
 * for example).
 *
 * XXX Note that the current PTD gets replaced by IdlePTD when the
 * task switch occurs.  This means that the stack that was active at
 * the time of the double fault is not available at <kstack> unless
 * the machine was idle when the double fault occurred.  The downside
 * of this is that "trace <ebp>" in ddb won't work.
 */
void
dblfault_handler()
{
	printf("\nFatal double fault:\n");
	printf("eip = 0x%x\n", common_tss.tss_eip);
	printf("esp = 0x%x\n", common_tss.tss_esp);
	printf("ebp = 0x%x\n", common_tss.tss_ebp);
#ifdef SMP
	/* three separate prints in case of a trap on an unmapped page */
	printf("mp_lock = %08x; ", mp_lock);
	printf("cpuid = %d; ", cpuid);
	printf("lapic.id = %08x\n", lapic.id);
#endif
	panic("double fault");
}

/*
 * Compensate for 386 brain damage (missing URKR).
 * This is a little simpler than the pagefault handler in trap() because
 * the page tables have already been faulted in and high addresses
 * are thrown out early for other reasons.
 */
int
trapwrite(addr)
	unsigned addr;
{
	struct proc *p;
	vm_offset_t va;
	struct vmspace *vm;
	int rv;

	va = trunc_page((vm_offset_t)addr);
	/*
	 * XXX - MAX is END.  Changed > to >= for temp. fix.
	 */
	if (va >= VM_MAXUSER_ADDRESS)
		return (1);

	p = curproc;
	vm = p->p_vmspace;

	++p->p_lock;

	if (!grow_stack (p, va)) {
		--p->p_lock;
		return (1);
	}

	/*
	 * fault the data page
	 */
	rv = vm_fault(&vm->vm_map, va, VM_PROT_WRITE, VM_FAULT_DIRTY);

	--p->p_lock;

	if (rv != KERN_SUCCESS)
		return 1;

	return (0);
}
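
/*
 * Background note: the 80386 ignores the page-table write-protect bit
 * while executing in supervisor mode, so a kernel write to a read-only
 * user page (e.g. a copy-on-write page during copyout) does not fault by
 * itself; the i486 and later honor the bit via CR0 WP.  That is the "386
 * brain damage" above: support code calls trapwrite() to force the write
 * fault the hardware failed to deliver, getting back 0 on success and 1
 * if the address cannot be made writable.
 */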

/*
 * syscall2 -	MP aware system call request C handler
 *
 * A system call is essentially treated as a trap except that the
 * MP lock is not held on entry or return.  We are responsible for
 * obtaining the MP lock if necessary and for handling ASTs
 * (e.g. a task switch) prior to return.
 *
 * In general, only simple access and manipulation of curproc and
 * the current stack is allowed without having to hold MP lock.
 */
void
syscall2(frame)
	struct trapframe frame;
{
	caddr_t params;
	int i;
	struct sysent *callp;
	struct proc *p = curproc;
	register_t orig_tf_eflags;
	u_quad_t sticks;
	int error;
	int narg;
	int args[8];
	int have_mplock = 0;
	u_int code;

#ifdef DIAGNOSTIC
	if (ISPL(frame.tf_cs) != SEL_UPL) {
		get_mplock();
		panic("syscall");
		/* NOT REACHED */
	}
#endif

	/*
	 * Handle atomicity by looping, since interrupts are enabled and the
	 * MP lock is not held: resample p_sticks until it reads stable.
	 */
	sticks = ((volatile struct proc *)p)->p_sticks;
	while (sticks != ((volatile struct proc *)p)->p_sticks)
		sticks = ((volatile struct proc *)p)->p_sticks;

	p->p_md.md_regs = &frame;
	params = (caddr_t)frame.tf_esp + sizeof(int);
	code = frame.tf_eax;
	orig_tf_eflags = frame.tf_eflags;

	if (p->p_sysent->sv_prepsyscall) {
		/*
		 * The prep code is not MP aware.
		 */
		get_mplock();
		(*p->p_sysent->sv_prepsyscall)(&frame, args, &code, &params);
		rel_mplock();
	} else {
		/*
		 * Need to check if this is a 32 bit or 64 bit syscall.
		 * fuword is MP aware.
		 */
		if (code == SYS_syscall) {
			/*
			 * Code is first argument, followed by actual args.
			 */
			code = fuword(params);
			params += sizeof(int);
		} else if (code == SYS___syscall) {
			/*
			 * Like syscall, but code is a quad, so as to maintain
			 * quad alignment for the rest of the arguments.
			 */
			code = fuword(params);
			params += sizeof(quad_t);
		}
	}
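
	/*
	 * Example of the indirection above: a userland call such as
	 * syscall(SYS_getpid) enters the kernel with %eax == SYS_syscall
	 * and the real syscall number (SYS_getpid) as the first word on
	 * the user stack, so we fetch the number with fuword() and advance
	 * params past it so the remaining words line up as the actual
	 * arguments.
	 */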

	if (p->p_sysent->sv_mask)
		code &= p->p_sysent->sv_mask;

	if (code >= p->p_sysent->sv_size)
		callp = &p->p_sysent->sv_table[0];
	else
		callp = &p->p_sysent->sv_table[code];

	narg = callp->sy_narg & SYF_ARGMASK;

	/*
	 * copyin is MP aware, but the tracing code is not
	 */
	if (params && (i = narg * sizeof(int)) &&
	    (error = copyin(params, (caddr_t)args, (u_int)i))) {
		get_mplock();
		have_mplock = 1;
#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSCALL))
			ktrsyscall(p->p_tracep, code, narg, args);
#endif
		goto bad;
	}

	/*
	 * Try to run the syscall without the MP lock if the syscall
	 * is MP safe.  We have to obtain the MP lock no matter what if
	 * we are ktracing.
	 */
	if ((callp->sy_narg & SYF_MPSAFE) == 0) {
		get_mplock();
		have_mplock = 1;
	}

#ifdef KTRACE
	if (KTRPOINT(p, KTR_SYSCALL)) {
		if (have_mplock == 0) {
			get_mplock();
			have_mplock = 1;
		}
		ktrsyscall(p->p_tracep, code, narg, args);
	}
#endif
	p->p_retval[0] = 0;
	p->p_retval[1] = frame.tf_edx;

	STOPEVENT(p, S_SCE, narg);	/* MP aware */

	error = (*callp->sy_call)(p, args);

	/*
	 * MP SAFE (we may or may not have the MP lock at this point)
	 */
	switch (error) {
	case 0:
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		frame.tf_eax = p->p_retval[0];
		frame.tf_edx = p->p_retval[1];
		frame.tf_eflags &= ~PSL_C;
		break;

	case ERESTART:
		/*
		 * Reconstruct pc, assuming lcall $X,$y is 7 bytes,
		 * int 0x80 is 2 bytes.  We saved this in tf_err.
		 */
		frame.tf_eip -= frame.tf_err;
		break;

	case EJUSTRETURN:
		break;

	default:
bad:
		if (p->p_sysent->sv_errsize) {
			if (error >= p->p_sysent->sv_errsize)
				error = -1;	/* XXX */
			else
				error = p->p_sysent->sv_errtbl[error];
		}
		frame.tf_eax = error;
		frame.tf_eflags |= PSL_C;
		break;
	}

	/*
	 * Traced syscall.  trapsignal() is not MP aware.
	 */
	if ((orig_tf_eflags & PSL_T) && !(orig_tf_eflags & PSL_VM)) {
		if (have_mplock == 0) {
			get_mplock();
			have_mplock = 1;
		}
		frame.tf_eflags &= ~PSL_T;
		trapsignal(p, SIGTRAP, 0);
	}

	/*
	 * Handle reschedule and other end-of-syscall issues
	 */
	have_mplock = userret(p, &frame, sticks, have_mplock);

#ifdef KTRACE
	if (KTRPOINT(p, KTR_SYSRET)) {
		if (have_mplock == 0) {
			get_mplock();
			have_mplock = 1;
		}
		ktrsysret(p->p_tracep, code, error, p->p_retval[0]);
	}
#endif

	/*
	 * This works because errno is findable through the
	 * register set.  If we ever support an emulation where this
	 * is not the case, this code will need to be revisited.
	 */
	STOPEVENT(p, S_SCX, code);

	/*
	 * Release the MP lock if we had to get it.
	 */
	if (have_mplock)
		rel_mplock();
}

/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.  MP lock is held on entry and should be
 * held on return.
 */
void
fork_return(p, frame)
	struct proc *p;
	struct trapframe frame;
{
	frame.tf_eax = 0;		/* Child returns zero */
	frame.tf_eflags &= ~PSL_C;	/* success */
	frame.tf_edx = 1;

	userret(p, &frame, 0, 1);
#ifdef KTRACE
	if (KTRPOINT(p, KTR_SYSRET))
		ktrsysret(p->p_tracep, SYS_fork, 0, 0);
#endif
}