/*-
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $
 * $DragonFly: src/sys/platform/pc32/i386/trap.c,v 1.8 2003/06/25 03:55:53 dillon Exp $
 */

/*
 * 386 Trap and System call handling
 */

#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_ktrace.h"
#include "opt_clock.h"
#include "opt_trap.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pioctl.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>

#include <machine/cpu.h>
#include <machine/ipl.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#ifdef SMP
#include <machine/smp.h>
#endif
#include <machine/tss.h>

#include <i386/isa/intr_machdep.h>

#ifdef POWERFAIL_NMI
#include <sys/syslog.h>
#include <machine/clock.h>
#endif

#include <machine/vm86.h>

#include <ddb/ddb.h>
#include <sys/thread2.h>

#include "isa.h"
#include "npx.h"

int (*pmath_emulate) __P((struct trapframe *));

extern void trap __P((struct trapframe frame));
extern int trapwrite __P((unsigned addr));
extern void syscall2 __P((struct trapframe frame));

static int trap_pfault __P((struct trapframe *, int, vm_offset_t));
static void trap_fatal __P((struct trapframe *, vm_offset_t));
void dblfault_handler __P((void));

extern inthand_t IDTVEC(syscall);

#define MAX_TRAP_MSG		28
static char *trap_msg[] = {
	"",					/*  0 unused */
	"privileged instruction fault",		/*  1 T_PRIVINFLT */
	"",					/*  2 unused */
	"breakpoint instruction fault",		/*  3 T_BPTFLT */
	"",					/*  4 unused */
	"",					/*  5 unused */
	"arithmetic trap",			/*  6 T_ARITHTRAP */
	"system forced exception",		/*  7 T_ASTFLT */
	"",					/*  8 unused */
	"general protection fault",		/*  9 T_PROTFLT */
	"trace trap",				/* 10 T_TRCTRAP */
	"",					/* 11 unused */
	"page fault",				/* 12 T_PAGEFLT */
	"",					/* 13 unused */
	"alignment fault",			/* 14 T_ALIGNFLT */
	"",					/* 15 unused */
	"",					/* 16 unused */
	"",					/* 17 unused */
	"integer divide fault",			/* 18 T_DIVIDE */
	"non-maskable interrupt trap",		/* 19 T_NMI */
	"overflow trap",			/* 20 T_OFLOW */
	"FPU bounds check fault",		/* 21 T_BOUND */
	"FPU device not available",		/* 22 T_DNA */
	"double fault",				/* 23 T_DOUBLEFLT */
	"FPU operand fetch fault",		/* 24 T_FPOPFLT */
	"invalid TSS fault",			/* 25 T_TSSFLT */
	"segment not present fault",		/* 26 T_SEGNPFLT */
	"stack fault",				/* 27 T_STKFLT */
	"machine check trap",			/* 28 T_MCHK */
};

static __inline int userret __P((struct proc *p, struct trapframe *frame,
				  u_quad_t oticks, int have_mplock));

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
extern int has_f00f_bug;
#endif

#ifdef DDB
static int ddb_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, ddb_on_nmi, CTLFLAG_RW,
	&ddb_on_nmi, 0, "Go to DDB on NMI");
#endif
static int panic_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
	&panic_on_nmi, 0, "Panic on NMI");

static __inline int
userret(p, frame, oticks, have_mplock)
	struct proc *p;
	struct trapframe *frame;
	u_quad_t oticks;
	int have_mplock;
{
	int sig, s;
	struct thread *td;

	while ((sig = CURSIG(p)) != 0) {
		if (have_mplock == 0) {
			get_mplock();
			have_mplock = 1;
		}
		postsig(sig);
	}

	p->p_priority = p->p_usrpri;
	if (resched_wanted()) {
		/*
		 * Since we are curproc, a clock interrupt will normally just
		 * change our priority without moving us from one queue to
		 * another (since the running process is not on a queue).
		 * If that happened after we setrunqueue ourselves but before
		 * we mi_switch()'ed, we might not be on the queue indicated
		 * by our priority.
		 */
		if (have_mplock == 0) {
			get_mplock();
			have_mplock = 1;
		}
		s = splhigh();
		setrunqueue(p);
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		splx(s);
		while ((sig = CURSIG(p)) != 0)
			postsig(sig);
	}
	/*
	 * Charge system time if profiling.
	 */
	if (p->p_flag & P_PROFIL) {
		if (have_mplock == 0) {
			get_mplock();
			have_mplock = 1;
		}
		td = curthread;
		addupc_task(p, frame->tf_eip,
			    (u_int)(td->td_sticks - oticks) * psratio);
	}
	curpriority = p->p_priority;
	return(have_mplock);
}
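
/*
 * Usage sketch (mirrors the call sites below): callers thread their MP
 * lock state through userret() and release the lock afterwards if it was
 * (or became) held:
 *
 *	have_mplock = userret(p, &frame, sticks, have_mplock);
 *	if (have_mplock)
 *		rel_mplock();
 */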

#ifdef DEVICE_POLLING
extern u_int32_t poll_in_trap;
extern int ether_poll __P((int count));
#endif /* DEVICE_POLLING */

/*
 * Exception, fault, and trap interface to the FreeBSD kernel.
 * This common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed.
 */

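/*
 * Rough sketch of that entry path, for orientation only (the actual
 * stubs live in the platform assembly, conventionally exception.s;
 * the names below are illustrative):
 *
 *	IDTVEC(xxx):
 *		pushl	$0		; dummy error code when the CPU
 *					; does not supply one
 *		pushl	$T_xxx		; trap number
 *		jmp	alltraps	; saves segment and general
 *					; registers, building the struct
 *					; trapframe, then calls trap()
 *					; with the frame by value
 */
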
void
trap(frame)
	struct trapframe frame;
{
	struct proc *p = curproc;
	u_quad_t sticks = 0;
	int i = 0, ucode = 0, type, code;
	vm_offset_t eva;

#ifdef DDB
	if (db_active) {
		eva = (frame.tf_trapno == T_PAGEFLT ? rcr2() : 0);
		trap_fatal(&frame, eva);
		return;
	}
#endif

	if (!(frame.tf_eflags & PSL_I)) {
		/*
		 * Buggy application or kernel code has disabled interrupts
		 * and then trapped.  Enabling interrupts now is wrong, but
		 * it is better than running with interrupts disabled until
		 * they are accidentally enabled later.
		 */
		type = frame.tf_trapno;
		if (ISPL(frame.tf_cs) == SEL_UPL || (frame.tf_eflags & PSL_VM))
			printf(
			    "pid %ld (%s): trap %d with interrupts disabled\n",
			    (long)curproc->p_pid, curproc->p_comm, type);
		else if (type != T_BPTFLT && type != T_TRCTRAP)
			/*
			 * XXX not quite right, since this may be for a
			 * multiple fault in user mode.
			 */
			printf("kernel trap %d with interrupts disabled\n",
			    type);
		enable_intr();
	}

	eva = 0;
	if (frame.tf_trapno == T_PAGEFLT) {
		/*
		 * For some Cyrix CPUs, %cr2 is clobbered by interrupts.
		 * This problem is worked around by using an interrupt
		 * gate for the page fault handler.  We are finally ready
		 * to read %cr2 and then must reenable interrupts.
		 *
		 * XXX this should be in the switch statement, but the
		 * NO_F00F_HACK and VM86 goto and ifdefs obfuscate the
		 * flow of control too much for this to be obviously
		 * correct.
		 */
		eva = rcr2();
		enable_intr();
	}

#ifdef DEVICE_POLLING
	if (poll_in_trap)
		ether_poll(poll_in_trap);
#endif /* DEVICE_POLLING */

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
restart:
#endif
	type = frame.tf_trapno;
	code = frame.tf_err;

	if (in_vm86call) {
		if (frame.tf_eflags & PSL_VM &&
		    (type == T_PROTFLT || type == T_STKFLT)) {
			i = vm86_emulate((struct vm86frame *)&frame);
			if (i != 0)
				/*
				 * returns to original process
				 */
				vm86_trap((struct vm86frame *)&frame);
			return;
		}
		switch (type) {
			/*
			 * these traps want either a process context, or
			 * assume a normal userspace trap.
			 */
		case T_PROTFLT:
		case T_SEGNPFLT:
			trap_fatal(&frame, eva);
			return;
		case T_TRCTRAP:
			type = T_BPTFLT;	/* kernel breakpoint */
			/* FALL THROUGH */
		}
		goto kernel_trap;	/* normal kernel trap handling */
	}

	if ((ISPL(frame.tf_cs) == SEL_UPL) || (frame.tf_eflags & PSL_VM)) {
		/* user trap */

		sticks = curthread->td_sticks;
		p->p_md.md_regs = &frame;

		switch (type) {
		case T_PRIVINFLT:	/* privileged instruction fault */
			ucode = type;
			i = SIGILL;
			break;

		case T_BPTFLT:		/* bpt instruction fault */
		case T_TRCTRAP:		/* trace trap */
			frame.tf_eflags &= ~PSL_T;
			i = SIGTRAP;
			break;

		case T_ARITHTRAP:	/* arithmetic trap */
			ucode = code;
			i = SIGFPE;
			break;

		case T_ASTFLT:		/* Allow process switch */
			astoff();
			cnt.v_soft++;
			if (p->p_flag & P_OWEUPC) {
				p->p_flag &= ~P_OWEUPC;
				addupc_task(p, p->p_stats->p_prof.pr_addr,
					    p->p_stats->p_prof.pr_ticks);
			}
			goto out;

			/*
			 * The following two traps can happen in
			 * vm86 mode, and, if so, we want to handle
			 * them specially.
			 */
		case T_PROTFLT:		/* general protection fault */
		case T_STKFLT:		/* stack fault */
			if (frame.tf_eflags & PSL_VM) {
				i = vm86_emulate((struct vm86frame *)&frame);
				if (i == 0)
					goto out;
				break;
			}
			/* FALL THROUGH */

		case T_SEGNPFLT:	/* segment not present fault */
		case T_TSSFLT:		/* invalid TSS fault */
		case T_DOUBLEFLT:	/* double fault */
		default:
			ucode = code + BUS_SEGM_FAULT;
			i = SIGBUS;
			break;

		case T_PAGEFLT:		/* page fault */
			i = trap_pfault(&frame, TRUE, eva);
			if (i == -1)
				return;
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
			if (i == -2)
				goto restart;
#endif
			if (i == 0)
				goto out;

			ucode = T_PAGEFLT;
			break;

		case T_DIVIDE:		/* integer divide fault */
			ucode = FPE_INTDIV;
			i = SIGFPE;
			break;

#if NISA > 0
		case T_NMI:
#ifdef POWERFAIL_NMI
			goto handle_powerfail;
#else /* !POWERFAIL_NMI */
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					printf("NMI ... going to debugger\n");
					kdb_trap(type, 0, &frame);
				}
#endif /* DDB */
				return;
			} else if (panic_on_nmi)
				panic("NMI indicates hardware failure");
			break;
#endif /* POWERFAIL_NMI */
#endif /* NISA > 0 */

		case T_OFLOW:		/* integer overflow fault */
			ucode = FPE_INTOVF;
			i = SIGFPE;
			break;

		case T_BOUND:		/* bounds check fault */
			ucode = FPE_FLTSUB;
			i = SIGFPE;
			break;

		case T_DNA:
#if NNPX > 0
			/* a transparent fault (due to a "late" context switch) */
			if (npxdna())
				return;
#endif
			if (!pmath_emulate) {
				i = SIGFPE;
				ucode = FPE_FPU_NP_TRAP;
				break;
			}
			i = (*pmath_emulate)(&frame);
			if (i == 0) {
				if (!(frame.tf_eflags & PSL_T))
					return;
				frame.tf_eflags &= ~PSL_T;
				i = SIGTRAP;
			}
			/* else ucode = emulator_only_knows() XXX */
			break;

		case T_FPOPFLT:		/* FPU operand fetch fault */
			ucode = T_FPOPFLT;
			i = SIGILL;
			break;

		case T_XMMFLT:		/* SIMD floating-point exception */
			ucode = 0;	/* XXX */
			i = SIGFPE;
			break;
		}
	} else {
kernel_trap:
		/* kernel trap */

		switch (type) {
		case T_PAGEFLT:		/* page fault */
			(void) trap_pfault(&frame, FALSE, eva);
			return;

		case T_DNA:
#if NNPX > 0
			/*
			 * The kernel is apparently using npx for copying.
			 * XXX this should be fatal unless the kernel has
			 * registered such use.
			 */
			if (npxdna())
				return;
#endif
			break;

		case T_PROTFLT:		/* general protection fault */
		case T_SEGNPFLT:	/* segment not present fault */
			/*
			 * Invalid segment selectors and out of bounds
			 * %eip's and %esp's can be set up in user mode.
			 * This causes a fault in kernel mode when the
			 * kernel tries to return to user mode.  We want
			 * to get this fault so that we can fix the
			 * problem here and not have to check all the
			 * selectors and pointers when the user changes
			 * them.
			 */
#define MAYBE_DORETI_FAULT(where, whereto)				\
	do {								\
		if (frame.tf_eip == (int)where) {			\
			frame.tf_eip = (int)whereto;			\
			return;						\
		}							\
	} while (0)
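
/*
 * For example, MAYBE_DORETI_FAULT(doreti_iret, doreti_iret_fault)
 * expands to:
 *
 *	do {
 *		if (frame.tf_eip == (int)doreti_iret) {
 *			frame.tf_eip = (int)doreti_iret_fault;
 *			return;
 *		}
 *	} while (0);
 */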

			if (intr_nesting_level == 0) {
				/*
				 * Invalid %fs's and %gs's can be created using
				 * procfs or PT_SETREGS or by invalidating the
				 * underlying LDT entry.  This causes a fault
				 * in kernel mode when the kernel attempts to
				 * switch contexts.  Lose the bad context
				 * (XXX) so that we can continue, and generate
				 * a signal.
				 */
				if (frame.tf_eip == (int)cpu_switch_load_gs) {
					curthread->td_pcb->pcb_gs = 0;
					psignal(p, SIGBUS);
					return;
				}
				MAYBE_DORETI_FAULT(doreti_iret,
						   doreti_iret_fault);
				MAYBE_DORETI_FAULT(doreti_popl_ds,
						   doreti_popl_ds_fault);
				MAYBE_DORETI_FAULT(doreti_popl_es,
						   doreti_popl_es_fault);
				MAYBE_DORETI_FAULT(doreti_popl_fs,
						   doreti_popl_fs_fault);
				if (curthread->td_pcb->pcb_onfault) {
					frame.tf_eip =
					    (int)curthread->td_pcb->pcb_onfault;
					return;
				}
			}
			break;

		case T_TSSFLT:
			/*
			 * PSL_NT can be set in user mode and isn't cleared
			 * automatically when the kernel is entered.  This
			 * causes a TSS fault when the kernel attempts to
			 * `iret' because the TSS link is uninitialized.  We
			 * want to get this fault so that we can fix the
			 * problem here and not every time the kernel is
			 * entered.
			 */
			if (frame.tf_eflags & PSL_NT) {
				frame.tf_eflags &= ~PSL_NT;
				return;
			}
			break;

		case T_TRCTRAP:		/* trace trap */
			if (frame.tf_eip == (int)IDTVEC(syscall)) {
				/*
				 * We've just entered system mode via the
				 * syscall lcall.  Continue single stepping
				 * silently until the syscall handler has
				 * saved the flags.
				 */
				return;
			}
			if (frame.tf_eip == (int)IDTVEC(syscall) + 1) {
				/*
				 * The syscall handler has now saved the
				 * flags.  Stop single stepping it.
				 */
				frame.tf_eflags &= ~PSL_T;
				return;
			}
			/*
			 * Ignore debug register trace traps due to
			 * accesses in the user's address space, which
			 * can happen under several conditions such as
			 * if a user sets a watchpoint on a buffer and
			 * then passes that buffer to a system call.
			 * We still want to get TRCTRAPS for addresses
			 * in kernel space because that is useful when
			 * debugging the kernel.
			 */
			if (user_dbreg_trap()) {
				/*
				 * Reset breakpoint bits because the
				 * processor doesn't clear them itself.
				 */
				load_dr6(rdr6() & 0xfffffff0);
				return;
			}
			/*
			 * Fall through (TRCTRAP kernel mode, kernel address)
			 */
		case T_BPTFLT:
			/*
			 * If DDB is enabled, let it handle the debugger trap.
			 * Otherwise, debugger traps "can't happen".
			 */
#ifdef DDB
			if (kdb_trap(type, 0, &frame))
				return;
#endif
			break;

#if NISA > 0
		case T_NMI:
#ifdef POWERFAIL_NMI
#ifndef TIMER_FREQ
#define TIMER_FREQ	1193182
#endif
handle_powerfail:
		{
			static unsigned lastalert = 0;

			if (time_second - lastalert > 10) {
				log(LOG_WARNING, "NMI: power fail\n");
				sysbeep(TIMER_FREQ / 880, hz);
				lastalert = time_second;
			}
			return;
		}
#else /* !POWERFAIL_NMI */
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					printf("NMI ... going to debugger\n");
					kdb_trap(type, 0, &frame);
				}
#endif /* DDB */
				return;
			} else if (panic_on_nmi == 0)
				return;
			/* FALL THROUGH */
#endif /* POWERFAIL_NMI */
#endif /* NISA > 0 */
		}

		trap_fatal(&frame, eva);
		return;
	}

	/* Translate fault for emulators (e.g. Linux) */
	if (*p->p_sysent->sv_transtrap)
		i = (*p->p_sysent->sv_transtrap)(i, type);

	trapsignal(p, i, ucode);

#ifdef DEBUG
	if (type <= MAX_TRAP_MSG) {
		uprintf("fatal process exception: %s",
			trap_msg[type]);
		if ((type == T_PAGEFLT) || (type == T_PROTFLT))
			uprintf(", fault VA = 0x%lx", (u_long)eva);
		uprintf("\n");
	}
#endif

out:
	userret(p, &frame, sticks, 1);
}

#ifdef notyet
/*
 * This version doesn't allow a page fault to user space while
 * in the kernel.  The rest of the kernel needs to be made "safe"
 * before this can be used.  I think the only things remaining
 * to be made safe are the iBCS2 code and the process tracing/
 * debugging code.
 */
static int
trap_pfault(frame, usermode, eva)
	struct trapframe *frame;
	int usermode;
	vm_offset_t eva;
{
	vm_offset_t va;
	struct vmspace *vm = NULL;
	vm_map_t map = 0;
	int rv = 0;
	vm_prot_t ftype;
	struct proc *p = curproc;

	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_WRITE;
	else
		ftype = VM_PROT_READ;

	va = trunc_page(eva);
	if (va < VM_MIN_KERNEL_ADDRESS) {
		vm_offset_t v;
		vm_page_t mpte;

		if (p == NULL ||
		    (!usermode && va < VM_MAXUSER_ADDRESS &&
		     (intr_nesting_level != 0 ||
		      curthread->td_pcb->pcb_onfault == NULL))) {
			trap_fatal(frame, eva);
			return (-1);
		}

		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL.  If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */
		vm = p->p_vmspace;
		if (vm == NULL)
			goto nogo;

		map = &vm->vm_map;

		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		++p->p_lock;

		/*
		 * Grow the stack if necessary.  grow_stack() returns
		 * false only if va falls into a growable stack region
		 * and the stack growth fails.  It returns true if va
		 * was not within a growable stack region, or if the
		 * stack growth succeeded.
		 */
		if (!grow_stack(p, va)) {
			rv = KERN_FAILURE;
			--p->p_lock;
			goto nogo;
		}

		/* Fault in the user page: */
		rv = vm_fault(map, va, ftype,
			      (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
						      : VM_FAULT_NORMAL);

		--p->p_lock;
	} else {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 */
		if (usermode)
			goto nogo;

		/*
		 * Since we know that kernel virtual addresses always
		 * have pte pages mapped, we just have to fault the page.
		 */
		rv = vm_fault(kernel_map, va, ftype, VM_FAULT_NORMAL);
	}

	if (rv == KERN_SUCCESS)
		return (0);
nogo:
	if (!usermode) {
		if (intr_nesting_level == 0 && curthread->td_pcb->pcb_onfault) {
			frame->tf_eip = (int)curthread->td_pcb->pcb_onfault;
			return (0);
		}
		trap_fatal(frame, eva);
		return (-1);
	}

	/* kludge to pass faulting virtual address to sendsig */
	frame->tf_err = eva;

	return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}
#endif

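/*
 * trap_pfault() return convention, as consumed by trap() above:
 *
 *	 0  fault resolved; resume the faulting context
 *	-1  fatal; trap_fatal() has already been called
 *	-2  retry the trap (the F00F workaround rewrote tf_trapno)
 *	>0  signal number (SIGBUS/SIGSEGV) to deliver to the process
 */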
int
trap_pfault(frame, usermode, eva)
	struct trapframe *frame;
	int usermode;
	vm_offset_t eva;
{
	vm_offset_t va;
	struct vmspace *vm = NULL;
	vm_map_t map = 0;
	int rv = 0;
	vm_prot_t ftype;
	struct proc *p = curproc;

	va = trunc_page(eva);
	if (va >= KERNBASE) {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 * An exception: if the faulting address is the invalid
		 * instruction entry in the IDT, then the Intel Pentium
		 * F00F bug workaround was triggered, and we need to
		 * treat it as an illegal instruction, and not a page
		 * fault.
		 */
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
		if ((eva == (unsigned int)&idt[6]) && has_f00f_bug) {
			frame->tf_trapno = T_PRIVINFLT;
			return -2;
		}
#endif
		if (usermode)
			goto nogo;

		map = kernel_map;
	} else {
		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL.  If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */
		if (p != NULL)
			vm = p->p_vmspace;

		if (vm == NULL)
			goto nogo;

		map = &vm->vm_map;
	}

	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_WRITE;
	else
		ftype = VM_PROT_READ;

	if (map != kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		++p->p_lock;

		/*
		 * Grow the stack if necessary.  grow_stack() returns
		 * false only if va falls into a growable stack region
		 * and the stack growth fails.  It returns true if va
		 * was not within a growable stack region, or if the
		 * stack growth succeeded.
		 */
		if (!grow_stack(p, va)) {
			rv = KERN_FAILURE;
			--p->p_lock;
			goto nogo;
		}

		/* Fault in the user page: */
		rv = vm_fault(map, va, ftype,
			      (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
						      : VM_FAULT_NORMAL);

		--p->p_lock;
	} else {
		/*
		 * Don't have to worry about process locking or stacks
		 * in the kernel.
		 */
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	}

	if (rv == KERN_SUCCESS)
		return (0);
nogo:
	if (!usermode) {
		if (intr_nesting_level == 0 && curthread->td_pcb->pcb_onfault) {
			frame->tf_eip = (int)curthread->td_pcb->pcb_onfault;
			return (0);
		}
		trap_fatal(frame, eva);
		return (-1);
	}

	/* kludge to pass faulting virtual address to sendsig */
	frame->tf_err = eva;

	return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}

static void
trap_fatal(frame, eva)
	struct trapframe *frame;
	vm_offset_t eva;
{
	int code, type, ss, esp;
	struct soft_segment_descriptor softseg;

	code = frame->tf_err;
	type = frame->tf_trapno;
	sdtossd(&gdt[IDXSEL(frame->tf_cs & 0xffff)].sd, &softseg);

	if (type <= MAX_TRAP_MSG)
		printf("\n\nFatal trap %d: %s while in %s mode\n",
			type, trap_msg[type],
			frame->tf_eflags & PSL_VM ? "vm86" :
			ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
#ifdef SMP
	/* three separate prints in case of a trap on an unmapped page */
	printf("mp_lock = %08x; ", mp_lock);
	printf("cpuid = %d; ", cpuid);
	printf("lapic.id = %08x\n", lapic.id);
#endif
	if (type == T_PAGEFLT) {
		printf("fault virtual address	= 0x%x\n", eva);
		printf("fault code		= %s %s, %s\n",
			code & PGEX_U ? "user" : "supervisor",
			code & PGEX_W ? "write" : "read",
			code & PGEX_P ? "protection violation" : "page not present");
	}
	printf("instruction pointer	= 0x%x:0x%x\n",
	       frame->tf_cs & 0xffff, frame->tf_eip);
	if ((ISPL(frame->tf_cs) == SEL_UPL) || (frame->tf_eflags & PSL_VM)) {
		ss = frame->tf_ss & 0xffff;
		esp = frame->tf_esp;
	} else {
		ss = GSEL(GDATA_SEL, SEL_KPL);
		esp = (int)&frame->tf_esp;
	}
	printf("stack pointer		= 0x%x:0x%x\n", ss, esp);
	printf("frame pointer		= 0x%x:0x%x\n", ss, frame->tf_ebp);
	printf("code segment		= base 0x%x, limit 0x%x, type 0x%x\n",
	       softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
	printf("			= DPL %d, pres %d, def32 %d, gran %d\n",
	       softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_def32,
	       softseg.ssd_gran);
	printf("processor eflags	= ");
	if (frame->tf_eflags & PSL_T)
		printf("trace trap, ");
	if (frame->tf_eflags & PSL_I)
		printf("interrupt enabled, ");
	if (frame->tf_eflags & PSL_NT)
		printf("nested task, ");
	if (frame->tf_eflags & PSL_RF)
		printf("resume, ");
	if (frame->tf_eflags & PSL_VM)
		printf("vm86, ");
	printf("IOPL = %d\n", (frame->tf_eflags & PSL_IOPL) >> 12);
	printf("current process		= ");
	if (curproc) {
		printf("%lu (%s)\n",
		    (u_long)curproc->p_pid, curproc->p_comm ?
		    curproc->p_comm : "");
	} else {
		printf("Idle\n");
	}
	printf("current thread		= pri %d ", curthread->td_pri);
	if (curthread->td_pri >= TDPRI_CRIT)
		printf("(CRIT)");
	printf("\n");
	printf("interrupt mask		= ");
	if ((curthread->td_cpl & net_imask) == net_imask)
		printf("net ");
	if ((curthread->td_cpl & tty_imask) == tty_imask)
		printf("tty ");
	if ((curthread->td_cpl & bio_imask) == bio_imask)
		printf("bio ");
	if ((curthread->td_cpl & cam_imask) == cam_imask)
		printf("cam ");
	if (curthread->td_cpl == 0)
		printf("none");
#ifdef SMP
/**
 *  XXX FIXME:
 *	we probably SHOULD have stopped the other CPUs before now!
 *	another CPU COULD have been touching cpl at this moment...
 */
	printf(" <- SMP: XXX");
#endif
	printf("\n");

#ifdef KDB
	if (kdb_trap(&psl))
		return;
#endif
#ifdef DDB
	if ((debugger_on_panic || db_active) && kdb_trap(type, 0, frame))
		return;
#endif
	printf("trap number		= %d\n", type);
	if (type <= MAX_TRAP_MSG)
		panic("%s", trap_msg[type]);
	else
		panic("unknown/reserved trap");
}

/*
 * Double fault handler.  Called when a fault occurs while writing
 * a frame for a trap/exception onto the stack.  This usually occurs
 * when the stack overflows (such is the case with infinite recursion,
 * for example).
 *
 * XXX Note that the current PTD gets replaced by IdlePTD when the
 * task switch occurs.  This means that the stack that was active at
 * the time of the double fault is not available at <kstack> unless
 * the machine was idle when the double fault occurred.  The downside
 * of this is that "trace <ebp>" in ddb won't work.
 */
void
dblfault_handler()
{
	printf("\nFatal double fault:\n");
	printf("eip = 0x%x\n", common_tss.tss_eip);
	printf("esp = 0x%x\n", common_tss.tss_esp);
	printf("ebp = 0x%x\n", common_tss.tss_ebp);
#ifdef SMP
	/* three separate prints in case of a trap on an unmapped page */
	printf("mp_lock = %08x; ", mp_lock);
	printf("cpuid = %d; ", cpuid);
	printf("lapic.id = %08x\n", lapic.id);
#endif
	panic("double fault");
}

/*
 * Compensate for 386 brain damage (missing URKR): the 386 does not honor
 * page-level write protection while running in supervisor mode, so a
 * kernel write to a read-only user page does not fault by itself.  This
 * is a little simpler than the page fault handler in trap() because the
 * page tables have already been faulted in and high addresses are thrown
 * out early for other reasons.
 */
int trapwrite(addr)
	unsigned addr;
{
	struct proc *p;
	vm_offset_t va;
	struct vmspace *vm;
	int rv;

	va = trunc_page((vm_offset_t)addr);
	/*
	 * XXX - MAX is END.  Changed > to >= for temp. fix.
	 */
	if (va >= VM_MAXUSER_ADDRESS)
		return (1);

	p = curproc;
	vm = p->p_vmspace;

	++p->p_lock;

	if (!grow_stack(p, va)) {
		--p->p_lock;
		return (1);
	}

	/*
	 * fault the data page
	 */
	rv = vm_fault(&vm->vm_map, va, VM_PROT_WRITE, VM_FAULT_DIRTY);

	--p->p_lock;

	if (rv != KERN_SUCCESS)
		return 1;

	return (0);
}

/*
 * syscall2 -	MP aware system call request C handler
 *
 * A system call is essentially treated as a trap except that the
 * MP lock is not held on entry or return.  We are responsible for
 * obtaining the MP lock if necessary and for handling ASTs
 * (e.g. a task switch) prior to return.
 *
 * In general, only simple access and manipulation of curproc and
 * the current stack is allowed without having to hold the MP lock.
 */
void
syscall2(frame)
	struct trapframe frame;
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	caddr_t params;
	int i;
	struct sysent *callp;
	register_t orig_tf_eflags;
	u_quad_t sticks;
	int error;
	int narg;
	int args[8];
	int have_mplock = 0;
	u_int code;

#ifdef DIAGNOSTIC
	if (ISPL(frame.tf_cs) != SEL_UPL) {
		get_mplock();
		panic("syscall");
		/* NOT REACHED */
	}
#endif

	/*
	 * Access the non-atomic tick count from a critical section.
	 * td_sticks is updated by the clock interrupt.
	 */
	crit_enter();
	sticks = curthread->td_sticks;
	crit_exit();

	p->p_md.md_regs = &frame;
	params = (caddr_t)frame.tf_esp + sizeof(int);
	code = frame.tf_eax;
	orig_tf_eflags = frame.tf_eflags;

	if (p->p_sysent->sv_prepsyscall) {
		/*
		 * The prep code is not MP aware.
		 */
		get_mplock();
		(*p->p_sysent->sv_prepsyscall)(&frame, args, &code, &params);
		rel_mplock();
	} else {
		/*
		 * Need to check if this is a 32 bit or 64 bit syscall.
		 * fuword is MP aware.
		 */
		if (code == SYS_syscall) {
			/*
			 * Code is first argument, followed by actual args.
			 */
			code = fuword(params);
			params += sizeof(int);
		} else if (code == SYS___syscall) {
			/*
			 * Like syscall, but code is a quad, so as to maintain
			 * quad alignment for the rest of the arguments.
			 */
			code = fuword(params);
			params += sizeof(quad_t);
		}
	}
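
	/*
	 * Sketch of the user stack at this point for the common
	 * "int $0x80" entry (why params starts at tf_esp + 4):
	 *
	 *	frame.tf_esp -> return address (one int-sized slot)
	 *	params -------> syscall arguments; for the SYS_syscall /
	 *			SYS___syscall indirection handled above,
	 *			the real code came first and params has
	 *			already been advanced past it
	 */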

	if (p->p_sysent->sv_mask)
		code &= p->p_sysent->sv_mask;

	if (code >= p->p_sysent->sv_size)
		callp = &p->p_sysent->sv_table[0];
	else
		callp = &p->p_sysent->sv_table[code];

	narg = callp->sy_narg & SYF_ARGMASK;

	/*
	 * copyin is MP aware, but the tracing code is not.
	 */
	if (params && (i = narg * sizeof(int)) &&
	    (error = copyin(params, (caddr_t)args, (u_int)i))) {
		get_mplock();
		have_mplock = 1;
#ifdef KTRACE
		if (KTRPOINT(td, KTR_SYSCALL))
			ktrsyscall(p->p_tracep, code, narg, args);
#endif
		goto bad;
	}

	/*
	 * Try to run the syscall without the MP lock if the syscall
	 * is MP safe.  We have to obtain the MP lock no matter what
	 * if we are ktracing.
	 */
	if ((callp->sy_narg & SYF_MPSAFE) == 0) {
		get_mplock();
		have_mplock = 1;
	}

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSCALL)) {
		if (have_mplock == 0) {
			get_mplock();
			have_mplock = 1;
		}
		ktrsyscall(p->p_tracep, code, narg, args);
	}
#endif
	p->p_retval[0] = 0;
	p->p_retval[1] = frame.tf_edx;

	STOPEVENT(p, S_SCE, narg);	/* MP aware */

	error = (*callp->sy_call)(args);

	/*
	 * MP SAFE (we may or may not have the MP lock at this point)
	 */
	switch (error) {
	case 0:
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		frame.tf_eax = p->p_retval[0];
		frame.tf_edx = p->p_retval[1];
		frame.tf_eflags &= ~PSL_C;
		break;

	case ERESTART:
		/*
		 * Reconstruct pc, assuming lcall $X,y is 7 bytes,
		 * int 0x80 is 2 bytes.  We saved this in tf_err.
		 */
		frame.tf_eip -= frame.tf_err;
		break;

	case EJUSTRETURN:
		break;

	default:
bad:
		if (p->p_sysent->sv_errsize) {
			if (error >= p->p_sysent->sv_errsize)
				error = -1;	/* XXX */
			else
				error = p->p_sysent->sv_errtbl[error];
		}
		frame.tf_eax = error;
		frame.tf_eflags |= PSL_C;
		break;
	}

	/*
	 * Traced syscall.  trapsignal() is not MP aware.
	 */
	if ((orig_tf_eflags & PSL_T) && !(orig_tf_eflags & PSL_VM)) {
		if (have_mplock == 0) {
			get_mplock();
			have_mplock = 1;
		}
		frame.tf_eflags &= ~PSL_T;
		trapsignal(p, SIGTRAP, 0);
	}

	/*
	 * Handle reschedule and other end-of-syscall issues.
	 */
	have_mplock = userret(p, &frame, sticks, have_mplock);

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSRET)) {
		if (have_mplock == 0) {
			get_mplock();
			have_mplock = 1;
		}
		ktrsysret(p->p_tracep, code, error, p->p_retval[0]);
	}
#endif

	/*
	 * This works because errno is findable through the
	 * register set.  If we ever support an emulation where this
	 * is not the case, this code will need to be revisited.
	 */
	STOPEVENT(p, S_SCX, code);

	/*
	 * Release the MP lock if we had to get it.
	 */
	if (have_mplock)
		rel_mplock();
}

/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.  The MP lock is held on entry and should be
 * held on return.
 */
void
fork_return(p, frame)
	struct proc *p;
	struct trapframe frame;
{
	frame.tf_eax = 0;		/* Child returns zero */
	frame.tf_eflags &= ~PSL_C;	/* success */
	frame.tf_edx = 1;

	userret(p, &frame, 0, 1);
#ifdef KTRACE
	if (KTRPOINT(p->p_thread, KTR_SYSRET))
		ktrsysret(p->p_tracep, SYS_fork, 0, 0);
#endif
}