MP Implementation 1/2: Get the APIC code working again, sweetly integrate the
[dragonfly.git] sys/i386/i386/trap.c
/*-
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $
 * $DragonFly: src/sys/i386/i386/Attic/trap.c,v 1.18 2003/07/06 21:23:48 dillon Exp $
 */

/*
 * 386 Trap and System call handling
 */

#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_ktrace.h"
#include "opt_clock.h"
#include "opt_trap.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pioctl.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>

#include <machine/cpu.h>
#include <machine/ipl.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#ifdef SMP
#include <machine/smp.h>
#endif
#include <machine/tss.h>
#include <machine/globaldata.h>

#include <i386/isa/intr_machdep.h>

#ifdef POWERFAIL_NMI
#include <sys/syslog.h>
#include <machine/clock.h>
#endif

#include <machine/vm86.h>

#include <ddb/ddb.h>
#include <sys/thread2.h>

#include "isa.h"
#include "npx.h"

int (*pmath_emulate) __P((struct trapframe *));

extern void trap __P((struct trapframe frame));
extern int trapwrite __P((unsigned addr));
extern void syscall2 __P((struct trapframe frame));

static int trap_pfault __P((struct trapframe *, int, vm_offset_t));
static void trap_fatal __P((struct trapframe *, vm_offset_t));
void dblfault_handler __P((void));

extern inthand_t IDTVEC(syscall);

#define MAX_TRAP_MSG		28
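/* Diagnostic messages indexed by trap type (the T_* values noted below). */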
static char *trap_msg[] = {
	"",					/*  0 unused */
	"privileged instruction fault",		/*  1 T_PRIVINFLT */
	"",					/*  2 unused */
	"breakpoint instruction fault",		/*  3 T_BPTFLT */
	"",					/*  4 unused */
	"",					/*  5 unused */
	"arithmetic trap",			/*  6 T_ARITHTRAP */
	"system forced exception",		/*  7 T_ASTFLT */
	"",					/*  8 unused */
	"general protection fault",		/*  9 T_PROTFLT */
	"trace trap",				/* 10 T_TRCTRAP */
	"",					/* 11 unused */
	"page fault",				/* 12 T_PAGEFLT */
	"",					/* 13 unused */
	"alignment fault",			/* 14 T_ALIGNFLT */
	"",					/* 15 unused */
	"",					/* 16 unused */
	"",					/* 17 unused */
	"integer divide fault",			/* 18 T_DIVIDE */
	"non-maskable interrupt trap",		/* 19 T_NMI */
	"overflow trap",			/* 20 T_OFLOW */
	"FPU bounds check fault",		/* 21 T_BOUND */
	"FPU device not available",		/* 22 T_DNA */
	"double fault",				/* 23 T_DOUBLEFLT */
	"FPU operand fetch fault",		/* 24 T_FPOPFLT */
	"invalid TSS fault",			/* 25 T_TSSFLT */
	"segment not present fault",		/* 26 T_SEGNPFLT */
	"stack fault",				/* 27 T_STKFLT */
	"machine check trap",			/* 28 T_MCHK */
};

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
extern int has_f00f_bug;
#endif

#ifdef DDB
static int ddb_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, ddb_on_nmi, CTLFLAG_RW,
	&ddb_on_nmi, 0, "Go to DDB on NMI");
#endif
static int panic_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
	&panic_on_nmi, 0, "Panic on NMI");

/*
 * USER->KERNEL transition.  Do not transition us out of userland from the
 * point of view of the userland scheduler unless we actually have to
 * switch.
 *
 * usertdsw is called from within a critical section, but the BGL will
 * have already been released by lwkt_switch() so only call MP safe functions
 * that don't block!
 */
static void
usertdsw(struct thread *ntd)
{
	struct thread *td = curthread;

	td->td_switch = cpu_heavy_switch;
	lwkt_setpri_self(TDPRI_KERN_USER);
#if 0
	/*
	 * This is where we might want to catch the P_CURPROC designation
	 * and fix it for *any* switchout rather than just an mi_switch()
	 * switchout (move from mi_switch()?) YYY
	 */
	if (p->p_flag & P_CURPROC) {
		...
	}
#endif
	td->td_switch(ntd);
}

/*
 * userenter() passively intercepts the thread switch function to increase
 * the thread priority from a user priority to a kernel priority, reducing
 * syscall and trap overhead for the case where no switch occurs.
 */

static __inline void
userenter(void)
{
	struct thread *td;

	td = curthread;
	KASSERT(td->td_switch == cpu_heavy_switch,
		("userenter: bad td_switch = %p", td->td_switch));
#if 0
	KASSERT(td->td_switch == cpu_heavy_switch || td->td_switch == usertdsw,
		("userenter: bad td_switch = %p", td->td_switch));
#endif
	td->td_switch = usertdsw;
}

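/*
 * userret() finishes a return to userland: post any pending signals,
 * restore the thread's priority and switch function, honor reschedule
 * requests, charge profiling time, and wait until we are the designated
 * current (P_CURPROC) process on this cpu.
 */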
static void
userret(struct proc *p, struct trapframe *frame, u_quad_t oticks)
{
	int sig, s;
	struct thread *td = curthread;

	/*
	 * Post any pending signals
	 */
	crit_enter();
	while ((sig = CURSIG(p)) != 0) {
		crit_exit();
		postsig(sig);
		crit_enter();
	}

	/*
	 * Set our priority properly and restore our switch function.  If
	 * we did not hit our lazy switch function in the first place we
	 * do not need to restore anything.
	 */
	if (td->td_switch == cpu_heavy_switch) {
		switch(p->p_rtprio.type) {
		case RTP_PRIO_IDLE:
			lwkt_setpri_self(TDPRI_USER_IDLE);
			break;
		case RTP_PRIO_REALTIME:
		case RTP_PRIO_FIFO:
			lwkt_setpri_self(TDPRI_USER_REAL);
			break;
		default:
			lwkt_setpri_self(TDPRI_USER_NORM);
			break;
		}
	} else {
		KKASSERT(td->td_switch == usertdsw);
		td->td_switch = cpu_heavy_switch;
	}
	crit_exit();

	/*
	 * If a reschedule has been requested we call chooseproc() to locate
	 * the next runnable process.  When we wakeup from that we check
	 * for pending signals again.
	 */
	if (resched_wanted()) {
		uio_yield();
		while ((sig = CURSIG(p)) != 0)
			postsig(sig);
	}

	/*
	 * Charge system time if profiling.
	 */
	if (p->p_flag & P_PROFIL) {
		addupc_task(p, frame->tf_eip,
		    (u_int)(curthread->td_sticks - oticks) * psratio);
	}

	/*
	 * In order to return to userland we need to be the designated
	 * current (user) process on this cpu.  We have to wait for
	 * the userland scheduler to schedule as P_CURPROC.
	 */
	s = splhigh();
	while ((p->p_flag & P_CURPROC) == 0) {
		p->p_stats->p_ru.ru_nivcsw++;
		lwkt_deschedule_self();
		mi_switch();
	}
	splx(s);
	KKASSERT(mycpu->gd_uprocscheduled == 1);
}

#ifdef DEVICE_POLLING
extern u_int32_t poll_in_trap;
extern int ether_poll __P((int count));
#endif /* DEVICE_POLLING */

/*
 * Exception, fault, and trap interface to the FreeBSD kernel.
 * This common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed.
 */

void
trap(frame)
	struct trapframe frame;
{
	struct proc *p = curproc;
	u_quad_t sticks = 0;
	int i = 0, ucode = 0, type, code;
	vm_offset_t eva;

	get_mplock();

#ifdef DDB
	if (db_active) {
		eva = (frame.tf_trapno == T_PAGEFLT ? rcr2() : 0);
		trap_fatal(&frame, eva);
		goto out2;
	}
#endif

	if (!(frame.tf_eflags & PSL_I)) {
		/*
		 * Buggy application or kernel code has disabled interrupts
		 * and then trapped.  Enabling interrupts now is wrong, but
		 * it is better than running with interrupts disabled until
		 * they are accidentally enabled later.
		 */
		type = frame.tf_trapno;
		if (ISPL(frame.tf_cs) == SEL_UPL || (frame.tf_eflags & PSL_VM))
			printf(
			    "pid %ld (%s): trap %d with interrupts disabled\n",
			    (long)curproc->p_pid, curproc->p_comm, type);
		else if (type != T_BPTFLT && type != T_TRCTRAP)
			/*
			 * XXX not quite right, since this may be for a
			 * multiple fault in user mode.
			 */
			printf("kernel trap %d with interrupts disabled\n",
			    type);
		cpu_enable_intr();
	}

	eva = 0;
	if (frame.tf_trapno == T_PAGEFLT) {
		/*
		 * For some Cyrix CPUs, %cr2 is clobbered by interrupts.
		 * This problem is worked around by using an interrupt
		 * gate for the pagefault handler.  We are finally ready
		 * to read %cr2 and then must reenable interrupts.
		 *
		 * XXX this should be in the switch statement, but the
		 * NO_F00F_HACK and VM86 goto and ifdefs obfuscate the
		 * flow of control too much for this to be obviously
		 * correct.
		 */
		eva = rcr2();
		cpu_enable_intr();
	}

#ifdef DEVICE_POLLING
	if (poll_in_trap)
		ether_poll(poll_in_trap);
#endif /* DEVICE_POLLING */

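/*
 * The Pentium F00F workaround in trap_pfault() converts the fault to
 * T_PRIVINFLT and restarts dispatch at the label below.
 */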
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
restart:
#endif
	type = frame.tf_trapno;
	code = frame.tf_err;

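	/*
	 * Traps taken while the kernel is executing a vm86 call are
	 * either emulated here or handed to the normal kernel trap path.
	 */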
	if (in_vm86call) {
		if (frame.tf_eflags & PSL_VM &&
		    (type == T_PROTFLT || type == T_STKFLT)) {
			i = vm86_emulate((struct vm86frame *)&frame);
			if (i != 0) {
				/*
				 * returns to original process
				 */
				vm86_trap((struct vm86frame *)&frame);
			}
			goto out2;
		}
		switch (type) {
			/*
			 * these traps want either a process context, or
			 * assume a normal userspace trap.
			 */
		case T_PROTFLT:
		case T_SEGNPFLT:
			trap_fatal(&frame, eva);
			goto out2;
		case T_TRCTRAP:
			type = T_BPTFLT;	/* kernel breakpoint */
			/* FALL THROUGH */
		}
		goto kernel_trap;	/* normal kernel trap handling */
	}

	if ((ISPL(frame.tf_cs) == SEL_UPL) || (frame.tf_eflags & PSL_VM)) {
		/* user trap */

		userenter();

		sticks = curthread->td_sticks;
		p->p_md.md_regs = &frame;

		switch (type) {
		case T_PRIVINFLT:	/* privileged instruction fault */
			ucode = type;
			i = SIGILL;
			break;

		case T_BPTFLT:		/* bpt instruction fault */
		case T_TRCTRAP:		/* trace trap */
			frame.tf_eflags &= ~PSL_T;
			i = SIGTRAP;
			break;

		case T_ARITHTRAP:	/* arithmetic trap */
			ucode = code;
			i = SIGFPE;
			break;

		case T_ASTFLT:		/* Allow process switch */
			astoff();
			mycpu->gd_cnt.v_soft++;
			if (p->p_flag & P_OWEUPC) {
				p->p_flag &= ~P_OWEUPC;
				addupc_task(p, p->p_stats->p_prof.pr_addr,
					    p->p_stats->p_prof.pr_ticks);
			}
			goto out;

			/*
			 * The following two traps can happen in
			 * vm86 mode, and, if so, we want to handle
			 * them specially.
			 */
		case T_PROTFLT:		/* general protection fault */
		case T_STKFLT:		/* stack fault */
			if (frame.tf_eflags & PSL_VM) {
				i = vm86_emulate((struct vm86frame *)&frame);
				if (i == 0)
					goto out;
				break;
			}
			/* FALL THROUGH */

		case T_SEGNPFLT:	/* segment not present fault */
		case T_TSSFLT:		/* invalid TSS fault */
		case T_DOUBLEFLT:	/* double fault */
		default:
			ucode = code + BUS_SEGM_FAULT;
			i = SIGBUS;
			break;

		case T_PAGEFLT:		/* page fault */
			i = trap_pfault(&frame, TRUE, eva);
			if (i == -1)
				goto out;
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
			if (i == -2)
				goto restart;
#endif
			if (i == 0)
				goto out;

			ucode = T_PAGEFLT;
			break;

		case T_DIVIDE:		/* integer divide fault */
			ucode = FPE_INTDIV;
			i = SIGFPE;
			break;

#if NISA > 0
		case T_NMI:
#ifdef POWERFAIL_NMI
			goto handle_powerfail;
#else /* !POWERFAIL_NMI */
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					printf ("NMI ... going to debugger\n");
					kdb_trap (type, 0, &frame);
				}
#endif /* DDB */
				goto out2;
			} else if (panic_on_nmi)
				panic("NMI indicates hardware failure");
			break;
#endif /* POWERFAIL_NMI */
#endif /* NISA > 0 */

		case T_OFLOW:		/* integer overflow fault */
			ucode = FPE_INTOVF;
			i = SIGFPE;
			break;

		case T_BOUND:		/* bounds check fault */
			ucode = FPE_FLTSUB;
			i = SIGFPE;
			break;

		case T_DNA:
#if NNPX > 0
			/* if a transparent fault (due to context switch "late") */
			if (npxdna())
				goto out;
#endif
			if (!pmath_emulate) {
				i = SIGFPE;
				ucode = FPE_FPU_NP_TRAP;
				break;
			}
			i = (*pmath_emulate)(&frame);
			if (i == 0) {
				if (!(frame.tf_eflags & PSL_T))
					goto out2;
				frame.tf_eflags &= ~PSL_T;
				i = SIGTRAP;
			}
			/* else ucode = emulator_only_knows() XXX */
			break;

		case T_FPOPFLT:		/* FPU operand fetch fault */
			ucode = T_FPOPFLT;
			i = SIGILL;
			break;

		case T_XMMFLT:		/* SIMD floating-point exception */
			ucode = 0; /* XXX */
			i = SIGFPE;
			break;
		}
	} else {
kernel_trap:
		/* kernel trap */

		switch (type) {
		case T_PAGEFLT:			/* page fault */
			(void) trap_pfault(&frame, FALSE, eva);
			goto out2;

		case T_DNA:
#if NNPX > 0
			/*
			 * The kernel is apparently using npx for copying.
			 * XXX this should be fatal unless the kernel has
			 * registered such use.
			 */
			if (npxdna())
				goto out2;
#endif
			break;

		case T_PROTFLT:		/* general protection fault */
		case T_SEGNPFLT:	/* segment not present fault */
			/*
			 * Invalid segment selectors and out of bounds
			 * %eip's and %esp's can be set up in user mode.
			 * This causes a fault in kernel mode when the
			 * kernel tries to return to user mode.  We want
			 * to get this fault so that we can fix the
			 * problem here and not have to check all the
			 * selectors and pointers when the user changes
			 * them.
			 */
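/*
 * Vector a fault taken at instruction `where' to the recovery stub
 * `whereto' and bail out of the trap handler.
 */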
#define	MAYBE_DORETI_FAULT(where, whereto)				\
	do {								\
		if (frame.tf_eip == (int)where) {			\
			frame.tf_eip = (int)whereto;			\
			goto out2;					\
		}							\
	} while (0)

			if (mycpu->gd_intr_nesting_level == 0) {
				/*
				 * Invalid %fs's and %gs's can be created using
				 * procfs or PT_SETREGS or by invalidating the
				 * underlying LDT entry.  This causes a fault
				 * in kernel mode when the kernel attempts to
				 * switch contexts.  Lose the bad context
				 * (XXX) so that we can continue, and generate
				 * a signal.
				 */
				if (frame.tf_eip == (int)cpu_switch_load_gs) {
					curthread->td_pcb->pcb_gs = 0;
					psignal(p, SIGBUS);
					goto out2;
				}
				MAYBE_DORETI_FAULT(doreti_iret,
						   doreti_iret_fault);
				MAYBE_DORETI_FAULT(doreti_popl_ds,
						   doreti_popl_ds_fault);
				MAYBE_DORETI_FAULT(doreti_popl_es,
						   doreti_popl_es_fault);
				MAYBE_DORETI_FAULT(doreti_popl_fs,
						   doreti_popl_fs_fault);
				if (curthread->td_pcb->pcb_onfault) {
					frame.tf_eip = (int)curthread->td_pcb->pcb_onfault;
					goto out2;
				}
			}
			break;

		case T_TSSFLT:
			/*
			 * PSL_NT can be set in user mode and isn't cleared
			 * automatically when the kernel is entered.  This
			 * causes a TSS fault when the kernel attempts to
			 * `iret' because the TSS link is uninitialized.  We
			 * want to get this fault so that we can fix the
			 * problem here and not every time the kernel is
			 * entered.
			 */
			if (frame.tf_eflags & PSL_NT) {
				frame.tf_eflags &= ~PSL_NT;
				goto out2;
			}
			break;

		case T_TRCTRAP:	 /* trace trap */
			if (frame.tf_eip == (int)IDTVEC(syscall)) {
				/*
				 * We've just entered system mode via the
				 * syscall lcall.  Continue single stepping
				 * silently until the syscall handler has
				 * saved the flags.
				 */
				goto out2;
			}
			if (frame.tf_eip == (int)IDTVEC(syscall) + 1) {
				/*
				 * The syscall handler has now saved the
				 * flags.  Stop single stepping it.
				 */
				frame.tf_eflags &= ~PSL_T;
				goto out2;
			}
			/*
			 * Ignore debug register trace traps due to
			 * accesses in the user's address space, which
			 * can happen under several conditions such as
			 * if a user sets a watchpoint on a buffer and
			 * then passes that buffer to a system call.
			 * We still want to get TRCTRAPS for addresses
			 * in kernel space because that is useful when
			 * debugging the kernel.
			 */
			if (user_dbreg_trap()) {
				/*
				 * Reset breakpoint bits because the
				 * processor doesn't clear them itself.
				 */
				load_dr6(rdr6() & 0xfffffff0);
				goto out2;
			}
			/*
			 * Fall through (TRCTRAP kernel mode, kernel address)
			 */
		case T_BPTFLT:
			/*
			 * If DDB is enabled, let it handle the debugger trap.
			 * Otherwise, debugger traps "can't happen".
			 */
#ifdef DDB
			if (kdb_trap (type, 0, &frame))
				goto out2;
#endif
			break;

#if NISA > 0
		case T_NMI:
#ifdef POWERFAIL_NMI
#ifndef TIMER_FREQ
# define TIMER_FREQ	1193182
#endif
	handle_powerfail:
	{
		static unsigned lastalert = 0;

		if (time_second - lastalert > 10) {
			log(LOG_WARNING, "NMI: power fail\n");
			sysbeep(TIMER_FREQ/880, hz);
			lastalert = time_second;
		}
		/* YYY mp count */
		goto out2;
	}
#else /* !POWERFAIL_NMI */
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					printf ("NMI ... going to debugger\n");
					kdb_trap (type, 0, &frame);
				}
#endif /* DDB */
				goto out2;
			} else if (panic_on_nmi == 0)
				goto out2;
			/* FALL THROUGH */
#endif /* POWERFAIL_NMI */
#endif /* NISA > 0 */
		}

		trap_fatal(&frame, eva);
		goto out2;
	}

	/* Translate fault for emulators (e.g. Linux) */
	if (*p->p_sysent->sv_transtrap)
		i = (*p->p_sysent->sv_transtrap)(i, type);

	trapsignal(p, i, ucode);

#ifdef DEBUG
	if (type <= MAX_TRAP_MSG) {
		uprintf("fatal process exception: %s",
			trap_msg[type]);
		if ((type == T_PAGEFLT) || (type == T_PROTFLT))
			uprintf(", fault VA = 0x%lx", (u_long)eva);
		uprintf("\n");
	}
#endif

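/*
 * User traps finish through userret(); kernel traps jump straight to
 * out2, which just releases the MP lock.
 */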
out:
#ifdef SMP
	if (ISPL(frame.tf_cs) == SEL_UPL)
		KASSERT(curthread->td_mpcount == 1, ("badmpcount trap from %p", (void *)frame.tf_eip));
#endif
	userret(p, &frame, sticks);
out2:
	rel_mplock();
}

#ifdef notyet
/*
 * This version doesn't allow a page fault to user space while
 * in the kernel. The rest of the kernel needs to be made "safe"
 * before this can be used. I think the only things remaining
 * to be made safe are the iBCS2 code and the process tracing/
 * debugging code.
 */
static int
trap_pfault(frame, usermode, eva)
	struct trapframe *frame;
	int usermode;
	vm_offset_t eva;
{
	vm_offset_t va;
	struct vmspace *vm = NULL;
	vm_map_t map = 0;
	int rv = 0;
	vm_prot_t ftype;
	struct proc *p = curproc;

	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_WRITE;
	else
		ftype = VM_PROT_READ;

	va = trunc_page(eva);
	if (va < VM_MIN_KERNEL_ADDRESS) {
		vm_offset_t v;
		vm_page_t mpte;

		if (p == NULL ||
		    (!usermode && va < VM_MAXUSER_ADDRESS &&
		     (mycpu->gd_intr_nesting_level != 0 ||
		      curthread->td_pcb->pcb_onfault == NULL))) {
			trap_fatal(frame, eva);
			return (-1);
		}

		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL. If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */
		vm = p->p_vmspace;
		if (vm == NULL)
			goto nogo;

		map = &vm->vm_map;

		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		++p->p_lock;

		/*
		 * Grow the stack if necessary
		 */
		/* grow_stack returns false only if va falls into
		 * a growable stack region and the stack growth
		 * fails.  It returns true if va was not within
		 * a growable stack region, or if the stack
		 * growth succeeded.
		 */
		if (!grow_stack (p, va)) {
			rv = KERN_FAILURE;
			--p->p_lock;
			goto nogo;
		}

		/* Fault in the user page: */
		rv = vm_fault(map, va, ftype,
			      (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
						      : VM_FAULT_NORMAL);

		--p->p_lock;
	} else {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 */
		if (usermode)
			goto nogo;

		/*
		 * Since we know that kernel virtual addresses always
		 * have pte pages mapped, we just have to fault the page.
		 */
		rv = vm_fault(kernel_map, va, ftype, VM_FAULT_NORMAL);
	}

	if (rv == KERN_SUCCESS)
		return (0);
nogo:
	if (!usermode) {
		if (mycpu->gd_intr_nesting_level == 0 && curthread->td_pcb->pcb_onfault) {
			frame->tf_eip = (int)curthread->td_pcb->pcb_onfault;
			return (0);
		}
		trap_fatal(frame, eva);
		return (-1);
	}

	/* kludge to pass faulting virtual address to sendsig */
	frame->tf_err = eva;

	return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}
#endif

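/*
 * Resolve a page fault.  Returns 0 on success, -1 if the fault was
 * fatal (trap_fatal() has been called), -2 to restart dispatch as
 * T_PRIVINFLT (Pentium F00F workaround), or a signal number
 * (SIGBUS/SIGSEGV) for the caller to deliver.
 */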
int
trap_pfault(frame, usermode, eva)
	struct trapframe *frame;
	int usermode;
	vm_offset_t eva;
{
	vm_offset_t va;
	struct vmspace *vm = NULL;
	vm_map_t map = 0;
	int rv = 0;
	vm_prot_t ftype;
	struct proc *p = curproc;

	va = trunc_page(eva);
	if (va >= KERNBASE) {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 * An exception: if the faulting address is the invalid
		 * instruction entry in the IDT, then the Intel Pentium
		 * F00F bug workaround was triggered, and we need to
		 * treat it as an illegal instruction rather than a
		 * page fault.
		 */
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
		if ((eva == (unsigned int)&idt[6]) && has_f00f_bug) {
			frame->tf_trapno = T_PRIVINFLT;
			return -2;
		}
#endif
		if (usermode)
			goto nogo;

		map = kernel_map;
	} else {
		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL. If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */
		if (p != NULL)
			vm = p->p_vmspace;

		if (vm == NULL)
			goto nogo;

		map = &vm->vm_map;
	}

	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_WRITE;
	else
		ftype = VM_PROT_READ;

	if (map != kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		++p->p_lock;

		/*
		 * Grow the stack if necessary
		 */
		/* grow_stack returns false only if va falls into
		 * a growable stack region and the stack growth
		 * fails.  It returns true if va was not within
		 * a growable stack region, or if the stack
		 * growth succeeded.
		 */
		if (!grow_stack (p, va)) {
			rv = KERN_FAILURE;
			--p->p_lock;
			goto nogo;
		}

		/* Fault in the user page: */
		rv = vm_fault(map, va, ftype,
			      (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
						      : VM_FAULT_NORMAL);

		--p->p_lock;
	} else {
		/*
		 * Don't have to worry about process locking or stacks
		 * in the kernel.
		 */
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	}

	if (rv == KERN_SUCCESS)
		return (0);
nogo:
	if (!usermode) {
		if (mycpu->gd_intr_nesting_level == 0 && curthread->td_pcb->pcb_onfault) {
			frame->tf_eip = (int)curthread->td_pcb->pcb_onfault;
			return (0);
		}
		trap_fatal(frame, eva);
		return (-1);
	}

	/* kludge to pass faulting virtual address to sendsig */
	frame->tf_err = eva;

	return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}

static void
trap_fatal(frame, eva)
	struct trapframe *frame;
	vm_offset_t eva;
{
	int code, type, ss, esp;
	struct soft_segment_descriptor softseg;

	code = frame->tf_err;
	type = frame->tf_trapno;
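	/* Decode the faulting code segment descriptor for display below. */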
	sdtossd(&gdt[IDXSEL(frame->tf_cs & 0xffff)].sd, &softseg);

	if (type <= MAX_TRAP_MSG)
		printf("\n\nFatal trap %d: %s while in %s mode\n",
			type, trap_msg[type],
			frame->tf_eflags & PSL_VM ? "vm86" :
			ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
#ifdef SMP
	/* three separate prints in case of a trap on an unmapped page */
	printf("mp_lock = %08x; ", mp_lock);
	printf("cpuid = %d; ", mycpu->gd_cpuid);
	printf("lapic.id = %08x\n", lapic.id);
#endif
	if (type == T_PAGEFLT) {
		printf("fault virtual address = 0x%x\n", eva);
		printf("fault code = %s %s, %s\n",
			code & PGEX_U ? "user" : "supervisor",
			code & PGEX_W ? "write" : "read",
			code & PGEX_P ? "protection violation" : "page not present");
	}
	printf("instruction pointer = 0x%x:0x%x\n",
	       frame->tf_cs & 0xffff, frame->tf_eip);
	if ((ISPL(frame->tf_cs) == SEL_UPL) || (frame->tf_eflags & PSL_VM)) {
		ss = frame->tf_ss & 0xffff;
		esp = frame->tf_esp;
	} else {
		ss = GSEL(GDATA_SEL, SEL_KPL);
		esp = (int)&frame->tf_esp;
	}
	printf("stack pointer = 0x%x:0x%x\n", ss, esp);
	printf("frame pointer = 0x%x:0x%x\n", ss, frame->tf_ebp);
	printf("code segment = base 0x%x, limit 0x%x, type 0x%x\n",
	       softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
	printf(" = DPL %d, pres %d, def32 %d, gran %d\n",
	       softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_def32,
	       softseg.ssd_gran);
	printf("processor eflags = ");
	if (frame->tf_eflags & PSL_T)
		printf("trace trap, ");
	if (frame->tf_eflags & PSL_I)
		printf("interrupt enabled, ");
	if (frame->tf_eflags & PSL_NT)
		printf("nested task, ");
	if (frame->tf_eflags & PSL_RF)
		printf("resume, ");
	if (frame->tf_eflags & PSL_VM)
		printf("vm86, ");
	printf("IOPL = %d\n", (frame->tf_eflags & PSL_IOPL) >> 12);
	printf("current process = ");
	if (curproc) {
		printf("%lu (%s)\n",
		    (u_long)curproc->p_pid, curproc->p_comm ?
		    curproc->p_comm : "");
	} else {
		printf("Idle\n");
	}
	printf("current thread = pri %d ", curthread->td_pri);
	if (curthread->td_pri >= TDPRI_CRIT)
		printf("(CRIT)");
	printf("\n");
	printf("interrupt mask = ");
	if ((curthread->td_cpl & net_imask) == net_imask)
		printf("net ");
	if ((curthread->td_cpl & tty_imask) == tty_imask)
		printf("tty ");
	if ((curthread->td_cpl & bio_imask) == bio_imask)
		printf("bio ");
	if ((curthread->td_cpl & cam_imask) == cam_imask)
		printf("cam ");
	if (curthread->td_cpl == 0)
		printf("none");
#ifdef SMP
/**
 * XXX FIXME:
 *	we probably SHOULD have stopped the other CPUs before now!
 *	another CPU COULD have been touching cpl at this moment...
 */
	printf(" <- SMP: XXX");
#endif
	printf("\n");

#ifdef KDB
	if (kdb_trap(&psl))
		return;
#endif
#ifdef DDB
	if ((debugger_on_panic || db_active) && kdb_trap(type, 0, frame))
		return;
#endif
	printf("trap number = %d\n", type);
	if (type <= MAX_TRAP_MSG)
		panic("%s", trap_msg[type]);
	else
		panic("unknown/reserved trap");
}

/*
 * Double fault handler.  Called when a fault occurs while writing
 * a frame for a trap/exception onto the stack.  This usually occurs
 * when the stack overflows (such is the case with infinite recursion,
 * for example).
 *
 * XXX Note that the current PTD gets replaced by IdlePTD when the
 * task switch occurs.  This means that the stack that was active at
 * the time of the double fault is not available at <kstack> unless
 * the machine was idle when the double fault occurred.  The downside
 * of this is that "trace <ebp>" in ddb won't work.
 */
void
dblfault_handler()
{
	struct mdglobaldata *gd = mdcpu;

	printf("\nFatal double fault:\n");
	printf("eip = 0x%x\n", gd->gd_common_tss.tss_eip);
	printf("esp = 0x%x\n", gd->gd_common_tss.tss_esp);
	printf("ebp = 0x%x\n", gd->gd_common_tss.tss_ebp);
#ifdef SMP
	/* three separate prints in case of a trap on an unmapped page */
	printf("mp_lock = %08x; ", mp_lock);
	printf("cpuid = %d; ", mycpu->gd_cpuid);
	printf("lapic.id = %08x\n", lapic.id);
#endif
	panic("double fault");
}

/*
 * Compensate for 386 brain damage (missing URKR).
 * This is a little simpler than the pagefault handler in trap()
 * because the page tables have already been faulted in and high
 * addresses are thrown out early for other reasons.
 */
int trapwrite(addr)
	unsigned addr;
{
	struct proc *p;
	vm_offset_t va;
	struct vmspace *vm;
	int rv;

	va = trunc_page((vm_offset_t)addr);
	/*
	 * XXX - MAX is END.  Changed > to >= for temp. fix.
	 */
	if (va >= VM_MAXUSER_ADDRESS)
		return (1);

	p = curproc;
	vm = p->p_vmspace;

	++p->p_lock;

	if (!grow_stack (p, va)) {
		--p->p_lock;
		return (1);
	}

	/*
	 * fault the data page
	 */
	rv = vm_fault(&vm->vm_map, va, VM_PROT_WRITE, VM_FAULT_DIRTY);

	--p->p_lock;

	if (rv != KERN_SUCCESS)
		return 1;

	return (0);
}

/*
 * syscall2 -	MP aware system call request C handler
 *
 * A system call is essentially treated as a trap except that the
 * MP lock is not held on entry or return.  We are responsible for
 * obtaining the MP lock if necessary and for handling ASTs
 * (e.g. a task switch) prior to return.
 *
 * In general, only simple access and manipulation of curproc and
 * the current stack is allowed without having to hold MP lock.
 */
void
syscall2(frame)
	struct trapframe frame;
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	caddr_t params;
	int i;
	struct sysent *callp;
	register_t orig_tf_eflags;
	u_quad_t sticks;
	int error;
	int narg;
	int args[8];
	u_int code;

#ifdef DIAGNOSTIC
	if (ISPL(frame.tf_cs) != SEL_UPL) {
		get_mplock();
		panic("syscall");
		/* NOT REACHED */
	}
#endif

#ifdef SMP
	KASSERT(curthread->td_mpcount == 0, ("badmpcount syscall from %p", (void *)frame.tf_eip));
	get_mplock();
#endif
	/*
	 * Access the non-atomic sticks field from within a critical
	 * section; td_sticks is updated by the clock interrupt.  Also
	 * use this opportunity to lazy-raise our LWKT priority.
	 */
	crit_enter();
	userenter();
	sticks = curthread->td_sticks;
	crit_exit();

	p->p_md.md_regs = &frame;
	params = (caddr_t)frame.tf_esp + sizeof(int);
	code = frame.tf_eax;
	orig_tf_eflags = frame.tf_eflags;

	if (p->p_sysent->sv_prepsyscall) {
		/*
		 * The prep code is not MP aware.
		 */
		(*p->p_sysent->sv_prepsyscall)(&frame, args, &code, &params);
	} else {
		/*
		 * Need to check if this is a 32 bit or 64 bit syscall.
		 * fuword is MP aware.
		 */
		if (code == SYS_syscall) {
			/*
			 * Code is first argument, followed by actual args.
			 */
			code = fuword(params);
			params += sizeof(int);
		} else if (code == SYS___syscall) {
			/*
			 * Like syscall, but code is a quad, so as to maintain
			 * quad alignment for the rest of the arguments.
			 */
			code = fuword(params);
			params += sizeof(quad_t);
		}
	}

	if (p->p_sysent->sv_mask)
		code &= p->p_sysent->sv_mask;

	if (code >= p->p_sysent->sv_size)
		callp = &p->p_sysent->sv_table[0];
	else
		callp = &p->p_sysent->sv_table[code];

	narg = callp->sy_narg & SYF_ARGMASK;

	/*
	 * copyin is MP aware, but the tracing code is not
	 */
	if (params && (i = narg * sizeof(int)) &&
	    (error = copyin(params, (caddr_t)args, (u_int)i))) {
#ifdef KTRACE
		if (KTRPOINT(td, KTR_SYSCALL))
			ktrsyscall(p->p_tracep, code, narg, args);
#endif
		goto bad;
	}

#if 0
	/*
	 * Try to run the syscall without the MP lock if the syscall
	 * is MP safe.  We have to obtain the MP lock no matter what if
	 * we are ktracing
	 */
	if ((callp->sy_narg & SYF_MPSAFE) == 0) {
		get_mplock();
		have_mplock = 1;
	}
#endif

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSCALL)) {
		ktrsyscall(p->p_tracep, code, narg, args);
	}
#endif
	p->p_retval[0] = 0;
	p->p_retval[1] = frame.tf_edx;

	STOPEVENT(p, S_SCE, narg);	/* MP aware */

	error = (*callp->sy_call)(args);

	/*
	 * MP SAFE (we may or may not have the MP lock at this point)
	 */
	switch (error) {
	case 0:
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		frame.tf_eax = p->p_retval[0];
		frame.tf_edx = p->p_retval[1];
		frame.tf_eflags &= ~PSL_C;
		break;

	case ERESTART:
		/*
		 * Reconstruct pc, assuming lcall $X,y is 7 bytes,
		 * int 0x80 is 2 bytes.  We saved this in tf_err.
		 */
		frame.tf_eip -= frame.tf_err;
		break;

	case EJUSTRETURN:
		break;

	default:
bad:
		if (p->p_sysent->sv_errsize) {
			if (error >= p->p_sysent->sv_errsize)
				error = -1;	/* XXX */
			else
				error = p->p_sysent->sv_errtbl[error];
		}
		frame.tf_eax = error;
		frame.tf_eflags |= PSL_C;
		break;
	}

	/*
	 * Traced syscall.  trapsignal() is not MP aware.
	 */
	if ((orig_tf_eflags & PSL_T) && !(orig_tf_eflags & PSL_VM)) {
		frame.tf_eflags &= ~PSL_T;
		trapsignal(p, SIGTRAP, 0);
	}

	/*
	 * Handle reschedule and other end-of-syscall issues
	 */
	userret(p, &frame, sticks);

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSRET)) {
		ktrsysret(p->p_tracep, code, error, p->p_retval[0]);
	}
#endif

	/*
	 * This works because errno is findable through the
	 * register set.  If we ever support an emulation where this
	 * is not the case, this code will need to be revisited.
	 */
	STOPEVENT(p, S_SCX, code);

#ifdef SMP
	/*
	 * Release the MP lock if we had to get it
	 */
	KASSERT(curthread->td_mpcount == 1, ("badmpcount syscall from %p", (void *)frame.tf_eip));
	rel_mplock();
#endif
}

/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.  MP lock is held on entry and should be
 * released on return.  This code will return back into the fork
 * trampoline code which then runs doreti.
 */
void
fork_return(p, frame)
	struct proc *p;
	struct trapframe frame;
{
	frame.tf_eax = 0;		/* Child returns zero */
	frame.tf_eflags &= ~PSL_C;	/* success */
	frame.tf_edx = 1;

	userret(p, &frame, 0);
#ifdef KTRACE
	if (KTRPOINT(p->p_thread, KTR_SYSRET))
		ktrsysret(p->p_tracep, SYS_fork, 0, 0);
#endif
#ifdef SMP
	KKASSERT(curthread->td_mpcount == 1);
	rel_mplock();
#endif
}