Remove upc_{control,register} syscalls and everything that has to do with it.
[dragonfly.git] / sys / platform / pc32 / i386 / trap.c
/*-
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $
 */

/*
 * 386 Trap and System call handling
 */

#include "use_isa.h"
#include "use_npx.h"

#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_ktrace.h"
#include "opt_clock.h"
#include "opt_trap.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pioctl.h>
#include <sys/kernel.h>
#include <sys/kerneldump.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/signal2.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#include <sys/malloc.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#include <sys/ktr.h>
#include <sys/vkernel.h>
#include <sys/sysproto.h>
#include <sys/sysunion.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>

#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <machine/tss.h>
#include <machine/specialreg.h>
#include <machine/globaldata.h>
#include <machine/intr_machdep.h>

#include <machine_base/isa/isa_intr.h>
#include <machine_base/apic/lapic.h>

#ifdef POWERFAIL_NMI
#include <sys/syslog.h>
#include <machine/clock.h>
#endif

#include <machine/vm86.h>

#include <ddb/ddb.h>

#include <sys/msgport2.h>
#include <sys/thread2.h>
#include <sys/mplock2.h>

#define MAKEMPSAFE(have_mplock)			\
	if (have_mplock == 0) {			\
		get_mplock();			\
		have_mplock = 1;		\
	}

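/*
 * MAKEMPSAFE() is idempotent: a handler starts with have_mplock == 0,
 * invokes the macro on any path that still requires the Giant mplock,
 * and releases at most one acquisition on the way out:
 *
 *	int have_mplock = 0;
 *	...
 *	MAKEMPSAFE(have_mplock);	(safe to invoke repeatedly)
 *	...
 *	if (have_mplock)
 *		rel_mplock();
 *
 * trap() and syscall2() below both follow this pattern.
 */
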
int (*pmath_emulate) (struct trapframe *);

extern void trap (struct trapframe *frame);
extern void syscall2 (struct trapframe *frame);

static int trap_pfault (struct trapframe *, int, vm_offset_t);
static void trap_fatal (struct trapframe *, vm_offset_t);
void dblfault_handler (void);

extern inthand_t IDTVEC(syscall);

#define MAX_TRAP_MSG		28
static char *trap_msg[] = {
	"",					/*  0 unused */
	"privileged instruction fault",		/*  1 T_PRIVINFLT */
	"",					/*  2 unused */
	"breakpoint instruction fault",		/*  3 T_BPTFLT */
	"",					/*  4 unused */
	"",					/*  5 unused */
	"arithmetic trap",			/*  6 T_ARITHTRAP */
	"system forced exception",		/*  7 T_ASTFLT */
	"",					/*  8 unused */
	"general protection fault",		/*  9 T_PROTFLT */
	"trace trap",				/* 10 T_TRCTRAP */
	"",					/* 11 unused */
	"page fault",				/* 12 T_PAGEFLT */
	"",					/* 13 unused */
	"alignment fault",			/* 14 T_ALIGNFLT */
	"",					/* 15 unused */
	"",					/* 16 unused */
	"",					/* 17 unused */
	"integer divide fault",			/* 18 T_DIVIDE */
	"non-maskable interrupt trap",		/* 19 T_NMI */
	"overflow trap",			/* 20 T_OFLOW */
	"FPU bounds check fault",		/* 21 T_BOUND */
	"FPU device not available",		/* 22 T_DNA */
	"double fault",				/* 23 T_DOUBLEFLT */
	"FPU operand fetch fault",		/* 24 T_FPOPFLT */
	"invalid TSS fault",			/* 25 T_TSSFLT */
	"segment not present fault",		/* 26 T_SEGNPFLT */
	"stack fault",				/* 27 T_STKFLT */
	"machine check trap",			/* 28 T_MCHK */
};

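/*
 * trap_msg[] is indexed directly by the T_* trap number; every consumer
 * (trap_fatal() and the DEBUG uprintf() in trap()) range-checks against
 * MAX_TRAP_MSG first, so the empty slots are never printed.
 */
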
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
extern int has_f00f_bug;
#endif

#ifdef DDB
static int ddb_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, ddb_on_nmi, CTLFLAG_RW,
	&ddb_on_nmi, 0, "Go to DDB on NMI");
#endif
static int panic_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
	&panic_on_nmi, 0, "Panic on NMI");
static int fast_release;
SYSCTL_INT(_machdep, OID_AUTO, fast_release, CTLFLAG_RW,
	&fast_release, 0, "Passive Release was optimal");
static int slow_release;
SYSCTL_INT(_machdep, OID_AUTO, slow_release, CTLFLAG_RW,
	&slow_release, 0, "Passive Release was nonoptimal");

MALLOC_DEFINE(M_SYSMSG, "sysmsg", "sysmsg structure");
extern int max_sysmsg;

/*
 * Passively intercepts the thread switch function to increase the thread
 * priority from a user priority to a kernel priority, reducing
 * syscall and trap overhead for the case where no switch occurs.
 *
 * Synchronizes td_ucred with p_ucred.  This is used by system calls,
 * signal handling, faults, AST traps, and anything else that enters the
 * kernel from userland and provides the kernel with a stable read-only
 * copy of the process ucred.
 */
static __inline void
userenter(struct thread *curtd, struct proc *curp)
{
	struct ucred *ocred;
	struct ucred *ncred;

	curtd->td_release = lwkt_passive_release;

	if (curtd->td_ucred != curp->p_ucred) {
		ncred = crhold(curp->p_ucred);
		ocred = curtd->td_ucred;
		curtd->td_ucred = ncred;
		if (ocred)
			crfree(ocred);
	}

}

/*
 * Handle signals, profiling, and other AST's and/or tasks that
 * must be completed before we can return to or try to return to userland.
 *
 * Note that td_sticks is a 64 bit quantity, but there's no point doing 64
 * bit arithmetic on the delta calculation so the absolute tick values are
 * truncated to an integer.
 */
static void
userret(struct lwp *lp, struct trapframe *frame, int sticks)
{
	struct proc *p = lp->lwp_proc;
	void (*hook)(void);
	int sig;

	if (p->p_userret != NULL) {
		hook = p->p_userret;
		p->p_userret = NULL;
		(*hook)();
	}

	/*
	 * Charge system time if profiling.  Note: times are in microseconds.
	 * This may do a copyout and block, so do it first even though it
	 * means some system time will be charged as user time.
	 */
	if (p->p_flags & P_PROFIL) {
		addupc_task(p, frame->tf_eip,
			(u_int)((int)lp->lwp_thread->td_sticks - sticks));
	}

recheck:
	/*
	 * Specific on-return-to-usermode checks (LWP_MP_WEXIT,
	 * LWP_MP_VNLRU, etc).
	 */
	if (lp->lwp_mpflags & LWP_MP_URETMASK)
		lwpuserret(lp);

	/*
	 * Block here if we are in a stopped state.
	 */
	if (p->p_stat == SSTOP || dump_stop_usertds) {
		lwkt_gettoken(&p->p_token);
		tstop();
		lwkt_reltoken(&p->p_token);
		goto recheck;
	}

	/*
	 * Post any pending SIGVTALRM or SIGPROF signals.  If running a
	 * virtual kernel be sure to restore the virtual kernel's vmspace
	 * before posting the signal.
	 */
	if (p->p_flags & (P_SIGVTALRM | P_SIGPROF)) {
		lwkt_gettoken(&p->p_token);
		if (p->p_flags & P_SIGVTALRM) {
			p->p_flags &= ~P_SIGVTALRM;
			ksignal(p, SIGVTALRM);
		}
		if (p->p_flags & P_SIGPROF) {
			p->p_flags &= ~P_SIGPROF;
			ksignal(p, SIGPROF);
		}
		lwkt_reltoken(&p->p_token);
		goto recheck;
	}

	/*
	 * Post any pending signals.  If running a virtual kernel be sure
	 * to restore the virtual kernel's vmspace before posting the signal.
	 *
	 * WARNING!  postsig() can exit and not return.
	 */
	if ((sig = CURSIG_TRACE(lp)) != 0) {
		lwkt_gettoken(&p->p_token);
		postsig(sig);
		lwkt_reltoken(&p->p_token);
		goto recheck;
	}

	/*
	 * Block here if we are swapped out, but still process signals
	 * (such as SIGKILL).  proc0 (the swapin scheduler) is already
	 * aware of our situation, we do not have to wake it up.
	 */
	if (p->p_flags & P_SWAPPEDOUT) {
		lwkt_gettoken(&p->p_token);
		get_mplock();
		p->p_flags |= P_SWAPWAIT;
		swapin_request();
		if (p->p_flags & P_SWAPWAIT)
			tsleep(p, PCATCH, "SWOUT", 0);
		p->p_flags &= ~P_SWAPWAIT;
		rel_mplock();
		lwkt_reltoken(&p->p_token);
		goto recheck;
	}

	/*
	 * In a multi-threaded program it is possible for a thread to change
	 * signal state during a system call which temporarily changes the
	 * signal mask.  In this case postsig() might not be run and we
	 * have to restore the mask ourselves.
	 */
	if (lp->lwp_flags & LWP_OLDMASK) {
		lp->lwp_flags &= ~LWP_OLDMASK;
		lp->lwp_sigmask = lp->lwp_oldsigmask;
		goto recheck;
	}
}

/*
 * Cleanup from userenter and any passive release that might have occurred.
 * We must reclaim the current-process designation before we can return
 * to usermode.  We also handle both LWKT and USER reschedule requests.
 */
static __inline void
userexit(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;
	/* globaldata_t gd = td->td_gd; */

	/*
	 * Handle stop requests at kernel priority.  Any requests queued
	 * after this loop will generate another AST.
	 */
	while (lp->lwp_proc->p_stat == SSTOP) {
		lwkt_gettoken(&lp->lwp_proc->p_token);
		tstop();
		lwkt_reltoken(&lp->lwp_proc->p_token);
	}

	/*
	 * Become the current user scheduled process if we aren't already,
	 * and deal with reschedule requests and other factors.
	 */
	lp->lwp_proc->p_usched->acquire_curproc(lp);
	/* WARNING: we may have migrated cpu's */
	/* gd = td->td_gd; */

	/*
	 * Reduce our priority in preparation for a return to userland.  If
	 * our passive release function was still in place, our priority was
	 * never raised and does not need to be reduced.
	 */
	lwkt_passive_recover(td);
}

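/*
 * Note the pairing: userenter() merely arms lwkt_passive_release() as
 * td_release, so the thread priority is only actually raised if a switch
 * intervenes; lwkt_passive_recover() above then undoes whichever of the
 * two cases ended up happening before the thread returns to userland.
 */
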
#if !defined(KTR_KERNENTRY)
#define	KTR_KERNENTRY	KTR_ALL
#endif
KTR_INFO_MASTER(kernentry);
KTR_INFO(KTR_KERNENTRY, kernentry, trap, 0,
	 "TRAP(pid %d, tid %d, trapno %d, eva %lu)",
	 pid_t pid, lwpid_t tid, register_t trapno, vm_offset_t eva);
KTR_INFO(KTR_KERNENTRY, kernentry, trap_ret, 0, "TRAP_RET(pid %d, tid %d)",
	 pid_t pid, lwpid_t tid);
KTR_INFO(KTR_KERNENTRY, kernentry, syscall, 0, "SYSC(pid %d, tid %d, nr %d)",
	 pid_t pid, lwpid_t tid, register_t trapno);
KTR_INFO(KTR_KERNENTRY, kernentry, syscall_ret, 0, "SYSRET(pid %d, tid %d, err %d)",
	 pid_t pid, lwpid_t tid, int err);
KTR_INFO(KTR_KERNENTRY, kernentry, fork_ret, 0, "FORKRET(pid %d, tid %d)",
	 pid_t pid, lwpid_t tid);

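/*
 * These probes come in entry/return pairs: KTR_LOG(kernentry_trap, ...)
 * and KTR_LOG(kernentry_trap_ret, ...) bracket trap(), the syscall pair
 * brackets syscall2(), and fork_return() logs only the return side, so
 * a ktr(4) dump shows each kernel entry with its arguments and duration.
 */
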
/*
 * Exception, fault, and trap interface to the kernel.
 * This common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed.
 *
 * This function is also called from doreti in an interlock to handle ASTs.
 * For example:  hardwareint->INTROUTINE->(set ast)->doreti->trap
 *
 * NOTE!  We have to retrieve the fault address prior to obtaining the
 * MP lock because get_mplock() may switch out.  YYY cr2 really ought
 * to be retrieved by the assembly code, not here.
 *
 * XXX gd_trap_nesting_level currently prevents lwkt_switch() from panicking
 * if an attempt is made to switch from a fast interrupt or IPI.  This is
 * necessary to properly take fatal kernel traps on SMP machines if
 * get_mplock() has to block.
 */

void
trap(struct trapframe *frame)
{
	struct globaldata *gd = mycpu;
	struct thread *td = gd->gd_curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p;
	int sticks = 0;
	int i = 0, ucode = 0, type, code;
	int have_mplock = 0;
#ifdef INVARIANTS
	int crit_count = td->td_critcount;
	lwkt_tokref_t curstop = td->td_toks_stop;
#endif
	vm_offset_t eva;

	p = td->td_proc;
#ifdef DDB
	/*
	 * We need to allow T_DNA faults when the debugger is active since
	 * some dumping paths do large bcopy() which uses the floating
	 * point registers for faster copying.
	 */
	if (db_active && frame->tf_trapno != T_DNA) {
		eva = (frame->tf_trapno == T_PAGEFLT ? rcr2() : 0);
		++gd->gd_trap_nesting_level;
		MAKEMPSAFE(have_mplock);
		trap_fatal(frame, eva);
		--gd->gd_trap_nesting_level;
		goto out2;
	}
#endif

	eva = 0;
	++gd->gd_trap_nesting_level;
	if (frame->tf_trapno == T_PAGEFLT) {
		/*
		 * For some Cyrix CPUs, %cr2 is clobbered by interrupts.
		 * This problem is worked around by using an interrupt
		 * gate for the pagefault handler.  We are finally ready
		 * to read %cr2 and then must reenable interrupts.
		 *
		 * XXX this should be in the switch statement, but the
		 * NO_F00F_HACK and VM86 goto and ifdefs obfuscate the
		 * flow of control too much for this to be obviously
		 * correct.
		 */
		eva = rcr2();
		cpu_enable_intr();
	}

	--gd->gd_trap_nesting_level;

	if (!(frame->tf_eflags & PSL_I)) {
		/*
		 * Buggy application or kernel code has disabled interrupts
		 * and then trapped.  Enabling interrupts now is wrong, but
		 * it is better than running with interrupts disabled until
		 * they are accidentally enabled later.
		 */
		type = frame->tf_trapno;
		if (ISPL(frame->tf_cs)==SEL_UPL || (frame->tf_eflags & PSL_VM)) {
			MAKEMPSAFE(have_mplock);
			kprintf(
			    "pid %ld (%s): trap %d with interrupts disabled\n",
			    (long)curproc->p_pid, curproc->p_comm, type);
		} else if (type != T_BPTFLT && type != T_TRCTRAP) {
			/*
			 * XXX not quite right, since this may be for a
			 * multiple fault in user mode.
			 */
			MAKEMPSAFE(have_mplock);
			kprintf("kernel trap %d with interrupts disabled\n",
			    type);
		}
		cpu_enable_intr();
	}

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
restart:
#endif
	type = frame->tf_trapno;
	code = frame->tf_err;

	if (in_vm86call) {
		if (frame->tf_eflags & PSL_VM &&
		    (type == T_PROTFLT || type == T_STKFLT)) {
			KKASSERT(get_mplock_count(curthread) > 0);
			i = vm86_emulate((struct vm86frame *)frame);
			KKASSERT(get_mplock_count(curthread) > 0);
			if (i != 0) {
				/*
				 * returns to original process
				 */
				vm86_trap((struct vm86frame *)frame,
					  have_mplock);
				KKASSERT(0); /* NOT REACHED */
			}
			goto out2;
		}
		switch (type) {
			/*
			 * these traps want either a process context, or
			 * assume a normal userspace trap.
			 */
		case T_PROTFLT:
		case T_SEGNPFLT:
			trap_fatal(frame, eva);
			goto out2;
		case T_TRCTRAP:
			type = T_BPTFLT;	/* kernel breakpoint */
			/* FALL THROUGH */
		}
		goto kernel_trap;	/* normal kernel trap handling */
	}

	if ((ISPL(frame->tf_cs) == SEL_UPL) || (frame->tf_eflags & PSL_VM)) {
		/* user trap */

		KTR_LOG(kernentry_trap, p->p_pid, lp->lwp_tid,
			frame->tf_trapno, eva);

		userenter(td, p);

		sticks = (int)td->td_sticks;
		lp->lwp_md.md_regs = frame;

		switch (type) {
		case T_PRIVINFLT:	/* privileged instruction fault */
			i = SIGILL;
			ucode = ILL_PRVOPC;
			break;

		case T_BPTFLT:		/* bpt instruction fault */
		case T_TRCTRAP:		/* trace trap */
			frame->tf_eflags &= ~PSL_T;
			i = SIGTRAP;
			ucode = (type == T_TRCTRAP ? TRAP_TRACE : TRAP_BRKPT);
			break;

		case T_ARITHTRAP:	/* arithmetic trap */
			ucode = code;
			i = SIGFPE;
			break;

		case T_ASTFLT:		/* Allow process switch */
			mycpu->gd_cnt.v_soft++;
			if (mycpu->gd_reqflags & RQF_AST_OWEUPC) {
				atomic_clear_int(&mycpu->gd_reqflags,
						 RQF_AST_OWEUPC);
				addupc_task(p, p->p_prof.pr_addr,
					    p->p_prof.pr_ticks);
			}
			goto out;

			/*
			 * The following two traps can happen in
			 * vm86 mode, and, if so, we want to handle
			 * them specially.
			 */
		case T_PROTFLT:		/* general protection fault */
		case T_STKFLT:		/* stack fault */
			if (frame->tf_eflags & PSL_VM) {
				i = vm86_emulate((struct vm86frame *)frame);
				if (i == 0)
					goto out;
				break;
			}
			i = SIGBUS;
			ucode = (type == T_PROTFLT) ? BUS_OBJERR : BUS_ADRERR;
			break;
		case T_SEGNPFLT:	/* segment not present fault */
			i = SIGBUS;
			ucode = BUS_ADRERR;
			break;
		case T_TSSFLT:		/* invalid TSS fault */
		case T_DOUBLEFLT:	/* double fault */
		default:
			i = SIGBUS;
			ucode = BUS_OBJERR;
			break;

		case T_PAGEFLT:		/* page fault */
			i = trap_pfault(frame, TRUE, eva);
			if (i == -1)
				goto out;
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
			if (i == -2)
				goto restart;
#endif
			if (i == 0)
				goto out;

			if (i == SIGSEGV)
				ucode = SEGV_MAPERR;
			else {
				i = SIGSEGV;
				ucode = SEGV_ACCERR;
			}
			break;

		case T_DIVIDE:		/* integer divide fault */
			ucode = FPE_INTDIV;
			i = SIGFPE;
			break;

#if NISA > 0
		case T_NMI:
			MAKEMPSAFE(have_mplock);
#ifdef POWERFAIL_NMI
			goto handle_powerfail;
#else /* !POWERFAIL_NMI */
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					kprintf ("NMI ... going to debugger\n");
					kdb_trap (type, 0, frame);
				}
#endif /* DDB */
				goto out2;
			} else if (panic_on_nmi)
				panic("NMI indicates hardware failure");
			break;
#endif /* POWERFAIL_NMI */
#endif /* NISA > 0 */

		case T_OFLOW:		/* integer overflow fault */
			ucode = FPE_INTOVF;
			i = SIGFPE;
			break;

		case T_BOUND:		/* bounds check fault */
			ucode = FPE_FLTSUB;
			i = SIGFPE;
			break;

		case T_DNA:
			/*
			 * Virtual kernel intercept - pass the DNA exception
			 * to the virtual kernel if it asked to handle it.
			 * This occurs when the virtual kernel is holding
			 * onto the FP context for a different emulated
			 * process than the one currently running.
			 *
			 * We must still call npxdna() since we may have
			 * saved FP state that the virtual kernel needs
			 * to hand over to a different emulated process.
			 */
			if (lp->lwp_vkernel && lp->lwp_vkernel->ve &&
			    (td->td_pcb->pcb_flags & FP_VIRTFP)
			) {
				npxdna();
				break;
			}

#if NNPX > 0
			/*
			 * The kernel may have switched out the FP unit's
			 * state, causing the user process to take a fault
			 * when it tries to use the FP unit.  Restore the
			 * state here
			 */
			if (npxdna())
				goto out;
#endif
			if (!pmath_emulate) {
				i = SIGFPE;
				ucode = FPE_FPU_NP_TRAP;
				break;
			}
			i = (*pmath_emulate)(frame);
			if (i == 0) {
				if (!(frame->tf_eflags & PSL_T))
					goto out2;
				frame->tf_eflags &= ~PSL_T;
				i = SIGTRAP;
			}
			/* else ucode = emulator_only_knows() XXX */
			break;

		case T_FPOPFLT:		/* FPU operand fetch fault */
			ucode = ILL_COPROC;
			i = SIGILL;
			break;

		case T_XMMFLT:		/* SIMD floating-point exception */
			ucode = 0; /* XXX */
			i = SIGFPE;
			break;
		}
	} else {
kernel_trap:
		/* kernel trap */

		switch (type) {
		case T_PAGEFLT:			/* page fault */
			trap_pfault(frame, FALSE, eva);
			goto out2;

		case T_DNA:
#if NNPX > 0
			/*
			 * The kernel may be using npx for copying or other
			 * purposes.
			 */
			if (npxdna())
				goto out2;
#endif
			break;

		case T_PROTFLT:		/* general protection fault */
		case T_SEGNPFLT:	/* segment not present fault */
			/*
			 * Invalid segment selectors and out of bounds
			 * %eip's and %esp's can be set up in user mode.
			 * This causes a fault in kernel mode when the
			 * kernel tries to return to user mode.  We want
			 * to get this fault so that we can fix the
			 * problem here and not have to check all the
			 * selectors and pointers when the user changes
			 * them.
			 */
#define MAYBE_DORETI_FAULT(where, whereto)				\
	do {								\
		if (frame->tf_eip == (int)where) {			\
			frame->tf_eip = (int)whereto;			\
			goto out2;					\
		}							\
	} while (0)
			if (mycpu->gd_intr_nesting_level == 0) {
				/*
				 * Invalid %fs's and %gs's can be created using
				 * procfs or PT_SETREGS or by invalidating the
				 * underlying LDT entry.  This causes a fault
				 * in kernel mode when the kernel attempts to
				 * switch contexts.  Lose the bad context
				 * (XXX) so that we can continue, and generate
				 * a signal.
				 */
				MAYBE_DORETI_FAULT(doreti_iret,
						   doreti_iret_fault);
				MAYBE_DORETI_FAULT(doreti_popl_ds,
						   doreti_popl_ds_fault);
				MAYBE_DORETI_FAULT(doreti_popl_es,
						   doreti_popl_es_fault);
				MAYBE_DORETI_FAULT(doreti_popl_fs,
						   doreti_popl_fs_fault);
				MAYBE_DORETI_FAULT(doreti_popl_gs,
						   doreti_popl_gs_fault);

				/*
				 * NOTE: cpu doesn't push esp on kernel trap
				 */
				if (td->td_pcb->pcb_onfault &&
				    td->td_pcb->pcb_onfault_sp ==
				    (int)&frame->tf_esp) {
					frame->tf_eip =
					    (register_t)td->td_pcb->pcb_onfault;
					goto out2;
				}
			}
			break;

		case T_TSSFLT:
			/*
			 * PSL_NT can be set in user mode and isn't cleared
			 * automatically when the kernel is entered.  This
			 * causes a TSS fault when the kernel attempts to
			 * `iret' because the TSS link is uninitialized.  We
			 * want to get this fault so that we can fix the
			 * problem here and not every time the kernel is
			 * entered.
			 */
			if (frame->tf_eflags & PSL_NT) {
				frame->tf_eflags &= ~PSL_NT;
				goto out2;
			}
			break;

		case T_TRCTRAP:	 /* trace trap */
			if (frame->tf_eip == (int)IDTVEC(syscall)) {
				/*
				 * We've just entered system mode via the
				 * syscall lcall.  Continue single stepping
				 * silently until the syscall handler has
				 * saved the flags.
				 */
				goto out2;
			}
			if (frame->tf_eip == (int)IDTVEC(syscall) + 1) {
				/*
				 * The syscall handler has now saved the
				 * flags.  Stop single stepping it.
				 */
				frame->tf_eflags &= ~PSL_T;
				goto out2;
			}
			/*
			 * Ignore debug register trace traps due to
			 * accesses in the user's address space, which
			 * can happen under several conditions such as
			 * if a user sets a watchpoint on a buffer and
			 * then passes that buffer to a system call.
			 * We still want to get TRCTRAPS for addresses
			 * in kernel space because that is useful when
			 * debugging the kernel.
			 */
			if (user_dbreg_trap()) {
				/*
				 * Reset breakpoint bits because the
				 * processor doesn't
				 */
				load_dr6(rdr6() & 0xfffffff0);
				goto out2;
			}
			/*
			 * FALLTHROUGH (TRCTRAP kernel mode, kernel address)
			 */
		case T_BPTFLT:
			/*
			 * If DDB is enabled, let it handle the debugger trap.
			 * Otherwise, debugger traps "can't happen".
			 */
			ucode = TRAP_BRKPT;
#ifdef DDB
			MAKEMPSAFE(have_mplock);
			if (kdb_trap (type, 0, frame))
				goto out2;
#endif
			break;

#if NISA > 0
		case T_NMI:
			MAKEMPSAFE(have_mplock);
#ifdef POWERFAIL_NMI
#ifndef TIMER_FREQ
# define TIMER_FREQ	1193182
#endif
	handle_powerfail:
		{
			static unsigned lastalert = 0;

			if (time_second - lastalert > 10) {
				log(LOG_WARNING, "NMI: power fail\n");
				sysbeep(TIMER_FREQ/880, hz);
				lastalert = time_second;
			}
			/* YYY mp count */
			goto out2;
		}
#else /* !POWERFAIL_NMI */
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					kprintf ("NMI ... going to debugger\n");
					kdb_trap (type, 0, frame);
				}
#endif /* DDB */
				goto out2;
			} else if (panic_on_nmi == 0)
				goto out2;
			/* FALL THROUGH */
#endif /* POWERFAIL_NMI */
#endif /* NISA > 0 */
		}

		MAKEMPSAFE(have_mplock);
		trap_fatal(frame, eva);
		goto out2;
	}

	/*
	 * Virtual kernel intercept - if the fault is directly related to a
	 * VM context managed by a virtual kernel then let the virtual kernel
	 * handle it.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		vkernel_trap(lp, frame);
		goto out;
	}

	/* Translate fault for emulators (e.g. Linux) */
	if (*p->p_sysent->sv_transtrap)
		i = (*p->p_sysent->sv_transtrap)(i, type);

	MAKEMPSAFE(have_mplock);
	trapsignal(lp, i, ucode);

#ifdef DEBUG
	if (type <= MAX_TRAP_MSG) {
		uprintf("fatal process exception: %s",
			trap_msg[type]);
		if ((type == T_PAGEFLT) || (type == T_PROTFLT))
			uprintf(", fault VA = 0x%lx", (u_long)eva);
		uprintf("\n");
	}
#endif

out:
	userret(lp, frame, sticks);
	userexit(lp);
out2:	;
	if (have_mplock)
		rel_mplock();
	if (p != NULL && lp != NULL)
		KTR_LOG(kernentry_trap_ret, p->p_pid, lp->lwp_tid);
#ifdef INVARIANTS
	KASSERT(crit_count == td->td_critcount,
		("trap: critical section count mismatch! %d/%d",
		crit_count, td->td_pri));
	KASSERT(curstop == td->td_toks_stop,
		("trap: extra tokens held after trap! %zd/%zd",
		curstop - &td->td_toks_base,
		td->td_toks_stop - &td->td_toks_base));
#endif
}

int
trap_pfault(struct trapframe *frame, int usermode, vm_offset_t eva)
{
	vm_offset_t va;
	struct vmspace *vm = NULL;
	vm_map_t map = 0;
	int rv = 0;
	int fault_flags;
	vm_prot_t ftype;
	thread_t td = curthread;
	struct lwp *lp = td->td_lwp;

	va = trunc_page(eva);
	if (va >= KERNBASE) {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 * An exception:  if the faulting address is the invalid
		 * instruction entry in the IDT, then the Intel Pentium
		 * F00F bug workaround was triggered, and we need to
		 * treat it as an illegal instruction, and not a page
		 * fault.
		 */
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
		if ((eva == (unsigned int)&idt[6]) && has_f00f_bug) {
			frame->tf_trapno = T_PRIVINFLT;
			return -2;
		}
#endif
		if (usermode)
			goto nogo;

		map = &kernel_map;
	} else {
		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL. If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */
		if (lp != NULL)
			vm = lp->lwp_vmspace;

		if (vm == NULL)
			goto nogo;

		map = &vm->vm_map;
	}

	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_WRITE;
	else
		ftype = VM_PROT_READ;

	if (map != &kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		PHOLD(lp->lwp_proc);

		/*
		 * Issue fault
		 */
		fault_flags = 0;
		if (usermode)
			fault_flags |= VM_FAULT_BURST;
		if (ftype & VM_PROT_WRITE)
			fault_flags |= VM_FAULT_DIRTY;
		else
			fault_flags |= VM_FAULT_NORMAL;
		rv = vm_fault(map, va, ftype, fault_flags);
		PRELE(lp->lwp_proc);
	} else {
		/*
		 * Don't have to worry about process locking or stacks in the
		 * kernel.
		 */
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	}
	if (rv == KERN_SUCCESS)
		return (0);
nogo:
	if (!usermode) {
		/*
		 * NOTE: cpu doesn't push esp on kernel trap
		 */
		if (td->td_gd->gd_intr_nesting_level == 0 &&
		    td->td_pcb->pcb_onfault &&
		    td->td_pcb->pcb_onfault_sp == (int)&frame->tf_esp) {
			frame->tf_eip = (register_t)td->td_pcb->pcb_onfault;
			return (0);
		}
		if (td->td_gd->gd_intr_nesting_level == 0 &&
		    td->td_pcb->pcb_onfault) {
			kprintf("ESP mismatch %p %08x\n",
				&frame->tf_esp, td->td_pcb->pcb_onfault_sp);
		}
		trap_fatal(frame, eva);
		return (-1);
	}

	/* kludge to pass faulting virtual address to sendsig */
	frame->tf_xflags = frame->tf_err;
	frame->tf_err = eva;

	return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}

static void
trap_fatal(struct trapframe *frame, vm_offset_t eva)
{
	int code, type, ss, esp;
	struct soft_segment_descriptor softseg;

	code = frame->tf_err;
	type = frame->tf_trapno;
	sdtossd(&gdt[mycpu->gd_cpuid * NGDT + IDXSEL(frame->tf_cs & 0xffff)].sd, &softseg);

	if (type <= MAX_TRAP_MSG)
		kprintf("\n\nFatal trap %d: %s while in %s mode\n",
			type, trap_msg[type],
			frame->tf_eflags & PSL_VM ? "vm86" :
			ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
	/* three separate prints in case of a trap on an unmapped page */
	kprintf("cpuid = %d; ", mycpu->gd_cpuid);
	kprintf("lapic.id = %08x\n", lapic->id);
	if (type == T_PAGEFLT) {
		kprintf("fault virtual address	= %p\n", (void *)eva);
		kprintf("fault code		= %s %s, %s\n",
			code & PGEX_U ? "user" : "supervisor",
			code & PGEX_W ? "write" : "read",
			code & PGEX_P ? "protection violation" : "page not present");
	}
	kprintf("instruction pointer	= 0x%x:0x%x\n",
		frame->tf_cs & 0xffff, frame->tf_eip);
	if ((ISPL(frame->tf_cs) == SEL_UPL) || (frame->tf_eflags & PSL_VM)) {
		ss = frame->tf_ss & 0xffff;
		esp = frame->tf_esp;
	} else {
		ss = GSEL(GDATA_SEL, SEL_KPL);
		esp = (int)&frame->tf_esp;
	}
	kprintf("stack pointer		= 0x%x:0x%x\n", ss, esp);
	kprintf("frame pointer		= 0x%x:0x%x\n", ss, frame->tf_ebp);
	kprintf("code segment		= base 0x%x, limit 0x%x, type 0x%x\n",
		softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
	kprintf("			= DPL %d, pres %d, def32 %d, gran %d\n",
		softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_def32,
		softseg.ssd_gran);
	kprintf("processor eflags	= ");
	if (frame->tf_eflags & PSL_T)
		kprintf("trace trap, ");
	if (frame->tf_eflags & PSL_I)
		kprintf("interrupt enabled, ");
	if (frame->tf_eflags & PSL_NT)
		kprintf("nested task, ");
	if (frame->tf_eflags & PSL_RF)
		kprintf("resume, ");
	if (frame->tf_eflags & PSL_VM)
		kprintf("vm86, ");
	kprintf("IOPL = %d\n", (frame->tf_eflags & PSL_IOPL) >> 12);
	kprintf("current process		= ");
	if (curproc) {
		kprintf("%lu (%s)\n",
		    (u_long)curproc->p_pid, curproc->p_comm ?
		    curproc->p_comm : "");
	} else {
		kprintf("Idle\n");
	}
	kprintf("current thread		= pri %d ", curthread->td_pri);
	if (curthread->td_critcount)
		kprintf("(CRIT)");
	kprintf("\n");
/**
 *  XXX FIXME:
 *	we probably SHOULD have stopped the other CPUs before now!
 *	another CPU COULD have been touching cpl at this moment...
 */
	kprintf(" <- SMP: XXX");
	kprintf("\n");

#ifdef KDB
	if (kdb_trap(&psl))
		return;
#endif
#ifdef DDB
	if ((debugger_on_panic || db_active) && kdb_trap(type, code, frame))
		return;
#endif
	kprintf("trap number		= %d\n", type);
	if (type <= MAX_TRAP_MSG)
		panic("%s", trap_msg[type]);
	else
		panic("unknown/reserved trap");
}

/*
 * Double fault handler.  Called when a fault occurs while writing
 * a frame for a trap/exception onto the stack.  This usually occurs
 * when the stack overflows (such is the case with infinite recursion,
 * for example).
 *
 * XXX Note that the current PTD gets replaced by IdlePTD when the
 * task switch occurs.  This means that the stack that was active at
 * the time of the double fault is not available at <kstack> unless
 * the machine was idle when the double fault occurred.  The downside
 * of this is that "trace <ebp>" in ddb won't work.
 */
static __inline
int
in_kstack_guard(register_t rptr)
{
	thread_t td = curthread;

	if ((char *)rptr >= td->td_kstack &&
	    (char *)rptr < td->td_kstack + PAGE_SIZE) {
		return 1;
	}
	return 0;
}

void
dblfault_handler(void)
{
	struct mdglobaldata *gd = mdcpu;

	if (in_kstack_guard(gd->gd_common_tss.tss_esp) ||
	    in_kstack_guard(gd->gd_common_tss.tss_ebp)) {
		kprintf("DOUBLE FAULT - KERNEL STACK GUARD HIT!\n");
	} else {
		kprintf("DOUBLE FAULT:\n");
	}
	kprintf("eip = 0x%x\n", gd->gd_common_tss.tss_eip);
	kprintf("esp = 0x%x\n", gd->gd_common_tss.tss_esp);
	kprintf("ebp = 0x%x\n", gd->gd_common_tss.tss_ebp);
	/* three separate prints in case of a trap on an unmapped page */
	kprintf("cpuid = %d; ", gd->mi.gd_cpuid);
	kprintf("lapic.id = %08x\n", lapic->id);
	panic("double fault");
}

/*
 * syscall2 -	MP aware system call request C handler
 *
 * A system call is essentially treated as a trap.  The MP lock is not
 * held on entry or return.  We are responsible for handling ASTs
 * (e.g. a task switch) prior to return.
 *
 * MPSAFE
 */
void
syscall2(struct trapframe *frame)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;
	caddr_t params;
	struct sysent *callp;
	register_t orig_tf_eflags;
	int sticks;
	int error;
	int narg;
#ifdef INVARIANTS
	int crit_count = td->td_critcount;
#endif
	int have_mplock = 0;
	u_int code;
	union sysunion args;

#ifdef DIAGNOSTIC
	if (ISPL(frame->tf_cs) != SEL_UPL) {
		get_mplock();
		panic("syscall");
		/* NOT REACHED */
	}
#endif

	KTR_LOG(kernentry_syscall, p->p_pid, lp->lwp_tid,
		frame->tf_eax);

	userenter(td, p);	/* lazy raise our priority */

	/*
	 * Misc
	 */
	sticks = (int)td->td_sticks;
	orig_tf_eflags = frame->tf_eflags;

	/*
	 * Virtual kernel intercept - if a VM context managed by a virtual
	 * kernel issues a system call the virtual kernel handles it, not us.
	 * Restore the virtual kernel context and return from its system
	 * call.  The current frame is copied out to the virtual kernel.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		vkernel_trap(lp, frame);
		error = EJUSTRETURN;
		callp = NULL;
		goto out;
	}

	/*
	 * Get the system call parameters and account for time
	 */
	lp->lwp_md.md_regs = frame;
	params = (caddr_t)frame->tf_esp + sizeof(int);
	code = frame->tf_eax;

	if (p->p_sysent->sv_prepsyscall) {
		(*p->p_sysent->sv_prepsyscall)(
			frame, (int *)(&args.nosys.sysmsg + 1),
			&code, &params);
	} else {
		/*
		 * Need to check if this is a 32 bit or 64 bit syscall.
		 * fuword is MP aware.
		 */
		if (code == SYS_syscall) {
			/*
			 * Code is first argument, followed by actual args.
			 */
			code = fuword(params);
			params += sizeof(int);
		} else if (code == SYS___syscall) {
			/*
			 * Like syscall, but code is a quad, so as to maintain
			 * quad alignment for the rest of the arguments.
			 */
			code = fuword(params);
			params += sizeof(quad_t);
		}
	}
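	/*
	 * For example, an indirect invocation such as syscall(SYS_getpid)
	 * arrives here with tf_eax == SYS_syscall and the real syscall
	 * number as the first word on the user stack, which is why params
	 * was advanced past it above.
	 */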

	code &= p->p_sysent->sv_mask;

	if (code >= p->p_sysent->sv_size)
		callp = &p->p_sysent->sv_table[0];
	else
		callp = &p->p_sysent->sv_table[code];

	narg = callp->sy_narg & SYF_ARGMASK;

#if 0
	if (p->p_sysent->sv_name[0] == 'L')
		kprintf("Linux syscall, code = %d\n", code);
#endif

	/*
	 * copyin is MP aware, but the tracing code is not
	 */
	if (narg && params) {
		error = copyin(params, (caddr_t)(&args.nosys.sysmsg + 1),
				narg * sizeof(register_t));
		if (error) {
#ifdef KTRACE
			if (KTRPOINT(td, KTR_SYSCALL)) {
				MAKEMPSAFE(have_mplock);

				ktrsyscall(lp, code, narg,
					(void *)(&args.nosys.sysmsg + 1));
			}
#endif
			goto bad;
		}
	}

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSCALL)) {
		MAKEMPSAFE(have_mplock);
		ktrsyscall(lp, code, narg, (void *)(&args.nosys.sysmsg + 1));
	}
#endif

	/*
	 * For traditional syscall code edx is left untouched when 32 bit
	 * results are returned.  Since edx is loaded from fds[1] when the
	 * system call returns we pre-set it here.
	 */
	args.sysmsg_fds[0] = 0;
	args.sysmsg_fds[1] = frame->tf_edx;

	/*
	 * The syscall might manipulate the trap frame.  If it does it
	 * will probably return EJUSTRETURN.
	 */
	args.sysmsg_frame = frame;

	STOPEVENT(p, S_SCE, narg);	/* MP aware */

	/*
	 * NOTE: All system calls run MPSAFE now.  The system call itself
	 *	 is responsible for getting the MP lock.
	 */
	error = (*callp->sy_call)(&args);

out:
	/*
	 * MP SAFE (we may or may not have the MP lock at this point)
	 */
	switch (error) {
	case 0:
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		lp = curthread->td_lwp;
		frame->tf_eax = args.sysmsg_fds[0];
		frame->tf_edx = args.sysmsg_fds[1];
		frame->tf_eflags &= ~PSL_C;
		break;
	case ERESTART:
		/*
		 * Reconstruct pc, assuming lcall $X,y is 7 bytes,
		 * int 0x80 is 2 bytes.  We saved this in tf_err.
		 */
		frame->tf_eip -= frame->tf_err;
		break;
	case EJUSTRETURN:
		break;
	case EASYNC:
		panic("Unexpected EASYNC return value (for now)");
	default:
bad:
		if (p->p_sysent->sv_errsize) {
			if (error >= p->p_sysent->sv_errsize)
				error = -1;	/* XXX */
			else
				error = p->p_sysent->sv_errtbl[error];
		}
		frame->tf_eax = error;
		frame->tf_eflags |= PSL_C;
		break;
	}

	/*
	 * Traced syscall.  trapsignal() is not MP aware.
	 */
	if ((orig_tf_eflags & PSL_T) && !(orig_tf_eflags & PSL_VM)) {
		MAKEMPSAFE(have_mplock);
		frame->tf_eflags &= ~PSL_T;
		trapsignal(lp, SIGTRAP, TRAP_TRACE);
	}

	/*
	 * Handle reschedule and other end-of-syscall issues
	 */
	userret(lp, frame, sticks);

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSRET)) {
		MAKEMPSAFE(have_mplock);
		ktrsysret(lp, code, error, args.sysmsg_result);
	}
#endif

	/*
	 * This works because errno is findable through the
	 * register set.  If we ever support an emulation where this
	 * is not the case, this code will need to be revisited.
	 */
	STOPEVENT(p, S_SCX, code);

	userexit(lp);
	/*
	 * Release the MP lock if we had to get it
	 */
	if (have_mplock)
		rel_mplock();
	KTR_LOG(kernentry_syscall_ret, p->p_pid, lp->lwp_tid, error);
#ifdef INVARIANTS
	KASSERT(crit_count == td->td_critcount,
		("syscall: critical section count mismatch! %d/%d",
		crit_count, td->td_pri));
	KASSERT(&td->td_toks_base == td->td_toks_stop,
		("syscall: extra tokens held after trap! %zd",
		td->td_toks_stop - &td->td_toks_base));
#endif
}

/*
 * NOTE: MP lock not held at any point.
 */
void
fork_return(struct lwp *lp, struct trapframe *frame)
{
	frame->tf_eax = 0;		/* Child returns zero */
	frame->tf_eflags &= ~PSL_C;	/* success */
	frame->tf_edx = 1;
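	/*
	 * The eax/edx pair follows the historical two-register fork
	 * return: edx == 1 marks the child side for the userland fork
	 * stub, while the parent path returns through syscall2() with
	 * edx taken from sysmsg_fds[1].
	 */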

	generic_lwp_return(lp, frame);
	KTR_LOG(kernentry_fork_ret, lp->lwp_proc->p_pid, lp->lwp_tid);
}

/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.
 *
 * This code will return back into the fork trampoline code which then
 * runs doreti.
 *
 * NOTE: The mplock is not held at any point.
 */
void
generic_lwp_return(struct lwp *lp, struct trapframe *frame)
{
	struct proc *p = lp->lwp_proc;

	/*
	 * Newly forked processes are given a kernel priority.  We have to
	 * adjust the priority to a normal user priority and fake entry
	 * into the kernel (call userenter()) to install a passive release
	 * function just in case userret() decides to stop the process.  This
	 * can occur when ^Z races a fork.  If we do not install the passive
	 * release function the current process designation will not be
	 * released when the thread goes to sleep.
	 */
	lwkt_setpri_self(TDPRI_USER_NORM);
	userenter(lp->lwp_thread, p);
	userret(lp, frame, 0);
#ifdef KTRACE
	if (KTRPOINT(lp->lwp_thread, KTR_SYSRET))
		ktrsysret(lp, SYS_fork, 0, 0);
#endif
	lp->lwp_flags |= LWP_PASSIVE_ACQ;
	userexit(lp);
	lp->lwp_flags &= ~LWP_PASSIVE_ACQ;
}

/*
 * If PGEX_FPFAULT is set then set FP_VIRTFP in the PCB to force a T_DNA
 * fault (which is then passed back to the virtual kernel) if an attempt is
 * made to use the FP unit.
 *
 * XXX this is a fairly big hack.
 */
void
set_vkernel_fp(struct trapframe *frame)
{
	struct thread *td = curthread;

	if (frame->tf_xflags & PGEX_FPFAULT) {
		td->td_pcb->pcb_flags |= FP_VIRTFP;
		if (mdcpu->gd_npxthread == td)
			npxexit();
	} else {
		td->td_pcb->pcb_flags &= ~FP_VIRTFP;
	}
}

/*
 * Called from vkernel_trap() to fixup the vkernel's syscall
 * frame for vmspace_ctl() return.
 */
void
cpu_vkernel_trap(struct trapframe *frame, int error)
{
	frame->tf_eax = error;
	if (error)
		frame->tf_eflags |= PSL_C;
	else
		frame->tf_eflags &= ~PSL_C;
}