kernel - Make pcb_onfault more robust.
[dragonfly.git] / sys / platform / pc64 / x86_64 / trap.c
/*-
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 2008 The DragonFly Project.
 * Copyright (c) 2008 Jordan Gordeev.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $
 */

/*
 * x86_64 Trap and System call handling
 */

#include "use_isa.h"

#include "opt_ddb.h"
#include "opt_ktrace.h"

#include <machine/frame.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kerneldump.h>
#include <sys/proc.h>
#include <sys/pioctl.h>
#include <sys/types.h>
#include <sys/signal2.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#include <sys/ktr.h>
#include <sys/sysmsg.h>
#include <sys/sysproto.h>
#include <sys/sysunion.h>

#include <vm/pmap.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_param.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <machine/thread.h>
#include <machine/clock.h>
#include <machine/vmparam.h>
#include <machine/md_var.h>
#include <machine_base/isa/isa_intr.h>
#include <machine_base/apic/lapic.h>

#include <ddb/ddb.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>

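/*
 * Note (editorial): MAKEMPSAFE() acquires the MP lock at most once per
 * handler invocation; the first expansion takes the lock and latches
 * have_mplock so the exit paths know to rel_mplock().  On UP (non-SMP)
 * builds it expands to nothing.
 */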
#ifdef SMP

#define MAKEMPSAFE(have_mplock)			\
	if (have_mplock == 0) {			\
		get_mplock();			\
		have_mplock = 1;		\
	}

#else

#define MAKEMPSAFE(have_mplock)

#endif

extern void trap(struct trapframe *frame);

static int trap_pfault(struct trapframe *, int);
static void trap_fatal(struct trapframe *, vm_offset_t);
void dblfault_handler(struct trapframe *frame);

#define MAX_TRAP_MSG		30
static char *trap_msg[] = {
	"",					/*  0 unused */
	"privileged instruction fault",		/*  1 T_PRIVINFLT */
	"",					/*  2 unused */
	"breakpoint instruction fault",		/*  3 T_BPTFLT */
	"",					/*  4 unused */
	"",					/*  5 unused */
	"arithmetic trap",			/*  6 T_ARITHTRAP */
	"system forced exception",		/*  7 T_ASTFLT */
	"",					/*  8 unused */
	"general protection fault",		/*  9 T_PROTFLT */
	"trace trap",				/* 10 T_TRCTRAP */
	"",					/* 11 unused */
	"page fault",				/* 12 T_PAGEFLT */
	"",					/* 13 unused */
	"alignment fault",			/* 14 T_ALIGNFLT */
	"",					/* 15 unused */
	"",					/* 16 unused */
	"",					/* 17 unused */
	"integer divide fault",			/* 18 T_DIVIDE */
	"non-maskable interrupt trap",		/* 19 T_NMI */
	"overflow trap",			/* 20 T_OFLOW */
	"FPU bounds check fault",		/* 21 T_BOUND */
	"FPU device not available",		/* 22 T_DNA */
	"double fault",				/* 23 T_DOUBLEFLT */
	"FPU operand fetch fault",		/* 24 T_FPOPFLT */
	"invalid TSS fault",			/* 25 T_TSSFLT */
	"segment not present fault",		/* 26 T_SEGNPFLT */
	"stack fault",				/* 27 T_STKFLT */
	"machine check trap",			/* 28 T_MCHK */
	"SIMD floating-point exception",	/* 29 T_XMMFLT */
	"reserved (unknown) fault",		/* 30 T_RESERVED */
};

#ifdef DDB
static int ddb_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, ddb_on_nmi, CTLFLAG_RW,
	&ddb_on_nmi, 0, "Go to DDB on NMI");
static int ddb_on_seg_fault = 0;
SYSCTL_INT(_machdep, OID_AUTO, ddb_on_seg_fault, CTLFLAG_RW,
	&ddb_on_seg_fault, 0, "Go to DDB on user seg-fault");
static int freeze_on_seg_fault = 0;
SYSCTL_INT(_machdep, OID_AUTO, freeze_on_seg_fault, CTLFLAG_RW,
	&freeze_on_seg_fault, 0, "Freeze the process on user seg-fault");
#endif
static int panic_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
	&panic_on_nmi, 0, "Panic on NMI");
static int fast_release;
SYSCTL_INT(_machdep, OID_AUTO, fast_release, CTLFLAG_RW,
	&fast_release, 0, "Passive Release was optimal");
static int slow_release;
SYSCTL_INT(_machdep, OID_AUTO, slow_release, CTLFLAG_RW,
	&slow_release, 0, "Passive Release was nonoptimal");

/*
 * System call debugging records the worst-case system call
 * overhead (inclusive of blocking), but may be inaccurate.
 */
/*#define SYSCALL_DEBUG*/
#ifdef SYSCALL_DEBUG
uint64_t SysCallsWorstCase[SYS_MAXSYSCALL];
#endif

/*
 * Passively intercepts the thread switch function to increase
 * the thread priority from a user priority to a kernel priority, reducing
 * syscall and trap overhead for the case where no switch occurs.
 *
 * Synchronizes td_ucred with p_ucred.  This is used by system calls,
 * signal handling, faults, AST traps, and anything else that enters the
 * kernel from userland and provides the kernel with a stable read-only
 * copy of the process ucred.
 */
static __inline void
userenter(struct thread *curtd, struct proc *curp)
{
	struct ucred *ocred;
	struct ucred *ncred;

	curtd->td_release = lwkt_passive_release;

	if (curtd->td_ucred != curp->p_ucred) {
		ncred = crhold(curp->p_ucred);
		ocred = curtd->td_ucred;
		curtd->td_ucred = ncred;
		if (ocred)
			crfree(ocred);
	}

#ifdef DDB
	/*
	 * Debugging, remove top two user stack pages to catch kernel faults
	 */
	if (freeze_on_seg_fault > 1 && curtd->td_lwp) {
		pmap_remove(vmspace_pmap(curtd->td_lwp->lwp_vmspace),
			    0x00007FFFFFFFD000LU,
			    0x0000800000000000LU);
	}
#endif
}

/*
 * Handle signals, upcalls, profiling, and other AST's and/or tasks that
 * must be completed before we can return to or try to return to userland.
 *
 * Note that td_sticks is a 64 bit quantity, but there's no point doing 64
 * bit arithmetic on the delta calculation so the absolute tick values are
 * truncated to an integer.
 */
static void
userret(struct lwp *lp, struct trapframe *frame, int sticks)
{
	struct proc *p = lp->lwp_proc;
	int sig;

	/*
	 * Charge system time if profiling.  Note: times are in microseconds.
	 * This may do a copyout and block, so do it first even though it
	 * means some system time will be charged as user time.
	 */
	if (p->p_flags & P_PROFIL) {
		addupc_task(p, frame->tf_rip,
			(u_int)((int)lp->lwp_thread->td_sticks - sticks));
	}

recheck:
	/*
	 * If the jungle wants us dead, so be it.
	 */
	if (lp->lwp_mpflags & LWP_MP_WEXIT) {
		lwkt_gettoken(&p->p_token);
		lwp_exit(0);
		lwkt_reltoken(&p->p_token);	/* NOT REACHED */
	}

	/*
	 * Block here if we are in a stopped state.
	 */
	if (p->p_stat == SSTOP || dump_stop_usertds) {
		lwkt_gettoken(&p->p_token);
		tstop();
		lwkt_reltoken(&p->p_token);
		goto recheck;
	}

	/*
	 * Post any pending upcalls.  If running a virtual kernel be sure
	 * to restore the virtual kernel's vmspace before posting the upcall.
	 */
	if (p->p_flags & (P_SIGVTALRM | P_SIGPROF | P_UPCALLPEND)) {
		lwkt_gettoken(&p->p_token);
		if (p->p_flags & P_SIGVTALRM) {
			p->p_flags &= ~P_SIGVTALRM;
			ksignal(p, SIGVTALRM);
		}
		if (p->p_flags & P_SIGPROF) {
			p->p_flags &= ~P_SIGPROF;
			ksignal(p, SIGPROF);
		}
		if (p->p_flags & P_UPCALLPEND) {
			p->p_flags &= ~P_UPCALLPEND;
			postupcall(lp);
		}
		lwkt_reltoken(&p->p_token);
		goto recheck;
	}

	/*
	 * Post any pending signals.  If running a virtual kernel be sure
	 * to restore the virtual kernel's vmspace before posting the signal.
	 *
	 * WARNING!  postsig() can exit and not return.
	 */
	if ((sig = CURSIG_TRACE(lp)) != 0) {
		lwkt_gettoken(&p->p_token);
		postsig(sig);
		lwkt_reltoken(&p->p_token);
		goto recheck;
	}

	/*
	 * Block here if we are swapped out, but still process signals
	 * (such as SIGKILL).  proc0 (the swapin scheduler) is already
	 * aware of our situation, we do not have to wake it up.
	 */
	if (p->p_flags & P_SWAPPEDOUT) {
		lwkt_gettoken(&p->p_token);
		get_mplock();
		p->p_flags |= P_SWAPWAIT;
		swapin_request();
		if (p->p_flags & P_SWAPWAIT)
			tsleep(p, PCATCH, "SWOUT", 0);
		p->p_flags &= ~P_SWAPWAIT;
		rel_mplock();
		lwkt_reltoken(&p->p_token);
		goto recheck;
	}

	/*
	 * In a multi-threaded program it is possible for a thread to change
	 * signal state during a system call which temporarily changes the
	 * signal mask.  In this case postsig() might not be run and we
	 * have to restore the mask ourselves.
	 */
	if (lp->lwp_flags & LWP_OLDMASK) {
		lp->lwp_flags &= ~LWP_OLDMASK;
		lp->lwp_sigmask = lp->lwp_oldsigmask;
		goto recheck;
	}
}

/*
 * Cleanup from userenter and any passive release that might have occurred.
 * We must reclaim the current-process designation before we can return
 * to usermode.  We also handle both LWKT and USER reschedule requests.
 */
static __inline void
userexit(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;
	/* globaldata_t gd = td->td_gd; */

	/*
	 * Handle stop requests at kernel priority.  Any requests queued
	 * after this loop will generate another AST.
	 */
	while (lp->lwp_proc->p_stat == SSTOP) {
		lwkt_gettoken(&lp->lwp_proc->p_token);
		tstop();
		lwkt_reltoken(&lp->lwp_proc->p_token);
	}

	/*
	 * Reduce our priority in preparation for a return to userland.  If
	 * our passive release function was still in place, our priority was
	 * never raised and does not need to be reduced.
	 */
	lwkt_passive_recover(td);

	/* WARNING: we may have migrated cpu's */
	/* gd = td->td_gd; */

	/*
	 * Become the current user scheduled process if we aren't already,
	 * and deal with reschedule requests and other factors.
	 */
	lp->lwp_proc->p_usched->acquire_curproc(lp);
}

#if !defined(KTR_KERNENTRY)
#define	KTR_KERNENTRY	KTR_ALL
#endif
KTR_INFO_MASTER(kernentry);
KTR_INFO(KTR_KERNENTRY, kernentry, trap, 0,
	"TRAP(pid %d, tid %d, trapno %ld, eva %lu)",
	pid_t pid, lwpid_t tid, register_t trapno, vm_offset_t eva);
KTR_INFO(KTR_KERNENTRY, kernentry, trap_ret, 0, "TRAP_RET(pid %d, tid %d)",
	pid_t pid, lwpid_t tid);
KTR_INFO(KTR_KERNENTRY, kernentry, syscall, 0, "SYSC(pid %d, tid %d, nr %ld)",
	pid_t pid, lwpid_t tid, register_t trapno);
KTR_INFO(KTR_KERNENTRY, kernentry, syscall_ret, 0, "SYSRET(pid %d, tid %d, err %d)",
	pid_t pid, lwpid_t tid, int err);
KTR_INFO(KTR_KERNENTRY, kernentry, fork_ret, 0, "FORKRET(pid %d, tid %d)",
	pid_t pid, lwpid_t tid);

/*
 * Exception, fault, and trap interface to the kernel.
 * This common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed.
 *
 * This function is also called from doreti in an interlock to handle ASTs.
 * For example:  hardwareint->INTROUTINE->(set ast)->doreti->trap
 *
 * NOTE!  We have to retrieve the fault address prior to obtaining the
 * MP lock because get_mplock() may switch out.  YYY cr2 really ought
 * to be retrieved by the assembly code, not here.
 *
 * XXX gd_trap_nesting_level currently prevents lwkt_switch() from panicking
 * if an attempt is made to switch from a fast interrupt or IPI.  This is
 * necessary to properly take fatal kernel traps on SMP machines if
 * get_mplock() has to block.
 */

void
trap(struct trapframe *frame)
{
	struct globaldata *gd = mycpu;
	struct thread *td = gd->gd_curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p;
	int sticks = 0;
	int i = 0, ucode = 0, type, code;
#ifdef SMP
	int have_mplock = 0;
#endif
#ifdef INVARIANTS
	int crit_count = td->td_critcount;
	lwkt_tokref_t curstop = td->td_toks_stop;
#endif
	vm_offset_t eva;

	p = td->td_proc;
	clear_quickret();

#ifdef DDB
	/*
	 * We need to allow T_DNA faults when the debugger is active since
	 * some dumping paths do large bcopy() operations which use the
	 * floating point registers for faster copying.
	 */
	if (db_active && frame->tf_trapno != T_DNA) {
		eva = (frame->tf_trapno == T_PAGEFLT ? frame->tf_addr : 0);
		++gd->gd_trap_nesting_level;
		MAKEMPSAFE(have_mplock);
		trap_fatal(frame, eva);
		--gd->gd_trap_nesting_level;
		goto out2;
	}
#endif

	eva = 0;

	if ((frame->tf_rflags & PSL_I) == 0) {
		/*
		 * Buggy application or kernel code has disabled interrupts
		 * and then trapped.  Enabling interrupts now is wrong, but
		 * it is better than running with interrupts disabled until
		 * they are accidentally enabled later.
		 */
		type = frame->tf_trapno;
		if (ISPL(frame->tf_cs) == SEL_UPL) {
			MAKEMPSAFE(have_mplock);
			/* JG curproc can be NULL */
			kprintf(
			    "pid %ld (%s): trap %d with interrupts disabled\n",
			    (long)curproc->p_pid, curproc->p_comm, type);
		} else if (type != T_NMI && type != T_BPTFLT &&
			   type != T_TRCTRAP) {
			/*
			 * XXX not quite right, since this may be for a
			 * multiple fault in user mode.
			 */
			MAKEMPSAFE(have_mplock);
			kprintf("kernel trap %d with interrupts disabled\n",
			    type);
		}
		cpu_enable_intr();
	}

	type = frame->tf_trapno;
	code = frame->tf_err;

	if (ISPL(frame->tf_cs) == SEL_UPL) {
		/* user trap */

		KTR_LOG(kernentry_trap, p->p_pid, lp->lwp_tid,
			frame->tf_trapno, eva);

		userenter(td, p);

		sticks = (int)td->td_sticks;
		KASSERT(lp->lwp_md.md_regs == frame,
			("Frame mismatch %p %p", lp->lwp_md.md_regs, frame));

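		/*
		 * Note (editorial): the switch below computes 'i' (the
		 * signal to deliver, if any) and 'ucode' (the trap-specific
		 * code ultimately handed to trapsignal() further down).
		 */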
		switch (type) {
		case T_PRIVINFLT:	/* privileged instruction fault */
			i = SIGILL;
			ucode = ILL_PRVOPC;
			break;

		case T_BPTFLT:		/* bpt instruction fault */
		case T_TRCTRAP:		/* trace trap */
			frame->tf_rflags &= ~PSL_T;
			i = SIGTRAP;
			ucode = (type == T_TRCTRAP ? TRAP_TRACE : TRAP_BRKPT);
			break;

		case T_ARITHTRAP:	/* arithmetic trap */
			ucode = code;
			i = SIGFPE;
			break;

		case T_ASTFLT:		/* Allow process switch */
			mycpu->gd_cnt.v_soft++;
			if (mycpu->gd_reqflags & RQF_AST_OWEUPC) {
				atomic_clear_int(&mycpu->gd_reqflags,
						 RQF_AST_OWEUPC);
				addupc_task(p, p->p_prof.pr_addr,
					    p->p_prof.pr_ticks);
			}
			goto out;

		case T_PROTFLT:		/* general protection fault */
			i = SIGBUS;
			ucode = BUS_OBJERR;
			break;
		case T_STKFLT:		/* stack fault */
		case T_SEGNPFLT:	/* segment not present fault */
			i = SIGBUS;
			ucode = BUS_ADRERR;
			break;
		case T_TSSFLT:		/* invalid TSS fault */
		case T_DOUBLEFLT:	/* double fault */
		default:
			i = SIGBUS;
			ucode = BUS_OBJERR;
			break;

		case T_PAGEFLT:		/* page fault */
			i = trap_pfault(frame, TRUE);
			if (frame->tf_rip == 0) {
				kprintf("T_PAGEFLT: Warning %%rip == 0!\n");
#ifdef DDB
				while (freeze_on_seg_fault)
					tsleep(p, 0, "freeze", hz * 20);
#endif
			}
			if (i == -1 || i == 0)
				goto out;

			if (i == SIGSEGV) {
				ucode = SEGV_MAPERR;
			} else {
				i = SIGSEGV;
				ucode = SEGV_ACCERR;
			}
			break;

		case T_DIVIDE:		/* integer divide fault */
			ucode = FPE_INTDIV;
			i = SIGFPE;
			break;

#if NISA > 0
		case T_NMI:
			MAKEMPSAFE(have_mplock);
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					kprintf("NMI ... going to debugger\n");
					kdb_trap(type, 0, frame);
				}
#endif /* DDB */
				goto out2;
			} else if (panic_on_nmi)
				panic("NMI indicates hardware failure");
			break;
#endif /* NISA > 0 */

		case T_OFLOW:		/* integer overflow fault */
			ucode = FPE_INTOVF;
			i = SIGFPE;
			break;

		case T_BOUND:		/* bounds check fault */
			ucode = FPE_FLTSUB;
			i = SIGFPE;
			break;

		case T_DNA:
			/*
			 * Virtual kernel intercept - pass the DNA exception
			 * to the virtual kernel if it asked to handle it.
			 * This occurs when the virtual kernel is holding
			 * onto the FP context for a different emulated
			 * process than the one currently running.
			 *
			 * We must still call npxdna() since we may have
			 * saved FP state that the virtual kernel needs
			 * to hand over to a different emulated process.
			 */
			if (lp->lwp_vkernel && lp->lwp_vkernel->ve &&
			    (td->td_pcb->pcb_flags & FP_VIRTFP)
			) {
				npxdna();
				break;
			}

			/*
			 * The kernel may have switched out the FP unit's
			 * state, causing the user process to take a fault
			 * when it tries to use the FP unit.  Restore the
			 * state here.
			 */
			if (npxdna())
				goto out;
			i = SIGFPE;
			ucode = FPE_FPU_NP_TRAP;
			break;

		case T_FPOPFLT:		/* FPU operand fetch fault */
			ucode = ILL_COPROC;
			i = SIGILL;
			break;

		case T_XMMFLT:		/* SIMD floating-point exception */
			ucode = 0; /* XXX */
			i = SIGFPE;
			break;
		}
	} else {
		/* kernel trap */

		switch (type) {
		case T_PAGEFLT:			/* page fault */
			trap_pfault(frame, FALSE);
			goto out2;

		case T_DNA:
			/*
			 * The kernel is apparently using fpu for copying.
			 * XXX this should be fatal unless the kernel has
			 * registered such use.
			 */
			if (npxdna())
				goto out2;
			break;

		case T_STKFLT:		/* stack fault */
			break;

		case T_PROTFLT:		/* general protection fault */
		case T_SEGNPFLT:	/* segment not present fault */
			/*
			 * Invalid segment selectors and out of bounds
			 * %rip's and %rsp's can be set up in user mode.
			 * This causes a fault in kernel mode when the
			 * kernel tries to return to user mode.  We want
			 * to get this fault so that we can fix the
			 * problem here and not have to check all the
			 * selectors and pointers when the user changes
			 * them.
			 */
			if (mycpu->gd_intr_nesting_level == 0) {
				/*
				 * NOTE: in 64-bit mode traps push rsp/ss
				 *	 even if no ring change occurs.
				 */
				if (td->td_pcb->pcb_onfault &&
				    td->td_pcb->pcb_onfault_sp ==
				    frame->tf_rsp) {
					frame->tf_rip = (register_t)
						td->td_pcb->pcb_onfault;
					goto out2;
				}
				if (frame->tf_rip == (long)doreti_iret) {
					frame->tf_rip = (long)doreti_iret_fault;
					goto out2;
				}
			}
			break;

		case T_TSSFLT:
			/*
			 * PSL_NT can be set in user mode and isn't cleared
			 * automatically when the kernel is entered.  This
			 * causes a TSS fault when the kernel attempts to
			 * `iret' because the TSS link is uninitialized.  We
			 * want to get this fault so that we can fix the
			 * problem here and not every time the kernel is
			 * entered.
			 */
			if (frame->tf_rflags & PSL_NT) {
				frame->tf_rflags &= ~PSL_NT;
				goto out2;
			}
			break;

		case T_TRCTRAP:	 /* trace trap */
#if 0
			if (frame->tf_rip == (int)IDTVEC(syscall)) {
				/*
				 * We've just entered system mode via the
				 * syscall lcall.  Continue single stepping
				 * silently until the syscall handler has
				 * saved the flags.
				 */
				goto out2;
			}
			if (frame->tf_rip == (int)IDTVEC(syscall) + 1) {
				/*
				 * The syscall handler has now saved the
				 * flags.  Stop single stepping it.
				 */
				frame->tf_rflags &= ~PSL_T;
				goto out2;
			}
#endif

			/*
			 * Ignore debug register trace traps due to
			 * accesses in the user's address space, which
			 * can happen under several conditions such as
			 * if a user sets a watchpoint on a buffer and
			 * then passes that buffer to a system call.
			 * We still want to get TRCTRAPS for addresses
			 * in kernel space because that is useful when
			 * debugging the kernel.
			 */
#if JG
			if (user_dbreg_trap()) {
				/*
				 * Reset breakpoint bits because the
				 * processor doesn't
				 */
				/* XXX check upper bits here */
				load_dr6(rdr6() & 0xfffffff0);
				goto out2;
			}
#endif
			/*
			 * FALLTHROUGH (TRCTRAP kernel mode, kernel address)
			 */
		case T_BPTFLT:
			/*
			 * If DDB is enabled, let it handle the debugger trap.
			 * Otherwise, debugger traps "can't happen".
			 */
			ucode = TRAP_BRKPT;
#ifdef DDB
			MAKEMPSAFE(have_mplock);
			if (kdb_trap(type, 0, frame))
				goto out2;
#endif
			break;

#if NISA > 0
		case T_NMI:
			MAKEMPSAFE(have_mplock);
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					kprintf("NMI ... going to debugger\n");
					kdb_trap(type, 0, frame);
				}
#endif /* DDB */
				goto out2;
			} else if (panic_on_nmi == 0)
				goto out2;
			/* FALL THROUGH */
#endif /* NISA > 0 */
		}
		MAKEMPSAFE(have_mplock);
		trap_fatal(frame, 0);
		goto out2;
	}

	/*
	 * Virtual kernel intercept - if the fault is directly related to a
	 * VM context managed by a virtual kernel then let the virtual kernel
	 * handle it.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		vkernel_trap(lp, frame);
		goto out;
	}

	/* Translate fault for emulators (e.g. Linux) */
	if (*p->p_sysent->sv_transtrap)
		i = (*p->p_sysent->sv_transtrap)(i, type);

	MAKEMPSAFE(have_mplock);
	trapsignal(lp, i, ucode);

#ifdef DEBUG
	if (type <= MAX_TRAP_MSG) {
		uprintf("fatal process exception: %s",
			trap_msg[type]);
		if ((type == T_PAGEFLT) || (type == T_PROTFLT))
			uprintf(", fault VA = 0x%lx", frame->tf_addr);
		uprintf("\n");
	}
#endif

out:
	userret(lp, frame, sticks);
	userexit(lp);
out2:	;
#ifdef SMP
	if (have_mplock)
		rel_mplock();
#endif
	if (p != NULL && lp != NULL)
		KTR_LOG(kernentry_trap_ret, p->p_pid, lp->lwp_tid);
#ifdef INVARIANTS
	KASSERT(crit_count == td->td_critcount,
		("trap: critical section count mismatch! %d/%d",
		crit_count, td->td_critcount));
	KASSERT(curstop == td->td_toks_stop,
		("trap: extra tokens held after trap! %ld/%ld",
		curstop - &td->td_toks_base,
		td->td_toks_stop - &td->td_toks_base));
#endif
}

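/*
 * Page fault handler.  Returns 0 if the fault was resolved (including
 * recovery via pcb_onfault), -1 after a fatal kernel-mode fault, or a
 * signal number (SIGBUS/SIGSEGV) for the caller to dispatch.
 *
 * The pcb_onfault recovery works roughly as follows (illustrative sketch
 * only; the actual arming is done by the copyin/copyout support code, not
 * in this file):
 *
 *	pcb->pcb_onfault = &recovery_label;	(resume %rip on fault)
 *	pcb->pcb_onfault_sp = <current %rsp>;	(expected fault-time %rsp)
 *	... touch user memory ...
 *	pcb->pcb_onfault = NULL;		(disarm)
 *
 * Because 64-bit mode pushes %rsp/%ss even when no ring change occurs,
 * the handler can insist that pcb_onfault_sp match the faulting frame's
 * tf_rsp, so a stale pcb_onfault armed at a different stack depth is not
 * consumed by an unrelated kernel fault.
 */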
static int
trap_pfault(struct trapframe *frame, int usermode)
{
	vm_offset_t va;
	struct vmspace *vm = NULL;
	vm_map_t map;
	int rv = 0;
	int fault_flags;
	vm_prot_t ftype;
	thread_t td = curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p;

	va = trunc_page(frame->tf_addr);
	if (va >= VM_MIN_KERNEL_ADDRESS) {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 */
		if (usermode) {
			fault_flags = -1;
			ftype = -1;
			goto nogo;
		}

		map = &kernel_map;
	} else {
		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL. If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */
		if (lp != NULL)
			vm = lp->lwp_vmspace;

		if (vm == NULL) {
			fault_flags = -1;
			ftype = -1;
			goto nogo;
		}

		/*
		 * Debugging, try to catch kernel faults on the user address
		 * space when not inside an onfault (e.g. copyin/copyout)
		 * routine.
		 */
		if (usermode == 0 && (td->td_pcb == NULL ||
		    td->td_pcb->pcb_onfault == NULL)) {
#ifdef DDB
			if (freeze_on_seg_fault) {
				kprintf("trap_pfault: user address fault "
					"from kernel mode %016lx\n",
					(long)frame->tf_addr);
				while (freeze_on_seg_fault) {
					tsleep(&freeze_on_seg_fault, 0,
					       "frzseg", hz * 20);
				}
			}
#endif
		}
		map = &vm->vm_map;
	}

	/*
	 * PGEX_I is defined only if the execute disable bit capability is
	 * supported and enabled.
	 */
	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_WRITE;
#if JG
	else if ((frame->tf_err & PGEX_I) && pg_nx != 0)
		ftype = VM_PROT_EXECUTE;
#endif
	else
		ftype = VM_PROT_READ;

	if (map != &kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		PHOLD(lp->lwp_proc);

		/*
		 * Issue fault
		 */
		fault_flags = 0;
		if (usermode)
			fault_flags |= VM_FAULT_BURST;
		if (ftype & VM_PROT_WRITE)
			fault_flags |= VM_FAULT_DIRTY;
		else
			fault_flags |= VM_FAULT_NORMAL;
		rv = vm_fault(map, va, ftype, fault_flags);

		PRELE(lp->lwp_proc);
	} else {
		/*
		 * Don't have to worry about process locking or stacks in the
		 * kernel.
		 */
		fault_flags = VM_FAULT_NORMAL;
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	}
	if (rv == KERN_SUCCESS)
		return (0);
nogo:
	if (!usermode) {
		/*
		 * NOTE: in 64-bit mode traps push rsp/ss
		 *	 even if no ring change occurs.
		 */
		if (td->td_pcb->pcb_onfault &&
		    td->td_pcb->pcb_onfault_sp == frame->tf_rsp &&
		    td->td_gd->gd_intr_nesting_level == 0) {
			frame->tf_rip = (register_t)td->td_pcb->pcb_onfault;
			return (0);
		}
		trap_fatal(frame, frame->tf_addr);
		return (-1);
	}

	/*
	 * NOTE: on x86_64 we have a tf_addr field in the trapframe, no
	 * kludge is needed to pass the fault address to signal handlers.
	 */
	p = td->td_proc;
	if (td->td_lwp->lwp_vkernel == NULL) {
#ifdef DDB
		if (bootverbose || freeze_on_seg_fault || ddb_on_seg_fault) {
#else
		if (bootverbose) {
#endif
			kprintf("seg-fault ft=%04x ff=%04x addr=%p rip=%p "
				"pid=%d cpu=%d p_comm=%s\n",
				ftype, fault_flags,
				(void *)frame->tf_addr,
				(void *)frame->tf_rip,
				p->p_pid, mycpu->gd_cpuid, p->p_comm);
		}
#ifdef DDB
		while (freeze_on_seg_fault) {
			tsleep(p, 0, "freeze", hz * 20);
		}
		if (ddb_on_seg_fault)
			Debugger("ddb_on_seg_fault");
#endif
	}

	return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}

static void
trap_fatal(struct trapframe *frame, vm_offset_t eva)
{
	int code, ss;
	u_int type;
	long rsp;
	struct soft_segment_descriptor softseg;
	char *msg;

	code = frame->tf_err;
	type = frame->tf_trapno;
	sdtossd(&gdt[IDXSEL(frame->tf_cs & 0xffff)], &softseg);

	if (type <= MAX_TRAP_MSG)
		msg = trap_msg[type];
	else
		msg = "UNKNOWN";
	kprintf("\n\nFatal trap %d: %s while in %s mode\n", type, msg,
	    ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
#ifdef SMP
	/* three separate prints in case of a trap on an unmapped page */
	kprintf("cpuid = %d; ", mycpu->gd_cpuid);
	kprintf("lapic->id = %08x\n", lapic->id);
#endif
	if (type == T_PAGEFLT) {
		kprintf("fault virtual address = 0x%lx\n", eva);
		kprintf("fault code = %s %s %s, %s\n",
			code & PGEX_U ? "user" : "supervisor",
			code & PGEX_W ? "write" : "read",
			code & PGEX_I ? "instruction" : "data",
			code & PGEX_P ? "protection violation" :
					"page not present");
	}
	kprintf("instruction pointer = 0x%lx:0x%lx\n",
		frame->tf_cs & 0xffff, frame->tf_rip);
	if (ISPL(frame->tf_cs) == SEL_UPL) {
		ss = frame->tf_ss & 0xffff;
		rsp = frame->tf_rsp;
	} else {
		/*
		 * NOTE: in 64-bit mode traps push rsp/ss even if no ring
		 *	 change occurs.
		 */
		ss = GSEL(GDATA_SEL, SEL_KPL);
		rsp = frame->tf_rsp;
	}
	kprintf("stack pointer = 0x%x:0x%lx\n", ss, rsp);
	kprintf("frame pointer = 0x%x:0x%lx\n", ss, frame->tf_rbp);
	kprintf("code segment = base 0x%lx, limit 0x%lx, type 0x%x\n",
		softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
	kprintf("             = DPL %d, pres %d, long %d, def32 %d, gran %d\n",
		softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_long,
		softseg.ssd_def32, softseg.ssd_gran);
	kprintf("processor eflags = ");
	if (frame->tf_rflags & PSL_T)
		kprintf("trace trap, ");
	if (frame->tf_rflags & PSL_I)
		kprintf("interrupt enabled, ");
	if (frame->tf_rflags & PSL_NT)
		kprintf("nested task, ");
	if (frame->tf_rflags & PSL_RF)
		kprintf("resume, ");
	kprintf("IOPL = %ld\n", (frame->tf_rflags & PSL_IOPL) >> 12);
	kprintf("current process = ");
	if (curproc) {
		kprintf("%lu\n", (u_long)curproc->p_pid);
	} else {
		kprintf("Idle\n");
	}
	kprintf("current thread = pri %d ", curthread->td_pri);
	if (curthread->td_critcount)
		kprintf("(CRIT)");
	kprintf("\n");

#ifdef DDB
	if ((debugger_on_panic || db_active) && kdb_trap(type, code, frame))
		return;
#endif
	kprintf("trap number = %d\n", type);
	if (type <= MAX_TRAP_MSG)
		panic("%s", trap_msg[type]);
	else
		panic("unknown/reserved trap");
}

/*
 * Double fault handler.  Called when a fault occurs while writing
 * a frame for a trap/exception onto the stack.  This usually occurs
 * when the stack overflows (such is the case with infinite recursion,
 * for example).
 */
static __inline
int
in_kstack_guard(register_t rptr)
{
	thread_t td = curthread;

	if ((char *)rptr >= td->td_kstack &&
	    (char *)rptr < td->td_kstack + PAGE_SIZE) {
		return 1;
	}
	return 0;
}
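
/*
 * Note (editorial): in_kstack_guard() treats the lowest page of td_kstack
 * as the stack guard; an %rsp or %rbp pointing into that page means the
 * kernel stack overflowed into the guard.
 */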

void
dblfault_handler(struct trapframe *frame)
{
	thread_t td = curthread;

	if (in_kstack_guard(frame->tf_rsp) || in_kstack_guard(frame->tf_rbp)) {
		kprintf("DOUBLE FAULT - KERNEL STACK GUARD HIT!\n");
		if (in_kstack_guard(frame->tf_rsp))
			frame->tf_rsp = (register_t)(td->td_kstack + PAGE_SIZE);
		if (in_kstack_guard(frame->tf_rbp))
			frame->tf_rbp = (register_t)(td->td_kstack + PAGE_SIZE);
	} else {
		kprintf("DOUBLE FAULT\n");
	}
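	/*
	 * (Editorial note: pointing the saved %rsp/%rbp just past the guard
	 * page presumably lets the printing and any backtrace taken by the
	 * panic path below run without immediately faulting on the guard
	 * page again.)
	 */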
	kprintf("\nFatal double fault\n");
	kprintf("rip = 0x%lx\n", frame->tf_rip);
	kprintf("rsp = 0x%lx\n", frame->tf_rsp);
	kprintf("rbp = 0x%lx\n", frame->tf_rbp);
#ifdef SMP
	/* three separate prints in case of a trap on an unmapped page */
	kprintf("cpuid = %d; ", mycpu->gd_cpuid);
	kprintf("lapic->id = %08x\n", lapic->id);
#endif
	panic("double fault");
}

/*
 * syscall2 -	MP aware system call request C handler
 *
 * A system call is essentially treated as a trap except that the
 * MP lock is not held on entry or return.  We are responsible for
 * obtaining the MP lock if necessary and for handling ASTs
 * (e.g. a task switch) prior to return.
 *
 * MPSAFE
 */
void
syscall2(struct trapframe *frame)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;
	caddr_t params;
	struct sysent *callp;
	register_t orig_tf_rflags;
	int sticks;
	int error;
	int narg;
#ifdef INVARIANTS
	int crit_count = td->td_critcount;
#endif
#ifdef SMP
	int have_mplock = 0;
#endif
	register_t *argp;
	u_int code;
	int reg, regcnt;
	union sysunion args;
	register_t *argsdst;

	mycpu->gd_cnt.v_syscall++;

#ifdef DIAGNOSTIC
	if (ISPL(frame->tf_cs) != SEL_UPL) {
		get_mplock();
		panic("syscall");
		/* NOT REACHED */
	}
#endif

	KTR_LOG(kernentry_syscall, p->p_pid, lp->lwp_tid,
		frame->tf_rax);

	userenter(td, p);	/* lazy raise our priority */

	reg = 0;
	regcnt = 6;
	/*
	 * Misc
	 */
	sticks = (int)td->td_sticks;
	orig_tf_rflags = frame->tf_rflags;

	/*
	 * Virtual kernel intercept - if a VM context managed by a virtual
	 * kernel issues a system call the virtual kernel handles it, not us.
	 * Restore the virtual kernel context and return from its system
	 * call.  The current frame is copied out to the virtual kernel.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		vkernel_trap(lp, frame);
		error = EJUSTRETURN;
		goto out;
	}

	/*
	 * Get the system call parameters and account for time
	 */
	KASSERT(lp->lwp_md.md_regs == frame,
		("Frame mismatch %p %p", lp->lwp_md.md_regs, frame));
	params = (caddr_t)frame->tf_rsp + sizeof(register_t);
	code = frame->tf_rax;

	if (p->p_sysent->sv_prepsyscall) {
		(*p->p_sysent->sv_prepsyscall)(
			frame, (int *)(&args.nosys.sysmsg + 1),
			&code, &params);
	} else {
		if (code == SYS_syscall || code == SYS___syscall) {
			code = frame->tf_rdi;
			reg++;
			regcnt--;
		}
	}

	if (p->p_sysent->sv_mask)
		code &= p->p_sysent->sv_mask;

	if (code >= p->p_sysent->sv_size)
		callp = &p->p_sysent->sv_table[0];
	else
		callp = &p->p_sysent->sv_table[code];

	narg = callp->sy_narg & SYF_ARGMASK;

	/*
	 * On x86_64 we get up to six arguments in registers.  The rest are
	 * on the stack.  The first six members of 'struct trapframe' happen
	 * to be the registers used to pass arguments, in exactly the right
	 * order.
	 */
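	/*
	 * (Editorial note: concretely %rdi, %rsi, %rdx, %rcx, %r8 and %r9,
	 * i.e. tf_rdi through tf_r9.  The ERESTART handling below suggests
	 * the syscall entry path stashes userland's %r10 - the fourth
	 * argument register in the syscall convention, since the 'syscall'
	 * instruction clobbers %rcx - in the tf_rcx slot so the six
	 * arguments stay contiguous.)
	 */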
	argp = &frame->tf_rdi;
	argp += reg;
	argsdst = (register_t *)(&args.nosys.sysmsg + 1);
	/*
	 * JG can we overflow the space pointed to by 'argsdst'
	 * either with 'bcopy' or with 'copyin'?
	 */
	bcopy(argp, argsdst, sizeof(register_t) * regcnt);
	/*
	 * copyin is MP aware, but the tracing code is not
	 */
	if (narg > regcnt) {
		KASSERT(params != NULL, ("copyin args with no params!"));
		error = copyin(params, &argsdst[regcnt],
			       (narg - regcnt) * sizeof(register_t));
		if (error) {
#ifdef KTRACE
			if (KTRPOINT(td, KTR_SYSCALL)) {
				MAKEMPSAFE(have_mplock);
				ktrsyscall(lp, code, narg,
					(void *)(&args.nosys.sysmsg + 1));
			}
#endif
			goto bad;
		}
	}

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSCALL)) {
		MAKEMPSAFE(have_mplock);
		ktrsyscall(lp, code, narg, (void *)(&args.nosys.sysmsg + 1));
	}
#endif

	/*
	 * Default return value is 0 (will be copied to %rax).  Double-value
	 * returns use %rax and %rdx.  %rdx is left unchanged for system
	 * calls which return only one result.
	 */
	args.sysmsg_fds[0] = 0;
	args.sysmsg_fds[1] = frame->tf_rdx;

	/*
	 * The syscall might manipulate the trap frame.  If it does it
	 * will probably return EJUSTRETURN.
	 */
	args.sysmsg_frame = frame;

	STOPEVENT(p, S_SCE, narg);	/* MP aware */

	/*
	 * NOTE: All system calls run MPSAFE now.  The system call itself
	 *	 is responsible for getting the MP lock.
	 */
#ifdef SYSCALL_DEBUG
	uint64_t tscval = rdtsc();
#endif
	error = (*callp->sy_call)(&args);
#ifdef SYSCALL_DEBUG
	tscval = rdtsc() - tscval;
	tscval = tscval * 1000000 / tsc_frequency;
	if (SysCallsWorstCase[code] < tscval)
		SysCallsWorstCase[code] = tscval;
#endif
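	/*
	 * (With SYSCALL_DEBUG enabled, SysCallsWorstCase[] thus accumulates
	 * the worst-case latency observed per syscall, in microseconds:
	 * the raw TSC delta scaled by tsc_frequency.)
	 */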

out:
	/*
	 * MP SAFE (we may or may not have the MP lock at this point)
	 */
	/*kprintf("SYSMSG %d ", error);*/
	switch (error) {
	case 0:
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		lp = curthread->td_lwp;
		frame->tf_rax = args.sysmsg_fds[0];
		frame->tf_rdx = args.sysmsg_fds[1];
		frame->tf_rflags &= ~PSL_C;
		break;
	case ERESTART:
		/*
		 * Reconstruct pc, we know that 'syscall' is 2 bytes.
		 * We have to do a full context restore so that %r10
		 * (which was holding the value of %rcx) is restored for
		 * the next iteration.
		 */
		if (frame->tf_err != 0 && frame->tf_err != 2)
			kprintf("lp %s:%d frame->tf_err is weird %ld\n",
				td->td_comm, lp->lwp_proc->p_pid,
				frame->tf_err);
		frame->tf_rip -= frame->tf_err;
		frame->tf_r10 = frame->tf_rcx;
		break;
	case EJUSTRETURN:
		break;
	case EASYNC:
		panic("Unexpected EASYNC return value (for now)");
	default:
bad:
		if (p->p_sysent->sv_errsize) {
			if (error >= p->p_sysent->sv_errsize)
				error = -1;	/* XXX */
			else
				error = p->p_sysent->sv_errtbl[error];
		}
		frame->tf_rax = error;
		frame->tf_rflags |= PSL_C;
		break;
	}

	/*
	 * Traced syscall.  trapsignal() is not MP aware.
	 */
	if (orig_tf_rflags & PSL_T) {
		MAKEMPSAFE(have_mplock);
		frame->tf_rflags &= ~PSL_T;
		trapsignal(lp, SIGTRAP, TRAP_TRACE);
	}

	/*
	 * Handle reschedule and other end-of-syscall issues
	 */
	userret(lp, frame, sticks);

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSRET)) {
		MAKEMPSAFE(have_mplock);
		ktrsysret(lp, code, error, args.sysmsg_result);
	}
#endif

	/*
	 * This works because errno is findable through the
	 * register set.  If we ever support an emulation where this
	 * is not the case, this code will need to be revisited.
	 */
	STOPEVENT(p, S_SCX, code);

	userexit(lp);
#ifdef SMP
	/*
	 * Release the MP lock if we had to get it
	 */
	if (have_mplock)
		rel_mplock();
#endif
	KTR_LOG(kernentry_syscall_ret, p->p_pid, lp->lwp_tid, error);
#ifdef INVARIANTS
	KASSERT(crit_count == td->td_critcount,
		("syscall: critical section count mismatch! %d/%d",
		crit_count, td->td_critcount));
	KASSERT(&td->td_toks_base == td->td_toks_stop,
		("syscall: extra tokens held after trap! %ld",
		td->td_toks_stop - &td->td_toks_base));
#endif
}

/*
 * NOTE: mplock not held at any point
 */
void
fork_return(struct lwp *lp, struct trapframe *frame)
{
	frame->tf_rax = 0;		/* Child returns zero */
	frame->tf_rflags &= ~PSL_C;	/* success */
	frame->tf_rdx = 1;

	generic_lwp_return(lp, frame);
	KTR_LOG(kernentry_fork_ret, lp->lwp_proc->p_pid, lp->lwp_tid);
}

/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.
 *
 * This code will return back into the fork trampoline code which then
 * runs doreti.
 *
 * NOTE: The mplock is not held at any point.
 */
void
generic_lwp_return(struct lwp *lp, struct trapframe *frame)
{
	struct proc *p = lp->lwp_proc;

	/*
	 * Newly forked processes are given a kernel priority.  We have to
	 * adjust the priority to a normal user priority and fake entry
	 * into the kernel (call userenter()) to install a passive release
	 * function just in case userret() decides to stop the process.  This
	 * can occur when ^Z races a fork.  If we do not install the passive
	 * release function the current process designation will not be
	 * released when the thread goes to sleep.
	 */
	lwkt_setpri_self(TDPRI_USER_NORM);
	userenter(lp->lwp_thread, p);
	userret(lp, frame, 0);
#ifdef KTRACE
	if (KTRPOINT(lp->lwp_thread, KTR_SYSRET))
		ktrsysret(lp, SYS_fork, 0, 0);
#endif
	lp->lwp_flags |= LWP_PASSIVE_ACQ;
	userexit(lp);
	lp->lwp_flags &= ~LWP_PASSIVE_ACQ;
}

/*
 * If PGEX_FPFAULT is set then set FP_VIRTFP in the PCB to force a T_DNA
 * fault (which is then passed back to the virtual kernel) if an attempt is
 * made to use the FP unit.
 *
 * XXX this is a fairly big hack.
 */
void
set_vkernel_fp(struct trapframe *frame)
{
	struct thread *td = curthread;

	if (frame->tf_xflags & PGEX_FPFAULT) {
		td->td_pcb->pcb_flags |= FP_VIRTFP;
		if (mdcpu->gd_npxthread == td)
			npxexit();
	} else {
		td->td_pcb->pcb_flags &= ~FP_VIRTFP;
	}
}

/*
 * Called from vkernel_trap() to fixup the vkernel's syscall
 * frame for vmspace_ctl() return.
 */
void
cpu_vkernel_trap(struct trapframe *frame, int error)
{
	frame->tf_rax = error;
	if (error)
		frame->tf_rflags |= PSL_C;
	else
		frame->tf_rflags &= ~PSL_C;
}