kernel: In the x86_64 kernel, add a missing include to trap.c.
[dragonfly.git] / sys/platform/pc64/x86_64/trap.c
/*-
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 2008 The DragonFly Project.
 * Copyright (c) 2008 Jordan Gordeev.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $
 */

/*
 * x86_64 Trap and System call handling
 */

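/* use_isa.h is generated by config(8) and defines NISA (tested below). */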
#include "use_isa.h"

#include "opt_ddb.h"
#include "opt_ktrace.h"

#include <machine/frame.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kerneldump.h>
#include <sys/proc.h>
#include <sys/pioctl.h>
#include <sys/types.h>
#include <sys/signal2.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#include <sys/ktr.h>
#include <sys/sysmsg.h>
#include <sys/sysproto.h>
#include <sys/sysunion.h>

#include <vm/pmap.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_param.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <machine/thread.h>
#include <machine/vmparam.h>
#include <machine/md_var.h>
#include <machine_base/isa/isa_intr.h>

#include <ddb/ddb.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>

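/*
 * MAKEMPSAFE acquires the MP lock on first use within a given trap or
 * syscall frame and records that it did, so the matching rel_mplock()
 * at the end of trap()/syscall2() drops it exactly once.  On non-SMP
 * builds it expands to nothing.
 */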
#ifdef SMP

#define MAKEMPSAFE(have_mplock)			\
	if (have_mplock == 0) {			\
		get_mplock();			\
		have_mplock = 1;		\
	}

#else

#define MAKEMPSAFE(have_mplock)

#endif

extern void trap(struct trapframe *frame);

static int trap_pfault(struct trapframe *, int);
static void trap_fatal(struct trapframe *, vm_offset_t);
void dblfault_handler(struct trapframe *frame);

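/*
 * Human-readable trap names, indexed by trap number.  Used by
 * trap_fatal() and by the DEBUG reporting path in trap().
 */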
#define MAX_TRAP_MSG		30
static char *trap_msg[] = {
	"",					/*  0 unused */
	"privileged instruction fault",		/*  1 T_PRIVINFLT */
	"",					/*  2 unused */
	"breakpoint instruction fault",		/*  3 T_BPTFLT */
	"",					/*  4 unused */
	"",					/*  5 unused */
	"arithmetic trap",			/*  6 T_ARITHTRAP */
	"system forced exception",		/*  7 T_ASTFLT */
	"",					/*  8 unused */
	"general protection fault",		/*  9 T_PROTFLT */
	"trace trap",				/* 10 T_TRCTRAP */
	"",					/* 11 unused */
	"page fault",				/* 12 T_PAGEFLT */
	"",					/* 13 unused */
	"alignment fault",			/* 14 T_ALIGNFLT */
	"",					/* 15 unused */
	"",					/* 16 unused */
	"",					/* 17 unused */
	"integer divide fault",			/* 18 T_DIVIDE */
	"non-maskable interrupt trap",		/* 19 T_NMI */
	"overflow trap",			/* 20 T_OFLOW */
	"FPU bounds check fault",		/* 21 T_BOUND */
	"FPU device not available",		/* 22 T_DNA */
	"double fault",				/* 23 T_DOUBLEFLT */
	"FPU operand fetch fault",		/* 24 T_FPOPFLT */
	"invalid TSS fault",			/* 25 T_TSSFLT */
	"segment not present fault",		/* 26 T_SEGNPFLT */
	"stack fault",				/* 27 T_STKFLT */
	"machine check trap",			/* 28 T_MCHK */
	"SIMD floating-point exception",	/* 29 T_XMMFLT */
	"reserved (unknown) fault",		/* 30 T_RESERVED */
};

#ifdef DDB
static int ddb_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, ddb_on_nmi, CTLFLAG_RW,
	&ddb_on_nmi, 0, "Go to DDB on NMI");
static int ddb_on_seg_fault = 0;
SYSCTL_INT(_machdep, OID_AUTO, ddb_on_seg_fault, CTLFLAG_RW,
	&ddb_on_seg_fault, 0, "Go to DDB on user seg-fault");
#endif
static int panic_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
	&panic_on_nmi, 0, "Panic on NMI");
static int fast_release;
SYSCTL_INT(_machdep, OID_AUTO, fast_release, CTLFLAG_RW,
	&fast_release, 0, "Passive Release was optimal");
static int slow_release;
SYSCTL_INT(_machdep, OID_AUTO, slow_release, CTLFLAG_RW,
	&slow_release, 0, "Passive Release was nonoptimal");

/*
 * Passively intercepts the thread switch function to increase
 * the thread priority from a user priority to a kernel priority, reducing
 * syscall and trap overhead for the case where no switch occurs.
 *
 * Synchronizes td_ucred with p_ucred.  This is used by system calls,
 * signal handling, faults, AST traps, and anything else that enters the
 * kernel from userland and provides the kernel with a stable read-only
 * copy of the process ucred.
 */
static __inline void
userenter(struct thread *curtd, struct proc *curp)
{
	struct ucred *ocred;
	struct ucred *ncred;

	curtd->td_release = lwkt_passive_release;

	if (curtd->td_ucred != curp->p_ucred) {
		ncred = crhold(curp->p_ucred);
		ocred = curtd->td_ucred;
		curtd->td_ucred = ncred;
		if (ocred)
			crfree(ocred);
	}
}

191/*
192 * Handle signals, upcalls, profiling, and other AST's and/or tasks that
193 * must be completed before we can return to or try to return to userland.
194 *
195 * Note that td_sticks is a 64 bit quantity, but there's no point doing 64
196 * arithmatic on the delta calculation so the absolute tick values are
197 * truncated to an integer.
198 */
199static void
200userret(struct lwp *lp, struct trapframe *frame, int sticks)
201{
202 struct proc *p = lp->lwp_proc;
203 int sig;
204
205 /*
206 * Charge system time if profiling. Note: times are in microseconds.
207 * This may do a copyout and block, so do it first even though it
208 * means some system time will be charged as user time.
209 */
210 if (p->p_flag & P_PROFIL) {
211 addupc_task(p, frame->tf_rip,
212 (u_int)((int)lp->lwp_thread->td_sticks - sticks));
213 }
214
215recheck:
216 /*
217 * If the jungle wants us dead, so be it.
218 */
219 if (lp->lwp_flag & LWP_WEXIT) {
b5c4d81f 220 lwkt_gettoken(&p->p_token);
c8fe38ae 221 lwp_exit(0);
b5c4d81f 222 lwkt_reltoken(&p->p_token); /* NOT REACHED */
c8fe38ae
MD
223 }
224
225 /*
226 * Block here if we are in a stopped state.
227 */
5ea440eb 228 if (p->p_stat == SSTOP || dump_stop_usertds) {
c8fe38ae
MD
229 get_mplock();
230 tstop();
231 rel_mplock();
232 goto recheck;
233 }
234
235 /*
236 * Post any pending upcalls. If running a virtual kernel be sure
237 * to restore the virtual kernel's vmspace before posting the upcall.
238 */
239 if (p->p_flag & P_UPCALLPEND) {
240 p->p_flag &= ~P_UPCALLPEND;
241 get_mplock();
242 postupcall(lp);
243 rel_mplock();
244 goto recheck;
245 }
246
247 /*
248 * Post any pending signals. If running a virtual kernel be sure
249 * to restore the virtual kernel's vmspace before posting the signal.
2883d2d8
MD
250 *
251 * WARNING! postsig() can exit and not return.
c8fe38ae 252 */
e473f776 253 if ((sig = CURSIG_TRACE(lp)) != 0) {
c8fe38ae
MD
254 get_mplock();
255 postsig(sig);
256 rel_mplock();
257 goto recheck;
258 }
259
260 /*
261 * block here if we are swapped out, but still process signals
262 * (such as SIGKILL). proc0 (the swapin scheduler) is already
263 * aware of our situation, we do not have to wake it up.
264 */
265 if (p->p_flag & P_SWAPPEDOUT) {
266 get_mplock();
267 p->p_flag |= P_SWAPWAIT;
268 swapin_request();
269 if (p->p_flag & P_SWAPWAIT)
270 tsleep(p, PCATCH, "SWOUT", 0);
271 p->p_flag &= ~P_SWAPWAIT;
272 rel_mplock();
273 goto recheck;
274 }
275
276 /*
277 * Make sure postsig() handled request to restore old signal mask after
278 * running signal handler.
279 */
280 KKASSERT((lp->lwp_flag & LWP_OLDMASK) == 0);
281}
282
283/*
284 * Cleanup from userenter and any passive release that might have occured.
285 * We must reclaim the current-process designation before we can return
286 * to usermode. We also handle both LWKT and USER reschedule requests.
287 */
288static __inline void
289userexit(struct lwp *lp)
290{
291 struct thread *td = lp->lwp_thread;
973c11b9 292/* globaldata_t gd = td->td_gd;*/
c8fe38ae 293
c8fe38ae 294 /*
89ffa1cf
SS
295 * Handle stop requests at kernel priority. Any requests queued
296 * after this loop will generate another AST.
c8fe38ae 297 */
89ffa1cf
SS
298 while (lp->lwp_proc->p_stat == SSTOP) {
299 get_mplock();
300 tstop();
301 rel_mplock();
c8fe38ae
MD
302 }
303
304 /*
c8fe38ae
MD
305 * Reduce our priority in preparation for a return to userland. If
306 * our passive release function was still in place, our priority was
307 * never raised and does not need to be reduced.
308 */
3824f392 309 lwkt_passive_recover(td);
89ffa1cf
SS
310
311 /*
312 * Become the current user scheduled process if we aren't already,
313 * and deal with reschedule requests and other factors.
314 */
315 lp->lwp_proc->p_usched->acquire_curproc(lp);
316 /* WARNING: we may have migrated cpu's */
317 /* gd = td->td_gd; */
c8fe38ae
MD
318}
319
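/*
 * KTR trace points logged on each kernel entry and exit path
 * (trap, syscall, and fork return).
 */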
#if !defined(KTR_KERNENTRY)
#define	KTR_KERNENTRY	KTR_ALL
#endif
KTR_INFO_MASTER(kernentry);
KTR_INFO(KTR_KERNENTRY, kernentry, trap, 0, "STR",
	 sizeof(long) + sizeof(long) + sizeof(long) + sizeof(vm_offset_t));
KTR_INFO(KTR_KERNENTRY, kernentry, trap_ret, 0, "STR",
	 sizeof(long) + sizeof(long));
KTR_INFO(KTR_KERNENTRY, kernentry, syscall, 0, "STR",
	 sizeof(long) + sizeof(long) + sizeof(long));
KTR_INFO(KTR_KERNENTRY, kernentry, syscall_ret, 0, "STR",
	 sizeof(long) + sizeof(long) + sizeof(long));
KTR_INFO(KTR_KERNENTRY, kernentry, fork_ret, 0, "STR",
	 sizeof(long) + sizeof(long));

/*
 * Exception, fault, and trap interface to the kernel.
 * This common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed.
 *
 * This function is also called from doreti in an interlock to handle ASTs.
 * For example:  hardwareint->INTROUTINE->(set ast)->doreti->trap
 *
 * NOTE!  We have to retrieve the fault address prior to obtaining the
 * MP lock because get_mplock() may switch out.  YYY cr2 really ought
 * to be retrieved by the assembly code, not here.
 *
 * XXX gd_trap_nesting_level currently prevents lwkt_switch() from panicking
 * if an attempt is made to switch from a fast interrupt or IPI.  This is
 * necessary to properly take fatal kernel traps on SMP machines if
 * get_mplock() has to block.
 */

void
trap(struct trapframe *frame)
{
	struct globaldata *gd = mycpu;
	struct thread *td = gd->gd_curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p;
	int sticks = 0;
	int i = 0, ucode = 0, type, code;
#ifdef SMP
	int have_mplock = 0;
#endif
#ifdef INVARIANTS
	int crit_count = td->td_critcount;
	lwkt_tokref_t curstop = td->td_toks_stop;
#endif
	vm_offset_t eva;

	p = td->td_proc;

#ifdef DDB
	/*
	 * We need to allow T_DNA faults when the debugger is active since
	 * some dumping paths do large bcopy() which use the floating
	 * point registers for faster copying.
	 */
	if (db_active && frame->tf_trapno != T_DNA) {
		eva = (frame->tf_trapno == T_PAGEFLT ? frame->tf_addr : 0);
		++gd->gd_trap_nesting_level;
		MAKEMPSAFE(have_mplock);
		trap_fatal(frame, eva);
		--gd->gd_trap_nesting_level;
		goto out2;
	}
#endif

	eva = 0;

	if ((frame->tf_rflags & PSL_I) == 0) {
		/*
		 * Buggy application or kernel code has disabled interrupts
		 * and then trapped.  Enabling interrupts now is wrong, but
		 * it is better than running with interrupts disabled until
		 * they are accidentally enabled later.
		 */
		type = frame->tf_trapno;
		if (ISPL(frame->tf_cs) == SEL_UPL) {
			MAKEMPSAFE(have_mplock);
			/* JG curproc can be NULL */
			kprintf(
			    "pid %ld (%s): trap %d with interrupts disabled\n",
			    (long)curproc->p_pid, curproc->p_comm, type);
		} else if (type != T_NMI && type != T_BPTFLT &&
			   type != T_TRCTRAP) {
			/*
			 * XXX not quite right, since this may be for a
			 * multiple fault in user mode.
			 */
			MAKEMPSAFE(have_mplock);
			kprintf("kernel trap %d with interrupts disabled\n",
			    type);
		}
		cpu_enable_intr();
	}

	type = frame->tf_trapno;
	code = frame->tf_err;

	if (ISPL(frame->tf_cs) == SEL_UPL) {
		/* user trap */

		KTR_LOG(kernentry_trap, p->p_pid, lp->lwp_tid,
			frame->tf_trapno, eva);

		userenter(td, p);

		sticks = (int)td->td_sticks;
		KASSERT(lp->lwp_md.md_regs == frame,
			("Frame mismatch %p %p", lp->lwp_md.md_regs, frame));

		switch (type) {
		case T_PRIVINFLT:	/* privileged instruction fault */
			ucode = ILL_PRVOPC;
			i = SIGILL;
			break;

		case T_BPTFLT:		/* bpt instruction fault */
		case T_TRCTRAP:		/* trace trap */
			frame->tf_rflags &= ~PSL_T;
			ucode = TRAP_TRACE;
			i = SIGTRAP;
			break;

		case T_ARITHTRAP:	/* arithmetic trap */
			ucode = code;
			i = SIGFPE;
#if 0
#if JG
			ucode = fputrap();
#else
			ucode = code;
#endif
			i = SIGFPE;
#endif
			break;

		case T_ASTFLT:		/* Allow process switch */
			mycpu->gd_cnt.v_soft++;
			if (mycpu->gd_reqflags & RQF_AST_OWEUPC) {
				atomic_clear_int(&mycpu->gd_reqflags,
						 RQF_AST_OWEUPC);
				addupc_task(p, p->p_prof.pr_addr,
					    p->p_prof.pr_ticks);
			}
			goto out;

		case T_PROTFLT:		/* general protection fault */
			i = SIGBUS;
			ucode = BUS_OBJERR;
			break;
		case T_SEGNPFLT:	/* segment not present fault */
			i = SIGBUS;
			ucode = BUS_ADRERR;
			break;
		case T_TSSFLT:		/* invalid TSS fault */
		case T_DOUBLEFLT:	/* double fault */
			i = SIGBUS;
			ucode = BUS_OBJERR;
			break;
		default:
#if 0
			ucode = code + BUS_SEGM_FAULT ; /* XXX: ???*/
#endif
			ucode = BUS_OBJERR;
			i = SIGBUS;
			break;

		case T_PAGEFLT:		/* page fault */
			i = trap_pfault(frame, TRUE);
			if (frame->tf_rip == 0)
				kprintf("T_PAGEFLT: Warning %%rip == 0!\n");
			if (i == -1)
				goto out;
			if (i == 0)
				goto out;

#if 0
			ucode = T_PAGEFLT;
#endif
			if (i == SIGSEGV)
				ucode = SEGV_MAPERR;
			else
				ucode = BUS_ADRERR;
			break;

		case T_DIVIDE:		/* integer divide fault */
			ucode = FPE_INTDIV;
			i = SIGFPE;
			break;

#if NISA > 0
		case T_NMI:
			MAKEMPSAFE(have_mplock);
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					kprintf("NMI ... going to debugger\n");
					kdb_trap(type, 0, frame);
				}
#endif /* DDB */
				goto out2;
			} else if (panic_on_nmi)
				panic("NMI indicates hardware failure");
			break;
#endif /* NISA > 0 */

		case T_OFLOW:		/* integer overflow fault */
			ucode = FPE_INTOVF;
			i = SIGFPE;
			break;

		case T_BOUND:		/* bounds check fault */
			ucode = FPE_FLTSUB;
			i = SIGFPE;
			break;

		case T_DNA:
			/*
			 * Virtual kernel intercept - pass the DNA exception
			 * to the virtual kernel if it asked to handle it.
			 * This occurs when the virtual kernel is holding
			 * onto the FP context for a different emulated
			 * process than the one currently running.
			 *
			 * We must still call npxdna() since we may have
			 * saved FP state that the virtual kernel needs
			 * to hand over to a different emulated process.
			 */
			if (lp->lwp_vkernel && lp->lwp_vkernel->ve &&
			    (td->td_pcb->pcb_flags & FP_VIRTFP)
			) {
				npxdna();
				break;
			}

			/*
			 * The kernel may have switched out the FP unit's
			 * state, causing the user process to take a fault
			 * when it tries to use the FP unit.  Restore the
			 * state here.
			 */
			if (npxdna())
				goto out;
			i = SIGFPE;
			ucode = FPE_FPU_NP_TRAP;
			break;

		case T_FPOPFLT:		/* FPU operand fetch fault */
			ucode = ILL_COPROC;
			i = SIGILL;
			break;

		case T_XMMFLT:		/* SIMD floating-point exception */
			ucode = 0; /* XXX */
			i = SIGFPE;
			break;
		}
	} else {
		/* kernel trap */

		switch (type) {
		case T_PAGEFLT:			/* page fault */
			trap_pfault(frame, FALSE);
			goto out2;

		case T_DNA:
			/*
			 * The kernel is apparently using fpu for copying.
			 * XXX this should be fatal unless the kernel has
			 * registered such use.
			 */
			if (npxdna())
				goto out2;
			break;

		case T_STKFLT:		/* stack fault */
			break;

		case T_PROTFLT:		/* general protection fault */
		case T_SEGNPFLT:	/* segment not present fault */
			/*
			 * Invalid segment selectors and out of bounds
			 * %rip's and %rsp's can be set up in user mode.
			 * This causes a fault in kernel mode when the
			 * kernel tries to return to user mode.  We want
			 * to get this fault so that we can fix the
			 * problem here and not have to check all the
			 * selectors and pointers when the user changes
			 * them.
			 */
			if (mycpu->gd_intr_nesting_level == 0) {
				if (td->td_pcb->pcb_onfault) {
					frame->tf_rip = (register_t)
						td->td_pcb->pcb_onfault;
					goto out2;
				}
				if (frame->tf_rip == (long)doreti_iret) {
					frame->tf_rip = (long)doreti_iret_fault;
					goto out2;
				}
			}
			break;

		case T_TSSFLT:
			/*
			 * PSL_NT can be set in user mode and isn't cleared
			 * automatically when the kernel is entered.  This
			 * causes a TSS fault when the kernel attempts to
			 * `iret' because the TSS link is uninitialized.  We
			 * want to get this fault so that we can fix the
			 * problem here and not every time the kernel is
			 * entered.
			 */
			if (frame->tf_rflags & PSL_NT) {
				frame->tf_rflags &= ~PSL_NT;
				goto out2;
			}
			break;

		case T_TRCTRAP:	 /* trace trap */
#if 0
			if (frame->tf_rip == (int)IDTVEC(syscall)) {
				/*
				 * We've just entered system mode via the
				 * syscall lcall.  Continue single stepping
				 * silently until the syscall handler has
				 * saved the flags.
				 */
				goto out2;
			}
			if (frame->tf_rip == (int)IDTVEC(syscall) + 1) {
				/*
				 * The syscall handler has now saved the
				 * flags.  Stop single stepping it.
				 */
				frame->tf_rflags &= ~PSL_T;
				goto out2;
			}
#endif

			/*
			 * Ignore debug register trace traps due to
			 * accesses in the user's address space, which
			 * can happen under several conditions such as
			 * if a user sets a watchpoint on a buffer and
			 * then passes that buffer to a system call.
			 * We still want to get TRCTRAPS for addresses
			 * in kernel space because that is useful when
			 * debugging the kernel.
			 */
#if JG
			if (user_dbreg_trap()) {
				/*
				 * Reset breakpoint bits because the
				 * processor doesn't.
				 */
				/* XXX check upper bits here */
				load_dr6(rdr6() & 0xfffffff0);
				goto out2;
			}
#endif
			/*
			 * FALLTHROUGH (TRCTRAP kernel mode, kernel address)
			 */
		case T_BPTFLT:
			/*
			 * If DDB is enabled, let it handle the debugger trap.
			 * Otherwise, debugger traps "can't happen".
			 */
			ucode = TRAP_BRKPT;
#ifdef DDB
			MAKEMPSAFE(have_mplock);
			if (kdb_trap(type, 0, frame))
				goto out2;
#endif
			break;

#if NISA > 0
		case T_NMI:
			MAKEMPSAFE(have_mplock);
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					kprintf("NMI ... going to debugger\n");
					kdb_trap(type, 0, frame);
				}
#endif /* DDB */
				goto out2;
			} else if (panic_on_nmi == 0)
				goto out2;
			/* FALL THROUGH */
#endif /* NISA > 0 */
		}
		MAKEMPSAFE(have_mplock);
		trap_fatal(frame, 0);
		goto out2;
	}

	/*
	 * Virtual kernel intercept - if the fault is directly related to a
	 * VM context managed by a virtual kernel then let the virtual kernel
	 * handle it.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		vkernel_trap(lp, frame);
		goto out;
	}

	/*
	 * Translate fault for emulators (e.g. Linux)
	 */
	if (*p->p_sysent->sv_transtrap)
		i = (*p->p_sysent->sv_transtrap)(i, type);

	MAKEMPSAFE(have_mplock);
	trapsignal(lp, i, ucode);

#ifdef DEBUG
	if (type <= MAX_TRAP_MSG) {
		uprintf("fatal process exception: %s",
			trap_msg[type]);
		if ((type == T_PAGEFLT) || (type == T_PROTFLT))
			uprintf(", fault VA = 0x%lx", frame->tf_addr);
		uprintf("\n");
	}
#endif

out:
	userret(lp, frame, sticks);
	userexit(lp);
out2:	;
#ifdef SMP
	if (have_mplock)
		rel_mplock();
#endif
	if (p != NULL && lp != NULL)
		KTR_LOG(kernentry_trap_ret, p->p_pid, lp->lwp_tid);
#ifdef INVARIANTS
	KASSERT(crit_count == td->td_critcount,
		("trap: critical section count mismatch! %d/%d",
		crit_count, td->td_pri));
	KASSERT(curstop == td->td_toks_stop,
		("trap: extra tokens held after trap! %ld/%ld",
		curstop - &td->td_toks_base,
		td->td_toks_stop - &td->td_toks_base));
#endif
}

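/*
 * Handle a page fault.  Returns 0 if the fault was resolved, -1 if it
 * was fatal (trap_fatal() has already been called), or the signal
 * (SIGSEGV or SIGBUS) to deliver to the faulting lwp.  'usermode' is
 * TRUE when the fault originated in user mode.
 */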
static int
trap_pfault(struct trapframe *frame, int usermode)
{
	vm_offset_t va;
	struct vmspace *vm = NULL;
	vm_map_t map;
	int rv = 0;
	int fault_flags;
	vm_prot_t ftype;
	thread_t td = curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p;

	va = trunc_page(frame->tf_addr);
	if (va >= VM_MIN_KERNEL_ADDRESS) {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 */
		if (usermode) {
			fault_flags = -1;
			ftype = -1;
			goto nogo;
		}

		map = &kernel_map;
	} else {
		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL.  If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */
		if (lp != NULL)
			vm = lp->lwp_vmspace;

		if (vm == NULL) {
			fault_flags = -1;
			ftype = -1;
			goto nogo;
		}

		map = &vm->vm_map;
	}

	/*
	 * PGEX_I is defined only if the execute disable bit capability is
	 * supported and enabled.
	 */
	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_WRITE;
#if JG
	else if ((frame->tf_err & PGEX_I) && pg_nx != 0)
		ftype = VM_PROT_EXECUTE;
#endif
	else
		ftype = VM_PROT_READ;

	if (map != &kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		PHOLD(lp->lwp_proc);

		/*
		 * Issue fault
		 */
		fault_flags = 0;
		if (usermode)
			fault_flags |= VM_FAULT_BURST;
		if (ftype & VM_PROT_WRITE)
			fault_flags |= VM_FAULT_DIRTY;
		else
			fault_flags |= VM_FAULT_NORMAL;
		rv = vm_fault(map, va, ftype, fault_flags);

		PRELE(lp->lwp_proc);
	} else {
		/*
		 * Don't have to worry about process locking or stacks
		 * in the kernel.
		 */
		fault_flags = VM_FAULT_NORMAL;
		rv = vm_fault(map, va, ftype, fault_flags);
	}

	if (rv == KERN_SUCCESS)
		return (0);
nogo:
	if (!usermode) {
		if (td->td_gd->gd_intr_nesting_level == 0 &&
		    td->td_pcb->pcb_onfault) {
			frame->tf_rip = (register_t)td->td_pcb->pcb_onfault;
			return (0);
		}
		trap_fatal(frame, frame->tf_addr);
		return (-1);
	}

	/*
	 * NOTE: on x86_64 we have a tf_addr field in the trapframe, no
	 * kludge is needed to pass the fault address to signal handlers.
	 */
	p = td->td_proc;
	if (td->td_lwp->lwp_vkernel == NULL) {
		if (bootverbose)
			kprintf("seg-fault ft=%04x ff=%04x addr=%p rip=%p "
				"pid=%d p_comm=%s\n",
				ftype, fault_flags,
				(void *)frame->tf_addr,
				(void *)frame->tf_rip,
				p->p_pid, p->p_comm);
#ifdef DDB
		if (ddb_on_seg_fault)
			Debugger("ddb_on_seg_fault");
#endif
	}

	return ((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}

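/*
 * Report an unrecoverable trap in detail, then drop into the kernel
 * debugger if it is configured to take the trap, otherwise panic.
 */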
static void
trap_fatal(struct trapframe *frame, vm_offset_t eva)
{
	int code, ss;
	u_int type;
	long rsp;
	struct soft_segment_descriptor softseg;
	char *msg;

	code = frame->tf_err;
	type = frame->tf_trapno;
	sdtossd(&gdt[IDXSEL(frame->tf_cs & 0xffff)], &softseg);

	if (type <= MAX_TRAP_MSG)
		msg = trap_msg[type];
	else
		msg = "UNKNOWN";
	kprintf("\n\nFatal trap %d: %s while in %s mode\n", type, msg,
	    ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
#ifdef SMP
	/* three separate prints in case of a trap on an unmapped page */
	kprintf("cpuid = %d; ", mycpu->gd_cpuid);
	kprintf("lapic->id = %08x\n", lapic->id);
#endif
	if (type == T_PAGEFLT) {
		kprintf("fault virtual address = 0x%lx\n", eva);
		kprintf("fault code = %s %s %s, %s\n",
			code & PGEX_U ? "user" : "supervisor",
			code & PGEX_W ? "write" : "read",
			code & PGEX_I ? "instruction" : "data",
			code & PGEX_P ? "protection violation" : "page not present");
	}
	kprintf("instruction pointer = 0x%lx:0x%lx\n",
		frame->tf_cs & 0xffff, frame->tf_rip);
	if (ISPL(frame->tf_cs) == SEL_UPL) {
		ss = frame->tf_ss & 0xffff;
		rsp = frame->tf_rsp;
	} else {
		ss = GSEL(GDATA_SEL, SEL_KPL);
		rsp = (long)&frame->tf_rsp;
	}
	kprintf("stack pointer = 0x%x:0x%lx\n", ss, rsp);
	kprintf("frame pointer = 0x%x:0x%lx\n", ss, frame->tf_rbp);
	kprintf("code segment = base 0x%lx, limit 0x%lx, type 0x%x\n",
		softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
	kprintf("             = DPL %d, pres %d, long %d, def32 %d, gran %d\n",
		softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_long,
		softseg.ssd_def32, softseg.ssd_gran);
	kprintf("processor eflags = ");
	if (frame->tf_rflags & PSL_T)
		kprintf("trace trap, ");
	if (frame->tf_rflags & PSL_I)
		kprintf("interrupt enabled, ");
	if (frame->tf_rflags & PSL_NT)
		kprintf("nested task, ");
	if (frame->tf_rflags & PSL_RF)
		kprintf("resume, ");
	kprintf("IOPL = %ld\n", (frame->tf_rflags & PSL_IOPL) >> 12);
	kprintf("current process = ");
	if (curproc) {
		kprintf("%lu\n", (u_long)curproc->p_pid);
	} else {
		kprintf("Idle\n");
	}
	kprintf("current thread = pri %d ", curthread->td_pri);
	if (curthread->td_critcount)
		kprintf("(CRIT)");
	kprintf("\n");

#ifdef DDB
	if ((debugger_on_panic || db_active) && kdb_trap(type, code, frame))
		return;
#endif
	kprintf("trap number = %d\n", type);
	if (type <= MAX_TRAP_MSG)
		panic("%s", trap_msg[type]);
	else
		panic("unknown/reserved trap");
}

982/*
983 * Double fault handler. Called when a fault occurs while writing
984 * a frame for a trap/exception onto the stack. This usually occurs
985 * when the stack overflows (such is the case with infinite recursion,
986 * for example).
987 */
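/*
 * Returns non-zero if 'rptr' points into the guard page at the base of
 * the current thread's kernel stack.
 */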
static __inline
int
in_kstack_guard(register_t rptr)
{
	thread_t td = curthread;

	if ((char *)rptr >= td->td_kstack &&
	    (char *)rptr < td->td_kstack + PAGE_SIZE) {
		return 1;
	}
	return 0;
}

void
dblfault_handler(struct trapframe *frame)
{
	thread_t td = curthread;

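	/*
	 * If a stack pointer blew through the kstack guard page, point
	 * it back at valid stack so the reporting below can run without
	 * faulting again.
	 */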
	if (in_kstack_guard(frame->tf_rsp) || in_kstack_guard(frame->tf_rbp)) {
		kprintf("DOUBLE FAULT - KERNEL STACK GUARD HIT!\n");
		if (in_kstack_guard(frame->tf_rsp))
			frame->tf_rsp = (register_t)(td->td_kstack + PAGE_SIZE);
		if (in_kstack_guard(frame->tf_rbp))
			frame->tf_rbp = (register_t)(td->td_kstack + PAGE_SIZE);
	} else {
		kprintf("DOUBLE FAULT\n");
	}
	kprintf("\nFatal double fault\n");
	kprintf("rip = 0x%lx\n", frame->tf_rip);
	kprintf("rsp = 0x%lx\n", frame->tf_rsp);
	kprintf("rbp = 0x%lx\n", frame->tf_rbp);
#ifdef SMP
	/* three separate prints in case of a trap on an unmapped page */
	kprintf("cpuid = %d; ", mycpu->gd_cpuid);
	kprintf("lapic->id = %08x\n", lapic->id);
#endif
	panic("double fault");
}

1027/*
7bd858e5 1028 * syscall2 - MP aware system call request C handler
c8fe38ae 1029 *
7bd858e5
MD
1030 * A system call is essentially treated as a trap except that the
1031 * MP lock is not held on entry or return. We are responsible for
1032 * obtaining the MP lock if necessary and for handling ASTs
1033 * (e.g. a task switch) prior to return.
c8fe38ae 1034 *
7bd858e5 1035 * MPSAFE
c8fe38ae
MD
1036 */
1037void
1038syscall2(struct trapframe *frame)
1039{
1040 struct thread *td = curthread;
1041 struct proc *p = td->td_proc;
1042 struct lwp *lp = td->td_lwp;
1043 caddr_t params;
1044 struct sysent *callp;
1045 register_t orig_tf_rflags;
1046 int sticks;
1047 int error;
1048 int narg;
1049#ifdef INVARIANTS
f9235b6d 1050 int crit_count = td->td_critcount;
c8fe38ae
MD
1051#endif
1052#ifdef SMP
1053 int have_mplock = 0;
1054#endif
1055 register_t *argp;
1056 u_int code;
1057 int reg, regcnt;
1058 union sysunion args;
1059 register_t *argsdst;
c8fe38ae 1060
46d4e165 1061 mycpu->gd_cnt.v_syscall++;
c8fe38ae 1062
c8fe38ae
MD
1063#ifdef DIAGNOSTIC
1064 if (ISPL(frame->tf_cs) != SEL_UPL) {
1065 get_mplock();
1066 panic("syscall");
1067 /* NOT REACHED */
1068 }
1069#endif
1070
1071 KTR_LOG(kernentry_syscall, p->p_pid, lp->lwp_tid,
4afe74da 1072 frame->tf_rax);
c8fe38ae 1073
d86a23e0 1074 userenter(td, p); /* lazy raise our priority */
c8fe38ae
MD
1075
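	/*
	 * Arguments arrive in up to six registers beginning at %rdi.
	 * 'reg' indexes the first register holding a real argument and
	 * 'regcnt' counts the argument registers still available; both
	 * shift by one for SYS_syscall/SYS___syscall, where %rdi carries
	 * the real syscall number instead.
	 */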
	reg = 0;
	regcnt = 6;

	/*
	 * Misc
	 */
	sticks = (int)td->td_sticks;
	orig_tf_rflags = frame->tf_rflags;

	/*
	 * Virtual kernel intercept - if a VM context managed by a virtual
	 * kernel issues a system call the virtual kernel handles it, not us.
	 * Restore the virtual kernel context and return from its system
	 * call.  The current frame is copied out to the virtual kernel.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		vkernel_trap(lp, frame);
		error = EJUSTRETURN;
		goto out;
	}

	/*
	 * Get the system call parameters and account for time
	 */
	KASSERT(lp->lwp_md.md_regs == frame,
		("Frame mismatch %p %p", lp->lwp_md.md_regs, frame));
	params = (caddr_t)frame->tf_rsp + sizeof(register_t);
	code = frame->tf_rax;

	if (p->p_sysent->sv_prepsyscall) {
		(*p->p_sysent->sv_prepsyscall)(
			frame, (int *)(&args.nosys.sysmsg + 1),
			&code, &params);
	} else {
		if (code == SYS_syscall || code == SYS___syscall) {
			code = frame->tf_rdi;
			reg++;
			regcnt--;
		}
	}

	if (p->p_sysent->sv_mask)
		code &= p->p_sysent->sv_mask;

	if (code >= p->p_sysent->sv_size)
		callp = &p->p_sysent->sv_table[0];
	else
		callp = &p->p_sysent->sv_table[code];

	narg = callp->sy_narg & SYF_ARGMASK;

	/*
	 * On x86_64 we get up to six arguments in registers.  The rest are
	 * on the stack.  The first six members of 'struct trapframe' happen
	 * to be the registers used to pass arguments, in exactly the right
	 * order.
	 */
	argp = &frame->tf_rdi;
	argp += reg;
	argsdst = (register_t *)(&args.nosys.sysmsg + 1);
	/*
	 * JG can we overflow the space pointed to by 'argsdst'
	 * either with 'bcopy' or with 'copyin'?
	 */
	bcopy(argp, argsdst, sizeof(register_t) * regcnt);
	/*
	 * copyin is MP aware, but the tracing code is not
	 */
	if (narg > regcnt) {
		KASSERT(params != NULL, ("copyin args with no params!"));
		error = copyin(params, &argsdst[regcnt],
			       (narg - regcnt) * sizeof(register_t));
		if (error) {
#ifdef KTRACE
			if (KTRPOINT(td, KTR_SYSCALL)) {
				MAKEMPSAFE(have_mplock);

				ktrsyscall(lp, code, narg,
					   (void *)(&args.nosys.sysmsg + 1));
			}
#endif
			goto bad;
		}
	}

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSCALL)) {
		MAKEMPSAFE(have_mplock);
		ktrsyscall(lp, code, narg, (void *)(&args.nosys.sysmsg + 1));
	}
#endif

	/*
	 * Default return value is 0 (will be copied to %rax).  Double-value
	 * returns use %rax and %rdx.  %rdx is left unchanged for system
	 * calls which return only one result.
	 */
	args.sysmsg_fds[0] = 0;
	args.sysmsg_fds[1] = frame->tf_rdx;

	/*
	 * The syscall might manipulate the trap frame.  If it does it
	 * will probably return EJUSTRETURN.
	 */
	args.sysmsg_frame = frame;

	STOPEVENT(p, S_SCE, narg);	/* MP aware */

	/*
	 * NOTE: All system calls run MPSAFE now.  The system call itself
	 *	 is responsible for getting the MP lock.
	 */
	error = (*callp->sy_call)(&args);

out:
	/*
	 * MP SAFE (we may or may not have the MP lock at this point)
	 */
	/* kprintf("SYSMSG %d ", error); */
	switch (error) {
	case 0:
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		lp = curthread->td_lwp;
		frame->tf_rax = args.sysmsg_fds[0];
		frame->tf_rdx = args.sysmsg_fds[1];
		frame->tf_rflags &= ~PSL_C;
		break;
	case ERESTART:
		/*
		 * Reconstruct pc, we know that 'syscall' is 2 bytes.
		 * We have to do a full context restore so that %r10
		 * (which was holding the value of %rcx) is restored for
		 * the next iteration.
		 */
		frame->tf_rip -= frame->tf_err;
		frame->tf_r10 = frame->tf_rcx;
		break;
	case EJUSTRETURN:
		break;
	case EASYNC:
		panic("Unexpected EASYNC return value (for now)");
	default:
bad:
		if (p->p_sysent->sv_errsize) {
			if (error >= p->p_sysent->sv_errsize)
				error = -1;	/* XXX */
			else
				error = p->p_sysent->sv_errtbl[error];
		}
		frame->tf_rax = error;
		frame->tf_rflags |= PSL_C;
		break;
	}

	/*
	 * Traced syscall.  trapsignal() is not MP aware.
	 */
	if (orig_tf_rflags & PSL_T) {
		MAKEMPSAFE(have_mplock);
		frame->tf_rflags &= ~PSL_T;
		trapsignal(lp, SIGTRAP, TRAP_TRACE);
	}

	/*
	 * Handle reschedule and other end-of-syscall issues
	 */
	userret(lp, frame, sticks);

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSRET)) {
		MAKEMPSAFE(have_mplock);
		ktrsysret(lp, code, error, args.sysmsg_result);
	}
#endif

	/*
	 * This works because errno is findable through the
	 * register set.  If we ever support an emulation where this
	 * is not the case, this code will need to be revisited.
	 */
	STOPEVENT(p, S_SCX, code);

	userexit(lp);
#ifdef SMP
	/*
	 * Release the MP lock if we had to get it
	 */
	if (have_mplock)
		rel_mplock();
#endif
	KTR_LOG(kernentry_syscall_ret, p->p_pid, lp->lwp_tid, error);
#ifdef INVARIANTS
	KASSERT(crit_count == td->td_critcount,
		("syscall: critical section count mismatch! %d/%d",
		crit_count, td->td_pri));
	KASSERT(&td->td_toks_base == td->td_toks_stop,
		("syscall: extra tokens held after trap! %ld",
		td->td_toks_stop - &td->td_toks_base));
#endif
}

/*
 * NOTE: mplock not held at any point
 */
void
fork_return(struct lwp *lp, struct trapframe *frame)
{
	frame->tf_rax = 0;		/* Child returns zero */
	frame->tf_rflags &= ~PSL_C;	/* success */
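	/*
	 * %rdx = 1 appears to follow the historical fork() convention
	 * of flagging the child side of the return.
	 */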
	frame->tf_rdx = 1;

	generic_lwp_return(lp, frame);
	KTR_LOG(kernentry_fork_ret, lp->lwp_proc->p_pid, lp->lwp_tid);
}

1294/*
1295 * Simplified back end of syscall(), used when returning from fork()
2b0bd8aa
MD
1296 * directly into user mode.
1297 *
1298 * This code will return back into the fork trampoline code which then
1299 * runs doreti.
1300 *
1301 * NOTE: The mplock is not held at any point.
d7f50089
YY
1302 */
1303void
1304generic_lwp_return(struct lwp *lp, struct trapframe *frame)
1305{
c8fe38ae
MD
1306 struct proc *p = lp->lwp_proc;
1307
1308 /*
1309 * Newly forked processes are given a kernel priority. We have to
1310 * adjust the priority to a normal user priority and fake entry
1311 * into the kernel (call userenter()) to install a passive release
1312 * function just in case userret() decides to stop the process. This
1313 * can occur when ^Z races a fork. If we do not install the passive
1314 * release function the current process designation will not be
1315 * released when the thread goes to sleep.
1316 */
1317 lwkt_setpri_self(TDPRI_USER_NORM);
d86a23e0 1318 userenter(lp->lwp_thread, p);
c8fe38ae
MD
1319 userret(lp, frame, 0);
1320#ifdef KTRACE
1321 if (KTRPOINT(lp->lwp_thread, KTR_SYSRET))
1322 ktrsysret(lp, SYS_fork, 0, 0);
1323#endif
1324 p->p_flag |= P_PASSIVE_ACQ;
1325 userexit(lp);
1326 p->p_flag &= ~P_PASSIVE_ACQ;
d7f50089
YY
1327}
1328
1329/*
1330 * If PGEX_FPFAULT is set then set FP_VIRTFP in the PCB to force a T_DNA
1331 * fault (which is then passed back to the virtual kernel) if an attempt is
1332 * made to use the FP unit.
c8fe38ae 1333 *
d7f50089
YY
1334 * XXX this is a fairly big hack.
1335 */
1336void
1337set_vkernel_fp(struct trapframe *frame)
1338{
2deef24c
JG
1339 struct thread *td = curthread;
1340
1341 if (frame->tf_xflags & PGEX_FPFAULT) {
1342 td->td_pcb->pcb_flags |= FP_VIRTFP;
1343 if (mdcpu->gd_npxthread == td)
1344 npxexit();
1345 } else {
1346 td->td_pcb->pcb_flags &= ~FP_VIRTFP;
1347 }
d7f50089 1348}
bb47c072
MD
1349
1350/*
1351 * Called from vkernel_trap() to fixup the vkernel's syscall
1352 * frame for vmspace_ctl() return.
1353 */
1354void
1355cpu_vkernel_trap(struct trapframe *frame, int error)
1356{
1357 frame->tf_rax = error;
1358 if (error)
1359 frame->tf_rflags |= PSL_C;
1360 else
1361 frame->tf_rflags &= ~PSL_C;
1362}