kernel - Major signal path adjustments to fix races, tsleep race fixes, +more
[dragonfly.git] / sys / platform / pc32 / i386 / trap.c
1/*-
2 * Copyright (C) 1994, David Greenman
3 * Copyright (c) 1990, 1993
4 * The Regents of the University of California. All rights reserved.
5 *
6 * This code is derived from software contributed to Berkeley by
7 * the University of Utah, and William Jolitz.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by the University of
20 * California, Berkeley and its contributors.
21 * 4. Neither the name of the University nor the names of its contributors
22 * may be used to endorse or promote products derived from this software
23 * without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 *
37 * from: @(#)trap.c 7.4 (Berkeley) 5/13/91
38 * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $
39 * $DragonFly: src/sys/platform/pc32/i386/trap.c,v 1.115 2008/09/09 04:06:17 dillon Exp $
40 */
41
42/*
43 * 386 Trap and System call handling
44 */
45
46#include "use_isa.h"
47#include "use_npx.h"
48
49#include "opt_cpu.h"
50#include "opt_ddb.h"
51#include "opt_ktrace.h"
52#include "opt_clock.h"
53#include "opt_trap.h"
54
55#include <sys/param.h>
56#include <sys/systm.h>
57#include <sys/proc.h>
58#include <sys/pioctl.h>
59#include <sys/kernel.h>
60#include <sys/kerneldump.h>
61#include <sys/resourcevar.h>
62#include <sys/signalvar.h>
63#include <sys/signal2.h>
64#include <sys/syscall.h>
65#include <sys/sysctl.h>
66#include <sys/sysent.h>
67#include <sys/uio.h>
68#include <sys/vmmeter.h>
69#include <sys/malloc.h>
70#ifdef KTRACE
71#include <sys/ktrace.h>
72#endif
73#include <sys/ktr.h>
74#include <sys/upcall.h>
75#include <sys/vkernel.h>
76#include <sys/sysproto.h>
77#include <sys/sysunion.h>
78
79#include <vm/vm.h>
80#include <vm/vm_param.h>
81#include <sys/lock.h>
82#include <vm/pmap.h>
83#include <vm/vm_kern.h>
84#include <vm/vm_map.h>
85#include <vm/vm_page.h>
86#include <vm/vm_extern.h>
87
88#include <machine/cpu.h>
89#include <machine/md_var.h>
90#include <machine/pcb.h>
91#include <machine/smp.h>
92#include <machine/tss.h>
93#include <machine/specialreg.h>
94#include <machine/globaldata.h>
95#include <machine/intr_machdep.h>
96
97#include <machine_base/isa/isa_intr.h>
98#include <machine_base/apic/lapic.h>
99
100#ifdef POWERFAIL_NMI
101#include <sys/syslog.h>
102#include <machine/clock.h>
103#endif
104
105#include <machine/vm86.h>
106
107#include <ddb/ddb.h>
108
109#include <sys/msgport2.h>
110#include <sys/thread2.h>
111#include <sys/mplock2.h>
112
113#ifdef SMP
114
115#define MAKEMPSAFE(have_mplock) \
116 if (have_mplock == 0) { \
117 get_mplock(); \
118 have_mplock = 1; \
119 }
120
121#else
122
123#define MAKEMPSAFE(have_mplock)
124
125#endif
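/*
 * Typical usage in trap() and syscall2() below: a local 'have_mplock'
 * starts at 0, MAKEMPSAFE(have_mplock) is invoked on any path that is
 * not MP safe, and rel_mplock() is called once on the way out if the
 * flag was set.  The macro is idempotent, so it may appear on several
 * paths without acquiring the lock twice.
 */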
126
127int (*pmath_emulate) (struct trapframe *);
128
129extern void trap (struct trapframe *frame);
130extern void syscall2 (struct trapframe *frame);
131
132static int trap_pfault (struct trapframe *, int, vm_offset_t);
133static void trap_fatal (struct trapframe *, vm_offset_t);
134void dblfault_handler (void);
135
136extern inthand_t IDTVEC(syscall);
137
138#define MAX_TRAP_MSG 28
139static char *trap_msg[] = {
140 "", /* 0 unused */
141 "privileged instruction fault", /* 1 T_PRIVINFLT */
142 "", /* 2 unused */
143 "breakpoint instruction fault", /* 3 T_BPTFLT */
144 "", /* 4 unused */
145 "", /* 5 unused */
146 "arithmetic trap", /* 6 T_ARITHTRAP */
147 "system forced exception", /* 7 T_ASTFLT */
148 "", /* 8 unused */
149 "general protection fault", /* 9 T_PROTFLT */
150 "trace trap", /* 10 T_TRCTRAP */
151 "", /* 11 unused */
152 "page fault", /* 12 T_PAGEFLT */
153 "", /* 13 unused */
154 "alignment fault", /* 14 T_ALIGNFLT */
155 "", /* 15 unused */
156 "", /* 16 unused */
157 "", /* 17 unused */
158 "integer divide fault", /* 18 T_DIVIDE */
159 "non-maskable interrupt trap", /* 19 T_NMI */
160 "overflow trap", /* 20 T_OFLOW */
161 "FPU bounds check fault", /* 21 T_BOUND */
162 "FPU device not available", /* 22 T_DNA */
163 "double fault", /* 23 T_DOUBLEFLT */
164 "FPU operand fetch fault", /* 24 T_FPOPFLT */
165 "invalid TSS fault", /* 25 T_TSSFLT */
166 "segment not present fault", /* 26 T_SEGNPFLT */
167 "stack fault", /* 27 T_STKFLT */
168 "machine check trap", /* 28 T_MCHK */
169};
170
171#if defined(I586_CPU) && !defined(NO_F00F_HACK)
172extern int has_f00f_bug;
173#endif
174
175#ifdef DDB
176static int ddb_on_nmi = 1;
177SYSCTL_INT(_machdep, OID_AUTO, ddb_on_nmi, CTLFLAG_RW,
178 &ddb_on_nmi, 0, "Go to DDB on NMI");
179#endif
180static int panic_on_nmi = 1;
181SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
182 &panic_on_nmi, 0, "Panic on NMI");
183static int fast_release;
184SYSCTL_INT(_machdep, OID_AUTO, fast_release, CTLFLAG_RW,
185 &fast_release, 0, "Passive Release was optimal");
186static int slow_release;
187SYSCTL_INT(_machdep, OID_AUTO, slow_release, CTLFLAG_RW,
188 &slow_release, 0, "Passive Release was nonoptimal");
189
190MALLOC_DEFINE(M_SYSMSG, "sysmsg", "sysmsg structure");
191extern int max_sysmsg;
192
193/*
194 * Passively intercepts the thread switch function to increase the thread
195 * priority from a user priority to a kernel priority, reducing
196 * syscall and trap overhead for the case where no switch occurs.
197 *
198 * Synchronizes td_ucred with p_ucred. This is used by system calls,
199 * signal handling, faults, AST traps, and anything else that enters the
200 * kernel from userland and provides the kernel with a stable read-only
201 * copy of the process ucred.
202 */
203static __inline void
204userenter(struct thread *curtd, struct proc *curp)
205{
206 struct ucred *ocred;
207 struct ucred *ncred;
208
209 curtd->td_release = lwkt_passive_release;
210
211 if (curtd->td_ucred != curp->p_ucred) {
212 ncred = crhold(curp->p_ucred);
213 ocred = curtd->td_ucred;
214 curtd->td_ucred = ncred;
215 if (ocred)
216 crfree(ocred);
217 }
218
219}
220
221/*
222 * Handle signals, upcalls, profiling, and other AST's and/or tasks that
223 * must be completed before we can return to or try to return to userland.
224 *
225 * Note that td_sticks is a 64 bit quantity, but there's no point doing 64
226 * bit arithmetic on the delta calculation, so the absolute tick values are
227 * truncated to an integer.
228 */
229static void
230userret(struct lwp *lp, struct trapframe *frame, int sticks)
231{
232 struct proc *p = lp->lwp_proc;
233 void (*hook)(void);
234 int sig;
235
236 if (p->p_userret != NULL) {
237 hook = p->p_userret;
238 p->p_userret = NULL;
239 (*hook)();
240 }
241
242 /*
243 * Charge system time if profiling. Note: times are in microseconds.
244 * This may do a copyout and block, so do it first even though it
245 * means some system time will be charged as user time.
246 */
247 if (p->p_flags & P_PROFIL) {
248 addupc_task(p, frame->tf_eip,
249 (u_int)((int)lp->lwp_thread->td_sticks - sticks));
250 }
251
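	/*
	 * Each check below may itself generate new work for the others
	 * (e.g. tstop() can return with a signal pending), so after
	 * handling any one condition we loop back to recheck until none
	 * of them apply.
	 */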
252recheck:
253 /*
254 * If the jungle wants us dead, so be it.
255 */
256 if (lp->lwp_mpflags & LWP_MP_WEXIT) {
257 lwkt_gettoken(&p->p_token);
258 lwp_exit(0);
259 lwkt_reltoken(&p->p_token); /* NOT REACHED */
260 }
261
262 /*
263 * Block here if we are in a stopped state.
264 */
265 if (p->p_stat == SSTOP || dump_stop_usertds) {
266 lwkt_gettoken(&p->p_token);
267 tstop();
268 lwkt_reltoken(&p->p_token);
269 goto recheck;
270 }
271
272 /*
273 * Post any pending upcalls. If running a virtual kernel be sure
274 * to restore the virtual kernel's vmspace before posting the upcall.
275 */
276 if (p->p_flags & (P_SIGVTALRM | P_SIGPROF | P_UPCALLPEND)) {
277 lwkt_gettoken(&p->p_token);
278 if (p->p_flags & P_SIGVTALRM) {
279 p->p_flags &= ~P_SIGVTALRM;
280 ksignal(p, SIGVTALRM);
281 }
282 if (p->p_flags & P_SIGPROF) {
283 p->p_flags &= ~P_SIGPROF;
284 ksignal(p, SIGPROF);
285 }
286 if (p->p_flags & P_UPCALLPEND) {
287 p->p_flags &= ~P_UPCALLPEND;
288 postupcall(lp);
289 }
290 lwkt_reltoken(&p->p_token);
291 goto recheck;
292 }
293
294 /*
295 * Post any pending signals. If running a virtual kernel be sure
296 * to restore the virtual kernel's vmspace before posting the signal.
297 *
298 * WARNING! postsig() can exit and not return.
299 */
300 if ((sig = CURSIG_TRACE(lp)) != 0) {
301 lwkt_gettoken(&p->p_token);
302 postsig(sig);
303 lwkt_reltoken(&p->p_token);
304 goto recheck;
305 }
306
307 /*
308 * Block here if we are swapped out, but still process signals
309 * (such as SIGKILL). proc0 (the swapin scheduler) is already
310 * aware of our situation, so we do not have to wake it up.
311 */
312 if (p->p_flags & P_SWAPPEDOUT) {
313 lwkt_gettoken(&p->p_token);
314 get_mplock();
315 p->p_flags |= P_SWAPWAIT;
316 swapin_request();
317 if (p->p_flags & P_SWAPWAIT)
318 tsleep(p, PCATCH, "SWOUT", 0);
319 p->p_flags &= ~P_SWAPWAIT;
320 rel_mplock();
321 lwkt_reltoken(&p->p_token);
322 goto recheck;
323 }
324
325 /*
326 * Make sure postsig() handled the request to restore the old signal
327 * mask after running the signal handler.
328 */
329 KKASSERT((lp->lwp_flags & LWP_OLDMASK) == 0);
330}
331
332/*
333 * Cleanup from userenter and any passive release that might have occurred.
334 * We must reclaim the current-process designation before we can return
335 * to usermode. We also handle both LWKT and USER reschedule requests.
336 */
337static __inline void
338userexit(struct lwp *lp)
339{
340 struct thread *td = lp->lwp_thread;
341 /* globaldata_t gd = td->td_gd; */
342
343 /*
344 * Handle stop requests at kernel priority. Any requests queued
345 * after this loop will generate another AST.
346 */
347 while (lp->lwp_proc->p_stat == SSTOP) {
348 lwkt_gettoken(&lp->lwp_proc->p_token);
349 tstop();
350 lwkt_reltoken(&lp->lwp_proc->p_token);
351 }
352
353 /*
354 * Reduce our priority in preparation for a return to userland. If
355 * our passive release function was still in place, our priority was
356 * never raised and does not need to be reduced.
357 */
358 lwkt_passive_recover(td);
359
360 /*
361 * Become the current user scheduled process if we aren't already,
362 * and deal with reschedule requests and other factors.
363 */
364 lp->lwp_proc->p_usched->acquire_curproc(lp);
365 /* WARNING: we may have migrated cpu's */
366 /* gd = td->td_gd; */
367}
368
369#if !defined(KTR_KERNENTRY)
370#define KTR_KERNENTRY KTR_ALL
371#endif
372KTR_INFO_MASTER(kernentry);
373KTR_INFO(KTR_KERNENTRY, kernentry, trap, 0, "pid=%d, tid=%d, trapno=%d, eva=%p",
374 sizeof(int) + sizeof(int) + sizeof(int) + sizeof(vm_offset_t));
375KTR_INFO(KTR_KERNENTRY, kernentry, trap_ret, 0, "pid=%d, tid=%d",
376 sizeof(int) + sizeof(int));
377KTR_INFO(KTR_KERNENTRY, kernentry, syscall, 0, "pid=%d, tid=%d, call=%d",
378 sizeof(int) + sizeof(int) + sizeof(int));
379KTR_INFO(KTR_KERNENTRY, kernentry, syscall_ret, 0, "pid=%d, tid=%d, err=%d",
380 sizeof(int) + sizeof(int) + sizeof(int));
381KTR_INFO(KTR_KERNENTRY, kernentry, fork_ret, 0, "pid=%d, tid=%d",
382 sizeof(int) + sizeof(int));
383
384/*
385 * Exception, fault, and trap interface to the kernel.
386 * This common code is called from assembly language IDT gate entry
387 * routines that prepare a suitable stack frame, and restore this
388 * frame after the exception has been processed.
389 *
390 * This function is also called from doreti in an interlock to handle ASTs.
391 * For example: hardwareint->INTROUTINE->(set ast)->doreti->trap
392 *
393 * NOTE! We have to retrieve the fault address prior to obtaining the
394 * MP lock because get_mplock() may switch out. YYY cr2 really ought
395 * to be retrieved by the assembly code, not here.
396 *
397 * XXX gd_trap_nesting_level currently prevents lwkt_switch() from panicking
398 * if an attempt is made to switch from a fast interrupt or IPI. This is
399 * necessary to properly take fatal kernel traps on SMP machines if
400 * get_mplock() has to block.
401 */
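/*
 * NOTE: Two exit labels are used below.  'out' runs the full
 * userret()/userexit() path for user mode traps and then falls into
 * 'out2', which only releases the MP lock (if it was acquired) and
 * logs the return; kernel traps and early exits jump to 'out2'
 * directly.
 */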
402
403void
404trap(struct trapframe *frame)
405{
406 struct globaldata *gd = mycpu;
407 struct thread *td = gd->gd_curthread;
408 struct lwp *lp = td->td_lwp;
409 struct proc *p;
410 int sticks = 0;
411 int i = 0, ucode = 0, type, code;
412#ifdef SMP
413 int have_mplock = 0;
414#endif
415#ifdef INVARIANTS
416 int crit_count = td->td_critcount;
417 lwkt_tokref_t curstop = td->td_toks_stop;
418#endif
419 vm_offset_t eva;
420
421 p = td->td_proc;
422#ifdef DDB
423 /*
424 * We need to allow T_DNA faults when the debugger is active since
425 * some dumping paths do large bcopy()s, which use the floating
426 * point registers for faster copying.
427 */
428 if (db_active && frame->tf_trapno != T_DNA) {
429 eva = (frame->tf_trapno == T_PAGEFLT ? rcr2() : 0);
430 ++gd->gd_trap_nesting_level;
431 MAKEMPSAFE(have_mplock);
432 trap_fatal(frame, eva);
433 --gd->gd_trap_nesting_level;
434 goto out2;
435 }
436#endif
437
438 eva = 0;
439 ++gd->gd_trap_nesting_level;
440 if (frame->tf_trapno == T_PAGEFLT) {
441 /*
442 * For some Cyrix CPUs, %cr2 is clobbered by interrupts.
443 * This problem is worked around by using an interrupt
444 * gate for the pagefault handler. We are finally ready
445 * to read %cr2 and then must reenable interrupts.
446 *
447 * XXX this should be in the switch statement, but the
448 * NO_F00F_HACK and VM86 goto and ifdefs obfuscate the
449 * flow of control too much for this to be obviously
450 * correct.
451 */
452 eva = rcr2();
453 cpu_enable_intr();
454 }
455
456 --gd->gd_trap_nesting_level;
457
458 if (!(frame->tf_eflags & PSL_I)) {
459 /*
460 * Buggy application or kernel code has disabled interrupts
461 * and then trapped. Enabling interrupts now is wrong, but
462 * it is better than running with interrupts disabled until
463 * they are accidentally enabled later.
464 */
465 type = frame->tf_trapno;
466 if (ISPL(frame->tf_cs)==SEL_UPL || (frame->tf_eflags & PSL_VM)) {
467 MAKEMPSAFE(have_mplock);
468 kprintf(
469 "pid %ld (%s): trap %d with interrupts disabled\n",
470 (long)curproc->p_pid, curproc->p_comm, type);
471 } else if (type != T_BPTFLT && type != T_TRCTRAP) {
472 /*
473 * XXX not quite right, since this may be for a
474 * multiple fault in user mode.
475 */
476 MAKEMPSAFE(have_mplock);
477 kprintf("kernel trap %d with interrupts disabled\n",
478 type);
479 }
480 cpu_enable_intr();
481 }
482
483#if defined(I586_CPU) && !defined(NO_F00F_HACK)
484restart:
485#endif
486 type = frame->tf_trapno;
487 code = frame->tf_err;
488
489 if (in_vm86call) {
490 if (frame->tf_eflags & PSL_VM &&
491 (type == T_PROTFLT || type == T_STKFLT)) {
492#ifdef SMP
493 KKASSERT(get_mplock_count(curthread) > 0);
494#endif
495 i = vm86_emulate((struct vm86frame *)frame);
496#ifdef SMP
497 KKASSERT(get_mplock_count(curthread) > 0);
498#endif
499 if (i != 0) {
500 /*
501 * returns to original process
502 */
503#ifdef SMP
504 vm86_trap((struct vm86frame *)frame,
505 have_mplock);
506#else
507 vm86_trap((struct vm86frame *)frame, 0);
508#endif
509 KKASSERT(0); /* NOT REACHED */
510 }
511 goto out2;
512 }
513 switch (type) {
514 /*
515 * these traps want either a process context, or
516 * assume a normal userspace trap.
517 */
518 case T_PROTFLT:
519 case T_SEGNPFLT:
520 trap_fatal(frame, eva);
521 goto out2;
522 case T_TRCTRAP:
523 type = T_BPTFLT; /* kernel breakpoint */
524 /* FALL THROUGH */
525 }
526 goto kernel_trap; /* normal kernel trap handling */
527 }
528
529 if ((ISPL(frame->tf_cs) == SEL_UPL) || (frame->tf_eflags & PSL_VM)) {
530 /* user trap */
531
532 KTR_LOG(kernentry_trap, p->p_pid, lp->lwp_tid,
533 frame->tf_trapno, eva);
534
535 userenter(td, p);
536
537 sticks = (int)td->td_sticks;
538 lp->lwp_md.md_regs = frame;
539
540 switch (type) {
541 case T_PRIVINFLT: /* privileged instruction fault */
542 ucode = ILL_COPROC;
543 i = SIGILL;
544 break;
545
546 case T_BPTFLT: /* bpt instruction fault */
547 case T_TRCTRAP: /* trace trap */
548 frame->tf_eflags &= ~PSL_T;
549 ucode = TRAP_TRACE;
550 i = SIGTRAP;
551 break;
552
553 case T_ARITHTRAP: /* arithmetic trap */
554 ucode = code;
555 i = SIGFPE;
556 break;
557
558 case T_ASTFLT: /* Allow process switch */
559 mycpu->gd_cnt.v_soft++;
560 if (mycpu->gd_reqflags & RQF_AST_OWEUPC) {
561 atomic_clear_int(&mycpu->gd_reqflags,
562 RQF_AST_OWEUPC);
563 addupc_task(p, p->p_prof.pr_addr,
564 p->p_prof.pr_ticks);
565 }
566 goto out;
567
568 /*
569 * The following two traps can happen in
570 * vm86 mode, and, if so, we want to handle
571 * them specially.
572 */
573 case T_PROTFLT: /* general protection fault */
574 case T_STKFLT: /* stack fault */
575 if (frame->tf_eflags & PSL_VM) {
576 i = vm86_emulate((struct vm86frame *)frame);
577 if (i == 0)
578 goto out;
579 break;
580 }
581 i = SIGBUS;
582 ucode = (type == T_PROTFLT) ? BUS_OBJERR : BUS_ADRERR;
583 break;
584 case T_SEGNPFLT: /* segment not present fault */
585 i = SIGBUS;
586 ucode = BUS_ADRERR;
587 break;
588 case T_TSSFLT: /* invalid TSS fault */
589 case T_DOUBLEFLT: /* double fault */
590 i = SIGBUS;
591 ucode = BUS_OBJERR;
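			/* FALLTHROUGH - default assigns the same signal/code */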
592 default:
593#if 0
594 ucode = code + BUS_SEGM_FAULT ; /* XXX: ???*/
595#endif
596 ucode = BUS_OBJERR;
597 i = SIGBUS;
598 break;
599
600 case T_PAGEFLT: /* page fault */
601 i = trap_pfault(frame, TRUE, eva);
602 if (i == -1)
603 goto out;
604#if defined(I586_CPU) && !defined(NO_F00F_HACK)
605 if (i == -2)
606 goto restart;
607#endif
608 if (i == 0)
609 goto out;
610#if 0
611 ucode = T_PAGEFLT;
612#endif
613 if (i == SIGSEGV)
614 ucode = SEGV_MAPERR;
615 else
616 ucode = BUS_ADRERR; /* XXX */
617 break;
618
619 case T_DIVIDE: /* integer divide fault */
620 ucode = FPE_INTDIV;
621 i = SIGFPE;
622 break;
623
624#if NISA > 0
625 case T_NMI:
626 MAKEMPSAFE(have_mplock);
627#ifdef POWERFAIL_NMI
628 goto handle_powerfail;
629#else /* !POWERFAIL_NMI */
630 /* machine/parity/power fail/"kitchen sink" faults */
631 if (isa_nmi(code) == 0) {
632#ifdef DDB
633 /*
634 * NMI can be hooked up to a pushbutton
635 * for debugging.
636 */
637 if (ddb_on_nmi) {
638 kprintf ("NMI ... going to debugger\n");
639 kdb_trap (type, 0, frame);
640 }
641#endif /* DDB */
642 goto out2;
643 } else if (panic_on_nmi)
644 panic("NMI indicates hardware failure");
645 break;
646#endif /* POWERFAIL_NMI */
647#endif /* NISA > 0 */
648
649 case T_OFLOW: /* integer overflow fault */
650 ucode = FPE_INTOVF;
651 i = SIGFPE;
652 break;
653
654 case T_BOUND: /* bounds check fault */
655 ucode = FPE_FLTSUB;
656 i = SIGFPE;
657 break;
658
659 case T_DNA:
660 /*
661 * Virtual kernel intercept - pass the DNA exception
662 * to the virtual kernel if it asked to handle it.
663 * This occurs when the virtual kernel is holding
664 * onto the FP context for a different emulated
665 * process than the one currently running.
666 *
667 * We must still call npxdna() since we may have
668 * saved FP state that the virtual kernel needs
669 * to hand over to a different emulated process.
670 */
671 if (lp->lwp_vkernel && lp->lwp_vkernel->ve &&
672 (td->td_pcb->pcb_flags & FP_VIRTFP)
673 ) {
674 npxdna();
675 break;
676 }
677
678#if NNPX > 0
679 /*
680 * The kernel may have switched out the FP unit's
681 * state, causing the user process to take a fault
682 * when it tries to use the FP unit. Restore the
683 * state here.
684 */
685 if (npxdna())
686 goto out;
687#endif
688 if (!pmath_emulate) {
689 i = SIGFPE;
690 ucode = FPE_FPU_NP_TRAP;
691 break;
692 }
693 i = (*pmath_emulate)(frame);
694 if (i == 0) {
695 if (!(frame->tf_eflags & PSL_T))
696 goto out2;
697 frame->tf_eflags &= ~PSL_T;
698 i = SIGTRAP;
699 }
700 /* else ucode = emulator_only_knows() XXX */
701 break;
702
703 case T_FPOPFLT: /* FPU operand fetch fault */
704 ucode = ILL_ILLOPN;
705 i = SIGILL;
706 break;
707
708 case T_XMMFLT: /* SIMD floating-point exception */
709 ucode = 0; /* XXX */
710 i = SIGFPE;
711 break;
712 }
713 } else {
714kernel_trap:
715 /* kernel trap */
716
717 switch (type) {
718 case T_PAGEFLT: /* page fault */
719 trap_pfault(frame, FALSE, eva);
720 goto out2;
721
722 case T_DNA:
723#if NNPX > 0
724 /*
725 * The kernel may be using npx for copying or other
726 * purposes.
727 */
728 if (npxdna())
729 goto out2;
730#endif
731 break;
732
733 case T_PROTFLT: /* general protection fault */
734 case T_SEGNPFLT: /* segment not present fault */
735 /*
736 * Invalid segment selectors and out of bounds
737 * %eip's and %esp's can be set up in user mode.
738 * This causes a fault in kernel mode when the
739 * kernel tries to return to user mode. We want
740 * to get this fault so that we can fix the
741 * problem here and not have to check all the
742 * selectors and pointers when the user changes
743 * them.
744 */
745#define MAYBE_DORETI_FAULT(where, whereto) \
746 do { \
747 if (frame->tf_eip == (int)where) { \
748 frame->tf_eip = (int)whereto; \
749 goto out2; \
750 } \
751 } while (0)
752 if (mycpu->gd_intr_nesting_level == 0) {
753 /*
754 * Invalid %fs's and %gs's can be created using
755 * procfs or PT_SETREGS or by invalidating the
756 * underlying LDT entry. This causes a fault
757 * in kernel mode when the kernel attempts to
758 * switch contexts. Lose the bad context
759 * (XXX) so that we can continue, and generate
760 * a signal.
761 */
762 MAYBE_DORETI_FAULT(doreti_iret,
763 doreti_iret_fault);
764 MAYBE_DORETI_FAULT(doreti_popl_ds,
765 doreti_popl_ds_fault);
766 MAYBE_DORETI_FAULT(doreti_popl_es,
767 doreti_popl_es_fault);
768 MAYBE_DORETI_FAULT(doreti_popl_fs,
769 doreti_popl_fs_fault);
770 MAYBE_DORETI_FAULT(doreti_popl_gs,
771 doreti_popl_gs_fault);
772 if (td->td_pcb->pcb_onfault) {
773 frame->tf_eip =
774 (register_t)td->td_pcb->pcb_onfault;
775 goto out2;
776 }
777 }
778 break;
779
780 case T_TSSFLT:
781 /*
782 * PSL_NT can be set in user mode and isn't cleared
783 * automatically when the kernel is entered. This
784 * causes a TSS fault when the kernel attempts to
785 * `iret' because the TSS link is uninitialized. We
786 * want to get this fault so that we can fix the
787 * problem here and not every time the kernel is
788 * entered.
789 */
790 if (frame->tf_eflags & PSL_NT) {
791 frame->tf_eflags &= ~PSL_NT;
792 goto out2;
793 }
794 break;
795
796 case T_TRCTRAP: /* trace trap */
797 if (frame->tf_eip == (int)IDTVEC(syscall)) {
798 /*
799 * We've just entered system mode via the
800 * syscall lcall. Continue single stepping
801 * silently until the syscall handler has
802 * saved the flags.
803 */
804 goto out2;
805 }
806 if (frame->tf_eip == (int)IDTVEC(syscall) + 1) {
807 /*
808 * The syscall handler has now saved the
809 * flags. Stop single stepping it.
810 */
811 frame->tf_eflags &= ~PSL_T;
812 goto out2;
813 }
814 /*
815 * Ignore debug register trace traps due to
816 * accesses in the user's address space, which
817 * can happen under several conditions such as
818 * if a user sets a watchpoint on a buffer and
819 * then passes that buffer to a system call.
820 * We still want to get TRCTRAPS for addresses
821 * in kernel space because that is useful when
822 * debugging the kernel.
823 */
824 if (user_dbreg_trap()) {
825 /*
826 * Reset breakpoint bits because the
827 * processor doesn't clear them for us.
828 */
829 load_dr6(rdr6() & 0xfffffff0);
830 goto out2;
831 }
832 /*
833 * Fall through (TRCTRAP kernel mode, kernel address)
834 */
835 case T_BPTFLT:
836 /*
837 * If DDB is enabled, let it handle the debugger trap.
838 * Otherwise, debugger traps "can't happen".
839 */
840 ucode = TRAP_BRKPT;
841#ifdef DDB
842 MAKEMPSAFE(have_mplock);
843 if (kdb_trap (type, 0, frame))
844 goto out2;
845#endif
846 break;
847
848#if NISA > 0
849 case T_NMI:
850 MAKEMPSAFE(have_mplock);
851#ifdef POWERFAIL_NMI
852#ifndef TIMER_FREQ
853# define TIMER_FREQ 1193182
854#endif
855 handle_powerfail:
856 {
857 static unsigned lastalert = 0;
858
859 if(time_second - lastalert > 10)
860 {
861 log(LOG_WARNING, "NMI: power fail\n");
862 sysbeep(TIMER_FREQ/880, hz);
863 lastalert = time_second;
864 }
865 /* YYY mp count */
866 goto out2;
867 }
868#else /* !POWERFAIL_NMI */
869 /* machine/parity/power fail/"kitchen sink" faults */
870 if (isa_nmi(code) == 0) {
871#ifdef DDB
872 /*
873 * NMI can be hooked up to a pushbutton
874 * for debugging.
875 */
876 if (ddb_on_nmi) {
877 kprintf ("NMI ... going to debugger\n");
878 kdb_trap (type, 0, frame);
879 }
880#endif /* DDB */
881 goto out2;
882 } else if (panic_on_nmi == 0)
883 goto out2;
884 /* FALL THROUGH */
885#endif /* POWERFAIL_NMI */
886#endif /* NISA > 0 */
887 }
888
889 MAKEMPSAFE(have_mplock);
890 trap_fatal(frame, eva);
891 goto out2;
892 }
893
894 /*
895 * Virtual kernel intercept - if the fault is directly related to a
896 * VM context managed by a virtual kernel then let the virtual kernel
897 * handle it.
898 */
899 if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
900 vkernel_trap(lp, frame);
901 goto out;
902 }
903
904 /*
905 * Translate fault for emulators (e.g. Linux)
906 */
907 if (*p->p_sysent->sv_transtrap)
908 i = (*p->p_sysent->sv_transtrap)(i, type);
909
910 MAKEMPSAFE(have_mplock);
911 trapsignal(lp, i, ucode);
912
913#ifdef DEBUG
914 if (type <= MAX_TRAP_MSG) {
915 uprintf("fatal process exception: %s",
916 trap_msg[type]);
917 if ((type == T_PAGEFLT) || (type == T_PROTFLT))
918 uprintf(", fault VA = 0x%lx", (u_long)eva);
919 uprintf("\n");
920 }
921#endif
922
923out:
924 userret(lp, frame, sticks);
925 userexit(lp);
926out2: ;
927#ifdef SMP
928 if (have_mplock)
929 rel_mplock();
930#endif
931 if (p != NULL && lp != NULL)
932 KTR_LOG(kernentry_trap_ret, p->p_pid, lp->lwp_tid);
933#ifdef INVARIANTS
934 KASSERT(crit_count == td->td_critcount,
935 ("trap: critical section count mismatch! %d/%d",
936 crit_count, td->td_critcount));
937 KASSERT(curstop == td->td_toks_stop,
938 ("trap: extra tokens held after trap! %zd/%zd",
939 curstop - &td->td_toks_base,
940 td->td_toks_stop - &td->td_toks_base));
941#endif
942}
943
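/*
 * Handle a page fault (T_PAGEFLT).
 *
 * Returns 0 if the fault was resolved (or recovered via pcb_onfault),
 * -1 if a fatal kernel-mode fault was reported via trap_fatal(),
 * -2 if the F00F workaround converted the fault and the trap must be
 * restarted, or a signal number (SIGSEGV/SIGBUS) to deliver to the
 * process.
 */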
944int
945trap_pfault(struct trapframe *frame, int usermode, vm_offset_t eva)
946{
947 vm_offset_t va;
948 struct vmspace *vm = NULL;
949 vm_map_t map = 0;
950 int rv = 0;
951 int fault_flags;
952 vm_prot_t ftype;
953 thread_t td = curthread;
954 struct lwp *lp = td->td_lwp;
955
956 va = trunc_page(eva);
957 if (va >= KERNBASE) {
958 /*
959 * Don't allow user-mode faults in kernel address space.
960 * An exception: if the faulting address is the invalid
961 * instruction entry in the IDT, then the Intel Pentium
962 * F00F bug workaround was triggered, and we need to
963 * treat it as an illegal instruction, and not a page
964 * fault.
965 */
966#if defined(I586_CPU) && !defined(NO_F00F_HACK)
967 if ((eva == (unsigned int)&idt[6]) && has_f00f_bug) {
968 frame->tf_trapno = T_PRIVINFLT;
969 return -2;
970 }
971#endif
972 if (usermode)
973 goto nogo;
974
975 map = &kernel_map;
976 } else {
977 /*
978 * This is a fault on non-kernel virtual memory.
979 * vm is initialized above to NULL. If curproc is NULL
980 * or curproc->p_vmspace is NULL the fault is fatal.
981 */
982 if (lp != NULL)
983 vm = lp->lwp_vmspace;
984
985 if (vm == NULL)
986 goto nogo;
987
988 map = &vm->vm_map;
989 }
990
991 if (frame->tf_err & PGEX_W)
992 ftype = VM_PROT_WRITE;
993 else
994 ftype = VM_PROT_READ;
995
996 if (map != &kernel_map) {
997 /*
998 * Keep swapout from messing with us during this
999 * critical time.
1000 */
1001 PHOLD(lp->lwp_proc);
1002
1003 /*
1004 * Issue fault
1005 */
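		/*
		 * VM_FAULT_BURST lets vm_fault() fault in additional
		 * nearby pages for user mode faults; write faults are
		 * flagged VM_FAULT_DIRTY so the backing page is marked
		 * dirty up front.
		 */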
1006 fault_flags = 0;
1007 if (usermode)
1008 fault_flags |= VM_FAULT_BURST;
1009 if (ftype & VM_PROT_WRITE)
1010 fault_flags |= VM_FAULT_DIRTY;
1011 else
1012 fault_flags |= VM_FAULT_NORMAL;
1013 rv = vm_fault(map, va, ftype, fault_flags);
1014 PRELE(lp->lwp_proc);
1015 } else {
1016 /*
1017 * Don't have to worry about process locking or stacks
1018 * in the kernel.
1019 */
1020 rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
1021 }
1022
1023 if (rv == KERN_SUCCESS)
1024 return (0);
1025nogo:
1026 if (!usermode) {
1027 if (td->td_gd->gd_intr_nesting_level == 0 &&
1028 td->td_pcb->pcb_onfault) {
1029 frame->tf_eip = (register_t)td->td_pcb->pcb_onfault;
1030 return (0);
1031 }
1032 trap_fatal(frame, eva);
1033 return (-1);
1034 }
1035
1036 /* kludge to pass faulting virtual address to sendsig */
1037 frame->tf_xflags = frame->tf_err;
1038 frame->tf_err = eva;
1039
1040 return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
1041}
1042
1043static void
1044trap_fatal(struct trapframe *frame, vm_offset_t eva)
1045{
1046 int code, type, ss, esp;
1047 struct soft_segment_descriptor softseg;
1048
1049 code = frame->tf_err;
1050 type = frame->tf_trapno;
1051 sdtossd(&gdt[mycpu->gd_cpuid * NGDT + IDXSEL(frame->tf_cs & 0xffff)].sd, &softseg);
1052
1053 if (type <= MAX_TRAP_MSG)
1054 kprintf("\n\nFatal trap %d: %s while in %s mode\n",
1055 type, trap_msg[type],
1056 frame->tf_eflags & PSL_VM ? "vm86" :
1057 ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
1058#ifdef SMP
1059 /* three separate prints in case of a trap on an unmapped page */
1060 kprintf("cpuid = %d; ", mycpu->gd_cpuid);
1061 kprintf("lapic.id = %08x\n", lapic->id);
1062#endif
1063 if (type == T_PAGEFLT) {
1064 kprintf("fault virtual address = %p\n", (void *)eva);
1065 kprintf("fault code = %s %s, %s\n",
1066 code & PGEX_U ? "user" : "supervisor",
1067 code & PGEX_W ? "write" : "read",
1068 code & PGEX_P ? "protection violation" : "page not present");
1069 }
1070 kprintf("instruction pointer = 0x%x:0x%x\n",
1071 frame->tf_cs & 0xffff, frame->tf_eip);
1072 if ((ISPL(frame->tf_cs) == SEL_UPL) || (frame->tf_eflags & PSL_VM)) {
1073 ss = frame->tf_ss & 0xffff;
1074 esp = frame->tf_esp;
1075 } else {
1076 ss = GSEL(GDATA_SEL, SEL_KPL);
1077 esp = (int)&frame->tf_esp;
1078 }
1079 kprintf("stack pointer = 0x%x:0x%x\n", ss, esp);
1080 kprintf("frame pointer = 0x%x:0x%x\n", ss, frame->tf_ebp);
1081 kprintf("code segment = base 0x%x, limit 0x%x, type 0x%x\n",
1082 softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
1083 kprintf(" = DPL %d, pres %d, def32 %d, gran %d\n",
1084 softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_def32,
1085 softseg.ssd_gran);
1086 kprintf("processor eflags = ");
1087 if (frame->tf_eflags & PSL_T)
1088 kprintf("trace trap, ");
1089 if (frame->tf_eflags & PSL_I)
1090 kprintf("interrupt enabled, ");
1091 if (frame->tf_eflags & PSL_NT)
1092 kprintf("nested task, ");
1093 if (frame->tf_eflags & PSL_RF)
1094 kprintf("resume, ");
1095 if (frame->tf_eflags & PSL_VM)
1096 kprintf("vm86, ");
1097 kprintf("IOPL = %d\n", (frame->tf_eflags & PSL_IOPL) >> 12);
1098 kprintf("current process = ");
1099 if (curproc) {
1100 kprintf("%lu (%s)\n",
1101 (u_long)curproc->p_pid, curproc->p_comm ?
1102 curproc->p_comm : "");
1103 } else {
1104 kprintf("Idle\n");
1105 }
1106 kprintf("current thread = pri %d ", curthread->td_pri);
1107 if (curthread->td_critcount)
1108 kprintf("(CRIT)");
1109 kprintf("\n");
1110#ifdef SMP
1111/**
1112 * XXX FIXME:
1113 * we probably SHOULD have stopped the other CPUs before now!
1114 * another CPU COULD have been touching cpl at this moment...
1115 */
1116 kprintf(" <- SMP: XXX");
1117#endif
1118 kprintf("\n");
1119
1120#ifdef KDB
1121 if (kdb_trap(&psl))
1122 return;
1123#endif
1124#ifdef DDB
1125 if ((debugger_on_panic || db_active) && kdb_trap(type, code, frame))
1126 return;
1127#endif
1128 kprintf("trap number = %d\n", type);
1129 if (type <= MAX_TRAP_MSG)
1130 panic("%s", trap_msg[type]);
1131 else
1132 panic("unknown/reserved trap");
1133}
1134
1135/*
1136 * Double fault handler. Called when a fault occurs while writing
1137 * a frame for a trap/exception onto the stack. This usually occurs
1138 * when the stack overflows (as is the case with infinite recursion,
1139 * for example).
1140 *
1141 * XXX Note that the current PTD gets replaced by IdlePTD when the
1142 * task switch occurs. This means that the stack that was active at
1143 * the time of the double fault is not available at <kstack> unless
1144 * the machine was idle when the double fault occurred. The downside
1145 * of this is that "trace <ebp>" in ddb won't work.
1146 */
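/*
 * Returns non-zero if the supplied pointer falls within the first
 * (guard) page of the current thread's kernel stack, which indicates
 * the double fault was caused by a kernel stack overflow.
 */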
1147static __inline
1148int
1149in_kstack_guard(register_t rptr)
1150{
1151 thread_t td = curthread;
1152
1153 if ((char *)rptr >= td->td_kstack &&
1154 (char *)rptr < td->td_kstack + PAGE_SIZE) {
1155 return 1;
1156 }
1157 return 0;
1158}
1159
1160void
1161dblfault_handler(void)
1162{
1163 struct mdglobaldata *gd = mdcpu;
1164
1165 if (in_kstack_guard(gd->gd_common_tss.tss_esp) ||
1166 in_kstack_guard(gd->gd_common_tss.tss_ebp)) {
1167 kprintf("DOUBLE FAULT - KERNEL STACK GUARD HIT!\n");
1168 } else {
1169 kprintf("DOUBLE FAULT:\n");
1170 }
1171 kprintf("eip = 0x%x\n", gd->gd_common_tss.tss_eip);
1172 kprintf("esp = 0x%x\n", gd->gd_common_tss.tss_esp);
1173 kprintf("ebp = 0x%x\n", gd->gd_common_tss.tss_ebp);
1174#ifdef SMP
1175 /* three separate prints in case of a trap on an unmapped page */
1176 kprintf("cpuid = %d; ", gd->mi.gd_cpuid);
1177 kprintf("lapic.id = %08x\n", lapic->id);
1178#endif
1179 panic("double fault");
1180}
1181
1182/*
1183 * syscall2 - MP aware system call request C handler
1184 *
1185 * A system call is essentially treated as a trap. The MP lock is not
1186 * held on entry or return. We are responsible for handling ASTs
1187 * (e.g. a task switch) prior to return.
1188 *
1189 * MPSAFE
1190 */
1191void
1192syscall2(struct trapframe *frame)
1193{
1194 struct thread *td = curthread;
1195 struct proc *p = td->td_proc;
1196 struct lwp *lp = td->td_lwp;
1197 caddr_t params;
1198 struct sysent *callp;
1199 register_t orig_tf_eflags;
1200 int sticks;
1201 int error;
1202 int narg;
1203#ifdef INVARIANTS
1204 int crit_count = td->td_critcount;
1205#endif
1206#ifdef SMP
1207 int have_mplock = 0;
1208#endif
1209 u_int code;
1210 union sysunion args;
1211
1212#ifdef DIAGNOSTIC
1213 if (ISPL(frame->tf_cs) != SEL_UPL) {
1214 get_mplock();
1215 panic("syscall");
1216 /* NOT REACHED */
1217 }
1218#endif
1219
1220 KTR_LOG(kernentry_syscall, p->p_pid, lp->lwp_tid,
1221 frame->tf_eax);
1222
1223 userenter(td, p); /* lazy raise our priority */
1224
1225 /*
1226 * Misc
1227 */
1228 sticks = (int)td->td_sticks;
1229 orig_tf_eflags = frame->tf_eflags;
1230
1231 /*
1232 * Virtual kernel intercept - if a VM context managed by a virtual
1233 * kernel issues a system call the virtual kernel handles it, not us.
1234 * Restore the virtual kernel context and return from its system
1235 * call. The current frame is copied out to the virtual kernel.
1236 */
1237 if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
1238 vkernel_trap(lp, frame);
1239 error = EJUSTRETURN;
1240 callp = NULL;
1241 goto out;
1242 }
1243
1244 /*
1245 * Get the system call parameters and account for time
1246 */
1247 lp->lwp_md.md_regs = frame;
1248 params = (caddr_t)frame->tf_esp + sizeof(int);
1249 code = frame->tf_eax;
1250
1251 if (p->p_sysent->sv_prepsyscall) {
1252 (*p->p_sysent->sv_prepsyscall)(
1253 frame, (int *)(&args.nosys.sysmsg + 1),
1254 &code, &params);
1255 } else {
1256 /*
1257 * Need to check if this is a 32 bit or 64 bit syscall.
1258 * fuword is MP aware.
1259 */
1260 if (code == SYS_syscall) {
1261 /*
1262 * Code is first argument, followed by actual args.
1263 */
1264 code = fuword(params);
1265 params += sizeof(int);
1266 } else if (code == SYS___syscall) {
1267 /*
1268 * Like syscall, but code is a quad, so as to maintain
1269 * quad alignment for the rest of the arguments.
1270 */
1271 code = fuword(params);
1272 params += sizeof(quad_t);
1273 }
1274 }
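	/*
	 * For the standard (non-prepsyscall) path the user stack at
	 * tf_esp is laid out as:
	 *
	 *	[ret eip][arg0][arg1]...           direct system call
	 *	[ret eip][code (int)][arg0]...     SYS_syscall indirection
	 *	[ret eip][code (quad)][arg0]...    SYS___syscall indirection
	 *
	 * The indirection code, if any, has been consumed above, so
	 * params now points at the first real argument in either case.
	 */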
1275
1276 code &= p->p_sysent->sv_mask;
1277
1278 if (code >= p->p_sysent->sv_size)
1279 callp = &p->p_sysent->sv_table[0];
1280 else
1281 callp = &p->p_sysent->sv_table[code];
1282
1283 narg = callp->sy_narg & SYF_ARGMASK;
1284
1285#if 0
1286 if (p->p_sysent->sv_name[0] == 'L')
1287 kprintf("Linux syscall, code = %d\n", code);
1288#endif
1289
1290 /*
1291 * copyin is MP aware, but the tracing code is not
1292 */
1293 if (narg && params) {
1294 error = copyin(params, (caddr_t)(&args.nosys.sysmsg + 1),
1295 narg * sizeof(register_t));
1296 if (error) {
1297#ifdef KTRACE
1298 if (KTRPOINT(td, KTR_SYSCALL)) {
1299 MAKEMPSAFE(have_mplock);
1300
1301 ktrsyscall(lp, code, narg,
1302 (void *)(&args.nosys.sysmsg + 1));
1303 }
1304#endif
1305 goto bad;
1306 }
1307 }
1308
1309#ifdef KTRACE
1310 if (KTRPOINT(td, KTR_SYSCALL)) {
1311 MAKEMPSAFE(have_mplock);
1312 ktrsyscall(lp, code, narg, (void *)(&args.nosys.sysmsg + 1));
1313 }
1314#endif
1315
1316 /*
1317 * For traditional syscall code edx is left untouched when 32 bit
1318 * results are returned. Since edx is loaded from fds[1] when the
1319 * system call returns we pre-set it here.
1320 */
1321 args.sysmsg_fds[0] = 0;
1322 args.sysmsg_fds[1] = frame->tf_edx;
1323
1324 /*
1325 * The syscall might manipulate the trap frame. If it does it
1326 * will probably return EJUSTRETURN.
1327 */
1328 args.sysmsg_frame = frame;
1329
1330 STOPEVENT(p, S_SCE, narg); /* MP aware */
1331
1332 /*
1333 * NOTE: All system calls run MPSAFE now. The system call itself
1334 * is responsible for getting the MP lock.
1335 */
1336 error = (*callp->sy_call)(&args);
1337
1338out:
1339 /*
1340 * MP SAFE (we may or may not have the MP lock at this point)
1341 */
1342 switch (error) {
1343 case 0:
1344 /*
1345 * Reinitialize proc pointer `p' as it may be different
1346 * if this is a child returning from fork syscall.
1347 */
1348 p = curproc;
1349 lp = curthread->td_lwp;
1350 frame->tf_eax = args.sysmsg_fds[0];
1351 frame->tf_edx = args.sysmsg_fds[1];
1352 frame->tf_eflags &= ~PSL_C;
1353 break;
1354 case ERESTART:
1355 /*
1356 * Reconstruct pc, assuming lcall $X,y is 7 bytes,
1357 * int 0x80 is 2 bytes. We saved this in tf_err.
1358 */
1359 frame->tf_eip -= frame->tf_err;
1360 break;
1361 case EJUSTRETURN:
1362 break;
1363 case EASYNC:
1364 panic("Unexpected EASYNC return value (for now)");
1365 default:
1366bad:
1367 if (p->p_sysent->sv_errsize) {
1368 if (error >= p->p_sysent->sv_errsize)
1369 error = -1; /* XXX */
1370 else
1371 error = p->p_sysent->sv_errtbl[error];
1372 }
1373 frame->tf_eax = error;
1374 frame->tf_eflags |= PSL_C;
1375 break;
1376 }
1377
1378 /*
1379 * Traced syscall. trapsignal() is not MP aware.
1380 */
1381 if ((orig_tf_eflags & PSL_T) && !(orig_tf_eflags & PSL_VM)) {
1382 MAKEMPSAFE(have_mplock);
1383 frame->tf_eflags &= ~PSL_T;
1384 trapsignal(lp, SIGTRAP, TRAP_TRACE);
1385 }
1386
1387 /*
1388 * Handle reschedule and other end-of-syscall issues
1389 */
1390 userret(lp, frame, sticks);
1391
1392#ifdef KTRACE
1393 if (KTRPOINT(td, KTR_SYSRET)) {
1394 MAKEMPSAFE(have_mplock);
1395 ktrsysret(lp, code, error, args.sysmsg_result);
1396 }
1397#endif
1398
1399 /*
1400 * This works because errno is findable through the
1401 * register set. If we ever support an emulation where this
1402 * is not the case, this code will need to be revisited.
1403 */
1404 STOPEVENT(p, S_SCX, code);
1405
1406 userexit(lp);
1407#ifdef SMP
1408 /*
1409 * Release the MP lock if we had to get it
1410 */
1411 if (have_mplock)
1412 rel_mplock();
1413#endif
1414 KTR_LOG(kernentry_syscall_ret, p->p_pid, lp->lwp_tid, error);
1415#ifdef INVARIANTS
1416 KASSERT(crit_count == td->td_critcount,
1417 ("syscall: critical section count mismatch! %d/%d",
1418 crit_count, td->td_critcount));
1419 KASSERT(&td->td_toks_base == td->td_toks_stop,
1420 ("syscall: extra tokens held after trap! %zd",
1421 td->td_toks_stop - &td->td_toks_base));
1422#endif
1423}
1424
1425/*
1426 * NOTE: MP lock not held at any point.
1427 */
1428void
1429fork_return(struct lwp *lp, struct trapframe *frame)
1430{
1431 frame->tf_eax = 0; /* Child returns zero */
1432 frame->tf_eflags &= ~PSL_C; /* success */
1433 frame->tf_edx = 1;
1434
1435 generic_lwp_return(lp, frame);
1436 KTR_LOG(kernentry_fork_ret, lp->lwp_proc->p_pid, lp->lwp_tid);
1437}
1438
1439/*
1440 * Simplified back end of syscall(), used when returning from fork()
1441 * directly into user mode.
1442 *
1443 * This code will return back into the fork trampoline code which then
1444 * runs doreti.
1445 *
1446 * NOTE: The mplock is not held at any point.
1447 */
1448void
1449generic_lwp_return(struct lwp *lp, struct trapframe *frame)
1450{
1451 struct proc *p = lp->lwp_proc;
1452
1453 /*
1454 * Newly forked processes are given a kernel priority. We have to
1455 * adjust the priority to a normal user priority and fake entry
1456 * into the kernel (call userenter()) to install a passive release
1457 * function just in case userret() decides to stop the process. This
1458 * can occur when ^Z races a fork. If we do not install the passive
1459 * release function the current process designation will not be
1460 * released when the thread goes to sleep.
1461 */
1462 lwkt_setpri_self(TDPRI_USER_NORM);
1463 userenter(lp->lwp_thread, p);
1464 userret(lp, frame, 0);
1465#ifdef KTRACE
1466 if (KTRPOINT(lp->lwp_thread, KTR_SYSRET))
1467 ktrsysret(lp, SYS_fork, 0, 0);
1468#endif
1469 lp->lwp_flags |= LWP_PASSIVE_ACQ;
1470 userexit(lp);
1471 lp->lwp_flags &= ~LWP_PASSIVE_ACQ;
1472}
1473
1474/*
1475 * If PGEX_FPFAULT is set then set FP_VIRTFP in the PCB to force a T_DNA
1476 * fault (which is then passed back to the virtual kernel) if an attempt is
1477 * made to use the FP unit.
1478 *
1479 * XXX this is a fairly big hack.
1480 */
1481void
1482set_vkernel_fp(struct trapframe *frame)
1483{
1484 struct thread *td = curthread;
1485
1486 if (frame->tf_xflags & PGEX_FPFAULT) {
1487 td->td_pcb->pcb_flags |= FP_VIRTFP;
1488 if (mdcpu->gd_npxthread == td)
1489 npxexit();
1490 } else {
1491 td->td_pcb->pcb_flags &= ~FP_VIRTFP;
1492 }
1493}
1494
1495/*
1496 * Called from vkernel_trap() to fixup the vkernel's syscall
1497 * frame for vmspace_ctl() return.
1498 */
1499void
1500cpu_vkernel_trap(struct trapframe *frame, int error)
1501{
1502 frame->tf_eax = error;
1503 if (error)
1504 frame->tf_eflags |= PSL_C;
1505 else
1506 frame->tf_eflags &= ~PSL_C;
1507}