/*-
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1990, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)trap.c     7.4 (Berkeley) 5/13/91
 * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $
 * $DragonFly: src/sys/i386/i386/Attic/trap.c,v 1.57 2005/06/16 21:12:44 dillon Exp $
 */

/*
 * 386 Trap and System call handling
 */

#include "use_isa.h"
#include "use_npx.h"

#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_ktrace.h"
#include "opt_clock.h"
#include "opt_trap.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pioctl.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#include <sys/malloc.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#include <sys/upcall.h>
#include <sys/sysproto.h>
#include <sys/sysunion.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>

#include <machine/cpu.h>
#include <machine/ipl.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#ifdef SMP
#include <machine/smp.h>
#endif
#include <machine/tss.h>
#include <machine/globaldata.h>

#include <i386/isa/intr_machdep.h>

#ifdef POWERFAIL_NMI
#include <sys/syslog.h>
#include <machine/clock.h>
#endif

#include <machine/vm86.h>

#include <ddb/ddb.h>
#include <sys/msgport2.h>
#include <sys/thread2.h>

int (*pmath_emulate) (struct trapframe *);

extern void trap (struct trapframe frame);
extern int trapwrite (unsigned addr);
extern void syscall2 (struct trapframe frame);
extern void sendsys2 (struct trapframe frame);
extern void waitsys2 (struct trapframe frame);

static int trap_pfault (struct trapframe *, int, vm_offset_t);
static void trap_fatal (struct trapframe *, vm_offset_t);
void dblfault_handler (void);

extern inthand_t IDTVEC(syscall);

#define MAX_TRAP_MSG            28
static char *trap_msg[] = {
        "",                                     /*  0 unused */
        "privileged instruction fault",         /*  1 T_PRIVINFLT */
        "",                                     /*  2 unused */
        "breakpoint instruction fault",         /*  3 T_BPTFLT */
        "",                                     /*  4 unused */
        "",                                     /*  5 unused */
        "arithmetic trap",                      /*  6 T_ARITHTRAP */
        "system forced exception",              /*  7 T_ASTFLT */
        "",                                     /*  8 unused */
        "general protection fault",             /*  9 T_PROTFLT */
        "trace trap",                           /* 10 T_TRCTRAP */
        "",                                     /* 11 unused */
        "page fault",                           /* 12 T_PAGEFLT */
        "",                                     /* 13 unused */
        "alignment fault",                      /* 14 T_ALIGNFLT */
        "",                                     /* 15 unused */
        "",                                     /* 16 unused */
        "",                                     /* 17 unused */
        "integer divide fault",                 /* 18 T_DIVIDE */
        "non-maskable interrupt trap",          /* 19 T_NMI */
        "overflow trap",                        /* 20 T_OFLOW */
        "FPU bounds check fault",               /* 21 T_BOUND */
        "FPU device not available",             /* 22 T_DNA */
        "double fault",                         /* 23 T_DOUBLEFLT */
        "FPU operand fetch fault",              /* 24 T_FPOPFLT */
        "invalid TSS fault",                    /* 25 T_TSSFLT */
        "segment not present fault",            /* 26 T_SEGNPFLT */
        "stack fault",                          /* 27 T_STKFLT */
        "machine check trap",                   /* 28 T_MCHK */
};

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
extern int has_f00f_bug;
#endif

#ifdef DDB
static int ddb_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, ddb_on_nmi, CTLFLAG_RW,
        &ddb_on_nmi, 0, "Go to DDB on NMI");
#endif
static int panic_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
        &panic_on_nmi, 0, "Panic on NMI");
static int fast_release;
SYSCTL_INT(_machdep, OID_AUTO, fast_release, CTLFLAG_RW,
        &fast_release, 0, "Passive Release was optimal");
static int slow_release;
SYSCTL_INT(_machdep, OID_AUTO, slow_release, CTLFLAG_RW,
        &slow_release, 0, "Passive Release was nonoptimal");

MALLOC_DEFINE(M_SYSMSG, "sysmsg", "sysmsg structure");
extern int max_sysmsg;

/*
 * Passive USER->KERNEL transition.  This only occurs if we block in the
 * kernel while still holding our userland priority.  We have to fixup our
 * priority in order to avoid potential deadlocks before we allow the system
 * to switch us to another thread.
 */
static void
passive_release(struct thread *td)
{
        struct proc *p = td->td_proc;

        td->td_release = NULL;
        lwkt_setpri_self(TDPRI_KERN_USER);
        release_curproc(p);
}
191
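/*
 * For context, the hook installed via td_release fires from the LWKT
 * scheduler roughly as follows (sketch of the lwkt_switch() side, not
 * code from this file):
 *
 *      if (td->td_release)
 *              td->td_release(td);
 *
 * so the priority fixup only costs anything if we actually block.
 */
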
/*
 * userenter() passively intercepts the thread switch function to increase
 * the thread priority from a user priority to a kernel priority, reducing
 * syscall and trap overhead for the case where no switch occurs.
 */
static __inline void
userenter(struct thread *curtd)
{
        curtd->td_release = passive_release;
}

/*
 * Handle signals, upcalls, profiling, and other AST's and/or tasks that
 * must be completed before we can return to or try to return to userland.
 *
 * Note that td_sticks is a 64 bit quantity, but there's no point doing
 * 64 bit arithmetic on the delta calculation so the absolute tick values
 * are truncated to an integer.
 */
static void
userret(struct proc *p, struct trapframe *frame, int sticks)
{
        int sig;

        /*
         * Post any pending upcalls
         */
        if (p->p_flag & P_UPCALLPEND) {
                p->p_flag &= ~P_UPCALLPEND;
                postupcall(p);
        }

        /*
         * Post any pending signals
         */
        while ((sig = CURSIG(p)) != 0) {
                postsig(sig);
        }

        /*
         * Charge system time if profiling.  Note: times are in microseconds.
         */
        if (p->p_flag & P_PROFIL) {
                addupc_task(p, frame->tf_eip,
                    (u_int)((int)p->p_thread->td_sticks - sticks));
        }

        /*
         * Post any pending signals XXX
         */
        while ((sig = CURSIG(p)) != 0)
                postsig(sig);
}
246
/*
 * Cleanup from userenter and any passive release that might have occurred.
 * We must reclaim the current-process designation before we can return
 * to usermode.  We also handle both LWKT and USER reschedule requests.
 */
static __inline void
userexit(struct proc *p)
{
        struct thread *td = p->p_thread;
        globaldata_t gd = td->td_gd;

#if 0
        /*
         * If a user reschedule is requested force a new process to be
         * chosen by releasing the current process.  Our process will only
         * be chosen again if it has a considerably better priority.
         */
        if (user_resched_wanted())
                release_curproc(p);
#endif

again:
        /*
         * Handle a LWKT reschedule request first.  Since our passive release
         * is still in place we do not have to do anything special.
         */
        if (lwkt_resched_wanted())
                lwkt_switch();

        /*
         * Acquire the current process designation if we do not own it.
         * Note that acquire_curproc() does not reset the user reschedule
         * bit on purpose, because we may need to accumulate over several
         * threads waking up at the same time.
         *
         * NOTE: userland scheduler cruft: because processes are removed
         * from the userland scheduler's queue we run through loops to try
         * to figure out which is the best of [ existing, waking-up ]
         * threads.
         */
        if (p != gd->gd_uschedcp) {
                ++slow_release;
                acquire_curproc(p);
                /* We may have switched cpus on acquisition */
                gd = td->td_gd;
        } else {
                ++fast_release;
        }

        /*
         * Reduce our priority in preparation for a return to userland.  If
         * our passive release function was still in place, our priority was
         * never raised and does not need to be reduced.
         */
        if (td->td_release == NULL)
                lwkt_setpri_self(TDPRI_USER_NORM);
        td->td_release = NULL;

        /*
         * After reducing our priority there might be other kernel-level
         * LWKTs that now have a greater priority.  Run them as necessary.
         * We don't have to worry about losing cpu to userland because
         * we still control the current-process designation and we no longer
         * have a passive release function installed.
         */
        if (lwkt_checkpri_self())
                lwkt_switch();

        /*
         * If a userland reschedule is [still] pending we may not be the best
         * selected process.  Select a better one.  If another LWKT resched
         * is pending the trap will be re-entered.
         */
        if (user_resched_wanted()) {
                select_curproc(gd);
                if (p != gd->gd_uschedcp) {
                        lwkt_setpri_self(TDPRI_KERN_USER);
                        goto again;
                }
        }
}
328
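/*
 * Illustrative sketch (not compiled): how the entry points below bracket
 * their work with the helpers above.  The handler body is hypothetical;
 * only the userenter()/userret()/userexit() protocol is the point.
 */
#if 0
static void
example_user_entry(struct trapframe *frame)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        int sticks;

        userenter(td);                  /* install passive release hook */
        sticks = (int)td->td_sticks;    /* snapshot for profiling delta */

        /* ... decode and service the trap or system call ... */

        userret(p, frame, sticks);      /* signals, upcalls, profiling */
        userexit(p);                    /* reacquire curproc, drop priority */
}
#endif
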
/*
 * Exception, fault, and trap interface to the kernel.
 * This common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed.
 *
 * This function is also called from doreti in an interlock to handle ASTs.
 * For example:  hardwareint->INTROUTINE->(set ast)->doreti->trap
 *
 * NOTE!  We have to retrieve the fault address prior to obtaining the
 * MP lock because get_mplock() may switch out.  YYY cr2 really ought
 * to be retrieved by the assembly code, not here.
 */
void
trap(frame)
        struct trapframe frame;
{
        struct thread *td = curthread;
        struct proc *p;
        int sticks = 0;
        int i = 0, ucode = 0, type, code;
        vm_offset_t eva;

        p = td->td_proc;
#ifdef DDB
        if (db_active) {
                eva = (frame.tf_trapno == T_PAGEFLT ? rcr2() : 0);
                get_mplock();
                trap_fatal(&frame, eva);
                goto out2;
        }
#endif

        eva = 0;
        if (frame.tf_trapno == T_PAGEFLT) {
                /*
                 * For some Cyrix CPUs, %cr2 is clobbered by interrupts.
                 * This problem is worked around by using an interrupt
                 * gate for the pagefault handler.  We are finally ready
                 * to read %cr2 and then must reenable interrupts.
                 *
                 * XXX this should be in the switch statement, but the
                 * NO_F00F_HACK and VM86 goto and ifdefs obfuscate the
                 * flow of control too much for this to be obviously
                 * correct.
                 */
                eva = rcr2();
                get_mplock();
                cpu_enable_intr();
        } else {
                get_mplock();
        }
        /*
         * MP lock is held at this point
         */

        if (!(frame.tf_eflags & PSL_I)) {
                /*
                 * Buggy application or kernel code has disabled interrupts
                 * and then trapped.  Enabling interrupts now is wrong, but
                 * it is better than running with interrupts disabled until
                 * they are accidentally enabled later.
                 */
                type = frame.tf_trapno;
                if (ISPL(frame.tf_cs) == SEL_UPL || (frame.tf_eflags & PSL_VM)) {
                        printf(
                            "pid %ld (%s): trap %d with interrupts disabled\n",
                            (long)curproc->p_pid, curproc->p_comm, type);
                } else if (type != T_BPTFLT && type != T_TRCTRAP) {
                        /*
                         * XXX not quite right, since this may be for a
                         * multiple fault in user mode.
                         */
                        printf("kernel trap %d with interrupts disabled\n",
                            type);
                }
                cpu_enable_intr();
        }

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
restart:
#endif
        type = frame.tf_trapno;
        code = frame.tf_err;

        if (in_vm86call) {
                if (frame.tf_eflags & PSL_VM &&
                    (type == T_PROTFLT || type == T_STKFLT)) {
#ifdef SMP
                        KKASSERT(td->td_mpcount > 0);
#endif
                        i = vm86_emulate((struct vm86frame *)&frame);
#ifdef SMP
                        KKASSERT(td->td_mpcount > 0);
#endif
                        if (i != 0) {
                                /*
                                 * returns to original process
                                 */
                                vm86_trap((struct vm86frame *)&frame);
                                KKASSERT(0);
                        }
                        goto out2;
                }
                switch (type) {
                        /*
                         * these traps want either a process context, or
                         * assume a normal userspace trap.
                         */
                case T_PROTFLT:
                case T_SEGNPFLT:
                        trap_fatal(&frame, eva);
                        goto out2;
                case T_TRCTRAP:
                        type = T_BPTFLT;        /* kernel breakpoint */
                        /* FALL THROUGH */
                }
                goto kernel_trap;       /* normal kernel trap handling */
        }

        if ((ISPL(frame.tf_cs) == SEL_UPL) || (frame.tf_eflags & PSL_VM)) {
                /* user trap */

                userenter(td);

                sticks = (int)td->td_sticks;
                p->p_md.md_regs = &frame;

                switch (type) {
                case T_PRIVINFLT:       /* privileged instruction fault */
                        ucode = type;
                        i = SIGILL;
                        break;

                case T_BPTFLT:          /* bpt instruction fault */
                case T_TRCTRAP:         /* trace trap */
                        frame.tf_eflags &= ~PSL_T;
                        i = SIGTRAP;
                        break;

                case T_ARITHTRAP:       /* arithmetic trap */
                        ucode = code;
                        i = SIGFPE;
                        break;

                case T_ASTFLT:          /* Allow process switch */
                        mycpu->gd_cnt.v_soft++;
                        if (mycpu->gd_reqflags & RQF_AST_OWEUPC) {
                                atomic_clear_int_nonlocked(&mycpu->gd_reqflags,
                                            RQF_AST_OWEUPC);
                                addupc_task(p, p->p_stats->p_prof.pr_addr,
                                            p->p_stats->p_prof.pr_ticks);
                        }
                        goto out;

                        /*
                         * The following two traps can happen in
                         * vm86 mode, and, if so, we want to handle
                         * them specially.
                         */
                case T_PROTFLT:         /* general protection fault */
                case T_STKFLT:          /* stack fault */
                        if (frame.tf_eflags & PSL_VM) {
                                i = vm86_emulate((struct vm86frame *)&frame);
                                if (i == 0)
                                        goto out;
                                break;
                        }
                        /* FALL THROUGH */

                case T_SEGNPFLT:        /* segment not present fault */
                case T_TSSFLT:          /* invalid TSS fault */
                case T_DOUBLEFLT:       /* double fault */
                default:
                        ucode = code + BUS_SEGM_FAULT;
                        i = SIGBUS;
                        break;

                case T_PAGEFLT:         /* page fault */
                        i = trap_pfault(&frame, TRUE, eva);
                        if (i == -1)
                                goto out;
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
                        if (i == -2)
                                goto restart;
#endif
                        if (i == 0)
                                goto out;

                        ucode = T_PAGEFLT;
                        break;

                case T_DIVIDE:          /* integer divide fault */
                        ucode = FPE_INTDIV;
                        i = SIGFPE;
                        break;

#if NISA > 0
                case T_NMI:
#ifdef POWERFAIL_NMI
                        goto handle_powerfail;
#else /* !POWERFAIL_NMI */
                        /* machine/parity/power fail/"kitchen sink" faults */
                        if (isa_nmi(code) == 0) {
#ifdef DDB
                                /*
                                 * NMI can be hooked up to a pushbutton
                                 * for debugging.
                                 */
                                if (ddb_on_nmi) {
                                        printf ("NMI ... going to debugger\n");
                                        kdb_trap (type, 0, &frame);
                                }
#endif /* DDB */
                                goto out2;
                        } else if (panic_on_nmi)
                                panic("NMI indicates hardware failure");
                        break;
#endif /* POWERFAIL_NMI */
#endif /* NISA > 0 */

                case T_OFLOW:           /* integer overflow fault */
                        ucode = FPE_INTOVF;
                        i = SIGFPE;
                        break;

                case T_BOUND:           /* bounds check fault */
                        ucode = FPE_FLTSUB;
                        i = SIGFPE;
                        break;

                case T_DNA:
#if NNPX > 0
                        /*
                         * The kernel may have switched out the FP unit's
                         * state, causing the user process to take a fault
                         * when it tries to use the FP unit.  Restore the
                         * state here.
                         */
                        if (npxdna())
                                goto out;
#endif
                        if (!pmath_emulate) {
                                i = SIGFPE;
                                ucode = FPE_FPU_NP_TRAP;
                                break;
                        }
                        i = (*pmath_emulate)(&frame);
                        if (i == 0) {
                                if (!(frame.tf_eflags & PSL_T))
                                        goto out2;
                                frame.tf_eflags &= ~PSL_T;
                                i = SIGTRAP;
                        }
                        /* else ucode = emulator_only_knows() XXX */
                        break;

                case T_FPOPFLT:         /* FPU operand fetch fault */
                        ucode = T_FPOPFLT;
                        i = SIGILL;
                        break;

                case T_XMMFLT:          /* SIMD floating-point exception */
                        ucode = 0; /* XXX */
                        i = SIGFPE;
                        break;
                }
        } else {
kernel_trap:
                /* kernel trap */

                switch (type) {
                case T_PAGEFLT:                 /* page fault */
                        (void) trap_pfault(&frame, FALSE, eva);
                        goto out2;

                case T_DNA:
#if NNPX > 0
                        /*
                         * The kernel may be using npx for copying or other
                         * purposes.
                         */
                        if (npxdna())
                                goto out2;
#endif
                        break;

                case T_PROTFLT:         /* general protection fault */
                case T_SEGNPFLT:        /* segment not present fault */
                        /*
                         * Invalid segment selectors and out of bounds
                         * %eip's and %esp's can be set up in user mode.
                         * This causes a fault in kernel mode when the
                         * kernel tries to return to user mode.  We want
                         * to get this fault so that we can fix the
                         * problem here and not have to check all the
                         * selectors and pointers when the user changes
                         * them.
                         */
#define MAYBE_DORETI_FAULT(where, whereto)                              \
        do {                                                            \
                if (frame.tf_eip == (int)where) {                       \
                        frame.tf_eip = (int)whereto;                    \
                        goto out2;                                      \
                }                                                       \
        } while (0)
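/*
 * For clarity, an invocation such as MAYBE_DORETI_FAULT(doreti_iret,
 * doreti_iret_fault) simply expands to:
 *
 *      do {
 *              if (frame.tf_eip == (int)doreti_iret) {
 *                      frame.tf_eip = (int)doreti_iret_fault;
 *                      goto out2;
 *              }
 *      } while (0);
 *
 * i.e. the faulting instruction is restarted at a fixup label that backs
 * out of doreti safely.
 */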
                        /*
                         * Since we don't save %gs across an interrupt
                         * frame this check must occur outside the intr
                         * nesting level check.
                         */
                        if (frame.tf_eip == (int)cpu_switch_load_gs) {
                                td->td_pcb->pcb_gs = 0;
                                psignal(p, SIGBUS);
                                goto out2;
                        }
                        if (mycpu->gd_intr_nesting_level == 0) {
                                /*
                                 * Invalid %fs's and %gs's can be created using
                                 * procfs or PT_SETREGS or by invalidating the
                                 * underlying LDT entry.  This causes a fault
                                 * in kernel mode when the kernel attempts to
                                 * switch contexts.  Lose the bad context
                                 * (XXX) so that we can continue, and generate
                                 * a signal.
                                 */
                                MAYBE_DORETI_FAULT(doreti_iret,
                                                   doreti_iret_fault);
                                MAYBE_DORETI_FAULT(doreti_popl_ds,
                                                   doreti_popl_ds_fault);
                                MAYBE_DORETI_FAULT(doreti_popl_es,
                                                   doreti_popl_es_fault);
                                MAYBE_DORETI_FAULT(doreti_popl_fs,
                                                   doreti_popl_fs_fault);
                                if (td->td_pcb->pcb_onfault) {
                                        frame.tf_eip =
                                            (register_t)td->td_pcb->pcb_onfault;
                                        goto out2;
                                }
                        }
                        break;

                case T_TSSFLT:
                        /*
                         * PSL_NT can be set in user mode and isn't cleared
                         * automatically when the kernel is entered.  This
                         * causes a TSS fault when the kernel attempts to
                         * `iret' because the TSS link is uninitialized.  We
                         * want to get this fault so that we can fix the
                         * problem here and not every time the kernel is
                         * entered.
                         */
                        if (frame.tf_eflags & PSL_NT) {
                                frame.tf_eflags &= ~PSL_NT;
                                goto out2;
                        }
                        break;

                case T_TRCTRAP:         /* trace trap */
                        if (frame.tf_eip == (int)IDTVEC(syscall)) {
                                /*
                                 * We've just entered system mode via the
                                 * syscall lcall.  Continue single stepping
                                 * silently until the syscall handler has
                                 * saved the flags.
                                 */
                                goto out2;
                        }
                        if (frame.tf_eip == (int)IDTVEC(syscall) + 1) {
                                /*
                                 * The syscall handler has now saved the
                                 * flags.  Stop single stepping it.
                                 */
                                frame.tf_eflags &= ~PSL_T;
                                goto out2;
                        }
                        /*
                         * Ignore debug register trace traps due to
                         * accesses in the user's address space, which
                         * can happen under several conditions such as
                         * if a user sets a watchpoint on a buffer and
                         * then passes that buffer to a system call.
                         * We still want to get TRCTRAPS for addresses
                         * in kernel space because that is useful when
                         * debugging the kernel.
                         */
                        if (user_dbreg_trap()) {
                                /*
                                 * Reset breakpoint bits because the
                                 * processor doesn't
                                 */
                                load_dr6(rdr6() & 0xfffffff0);
                                goto out2;
                        }
                        /*
                         * Fall through (TRCTRAP kernel mode, kernel address)
                         */
                case T_BPTFLT:
                        /*
                         * If DDB is enabled, let it handle the debugger trap.
                         * Otherwise, debugger traps "can't happen".
                         */
#ifdef DDB
                        if (kdb_trap (type, 0, &frame))
                                goto out2;
#endif
                        break;

#if NISA > 0
                case T_NMI:
#ifdef POWERFAIL_NMI
#ifndef TIMER_FREQ
#  define TIMER_FREQ 1193182
#endif
        handle_powerfail:
                {
                        static unsigned lastalert = 0;

                        if (time_second - lastalert > 10) {
                                log(LOG_WARNING, "NMI: power fail\n");
                                sysbeep(TIMER_FREQ/880, hz);
                                lastalert = time_second;
                        }
                        /* YYY mp count */
                        goto out2;
                }
#else /* !POWERFAIL_NMI */
                        /* machine/parity/power fail/"kitchen sink" faults */
                        if (isa_nmi(code) == 0) {
#ifdef DDB
                                /*
                                 * NMI can be hooked up to a pushbutton
                                 * for debugging.
                                 */
                                if (ddb_on_nmi) {
                                        printf ("NMI ... going to debugger\n");
                                        kdb_trap (type, 0, &frame);
                                }
#endif /* DDB */
                                goto out2;
                        } else if (panic_on_nmi == 0)
                                goto out2;
                        /* FALL THROUGH */
#endif /* POWERFAIL_NMI */
#endif /* NISA > 0 */
                }

                trap_fatal(&frame, eva);
                goto out2;
        }
780
        /* Translate fault for emulators (e.g. Linux) */
        if (*p->p_sysent->sv_transtrap)
                i = (*p->p_sysent->sv_transtrap)(i, type);

        trapsignal(p, i, ucode);

#ifdef DEBUG
        if (type <= MAX_TRAP_MSG) {
                uprintf("fatal process exception: %s",
                        trap_msg[type]);
                if ((type == T_PAGEFLT) || (type == T_PROTFLT))
                        uprintf(", fault VA = 0x%lx", (u_long)eva);
                uprintf("\n");
        }
#endif

out:
#ifdef SMP
        if (ISPL(frame.tf_cs) == SEL_UPL)
                KASSERT(td->td_mpcount == 1, ("badmpcount trap from %p", (void *)frame.tf_eip));
#endif
        userret(p, &frame, sticks);
        userexit(p);
out2:
#ifdef SMP
        KKASSERT(td->td_mpcount > 0);
#endif
        rel_mplock();
}

#ifdef notyet
/*
 * This version doesn't allow a page fault to user space while
 * in the kernel. The rest of the kernel needs to be made "safe"
 * before this can be used. I think the only things remaining
 * to be made safe are the iBCS2 code and the process tracing/
 * debugging code.
 */
static int
trap_pfault(frame, usermode, eva)
        struct trapframe *frame;
        int usermode;
        vm_offset_t eva;
{
        vm_offset_t va;
        struct vmspace *vm = NULL;
        vm_map_t map = 0;
        int rv = 0;
        vm_prot_t ftype;
        thread_t td = curthread;
        struct proc *p = td->td_proc;   /* may be NULL */

        if (frame->tf_err & PGEX_W)
                ftype = VM_PROT_WRITE;
        else
                ftype = VM_PROT_READ;

        va = trunc_page(eva);
        if (va < VM_MIN_KERNEL_ADDRESS) {
                vm_offset_t v;
                vm_page_t mpte;

                if (p == NULL ||
                    (!usermode && va < VM_MAXUSER_ADDRESS &&
                     (td->td_gd->gd_intr_nesting_level != 0 ||
                      td->td_pcb->pcb_onfault == NULL))) {
                        trap_fatal(frame, eva);
                        return (-1);
                }

                /*
                 * This is a fault on non-kernel virtual memory.
                 * vm is initialized above to NULL. If curproc is NULL
                 * or curproc->p_vmspace is NULL the fault is fatal.
                 */
                vm = p->p_vmspace;
                if (vm == NULL)
                        goto nogo;

                map = &vm->vm_map;

                /*
                 * Keep swapout from messing with us during this
                 * critical time.
                 */
                ++p->p_lock;

                /*
                 * Grow the stack if necessary
                 */
                /* grow_stack returns false only if va falls into
                 * a growable stack region and the stack growth
                 * fails.  It returns true if va was not within
                 * a growable stack region, or if the stack
                 * growth succeeded.
                 */
                if (!grow_stack (p, va)) {
                        rv = KERN_FAILURE;
                        --p->p_lock;
                        goto nogo;
                }

                /* Fault in the user page: */
                rv = vm_fault(map, va, ftype,
                              (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
                                                      : VM_FAULT_NORMAL);

                --p->p_lock;
        } else {
                /*
                 * Don't allow user-mode faults in kernel address space.
                 */
                if (usermode)
                        goto nogo;

                /*
                 * Since we know that kernel virtual address addresses
                 * always have pte pages mapped, we just have to fault
                 * the page.
                 */
                rv = vm_fault(kernel_map, va, ftype, VM_FAULT_NORMAL);
        }

        if (rv == KERN_SUCCESS)
                return (0);
nogo:
        if (!usermode) {
                if (td->td_gd->gd_intr_nesting_level == 0 &&
                    td->td_pcb->pcb_onfault) {
                        frame->tf_eip = (register_t)td->td_pcb->pcb_onfault;
                        return (0);
                }
                trap_fatal(frame, eva);
                return (-1);
        }

        /* kludge to pass faulting virtual address to sendsig */
        frame->tf_err = eva;

        return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}
#endif

int
trap_pfault(frame, usermode, eva)
        struct trapframe *frame;
        int usermode;
        vm_offset_t eva;
{
        vm_offset_t va;
        struct vmspace *vm = NULL;
        vm_map_t map = 0;
        int rv = 0;
        vm_prot_t ftype;
        thread_t td = curthread;
        struct proc *p = td->td_proc;

        va = trunc_page(eva);
        if (va >= KERNBASE) {
                /*
                 * Don't allow user-mode faults in kernel address space.
                 * An exception:  if the faulting address is the invalid
                 * instruction entry in the IDT, then the Intel Pentium
                 * F00F bug workaround was triggered, and we need to
                 * treat it as an illegal instruction, and not a page
                 * fault.
                 */
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
                if ((eva == (unsigned int)&idt[6]) && has_f00f_bug) {
                        frame->tf_trapno = T_PRIVINFLT;
                        return -2;
                }
#endif
                if (usermode)
                        goto nogo;

                map = kernel_map;
        } else {
                /*
                 * This is a fault on non-kernel virtual memory.
                 * vm is initialized above to NULL. If curproc is NULL
                 * or curproc->p_vmspace is NULL the fault is fatal.
                 */
                if (p != NULL)
                        vm = p->p_vmspace;

                if (vm == NULL)
                        goto nogo;

                map = &vm->vm_map;
        }

        if (frame->tf_err & PGEX_W)
                ftype = VM_PROT_WRITE;
        else
                ftype = VM_PROT_READ;

        if (map != kernel_map) {
                /*
                 * Keep swapout from messing with us during this
                 * critical time.
                 */
                ++p->p_lock;

                /*
                 * Grow the stack if necessary
                 */
                /* grow_stack returns false only if va falls into
                 * a growable stack region and the stack growth
                 * fails.  It returns true if va was not within
                 * a growable stack region, or if the stack
                 * growth succeeded.
                 */
                if (!grow_stack (p, va)) {
                        rv = KERN_FAILURE;
                        --p->p_lock;
                        goto nogo;
                }

                /* Fault in the user page: */
                rv = vm_fault(map, va, ftype,
                              (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
                                                      : VM_FAULT_NORMAL);

                --p->p_lock;
        } else {
                /*
                 * Don't have to worry about process locking or stacks
                 * in the kernel.
                 */
                rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
        }

        if (rv == KERN_SUCCESS)
                return (0);
nogo:
        if (!usermode) {
                if (td->td_gd->gd_intr_nesting_level == 0 &&
                    td->td_pcb->pcb_onfault) {
                        frame->tf_eip = (register_t)td->td_pcb->pcb_onfault;
                        return (0);
                }
                trap_fatal(frame, eva);
                return (-1);
        }

        /* kludge to pass faulting virtual address to sendsig */
        frame->tf_err = eva;

        return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}
1031
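/*
 * Note on the pcb_onfault recovery path above: copyin()/copyout() style
 * routines arrange for it roughly like this (illustrative sketch only;
 * the real code lives in the assembly support routines):
 *
 *      td->td_pcb->pcb_onfault = fault_handler_label;
 *      ... touch user memory; a fault resumes at fault_handler_label ...
 *      td->td_pcb->pcb_onfault = NULL;
 *      return (EFAULT);
 */
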
static void
trap_fatal(frame, eva)
        struct trapframe *frame;
        vm_offset_t eva;
{
        int code, type, ss, esp;
        struct soft_segment_descriptor softseg;

        code = frame->tf_err;
        type = frame->tf_trapno;
        sdtossd(&gdt[mycpu->gd_cpuid * NGDT + IDXSEL(frame->tf_cs & 0xffff)].sd, &softseg);

        if (type <= MAX_TRAP_MSG)
                printf("\n\nFatal trap %d: %s while in %s mode\n",
                        type, trap_msg[type],
                        frame->tf_eflags & PSL_VM ? "vm86" :
                        ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
#ifdef SMP
        /* three separate prints in case of a trap on an unmapped page */
        printf("mp_lock = %08x; ", mp_lock);
        printf("cpuid = %d; ", mycpu->gd_cpuid);
        printf("lapic.id = %08x\n", lapic.id);
#endif
        if (type == T_PAGEFLT) {
                printf("fault virtual address   = 0x%x\n", eva);
                printf("fault code              = %s %s, %s\n",
                        code & PGEX_U ? "user" : "supervisor",
                        code & PGEX_W ? "write" : "read",
                        code & PGEX_P ? "protection violation" : "page not present");
        }
        printf("instruction pointer     = 0x%x:0x%x\n",
               frame->tf_cs & 0xffff, frame->tf_eip);
        if ((ISPL(frame->tf_cs) == SEL_UPL) || (frame->tf_eflags & PSL_VM)) {
                ss = frame->tf_ss & 0xffff;
                esp = frame->tf_esp;
        } else {
                ss = GSEL(GDATA_SEL, SEL_KPL);
                esp = (int)&frame->tf_esp;
        }
        printf("stack pointer           = 0x%x:0x%x\n", ss, esp);
        printf("frame pointer           = 0x%x:0x%x\n", ss, frame->tf_ebp);
        printf("code segment            = base 0x%x, limit 0x%x, type 0x%x\n",
               softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
        printf("                        = DPL %d, pres %d, def32 %d, gran %d\n",
               softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_def32,
               softseg.ssd_gran);
        printf("processor eflags        = ");
        if (frame->tf_eflags & PSL_T)
                printf("trace trap, ");
        if (frame->tf_eflags & PSL_I)
                printf("interrupt enabled, ");
        if (frame->tf_eflags & PSL_NT)
                printf("nested task, ");
        if (frame->tf_eflags & PSL_RF)
                printf("resume, ");
        if (frame->tf_eflags & PSL_VM)
                printf("vm86, ");
        printf("IOPL = %d\n", (frame->tf_eflags & PSL_IOPL) >> 12);
        printf("current process         = ");
        if (curproc) {
                printf("%lu (%s)\n",
                    (u_long)curproc->p_pid, curproc->p_comm ?
                    curproc->p_comm : "");
        } else {
                printf("Idle\n");
        }
        printf("current thread          = pri %d ", curthread->td_pri);
        if (curthread->td_pri >= TDPRI_CRIT)
                printf("(CRIT)");
        printf("\n");
#ifdef SMP
/**
 *  XXX FIXME:
 *      we probably SHOULD have stopped the other CPUs before now!
 *      another CPU COULD have been touching cpl at this moment...
 */
        printf(" <- SMP: XXX");
#endif
        printf("\n");

#ifdef KDB
        if (kdb_trap(&psl))
                return;
#endif
#ifdef DDB
        if ((debugger_on_panic || db_active) && kdb_trap(type, code, frame))
                return;
#endif
        printf("trap number             = %d\n", type);
        if (type <= MAX_TRAP_MSG)
                panic("%s", trap_msg[type]);
        else
                panic("unknown/reserved trap");
}
1126
/*
 * Double fault handler. Called when a fault occurs while writing
 * a frame for a trap/exception onto the stack. This usually occurs
 * when the stack overflows (such is the case with infinite recursion,
 * for example).
 *
 * XXX Note that the current PTD gets replaced by IdlePTD when the
 * task switch occurs. This means that the stack that was active at
 * the time of the double fault is not available at <kstack> unless
 * the machine was idle when the double fault occurred.  The downside
 * of this is that "trace <ebp>" in ddb won't work.
 */
void
dblfault_handler()
{
        struct mdglobaldata *gd = mdcpu;

        printf("\nFatal double fault:\n");
        printf("eip = 0x%x\n", gd->gd_common_tss.tss_eip);
        printf("esp = 0x%x\n", gd->gd_common_tss.tss_esp);
        printf("ebp = 0x%x\n", gd->gd_common_tss.tss_ebp);
#ifdef SMP
        /* three separate prints in case of a trap on an unmapped page */
        printf("mp_lock = %08x; ", mp_lock);
        printf("cpuid = %d; ", mycpu->gd_cpuid);
        printf("lapic.id = %08x\n", lapic.id);
#endif
        panic("double fault");
}
1156
/*
 * Compensate for 386 brain damage (missing URKR).
 * This is a little simpler than the pagefault handler in trap() because
 * the page tables have already been faulted in and high addresses
 * are thrown out early for other reasons.
 */
int trapwrite(addr)
        unsigned addr;
{
        struct proc *p;
        vm_offset_t va;
        struct vmspace *vm;
        int rv;

        va = trunc_page((vm_offset_t)addr);
        /*
         * XXX - MAX is END.  Changed > to >= for temp. fix.
         */
        if (va >= VM_MAXUSER_ADDRESS)
                return (1);

        p = curproc;
        vm = p->p_vmspace;

        ++p->p_lock;

        if (!grow_stack (p, va)) {
                --p->p_lock;
                return (1);
        }

        /*
         * fault the data page
         */
        rv = vm_fault(&vm->vm_map, va, VM_PROT_WRITE, VM_FAULT_DIRTY);

        --p->p_lock;

        if (rv != KERN_SUCCESS)
                return 1;

        return (0);
}
1200
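/*
 * Hypothetical caller's view (for illustration only): a fast user-write
 * primitive that hits a write-protected page on a 386 falls back to
 * trapwrite() to force the page in writable, treating a nonzero return
 * as an EFAULT-style failure:
 *
 *      if (trapwrite(uaddr) != 0)
 *              return (-1);
 */
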
/*
 * syscall2 -   MP aware system call request C handler
 *
 * A system call is essentially treated as a trap except that the
 * MP lock is not held on entry or return.  We are responsible for
 * obtaining the MP lock if necessary and for handling ASTs
 * (e.g. a task switch) prior to return.
 *
 * In general, only simple access and manipulation of curproc and
 * the current stack is allowed without having to hold MP lock.
 */
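/*
 * User-side convention this handler decodes (illustrative sketch, not
 * part of this file): the syscall number arrives in %eax and the
 * arguments sit on the user stack just above the return address, which
 * is why params is computed as tf_esp + sizeof(int) below.  Roughly:
 *
 *      movl    $SYS_getpid,%eax
 *      int     $0x80                   ; or via the slower lcall gate
 *      jc      error                   ; PSL_C set, errno in %eax
 */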
void
syscall2(struct trapframe frame)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        caddr_t params;
        struct sysent *callp;
        register_t orig_tf_eflags;
        int sticks;
        int error;
        int narg;
        u_int code;
        union sysunion args;

#ifdef DIAGNOSTIC
        if (ISPL(frame.tf_cs) != SEL_UPL) {
                get_mplock();
                panic("syscall");
                /* NOT REACHED */
        }
#endif

#ifdef SMP
        KASSERT(td->td_mpcount == 0, ("badmpcount syscall from %p", (void *)frame.tf_eip));
        get_mplock();
#endif
        userenter(td);          /* lazy raise our priority */

        sticks = (int)td->td_sticks;

        p->p_md.md_regs = &frame;
        params = (caddr_t)frame.tf_esp + sizeof(int);
        code = frame.tf_eax;
        orig_tf_eflags = frame.tf_eflags;

        if (p->p_sysent->sv_prepsyscall) {
                /*
                 * The prep code is not MP aware.
                 */
                (*p->p_sysent->sv_prepsyscall)(&frame, (int *)(&args.nosys.usrmsg + 1), &code, &params);
        } else {
                /*
                 * Need to check if this is a 32 bit or 64 bit syscall.
                 * fuword is MP aware.
                 */
                if (code == SYS_syscall) {
                        /*
                         * Code is first argument, followed by actual args.
                         */
                        code = fuword(params);
                        params += sizeof(int);
                } else if (code == SYS___syscall) {
                        /*
                         * Like syscall, but code is a quad, so as to maintain
                         * quad alignment for the rest of the arguments.
                         */
                        code = fuword(params);
                        params += sizeof(quad_t);
                }
        }

        code &= p->p_sysent->sv_mask;
        if (code >= p->p_sysent->sv_size)
                callp = &p->p_sysent->sv_table[0];
        else
                callp = &p->p_sysent->sv_table[code];

        narg = callp->sy_narg & SYF_ARGMASK;

        /*
         * copyin is MP aware, but the tracing code is not
         */
        if (narg && params) {
                error = copyin(params, (caddr_t)(&args.nosys.usrmsg + 1),
                                narg * sizeof(register_t));
                if (error) {
#ifdef KTRACE
                        if (KTRPOINT(td, KTR_SYSCALL))
                                ktrsyscall(p->p_tracep, code, narg,
                                        (void *)(&args.nosys.usrmsg + 1));
#endif
                        goto bad;
                }
        }

#if 0
        /*
         * Try to run the syscall without the MP lock if the syscall
         * is MP safe.  We have to obtain the MP lock no matter what if
         * we are ktracing
         */
        if ((callp->sy_narg & SYF_MPSAFE) == 0) {
                get_mplock();
                have_mplock = 1;
        }
#endif

#ifdef KTRACE
        if (KTRPOINT(td, KTR_SYSCALL)) {
                ktrsyscall(p->p_tracep, code, narg, (void *)(&args.nosys.usrmsg + 1));
        }
#endif

        /*
         * For traditional syscall code edx is left untouched when 32 bit
         * results are returned.  Since edx is loaded from fds[1] when the
         * system call returns we pre-set it here.
         */
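        /*
         * (Historical convention, for context: a two-register result,
         * e.g. pipe(2)'s descriptor pair or lseek(2)'s 64 bit offset,
         * comes back in %eax:%edx, which is why both fds[0] and fds[1]
         * are staged here.)
         */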
        lwkt_initmsg(&args.lmsg, &td->td_msgport, 0,
                        lwkt_cmd_op(code), lwkt_cmd_op_none);
        args.sysmsg_copyout = NULL;
        args.sysmsg_fds[0] = 0;
        args.sysmsg_fds[1] = frame.tf_edx;

        STOPEVENT(p, S_SCE, narg);      /* MP aware */

        error = (*callp->sy_call)(&args);

        /*
         * MP SAFE (we may or may not have the MP lock at this point)
         */
        switch (error) {
        case 0:
                /*
                 * Reinitialize proc pointer `p' as it may be different
                 * if this is a child returning from fork syscall.
                 */
                p = curproc;
                frame.tf_eax = args.sysmsg_fds[0];
                frame.tf_edx = args.sysmsg_fds[1];
                frame.tf_eflags &= ~PSL_C;
                break;
        case ERESTART:
                /*
                 * Reconstruct pc, assuming lcall $X,y is 7 bytes,
                 * int 0x80 is 2 bytes.  We saved this in tf_err.
                 */
                frame.tf_eip -= frame.tf_err;
                break;
        case EJUSTRETURN:
                break;
        case EASYNC:
                panic("Unexpected EASYNC return value (for now)");
        default:
bad:
                if (p->p_sysent->sv_errsize) {
                        if (error >= p->p_sysent->sv_errsize)
                                error = -1;     /* XXX */
                        else
                                error = p->p_sysent->sv_errtbl[error];
                }
                frame.tf_eax = error;
                frame.tf_eflags |= PSL_C;
                break;
        }

        /*
         * Traced syscall.  trapsignal() is not MP aware.
         */
        if ((orig_tf_eflags & PSL_T) && !(orig_tf_eflags & PSL_VM)) {
                frame.tf_eflags &= ~PSL_T;
                trapsignal(p, SIGTRAP, 0);
        }

        /*
         * Handle reschedule and other end-of-syscall issues
         */
        userret(p, &frame, sticks);

#ifdef KTRACE
        if (KTRPOINT(td, KTR_SYSRET)) {
                ktrsysret(p->p_tracep, code, error, args.sysmsg_result);
        }
#endif

        /*
         * This works because errno is findable through the
         * register set.  If we ever support an emulation where this
         * is not the case, this code will need to be revisited.
         */
        STOPEVENT(p, S_SCX, code);

        userexit(p);
#ifdef SMP
        /*
         * Release the MP lock if we had to get it
         */
        KASSERT(td->td_mpcount == 1, ("badmpcount syscall from %p", (void *)frame.tf_eip));
        rel_mplock();
#endif
}
1403
/*
 * free_sysun -ize Put an unused sysun on the free list.
 */
static __inline void
free_sysun(struct thread *td, union sysunion *sysun)
{
        struct globaldata *gd = td->td_gd;

        crit_enter_quick(td);
        sysun->lmsg.opaque.ms_sysunnext = gd->gd_freesysun;
        gd->gd_freesysun = sysun;
        crit_exit_quick(td);
}

/*
 * sendsys2 -   MP aware system message request C handler
 */
void
sendsys2(struct trapframe frame)
{
        struct globaldata *gd;
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        register_t orig_tf_eflags;
        struct sysent *callp;
        union sysunion *sysun = NULL;
        lwkt_msg_t umsg;
        int sticks;
        int error;
        int narg;
        u_int code = 0;
        int msgsize;
        int result;

#ifdef DIAGNOSTIC
        if (ISPL(frame.tf_cs) != SEL_UPL) {
                get_mplock();
                panic("sendsys");
                /* NOT REACHED */
        }
#endif

#ifdef SMP
        KASSERT(td->td_mpcount == 0, ("badmpcount syscall from %p", (void *)frame.tf_eip));
        get_mplock();
#endif
        /*
         * access non-atomic field from critical section.  p_sticks is
         * updated by the clock interrupt.  Also use this opportunity
         * to lazy-raise our LWKT priority.
         */
        userenter(td);
        sticks = td->td_sticks;

        p->p_md.md_regs = &frame;
        orig_tf_eflags = frame.tf_eflags;
        result = 0;

        /*
         * Extract the system call message.  If msgsize is zero we are
         * blocking on a message and/or message port.  If msgsize is -1
         * we are testing a message for completion or a message port for
         * activity.
         *
         * The userland system call message size includes the size of the
         * userland lwkt_msg plus arguments.  We load it into the userland
         * portion of our sysunion structure then we initialize the kerneland
         * portion and go.
         */

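        /*
         * Illustrative userland layout (hypothetical stub, shown only to
         * make the register protocol concrete): %ecx carries a pointer to
         * the user's message and %edx its total size, e.g. roughly
         *
         *      struct {
         *              struct lwkt_msg msg;     - ms_cmd = syscall number
         *              register_t      args[N]; - syscall arguments
         *      } u;
         *      sendsys(NULL, &u.msg, sizeof(u));
         */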
        /*
         * Bad message size
         */
        if ((msgsize = frame.tf_edx) < sizeof(struct lwkt_msg) ||
            msgsize > sizeof(union sysunion) - sizeof(struct sysmsg)) {
                error = ENOSYS;
                goto bad2;
        }

        /*
         * Obtain a sysun from our per-cpu cache or allocate a new one.  Use
         * the opaque field to store the original (user) message pointer.
         * A critical section is necessary to interlock against interrupts
         * returning system messages to the thread cache.
         */
        gd = td->td_gd;
        crit_enter_quick(td);
        if ((sysun = gd->gd_freesysun) != NULL)
                gd->gd_freesysun = sysun->lmsg.opaque.ms_sysunnext;
        else
                sysun = malloc(sizeof(union sysunion), M_SYSMSG, M_WAITOK);
        crit_exit_quick(td);

        /*
         * Copy the user request into the kernel copy of the user request.
         */
        umsg = (void *)frame.tf_ecx;
        error = copyin(umsg, &sysun->nosys.usrmsg, msgsize);
        if (error)
                goto bad1;
        if ((sysun->nosys.usrmsg.umsg.ms_flags & MSGF_ASYNC)) {
                error = suser(td);
                if (error) {
                        goto bad1;
                }
                if (max_sysmsg > 0 && p->p_num_sysmsg >= max_sysmsg) {
                        error = E2BIG;
                        goto bad1;
                }
        }

        /*
         * Initialize the kernel message from the copied-in data and
         * pull in appropriate flags from the userland message.
         *
         * ms_abort_port is usually initialized in sendmsg/domsg, but since
         * we are not calling those functions (yet), we have to do it manually.
         */
        lwkt_initmsg(&sysun->lmsg, &td->td_msgport, 0,
                        sysun->nosys.usrmsg.umsg.ms_cmd,
                        lwkt_cmd_op_none);
        sysun->lmsg.ms_abort_port = sysun->lmsg.ms_reply_port;
        sysun->sysmsg_copyout = NULL;
        sysun->lmsg.opaque.ms_umsg = umsg;
        sysun->lmsg.ms_flags |= sysun->nosys.usrmsg.umsg.ms_flags & MSGF_ASYNC;

        /*
         * Extract the system call number, lookup the system call, and
         * set the default return value.
         */
        code = (u_int)sysun->lmsg.ms_cmd.cm_op;
        /* We don't handle the syscall() syscall yet */
        if (code == 0) {
                error = ENOTSUP;
                free_sysun(td, sysun);
                goto bad2;
        }
        if (code >= p->p_sysent->sv_size) {
                error = ENOSYS;
                free_sysun(td, sysun);
                goto bad1;
        }

        callp = &p->p_sysent->sv_table[code];

        narg = (msgsize - sizeof(struct lwkt_msg)) / sizeof(register_t);

#ifdef KTRACE
        if (KTRPOINT(td, KTR_SYSCALL)) {
                ktrsyscall(p->p_tracep, code, narg, (void *)(&sysun->nosys.usrmsg + 1));
        }
#endif
        sysun->lmsg.u.ms_fds[0] = 0;
        sysun->lmsg.u.ms_fds[1] = 0;

        STOPEVENT(p, S_SCE, narg);      /* MP aware */

        /*
         * Make the system call.  An error code is always returned, results
         * are copied back via ms_result32 or ms_result64.  YYY temporary
         * stage copy p_retval[] into ms_result32/64
         *
         * NOTE!  XXX if this is a child returning from a fork curproc
         * might be different.  YYY huh? a child returning from a fork
         * should never 'return' from this call, it should go right to the
         * fork_trampoline function.
         */
        error = (*callp->sy_call)(sysun);
        gd = td->td_gd; /* RELOAD, might have switched cpus */

bad1:
        /*
         * If a synchronous return copy p_retval to ms_result64 and return
         * the sysmsg to the free pool.
         *
         * YYY Don't writeback message if execve() YYY
         */
        sysun->nosys.usrmsg.umsg.ms_error = error;
        sysun->nosys.usrmsg.umsg.u.ms_fds[0] = sysun->lmsg.u.ms_fds[0];
        sysun->nosys.usrmsg.umsg.u.ms_fds[1] = sysun->lmsg.u.ms_fds[1];
        result = sysun->nosys.usrmsg.umsg.u.ms_fds[0]; /* for ktrace */
        if (error != 0 || code != SYS_execve) {
                int error2;
                error2 = copyout(&sysun->nosys.usrmsg.umsg.ms_copyout_start,
                                &umsg->ms_copyout_start,
                                ms_copyout_size);
                if (error2 != 0)
                        error = error2;
        }
        if (error == EASYNC) {
                /*
                 * Since only the current process ever messes with msgq,
                 * we can safely manipulate it in parallel with the async
                 * operation.
                 */
                TAILQ_INSERT_TAIL(&p->p_sysmsgq, &sysun->sysmsg, msgq);
                p->p_num_sysmsg++;
                error = (int)&sysun->sysmsg;
        } else {
                free_sysun(td, sysun);
        }
bad2:
        frame.tf_eax = (register_t)error;

        /*
         * Traced syscall.  trapsignal() is not MP aware.
         */
        if ((orig_tf_eflags & PSL_T) && !(orig_tf_eflags & PSL_VM)) {
                frame.tf_eflags &= ~PSL_T;
                trapsignal(p, SIGTRAP, 0);
        }

        /*
         * Handle reschedule and other end-of-syscall issues
         */
        userret(p, &frame, sticks);

#ifdef KTRACE
        if (KTRPOINT(td, KTR_SYSRET)) {
                ktrsysret(p->p_tracep, code, error, result);
        }
#endif

        /*
         * This works because errno is findable through the
         * register set.  If we ever support an emulation where this
         * is not the case, this code will need to be revisited.
         */
        STOPEVENT(p, S_SCX, code);

        userexit(p);
#ifdef SMP
        /*
         * Release the MP lock if we had to get it
         */
        KASSERT(td->td_mpcount == 1, ("badmpcount syscall from %p", (void *)frame.tf_eip));
        rel_mplock();
#endif
}
1644
/*
 * waitsys2 -   MP aware system message wait C handler
 */
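/*
 * Register protocol (derived from the code below): %ecx optionally names
 * a specific outstanding sysmsg to wait on, %eax optionally names a port
 * (only the default port is supported so far), and %edx selects blocking
 * (0) versus polling (-1) on the default port.
 */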
void
waitsys2(struct trapframe frame)
{
        struct globaldata *gd;
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        union sysunion *sysun = NULL;
        lwkt_msg_t umsg;
        register_t orig_tf_eflags;
        int error = 0, result, sticks;
        u_int code = 0;

#ifdef DIAGNOSTIC
        if (ISPL(frame.tf_cs) != SEL_UPL) {
                get_mplock();
                panic("waitsys2");
                /* NOT REACHED */
        }
#endif

#ifdef SMP
        KASSERT(td->td_mpcount == 0, ("badmpcount syscall from %p",
                (void *)frame.tf_eip));
        get_mplock();
#endif

        /*
         * access non-atomic field from critical section.  p_sticks is
         * updated by the clock interrupt.  Also use this opportunity
         * to lazy-raise our LWKT priority.
         */
        userenter(td);
        sticks = td->td_sticks;

        p->p_md.md_regs = &frame;
        orig_tf_eflags = frame.tf_eflags;
        result = 0;

        if (frame.tf_ecx) {
                struct sysmsg *ptr;
                int found = 0;

                TAILQ_FOREACH(ptr, &p->p_sysmsgq, msgq) {
                        if ((void *)ptr == (void *)frame.tf_ecx) {
                                sysun = (void *)sysmsg_wait(p,
                                            (void *)frame.tf_ecx, 1);
                                found = 1;
                                break;
                        }
                }
                if (!found) {
                        error = ENOENT;
                        goto bad;
                }
        } else if (frame.tf_eax) {
                printf("waitport/checkport only the default port is supported at the moment\n");
                error = ENOTSUP;
                goto bad;
        } else {
                switch(frame.tf_edx) {
                case 0:
                        sysun = (void *)sysmsg_wait(p, NULL, 0);
                        break;
                case -1:
                        sysun = (void *)sysmsg_wait(p, NULL, 1);
                        break;
                default:
                        error = ENOSYS;
                        goto bad;
                }
        }
        if (sysun) {
                gd = td->td_gd;
                umsg = sysun->lmsg.opaque.ms_umsg;
                frame.tf_eax = (register_t)sysun;
                sysun->nosys.usrmsg.umsg.u.ms_fds[0] = sysun->lmsg.u.ms_fds[0];
                sysun->nosys.usrmsg.umsg.u.ms_fds[1] = sysun->lmsg.u.ms_fds[1];
                sysun->nosys.usrmsg.umsg.ms_error = sysun->lmsg.ms_error;
                error = sysun->lmsg.ms_error;
                result = sysun->lmsg.u.ms_fds[0]; /* for ktrace */
                error = copyout(&sysun->nosys.usrmsg.umsg.ms_copyout_start,
                                &umsg->ms_copyout_start, ms_copyout_size);
                /* read cm_op before the sysun goes back on the free list */
                code = (u_int)sysun->lmsg.ms_cmd.cm_op;
                free_sysun(td, sysun);
                frame.tf_edx = 0;
        }
bad:
        if (error)
                frame.tf_eax = error;

        /*
         * Traced syscall.  trapsignal() is not MP aware.
         */
        if ((orig_tf_eflags & PSL_T) && !(orig_tf_eflags & PSL_VM)) {
                frame.tf_eflags &= ~PSL_T;
                trapsignal(p, SIGTRAP, 0);
        }

        /*
         * Handle reschedule and other end-of-syscall issues
         */
        userret(p, &frame, sticks);

#ifdef KTRACE
        if (KTRPOINT(td, KTR_SYSRET)) {
                ktrsysret(p->p_tracep, code, error, result);
        }
#endif

        /*
         * This works because errno is findable through the
         * register set.  If we ever support an emulation where this
         * is not the case, this code will need to be revisited.
         */
        STOPEVENT(p, S_SCX, code);

        userexit(p);
#ifdef SMP
        KASSERT(td->td_mpcount == 1, ("badmpcount syscall from %p",
                (void *)frame.tf_eip));
        rel_mplock();
#endif
}
1771
/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.  MP lock is held on entry and should be
 * released on return.  This code will return back into the fork
 * trampoline code which then runs doreti.
 */
void
fork_return(p, frame)
        struct proc *p;
        struct trapframe frame;
{
        frame.tf_eax = 0;               /* Child returns zero */
        frame.tf_eflags &= ~PSL_C;      /* success */
        frame.tf_edx = 1;

        userret(p, &frame, 0);
#ifdef KTRACE
        if (KTRPOINT(p->p_thread, KTR_SYSRET))
                ktrsysret(p->p_tracep, SYS_fork, 0, 0);
#endif
        p->p_flag |= P_PASSIVE_ACQ;
        userexit(p);
        p->p_flag &= ~P_PASSIVE_ACQ;
#ifdef SMP
        KKASSERT(p->p_thread->td_mpcount == 1);
        rel_mplock();
#endif
}