/*-
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1982, 1987, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 * $FreeBSD: src/sys/i386/i386/machdep.c,v 1.385.2.30 2003/05/31 08:48:05 alc Exp $
 */
#include "opt_atalk.h"
#include "opt_compat.h"
#include "opt_directio.h"
#include "opt_msgbuf.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/malloc.h>
#include <sys/reboot.h>
#include <sys/msgbuf.h>
#include <sys/sysent.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/upcall.h>
#include <sys/usched.h>

#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>
#include <machine/cpu.h>
#include <machine/clock.h>
#include <machine/specialreg.h>
#include <machine/md_var.h>
#include <machine/pcb_ext.h>		/* pcb.h included via sys/user.h */
#include <machine/globaldata.h>		/* CPU_prvspace */
#include <machine/smp.h>
#include <machine/perfmon.h>
#include <machine/cputypes.h>

#include <bus/isa/rtc.h>
#include <machine/vm86.h>
#include <sys/random.h>
#include <sys/ptrace.h>
#include <machine/sigframe.h>
#include <unistd.h>		/* umtx_* functions */
#include <pthread.h>		/* pthread_yield() */
extern void dblfault_handler (void);

#ifndef CPU_DISABLE_SSE
static void set_fpregs_xmm (struct save87 *, struct savexmm *);
static void fill_fpregs_xmm (struct savexmm *, struct save87 *);
#endif /* CPU_DISABLE_SSE */

#ifdef DIRECTIO
extern void ffs_rawread_setup(void);
#endif /* DIRECTIO */

#ifdef SMP
int64_t tsc_offsets[MAXCPU];
#else
int64_t tsc_offsets[1];
#endif
#if defined(SWTCH_OPTIM_STATS)
extern int swtch_optim_stats;
SYSCTL_INT(_debug, OID_AUTO, swtch_optim_stats,
	CTLFLAG_RD, &swtch_optim_stats, 0, "");
SYSCTL_INT(_debug, OID_AUTO, tlb_flush_count,
	CTLFLAG_RD, &tlb_flush_count, 0, "");
#endif
static int
sysctl_hw_physmem(SYSCTL_HANDLER_ARGS)
{
	int error = sysctl_handle_int(oidp, 0, ctob((int)Maxmem), req);
	return (error);
}

SYSCTL_PROC(_hw, HW_PHYSMEM, physmem, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_hw_physmem, "IU", "");

static int
sysctl_hw_usermem(SYSCTL_HANDLER_ARGS)
{
	int error = sysctl_handle_int(oidp, 0,
		ctob((int)Maxmem - vmstats.v_wire_count), req);
	return (error);
}

SYSCTL_PROC(_hw, HW_USERMEM, usermem, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_hw_usermem, "IU", "");

SYSCTL_ULONG(_hw, OID_AUTO, availpages, CTLFLAG_RD, &Maxmem, 0, "");
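
/*
 * Usage sketch (from userland, assuming the standard sysctl(8) utility):
 *
 *	sysctl hw.physmem	# total physical memory, in bytes
 *	sysctl hw.usermem	# physical memory not wired by the kernel
 *	sysctl hw.availpages	# Maxmem, in pages
 */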

static int
sysctl_machdep_msgbuf(SYSCTL_HANDLER_ARGS)
{
	int error;

	/*
	 * Unwind the buffer, so that it's linear (possibly starting with
	 * some initial nulls).
	 */
	error = sysctl_handle_opaque(oidp, msgbufp->msg_ptr + msgbufp->msg_bufr,
		msgbufp->msg_size - msgbufp->msg_bufr, req);
	if (error)
		return (error);
	if (msgbufp->msg_bufr > 0) {
		error = sysctl_handle_opaque(oidp, msgbufp->msg_ptr,
			msgbufp->msg_bufr, req);
	}
	return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, msgbuf, CTLTYPE_STRING|CTLFLAG_RD,
	0, 0, sysctl_machdep_msgbuf, "A", "Contents of kernel message buffer");

static int msgbuf_clear;

static int
sysctl_machdep_msgbuf_clear(SYSCTL_HANDLER_ARGS)
{
	int error;

	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2,
		req);
	if (!error && req->newptr) {
		/* Clear the buffer and reset write pointer */
		bzero(msgbufp->msg_ptr, msgbufp->msg_size);
		msgbufp->msg_bufr = msgbufp->msg_bufx = 0;
		msgbuf_clear = 0;
	}
	return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, msgbuf_clear, CTLTYPE_INT|CTLFLAG_RW,
	&msgbuf_clear, 0, sysctl_machdep_msgbuf_clear, "I",
	"Clear kernel message buffer");

/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * at top to call routine, followed by kcall
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 */
void
sendsig(sig_t catcher, int sig, sigset_t *mask, u_long code)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p = lp->lwp_proc;
	struct trapframe *regs;
	struct sigacts *psp = p->p_sigacts;
	struct sigframe sf, *sfp;
	int oonstack;

	regs = lp->lwp_md.md_regs;
	oonstack = (lp->lwp_sigstk.ss_flags & SS_ONSTACK) ? 1 : 0;

	/* save user context */
	bzero(&sf, sizeof(struct sigframe));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = lp->lwp_sigstk;
	sf.sf_uc.uc_mcontext.mc_onstack = oonstack;
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_gs, sizeof(struct trapframe));

	/* make the size of the saved context visible to userland */
	sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext);

	/* save mailbox pending state for syscall interlock semantics */
	if (p->p_flag & P_MAILBOX)
		sf.sf_uc.uc_mcontext.mc_xflags |= PGEX_MAILBOX;

	/* Allocate and validate space for the signal handler context. */
	if ((lp->lwp_flag & LWP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sfp = (struct sigframe *)(lp->lwp_sigstk.ss_sp +
		    lp->lwp_sigstk.ss_size - sizeof(struct sigframe));
		lp->lwp_sigstk.ss_flags |= SS_ONSTACK;
	} else {
		sfp = (struct sigframe *)regs->tf_esp - 1;
	}

	/* Translate the signal if appropriate */
	if (p->p_sysent->sv_sigtbl) {
		if (sig <= p->p_sysent->sv_sigsize)
			sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
	}

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (register_t)&sfp->sf_si;
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* fill siginfo structure */
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_code = code;
		sf.sf_si.si_addr = (void *)regs->tf_err;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = code;
		sf.sf_addr = regs->tf_err;
		sf.sf_ahu.sf_handler = catcher;
	}

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &lp->lwp_thread->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Save the FPU state and reinit the FP unit
	 */
	npxpush(&sf.sf_uc.uc_mcontext);

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(struct sigframe)) != 0) {
		/*
		 * Something is wrong with the stack pointer.
		 * ...Kill the process.
		 */
		sigexit(lp, SIGILL);
	}

	regs->tf_esp = (int)sfp;
	regs->tf_eip = PS_STRINGS - *(p->p_sysent->sv_szsigcode);

	/*
	 * i386 abi specifies that the direction flag must be cleared
	 * on function entry
	 */
	regs->tf_eflags &= ~(PSL_T|PSL_D);

	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	if (regs->tf_trapno == T_PROTFLT) {
		regs->tf_fs = _udatasel;
		regs->tf_gs = _udatasel;
	}
	regs->tf_ss = _udatasel;
}
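
/*
 * Sketch of the resulting user stack, for orientation (field names from
 * struct sigframe; the exact layout shown here is a simplification, not
 * authoritative):
 *
 *	sfp -> sf_signum / sf_siginfo / sf_ucontext	handler arguments
 *	       sf_addr					fault address
 *	       sf_si					siginfo (SA_SIGINFO case)
 *	       sf_uc					saved ucontext
 *
 * %eip is pointed at the signal trampoline that exec copied out just below
 * PS_STRINGS; the trampoline calls the handler and then enters sigreturn(2)
 * on sf_uc, which is handled by sys_sigreturn() below.
 */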

/*
 * Sanitize the trapframe for a virtual kernel passing control to a custom
 * signal handler.
 *
 * Allow userland to set or maintain PSL_RF, the resume flag.  This flag
 * basically controls whether the return PC should skip the first instruction
 * (as in an explicit system call) or re-execute it (as in an exception).
 */
int
cpu_sanitize_frame(struct trapframe *frame)
{
	frame->tf_cs = _ucodesel;
	frame->tf_ds = _udatasel;
	frame->tf_es = _udatasel;
	frame->tf_fs = _udatasel;
	frame->tf_gs = _udatasel;
	frame->tf_ss = _udatasel;
	frame->tf_eflags &= (PSL_RF | PSL_USERCHANGE);
	frame->tf_eflags |= PSL_RESERVED_DEFAULT | PSL_I;
	return(0);
}

int
cpu_sanitize_tls(struct savetls *tls)
{
	struct segment_descriptor *desc;
	int i;

	for (i = 0; i < NGTLS; ++i) {
		desc = &tls->tls[i];
		if (desc->sd_dpl == 0 && desc->sd_type == 0)
			continue;
		if (desc->sd_def32 == 0)
			return(ENXIO);
		if (desc->sd_type != SDT_MEMRWA)
			return(ENXIO);
		if (desc->sd_dpl != SEL_UPL)
			return(ENXIO);
		if (desc->sd_xx != 0 || desc->sd_p != 1)
			return(ENXIO);
	}
	return(0);
}

/*
 * sigreturn(ucontext_t *sigcntxp)
 *
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * state to gain improper privileges.
 */
#define	EFL_SECURE(ef, oef)	((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)
#define	CS_SECURE(cs)		(ISPL(cs) == SEL_UPL)
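
/*
 * Example: EFL_SECURE() accepts a new eflags value only if it differs from
 * the old one exclusively in PSL_USERCHANGE bits, so a handler can flip the
 * user-changeable flags but nothing privileged; CS_SECURE() accepts a code
 * selector only if its requested privilege level is user (SEL_UPL).
 */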

int
sys_sigreturn(struct sigreturn_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p = lp->lwp_proc;
	struct trapframe *regs;
	ucontext_t ucp;
	int cs;
	int eflags;
	int error;

	error = copyin(uap->sigcntxp, &ucp, sizeof(ucp));
	if (error)
		return (error);

	regs = lp->lwp_md.md_regs;
	eflags = ucp.uc_mcontext.mc_eflags;

	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (lp->lwp_thread->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &lp->lwp_thread->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* go back to user mode if both flags are set */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF))
			trapsignal(lp, SIGBUS, 0);

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp.uc_mcontext.mc_gs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = tf->tf_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
		tf->tf_gs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 *
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			kprintf("sigreturn: eflags = 0x%x\n", eflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp.uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			kprintf("sigreturn: cs = 0x%x\n", cs);
			trapsignal(lp, SIGBUS, T_PROTFLT);
			return (EINVAL);
		}

		bcopy(&ucp.uc_mcontext.mc_gs, regs, sizeof(struct trapframe));
	}

	/*
	 * Restore the FPU state from the frame
	 */
	npxpop(&ucp.uc_mcontext);

	/*
	 * Merge saved signal mailbox pending flag to maintain interlock
	 * semantics against system calls.
	 */
	if (ucp.uc_mcontext.mc_xflags & PGEX_MAILBOX)
		p->p_flag |= P_MAILBOX;

	if (ucp.uc_mcontext.mc_onstack & 1)
		lp->lwp_sigstk.ss_flags |= SS_ONSTACK;
	else
		lp->lwp_sigstk.ss_flags &= ~SS_ONSTACK;

	lp->lwp_sigmask = ucp.uc_sigmask;
	SIG_CANTMASK(lp->lwp_sigmask);
	return (EJUSTRETURN);
}
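
/*
 * Note: EJUSTRETURN tells the syscall return path to leave %eax/%edx alone
 * instead of overwriting them with a syscall return value -- the entire
 * trapframe was just rebuilt from the user-supplied context above.
 */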

/*
 * Stack frame on entry to function.  %eax will contain the function vector,
 * %ecx will contain the function data.  flags, ecx, and eax will have
 * already been pushed on the stack.
 */
struct upc_frame {
	register_t	eax;
	register_t	ecx;
	register_t	edx;
	register_t	flags;
	register_t	oldip;
};

void
sendupcall(struct vmupcall *vu, int morepending)
{
	struct lwp *lp = curthread->td_lwp;
	struct trapframe *regs;
	struct upcall upcall;
	struct upc_frame upc_frame;
	int	crit_count = 0;

	/*
	 * If we are a virtual kernel running an emulated user process
	 * context, switch back to the virtual kernel context before
	 * trying to post the signal.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		lp->lwp_md.md_regs->tf_trapno = 0;
		vkernel_trap(lp, lp->lwp_md.md_regs);
	}

	/*
	 * Get the upcall data structure
	 */
	if (copyin(lp->lwp_upcall, &upcall, sizeof(upcall)) ||
	    copyin((char *)upcall.upc_uthread + upcall.upc_critoff,
		   &crit_count, sizeof(int))) {
		vu->vu_pending = 0;
		kprintf("bad upcall address\n");
		return;
	}

	/*
	 * If the data structure is already marked pending or has a critical
	 * section count, mark the data structure as pending and return
	 * without doing an upcall.  vu_pending is left set.
	 */
	if (upcall.upc_pending || crit_count >= vu->vu_pending) {
		if (upcall.upc_pending < vu->vu_pending) {
			upcall.upc_pending = vu->vu_pending;
			copyout(&upcall.upc_pending,
				&lp->lwp_upcall->upc_pending,
				sizeof(upcall.upc_pending));
		}
		return;
	}

	/*
	 * We can run this upcall now, clear vu_pending.
	 *
	 * Bump our critical section count and set or clear the
	 * user pending flag depending on whether more upcalls are
	 * pending.  The user will be responsible for calling
	 * upc_dispatch(-1) to process remaining upcalls.
	 */
	vu->vu_pending = 0;
	upcall.upc_pending = morepending;
	++crit_count;
	copyout(&upcall.upc_pending, &lp->lwp_upcall->upc_pending,
		sizeof(upcall.upc_pending));
	copyout(&crit_count, (char *)upcall.upc_uthread + upcall.upc_critoff,
		sizeof(int));

	/*
	 * Construct a stack frame and issue the upcall
	 */
	regs = lp->lwp_md.md_regs;
	upc_frame.eax = regs->tf_eax;
	upc_frame.ecx = regs->tf_ecx;
	upc_frame.edx = regs->tf_edx;
	upc_frame.flags = regs->tf_eflags;
	upc_frame.oldip = regs->tf_eip;
	if (copyout(&upc_frame, (void *)(regs->tf_esp - sizeof(upc_frame)),
		    sizeof(upc_frame)) != 0) {
		kprintf("bad stack on upcall\n");
	} else {
		regs->tf_eax = (register_t)vu->vu_func;
		regs->tf_ecx = (register_t)vu->vu_data;
		regs->tf_edx = (register_t)lp->lwp_upcall;
		regs->tf_eip = (register_t)vu->vu_ctx;
		regs->tf_esp -= sizeof(upc_frame);
	}
}

/*
 * fetchupcall occurs in the context of a system call, which means that
 * we have to return EJUSTRETURN in order to prevent eax and edx from
 * being overwritten by the syscall return value.
 *
 * if vu is not NULL we return the new context in %edx, the new data in %ecx,
 * and the function pointer in %eax.
 */
int
fetchupcall(struct vmupcall *vu, int morepending, void *rsp)
{
	struct upc_frame upc_frame;
	struct lwp *lp = curthread->td_lwp;
	struct trapframe *regs;
	int error;
	struct upcall upcall;
	int crit_count;

	regs = lp->lwp_md.md_regs;

	error = copyout(&morepending, &lp->lwp_upcall->upc_pending, sizeof(int));
	if (error == 0) {
	    if (vu) {
		/*
		 * This jumps us to the next ready context.
		 */
		vu->vu_pending = 0;
		error = copyin(lp->lwp_upcall, &upcall, sizeof(upcall));
		crit_count = 0;
		if (error == 0)
			error = copyin((char *)upcall.upc_uthread + upcall.upc_critoff, &crit_count, sizeof(int));
		++crit_count;
		if (error == 0)
			error = copyout(&crit_count, (char *)upcall.upc_uthread + upcall.upc_critoff, sizeof(int));
		regs->tf_eax = (register_t)vu->vu_func;
		regs->tf_ecx = (register_t)vu->vu_data;
		regs->tf_edx = (register_t)lp->lwp_upcall;
		regs->tf_eip = (register_t)vu->vu_ctx;
		regs->tf_esp = (register_t)rsp;
	    } else {
		/*
		 * This returns us to the originally interrupted code.
		 */
		error = copyin(rsp, &upc_frame, sizeof(upc_frame));
		regs->tf_eax = upc_frame.eax;
		regs->tf_ecx = upc_frame.ecx;
		regs->tf_edx = upc_frame.edx;
		regs->tf_eflags = (regs->tf_eflags & ~PSL_USERCHANGE) |
				(upc_frame.flags & PSL_USERCHANGE);
		regs->tf_eip = upc_frame.oldip;
		regs->tf_esp = (register_t)((char *)rsp + sizeof(upc_frame));
	    }
	}
	if (error == 0)
		error = EJUSTRETURN;
	return(error);
}
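
/*
 * Userland view, for orientation (a sketch -- the dispatch helper lives in
 * userland, not in this file): the upcall arrives with %eip at vu_ctx, which
 * is expected to save state, run vu_func(vu_data), and eventually issue
 * upc_control(UPC_CONTROL_DISPATCH, -1, ...) or equivalent so the kernel can
 * pop the upc_frame pushed above and resume the interrupted code.
 */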

/*
 * cpu_idle() represents the idle LWKT.  You cannot return from this function
 * (unless you want to blow things up!).  Instead we look for runnable threads
 * and loop or halt as appropriate.  Giant is not held on entry to the thread.
 *
 * The main loop is entered with a critical section held, we must release
 * the critical section before doing anything else.  lwkt_switch() will
 * check for pending interrupts due to entering and exiting its own
 * critical section.
 *
 * Note on cpu_idle_hlt:  On an SMP system we rely on a scheduler IPI
 * to wake a HLTed cpu up.  However, there are cases where the idlethread
 * will be entered with the possibility that no IPI will occur and in such
 * cases lwkt_switch() sets TDF_IDLE_NOHLT.
 */
static int	cpu_idle_hlt = 1;
static int	cpu_idle_hltcnt;
static int	cpu_idle_spincnt;
SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
	&cpu_idle_hlt, 0, "Idle loop HLT enable");
SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hltcnt, CTLFLAG_RW,
	&cpu_idle_hltcnt, 0, "Idle loop entry halts");
SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_spincnt, CTLFLAG_RW,
	&cpu_idle_spincnt, 0, "Idle loop entry spins");
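
/*
 * Usage sketch: machdep.cpu_idle_hlt can be flipped at runtime, e.g.
 *
 *	sysctl machdep.cpu_idle_hlt=0	# spin instead of sleeping
 *
 * and the hltcnt/spincnt counters show which path the idle loop is taking.
 */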

void
cpu_idle(void)
{
	struct thread *td = curthread;
	struct mdglobaldata *gd = mdcpu;
	int reqflags;

	crit_exit();
	KKASSERT(td->td_critcount == 0);

	for (;;) {
		/*
		 * See if there are any LWKTs ready to go.
		 */
		lwkt_switch();

		/*
		 * The idle loop halts only if no threads are schedulable
		 * and no signals have occurred.
		 */
		if (cpu_idle_hlt && !lwkt_runnable() &&
		    (td->td_flags & TDF_IDLE_NOHLT) == 0) {
			splz();
			KKASSERT(MP_LOCK_HELD() == 0);
			if (!lwkt_runnable()) {
#ifdef DEBUGIDLE
				struct timeval tv1, tv2;
				gettimeofday(&tv1, NULL);
#endif
				reqflags = gd->mi.gd_reqflags &
					   ~RQF_IDLECHECK_WK_MASK;
				umtx_sleep(&gd->mi.gd_reqflags, reqflags,
					   1000000);
#ifdef DEBUGIDLE
				gettimeofday(&tv2, NULL);
				if (tv2.tv_usec - tv1.tv_usec +
				    (tv2.tv_sec - tv1.tv_sec) * 1000000
				    > 500000) {
					kprintf("cpu %d idlelock %08x %08x\n",
						gd->mi.gd_cpuid,
						gd->mi.gd_reqflags,
						gd->gd_fpending);
				}
#endif
			} else {
				handle_cpu_contention_mask();
			}
			++cpu_idle_hltcnt;
		} else {
			td->td_flags &= ~TDF_IDLE_NOHLT;
			splz();
			handle_cpu_contention_mask();
			__asm __volatile("pause");
			++cpu_idle_spincnt;
		}
	}
}

/*
 * Called by the LWKT switch core with a critical section held if the only
 * schedulable thread needs the MP lock and we couldn't get it.  On
 * a real cpu we just spin in the scheduler.  In the virtual kernel
 * we sleep for a bit.
 */
void
handle_cpu_contention_mask(void)
{
	cpumask_t mask;

	mask = cpu_contention_mask;
	cpu_ccfence();
	if (mask && BSFCPUMASK(mask) != mycpu->gd_cpuid)
		pthread_yield();
}

/*
 * Called by the spinlock code with or without a critical section held
 * when a spinlock is found to be seriously contested.
 *
 * We need to enter a critical section to prevent signals from recursing
 * into pthreads.
 */
void
cpu_spinlock_contested(void)
{
	crit_enter();
	pthread_yield();
	crit_exit();
}

/*
 * Clear registers on exec
 */
void
exec_setregs(u_long entry, u_long stack, u_long ps_strings)
{
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	struct trapframe *regs = lp->lwp_md.md_regs;
	struct pcb *pcb = lp->lwp_thread->td_pcb;

	/* was i386_user_cleanup() in NetBSD */
	user_ldt_free(pcb);

	bzero((char *)regs, sizeof(struct trapframe));
	regs->tf_eip = entry;
	regs->tf_esp = stack;
	regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T);
	regs->tf_ss = _udatasel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_gs = _udatasel;
	regs->tf_cs = _ucodesel;

	/* PS_STRINGS value for BSD/OS binaries.  It is 0 for non-BSD/OS. */
	regs->tf_ebx = ps_strings;

	/*
	 * Reset the hardware debug registers if they were in use.
	 * They won't have any meaning for the newly exec'd process.
	 */
	if (pcb->pcb_flags & PCB_DBREGS) {
		pcb->pcb_dr0 = 0;
		pcb->pcb_dr1 = 0;
		pcb->pcb_dr2 = 0;
		pcb->pcb_dr3 = 0;
		pcb->pcb_dr6 = 0;
		pcb->pcb_dr7 = 0;
		if (pcb == td->td_pcb) {
			/*
			 * Clear the debug registers on the running
			 * CPU, otherwise they will end up affecting
			 * the next process we switch to.
			 */
			reset_dbregs();
		}
		pcb->pcb_flags &= ~PCB_DBREGS;
	}

	/*
	 * Initialize the math emulator (if any) for the current process.
	 * Actually, just clear the bit that says that the emulator has
	 * been initialized.  Initialization is delayed until the process
	 * traps to the emulator (if it is done at all) mainly because
	 * emulators don't provide an entry point for initialization.
	 */
	pcb->pcb_flags &= ~FP_SOFTFP;

	/*
	 * note: do not set CR0_TS here.  npxinit() must do it after clearing
	 * gd_npxthread.  Otherwise a preemptive interrupt thread may panic
	 * in npxdna().
	 */
	crit_enter();
	load_cr0(rcr0() | CR0_MP);

	/* Initialize the npx (if any) for the current process. */
	npxinit(__INITIAL_NPXCW__);
	crit_exit();

	/*
	 * note: linux emulator needs edx to be 0x0 on entry, which is
	 * handled in execve simply by setting the 64 bit syscall
	 * return value to 0.
	 */
}

void
cpu_setregs(void)
{
	unsigned int cr0;

	cr0 = rcr0();
	cr0 |= CR0_NE;			/* Done by npxinit() */
	cr0 |= CR0_MP | CR0_TS;		/* Done at every execve() too. */
	cr0 |= CR0_WP | CR0_AM;
	load_cr0(cr0);
	load_gs(_udatasel);
}

static int
sysctl_machdep_adjkerntz(SYSCTL_HANDLER_ARGS)
{
	int error;

	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2,
		req);
	if (!error && req->newptr)
		resettodr();
	return (error);
}

SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT|CTLFLAG_RW,
	&adjkerntz, 0, sysctl_machdep_adjkerntz, "I", "");

extern u_long bootdev;		/* not a cdev_t - encoding is different */
SYSCTL_ULONG(_machdep, OID_AUTO, guessed_bootdev,
	CTLFLAG_RD, &bootdev, 0, "Boot device (not in cdev_t format)");

/*
 * Initialize 386 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */

extern struct user *proc0paddr;

extern inthand_t
	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
	IDTVEC(page), IDTVEC(mchk), IDTVEC(fpu), IDTVEC(align),
	IDTVEC(xmm), IDTVEC(syscall),
	IDTVEC(int0x80_syscall);

#ifdef DEBUG_INTERRUPTS
extern inthand_t *Xrsvdary[256];
#endif

int
ptrace_set_pc(struct lwp *lp, unsigned long addr)
{
	lp->lwp_md.md_regs->tf_eip = addr;
	return (0);
}

int
ptrace_single_step(struct lwp *lp)
{
	lp->lwp_md.md_regs->tf_eflags |= PSL_T;
	return (0);
}

int
fill_regs(struct lwp *lp, struct reg *regs)
{
	struct trapframe *tp;

	tp = lp->lwp_md.md_regs;
	regs->r_gs = tp->tf_gs;
	regs->r_fs = tp->tf_fs;
	regs->r_es = tp->tf_es;
	regs->r_ds = tp->tf_ds;
	regs->r_edi = tp->tf_edi;
	regs->r_esi = tp->tf_esi;
	regs->r_ebp = tp->tf_ebp;
	regs->r_ebx = tp->tf_ebx;
	regs->r_edx = tp->tf_edx;
	regs->r_ecx = tp->tf_ecx;
	regs->r_eax = tp->tf_eax;
	regs->r_eip = tp->tf_eip;
	regs->r_cs = tp->tf_cs;
	regs->r_eflags = tp->tf_eflags;
	regs->r_esp = tp->tf_esp;
	regs->r_ss = tp->tf_ss;
	return (0);
}

int
set_regs(struct lwp *lp, struct reg *regs)
{
	struct trapframe *tp;

	tp = lp->lwp_md.md_regs;
	if (!EFL_SECURE(regs->r_eflags, tp->tf_eflags) ||
	    !CS_SECURE(regs->r_cs))
		return (EINVAL);
	tp->tf_gs = regs->r_gs;
	tp->tf_fs = regs->r_fs;
	tp->tf_es = regs->r_es;
	tp->tf_ds = regs->r_ds;
	tp->tf_edi = regs->r_edi;
	tp->tf_esi = regs->r_esi;
	tp->tf_ebp = regs->r_ebp;
	tp->tf_ebx = regs->r_ebx;
	tp->tf_edx = regs->r_edx;
	tp->tf_ecx = regs->r_ecx;
	tp->tf_eax = regs->r_eax;
	tp->tf_eip = regs->r_eip;
	tp->tf_cs = regs->r_cs;
	tp->tf_eflags = regs->r_eflags;
	tp->tf_esp = regs->r_esp;
	tp->tf_ss = regs->r_ss;
	return (0);
}
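
/*
 * These two back ptrace(2) PT_GETREGS/PT_SETREGS (and core dumps).  A
 * debugger-side usage sketch, based on the public ptrace interface rather
 * than anything in this file:
 *
 *	struct reg r;
 *	ptrace(PT_GETREGS, pid, (caddr_t)&r, 0);
 *	r.r_eip = new_pc;
 *	ptrace(PT_SETREGS, pid, (caddr_t)&r, 0);
 *
 * set_regs() rejects eflags/%cs combinations that fail EFL_SECURE() or
 * CS_SECURE(), so a debugger cannot escalate the traced process's privilege.
 */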

#ifndef CPU_DISABLE_SSE
static void
fill_fpregs_xmm(struct savexmm *sv_xmm, struct save87 *sv_87)
{
	struct env87 *penv_87 = &sv_87->sv_env;
	struct envxmm *penv_xmm = &sv_xmm->sv_env;
	int i;

	/* FPU control/status */
	penv_87->en_cw = penv_xmm->en_cw;
	penv_87->en_sw = penv_xmm->en_sw;
	penv_87->en_tw = penv_xmm->en_tw;
	penv_87->en_fip = penv_xmm->en_fip;
	penv_87->en_fcs = penv_xmm->en_fcs;
	penv_87->en_opcode = penv_xmm->en_opcode;
	penv_87->en_foo = penv_xmm->en_foo;
	penv_87->en_fos = penv_xmm->en_fos;

	/* FPU registers */
	for (i = 0; i < 8; ++i)
		sv_87->sv_ac[i] = sv_xmm->sv_fp[i].fp_acc;

	sv_87->sv_ex_sw = sv_xmm->sv_ex_sw;
}

static void
set_fpregs_xmm(struct save87 *sv_87, struct savexmm *sv_xmm)
{
	struct env87 *penv_87 = &sv_87->sv_env;
	struct envxmm *penv_xmm = &sv_xmm->sv_env;
	int i;

	/* FPU control/status */
	penv_xmm->en_cw = penv_87->en_cw;
	penv_xmm->en_sw = penv_87->en_sw;
	penv_xmm->en_tw = penv_87->en_tw;
	penv_xmm->en_fip = penv_87->en_fip;
	penv_xmm->en_fcs = penv_87->en_fcs;
	penv_xmm->en_opcode = penv_87->en_opcode;
	penv_xmm->en_foo = penv_87->en_foo;
	penv_xmm->en_fos = penv_87->en_fos;

	/* FPU registers */
	for (i = 0; i < 8; ++i)
		sv_xmm->sv_fp[i].fp_acc = sv_87->sv_ac[i];

	sv_xmm->sv_ex_sw = sv_87->sv_ex_sw;
}
#endif /* CPU_DISABLE_SSE */
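
/*
 * Note: these converters only translate the legacy x87 portion of the
 * FXSAVE image; the XMM registers and MXCSR have no representation in
 * struct save87 and are simply not copied.  (The FXSAVE tag word is the
 * abridged 8-bit form, so the straight en_tw copy above is a
 * simplification as well.)
 */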

int
fill_fpregs(struct lwp *lp, struct fpreg *fpregs)
{
#ifndef CPU_DISABLE_SSE
	if (cpu_fxsr) {
		fill_fpregs_xmm(&lp->lwp_thread->td_pcb->pcb_save.sv_xmm,
				(struct save87 *)fpregs);
		return (0);
	}
#endif /* CPU_DISABLE_SSE */
	bcopy(&lp->lwp_thread->td_pcb->pcb_save.sv_87, fpregs, sizeof *fpregs);
	return (0);
}

int
set_fpregs(struct lwp *lp, struct fpreg *fpregs)
{
#ifndef CPU_DISABLE_SSE
	if (cpu_fxsr) {
		set_fpregs_xmm((struct save87 *)fpregs,
			       &lp->lwp_thread->td_pcb->pcb_save.sv_xmm);
		return (0);
	}
#endif /* CPU_DISABLE_SSE */
	bcopy(fpregs, &lp->lwp_thread->td_pcb->pcb_save.sv_87, sizeof *fpregs);
	return (0);
}

int
fill_dbregs(struct lwp *lp, struct dbreg *dbregs)
{
	return (ENOSYS);
}

int
set_dbregs(struct lwp *lp, struct dbreg *dbregs)
{
	return (ENOSYS);
}

/*
 * Return > 0 if a hardware breakpoint has been hit, and the
 * breakpoint was in user space.  Return 0, otherwise.
 */
int
user_dbreg_trap(void)
{
	u_int32_t dr7, dr6;	/* debug registers dr6 and dr7 */
	u_int32_t bp;		/* breakpoint bits extracted from dr6 */
	int nbp;		/* number of breakpoints that triggered */
	caddr_t addr[4];	/* breakpoint addresses */
	int i;

	dr7 = rdr7();
	if ((dr7 & 0x000000ff) == 0) {
		/*
		 * all GE and LE bits in the dr7 register are zero,
		 * thus the trap couldn't have been caused by the
		 * hardware debug registers
		 */
		return 0;
	}

	nbp = 0;
	dr6 = rdr6();
	bp = dr6 & 0x0000000f;

	if (!bp) {
		/*
		 * None of the breakpoint bits are set meaning this
		 * trap was not caused by any of the debug registers
		 */
		return 0;
	}

	/*
	 * at least one of the breakpoints were hit, check to see
	 * which ones and if any of them are user space addresses
	 */

	if (bp & 0x01)
		addr[nbp++] = (caddr_t)rdr0();
	if (bp & 0x02)
		addr[nbp++] = (caddr_t)rdr1();
	if (bp & 0x04)
		addr[nbp++] = (caddr_t)rdr2();
	if (bp & 0x08)
		addr[nbp++] = (caddr_t)rdr3();

	for (i = 0; i < nbp; i++) {
		if (addr[i] <
		    (caddr_t)VM_MAX_USER_ADDRESS) {
			/*
			 * addr[i] is in user space
			 */
			return nbp;
		}
	}

	/*
	 * None of the breakpoints are in user space.
	 */
	return 0;
}
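
/*
 * For reference (x86 architectural fact, not specific to this file): the
 * low 8 bits of %dr7 are the per-breakpoint L0/G0..L3/G3 enable bits tested
 * above, and the low 4 bits of %dr6 are the B0..B3 "breakpoint hit" status
 * bits extracted into bp.
 */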

void
identcpu(void)
{
	int regs[4];

	do_cpuid(1, regs);
	cpu_feature = regs[3];
}

void
Debugger(const char *msg)
{
	kprintf("Debugger(\"%s\") called.\n", msg);
}