/*
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1982, 1987, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 * $FreeBSD: src/sys/i386/i386/machdep.c,v 1.385.2.30 2003/05/31 08:48:05 alc Exp $
 */
#include "opt_atalk.h"
#include "opt_compat.h"
#include "opt_directio.h"
#include "opt_msgbuf.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/malloc.h>
#include <sys/reboot.h>
#include <sys/msgbuf.h>
#include <sys/sysent.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/upcall.h>
#include <sys/usched.h>

#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>
#include <machine/cpu.h>
#include <machine/clock.h>
#include <machine/specialreg.h>
#include <machine/md_var.h>
#include <machine/pcb_ext.h>		/* pcb.h included via sys/user.h */
#include <machine/globaldata.h>		/* CPU_prvspace */
#include <machine/smp.h>
#include <machine/perfmon.h>
#include <machine/cputypes.h>

#include <bus/isa/rtc.h>
#include <sys/random.h>
#include <sys/ptrace.h>
#include <machine/sigframe.h>
#include <unistd.h>		/* umtx_* functions */
#include <pthread.h>		/* pthread_yield() */
extern void dblfault_handler (void);

#ifndef CPU_DISABLE_SSE
static void set_fpregs_xmm (struct save87 *, struct savexmm *);
static void fill_fpregs_xmm (struct savexmm *, struct save87 *);
#endif /* CPU_DISABLE_SSE */

#ifdef DIRECTIO
extern void ffs_rawread_setup(void);
#endif /* DIRECTIO */

#ifdef SMP
int64_t tsc_offsets[MAXCPU];
#else
int64_t tsc_offsets[1];
#endif
#if defined(SWTCH_OPTIM_STATS)
extern int swtch_optim_stats;
SYSCTL_INT(_debug, OID_AUTO, swtch_optim_stats,
	CTLFLAG_RD, &swtch_optim_stats, 0, "");
SYSCTL_INT(_debug, OID_AUTO, tlb_flush_count,
	CTLFLAG_RD, &tlb_flush_count, 0, "");
#endif
static int
sysctl_hw_physmem(SYSCTL_HANDLER_ARGS)
{
	int error = sysctl_handle_int(oidp, 0, ctob((int)Maxmem), req);
	return (error);
}

SYSCTL_PROC(_hw, HW_PHYSMEM, physmem, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_hw_physmem, "IU", "");

static int
sysctl_hw_usermem(SYSCTL_HANDLER_ARGS)
{
	int error = sysctl_handle_int(oidp, 0,
		ctob((int)Maxmem - vmstats.v_wire_count), req);
	return (error);
}

SYSCTL_PROC(_hw, HW_USERMEM, usermem, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_hw_usermem, "IU", "");

SYSCTL_ULONG(_hw, OID_AUTO, availpages, CTLFLAG_RD, &Maxmem, 0, "");
static int
sysctl_machdep_msgbuf(SYSCTL_HANDLER_ARGS)
{
	int error;

	/*
	 * Unwind the buffer, so that it's linear (possibly starting with
	 * some initial nulls).
	 */
	error = sysctl_handle_opaque(oidp, msgbufp->msg_ptr + msgbufp->msg_bufr,
	    msgbufp->msg_size - msgbufp->msg_bufr, req);
	if (error)
		return (error);
	if (msgbufp->msg_bufr > 0) {
		error = sysctl_handle_opaque(oidp, msgbufp->msg_ptr,
		    msgbufp->msg_bufr, req);
	}
	return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, msgbuf, CTLTYPE_STRING|CTLFLAG_RD,
	0, 0, sysctl_machdep_msgbuf, "A", "Contents of kernel message buffer");
static int msgbuf_clear;

static int
sysctl_machdep_msgbuf_clear(SYSCTL_HANDLER_ARGS)
{
	int error;

	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2,
	    req);
	if (!error && req->newptr) {
		/* Clear the buffer and reset write pointer */
		bzero(msgbufp->msg_ptr, msgbufp->msg_size);
		msgbufp->msg_bufr = msgbufp->msg_bufx = 0;
		msgbuf_clear = 0;
	}
	return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, msgbuf_clear, CTLTYPE_INT|CTLFLAG_RW,
	&msgbuf_clear, 0, sysctl_machdep_msgbuf_clear, "I",
	"Clear kernel message buffer");
/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * at top to call routine, followed by kcall
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc and psl.
 */
void
sendsig(sig_t catcher, int sig, sigset_t *mask, u_long code)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p = lp->lwp_proc;
	struct trapframe *regs;
	struct sigacts *psp = p->p_sigacts;
	struct sigframe sf, *sfp;
	int oonstack;
	char *sp;

	regs = lp->lwp_md.md_regs;
	oonstack = (lp->lwp_sigstk.ss_flags & SS_ONSTACK) ? 1 : 0;

	/* Save user context */
	bzero(&sf, sizeof(struct sigframe));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = lp->lwp_sigstk;
	sf.sf_uc.uc_mcontext.mc_onstack = oonstack;
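	/*
	 * Copy the entire register set into the mcontext.  The assertion
	 * below relies on tf_rdi being the first field of the trapframe so
	 * that the register block maps directly onto the area starting at
	 * mc_rdi.
	 */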
	KKASSERT(__offsetof(struct trapframe, tf_rdi) == 0);
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_rdi, sizeof(struct trapframe));
	/* Make the size of the saved context visible to userland */
	sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext);

	/* Save mailbox pending state for syscall interlock semantics */
	if (p->p_flag & P_MAILBOX)
		sf.sf_uc.uc_mcontext.mc_xflags |= PGEX_MAILBOX;

	/* Allocate and validate space for the signal handler context. */
	if ((lp->lwp_flag & LWP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sp = (char *)(lp->lwp_sigstk.ss_sp + lp->lwp_sigstk.ss_size -
		    sizeof(struct sigframe));
		lp->lwp_sigstk.ss_flags |= SS_ONSTACK;
	} else {
		/* We take red zone into account */
		sp = (char *)regs->tf_rsp - sizeof(struct sigframe) - 128;
	}

	/* Align to 16 bytes */
	sfp = (struct sigframe *)((intptr_t)sp & ~0xFUL);
	/* Translate the signal if appropriate */
	if (p->p_sysent->sv_sigtbl) {
		if (sig <= p->p_sysent->sv_sigsize)
			sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
	}

	/*
	 * Build the argument list for the signal handler.
	 *
	 * Arguments are in registers (%rdi, %rsi, %rdx, %rcx)
	 */
	regs->tf_rdi = sig;				/* argument 1 */
	regs->tf_rdx = (register_t)&sfp->sf_uc;		/* argument 3 */

	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/*
		 * Signal handler installed with SA_SIGINFO.
		 *
		 * action(signo, siginfo, ucontext)
		 */
		regs->tf_rsi = (register_t)&sfp->sf_si;	/* argument 2 */
		regs->tf_rcx = (register_t)regs->tf_err; /* argument 4 */
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* fill siginfo structure */
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_code = code;
		sf.sf_si.si_addr = (void *)regs->tf_err;
	} else {
		/*
		 * Old FreeBSD-style arguments.
		 *
		 * handler (signo, code, [uc], addr)
		 */
		regs->tf_rsi = (register_t)code;	/* argument 2 */
		regs->tf_rcx = (register_t)regs->tf_err; /* argument 4 */
		sf.sf_ahu.sf_handler = catcher;
	}
	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &lp->lwp_thread->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}
	/*
	 * Save the FPU state and reinit the FP unit
	 */
	npxpush(&sf.sf_uc.uc_mcontext);

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(struct sigframe)) != 0) {
		/*
		 * Something is wrong with the stack pointer.
		 * ...Kill the process.
		 */
		sigexit(lp, SIGILL);
	}

	regs->tf_rsp = (register_t)sfp;
	regs->tf_rip = PS_STRINGS - *(p->p_sysent->sv_szsigcode);
	/*
	 * i386 abi specifies that the direction flag must be cleared
	 * on function entry
	 */
	regs->tf_rflags &= ~(PSL_T|PSL_D);

	/*
	 * 64 bit mode has a code and stack selector but
	 * no data or extra selector.  %fs and %gs are not
	 * supported.
	 */
	regs->tf_cs = _ucodesel;
	regs->tf_ss = _udatasel;
}
/*
 * Sanitize the trapframe for a virtual kernel passing control to a custom
 * VM context.  Remove any items that would otherwise create a privilege
 * issue.
 *
 * XXX at the moment we allow userland to set the resume flag.  Is this a
 * bug?
 */
int
cpu_sanitize_frame(struct trapframe *frame)
{
	frame->tf_cs = _ucodesel;
	frame->tf_ss = _udatasel;
	/* XXX VM (8086) mode not supported? */
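	/*
	 * Strip %rflags down to the resume flag and the bits userland is
	 * allowed to change, then force the reserved default bits back on
	 * and keep interrupts enabled.
	 */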
	frame->tf_rflags &= (PSL_RF | PSL_USERCHANGE | PSL_VM_UNSUPP);
	frame->tf_rflags |= PSL_RESERVED_DEFAULT | PSL_I;
	return (0);
}
/*
 * Sanitize the tls so loading the descriptor does not blow up
 * on us.  For x86_64 we don't have to do anything.
 */
int
cpu_sanitize_tls(struct savetls *tls)
{
	return (0);
}
/*
 * sigreturn(ucontext_t *sigcntxp)
 *
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * state to gain improper privileges.
 */
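/*
 * EFL_SECURE() accepts a new flags value only if it differs from the old
 * one in user-changeable bits; CS_SECURE() accepts a %cs selector only if
 * its requested privilege level is user mode.
 */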
#define	EFL_SECURE(ef, oef)	((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)
#define	CS_SECURE(cs)		(ISPL(cs) == SEL_UPL)
int
sys_sigreturn(struct sigreturn_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p = lp->lwp_proc;
	struct trapframe *regs;
	ucontext_t uc;
	ucontext_t *ucp;
	register_t rflags;
	int cs;
	int error;

	/*
	 * We have to copy the information into kernel space so userland
	 * can't modify it while we are sniffing it.
	 */
	regs = lp->lwp_md.md_regs;
	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error)
		return (error);
	ucp = &uc;
	rflags = ucp->uc_mcontext.mc_rflags;

	/* VM (8086) mode not supported */
	rflags &= ~PSL_VM_UNSUPP;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (lp->lwp_thread->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &lp->lwp_thread->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* go back to user mode if both flags are set */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF))
			trapsignal(lp, SIGBUS, 0);

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_gs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = tf->tf_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
		tf->tf_gs = _udatasel;
	}
	/*
	 * Don't allow users to change privileged or reserved flags.
	 *
	 * XXX do allow users to change the privileged flag PSL_RF.
	 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
	 * should sometimes set it there too.  tf_eflags is kept in
	 * the signal context during signal handling and there is no
	 * other place to remember it, so the PSL_RF bit may be
	 * corrupted by the signal handler without us knowing.
	 * Corruption of the PSL_RF bit at worst causes one more or
	 * one less debugger trap, so allowing it is fairly harmless.
	 */
	if (!EFL_SECURE(rflags & ~PSL_RF, regs->tf_rflags & ~PSL_RF)) {
		kprintf("sigreturn: rflags = 0x%lx\n", (long)rflags);
		return (EINVAL);
	}
	/*
	 * Don't allow users to load a valid privileged %cs.  Let the
	 * hardware check for invalid selectors, excess privilege in
	 * other selectors, invalid %eip's and invalid %esp's.
	 */
	cs = ucp->uc_mcontext.mc_cs;
	if (!CS_SECURE(cs)) {
		kprintf("sigreturn: cs = 0x%x\n", cs);
		trapsignal(lp, SIGBUS, T_PROTFLT);
		return (EINVAL);
	}
	bcopy(&ucp->uc_mcontext.mc_rdi, regs, sizeof(struct trapframe));
	/*
	 * Restore the FPU state from the frame
	 */
	npxpop(&ucp->uc_mcontext);

	/*
	 * Merge saved signal mailbox pending flag to maintain interlock
	 * semantics against system calls.
	 */
	if (ucp->uc_mcontext.mc_xflags & PGEX_MAILBOX)
		p->p_flag |= P_MAILBOX;

	if (ucp->uc_mcontext.mc_onstack & 1)
		lp->lwp_sigstk.ss_flags |= SS_ONSTACK;
	else
		lp->lwp_sigstk.ss_flags &= ~SS_ONSTACK;

	lp->lwp_sigmask = ucp->uc_sigmask;
	SIG_CANTMASK(lp->lwp_sigmask);
	return (EJUSTRETURN);
}
/*
 * Stack frame on entry to function.  %rax will contain the function vector,
 * %rcx will contain the function data.  flags, rcx, and rax will have
 * already been pushed on the stack.
 */
struct upc_frame {
	register_t	rax;
	register_t	rcx;
	register_t	rdx;
	register_t	flags;
	register_t	oldip;
};

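/*
 * sendupcall() pushes one of these frames onto the user stack so that
 * fetchupcall() can later restore %rax, %rcx, %rdx, the user-changeable
 * flags, and the original %rip once the upcall handler has finished.
 */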
void
sendupcall(struct vmupcall *vu, int morepending)
{
	struct lwp *lp = curthread->td_lwp;
	struct trapframe *regs;
	struct upcall upcall;
	struct upc_frame upc_frame;
	int crit_count = 0;

	/*
	 * If we are a virtual kernel running an emulated user process
	 * context, switch back to the virtual kernel context before
	 * trying to post the signal.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		lp->lwp_md.md_regs->tf_trapno = 0;
		vkernel_trap(lp, lp->lwp_md.md_regs);
	}

	/*
	 * Get the upcall data structure
	 */
	if (copyin(lp->lwp_upcall, &upcall, sizeof(upcall)) ||
	    copyin((char *)upcall.upc_uthread + upcall.upc_critoff,
		&crit_count, sizeof(int))) {
		vu->vu_pending = 0;
		kprintf("bad upcall address\n");
		return;
	}
	/*
	 * If the data structure is already marked pending or has a critical
	 * section count, mark the data structure as pending and return
	 * without doing an upcall.  vu_pending is left set.
	 */
	if (upcall.upc_pending || crit_count >= vu->vu_pending) {
		if (upcall.upc_pending < vu->vu_pending) {
			upcall.upc_pending = vu->vu_pending;
			copyout(&upcall.upc_pending,
			    &lp->lwp_upcall->upc_pending,
			    sizeof(upcall.upc_pending));
		}
		return;
	}
	/*
	 * We can run this upcall now, clear vu_pending.
	 *
	 * Bump our critical section count and set or clear the
	 * user pending flag depending on whether more upcalls are
	 * pending.  The user will be responsible for calling
	 * upc_dispatch(-1) to process remaining upcalls.
	 */
	vu->vu_pending = 0;
	upcall.upc_pending = morepending;
	++crit_count;
	copyout(&upcall.upc_pending, &lp->lwp_upcall->upc_pending,
	    sizeof(upcall.upc_pending));
	copyout(&crit_count, (char *)upcall.upc_uthread + upcall.upc_critoff,
	    sizeof(int));
	/*
	 * Construct a stack frame and issue the upcall
	 */
	regs = lp->lwp_md.md_regs;
	upc_frame.rax = regs->tf_rax;
	upc_frame.rcx = regs->tf_rcx;
	upc_frame.rdx = regs->tf_rdx;
	upc_frame.flags = regs->tf_rflags;
	upc_frame.oldip = regs->tf_rip;
	if (copyout(&upc_frame, (void *)(regs->tf_rsp - sizeof(upc_frame)),
	    sizeof(upc_frame)) != 0) {
		kprintf("bad stack on upcall\n");
	} else {
		regs->tf_rax = (register_t)vu->vu_func;
		regs->tf_rcx = (register_t)vu->vu_data;
		regs->tf_rdx = (register_t)lp->lwp_upcall;
		regs->tf_rip = (register_t)vu->vu_ctx;
		regs->tf_rsp -= sizeof(upc_frame);
	}
}
/*
 * fetchupcall occurs in the context of a system call, which means that
 * we have to return EJUSTRETURN in order to prevent eax and edx from
 * being overwritten by the syscall return value.
 *
 * if vu is not NULL we return the new context in %edx, the new data in %ecx,
 * and the function pointer in %eax.
 */
int
fetchupcall(struct vmupcall *vu, int morepending, void *rsp)
{
	struct upc_frame upc_frame;
	struct lwp *lp = curthread->td_lwp;
	struct trapframe *regs;
	int error;
	struct upcall upcall;
	int crit_count;

	regs = lp->lwp_md.md_regs;

	error = copyout(&morepending, &lp->lwp_upcall->upc_pending, sizeof(int));
	if (error == 0) {
		if (vu) {
			/*
			 * This jumps us to the next ready context.
			 */
			vu->vu_pending = 0;
			error = copyin(lp->lwp_upcall, &upcall, sizeof(upcall));
			crit_count = 0;
			if (error == 0)
				error = copyin((char *)upcall.upc_uthread +
				    upcall.upc_critoff, &crit_count, sizeof(int));
			++crit_count;
			if (error == 0)
				error = copyout(&crit_count,
				    (char *)upcall.upc_uthread +
				    upcall.upc_critoff, sizeof(int));
			regs->tf_rax = (register_t)vu->vu_func;
			regs->tf_rcx = (register_t)vu->vu_data;
			regs->tf_rdx = (register_t)lp->lwp_upcall;
			regs->tf_rip = (register_t)vu->vu_ctx;
			regs->tf_rsp = (register_t)rsp;
		} else {
			/*
			 * This returns us to the originally interrupted code.
			 */
			error = copyin(rsp, &upc_frame, sizeof(upc_frame));
			regs->tf_rax = upc_frame.rax;
			regs->tf_rcx = upc_frame.rcx;
			regs->tf_rdx = upc_frame.rdx;
			regs->tf_rflags = (regs->tf_rflags & ~PSL_USERCHANGE) |
			    (upc_frame.flags & PSL_USERCHANGE);
			regs->tf_rip = upc_frame.oldip;
			regs->tf_rsp = (register_t)((char *)rsp +
			    sizeof(upc_frame));
		}
	}
	if (error == 0)
		error = EJUSTRETURN;
	return (error);
}
/*
 * cpu_idle() represents the idle LWKT.  You cannot return from this function
 * (unless you want to blow things up!).  Instead we look for runnable threads
 * and loop or halt as appropriate.  Giant is not held on entry to the thread.
 *
 * The main loop is entered with a critical section held, we must release
 * the critical section before doing anything else.  lwkt_switch() will
 * check for pending interrupts due to entering and exiting its own
 * critical section.
 *
 * Note on cpu_idle_hlt:  On an SMP system we rely on a scheduler IPI
 * to wake a HLTed cpu up.  However, there are cases where the idlethread
 * will be entered with the possibility that no IPI will occur and in such
 * cases lwkt_switch() sets TDF_IDLE_NOHLT.
 */
static int cpu_idle_hlt = 1;
static int cpu_idle_hltcnt;
static int cpu_idle_spincnt;
SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
    &cpu_idle_hlt, 0, "Idle loop HLT enable");
SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hltcnt, CTLFLAG_RW,
    &cpu_idle_hltcnt, 0, "Idle loop entry halts");
SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_spincnt, CTLFLAG_RW,
    &cpu_idle_spincnt, 0, "Idle loop entry spins");
void
cpu_idle(void)
{
	struct thread *td = curthread;
	struct mdglobaldata *gd = mdcpu;
	int reqflags;

	crit_exit();
	KKASSERT(td->td_critcount == 0);

	for (;;) {
		/*
		 * See if there are any LWKTs ready to go.
		 */
		lwkt_switch();

		/*
		 * The idle loop halts only if no threads are schedulable
		 * and no signals have occurred.
		 */
		if (cpu_idle_hlt && !lwkt_runnable() &&
		    (td->td_flags & TDF_IDLE_NOHLT) == 0) {
			if (!lwkt_runnable()) {
				struct timeval tv1, tv2;

				gettimeofday(&tv1, NULL);
				reqflags = gd->mi.gd_reqflags &
				    ~RQF_IDLECHECK_WK_MASK;
				umtx_sleep(&gd->mi.gd_reqflags, reqflags,
				    1000000);
				gettimeofday(&tv2, NULL);
				if (tv2.tv_usec - tv1.tv_usec +
				    (tv2.tv_sec - tv1.tv_sec) * 1000000
				    > 500000) {
					kprintf("cpu %d idlelock %08x %08x\n",
					    gd->mi.gd_cpuid,
					    gd->mi.gd_reqflags, reqflags);
				}
			} else {
				handle_cpu_contention_mask();
			}
			++cpu_idle_hltcnt;
		} else {
			td->td_flags &= ~TDF_IDLE_NOHLT;
			handle_cpu_contention_mask();
			__asm __volatile("pause");
			++cpu_idle_spincnt;
		}
	}
}
/*
 * Called by the LWKT switch core with a critical section held if the only
 * schedulable thread needs the MP lock and we couldn't get it.  On
 * a real cpu we just spin in the scheduler.  In the virtual kernel
 * we sleep for a bit.
 */
void
handle_cpu_contention_mask(void)
{
	cpumask_t mask;

	mask = cpu_contention_mask;
	if (mask && bsfl(mask) != mycpu->gd_cpuid)
		pthread_yield();
}
/*
 * Called by the spinlock code with or without a critical section held
 * when a spinlock is found to be seriously contested.
 *
 * We need to enter a critical section to prevent signals from recursing
 * into pthreads.
 */
void
cpu_spinlock_contested(void)
{
	cpu_pause();
}
/*
 * Clear registers on exec
 */
void
exec_setregs(u_long entry, u_long stack, u_long ps_strings)
{
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	struct pcb *pcb = td->td_pcb;
	struct trapframe *regs = lp->lwp_md.md_regs;

	/* was i386_user_cleanup() in NetBSD */

	bzero((char *)regs, sizeof(struct trapframe));
	regs->tf_rip = entry;
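	/*
	 * Keep the user stack 16-byte aligned: mask off the low bits and
	 * add 8 back so that, as after a normal call, %rsp is offset by one
	 * return-address slot from a 16-byte boundary on entry.
	 */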
	regs->tf_rsp = ((stack - 8) & ~0xFul) + 8;	/* align the stack */
	regs->tf_rdi = stack;				/* argv */
	regs->tf_rflags = PSL_USER | (regs->tf_rflags & PSL_T);
	regs->tf_ss = _udatasel;
	regs->tf_cs = _ucodesel;
	regs->tf_rbx = ps_strings;
	/*
	 * Reset the hardware debug registers if they were in use.
	 * They won't have any meaning for the newly exec'd process.
	 */
	if (pcb->pcb_flags & PCB_DBREGS) {
		pcb->pcb_dr0 = 0;
		pcb->pcb_dr1 = 0;
		pcb->pcb_dr2 = 0;
		pcb->pcb_dr3 = 0;
		pcb->pcb_dr6 = 0;
		pcb->pcb_dr7 = 0;	/* JG set bit 10? */
		if (pcb == td->td_pcb) {
			/*
			 * Clear the debug registers on the running
			 * CPU, otherwise they will end up affecting
			 * the next process we switch to.
			 */
			reset_dbregs();
		}
		pcb->pcb_flags &= ~PCB_DBREGS;
	}
	/*
	 * Initialize the math emulator (if any) for the current process.
	 * Actually, just clear the bit that says that the emulator has
	 * been initialized.  Initialization is delayed until the process
	 * traps to the emulator (if it is done at all) mainly because
	 * emulators don't provide an entry point for initialization.
	 */
	pcb->pcb_flags &= ~FP_SOFTFP;
	/*
	 * NOTE: do not set CR0_TS here.  npxinit() must do it after clearing
	 *	 gd_npxthread.  Otherwise a preemptive interrupt thread
	 *	 may panic in npxdna().
	 */
	load_cr0(rcr0() | CR0_MP);

	/*
	 * NOTE: The MSR values must be correct so we can return to
	 *	 userland.  gd_user_fs/gs must be correct so the switch
	 *	 code knows what the current MSR values are.
	 */
	pcb->pcb_fsbase = 0;	/* Values loaded from PCB on switch */
	pcb->pcb_gsbase = 0;

	/* Initialize the npx (if any) for the current process. */
	npxinit(__INITIAL_NPXCW__);
	/*
	 * note: linux emulator needs edx to be 0x0 on entry, which is
	 * handled in execve simply by setting the 64 bit syscall
	 * return value to 0.
	 */
}

void
cpu_setregs(void)
{
	unsigned int cr0;

	cr0 = rcr0();
	cr0 |= CR0_NE;			/* Done by npxinit() */
	cr0 |= CR0_MP | CR0_TS;		/* Done at every execve() too. */
	cr0 |= CR0_WP | CR0_AM;
	load_cr0(cr0);
}
static int
sysctl_machdep_adjkerntz(SYSCTL_HANDLER_ARGS)
{
	int error;

	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2,
	    req);
	if (!error && req->newptr)
		resettodr();
	return (error);
}

SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT|CTLFLAG_RW,
	&adjkerntz, 0, sysctl_machdep_adjkerntz, "I", "");

extern u_long bootdev;		/* not a cdev_t - encoding is different */
SYSCTL_ULONG(_machdep, OID_AUTO, guessed_bootdev,
	CTLFLAG_RD, &bootdev, 0, "Boot device (not in cdev_t format)");
/*
 * Initialize 386 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */

extern struct user *proc0paddr;

extern inthand_t
	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
	IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
	IDTVEC(xmm), IDTVEC(dblfault),
	IDTVEC(fast_syscall), IDTVEC(fast_syscall32);

#ifdef DEBUG_INTERRUPTS
extern inthand_t *Xrsvdary[256];
#endif
int
ptrace_set_pc(struct lwp *lp, unsigned long addr)
{
	lp->lwp_md.md_regs->tf_rip = addr;
	return (0);
}

int
ptrace_single_step(struct lwp *lp)
{
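	/*
	 * Setting the trace flag in the saved user %rflags makes the cpu
	 * raise a debug trap after the next user instruction, which is how
	 * single stepping is implemented.
	 */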
	lp->lwp_md.md_regs->tf_rflags |= PSL_T;
	return (0);
}
int
fill_regs(struct lwp *lp, struct reg *regs)
{
	struct trapframe *tp;

	tp = lp->lwp_md.md_regs;
	bcopy(&tp->tf_rdi, &regs->r_rdi, sizeof(*regs));
	return (0);
}

int
set_regs(struct lwp *lp, struct reg *regs)
{
	struct trapframe *tp;

	tp = lp->lwp_md.md_regs;
	if (!EFL_SECURE(regs->r_rflags, tp->tf_rflags) ||
	    !CS_SECURE(regs->r_cs))
		return (EINVAL);
	bcopy(&regs->r_rdi, &tp->tf_rdi, sizeof(*regs));
	return (0);
}
#ifndef CPU_DISABLE_SSE
static void
fill_fpregs_xmm(struct savexmm *sv_xmm, struct save87 *sv_87)
{
	struct env87 *penv_87 = &sv_87->sv_env;
	struct envxmm *penv_xmm = &sv_xmm->sv_env;
	int i;

	/* FPU control/status */
	penv_87->en_cw = penv_xmm->en_cw;
	penv_87->en_sw = penv_xmm->en_sw;
	penv_87->en_tw = penv_xmm->en_tw;
	penv_87->en_fip = penv_xmm->en_fip;
	penv_87->en_fcs = penv_xmm->en_fcs;
	penv_87->en_opcode = penv_xmm->en_opcode;
	penv_87->en_foo = penv_xmm->en_foo;
	penv_87->en_fos = penv_xmm->en_fos;

	/* FPU registers */
	for (i = 0; i < 8; ++i)
		sv_87->sv_ac[i] = sv_xmm->sv_fp[i].fp_acc;

	sv_87->sv_ex_sw = sv_xmm->sv_ex_sw;
}

static void
set_fpregs_xmm(struct save87 *sv_87, struct savexmm *sv_xmm)
{
	struct env87 *penv_87 = &sv_87->sv_env;
	struct envxmm *penv_xmm = &sv_xmm->sv_env;
	int i;

	/* FPU control/status */
	penv_xmm->en_cw = penv_87->en_cw;
	penv_xmm->en_sw = penv_87->en_sw;
	penv_xmm->en_tw = penv_87->en_tw;
	penv_xmm->en_fip = penv_87->en_fip;
	penv_xmm->en_fcs = penv_87->en_fcs;
	penv_xmm->en_opcode = penv_87->en_opcode;
	penv_xmm->en_foo = penv_87->en_foo;
	penv_xmm->en_fos = penv_87->en_fos;

	/* FPU registers */
	for (i = 0; i < 8; ++i)
		sv_xmm->sv_fp[i].fp_acc = sv_87->sv_ac[i];

	sv_xmm->sv_ex_sw = sv_87->sv_ex_sw;
}
#endif /* CPU_DISABLE_SSE */
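/*
 * fill_fpregs()/set_fpregs() below export and import the FPU state for
 * ptrace and core dumps.  When the cpu saves state in FXSAVE format the
 * helpers above convert to and from the legacy save87 layout that
 * struct fpreg expects.
 */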
int
fill_fpregs(struct lwp *lp, struct fpreg *fpregs)
{
#ifndef CPU_DISABLE_SSE
	if (cpu_fxsr) {
		fill_fpregs_xmm(&lp->lwp_thread->td_pcb->pcb_save.sv_xmm,
		    (struct save87 *)fpregs);
		return (0);
	}
#endif /* CPU_DISABLE_SSE */
	bcopy(&lp->lwp_thread->td_pcb->pcb_save.sv_87, fpregs, sizeof *fpregs);
	return (0);
}

int
set_fpregs(struct lwp *lp, struct fpreg *fpregs)
{
#ifndef CPU_DISABLE_SSE
	if (cpu_fxsr) {
		set_fpregs_xmm((struct save87 *)fpregs,
		    &lp->lwp_thread->td_pcb->pcb_save.sv_xmm);
		return (0);
	}
#endif /* CPU_DISABLE_SSE */
	bcopy(fpregs, &lp->lwp_thread->td_pcb->pcb_save.sv_87, sizeof *fpregs);
	return (0);
}
int
fill_dbregs(struct lwp *lp, struct dbreg *dbregs)
{
	return (EINVAL);
}

int
set_dbregs(struct lwp *lp, struct dbreg *dbregs)
{
	return (EINVAL);
}
/*
 * Return > 0 if a hardware breakpoint has been hit, and the
 * breakpoint was in user space.  Return 0, otherwise.
 */
int
user_dbreg_trap(void)
{
	u_int32_t dr7, dr6;	/* debug registers dr6 and dr7 */
	u_int32_t bp;		/* breakpoint bits extracted from dr6 */
	int nbp;		/* number of breakpoints that triggered */
	caddr_t addr[4];	/* breakpoint addresses */
	int i;

	dr7 = rdr7();
	if ((dr7 & 0x000000ff) == 0) {
		/*
		 * all GE and LE bits in the dr7 register are zero,
		 * thus the trap couldn't have been caused by the
		 * hardware debug registers
		 */
		return 0;
	}

	nbp = 0;
	dr6 = rdr6();
	bp = dr6 & 0x0000000f;

	if (bp == 0) {
		/*
		 * None of the breakpoint bits are set meaning this
		 * trap was not caused by any of the debug registers
		 */
		return 0;
	}

	/*
	 * at least one of the breakpoints were hit, check to see
	 * which ones and if any of them are user space addresses
	 */
	if (bp & 0x01)
		addr[nbp++] = (caddr_t)rdr0();
	if (bp & 0x02)
		addr[nbp++] = (caddr_t)rdr1();
	if (bp & 0x04)
		addr[nbp++] = (caddr_t)rdr2();
	if (bp & 0x08)
		addr[nbp++] = (caddr_t)rdr3();

	for (i = 0; i < nbp; i++) {
		if (addr[i] < (caddr_t)VM_MAX_USER_ADDRESS) {
			/*
			 * addr[i] is in user space
			 */
			return nbp;
		}
	}

	/*
	 * None of the breakpoints are in user space.
	 */
	return 0;
}
	cpu_feature = regs[3];
void
Debugger(const char *msg)
{
	kprintf("Debugger(\"%s\") called.\n", msg);
}