/*
 * Copyright (c) 2003,2004,2008 The DragonFly Project.  All rights reserved.
 * Copyright (c) 2008 Jordan Gordeev.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/swtch.s,v 1.89.2.10 2003/01/23 03:36:24 ps Exp $
 * $DragonFly: src/sys/platform/pc64/amd64/swtch.s,v 1.3 2008/08/29 17:07:10 dillon Exp $
 */
//#include "use_npx.h"

#include <sys/rtprio.h>

#include <machine/asmacros.h>
#include <machine/segments.h>

#include <machine/pmap.h>

#include <machine_base/apic/apicreg.h>

#include <machine/lock.h>
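
/*
 * CHECKNZ(expr, scratch_reg) is a debugging assertion: it loads expr
 * into scratch_reg and traps into the debugger (int $3) if the value
 * is zero.
 */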
#define CHECKNZ(expr, scratch_reg) \
	movq expr, scratch_reg; testq scratch_reg, scratch_reg; jnz 7f; int $3; 7:

#define MPLOCKED	lock ;

#if defined(SWTCH_OPTIM_STATS)
	.globl	swtch_optim_stats, tlb_flush_count
swtch_optim_stats:	.long	0		/* number of _swtch_optims */
tlb_flush_count:	.long	0
#endif
/*
 * cpu_heavy_switch(struct thread *next_thread)
 *
 * Switch from the current thread to a new thread.  This entry
 * is normally called via the thread->td_switch function, and will
 * only be called when the current thread is a heavy weight process.
 *
 * Some instructions have been reordered to reduce pipeline stalls.
 *
 * YYY disable interrupts once giant is removed.
 */
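
/*
 * Rough sketch (illustrative C-like pseudocode, not part of the build;
 * the field names mirror the TD_*, PCB_* and LWP_* asm offsets used
 * below, everything else is assumed) of what cpu_heavy_switch() does:
 *
 *	void cpu_heavy_switch(struct thread *ntd)
 *	{
 *		struct thread *otd = curthread;
 *
 *		save_callee_saved(otd->td_pcb);	// %rip/%rsp/%rbx/%rbp/%r12-%r15
 *		clear our cpu's bit in otd->td_lwp->lwp_vmspace->vm_pmap.pm_active;
 *		save the FP state via npxsave() if we own PCPU(npxthread);
 *		push(&cpu_heavy_restore);	// consumed by 'ret' at switch-in
 *		otd->td_sp = rsp;
 *		curthread = ntd;
 *		rsp = ntd->td_sp;
 *		ret;	// enters ntd's restore function, ntd in %rax, otd in %rbx
 *	}
 */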
ENTRY(cpu_heavy_switch)
	/*
	 * Save RIP, RSP and callee-saved registers (RBX, RBP, R12-R15).
	 */
	movq	PCPU(curthread),%rcx
	/* On top of the stack is the return address. */
	movq	(%rsp),%rax			/* (reorder optimization) */
	movq	TD_PCB(%rcx),%rdx		/* RDX = PCB */
	movq	%rax,PCB_RIP(%rdx)		/* return PC may be modified */
	movq	%rbx,PCB_RBX(%rdx)
	movq	%rsp,PCB_RSP(%rdx)
	movq	%rbp,PCB_RBP(%rdx)
	movq	%r12,PCB_R12(%rdx)
	movq	%r13,PCB_R13(%rdx)
	movq	%r14,PCB_R14(%rdx)
	movq	%r15,PCB_R15(%rdx)

	movq	%rcx,%rbx			/* RBX = curthread */
	movq	TD_LWP(%rcx),%rcx
	movl	PCPU(cpuid), %eax
	movq	LWP_VMSPACE(%rcx), %rcx		/* RCX = vmspace */
	MPLOCKED btrl	%eax, VM_PMAP+PM_ACTIVE(%rcx)

	/*
	 * Push the LWKT switch restore function, which resumes a heavy
	 * weight process.  Note that the LWKT switcher is based on
	 * TD_SP, while the heavy weight process switcher is based on
	 * PCB_RSP.  TD_SP is usually two ints pushed relative to
	 * PCB_RSP.  We push the flags for later restore by cpu_heavy_restore.
	 */
	movq	$cpu_heavy_restore, %rax
	pushq	%rax
	movq	%rsp,TD_SP(%rbx)
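
	/*
	 * A rough picture of the saved stack at this point (illustrative;
	 * higher addresses first):
	 *
	 *	PCB_RSP ->	return address from the call to td_switch
	 *	TD_SP	->	&cpu_heavy_restore, consumed by the 'ret'
	 *			executed at the next switch-in
	 */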
	/*
	 * Save debug regs if necessary
	 */
	movq	PCB_FLAGS(%rdx),%rax
	andq	$PCB_DBREGS,%rax
	jz	1f				/* no, skip over */
	movq	%dr7,%rax			/* yes, do the save */
	movq	%rax,PCB_DR7(%rdx)
	/* JG correct value? */
	andq	$0x0000fc00, %rax		/* disable all watchpoints */
	movq	%rax,%dr7
	movq	%dr6,%rax
	movq	%rax,PCB_DR6(%rdx)
	movq	%dr3,%rax
	movq	%rax,PCB_DR3(%rdx)
	movq	%dr2,%rax
	movq	%rax,PCB_DR2(%rdx)
	movq	%dr1,%rax
	movq	%rax,PCB_DR1(%rdx)
	movq	%dr0,%rax
	movq	%rax,PCB_DR0(%rdx)
1:

	/*
	 * Save the FP state if we have used the FP.  Note that calling
	 * npxsave will NULL out PCPU(npxthread).
	 */
	cmpq	%rbx,PCPU(npxthread)

	movq	TD_SAVEFPU(%rbx),%rdi		/* argument in %rdi (amd64 ABI) */
	call	npxsave				/* do it in a big C function */
						/* RAX, RCX, RDX trashed */

#endif	/* NNPX > 0 */
	/*
	 * Switch to the next thread, which was passed as an argument
	 * to cpu_heavy_switch().  The argument is in %rdi.
	 * Set the current thread, load the stack pointer,
	 * and 'ret' into the switch-restore function.
	 *
	 * The switch restore function expects the new thread to be in %rax
	 * and the old one to be in %rbx.
	 *
	 * There is a one-instruction window where curthread is the new
	 * thread but %rsp still points to the old thread's stack; this
	 * is fine because we are protected by a critical section.
	 */
	movq	%rdi,%rax			/* RAX = newtd, RBX = oldtd */
	movq	%rax,PCPU(curthread)
	movq	TD_SP(%rax),%rsp
	ret
/*
 * cpu_exit_switch(struct thread *next)
 *
 * The switch function is changed to this when a thread is going away
 * for good.  We have to ensure that the MMU state is not cached, and
 * we don't bother saving the existing thread state before switching.
 *
 * At this point we are in a critical section and this cpu owns the
 * thread's token, which serves as an interlock until the switchout is
 * complete.
 */
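
/*
 * Rough sketch (illustrative pseudocode, not part of the build) of the
 * exit switch below:
 *
 *	point %cr3 at the kernel page table, so the dying thread's MMU
 *	    state is no longer cached;
 *	if (curthread->td_lwp)		// deactivate the pmap, if any
 *		clear our cpu's bit in lwp_vmspace->vm_pmap.pm_active;
 *	curthread = next;
 *	rsp = next->td_sp;
 *	ret;				// into next's restore function
 */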
ENTRY(cpu_exit_switch)
	/*
	 * Get us out of the vmspace
	 */
	/* JG no increment of statistics counters? see cpu_heavy_restore */

	orq	$(PG_RW|PG_V), %rcx

	movq	PCPU(curthread),%rbx
	/*
	 * If this is a process/lwp, deactivate the pmap after we've
	 * switched it out.
	 */
	movq	TD_LWP(%rbx),%rcx

	movl	PCPU(cpuid), %eax
	movq	LWP_VMSPACE(%rcx), %rcx		/* RCX = vmspace */
	MPLOCKED btrl	%eax, VM_PMAP+PM_ACTIVE(%rcx)
	/*
	 * Switch to the next thread.  RET into the restore function, which
	 * expects the new thread in RAX and the old in RBX.
	 *
	 * There is a one-instruction window where curthread is the new
	 * thread but %rsp still points to the old thread's stack; this
	 * is fine because we are protected by a critical section.
	 */
	movq	%rdi,%rax
	movq	%rax,PCPU(curthread)
	movq	TD_SP(%rax),%rsp
	ret
/*
 * cpu_heavy_restore()	(current thread in %rax on entry)
 *
 * Restore the thread after an LWKT switch.  This entry is normally
 * called via the LWKT switch restore function, which was pulled
 * off the thread stack and jumped to.
 *
 * This entry is only called if the thread was previously saved
 * using cpu_heavy_switch() (the heavy weight process thread switcher),
 * or when a new process is initially scheduled.  The first thing we
 * do is clear the TDF_RUNNING bit in the old thread and set it in the
 * new thread.
 *
 * NOTE: The lwp may be in any state, not necessarily LSRUN, because
 * a preemption switch may interrupt the process and then return via
 * a heavy or lightweight switch.
 *
 * YYY theoretically we do not have to restore everything here; a lot
 * of this junk can wait until we return to usermode.  But for now
 * we restore everything.
 *
 * YYY the PCB crap is really crap, it makes startup a bitch because
 * we can't switch away.
 *
 * YYY note: spl check is done in mi_switch when it splx()'s.
 */
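
/*
 * Rough sketch (illustrative C-like pseudocode, not part of the build)
 * of the restore path below:
 *
 *	set our cpu's bit in newlwp->lwp_vmspace->vm_pmap.pm_active;
 *	if (pcb->pcb_cr3 != %cr3)		// avoid a needless TLB flush
 *		reload %cr3;
 *	oldtd->td_flags &= ~TDF_RUNNING;	// only after %cr3 is cleaned up
 *	newtd->td_flags |= TDF_RUNNING;
 *	restore tss/ldt/tls, the user %fs/%gs bases, and (if PCB_DBREGS
 *	    is set) the debug registers;
 *	restore the callee-saved registers and return through PCB_RIP;
 */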
ENTRY(cpu_heavy_restore)
	movq	TD_PCB(%rax),%rdx		/* RDX = PCB */
	movq	TD_LWP(%rax),%rcx

#if defined(SWTCH_OPTIM_STATS)
	incl	_swtch_optim_stats
#endif
	/*
	 * Tell the pmap that our cpu is using the VMSPACE now.  We cannot
	 * safely test/reload %cr3 until after we have set the bit in the
	 * pmap (remember, we do not hold the MP lock in the switch code).
	 */
	movq	LWP_VMSPACE(%rcx), %rcx		/* RCX = vmspace */
	movl	PCPU(cpuid), %esi
	MPLOCKED btsl	%esi, VM_PMAP+PM_ACTIVE(%rcx)
	/*
	 * Restore the MMU address space.  If it is the same as the last
	 * thread we don't have to invalidate the tlb (i.e. reload cr3).
	 * YYY which naturally also means that the PM_ACTIVE bit had better
	 * already have been set before we set it above, check? YYY
	 */
	movq	PCB_CR3(%rdx),%rcx

#if defined(SWTCH_OPTIM_STATS)
	decl	_swtch_optim_stats
	incl	_tlb_flush_count
#endif

	movq	PCB_CR3(%rdx),%rcx
	orq	$(PG_RW|PG_U|PG_V), %rcx
	/*
	 * Clear TDF_RUNNING flag in old thread only after cleaning up
	 * %cr3.  The target thread is already protected by being TDF_RUNQ
	 * so setting TDF_RUNNING isn't as big a deal.
	 */
	andl	$~TDF_RUNNING,TD_FLAGS(%rbx)
	orl	$TDF_RUNNING,TD_FLAGS(%rax)
	/*
	 * Deal with the PCB extension, restore the private tss
	 */
	movq	PCB_EXT(%rdx),%rdi		/* check for a PCB extension */
	/* JG cheaper than "movq $1,%rbx", right? */
	/* JG what's that magic value $1? */
	movl	$1,%ebx				/* maybe mark use of a private tss */
	/*
	 * Going back to the common_tss.  We may need to update TSS_ESP0
	 * which sets the top of the supervisor stack when entering from
	 * usermode.  The PCB is at the top of the stack but we need another
	 * 16 bytes to take vm86 into account.
	 */
	movq	%rbx, PCPU(common_tss) + TSS_RSP0
	movq	%rbx, PCPU(rsp0)

	cmpl	$0,PCPU(private_tss)		/* don't have to reload if */
	je	3f				/* already using the common TSS */
	subl	%ebx,%ebx			/* unmark use of private tss */

	/*
	 * Get the address of the common TSS descriptor for the ltr.
	 * There is no way to get the address of a segment-accessed variable
	 * so we store a self-referential pointer at the base of the per-cpu
	 * data area and add the appropriate offset.
	 */
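	/*
	 * In C terms (names illustrative), the trick is:
	 *
	 *	gd = *(struct mdglobaldata **)gs_base;	// i.e. %gs:0
	 *	tssd_addr = (char *)gd + offset_of(gd_common_tssd);
	 *
	 * read the self-pointer stored at the base of the per-cpu area,
	 * then add the field offset to obtain a linear address.
	 */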
	movq	$gd_common_tssd, %rdi
	/* JG name for "%gs:0"? */

	/*
	 * Move the correct TSS descriptor into the GDT slot, then reload
	 * ltr.
	 */
	movl	%ebx,PCPU(private_tss)		/* mark/unmark private tss */
	movq	PCPU(tss_gdt), %rbx		/* entry in GDT */

	movl	$GPROC0_SEL*8, %esi		/* GSEL(entry, SEL_KPL) */
	/*
	 * Restore the user %gs and %fs
	 */
	movq	PCB_FSBASE(%rdx),%r9
	cmpq	PCPU(user_fs),%r9

	movq	%r9,PCPU(user_fs)
	movl	$MSR_FSBASE,%ecx
	movl	PCB_FSBASE(%r10),%eax
	movl	PCB_FSBASE+4(%r10),%edx

	movq	PCB_GSBASE(%rdx),%r9
	cmpq	PCPU(user_gs),%r9

	movq	%r9,PCPU(user_gs)
	movl	$MSR_KGSBASE,%ecx		/* later swapgs moves it to GSBASE */
	movl	PCB_GSBASE(%r10),%eax
	movl	PCB_GSBASE+4(%r10),%edx
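	/*
	 * Note: an MSR write (wrmsr) takes the MSR number in %ecx and the
	 * 64-bit value in %edx:%eax, which is why the base address is
	 * split into two 32-bit halves above.
	 */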
	/*
	 * Restore general registers.
	 */
	movq	PCB_RBX(%rdx), %rbx
	movq	PCB_RSP(%rdx), %rsp
	movq	PCB_RBP(%rdx), %rbp
	movq	PCB_R12(%rdx), %r12
	movq	PCB_R13(%rdx), %r13
	movq	PCB_R14(%rdx), %r14
	movq	PCB_R15(%rdx), %r15
	movq	PCB_RIP(%rdx), %rax
	movq	%rax, (%rsp)			/* restore the return address */
	/*
	 * Restore the user LDT if we have one
	 */
	cmpq	$0, PCB_USERLDT(%rdx)

	movl	_default_ldt,%eax
	cmpl	PCPU(currentldt),%eax

	movl	%eax,PCPU(currentldt)

	/*
	 * Restore the user TLS if we have one
	 */
	/*
	 * Restore the DEBUG register state if necessary.
	 */
	movq	PCB_FLAGS(%rdx),%rax
	andq	$PCB_DBREGS,%rax
	jz	1f				/* no, skip over */
	movq	PCB_DR6(%rdx),%rax		/* yes, do the restore */
	movq	%rax,%dr6
	movq	PCB_DR3(%rdx),%rax
	movq	%rax,%dr3
	movq	PCB_DR2(%rdx),%rax
	movq	%rax,%dr2
	movq	PCB_DR1(%rdx),%rax
	movq	%rax,%dr1
	movq	PCB_DR0(%rdx),%rax
	movq	%rax,%dr0
	movq	%dr7,%rax			/* load dr7 so as not to disturb */
	/* JG correct value? */
	andq	$0x0000fc00,%rax		/* reserved bits */
	/* JG we've got more registers on amd64 */

	movq	PCB_DR7(%rdx),%rbx
	/* JG correct value? */
	andq	$~0x0000fc00,%rbx
	orq	%rbx,%rax
	movq	%rax,%dr7
1:
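	/*
	 * The net effect of the %dr7 manipulation above:
	 *
	 *	%dr7 = (%dr7 & 0x0000fc00) | (pcb_dr7 & ~0x0000fc00)
	 *
	 * i.e. keep the reserved bits of the live %dr7 and take the rest
	 * from the PCB.
	 */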
/*
 * savectx(struct pcb *pcb)
 *
 * Update pcb, saving current processor state.
 */
ENTRY(savectx)
	/* JG use %rdi instead of %rcx everywhere? */

	/* caller's return address - child won't execute this routine */
	movq	(%rsp),%rax
	movq	%rax,PCB_RIP(%rcx)
	movq	$0x000ffffffffff000, %rcx

	movq	%rax,PCB_CR3(%rcx)

	movq	%rbx,PCB_RBX(%rcx)
	movq	%rsp,PCB_RSP(%rcx)
	movq	%rbp,PCB_RBP(%rcx)
	movq	%r12,PCB_R12(%rcx)
	movq	%r13,PCB_R13(%rcx)
	movq	%r14,PCB_R14(%rcx)
	movq	%r15,PCB_R15(%rcx)
	/*
	 * If npxthread == NULL, then the npx h/w state is irrelevant and the
	 * state had better already be in the pcb.  This is true for forks
	 * but not for dumps (the old book-keeping with FP flags in the pcb
	 * always lost for dumps because the dump pcb has 0 flags).
	 *
	 * If npxthread != NULL, then we have to save the npx h/w state to
	 * npxthread's pcb and copy it to the requested pcb, or save to the
	 * requested pcb and reload.  Copying is easier because we would
	 * have to handle h/w bugs for reloading.  We used to lose the
	 * parent's npx state for forks by forgetting to reload.
	 */
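	/*
	 * A rough sketch of the copy case described above, as C
	 * (illustrative only; npxsave() is the function called below,
	 * the field names mirror the TD_SAVEFPU/PCB_SAVEFPU offsets):
	 *
	 *	if (npxthread != NULL) {
	 *		npxsave(npxthread->td_savefpu);	  // dump live FP state
	 *		bcopy(npxthread->td_savefpu,	  // then copy it into
	 *		    &pcb->pcb_savefpu,		  // the requested pcb
	 *		    PCB_SAVEFPU_SIZE);
	 *	}
	 *	// else: the state is already in the pcb; nothing to do
	 */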
	movq	PCPU(npxthread),%rax

	pushq	%rcx				/* target pcb */
	movq	TD_SAVEFPU(%rax),%rax		/* originating savefpu area */

	pushq	$PCB_SAVEFPU_SIZE
	leaq	PCB_SAVEFPU(%rcx),%rcx

#endif	/* NNPX > 0 */
/*
 * cpu_idle_restore()	(current thread in %rax on entry) (one-time execution)
 *
 * Don't bother setting up any regs other than %rbp so backtraces
 * don't die.  This restore function is used to bootstrap into the
 * cpu_idle() LWKT only, after that cpu_lwkt_*() will be used for
 * subsequent switches.
 *
 * Clear TDF_RUNNING in old thread only after we've cleaned up %cr3.
 *
 * If we are an AP we have to call ap_init() before jumping to
 * cpu_idle().  ap_init() will synchronize with the BP and finish
 * setting up various ncpu-dependent globaldata fields.  This may
 * happen on UP as well as SMP if we happen to be simulating multiple
 * cpus.
 */
ENTRY(cpu_idle_restore)

	orq	$(PG_RW|PG_V), %rcx

	andl	$~TDF_RUNNING,TD_FLAGS(%rbx)
	orl	$TDF_RUNNING,TD_FLAGS(%rax)

	/*
	 * ap_init can decide to enable interrupts early, but otherwise, or if
	 * we are UP, do it here.
	 */
/*
 * cpu_kthread_restore()	(current thread is %rax on entry) (one-time execution)
 *
 * Don't bother setting up any regs other than %rbp so backtraces
 * don't die.  This restore function is used to bootstrap into an
 * LWKT based kernel thread only.  cpu_lwkt_switch() will be used
 * after the first switch.
 *
 * Since all of our context is on the stack we are reentrant and
 * we can release our critical section and enable interrupts early.
 */
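
/*
 * Rough sketch (illustrative pseudocode, not part of the build) of the
 * bootstrap below:
 *
 *	newtd->td_flags |= TDF_RUNNING;
 *	newtd->td_pri -= TDPRI_CRIT;	// leave the critical section
 *	func = pcb->pcb_rbx;		// thread function, from PCB_RBX
 *	arg  = pcb->pcb_r12;		// its argument, passed in %rdi
 *	func(arg);			// return address inherited from the stack
 */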
ENTRY(cpu_kthread_restore)

	movq	TD_PCB(%rax),%rdx
	/* JG "movq $0, %rbp"? "xorq %rbp, %rbp"? */

	orq	$(PG_RW|PG_V), %rcx

	/* rax and rbx come from the switchout code */
	andl	$~TDF_RUNNING,TD_FLAGS(%rbx)
	orl	$TDF_RUNNING,TD_FLAGS(%rax)
	subl	$TDPRI_CRIT,TD_PRI(%rax)
	movq	PCB_R12(%rdx),%rdi	/* argument to RBX function */
	movq	PCB_RBX(%rdx),%rax	/* thread function */
	/* note: top of stack return address inherited by function */
/*
 * cpu_lwkt_switch(struct thread *)
 *
 * Standard LWKT switching function.  Only non-scratch registers are
 * saved and we don't bother with the MMU state or anything else.
 *
 * This function is always called while in a critical section.
 *
 * There is a one-instruction window where curthread is the new
 * thread but %rsp still points to the old thread's stack; this
 * is fine because we are protected by a critical section.
 */
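
/*
 * Rough sketch (illustrative pseudocode, not part of the build) of the
 * LWKT switch below:
 *
 *	push callee-saved registers;	// %rbp first, for backtraces
 *	save the FP state via npxsave() if we own PCPU(npxthread);
 *	push(&cpu_lwkt_restore);
 *	oldtd->td_sp = rsp;
 *	curthread = newtd;
 *	rsp = newtd->td_sp;
 *	ret;	// enters newtd's restore function, new in %rax, old in %rbx
 */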
ENTRY(cpu_lwkt_switch)
	pushq	%rbp	/* JG note: GDB hacked to locate ebp relative to td_sp */
	/* JG we've got more registers on AMD64 */

	movq	PCPU(curthread),%rbx
	/*
	 * Save the FP state if we have used the FP.  Note that calling
	 * npxsave will NULL out PCPU(npxthread).
	 *
	 * We have to deal with the FP state for LWKT threads in case they
	 * happen to get preempted or block while doing an optimized
	 * bzero/bcopy/memcpy.
	 */
	cmpq	%rbx,PCPU(npxthread)

	movq	TD_SAVEFPU(%rbx),%rdi		/* argument in %rdi (amd64 ABI) */
	call	npxsave				/* do it in a big C function */
						/* RAX, RCX, RDX trashed */

#endif	/* NNPX > 0 */
	movq	%rdi,%rax		/* switch to this thread */
	pushq	$cpu_lwkt_restore
	movq	%rsp,TD_SP(%rbx)
	movq	%rax,PCPU(curthread)
	movq	TD_SP(%rax),%rsp

	/*
	 * %rax contains new thread, %rbx contains old thread.
	 */
	ret
/*
 * cpu_lwkt_restore()	(current thread in %rax on entry)
 *
 * Standard LWKT restore function.  This function is always called
 * while in a critical section.
 *
 * Warning: due to preemption the restore function can be used to
 * 'return' to the original thread.  Interrupt disablement must be
 * protected through the switch so we cannot run splz here.
 *
 * YYY we theoretically do not need to load KPML4phys into cr3, but if
 * so we need a way to detect when the PTD we are using is being
 * deleted due to a process exiting.
 */
ENTRY(cpu_lwkt_restore)
	movq	common_lvl4_phys,%rcx	/* YYY borrow but beware desched/cpuchg/exit */

	orq	$(PG_RW|PG_V), %rcx

	andl	$~TDF_RUNNING,TD_FLAGS(%rbx)
	orl	$TDF_RUNNING,TD_FLAGS(%rax)