/*
 * Copyright (c) 2003,2004,2008 The DragonFly Project.  All rights reserved.
 * Copyright (c) 2008 Jordan Gordeev.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/swtch.s,v 1.89.2.10 2003/01/23 03:36:24 ps Exp $
 */
//#include "use_npx.h"

#include <sys/rtprio.h>

#include <machine/asmacros.h>
#include <machine/segments.h>

#include <machine/pmap.h>
#include <machine/lock.h>
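/*
 * CHECKNZ(expr, scratch_reg): sanity-check helper.  It loads the expression
 * into the scratch register and traps into the debugger (int $3) if the
 * value is zero; otherwise execution falls through at the local label 7:.
 */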
#define CHECKNZ(expr, scratch_reg) \
        movq expr, scratch_reg; testq scratch_reg, scratch_reg; jnz 7f; int $3; 7:
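/*
 * MPLOCKED expands to the "lock" prefix so that the read-modify-write
 * instructions below (the andq/orq updates of the pmap active masks)
 * execute atomically with respect to other cpus.
 */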
#define MPLOCKED        lock ;

        .globl  lwkt_switch_return

#if defined(SWTCH_OPTIM_STATS)
        .globl  swtch_optim_stats, tlb_flush_count
swtch_optim_stats:      .long   0               /* number of _swtch_optims */
tlb_flush_count:        .long   0
#endif
/*
 * cpu_heavy_switch(struct thread *next_thread)
 *
 * Switch from the current thread to a new thread.  This entry
 * is normally called via the thread->td_switch function, and will
 * only be called when the current thread is a heavy weight process.
 *
 * Some instructions have been reordered to reduce pipeline stalls.
 *
 * YYY disable interrupts once giant is removed.
 */
ENTRY(cpu_heavy_switch)
        /*
         * Save RIP, RSP and callee-saved registers (RBX, RBP, R12-R15).
         */
        movq    PCPU(curthread),%rcx
        /* On top of the stack is the return address. */
        movq    (%rsp),%rax                     /* (reorder optimization) */
        movq    TD_PCB(%rcx),%rdx               /* RDX = PCB */
        movq    %rax,PCB_RIP(%rdx)              /* return PC may be modified */
        movq    %rbx,PCB_RBX(%rdx)
        movq    %rsp,PCB_RSP(%rdx)
        movq    %rbp,PCB_RBP(%rdx)
        movq    %r12,PCB_R12(%rdx)
        movq    %r13,PCB_R13(%rdx)
        movq    %r14,PCB_R14(%rdx)
        movq    %r15,PCB_R15(%rdx)
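        /*
         * Only %rip, %rsp and the callee-saved registers need to be saved
         * here: cpu_heavy_switch() is reached via an ordinary function
         * call, so the SysV AMD64 ABI already treats the caller-saved
         * registers as scratch at this point.
         */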
        /*
         * Clear the cpu bit in the pmap active mask.  The restore
         * function will set the bit in the pmap active mask.
         *
         * Special case: when switching between threads sharing the
         * same vmspace, if we avoid clearing the bit we do not have
         * to reload %cr3 (if we clear the bit we could race page
         * table ops done by other threads and would have to reload
         * %cr3, because those ops will not know to IPI us).
         */
        movq    %rcx,%rbx                       /* RBX = oldthread */
        movq    TD_LWP(%rcx),%rcx               /* RCX = oldlwp */
        movq    TD_LWP(%rdi),%r13               /* R13 = newlwp */
        movq    LWP_VMSPACE(%rcx), %rcx         /* RCX = oldvmspace */
        testq   %r13,%r13                       /* might not be a heavy */
        cmpq    LWP_VMSPACE(%r13),%rcx          /* same vmspace? */
        movq    PCPU(other_cpus)+0, %rax
        MPLOCKED andq   %rax, VM_PMAP+PM_ACTIVE+0(%rcx)
        movq    PCPU(other_cpus)+8, %rax
        MPLOCKED andq   %rax, VM_PMAP+PM_ACTIVE+8(%rcx)
        movq    PCPU(other_cpus)+16, %rax
        MPLOCKED andq   %rax, VM_PMAP+PM_ACTIVE+16(%rcx)
        movq    PCPU(other_cpus)+24, %rax
        MPLOCKED andq   %rax, VM_PMAP+PM_ACTIVE+24(%rcx)
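        /*
         * PM_ACTIVE is a cpumask wide enough for 256 cpus, stored as four
         * 64-bit words.  PCPU(other_cpus) holds the complement of this
         * cpu's bit, so the locked "and" atomically clears just our bit
         * from the old vmspace's active mask.
         */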
        /*
         * Push the LWKT switch restore function, which resumes a heavy
         * weight process.  Note that the LWKT switcher is based on
         * TD_SP, while the heavy weight process switcher is based on
         * PCB_RSP.  TD_SP is usually two ints pushed relative to
         * PCB_RSP.  We push the flags for later restore by cpu_heavy_restore.
         */
        movq    $cpu_heavy_restore, %rax
        pushq   %rax
        movq    %rsp,TD_SP(%rbx)
        /*
         * Save debug regs if necessary
         */
        movq    PCB_FLAGS(%rdx),%rax
        andq    $PCB_DBREGS,%rax
        jz      1f                              /* no, skip over */
        movq    %dr7,%rax                       /* yes, do the save */
        movq    %rax,PCB_DR7(%rdx)
        /* JG correct value? */
        andq    $0x0000fc00, %rax               /* disable all watchpoints */
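        /*
         * In %dr7, bits 0-7 are the local/global enable bits for the four
         * hardware breakpoints and bits 16-31 select their type and length;
         * masking with 0x0000fc00 clears all of those while preserving the
         * reserved/control bits 10-15, so no watchpoint can fire while this
         * thread is switched out.
         */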
        movq    %rax,%dr7
        movq    %dr6,%rax
        movq    %rax,PCB_DR6(%rdx)
        movq    %dr3,%rax
        movq    %rax,PCB_DR3(%rdx)
        movq    %dr2,%rax
        movq    %rax,PCB_DR2(%rdx)
        movq    %dr1,%rax
        movq    %rax,PCB_DR1(%rdx)
        movq    %dr0,%rax
        movq    %rax,PCB_DR0(%rdx)
1:
        /*
         * Save the FP state if we have used the FP.  Note that calling
         * npxsave will NULL out PCPU(npxthread).
         */
        cmpq    %rbx,PCPU(npxthread)
        movq    %rdi,%r12                       /* save %rdi.  %r12 is callee-saved */
        movq    TD_SAVEFPU(%rbx),%rdi
        call    npxsave                         /* do it in a big C function */
        movq    %r12,%rdi                       /* restore %rdi */
        /*
         * Switch to the next thread, which was passed as an argument
         * to cpu_heavy_switch().  The argument is in %rdi.
         * Set the current thread, load the stack pointer,
         * and 'ret' into the switch-restore function.
         *
         * The switch restore function expects the new thread to be in %rax
         * and the old one to be in %rbx.
         *
         * There is a one-instruction window where curthread is the new
         * thread while %rsp still points to the old thread's stack, but
         * we are protected by a critical section so it is ok.
         */
        movq    %rdi,%rax                       /* RAX = newtd, RBX = oldtd */
        movq    %rax,PCPU(curthread)
        movq    TD_SP(%rax),%rsp
        ret
/*
 * cpu_exit_switch(struct thread *next)
 *
 * The switch function is changed to this when a thread is going away
 * for good.  We have to ensure that the MMU state is not cached, and
 * we don't bother saving the existing thread state before switching.
 *
 * At this point we are in a critical section and this cpu owns the
 * thread's token, which serves as an interlock until the switchout is
 * complete.
 */
ENTRY(cpu_exit_switch)
        /*
         * Get us out of the vmspace
         */
        /* JG no increment of statistics counters? see cpu_heavy_restore */
        movq    PCPU(curthread),%rbx

        /*
         * If this is a process/lwp, deactivate the pmap after we've
         * switched it out.
         */
        movq    TD_LWP(%rbx),%rcx
        movq    LWP_VMSPACE(%rcx), %rcx         /* RCX = vmspace */
        movq    PCPU(other_cpus)+0, %rax
        MPLOCKED andq   %rax, VM_PMAP+PM_ACTIVE+0(%rcx)
        movq    PCPU(other_cpus)+8, %rax
        MPLOCKED andq   %rax, VM_PMAP+PM_ACTIVE+8(%rcx)
        movq    PCPU(other_cpus)+16, %rax
        MPLOCKED andq   %rax, VM_PMAP+PM_ACTIVE+16(%rcx)
        movq    PCPU(other_cpus)+24, %rax
        MPLOCKED andq   %rax, VM_PMAP+PM_ACTIVE+24(%rcx)
        /*
         * Switch to the next thread.  RET into the restore function, which
         * expects the new thread in RAX and the old in RBX.
         *
         * There is a one-instruction window where curthread is the new
         * thread while %rsp still points to the old thread's stack, but
         * we are protected by a critical section so it is ok.
         */
        movq    %rdi,%rax
        movq    %rax,PCPU(curthread)
        movq    TD_SP(%rax),%rsp
        ret
/*
 * cpu_heavy_restore()	(current thread in %rax on entry, %rbx is old thread)
 *
 * Restore the thread after an LWKT switch.  This entry is normally
 * called via the LWKT switch restore function, which was pulled
 * off the thread stack and jumped to.
 *
 * This entry is only called if the thread was previously saved
 * using cpu_heavy_switch() (the heavy weight process thread switcher),
 * or when a new process is initially scheduled.
 *
 * NOTE: The lwp may be in any state, not necessarily LSRUN, because
 * a preemption switch may interrupt the process and then return via
 *
 * YYY theoretically we do not have to restore everything here; a lot
 * of this junk can wait until we return to usermode.  But for now
 * we restore everything.
 *
 * YYY the PCB crap is really crap, it makes startup a bitch because
 * we can't switch away.
 *
 * YYY note: spl check is done in mi_switch when it splx()'s.
 */
ENTRY(cpu_heavy_restore)
        movq    TD_PCB(%rax),%rdx               /* RDX = PCB */

#if defined(SWTCH_OPTIM_STATS)
        incl    _swtch_optim_stats
#endif
        /*
         * Tell the pmap that our cpu is using the VMSPACE now.  We cannot
         * safely test/reload %cr3 until after we have set the bit in the
         * pmap (remember, we do not hold the MP lock in the switch code).
         */
        movq    TD_LWP(%rax),%rcx
        movq    LWP_VMSPACE(%rcx), %rcx         /* RCX = vmspace */

        movq    PCPU(cpumask)+0, %rsi
        MPLOCKED orq    %rsi, VM_PMAP+PM_ACTIVE+0(%rcx)
        movq    PCPU(cpumask)+8, %rsi
        MPLOCKED orq    %rsi, VM_PMAP+PM_ACTIVE+8(%rcx)
        movq    PCPU(cpumask)+16, %rsi
        MPLOCKED orq    %rsi, VM_PMAP+PM_ACTIVE+16(%rcx)
        movq    PCPU(cpumask)+24, %rsi
        MPLOCKED orq    %rsi, VM_PMAP+PM_ACTIVE+24(%rcx)
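        /*
         * PCPU(cpumask) is just this cpu's bit.  Setting it in PM_ACTIVE is
         * the inverse of the clear done in cpu_heavy_switch(): once the bit
         * is visible, other cpus doing page table updates on this vmspace
         * know they must IPI us, which is why the bit must be set before
         * %cr3 is tested or reloaded below.
         */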
        movl    VM_PMAP+PM_ACTIVE_LOCK(%rcx),%esi
        testl   $CPULOCK_EXCL,%esi
        movq    %rax,%r12                       /* save newthread ptr */
        movq    %rcx,%rdi                       /* (found to be set) */
        call    pmap_interlock_wait             /* pmap_interlock_wait(%rdi:vm) */
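        /*
         * CPULOCK_EXCL set in PM_ACTIVE_LOCK means another cpu currently
         * holds this pmap's interlock exclusively; pmap_interlock_wait()
         * waits for it to be released before we start using the page
         * tables.  The new thread pointer is preserved in %r12 because
         * %rax is a scratch register across the C call.
         */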
        movq    %r12,%rax                       /* restore newthread ptr */
        movq    TD_PCB(%rax),%rdx               /* RDX = PCB */

        /*
         * Restore the MMU address space.  If it is the same as the last
         * thread we don't have to invalidate the tlb (i.e. reload cr3).
         * YYY which naturally also means that the PM_ACTIVE bit had better
         * already have been set before we set it above, check? YYY
         */
        movq    PCB_CR3(%rdx),%rcx
#if defined(SWTCH_OPTIM_STATS)
        decl    _swtch_optim_stats
        incl    _tlb_flush_count
#endif
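        /*
         * Writing %cr3 flushes every non-global TLB entry, which is the
         * cost the same-vmspace optimization above is trying to avoid;
         * tlb_flush_count tracks how often the reload actually happens.
         */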
        /*
         * NOTE: %rbx is the previous thread and %rax is the new thread.
         *       %rbx is retained throughout so we can return it.
         *
         *       lwkt_switch[_return] is responsible for handling TDF_RUNNING.
         */

        /*
         * Deal with the PCB extension, restore the private tss
         */
        movq    PCB_EXT(%rdx),%rdi              /* check for a PCB extension */
        movq    $1,%rcx                         /* maybe mark use of a private tss */
        /*
         * Going back to the common_tss.  We may need to update TSS_RSP0
         * which sets the top of the supervisor stack when entering from
         * usermode.  The PCB is at the top of the stack but we need another
         * 16 bytes to take vm86 into account.
         */
        movq    %rcx, PCPU(common_tss) + TSS_RSP0
        movq    %rcx, PCPU(rsp0)

        cmpl    $0,PCPU(private_tss)            /* don't have to reload if */
        je      3f                              /* already using the common TSS */

        subq    %rcx,%rcx                       /* unmark use of private tss */
        /*
         * Get the address of the common TSS descriptor for the ltr.
         * There is no way to get the address of a segment-accessed variable
         * so we store a self-referential pointer at the base of the per-cpu
         * data area and add the appropriate offset.
         */
        movq    $gd_common_tssd, %rdi
        /* JG name for "%gs:0"? */
        /*
         * Move the correct TSS descriptor into the GDT slot, then reload
         * ltr.
         */
        movl    %ecx,PCPU(private_tss)          /* mark/unmark private tss */
        movq    PCPU(tss_gdt), %rcx             /* entry in GDT */
        movl    $GPROC0_SEL*8, %esi             /* GSEL(entry, SEL_KPL) */
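        /*
         * The selector in %esi is what gets loaded into the task register
         * (ltr).  The reload is required whenever the descriptor contents
         * change because the cpu caches the descriptor, and the GDT entry
         * must be an "available" TSS at that point since ltr marks it busy.
         */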
        /*
         * Restore the user %gs and %fs
         */
        movq    PCB_FSBASE(%rdx),%r9
        cmpq    PCPU(user_fs),%r9
        movq    %rdx,%r10
        movq    %r9,PCPU(user_fs)
        movl    $MSR_FSBASE,%ecx
        movl    PCB_FSBASE(%r10),%eax
        movl    PCB_FSBASE+4(%r10),%edx
        wrmsr
        movq    %r10,%rdx

        movq    PCB_GSBASE(%rdx),%r9
        cmpq    PCPU(user_gs),%r9
        movq    %rdx,%r10
        movq    %r9,PCPU(user_gs)
        movl    $MSR_KGSBASE,%ecx               /* later swapgs moves it to GSBASE */
        movl    PCB_GSBASE(%r10),%eax
        movl    PCB_GSBASE+4(%r10),%edx
        wrmsr
        movq    %r10,%rdx
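        /*
         * wrmsr convention: %ecx selects the MSR and %edx:%eax supplies the
         * 64-bit value, which is why the PCB pointer is parked in %r10
         * while %edx is in use.  The user %gs base is written to the
         * KGSBASE MSR; the swapgs executed on the way back to user mode
         * exchanges it into GSBASE.
         */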
        /*
         * Restore general registers.  %rbx is restored later.
         */
        movq    PCB_RSP(%rdx), %rsp
        movq    PCB_RBP(%rdx), %rbp
        movq    PCB_R12(%rdx), %r12
        movq    PCB_R13(%rdx), %r13
        movq    PCB_R14(%rdx), %r14
        movq    PCB_R15(%rdx), %r15
        movq    PCB_RIP(%rdx), %rax
        movq    %rax, (%rsp)
        /*
         * Restore the user LDT if we have one
         */
        cmpl    $0, PCB_USERLDT(%edx)
        movl    _default_ldt,%eax
        cmpl    PCPU(currentldt),%eax
        movl    %eax,PCPU(currentldt)
        /*
         * Restore the user TLS if we have one
         */
        /*
         * Restore the DEBUG register state if necessary.
         */
        movq    PCB_FLAGS(%rdx),%rax
        andq    $PCB_DBREGS,%rax
        jz      1f                              /* no, skip over */
        movq    PCB_DR6(%rdx),%rax              /* yes, do the restore */
        movq    %rax,%dr6
        movq    PCB_DR3(%rdx),%rax
        movq    %rax,%dr3
        movq    PCB_DR2(%rdx),%rax
        movq    %rax,%dr2
        movq    PCB_DR1(%rdx),%rax
        movq    %rax,%dr1
        movq    PCB_DR0(%rdx),%rax
        movq    %rax,%dr0
        movq    %dr7,%rax                       /* load dr7 so as not to disturb */
        /* JG correct value? */
        andq    $0x0000fc00,%rax                /* reserved bits */
        /* JG we've got more registers on x86_64 */
        movq    PCB_DR7(%rdx),%rcx
        /* JG correct value? */
        andq    $~0x0000fc00,%rcx
        orq     %rcx,%rax
        movq    %rax,%dr7
1:
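        /*
         * The merge above keeps bits 10-15 (reserved/control bits) from the
         * live %dr7 and takes everything else, i.e. the watchpoint enables
         * and the R/W and length fields, from the value saved in the PCB.
         */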
        movq    PCB_RBX(%rdx),%rbx
        ret
/*
 * savectx(struct pcb *pcb)
 *
 * Update pcb, saving current processor state.
 */
ENTRY(savectx)
        /* JG use %rdi instead of %rcx everywhere? */
        movq    %rdi,%rcx

        /* caller's return address - child won't execute this routine */
        movq    (%rsp),%rax
        movq    %rax,PCB_RIP(%rcx)
        movq    %rbx,PCB_RBX(%rcx)
        movq    %rsp,PCB_RSP(%rcx)
        movq    %rbp,PCB_RBP(%rcx)
        movq    %r12,PCB_R12(%rcx)
        movq    %r13,PCB_R13(%rcx)
        movq    %r14,PCB_R14(%rcx)
        movq    %r15,PCB_R15(%rcx)
        /*
         * If npxthread == NULL, then the npx h/w state is irrelevant and the
         * state had better already be in the pcb.  This is true for forks
         * but not for dumps (the old book-keeping with FP flags in the pcb
         * always lost for dumps because the dump pcb has 0 flags).
         *
         * If npxthread != NULL, then we have to save the npx h/w state to
         * npxthread's pcb and copy it to the requested pcb, or save to the
         * requested pcb and reload.  Copying is easier because we would
         * have to handle h/w bugs for reloading.  We used to lose the
         * parent's npx state for forks by forgetting to reload.
         */
        movq    PCPU(npxthread),%rax
        pushq   %rcx                            /* target pcb */
        movq    TD_SAVEFPU(%rax),%rax           /* originating savefpu area */
        movq    $PCB_SAVEFPU_SIZE,%rdx
        leaq    PCB_SAVEFPU(%rcx),%rcx
/*
 * cpu_idle_restore()	(current thread in %rax on entry) (one-time execution)
 *			(old thread is %rbx on entry)
 *
 * Don't bother setting up any regs other than %rbp so backtraces
 * don't die.  This restore function is used to bootstrap into the
 * cpu_idle() LWKT only, after that cpu_lwkt_*() will be used for
 * subsequent switches.
 *
 * Clear TDF_RUNNING in old thread only after we've cleaned up %cr3.
 * This only occurs during system boot so no special handling is
 * required for migration.
 *
 * If we are an AP we have to call ap_init() before jumping to
 * cpu_idle().  ap_init() will synchronize with the BP and finish
 * setting up various ncpu-dependent globaldata fields.  This may
 * happen on UP as well as SMP if we happen to be simulating multiple
 * cpus.
 */
ENTRY(cpu_idle_restore)
        andl    $~TDF_RUNNING,TD_FLAGS(%rbx)
        orl     $TDF_RUNNING,TD_FLAGS(%rax)     /* manual, no switch_return */

        /*
         * cpu 0's idle thread entry for the first time must use normal
         * lwkt_switch_return() semantics or a pending cpu migration on
         * thread0 will deadlock.
         */
        call    lwkt_switch_return
/*
 * cpu_kthread_restore() (current thread is %rax on entry) (one-time execution)
 *			 (old thread is %rbx on entry)
 *
 * Don't bother setting up any regs other than %rbp so backtraces
 * don't die.  This restore function is used to bootstrap into an
 * LWKT based kernel thread only.  cpu_lwkt_switch() will be used
 * thereafter.
 *
 * Because this switch target does not 'return' to lwkt_switch()
 * we have to call lwkt_switch_return(otd) to clean up otd.
 *
 * Since all of our context is on the stack we are reentrant and
 * we can release our critical section and enable interrupts early.
 */
ENTRY(cpu_kthread_restore)
        movq    TD_PCB(%rax),%r13

        /*
         * rax and rbx come from the switchout code.  Call
         * lwkt_switch_return(otd).
         *
         * NOTE: unlike i386, %rsi and %rdi are not call-saved regs.
         */
        pushq   %rax
        movq    %rbx,%rdi
        call    lwkt_switch_return
        popq    %rax
        decl    TD_CRITCOUNT(%rax)
        movq    PCB_R12(%r13),%rdi              /* argument to RBX function */
        movq    PCB_RBX(%r13),%rax              /* thread function */
        /* note: top of stack return address inherited by function */
        jmp     *%rax
/*
 * cpu_lwkt_switch(struct thread *)
 *
 * Standard LWKT switching function.  Only non-scratch registers are
 * saved and we don't bother with the MMU state or anything else.
 *
 * This function is always called while in a critical section.
 *
 * There is a one-instruction window where curthread is the new
 * thread while %rsp still points to the old thread's stack, but
 * we are protected by a critical section so it is ok.
 */
ENTRY(cpu_lwkt_switch)
        pushq   %rbp    /* JG note: GDB hacked to locate ebp relative to td_sp */
        /* JG we've got more registers on x86_64 */
        movq    PCPU(curthread),%rbx
        /*
         * Save the FP state if we have used the FP.  Note that calling
         * npxsave will NULL out PCPU(npxthread).
         *
         * We have to deal with the FP state for LWKT threads in case they
         * happen to get preempted or block while doing an optimized
         * bzero/bcopy/memcpy.
         */
        cmpq    %rbx,PCPU(npxthread)
        movq    %rdi,%r12                       /* save %rdi.  %r12 is callee-saved */
        movq    TD_SAVEFPU(%rbx),%rdi
        call    npxsave                         /* do it in a big C function */
        movq    %r12,%rdi                       /* restore %rdi */
        movq    %rdi,%rax                       /* switch to this thread */
        pushq   $cpu_lwkt_restore
        movq    %rsp,TD_SP(%rbx)
        movq    %rax,PCPU(curthread)
        movq    TD_SP(%rax),%rsp

        /*
         * %rax contains new thread, %rbx contains old thread.
         */
        ret
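        /*
         * The restore address pushed above becomes the top word of the old
         * thread's saved stack, so whichever cpu later loads that thread's
         * TD_SP and executes 'ret' resumes it in cpu_lwkt_restore; the same
         * mechanism is what sends us into the new thread's restore function
         * here.
         */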
/*
 * cpu_lwkt_restore()	(current thread in %rax on entry)
 *
 * Standard LWKT restore function.  This function is always called
 * while in a critical section.
 *
 * Warning: due to preemption the restore function can be used to
 * 'return' to the original thread.  Interrupt disablement must be
 * protected through the switch so we cannot run splz here.
 */
ENTRY(cpu_lwkt_restore)
        /*
         * NOTE: %rbx is the previous thread and %rax is the new thread.
         *       %rbx is retained throughout so we can return it.
         *
         *       lwkt_switch[_return] is responsible for handling TDF_RUNNING.
         */
/*
 * Make AP become the idle loop.
 */
ENTRY(bootstrap_idle)
        movq    PCPU(curthread),%rax
        movq    TD_SP(%rax),%rsp
        ret