/*
 * Copyright (c) 2003,2004,2008 The DragonFly Project.  All rights reserved.
 * Copyright (c) 2008 Jordan Gordeev.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/swtch.s,v 1.89.2.10 2003/01/23 03:36:24 ps Exp $
 */
//#include "use_npx.h"

#include <sys/rtprio.h>

#include <machine/asmacros.h>
#include <machine/segments.h>
#include <machine/pmap.h>
#include <machine_base/apic/apicreg.h>
#include <machine/lock.h>

#define MPLOCKED        lock ;
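
/*
 * MPLOCKED expands to the x86 'lock' prefix so the read-modify-write
 * instructions it precedes (the pm_active bit updates below) are atomic
 * with respect to other cpus.
 */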
/*
 * This feature allows the preempting (interrupt) kernel thread to borrow
 * %cr3 from the user process it interrupts, allowing us to do away with
 * two %cr3 stores, two atomic ops (pm_active is not modified), and pmap
 * lock tests (not needed since pm_active is not modified).
 *
 * Unfortunately, I couldn't really measure any result so for now the
 * optimization is disabled.
 */
#undef PREEMPT_OPTIMIZE
/*
 * This optimization attempted to avoid a %cr3 store and atomic op, and
 * it might have been useful on older cpus but newer cpus (and more
 * importantly multi-core cpus) generally do not switch between LWPs on
 * the same cpu.  Multiple user threads are more likely to be distributed
 * across multiple cpus.  In cpu-bound situations the scheduler will already
 * be in batch-mode (meaning relatively few context-switches/sec), and
 * otherwise the lwp(s) are likely to be blocked waiting for events.
 *
 * On the flip side, the conditionals this option uses measurably reduce
 * performance (just slightly, honestly).  So this option is disabled.
 */
#undef LWP_SWITCH_OPTIMIZE
/*
 * Global Declarations
 */
        .globl  lwkt_switch_return

#if defined(SWTCH_OPTIM_STATS)
        .globl  swtch_optim_stats, tlb_flush_count
swtch_optim_stats:      .long   0       /* number of _swtch_optims */
tlb_flush_count:        .long   0
/*
 * cpu_heavy_switch(struct thread *next_thread)
 *
 * Switch from the current thread to a new thread.  This entry
 * is normally called via the thread->td_switch function, and will
 * only be called when the current thread is a heavy weight process.
 *
 * Some instructions have been reordered to reduce pipeline stalls.
 *
 * YYY disable interrupts once giant is removed.
 */
ENTRY(cpu_heavy_switch)
        /*
         * Save RIP, RSP and callee-saved registers (RBX, RBP, R12-R15).
         */
        movq    PCPU(curthread),%rcx
        /* On top of the stack is the return address. */
        movq    (%rsp),%rax                     /* (reorder optimization) */
        movq    TD_PCB(%rcx),%rdx               /* RDX = PCB */
        movq    %rax,PCB_RIP(%rdx)              /* return PC may be modified */
        movq    %rbx,PCB_RBX(%rdx)
        movq    %rsp,PCB_RSP(%rdx)
        movq    %rbp,PCB_RBP(%rdx)
        movq    %r12,PCB_R12(%rdx)
        movq    %r13,PCB_R13(%rdx)
        movq    %r14,PCB_R14(%rdx)
        movq    %r15,PCB_R15(%rdx)
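
        /*
         * %rdi still holds the next_thread argument passed by our caller;
         * it is deliberately left untouched so the final switch code below
         * can use it.
         */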
        /*
         * Clear the cpu bit in the pmap active mask.  The restore
         * function will set the bit in the pmap active mask.
         *
         * If we are switching away due to a preempt, TD_PREEMPTED(%rdi)
         * will be non-NULL.  In this situation we do want to avoid extra
         * atomic ops and %cr3 reloads (see top of file for reasoning).
         *
         * NOTE: Do not try to optimize avoiding the %cr3 reload or pm_active
         *       adjustment.  This mattered on uni-processor systems but in
         *       multi-core systems we are highly unlikely to be switching
         *       to another thread belonging to the same process on this cpu.
         *
         *       (more likely the target thread is still sleeping, or if cpu-
         *       bound the scheduler is in batch mode and the switch rate is
         *       low).
         */
        movq    %rcx,%rbx                       /* RBX = oldthread */
#ifdef PREEMPT_OPTIMIZE
        /*
         * If we are being preempted the target thread borrows our %cr3
         * and we leave our pmap bits intact for the duration.
         */
        movq    TD_PREEMPTED(%rdi),%r13
        movq    TD_LWP(%rcx),%rcx               /* RCX = oldlwp */
        movq    LWP_VMSPACE(%rcx),%rcx          /* RCX = oldvmspace */
#ifdef LWP_SWITCH_OPTIMIZE
        movq    TD_LWP(%rdi),%r13               /* R13 = newlwp */
        testq   %r13,%r13                       /* might not be a heavy */
        cmpq    LWP_VMSPACE(%r13),%rcx          /* same vmspace? */
#endif
        movq    PCPU(cpumask_simple),%rsi
        movq    PCPU(cpumask_offset),%r12
        MPLOCKED andq   %rsi, VM_PMAP+PM_ACTIVE(%rcx, %r12, 1)
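
        /*
         * The locked andq above clears this cpu's bit in the old vmspace's
         * pm_active mask (see the comment above): PCPU(cpumask_simple) is
         * this cpu's bit within its 64-bit cpumask word and
         * PCPU(cpumask_offset) is the byte offset of that word within the
         * cpumask.
         */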
        /*
         * Push the LWKT switch restore function, which resumes a heavy
         * weight process.  Note that the LWKT switcher is based on
         * TD_SP, while the heavy weight process switcher is based on
         * PCB_RSP.  TD_SP is usually two ints pushed relative to
         * PCB_RSP.  We push the flags for later restore by cpu_heavy_restore.
         */
        movq    $cpu_heavy_restore, %rax
        movq    %rsp,TD_SP(%rbx)
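
        /*
         * TD_SP now records a stack whose return path leads into
         * cpu_heavy_restore; the generic LWKT switch code will 'ret'
         * through it when this thread is switched back in.
         */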
        /*
         * Save debug regs if necessary
         */
        movq    PCB_FLAGS(%rdx),%rax
        andq    $PCB_DBREGS,%rax
        jz      1f                              /* no, skip over */
        movq    %dr7,%rax                       /* yes, do the save */
        movq    %rax,PCB_DR7(%rdx)
        /* JG correct value? */
        andq    $0x0000fc00, %rax               /* disable all watchpoints */
        movq    %rax,PCB_DR6(%rdx)
        movq    %rax,PCB_DR3(%rdx)
        movq    %rax,PCB_DR2(%rdx)
        movq    %rax,PCB_DR1(%rdx)
        movq    %rax,PCB_DR0(%rdx)
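
        /*
         * DR0-DR3 are the hardware breakpoint address registers and
         * DR6/DR7 are the debug status/control registers, so the thread's
         * watchpoint state ends up preserved in the PCB.
         */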
        /*
         * Save the FP state if we have used the FP.  Note that calling
         * npxsave will NULL out PCPU(npxthread).
         */
        cmpq    %rbx,PCPU(npxthread)
        movq    %rdi,%r12                       /* save %rdi.  %r12 is callee-saved */
        movq    TD_SAVEFPU(%rbx),%rdi
        call    npxsave                         /* do it in a big C function */
        movq    %r12,%rdi                       /* restore %rdi */
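
        /*
         * PCPU(npxthread) is now NULL, so the FP unit no longer belongs to
         * any thread; the saved state sits in the old thread's savefpu area
         * until the thread needs it again.
         */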
        /*
         * Switch to the next thread, which was passed as an argument
         * to cpu_heavy_switch().  The argument is in %rdi.
         * Set the current thread, load the stack pointer,
         * and 'ret' into the switch-restore function.
         *
         * The switch restore function expects the new thread to be in %rax
         * and the old one to be in %rbx.
         *
         * There is a one-instruction window where curthread is the new
         * thread but %rsp still points to the old thread's stack, but
         * we are protected by a critical section so it is ok.
         */
        movq    %rdi,%rax                       /* RAX = newtd, RBX = oldtd */
        movq    %rax,PCPU(curthread)
        movq    TD_SP(%rax),%rsp
END(cpu_heavy_switch)
/*
 * cpu_exit_switch(struct thread *next)
 *
 * The switch function is changed to this when a thread is going away
 * for good.  We have to ensure that the MMU state is not cached, and
 * we don't bother saving the existing thread state before switching.
 *
 * At this point we are in a critical section and this cpu owns the
 * thread's token, which serves as an interlock until the switchout is
 * complete.
 */
ENTRY(cpu_exit_switch)

#ifdef PREEMPT_OPTIMIZE
        /*
         * If we were preempting we are switching back to the original thread.
         * In this situation we already have the original thread's %cr3 and
         * should not replace it!
         */
        testl   $TDF_PREEMPT_DONE, TD_FLAGS(%rdi)
        /*
         * Get us out of the vmspace
         */
        movq    PCPU(curthread),%rbx

        /*
         * If this is a process/lwp, deactivate the pmap after we've
         * switched it out.
         */
        movq    TD_LWP(%rbx),%rcx
        movq    LWP_VMSPACE(%rcx),%rcx          /* RCX = vmspace */

        movq    PCPU(cpumask_simple),%rax
        movq    PCPU(cpumask_offset),%r12
        MPLOCKED andq   %rax, VM_PMAP+PM_ACTIVE(%rcx, %r12, 1)

        /*
         * Switch to the next thread.  RET into the restore function, which
         * expects the new thread in RAX and the old in RBX.
         *
         * There is a one-instruction window where curthread is the new
         * thread but %rsp still points to the old thread's stack, but
         * we are protected by a critical section so it is ok.
         */
        movq    %rax,PCPU(curthread)
        movq    TD_SP(%rax),%rsp
/*
 * cpu_heavy_restore() (current thread in %rax on entry, old thread in %rbx)
 *
 * Restore the thread after an LWKT switch.  This entry is normally
 * called via the LWKT switch restore function, which was pulled
 * off the thread stack and jumped to.
 *
 * This entry is only called if the thread was previously saved
 * using cpu_heavy_switch() (the heavy weight process thread switcher),
 * or when a new process is initially scheduled.
 *
 * NOTE: The lwp may be in any state, not necessarily LSRUN, because
 *       a preemption switch may interrupt the process and then return via
 *       cpu_heavy_restore.
 *
 * YYY theoretically we do not have to restore everything here, a lot
 * of this junk can wait until we return to usermode.  But for now
 * we restore everything.
 *
 * YYY the PCB crap is really crap, it makes startup a bitch because
 * we can't switch away.
 *
 * YYY note: spl check is done in mi_switch when it splx()'s.
 */
ENTRY(cpu_heavy_restore)
        movq    TD_PCB(%rax),%rdx               /* RDX = PCB */
        movq    %rdx, PCPU(trampoline)+TR_PCB_RSP
        movq    PCB_FLAGS(%rdx), %rcx
        movq    %rcx, PCPU(trampoline)+TR_PCB_FLAGS
        movq    PCB_CR3_ISO(%rdx), %rcx
        movq    %rcx, PCPU(trampoline)+TR_PCB_CR3_ISO
        movq    PCB_CR3(%rdx), %rcx
        movq    %rcx, PCPU(trampoline)+TR_PCB_CR3
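
        /*
         * The per-cpu trampoline copies above are what the kernel entry
         * trampoline consults when coming in from userland: TR_PCB_CR3 is
         * the full kernel page table root and TR_PCB_CR3_ISO the isolated
         * (user-mode) page table root used when page table isolation is
         * enabled.
         */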
#if defined(SWTCH_OPTIM_STATS)
        incl    _swtch_optim_stats
#ifdef PREEMPT_OPTIMIZE
        /*
         * If restoring our thread after a preemption has returned to
         * us, our %cr3 and pmap were borrowed and are being returned to
         * us and no further action on those items need be taken.
         */
        testl   $TDF_PREEMPT_DONE, TD_FLAGS(%rax)

        /*
         * Tell the pmap that our cpu is using the VMSPACE now.  We cannot
         * safely test/reload %cr3 until after we have set the bit in the
         * pmap.
         *
         * We must do an interlocked test of the CPULOCK_EXCL at the same
         * time.  If found to be set we will have to wait for it to clear
         * and then do a forced reload of %cr3 (even if the value matches).
         *
         * XXX When switching between two LWPs sharing the same vmspace
         *     the cpu_heavy_switch() code currently avoids clearing the
         *     cpu bit in PM_ACTIVE.  So if the bit is already set we can
         *     avoid checking for the interlock via CPULOCK_EXCL.  We currently
         *     do not perform this optimization.
         */
        movq    TD_LWP(%rax),%rcx
        movq    LWP_VMSPACE(%rcx),%rcx          /* RCX = vmspace */

        movq    PCPU(cpumask_simple),%rsi
        movq    PCPU(cpumask_offset),%r12
        MPLOCKED orq    %rsi, VM_PMAP+PM_ACTIVE(%rcx, %r12, 1)

        movl    VM_PMAP+PM_ACTIVE_LOCK(%rcx),%esi
        testl   $CPULOCK_EXCL,%esi
        movq    %rax,%r12                       /* save newthread ptr */
        movq    %rcx,%rdi                       /* (found to be set) */
        call    pmap_interlock_wait             /* pmap_interlock_wait(%rdi:vm) */
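
        /*
         * pmap_interlock_wait() spins until the pmap's CPULOCK_EXCL owner
         * releases it; once it returns we fall into the unconditional %cr3
         * reload below so any invalidation we may have raced is picked up.
         */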
        /*
         * Need unconditional load cr3
         */
        movq    TD_PCB(%rax),%rdx               /* RDX = PCB */
        movq    PCB_CR3(%rdx),%rcx              /* RCX = desired CR3 */
        jmp     2f                              /* unconditional reload */

        /*
         * Restore the MMU address space.  If it is the same as the last
         * thread we don't have to invalidate the tlb (i.e. reload cr3).
         *
         * XXX Temporary kludge, do NOT do this optimization!  The problem
         *     is that the pm_active bit for the cpu had dropped for a small
         *     period of time, just a few cycles, but even one cycle is long
         *     enough for some other cpu doing a pmap invalidation to not see
         *     our cpu and leave us out of the invalidation.
         *
         *     When that happens, and we don't invltlb (by loading %cr3), we
         *     wind up with a stale TLB.
         */
        movq    TD_PCB(%rax),%rdx               /* RDX = PCB */
        movq    %cr3,%rsi                       /* RSI = current CR3 */
        movq    PCB_CR3(%rdx),%rcx              /* RCX = desired CR3 */
#if defined(SWTCH_OPTIM_STATS)
        decl    _swtch_optim_stats
        incl    _tlb_flush_count

        /*
         * NOTE: %rbx is the previous thread and %rax is the new thread.
         *       %rbx is retained throughout so we can return it.
         *
         *       lwkt_switch[_return] is responsible for handling TDF_RUNNING.
         */
        /*
         * Deal with the PCB extension, restore the private tss
         */
        movq    PCB_EXT(%rdx),%rdi              /* check for a PCB extension */
        movq    $1,%rcx                         /* maybe mark use of a private tss */

        /*
         * Going back to the common_tss.  (this was already executed at
         * the top).
         *
         * Set the top of the supervisor stack for the new thread
         * in gd_thread_pcb so the trampoline code can load it into %rsp.
         */
        movq    %rdx, PCPU(trampoline)+TR_PCB_RSP
        movq    PCB_FLAGS(%rdx), %rcx
        movq    %rcx, PCPU(trampoline)+TR_PCB_FLAGS
        movq    PCB_CR3_ISO(%rdx), %rcx
        movq    %rcx, PCPU(trampoline)+TR_PCB_CR3_ISO
        movq    PCB_CR3(%rdx), %rcx
        movq    %rcx, PCPU(trampoline)+TR_PCB_CR3
        cmpl    $0,PCPU(private_tss)            /* don't have to reload if */
        je      3f                              /* already using the common TSS */

        subq    %rcx,%rcx                       /* unmark use of private tss */

        /*
         * Get the address of the common TSS descriptor for the ltr.
         * There is no way to get the address of a segment-accessed variable
         * so we store a self-referential pointer at the base of the per-cpu
         * data area and add the appropriate offset.
         */
        movq    $gd_common_tssd, %rdi
        /* JG name for "%gs:0"? */

        /*
         * Move the correct TSS descriptor into the GDT slot, then reload
         * the task register.
         */
        movl    %ecx,PCPU(private_tss)          /* mark/unmark private tss */
        movq    PCPU(tss_gdt), %rbx             /* entry in GDT */
        movl    $GPROC0_SEL*8, %esi             /* GSEL(entry, SEL_KPL) */
        /*
         * Restore the user %gs and %fs
         */
        movq    PCB_FSBASE(%rdx),%r9
        cmpq    PCPU(user_fs),%r9
        movq    %r9,PCPU(user_fs)
        movl    $MSR_FSBASE,%ecx
        movl    PCB_FSBASE(%r10),%eax
        movl    PCB_FSBASE+4(%r10),%edx
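
        /*
         * wrmsr takes the MSR index in %ecx and the 64-bit value split
         * across %edx:%eax, which is why the base address is loaded as two
         * 32-bit halves; the PCB is referenced via %r10 here because %edx
         * is consumed by the MSR write.
         */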
        movq    PCB_GSBASE(%rdx),%r9
        cmpq    PCPU(user_gs),%r9
        movq    %r9,PCPU(user_gs)
        movl    $MSR_KGSBASE,%ecx               /* later swapgs moves it to GSBASE */
        movl    PCB_GSBASE(%r10),%eax
        movl    PCB_GSBASE+4(%r10),%edx

        /*
         * Restore general registers.  %rbx is restored later.
         */
        movq    PCB_RSP(%rdx), %rsp
        movq    PCB_RBP(%rdx), %rbp
        movq    PCB_R12(%rdx), %r12
        movq    PCB_R13(%rdx), %r13
        movq    PCB_R14(%rdx), %r14
        movq    PCB_R15(%rdx), %r15
        movq    PCB_RIP(%rdx), %rax
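
        /*
         * PCB_RIP holds the return address saved by cpu_heavy_switch(),
         * i.e. the point at which this thread originally switched away;
         * execution resumes there.
         */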
        /*
         * Restore the user LDT if we have one
         */
        cmpl    $0, PCB_USERLDT(%rdx)
        movl    _default_ldt,%eax
        cmpl    PCPU(currentldt),%eax
        movl    %eax,PCPU(currentldt)
        /*
         * Restore the user TLS if we have one
         */

        /*
         * Restore the DEBUG register state if necessary.
         */
        movq    PCB_FLAGS(%rdx),%rax
        andq    $PCB_DBREGS,%rax
        jz      1f                              /* no, skip over */
        movq    PCB_DR6(%rdx),%rax              /* yes, do the restore */
        movq    PCB_DR3(%rdx),%rax
        movq    PCB_DR2(%rdx),%rax
        movq    PCB_DR1(%rdx),%rax
        movq    PCB_DR0(%rdx),%rax
        movq    %dr7,%rax                       /* load dr7 so as not to disturb */
        /* JG correct value? */
        andq    $0x0000fc00,%rax                /* reserved bits */
        /* JG we've got more registers on x86_64 */
        movq    PCB_DR7(%rdx),%rcx
        /* JG correct value? */
        andq    $~0x0000fc00,%rcx
        /*
         * Clear the QUICKRET flag when restoring a user process context
         * so we don't try to do a quick syscall return.
         */
        andl    $~RQF_QUICKRET,PCPU(reqflags)

        movq    PCB_RBX(%rdx),%rbx
END(cpu_heavy_restore)
/*
 * savectx(struct pcb *pcb)
 *
 * Update pcb, saving current processor state.
 */
        /* JG use %rdi instead of %rcx everywhere? */

        /* caller's return address - child won't execute this routine */
        movq    %rax,PCB_RIP(%rcx)
        movq    %rax,PCB_CR3(%rcx)

        movq    %rbx,PCB_RBX(%rcx)
        movq    %rsp,PCB_RSP(%rcx)
        movq    %rbp,PCB_RBP(%rcx)
        movq    %r12,PCB_R12(%rcx)
        movq    %r13,PCB_R13(%rcx)
        movq    %r14,PCB_R14(%rcx)
        movq    %r15,PCB_R15(%rcx)
        /*
         * If npxthread == NULL, then the npx h/w state is irrelevant and the
         * state had better already be in the pcb.  This is true for forks
         * but not for dumps (the old book-keeping with FP flags in the pcb
         * always lost for dumps because the dump pcb has 0 flags).
         *
         * If npxthread != NULL, then we have to save the npx h/w state to
         * npxthread's pcb and copy it to the requested pcb, or save to the
         * requested pcb and reload.  Copying is easier because we would
         * have to handle h/w bugs for reloading.  We used to lose the
         * parent's npx state for forks by forgetting to reload.
         */
        movq    PCPU(npxthread),%rax

        pushq   %rcx                            /* target pcb */
        movq    TD_SAVEFPU(%rax),%rax           /* originating savefpu area */

        movq    $PCB_SAVEFPU_SIZE,%rdx
        leaq    PCB_SAVEFPU(%rcx),%rcx
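
        /*
         * %rcx and %rdx now hold the destination (the target pcb's FP save
         * area) and the length for the copy of the FP state described in
         * the comment above.
         */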
/*
 * cpu_idle_restore() (current thread in %rax on entry, old thread in %rbx)
 *
 * Don't bother setting up any regs other than %rbp so backtraces
 * don't die.  This restore function is used to bootstrap into the
 * cpu_idle() LWKT only, after that cpu_lwkt_*() will be used for
 * subsequent switches.
 *
 * Clear TDF_RUNNING in old thread only after we've cleaned up %cr3.
 * This only occurs during system boot so no special handling is
 * required for migration.
 *
 * If we are an AP we have to call ap_init() before jumping to
 * cpu_idle().  ap_init() will synchronize with the BP and finish
 * setting up various ncpu-dependent globaldata fields.  This may
 * happen on UP as well as SMP if we happen to be simulating multiple
 * cpus.
 */
ENTRY(cpu_idle_restore)
        xorq    %rbp,%rbp                       /* dummy frame pointer */
        pushq   $0                              /* dummy return pc */

        /* NOTE: idle thread can never preempt */
        andl    $~TDF_RUNNING,TD_FLAGS(%rbx)
        orl     $TDF_RUNNING,TD_FLAGS(%rax)     /* manual, no switch_return */
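
        /*
         * On this path the TDF_RUNNING handoff is done by hand instead of
         * via lwkt_switch_return() (see the cpu 0 note below).
         */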
        /*
         * ap_init can decide to enable interrupts early, but otherwise, or if
         * we are UP, do it here.
         */

        /*
         * cpu 0's idle thread entry for the first time must use normal
         * lwkt_switch_return() semantics or a pending cpu migration on
         * thread0 will deadlock.
         */
        call    lwkt_switch_return
END(cpu_idle_restore)
/*
 * cpu_kthread_restore() (current thread is %rax on entry, previous is %rbx)
 *                       (one-time execution)
 *
 * Don't bother setting up any regs other than %rbp so backtraces
 * don't die.  This restore function is used to bootstrap into an
 * LWKT based kernel thread only.  cpu_lwkt_switch() will be used
 * after that.
 *
 * Because this switch target does not 'return' to lwkt_switch()
 * we have to call lwkt_switch_return(otd) to clean up otd.
 *
 * Since all of our context is on the stack we are reentrant and
 * we can release our critical section and enable interrupts early.
 */
ENTRY(cpu_kthread_restore)
        movq    TD_PCB(%rax),%r13

#ifdef PREEMPT_OPTIMIZE
        /*
         * If we are preempting someone we borrow their %cr3, do not overwrite
         * it.
         */
        movq    TD_PREEMPTED(%rax),%r14
        /*
         * rax and rbx come from the switchout code.  Call
         * lwkt_switch_return(otd).
         *
         * NOTE: unlike i386, %rsi and %rdi are not call-saved regs.
         */
        call    lwkt_switch_return
        decl    TD_CRITCOUNT(%rax)
        movq    PCB_R12(%r13),%rdi              /* argument to RBX function */
        movq    PCB_RBX(%r13),%rax              /* thread function */
        /* note: top of stack return address inherited by function */
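
        /*
         * PCB_RBX and PCB_R12 were pre-loaded with the thread function and
         * its argument when the kernel thread was set up, so the function
         * receives its argument in %rdi per the calling convention.
         */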
END(cpu_kthread_restore)
/*
 * cpu_lwkt_switch(struct thread *)
 *
 * Standard LWKT switching function.  Only non-scratch registers are
 * saved and we don't bother with the MMU state or anything else.
 *
 * This function is always called while in a critical section.
 *
 * There is a one-instruction window where curthread is the new
 * thread but %rsp still points to the old thread's stack, but
 * we are protected by a critical section so it is ok.
 */
ENTRY(cpu_lwkt_switch)
        pushq   %rbp            /* JG note: GDB hacked to locate ebp rel to td_sp */
        movq    PCPU(curthread),%rbx            /* becomes old thread in restore */
        /*
         * Save the FP state if we have used the FP.  Note that calling
         * npxsave will NULL out PCPU(npxthread).
         *
         * We have to deal with the FP state for LWKT threads in case they
         * happen to get preempted or block while doing an optimized
         * bzero/bcopy/memcpy.
         */
        cmpq    %rbx,PCPU(npxthread)
        movq    %rdi,%r12                       /* save %rdi.  %r12 is callee-saved */
        movq    TD_SAVEFPU(%rbx),%rdi
        call    npxsave                         /* do it in a big C function */
        movq    %r12,%rdi                       /* restore %rdi */

        movq    %rdi,%rax                       /* switch to this thread */
        pushq   $cpu_lwkt_restore
        movq    %rsp,TD_SP(%rbx)

        /*
         * %rax contains new thread, %rbx contains old thread.
         */
        movq    %rax,PCPU(curthread)
        movq    TD_SP(%rax),%rsp
/*
 * cpu_lwkt_restore() (current thread in %rax on entry)
 *
 * Standard LWKT restore function.  This function is always called
 * while in a critical section.
 *
 * WARNING! Due to preemption the restore function can be used to 'return'
 *          to the original thread.  Interrupt disablement must be
 *          protected through the switch so we cannot run splz here.
 */
ENTRY(cpu_lwkt_restore)
#ifdef PREEMPT_OPTIMIZE
        /*
         * If we are preempting someone we borrow their %cr3 and pmap
         */
        movq    TD_PREEMPTED(%rax),%r14         /* kernel thread preempting? */
        jne     1f                              /* yes, borrow %cr3 from old thread */

        /*
         * Don't reload %cr3 if it hasn't changed.  Since this is a LWKT
         * thread (a kernel thread), and the kernel_pmap always permanently
         * sets all pm_active bits, we don't have the same problem with it
         * that we do with process pmaps.
         */

        /*
         * NOTE: %rbx is the previous thread and %rax is the new thread.
         *       %rbx is retained throughout so we can return it.
         *
         *       lwkt_switch[_return] is responsible for handling TDF_RUNNING.
         */
END(cpu_lwkt_restore)