/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/swtch.s,v 1.89.2.10 2003/01/23 03:36:24 ps Exp $
 */
#include "use_npx.h"

#include <sys/rtprio.h>

#include <machine/asmacros.h>
#include <machine/segments.h>
#include <machine/pmap.h>
#include <machine_base/apic/apicreg.h>
#include <machine/lock.h>

#include "assym.s"
#define MPLOCKED	lock ;
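/*
 * MPLOCKED expands to the x86 `lock' prefix, so the btrl/cmpxchgl
 * read-modify-write operations on pm_active below are atomic with
 * respect to other cpus.
 */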
	.data

	.globl	lwkt_switch_return

#if defined(SWTCH_OPTIM_STATS)
	.globl	swtch_optim_stats, tlb_flush_count
swtch_optim_stats:	.long	0		/* number of _swtch_optims */
tlb_flush_count:	.long	0
#endif

	.text
/*
 * cpu_heavy_switch(next_thread)
 *
 *	Switch from the current thread to a new thread.  This entry
 *	is normally called via the thread->td_switch function, and will
 *	only be called when the current thread is a heavy weight process.
 *
 *	Some instructions have been reordered to reduce pipeline stalls.
 *
 *	YYY disable interrupts once giant is removed.
 */
ENTRY(cpu_heavy_switch)
	movl	PCPU(curthread),%ecx
	movl	(%esp),%eax			/* (reorder optimization) */
	movl	TD_PCB(%ecx),%edx		/* EDX = PCB */
	movl	%eax,PCB_EIP(%edx)		/* return PC may be modified */
	movl	%ebx,PCB_EBX(%edx)
	movl	%esp,PCB_ESP(%edx)
	movl	%ebp,PCB_EBP(%edx)
	movl	%esi,PCB_ESI(%edx)
	movl	%edi,PCB_EDI(%edx)
	movl	4(%esp),%edi			/* EDI = newthread */
	/*
	 * Clear the cpu bit in the pmap active mask.  The restore
	 * function will set the bit in the pmap active mask.
	 *
	 * Special case: when switching between threads sharing the
	 * same vmspace if we avoid clearing the bit we do not have
	 * to reload %cr3 (if we clear the bit we could race page
	 * table ops done by other threads and would have to reload
	 * %cr3, because those ops will not know to IPI us).
	 */
	movl	%ecx,%ebx			/* EBX = oldthread */
	movl	TD_LWP(%ecx),%ecx		/* ECX = oldlwp */
	movl	TD_LWP(%edi),%esi		/* ESI = newlwp */
	movl	LWP_VMSPACE(%ecx),%ecx		/* ECX = oldvmspace */
	testl	%esi,%esi			/* might not be a heavy */
	jz	1f
	cmpl	LWP_VMSPACE(%esi),%ecx		/* same vmspace? */
	je	2f
1:
	movl	PCPU(cpuid), %eax
	MPLOCKED btrl	%eax, VM_PMAP+PM_ACTIVE(%ecx)
2:
	/*
	 * Push the LWKT switch restore function, which resumes a heavy
	 * weight process.  Note that the LWKT switcher is based on
	 * TD_SP, while the heavy weight process switcher is based on
	 * PCB_ESP.  TD_SP is usually two ints pushed relative to
	 * PCB_ESP.  We push the flags for later restore by cpu_heavy_restore.
	 */
	pushfl
	pushl	$cpu_heavy_restore
	movl	%esp,TD_SP(%ebx)
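	/*
	 * TD_SP now points at the two ints mentioned above: the saved
	 * eflags and the address of cpu_heavy_restore, which the LWKT
	 * switcher will 'ret' through to resume us.
	 */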
	/*
	 * Save debug regs if necessary
	 */
	movb	PCB_FLAGS(%edx),%al
	andb	$PCB_DBREGS,%al
	jz	1f				/* no, skip over */
	movl	%dr7,%eax			/* yes, do the save */
	movl	%eax,PCB_DR7(%edx)
	andl	$0x0000fc00, %eax		/* disable all watchpoints */
	movl	%eax,%dr7
	movl	%dr6,%eax
	movl	%eax,PCB_DR6(%edx)
	movl	%dr3,%eax
	movl	%eax,PCB_DR3(%edx)
	movl	%dr2,%eax
	movl	%eax,PCB_DR2(%edx)
	movl	%dr1,%eax
	movl	%eax,PCB_DR1(%edx)
	movl	%dr0,%eax
	movl	%eax,PCB_DR0(%edx)
1:
#if NNPX > 0
	/*
	 * Save the FP state if we have used the FP.  Note that calling
	 * npxsave will NULL out PCPU(npxthread).
	 */
	cmpl	%ebx,PCPU(npxthread)
	jne	1f
	pushl	TD_SAVEFPU(%ebx)
	call	npxsave				/* do it in a big C function */
	addl	$4,%esp				/* EAX, ECX, EDX trashed */
1:
#endif	/* NNPX > 0 */
	/*
	 * Switch to the next thread, which was passed as an argument
	 * to cpu_heavy_switch().  Due to the eflags and switch-restore
	 * function we pushed, the argument is at 12(%esp).  Set the current
	 * thread, load the stack pointer, and 'ret' into the switch-restore
	 * function.
	 *
	 * The switch restore function expects the new thread to be in %eax
	 * and the old one to be in %ebx.
	 *
	 * There is a one-instruction window where curthread is the new
	 * thread but %esp still points to the old thread's stack, but
	 * we are protected by a critical section so it is ok.
	 */
	movl	%edi,%eax		/* EAX = newtd, EBX = oldtd */
	movl	%eax,PCPU(curthread)
	movl	TD_SP(%eax),%esp
	ret
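	/*
	 * The 'ret' pops whatever restore function the new thread pushed
	 * when it was switched out (cpu_heavy_restore for a heavy weight
	 * thread) and jumps into it with %eax = new thread and
	 * %ebx = old thread.
	 */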
/*
 * cpu_exit_switch()
 *
 *	The switch function is changed to this when a thread is going away
 *	for good.  We have to ensure that the MMU state is not cached, and
 *	we don't bother saving the existing thread state before switching.
 *
 *	At this point we are in a critical section and this cpu owns the
 *	thread's token, which serves as an interlock until the switchout is
 *	complete.
 */
ENTRY(cpu_exit_switch)
	/*
	 * Get us out of the vmspace
	 */
	movl	IdlePTD,%ecx
	movl	%cr3,%eax
	cmpl	%ecx,%eax
	je	1f
	movl	%ecx,%cr3
1:
	movl	PCPU(curthread),%ebx
	/*
	 * If this is a process/lwp, deactivate the pmap after we've
	 * switched it out.
	 */
	movl	TD_LWP(%ebx),%ecx
	testl	%ecx,%ecx
	je	2f
	movl	PCPU(cpuid), %eax
	movl	LWP_VMSPACE(%ecx), %ecx		/* ECX = vmspace */
	MPLOCKED btrl	%eax, VM_PMAP+PM_ACTIVE(%ecx)
2:
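	/*
	 * Unlike cpu_heavy_switch() there is no same-vmspace optimization
	 * here: the thread is exiting, so its cpu bit is always cleared
	 * from pm_active.
	 */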
	/*
	 * Switch to the next thread.  RET into the restore function, which
	 * expects the new thread in EAX and the old in EBX.
	 *
	 * There is a one-instruction window where curthread is the new
	 * thread but %esp still points to the old thread's stack, but
	 * we are protected by a critical section so it is ok.
	 */
	movl	4(%esp),%eax
	movl	%eax,PCPU(curthread)
	movl	TD_SP(%eax),%esp
	ret
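	/*
	 * Like cpu_heavy_switch(), this never returns to the caller; the
	 * 'ret' lands in the new thread's restore function.  The exiting
	 * thread's token (see above) interlocks the switchout.
	 */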
/*
 * cpu_heavy_restore()	(current thread in %eax on entry)
 *
 *	Restore the thread after an LWKT switch.  This entry is normally
 *	called via the LWKT switch restore function, which was pulled
 *	off the thread stack and jumped to.
 *
 *	This entry is only called if the thread was previously saved
 *	using cpu_heavy_switch() (the heavy weight process thread switcher),
 *	or when a new process is initially scheduled.
 *
 *	NOTE: The lwp may be in any state, not necessarily LSRUN, because
 *	a preemption switch may interrupt the process and then return via
 *	a heavy-weight switch.
 *
 *	YYY theoretically we do not have to restore everything here, a lot
 *	of this junk can wait until we return to usermode.  But for now
 *	we restore everything.
 *
 *	YYY the PCB crap is really crap, it makes startup a bitch because
 *	we can't switch away.
 *
 *	YYY note: spl check is done in mi_switch when it splx()'s.
 */
ENTRY(cpu_heavy_restore)
	popfl
	movl	TD_LWP(%eax),%ecx
#if defined(SWTCH_OPTIM_STATS)
	incl	_swtch_optim_stats
#endif
	/*
	 * Tell the pmap that our cpu is using the VMSPACE now.  We cannot
	 * safely test/reload %cr3 until after we have set the bit in the
	 * pmap (remember, we do not hold the MP lock in the switch code).
	 *
	 * Also note that when switching between two lwps sharing the
	 * same vmspace we have already avoided clearing the cpu bit
	 * in pm_active.  If we had cleared it other cpus would not know
	 * to IPI us and we would have to unconditionally reload %cr3.
	 *
	 * Also note that if the pmap is undergoing an atomic inval/mod
	 * that is unaware that our cpu has been added to it we have to
	 * wait for it to complete before we can continue.
	 */
	movl	LWP_VMSPACE(%ecx), %ecx		/* ECX = vmspace */
	pushl	%eax				/* save curthread */
1:
	movl	VM_PMAP+PM_ACTIVE(%ecx),%eax	/* old value for cmpxchgl */
	movl	PCPU(cpumask), %esi
	orl	%eax,%esi			/* new value for cmpxchgl */
	MPLOCKED cmpxchgl %esi,VM_PMAP+PM_ACTIVE(%ecx)
	jnz	1b
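	/*
	 * If the cmpxchgl failed, %eax was reloaded with the current
	 * pm_active value and we retried until our cpumask bit stuck.
	 * %eax now holds the pre-update mask, which is tested for
	 * CPUMASK_LOCK below.
	 */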
	testl	$CPUMASK_LOCK,%eax
	jz	1f
	pushl	%ecx				/* call(stack:vmspace) */
	call	pmap_interlock_wait
	popl	%ecx

	/*
	 * Needs unconditional load cr3
	 */
	popl	%eax				/* EAX = curthread */
	movl	TD_PCB(%eax),%edx		/* EDX = PCB */
	movl	PCB_CR3(%edx),%ecx
	jmp	2f
1:
	/*
	 * Restore the MMU address space.  If it is the same as the last
	 * thread we don't have to invalidate the tlb (i.e. reload cr3).
	 * YYY which naturally also means that the PM_ACTIVE bit had better
	 * already have been set before we set it above, check? YYY
	 */
	popl	%eax				/* EAX = curthread */
	movl	TD_PCB(%eax),%edx		/* EDX = PCB */
	movl	PCB_CR3(%edx),%ecx
	movl	%cr3,%esi
	cmpl	%esi,%ecx			/* same address space? */
	je	4f				/* yes, skip the tlb flush */
#if defined(SWTCH_OPTIM_STATS)
	decl	_swtch_optim_stats
	incl	_tlb_flush_count
#endif
2:
	movl	%ecx,%cr3
4:
	/*
	 * NOTE: %ebx is the previous thread and %eax is the new thread.
	 *	 %ebx is retained throughout so we can return it.
	 *
	 *	 lwkt_switch[_return] is responsible for handling TDF_RUNNING.
	 */
	/*
	 * Deal with the PCB extension, restore the private tss
	 */
	movl	PCB_EXT(%edx),%edi	/* check for a PCB extension */
	movl	$1,%ecx			/* maybe mark use of a private tss */
	testl	%edi,%edi
	jnz	2f
	/*
	 * Going back to the common_tss.  We may need to update TSS_ESP0
	 * which sets the top of the supervisor stack when entering from
	 * usermode.  The PCB is at the top of the stack but we need another
	 * 16 bytes to take vm86 into account.
	 */
	leal	-16(%edx),%ecx
	movl	%ecx, PCPU(common_tss) + TSS_ESP0

	cmpl	$0,PCPU(private_tss)	/* don't have to reload if      */
	je	3f			/* already using the common TSS */

	subl	%ecx,%ecx		/* unmark use of private tss */
	/*
	 * Get the address of the common TSS descriptor for the ltr.
	 * There is no way to get the address of a segment-accessed variable
	 * so we store a self-referential pointer at the base of the per-cpu
	 * data area and add the appropriate offset.
	 */
	movl	$gd_common_tssd, %edi
	addl	%fs:0, %edi
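	/*
	 * %fs addresses the per-cpu data area in the kernel; the word at
	 * %fs:0 holds that area's linear address (the self-referential
	 * pointer mentioned above), so the addl turns the gd_common_tssd
	 * offset into an ordinary pointer.
	 */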
	/*
	 * Move the correct TSS descriptor into the GDT slot, then reload
	 * the ltr.
	 */
2:
	movl	%ecx,PCPU(private_tss)		/* mark/unmark private tss */
	movl	PCPU(tss_gdt), %ecx		/* entry in GDT */
	movl	0(%edi), %eax
	movl	%eax, 0(%ecx)
	movl	4(%edi), %eax
	movl	%eax, 4(%ecx)
	movl	$GPROC0_SEL*8, %esi		/* GSEL(entry, SEL_KPL) */
	ltr	%si
3:
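	/*
	 * GPROC0_SEL*8 forms the selector by hand: GDT entries are 8
	 * bytes each and the TI/RPL bits of a kernel GDT selector are
	 * zero, so index*8 is equivalent to GSEL(GPROC0_SEL, SEL_KPL).
	 */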
	/*
	 * Restore general registers.  %ebx is restored later.
	 */
	movl	PCB_ESP(%edx),%esp
	movl	PCB_EBP(%edx),%ebp
	movl	PCB_ESI(%edx),%esi
	movl	PCB_EDI(%edx),%edi
	movl	PCB_EIP(%edx),%eax
	movl	%eax,(%esp)		/* top of stack = return eip */
	/*
	 * Restore the user LDT if we have one
	 */
	cmpl	$0, PCB_USERLDT(%edx)
	jnz	1f
	movl	_default_ldt,%eax
	cmpl	PCPU(currentldt),%eax
	je	2f
	lldt	_default_ldt
	movl	%eax,PCPU(currentldt)
	jmp	2f
1:	pushl	%edx
	call	set_user_ldt
	popl	%edx
2:
	/*
	 * Restore the user TLS if we have one
	 */
	pushl	%edx
	call	set_user_TLS
	popl	%edx
	/*
	 * Restore the DEBUG register state if necessary.
	 */
	movb	PCB_FLAGS(%edx),%al
	andb	$PCB_DBREGS,%al
	jz	1f				/* no, skip over */
	movl	PCB_DR6(%edx),%eax		/* yes, do the restore */
	movl	%eax,%dr6
	movl	PCB_DR3(%edx),%eax
	movl	%eax,%dr3
	movl	PCB_DR2(%edx),%eax
	movl	%eax,%dr2
	movl	PCB_DR1(%edx),%eax
	movl	%eax,%dr1
	movl	PCB_DR0(%edx),%eax
	movl	%eax,%dr0
	movl	%dr7,%eax		/* load dr7 so as not to disturb */
	andl	$0x0000fc00,%eax	/* reserved bits */
	movl	PCB_DR7(%edx),%ecx
	andl	$~0x0000fc00,%ecx
	orl	%ecx,%eax
	movl	%eax,%dr7
1:
	movl	%ebx,%eax		/* return previous thread */
	movl	PCB_EBX(%edx),%ebx
	ret
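	/*
	 * The 'ret' consumes the eip stored at (%esp) above, resuming the
	 * thread at its saved (possibly modified) PCB_EIP with %eax
	 * holding the previous thread for TDF_RUNNING handling.
	 */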
/*
 * savectx(pcb)
 *
 * Update pcb, saving current processor state.
 */
ENTRY(savectx)
	/* fetch PCB */
	movl	4(%esp),%ecx

	/* caller's return address - child won't execute this routine */
	movl	(%esp),%eax
	movl	%eax,PCB_EIP(%ecx)

	movl	%cr3,%eax
	movl	%eax,PCB_CR3(%ecx)

	movl	%ebx,PCB_EBX(%ecx)
	movl	%esp,PCB_ESP(%ecx)
	movl	%ebp,PCB_EBP(%ecx)
	movl	%esi,PCB_ESI(%ecx)
	movl	%edi,PCB_EDI(%ecx)
#if NNPX > 0
	/*
	 * If npxthread == NULL, then the npx h/w state is irrelevant and the
	 * state had better already be in the pcb.  This is true for forks
	 * but not for dumps (the old book-keeping with FP flags in the pcb
	 * always lost for dumps because the dump pcb has 0 flags).
	 *
	 * If npxthread != NULL, then we have to save the npx h/w state to
	 * npxthread's pcb and copy it to the requested pcb, or save to the
	 * requested pcb and reload.  Copying is easier because we would
	 * have to handle h/w bugs for reloading.  We used to lose the
	 * parent's npx state for forks by forgetting to reload.
	 */
	movl	PCPU(npxthread),%eax
	testl	%eax,%eax
	je	1f

	pushl	%ecx				/* target pcb */
	movl	TD_SAVEFPU(%eax),%eax		/* originating savefpu area */
	pushl	%eax

	pushl	%eax
	call	npxsave
	addl	$4,%esp
	popl	%eax
	popl	%ecx

	pushl	$PCB_SAVEFPU_SIZE
	leal	PCB_SAVEFPU(%ecx),%ecx
	pushl	%ecx
	pushl	%eax
	call	bcopy
	addl	$12,%esp
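	/*
	 * The pushes above build the cdecl call bcopy(from, to,
	 * PCB_SAVEFPU_SIZE): arguments go on the stack right to left, so
	 * the size was pushed first and the originating savefpu area in
	 * %eax became the first argument.
	 */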
1:
#endif	/* NNPX > 0 */
	ret
/*
 * cpu_idle_restore()	(current thread in %eax on entry) (one-time execution)
 *
 *	Don't bother setting up any regs other then %ebp so backtraces
 *	don't die.  This restore function is used to bootstrap into the
 *	cpu_idle() LWKT only, after that cpu_lwkt_*() will be used for
 *	switching.
 *
 *	Clear TDF_RUNNING in old thread only after we've cleaned up %cr3.
 *	This only occurs during system boot so no special handling is
 *	required for migration.
 *
 *	If we are an AP we have to call ap_init() before jumping to
 *	cpu_idle().  ap_init() will synchronize with the BP and finish
 *	setting up various ncpu-dependant globaldata fields.  This may
 *	happen on UP as well as SMP if we happen to be simulating multiple
 *	cpus.
 */
ENTRY(cpu_idle_restore)
	/* cli */
	movl	IdlePTD,%ecx
	movl	$0,%ebp
	pushl	$0
	movl	%ecx,%cr3
	andl	$~TDF_RUNNING,TD_FLAGS(%ebx)
	orl	$TDF_RUNNING,TD_FLAGS(%eax)	/* manual, no switch_return */
	cmpl	$0,PCPU(cpuid)
	je	1f
	call	ap_init
1:
	/*
	 * ap_init can decide to enable interrupts early, but otherwise, or if
	 * we are UP, do it here.
	 */
	sti
	jmp	cpu_idle
/*
 * cpu_kthread_restore()	(current thread is %eax on entry) (one-time execution)
 *
 *	Don't bother setting up any regs other then %ebp so backtraces
 *	don't die.  This restore function is used to bootstrap into an
 *	LWKT based kernel thread only.  cpu_lwkt_switch() will be used
 *	thereafter.
 *
 *	Since all of our context is on the stack we are reentrant and
 *	we can release our critical section and enable interrupts early.
 *
 *	Because this switch target does not 'return' to lwkt_switch()
 *	we have to call lwkt_switch_return(otd) to clean up otd.
 *	otd is in %ebx.
 */
ENTRY(cpu_kthread_restore)
	sti
	movl	IdlePTD,%ecx
	movl	TD_PCB(%eax),%esi
	movl	$0,%ebp
	movl	%ecx,%cr3
	pushl	%eax
	pushl	%ebx		/* argument to lwkt_switch_return */
	call	lwkt_switch_return
	addl	$4,%esp
	popl	%eax
	decl	TD_CRITCOUNT(%eax)
	popl	%eax		/* kthread exit function */
	pushl	PCB_EBX(%esi)	/* argument to ESI function */
	pushl	%eax		/* set exit func as return address */
	movl	PCB_ESI(%esi),%eax
	jmp	*%eax
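	/*
	 * The indirect jump enters the kthread function taken from
	 * PCB_ESI with PCB_EBX as its argument; the exit function popped
	 * above was pushed as the fake return address and runs if the
	 * kthread function ever returns.
	 */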
/*
 * cpu_lwkt_switch(struct thread *)
 *
 *	Standard LWKT switching function.  Only non-scratch registers are
 *	saved and we don't bother with the MMU state or anything else.
 *
 *	This function is always called while in a critical section.
 *
 *	There is a one-instruction window where curthread is the new
 *	thread but %esp still points to the old thread's stack, but
 *	we are protected by a critical section so it is ok.
 */
ENTRY(cpu_lwkt_switch)
	pushl	%ebp	/* note: GDB hacked to locate ebp relative to td_sp */
	pushl	%ebx	/* note: GDB hacked to locate ebx relative to td_sp */
	movl	PCPU(curthread),%ebx
	pushl	%esi
	pushl	%edi
	pushfl
	/* warning: adjust movl into %eax below if you change the pushes */
#if NNPX > 0
	/*
	 * Save the FP state if we have used the FP.  Note that calling
	 * npxsave will NULL out PCPU(npxthread).
	 *
	 * We have to deal with the FP state for LWKT threads in case they
	 * happen to get preempted or block while doing an optimized
	 * bzero/bcopy/memcpy.
	 */
	cmpl	%ebx,PCPU(npxthread)
	jne	1f
	pushl	TD_SAVEFPU(%ebx)
	call	npxsave			/* do it in a big C function */
	addl	$4,%esp			/* EAX, ECX, EDX trashed */
1:
#endif	/* NNPX > 0 */
	movl	4+20(%esp),%eax		/* switch to this thread */
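	/*
	 * 4+20(%esp): the thread argument lies above the 4-byte return
	 * address plus the five 4-byte pushes at entry (%ebp, %ebx,
	 * %esi, %edi, eflags).
	 */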
	pushl	$cpu_lwkt_restore
	movl	%esp,TD_SP(%ebx)
	movl	%eax,PCPU(curthread)
	movl	TD_SP(%eax),%esp

	/*
	 * eax contains new thread, ebx contains old thread.
	 */
	ret
/*
 * cpu_lwkt_restore()	(current thread in %eax on entry)
 *
 *	Standard LWKT restore function.  This function is always called
 *	while in a critical section.
 *
 *	Warning: due to preemption the restore function can be used to
 *	'return' to the original thread.  Interrupt disablement must be
 *	protected through the switch so we cannot run splz here.
 *
 *	YYY we theoretically do not need to load IdlePTD into cr3, but if
 *	so we need a way to detect when the PTD we are using is being
 *	deleted due to a process exiting.
 */
ENTRY(cpu_lwkt_restore)
	movl	IdlePTD,%ecx	/* YYY borrow but beware desched/cpuchg/exit */
	movl	%cr3,%edx
	cmpl	%ecx,%edx
	je	1f
	movl	%ecx,%cr3
1:
	/*
	 * NOTE: %ebx is the previous thread and %eax is the new thread.
	 *	 %ebx is retained throughout so we can return it.
	 *
	 *	 lwkt_switch[_return] is responsible for handling TDF_RUNNING.
	 */