/*
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 * LWKT threads Copyright (c) 2003 Matthew Dillon
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/swtch.s,v 1.89.2.10 2003/01/23 03:36:24 ps Exp $
 * $DragonFly: src/sys/platform/pc32/i386/swtch.s,v 1.21 2003/07/06 21:23:48 dillon Exp $
 */
42 #include "opt_user_ldt.h"
44 #include <sys/rtprio.h>
46 #include <machine/asmacros.h>
47 #include <machine/ipl.h>
50 #include <machine/pmap.h>
51 #include <machine/smptests.h> /** GRAB_LOPRIO */
52 #include <machine/apic.h>
53 #include <machine/lock.h>
	.data
#if defined(SWTCH_OPTIM_STATS)
	.globl	swtch_optim_stats, tlb_flush_count
swtch_optim_stats:	.long	0		/* number of _swtch_optims */
tlb_flush_count:	.long	0
#endif

	.text
/*
 *  cpu_heavy_switch(next_thread)
 *
 *	Switch from the current thread to a new thread.  This entry
 *	is normally called via the thread->td_switch function, and will
 *	only be called when the current thread is a heavy weight process.
 *
 *	YYY disable interrupts once giant is removed.
 */
ENTRY(cpu_heavy_switch)
	movl	PCPU(curthread),%ecx
	movl	TD_PROC(%ecx),%ecx
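	/*
	 * Clear this cpu's bit in the outgoing process's pmap active
	 * mask so the pmap layer knows the vmspace is no longer in use
	 * on this cpu.
	 */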
	movl	P_VMSPACE(%ecx), %edx
	movl	PCPU(cpuid), %eax
	btrl	%eax, VM_PMAP+PM_ACTIVE(%edx)
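	/*
	 * Save the general registers and %gs into the outgoing thread's
	 * pcb.  The saved EIP is the caller's return address sitting at
	 * the top of the stack.
	 */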
	movl	P_THREAD(%ecx),%edx
	movl	TD_PCB(%edx),%edx
	movl	(%esp),%eax			/* Hardware registers */
	movl	%eax,PCB_EIP(%edx)
	movl	%ebx,PCB_EBX(%edx)
	movl	%esp,PCB_ESP(%edx)
	movl	%ebp,PCB_EBP(%edx)
	movl	%esi,PCB_ESI(%edx)
	movl	%edi,PCB_EDI(%edx)
	movl	%gs,PCB_GS(%edx)
	/*
	 * Push the LWKT switch restore function, which resumes a heavy
	 * weight process.  Note that the LWKT switcher is based on
	 * TD_SP, while the heavy weight process switcher is based on
	 * PCB_ESP.  TD_SP is usually one pointer pushed relative to
	 * PCB_ESP.
	 */
	movl	P_THREAD(%ecx),%eax
	pushl	$cpu_heavy_restore
	movl	%esp,TD_SP(%eax)
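	/*
	 * The outgoing thread's stack now looks like this (top down):
	 *
	 *	TD_SP   -> cpu_heavy_restore   (switch-restore function)
	 *	PCB_ESP -> caller's return EIP (also saved in PCB_EIP)
	 */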
	/*
	 * Save debug regs if necessary
	 */
	movb	PCB_FLAGS(%edx),%al
	andb	$PCB_DBREGS,%al
	jz	1f				/* no, skip over */
	movl	%dr7,%eax			/* yes, do the save */
	movl	%eax,PCB_DR7(%edx)
	andl	$0x0000fc00, %eax		/* disable all watchpoints */
	movl	%eax,%dr7
	movl	%dr6,%eax
	movl	%eax,PCB_DR6(%edx)
	movl	%dr3,%eax
	movl	%eax,PCB_DR3(%edx)
	movl	%dr2,%eax
	movl	%eax,PCB_DR2(%edx)
	movl	%dr1,%eax
	movl	%eax,PCB_DR1(%edx)
	movl	%dr0,%eax
	movl	%eax,PCB_DR0(%edx)
1:
	/*
	 * Save the FP state if we have used the FP.
	 */
#if NNPX > 0
	movl	P_THREAD(%ecx),%ecx
	cmpl	%ecx,PCPU(npxthread)
	jne	1f
	addl	$PCB_SAVEFPU,%edx		/* h/w bugs make saving complicated */
	pushl	%edx
	call	npxsave				/* do it in a big C function */
	popl	%eax
1:
	/* %ecx,%edx trashed */
#endif	/* NNPX > 0 */
	/*
	 * Switch to the next thread, which was passed as an argument
	 * to cpu_heavy_switch().  Due to the switch-restore function we pushed,
	 * the argument is at 8(%esp).  Set the current thread, load the
	 * stack pointer, and 'ret' into the switch-restore function.
	 */
	movl	8(%esp),%eax
	movl	%eax,PCPU(curthread)
	movl	TD_SP(%eax),%esp
	ret
/*
 *  cpu_exit_switch()
 *
 *	The switch function is changed to this when a thread is going away
 *	for good.  We have to ensure that the MMU state is not cached, and
 *	we don't bother saving the existing thread state before switching.
 *
 *	At this point we are in a critical section and this cpu owns the
 *	thread's token, which serves as an interlock until the switchout is
 *	complete.
 */
ENTRY(cpu_exit_switch)
	/*
	 * Get us out of the vmspace
	 */
	movl	PCPU(curthread),%ecx

	/*
	 * Switch to the next thread.
	 */
	movl	4(%esp),%eax
	movl	%eax,PCPU(curthread)
	movl	TD_SP(%eax),%esp
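	/* %ecx still points at the old, exiting thread */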
	/*
	 * We are now the next thread, set the exited flag and wakeup
	 * anyone waiting on the old thread.
	 */
	orl	$TDF_EXITED,TD_FLAGS(%ecx)
#if 0	/* YYY MP lock may not be held by new target */
	pushl	%eax
	pushl	%ecx			/* wakeup(oldthread) */
	call	wakeup
	addl	$4,%esp
	popl	%eax			/* note: next thread expects curthread in %eax */
#endif
	/*
	 * Restore the next thread's state and resume it.  Note: the
	 * restore function assumes that the next thread's address is
	 * in %eax.
	 */
	ret
/*
 *  cpu_heavy_restore()	(current thread in %eax on entry)
 *
 *	Restore the thread after an LWKT switch.  This entry is normally
 *	called via the LWKT switch restore function, which was pulled
 *	off the thread stack and jumped to.
 *
 *	This entry is only called if the thread was previously saved
 *	using cpu_heavy_switch() (the heavy weight process thread switcher).
 *
 *	YYY theoretically we do not have to restore everything here, a lot
 *	of this junk can wait until we return to usermode.  But for now
 *	we restore everything.
 *
 *	YYY STI/CLI sequencing.
 *
 *	YYY note: spl check is done in mi_switch when it splx()'s.
 */
ENTRY(cpu_heavy_restore)
	/* interrupts are disabled */
	movl	TD_PCB(%eax),%edx
	movl	TD_PROC(%eax),%ecx
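	/* the incoming process is expected to be runnable (SRUN) */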
	cmpb	$SRUN,P_STAT(%ecx)
#if defined(SWTCH_OPTIM_STATS)
	incl	_swtch_optim_stats
#endif
	/*
	 * Restore the MMU address space
	 */
	movl	%cr3,%ebx
	cmpl	PCB_CR3(%edx),%ebx
	je	4f
#if defined(SWTCH_OPTIM_STATS)
	decl	_swtch_optim_stats
	incl	_tlb_flush_count
#endif
	movl	PCB_CR3(%edx),%ebx
	movl	%ebx,%cr3
4:
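	/* %cr3 now matches the incoming process's page directory */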
	/*
	 * Deal with the PCB extension, restore the private tss
	 */
	movl	PCPU(cpuid), %esi
	cmpl	$0, PCB_EXT(%edx)		/* has pcb extension? */
	je	1f
	btsl	%esi, private_tss		/* mark use of private tss */
	movl	PCB_EXT(%edx), %edi		/* new tss descriptor */
	jmp	2f
1:
	/*
	 * update common_tss.tss_esp0 pointer.  This is the supervisor
	 * stack pointer on entry from user mode.  Since the pcb is
	 * at the top of the supervisor stack esp0 starts just below it.
	 * We leave enough space for vm86 (16 bytes).
	 *
	 * common_tss.tss_esp0 is needed when user mode traps into the
	 * kernel.
	 */
	leal	-16(%edx),%ebx
	movl	%ebx, PCPU(common_tss) + TSS_ESP0

	btrl	%esi, private_tss
	jae	3f
	/*
	 * There is no way to get the address of a segment-accessed variable
	 * so we store a self-referential pointer at the base of the per-cpu
	 * data area and add the appropriate offset.
	 */
	movl	$gd_common_tssd, %edi
	addl	%fs:0, %edi
	/*
	 * Move the correct TSS descriptor into the GDT slot, then reload
	 * tr.   YYY not sure what is going on here
	 */
2:
	movl	PCPU(tss_gdt), %ebx		/* entry in GDT */
	movl	0(%edi), %eax
	movl	%eax, 0(%ebx)			/* copy descriptor low word */
	movl	4(%edi), %eax
	movl	%eax, 4(%ebx)			/* copy descriptor high word */
	movl	$GPROC0_SEL*8, %esi		/* GSEL(entry, SEL_KPL) */
	ltr	%si
3:
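	/* tr now refers to the TSS that will be used for this thread */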
	/*
	 * Tell the pmap that our cpu is using the VMSPACE now.
	 */
	movl	P_VMSPACE(%ecx), %ebx
	movl	PCPU(cpuid), %eax
	btsl	%eax, VM_PMAP+PM_ACTIVE(%ebx)
	/*
	 * Restore general registers.
	 */
	movl	PCB_EBX(%edx),%ebx
	movl	PCB_ESP(%edx),%esp
	movl	PCB_EBP(%edx),%ebp
	movl	PCB_ESI(%edx),%esi
	movl	PCB_EDI(%edx),%edi
	movl	PCB_EIP(%edx),%eax
	movl	%eax,(%esp)
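	/*
	 * The saved EIP now sits at the top of the restored stack; the
	 * final ret resumes the thread there.
	 */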
	/*
	 * Restore the user LDT if we have one
	 */
#ifdef	USER_LDT
	cmpl	$0, PCB_USERLDT(%edx)
	jnz	1f
	movl	_default_ldt,%eax
	cmpl	PCPU(currentldt),%eax
	je	2f
	lldt	_default_ldt
	movl	%eax,PCPU(currentldt)
	jmp	2f
1:	pushl	%edx
	call	set_user_ldt
	popl	%edx
2:
#endif
	/*
	 * Restore the %gs segment register, which must be done after
	 * loading the user LDT.  Since user processes can modify the
	 * register via procfs, this may result in a fault which is
	 * detected by checking the fault address against cpu_switch_load_gs
	 * in i386/i386/trap.c
	 */
	.globl	cpu_switch_load_gs
cpu_switch_load_gs:
	movl	PCB_GS(%edx),%gs
	/*
	 * Restore the DEBUG register state if necessary.
	 */
	movb	PCB_FLAGS(%edx),%al
	andb	$PCB_DBREGS,%al
	jz	1f				/* no, skip over */
	movl	PCB_DR6(%edx),%eax		/* yes, do the restore */
	movl	%eax,%dr6
	movl	PCB_DR3(%edx),%eax
	movl	%eax,%dr3
	movl	PCB_DR2(%edx),%eax
	movl	%eax,%dr2
	movl	PCB_DR1(%edx),%eax
	movl	%eax,%dr1
	movl	PCB_DR0(%edx),%eax
	movl	%eax,%dr0
	movl	%dr7,%eax			/* load dr7 so as not to disturb */
	andl	$0x0000fc00,%eax		/* reserved bits */
	pushl	%ebx
	movl	PCB_DR7(%edx),%ebx
	andl	$~0x0000fc00,%ebx
	orl	%ebx,%eax
	popl	%ebx
	movl	%eax,%dr7
1:

	ret
CROSSJUMPTARGET(sw1a)
sw0_1:	.asciz	"cpu_switch: panic: %p"

sw0_1:	.asciz	"cpu_switch: has wchan"

sw0_2:	.asciz	"cpu_switch: not SRUN"

#if defined(SMP) && defined(DIAGNOSTIC)
sw0_4:	.asciz	"cpu_switch: do not have lock"
#endif /* SMP && DIAGNOSTIC */

string:	.asciz	"SWITCHING\n"
/*
 * savectx(pcb)
 * Update pcb, saving current processor state.
 */
ENTRY(savectx)
	/* fetch PCB */
	movl	4(%esp),%ecx

	/* caller's return address - child won't execute this routine */
	movl	(%esp),%eax
	movl	%eax,PCB_EIP(%ecx)

	movl	%cr3,%eax
	movl	%eax,PCB_CR3(%ecx)

	movl	%ebx,PCB_EBX(%ecx)
	movl	%esp,PCB_ESP(%ecx)
	movl	%ebp,PCB_EBP(%ecx)
	movl	%esi,PCB_ESI(%ecx)
	movl	%edi,PCB_EDI(%ecx)
	movl	%gs,PCB_GS(%ecx)
#if NNPX > 0
	/*
	 * If npxthread == NULL, then the npx h/w state is irrelevant and the
	 * state had better already be in the pcb.  This is true for forks
	 * but not for dumps (the old book-keeping with FP flags in the pcb
	 * always lost for dumps because the dump pcb has 0 flags).
	 *
	 * If npxthread != NULL, then we have to save the npx h/w state to
	 * npxthread's pcb and copy it to the requested pcb, or save to the
	 * requested pcb and reload.  Copying is easier because we would
	 * have to handle h/w bugs for reloading.  We used to lose the
	 * parent's npx state for forks by forgetting to reload.
	 */
	movl	PCPU(npxthread),%eax
	testl	%eax,%eax
	je	1f

	pushl	%ecx
	movl	TD_PCB(%eax),%eax
	leal	PCB_SAVEFPU(%eax),%eax
	pushl	%eax
	pushl	%eax
	call	npxsave
	addl	$4,%esp
	popl	%eax
	popl	%ecx

	pushl	$PCB_SAVEFPU_SIZE
	leal	PCB_SAVEFPU(%ecx),%ecx
	pushl	%ecx
	pushl	%eax
	call	bcopy
	addl	$12,%esp
#endif	/* NNPX > 0 */
1:
	ret
/*
 *  cpu_idle_restore()	(current thread in %eax on entry)
 *
 *	Don't bother setting up any regs other than %ebp so backtraces
 *	don't die.  This restore function is used to bootstrap into the
 *	cpu_idle() LWKT only, after that cpu_lwkt_*() will be used for
 *	normal switches.
 *
 *	If we are an AP we have to call ap_init() before jumping to
 *	cpu_idle().  ap_init() will synchronize with the BP and finish
 *	setting up various ncpu-dependent globaldata fields.  This may
 *	happen on UP as well as SMP if we happen to be simulating multiple
 *	cpus.
 */
ENTRY(cpu_idle_restore)
/*
 *  cpu_kthread_restore()	(current thread is %eax on entry)
 *
 *	Don't bother setting up any regs other than %ebp so backtraces
 *	don't die.  This restore function is used to bootstrap into an
 *	LWKT based kernel thread only.  cpu_lwkt_switch() will be used
 *	for subsequent switches.
 *
 *	Since all of our context is on the stack we are reentrant and
 *	we can release our critical section and enable interrupts early.
 */
ENTRY(cpu_kthread_restore)
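	/*
	 * Build the kernel thread's initial frame: the thread function
	 * taken from PCB_ESI is entered with PCB_EBX as its argument,
	 * and the exit function popped off the stack becomes its return
	 * address.
	 */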
	movl	TD_PCB(%eax),%ebx
	movl	$0,%ebp
	subl	$TDPRI_CRIT,TD_PRI(%eax)
	sti
	popl	%edx		/* kthread exit function */
	pushl	PCB_EBX(%ebx)	/* argument to ESI function */
	pushl	%edx		/* set exit func as return address */
	movl	PCB_ESI(%ebx),%eax
	jmp	*%eax
/*
 *  cpu_lwkt_switch()
 *
 *	Standard LWKT switching function.  Only non-scratch registers are
 *	saved and we don't bother with the MMU state or anything else.
 *
 *	This function is always called while in a critical section.
 */
ENTRY(cpu_lwkt_switch)
	movl	PCPU(curthread),%ecx
	pushl	$cpu_lwkt_restore
	movl	%esp,TD_SP(%ecx)
	movl	%eax,PCPU(curthread)
	movl	TD_SP(%eax),%esp
	ret
/*
 *  cpu_lwkt_restore()	(current thread in %eax on entry)
 *
 *	Standard LWKT restore function.  This function is always called
 *	while in a critical section.
 *
 *	Warning: due to preemption the restore function can be used to
 *	'return' to the original thread.  Interrupt disablement must be
 *	protected through the switch so we cannot run splz here.
 */
ENTRY(cpu_lwkt_restore)