/*
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 * LWKT threads Copyright (c) 2003 Matthew Dillon
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/swtch.s,v 1.89.2.10 2003/01/23 03:36:24 ps Exp $
 * $DragonFly: src/sys/platform/pc32/i386/swtch.s,v 1.29 2003/12/20 05:52:26 dillon Exp $
 */
#include <sys/rtprio.h>

#include <machine/asmacros.h>
#include <machine/ipl.h>
#include <machine/pmap.h>
#include <machine/smptests.h>		/** GRAB_LOPRIO */
#include <machine/apic.h>
#include <machine/lock.h>

#include "assym.s"
#ifdef SMP
#define MPLOCKED	lock ;
#else
#define MPLOCKED
#endif
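
/*
 * Optional switch statistics (SWTCH_OPTIM_STATS): swtch_optim_stats
 * counts context switches that were able to skip the %cr3 reload (and
 * thus the TLB flush), tlb_flush_count counts the switches that had to
 * reload %cr3.  See cpu_heavy_restore below.
 */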
#if defined(SWTCH_OPTIM_STATS)
	.globl	swtch_optim_stats, tlb_flush_count
swtch_optim_stats:	.long	0		/* number of _swtch_optims */
tlb_flush_count:	.long	0
#endif
/*
 *  cpu_heavy_switch(next_thread)
 *
 *	Switch from the current thread to a new thread.  This entry
 *	is normally called via the thread->td_switch function, and will
 *	only be called when the current thread is a heavy weight process.
 *
 *	Some instructions have been reordered to reduce pipeline stalls.
 *
 *	YYY disable interrupts once giant is removed.
 */
ENTRY(cpu_heavy_switch)
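	/*
	 * Save general regs into the outgoing thread's pcb.
	 */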
	movl	PCPU(curthread),%ecx
	movl	(%esp),%eax			/* (reorder optimization) */
	movl	TD_PCB(%ecx),%edx		/* EDX = PCB */
	movl	%eax,PCB_EIP(%edx)		/* return PC may be modified */
	movl	%ebx,PCB_EBX(%edx)
	movl	%esp,PCB_ESP(%edx)
	movl	%ebp,PCB_EBP(%edx)
	movl	%esi,PCB_ESI(%edx)
	movl	%edi,PCB_EDI(%edx)
	movl	%gs,PCB_GS(%edx)
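
	/*
	 * The locked btrl below atomically clears this cpu's bit in the
	 * old process's pmap PM_ACTIVE mask (we do not hold the MP lock
	 * in the switch code).
	 */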
	movl	%ecx,%ebx			/* EBX = curthread */
	movl	TD_PROC(%ecx),%ecx
	movl	PCPU(cpuid), %eax
	movl	P_VMSPACE(%ecx), %ecx		/* ECX = vmspace */
	MPLOCKED btrl	%eax, VM_PMAP+PM_ACTIVE(%ecx)

	/*
	 * Push the LWKT switch restore function, which resumes a heavy
	 * weight process.  Note that the LWKT switcher is based on
	 * TD_SP, while the heavy weight process switcher is based on
	 * PCB_ESP.  TD_SP is usually two ints pushed relative to
	 * PCB_ESP.  We push the flags for later restore by cpu_heavy_restore.
	 */
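	/*
	 * Resulting stack layout (sketch; PCB_ESP was saved above and
	 * points at our caller's return address):
	 *
	 *	PCB_ESP-4:	saved eflags		(pushfl)
	 *	PCB_ESP-8:	&cpu_heavy_restore	<- TD_SP
	 */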
	pushfl
	pushl	$cpu_heavy_restore
	movl	%esp,TD_SP(%ebx)

	/*
	 * Save debug regs if necessary
	 */
	movb	PCB_FLAGS(%edx),%al
	andb	$PCB_DBREGS,%al
	jz	1f				/* no, skip over */
	movl	%dr7,%eax			/* yes, do the save */
	movl	%eax,PCB_DR7(%edx)
	andl	$0x0000fc00, %eax		/* disable all watchpoints */
	movl	%eax,%dr7
	movl	%dr6,%eax
	movl	%eax,PCB_DR6(%edx)
	movl	%dr3,%eax
	movl	%eax,PCB_DR3(%edx)
	movl	%dr2,%eax
	movl	%eax,PCB_DR2(%edx)
	movl	%dr1,%eax
	movl	%eax,PCB_DR1(%edx)
	movl	%dr0,%eax
	movl	%eax,PCB_DR0(%edx)
1:

#if NNPX > 0
	/*
	 * Save the FP state if we have used the FP.  Note that calling
	 * npxsave will NULL out PCPU(npxthread).
	 */
	cmpl	%ebx,PCPU(npxthread)
	jne	1f
	addl	$PCB_SAVEFPU,%edx
	pushl	%edx
	call	npxsave				/* do it in a big C function */
	addl	$4,%esp				/* EAX, ECX, EDX trashed */
1:
#endif	/* NNPX > 0 */

	/*
	 * Switch to the next thread, which was passed as an argument
	 * to cpu_heavy_switch().  Due to the eflags and switch-restore
	 * function we pushed, the argument is at 12(%esp).  Set the current
	 * thread, load the stack pointer, and 'ret' into the switch-restore
	 * function.
	 *
	 * The switch restore function expects the new thread to be in %eax
	 * and the old one to be in %ebx.
	 *
	 * There is a one-instruction window where curthread is the new
	 * thread but %esp still points to the old thread's stack, but
	 * we are protected by a critical section so it is ok.
	 */
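	/*
	 * Old thread's stack at this point (sketch):
	 *
	 *	 0(%esp)	&cpu_heavy_restore
	 *	 4(%esp)	saved eflags
	 *	 8(%esp)	return address into our caller
	 *	12(%esp)	next_thread argument
	 */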
	movl	12(%esp),%eax		/* EAX = newtd, EBX = oldtd */
	movl	%eax,PCPU(curthread)
	movl	TD_SP(%eax),%esp
	ret

/*
 *  cpu_exit_switch()
 *
 *	The switch function is changed to this when a thread is going away
 *	for good.  We have to ensure that the MMU state is not cached, and
 *	we don't bother saving the existing thread state before switching.
 *
 *	At this point we are in a critical section and this cpu owns the
 *	thread's token, which serves as an interlock until the switchout is
 *	complete.
 */
ENTRY(cpu_exit_switch)
	/*
	 * Get us out of the vmspace
	 */
	movl	IdlePTD,%ecx
	movl	%cr3,%eax
	cmpl	%ecx,%eax
	je	1f
	movl	%ecx,%cr3
1:
	movl	PCPU(curthread),%ebx

	/*
	 * Switch to the next thread.  RET into the restore function, which
	 * expects the new thread in EAX and the old in EBX.
	 *
	 * There is a one-instruction window where curthread is the new
	 * thread but %esp still points to the old thread's stack, but
	 * we are protected by a critical section so it is ok.
	 */
	movl	4(%esp),%eax
	movl	%eax,PCPU(curthread)
	movl	TD_SP(%eax),%esp
	ret

/*
 *  cpu_heavy_restore()	(current thread in %eax on entry)
 *
 *	Restore the thread after an LWKT switch.  This entry is normally
 *	called via the LWKT switch restore function, which was pulled
 *	off the thread stack and jumped to.
 *
 *	This entry is only called if the thread was previously saved
 *	using cpu_heavy_switch() (the heavy weight process thread switcher),
 *	or when a new process is initially scheduled.  The first thing we
 *	do is clear the TDF_RUNNING bit in the old thread and set it in the
 *	new thread.
 *
 *	YYY theoretically we do not have to restore everything here, a lot
 *	of this junk can wait until we return to usermode.  But for now
 *	we restore everything.
 *
 *	YYY the PCB crap is really crap, it makes startup a bitch because
 *	we can't switch away.
 *
 *	YYY note: spl check is done in mi_switch when it splx()'s.
 */
ENTRY(cpu_heavy_restore)
	popfl
	movl	TD_PCB(%eax),%edx		/* EDX = PCB */
	movl	TD_PROC(%eax),%ecx

	/*
	 * A heavy weight process will normally be in an SRUN state
	 * but can also be preempted while it is entering a SZOMB
	 * (zombie) state.
	 */
	cmpb	$SRUN,P_STAT(%ecx)
	je	1f
	cmpb	$SZOMB,P_STAT(%ecx)
	jne	badsw2
1:
#if defined(SWTCH_OPTIM_STATS)
	incl	_swtch_optim_stats
#endif
	/*
	 * Tell the pmap that our cpu is using the VMSPACE now.  We cannot
	 * safely test/reload %cr3 until after we have set the bit in the
	 * pmap (remember, we do not hold the MP lock in the switch code).
	 */
	movl	P_VMSPACE(%ecx), %ecx		/* ECX = vmspace */
	movl	PCPU(cpuid), %esi
	MPLOCKED btsl	%esi, VM_PMAP+PM_ACTIVE(%ecx)

	/*
	 * Restore the MMU address space.  If it is the same as the last
	 * thread we don't have to invalidate the tlb (i.e. reload cr3).
	 * YYY which naturally also means that the PM_ACTIVE bit had better
	 * already have been set before we set it above, check? YYY
	 */
	movl	PCB_CR3(%edx),%ecx
	movl	%cr3,%esi
	cmpl	%ecx,%esi
	je	4f
#if defined(SWTCH_OPTIM_STATS)
	decl	_swtch_optim_stats
	incl	_tlb_flush_count
#endif
	movl	%ecx,%cr3
4:

	/*
	 * Clear TDF_RUNNING flag in old thread only after cleaning up
	 * %cr3.  The target thread is already protected by being TDF_RUNQ
	 * so setting TDF_RUNNING isn't as big a deal.
	 */
	andl	$~TDF_RUNNING,TD_FLAGS(%ebx)
	orl	$TDF_RUNNING,TD_FLAGS(%eax)

	/*
	 * Deal with the PCB extension, restore the private tss
	 */
	movl	PCB_EXT(%edx),%edi		/* check for a PCB extension */
	movl	$1,%ebx				/* maybe mark use of a private tss */

	/*
	 * Going back to the common_tss.  We may need to update TSS_ESP0
	 * which sets the top of the supervisor stack when entering from
	 * usermode.  The PCB is at the top of the stack but we need another
	 * 16 bytes to take vm86 into account.
	 */
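	/*
	 * (The extra 16 bytes leave room for the four vm86 segment
	 * registers that a vm86 trapframe pushes above the normal frame,
	 * so ESP0 is set to the pcb address minus 16.)
	 */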
	leal	-16(%edx),%ebx			/* ESP0 = pcb - 16 */
	movl	%ebx, PCPU(common_tss) + TSS_ESP0

	cmpl	$0,PCPU(private_tss)		/* don't have to reload if */
	je	3f				/* already using the common TSS */

	subl	%ebx,%ebx			/* unmark use of private tss */

	/*
	 * Get the address of the common TSS descriptor for the ltr.
	 * There is no way to get the address of a segment-accessed variable
	 * so we store a self-referential pointer at the base of the per-cpu
	 * data area and add the appropriate offset.
	 */
	movl	$gd_common_tssd, %edi

	/*
	 * Move the correct TSS descriptor into the GDT slot, then reload
	 * tr.
	 */
	movl	%ebx,PCPU(private_tss)		/* mark/unmark private tss */
	movl	PCPU(tss_gdt), %ebx		/* entry in GDT */
	movl	$GPROC0_SEL*8, %esi		/* GSEL(entry, SEL_KPL) */
	ltr	%si
3:

	/*
	 * Restore general registers.
	 */
	movl	PCB_EBX(%edx),%ebx
	movl	PCB_ESP(%edx),%esp
	movl	PCB_EBP(%edx),%ebp
	movl	PCB_ESI(%edx),%esi
	movl	PCB_EDI(%edx),%edi
	movl	PCB_EIP(%edx),%eax
	movl	%eax,(%esp)			/* PCB_EIP may have been changed (e.g. by fork) */

	/*
	 * Restore the user LDT if we have one
	 */
	cmpl	$0, PCB_USERLDT(%edx)
	jnz	1f
	movl	_default_ldt,%eax
	cmpl	PCPU(currentldt),%eax
	je	2f
	lldt	_default_ldt
	movl	%eax,PCPU(currentldt)
	jmp	2f
1:	pushl	%edx
	call	set_user_ldt
	popl	%edx
2:

	/*
	 * Restore the %gs segment register, which must be done after
	 * loading the user LDT.  Since user processes can modify the
	 * register via procfs, this may result in a fault which is
	 * detected by checking the fault address against cpu_switch_load_gs
	 * in i386/i386/trap.c
	 */
	.globl	cpu_switch_load_gs
cpu_switch_load_gs:
	movl	PCB_GS(%edx),%gs

	/*
	 * Restore the DEBUG register state if necessary.
	 */
	movb	PCB_FLAGS(%edx),%al
	andb	$PCB_DBREGS,%al
	jz	1f				/* no, skip over */
	movl	PCB_DR6(%edx),%eax		/* yes, do the restore */
	movl	%eax,%dr6
	movl	PCB_DR3(%edx),%eax
	movl	%eax,%dr3
	movl	PCB_DR2(%edx),%eax
	movl	%eax,%dr2
	movl	PCB_DR1(%edx),%eax
	movl	%eax,%dr1
	movl	PCB_DR0(%edx),%eax
	movl	%eax,%dr0
	movl	%dr7,%eax			/* load dr7 so as not to disturb */
	andl	$0x0000fc00,%eax		/* reserved bits */
	pushl	%ebx
	movl	PCB_DR7(%edx),%ebx
	andl	$~0x0000fc00,%ebx
	orl	%ebx,%eax
	popl	%ebx
	movl	%eax,%dr7
1:
	ret
badsw2:
	pushl	$sw0_2
	call	panic

sw0_2:	.asciz	"cpu_switch: not SRUN"

/*
 * savectx(pcb)
 *
 * Update pcb, saving current processor state.
 */
ENTRY(savectx)
	/* fetch PCB */
	movl	4(%esp),%ecx

	/* caller's return address - child won't execute this routine */
	movl	(%esp),%eax
	movl	%eax,PCB_EIP(%ecx)

	movl	%cr3,%eax
	movl	%eax,PCB_CR3(%ecx)

	movl	%ebx,PCB_EBX(%ecx)
	movl	%esp,PCB_ESP(%ecx)
	movl	%ebp,PCB_EBP(%ecx)
	movl	%esi,PCB_ESI(%ecx)
	movl	%edi,PCB_EDI(%ecx)
	movl	%gs,PCB_GS(%ecx)

#if NNPX > 0
	/*
	 * If npxthread == NULL, then the npx h/w state is irrelevant and the
	 * state had better already be in the pcb.  This is true for forks
	 * but not for dumps (the old book-keeping with FP flags in the pcb
	 * always lost for dumps because the dump pcb has 0 flags).
	 *
	 * If npxthread != NULL, then we have to save the npx h/w state to
	 * npxthread's pcb and copy it to the requested pcb, or save to the
	 * requested pcb and reload.  Copying is easier because we would
	 * have to handle h/w bugs for reloading.  We used to lose the
	 * parent's npx state for forks by forgetting to reload.
	 */
	movl	PCPU(npxthread),%eax
	testl	%eax,%eax
	je	1f

	pushl	%ecx				/* preserve the target pcb */
	movl	TD_PCB(%eax),%eax
	leal	PCB_SAVEFPU(%eax),%eax
	pushl	%eax				/* remember the save area */
	pushl	%eax				/* argument to npxsave */
	call	npxsave
	addl	$4,%esp				/* pop the argument */
	popl	%eax				/* recover the save area */
	popl	%ecx				/* recover the target pcb */

	pushl	$PCB_SAVEFPU_SIZE
	leal	PCB_SAVEFPU(%ecx),%ecx
	pushl	%ecx				/* destination */
	pushl	%eax				/* source */
	call	bcopy
	addl	$12,%esp
1:
#endif	/* NNPX > 0 */

	ret

/*
 * cpu_idle_restore()	(current thread in %eax on entry) (one-time execution)
 *
 *	Don't bother setting up any regs other than %ebp so backtraces
 *	don't die.  This restore function is used to bootstrap into the
 *	cpu_idle() LWKT only, after that cpu_lwkt_*() will be used for
 *	subsequent switches.
 *
 *	Clear TDF_RUNNING in old thread only after we've cleaned up %cr3.
 *
 *	If we are an AP we have to call ap_init() before jumping to
 *	cpu_idle().  ap_init() will synchronize with the BP and finish
 *	setting up various ncpu-dependent globaldata fields.  This may
 *	happen on UP as well as SMP if we happen to be simulating multiple
 *	cpus.
 */
ENTRY(cpu_idle_restore)
	andl	$~TDF_RUNNING,TD_FLAGS(%ebx)
	orl	$TDF_RUNNING,TD_FLAGS(%eax)

/*
 * cpu_kthread_restore()	(current thread is %eax on entry) (one-time execution)
 *
 *	Don't bother setting up any regs other than %ebp so backtraces
 *	don't die.  This restore function is used to bootstrap into an
 *	LWKT based kernel thread only.  cpu_lwkt_switch() will be used
 *	for subsequent switches.
 *
 *	Since all of our context is on the stack we are reentrant and
 *	we can release our critical section and enable interrupts early.
 */
ENTRY(cpu_kthread_restore)
	movl	TD_PCB(%eax),%edx
	andl	$~TDF_RUNNING,TD_FLAGS(%ebx)
	orl	$TDF_RUNNING,TD_FLAGS(%eax)
	subl	$TDPRI_CRIT,TD_PRI(%eax)
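	/*
	 * The TDPRI_CRIT subtraction above releases the critical section
	 * held across the switch (see the reentrancy note in the header
	 * comment).
	 */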
	popl	%eax			/* kthread exit function */
	pushl	PCB_EBX(%edx)		/* argument to ESI function */
	pushl	%eax			/* set exit func as return address */
	movl	PCB_ESI(%edx),%eax
	jmp	*%eax

/*
 * cpu_lwkt_switch()
 *
 *	Standard LWKT switching function.  Only non-scratch registers are
 *	saved and we don't bother with the MMU state or anything else.
 *
 *	This function is always called while in a critical section.
 *
 *	There is a one-instruction window where curthread is the new
 *	thread but %esp still points to the old thread's stack, but
 *	we are protected by a critical section so it is ok.
 */
ENTRY(cpu_lwkt_switch)
	movl	4(%esp),%eax
	pushl	%ebp
	pushl	%ebx
	pushl	%esi
	pushl	%edi
	pushfl
	movl	PCPU(curthread),%ebx
	pushl	$cpu_lwkt_restore
	movl	%esp,TD_SP(%ebx)
	movl	%eax,PCPU(curthread)
	movl	TD_SP(%eax),%esp

	/*
	 * eax contains new thread, ebx contains old thread.
	 */
	ret

/*
 * cpu_lwkt_restore()	(current thread in %eax on entry)
 *
 *	Standard LWKT restore function.  This function is always called
 *	while in a critical section.
 *
 *	Warning: due to preemption the restore function can be used to
 *	'return' to the original thread.  Interrupt disablement must be
 *	protected through the switch so we cannot run splz here.
 *
 *	YYY we theoretically do not need to load IdlePTD into cr3, but if
 *	so we need a way to detect when the PTD we are using is being
 *	deleted due to a process exiting.
 */
ENTRY(cpu_lwkt_restore)
	movl	IdlePTD,%ecx	/* YYY borrow but beware desched/cpuchg/exit */
	movl	%cr3,%edx
	cmpl	%ecx,%edx
	je	1f
	movl	%ecx,%cr3
1:
	andl	$~TDF_RUNNING,TD_FLAGS(%ebx)
	orl	$TDF_RUNNING,TD_FLAGS(%eax)
	popfl
	popl	%edi
	popl	%esi
	popl	%ebx
	popl	%ebp
	ret