 * Copyright (c) 1990 The Regents of the University of California.
 * LWKT threads Copyright (c) 2003 Matthew Dillon
 * This code is derived from software contributed to Berkeley by
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * $FreeBSD: src/sys/i386/i386/swtch.s,v 1.89.2.10 2003/01/23 03:36:24 ps Exp $
 * $DragonFly: src/sys/platform/pc32/i386/swtch.s,v 1.14 2003/06/27 03:30:37 dillon Exp $
#include "opt_user_ldt.h"
#include <sys/rtprio.h>
#include <machine/asmacros.h>
#include <machine/ipl.h>
#include <machine/pmap.h>
#include <machine/smptests.h>		/** GRAB_LOPRIO */
#include <machine/apic.h>
#include <machine/lock.h>
#if defined(SWTCH_OPTIM_STATS)
	.globl	_swtch_optim_stats, _tlb_flush_count
_swtch_optim_stats:	.long	0	/* number of _swtch_optims */
_tlb_flush_count:	.long	0
 * cpu_heavy_switch(next_thread)
 *	Switch from the current thread to a new thread.  This entry
 *	is normally called via the thread->td_switch function, and will
 *	only be called when the current thread is a heavy weight process.
 *	YYY disable interrupts once giant is removed.
ENTRY(cpu_heavy_switch)
	movl	TD_PROC(%ecx),%ecx
	movb	P_ONCPU(%ecx),%al	/* save "last" cpu */
	movb	%al,P_LASTCPU(%ecx)
	movb	$0xff,P_ONCPU(%ecx)	/* "leave" the cpu */
	movl	P_VMSPACE(%ecx),%edx
	btrl	%eax,VM_PMAP+PM_ACTIVE(%edx)
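	/*
	 * note: clearing this cpu's bit in the old vmspace's PM_ACTIVE
	 * mask tells the pmap layer that we are no longer using that
	 * address space, so cross-cpu TLB invalidations can skip us.
	 */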
	movl	P_THREAD(%ecx),%edx
	movl	TD_PCB(%edx),%edx
	movl	(%esp),%eax		/* Hardware registers */
	movl	%eax,PCB_EIP(%edx)
	movl	%ebx,PCB_EBX(%edx)
	movl	%esp,PCB_ESP(%edx)
	movl	%ebp,PCB_EBP(%edx)
	movl	%esi,PCB_ESI(%edx)
	movl	%edi,PCB_EDI(%edx)
	movl	%gs,PCB_GS(%edx)
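	/*
	 * note: only the callee-saved registers (%ebx, %esp, %ebp,
	 * %esi, %edi) plus %gs need saving here; the i386 calling
	 * convention already allows %eax, %ecx and %edx to be
	 * clobbered, and the return %eip was fetched from (%esp) above.
	 */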
	 * Push the LWKT switch restore function, which resumes a heavy
	 * weight process.  Note that the LWKT switcher is based on
	 * TD_SP, while the heavy weight process switcher is based on
	 * PCB_ESP.  TD_SP is usually one pointer pushed relative to
	movl	P_THREAD(%ecx),%eax
	pushl	$cpu_heavy_restore
	movl	%esp,TD_SP(%eax)
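	/*
	 * The thread's stack now looks like this (sketch):
	 *
	 *	TD_SP ->	&cpu_heavy_restore
	 *			caller's frame / return %eip
	 *
	 * so a later 'movl TD_SP(thread),%esp; ret' pops the restore
	 * function address and jumps straight into it.
	 */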
	 * Save debug regs if necessary
	movb	PCB_FLAGS(%edx),%al
	jz	1f			/* no, skip over */
	movl	%dr7,%eax		/* yes, do the save */
	movl	%eax,PCB_DR7(%edx)
	andl	$0x0000fc00,%eax	/* disable all watchpoints */
	movl	%eax,PCB_DR6(%edx)
	movl	%eax,PCB_DR3(%edx)
	movl	%eax,PCB_DR2(%edx)
	movl	%eax,PCB_DR1(%edx)
	movl	%eax,PCB_DR0(%edx)
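	/*
	 * note: the debug registers have no memory-operand form, so
	 * each %drN value is staged through %eax on its way to the pcb.
	 */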
	 * Save BGL nesting count.  Note that we hold the BGL with a
	 * count of at least 1 on entry to cpu_heavy_switch().
	/* XXX FIXME: we should be saving the local APIC TPR */
	cmpl	$FREE_LOCK,%eax		/* is it free? */
	je	badsw4			/* yes, bad medicine! */
#endif	/* DIAGNOSTIC */
	andl	$COUNT_FIELD,%eax	/* clear CPU portion */
	movl	%eax,PCB_MPNEST(%edx)	/* store it */
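	/*
	 * note: the mp_lock word packs the owning cpu id together with
	 * the recursion count; masking with COUNT_FIELD strips the cpu
	 * portion so only the nesting depth lands in the pcb.
	 */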
	 * Save the FP state if we have used the FP.
	movl	P_THREAD(%ecx),%ecx
	addl	$PCB_SAVEFPU,%edx	/* h/w bugs make saving complicated */
	call	_npxsave		/* do it in a big C function */
	/* %ecx,%edx trashed */
#endif	/* NNPX > 0 */
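	/*
	 * note: the save is done in C (npxsave) precisely because of
	 * the h/w bugs mentioned above; it is reached only when this
	 * thread has actually used the FPU, otherwise the pcb copy is
	 * already current.
	 */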
	 * Switch to the next thread, which was passed as an argument
	 * to cpu_heavy_switch().  Due to the switch-restore function we pushed,
	 * the argument is at 8(%esp).  Set the current thread, load the
	 * stack pointer, and 'ret' into the switch-restore function.
	movl	TD_SP(%eax),%esp
 * The switch function is changed to this when a thread is going away
 * for good.  We have to ensure that the MMU state is not cached, and
 * we don't bother saving the existing thread state before switching.
 * At this point we are in a critical section and this cpu owns the
 * thread's token, which serves as an interlock until the switchout is
ENTRY(cpu_exit_switch)
	 * Get us out of the vmspace
	 * Switch to the next thread.
	movl	TD_SP(%eax),%esp
	 * We are now the next thread, set the exited flag and wakeup
	orl	$TDF_EXITED,TD_FLAGS(%ecx)
	pushl	%ecx			/* wakeup(oldthread) */
	popl	%eax		/* note: next thread expects curthread in %eax */
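	/*
	 * note: TDF_EXITED is set and the wakeup issued from the *new*
	 * thread's context, after we are safely off the old thread's
	 * stack, so the old stack cannot be reaped out from under us.
	 */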
 * Restore the next thread's state and resume it.  Note: the
 * restore function assumes that the next thread's address is
 * cpu_heavy_restore()	(current thread in %eax on entry)
 *	Restore the thread after an LWKT switch.  This entry is normally
 *	called via the LWKT switch restore function, which was pulled
 *	off the thread stack and jumped to.
 *	This entry is only called if the thread was previously saved
 *	using cpu_heavy_switch() (the heavy weight process thread switcher).
 *	YYY theoretically we do not have to restore everything here, a lot
 *	of this junk can wait until we return to usermode.  But for now
 *	we restore everything.
 *	YYY STI/CLI sequencing.
 *	YYY note: spl check is done in mi_switch when it splx()'s.
ENTRY(cpu_heavy_restore)
	/* interrupts are disabled */
	movl	TD_PCB(%eax),%edx
	movl	TD_PROC(%eax),%ecx
	cmpb	$SRUN,P_STAT(%ecx)
#if defined(SWTCH_OPTIM_STATS)
	incl	_swtch_optim_stats
	 * Restore the MMU address space
	cmpl	PCB_CR3(%edx),%ebx
#if defined(SWTCH_OPTIM_STATS)
	decl	_swtch_optim_stats
	incl	_tlb_flush_count
	movl	PCB_CR3(%edx),%ebx
	 * Deal with the PCB extension, restore the private tss
	cmpl	$0,PCB_EXT(%edx)	/* has pcb extension? */
	btsl	%esi,_private_tss	/* mark use of private tss */
	movl	PCB_EXT(%edx),%edi	/* new tss descriptor */
	 * update common_tss.tss_esp0 pointer.  This is the supervisor
	 * stack pointer on entry from user mode.  Since the pcb is
	 * at the top of the supervisor stack esp0 starts just below it.
	 * We leave enough space for vm86 (16 bytes).
	 * common_tss.tss_esp0 is needed when user mode traps into the
	movl	%ebx,_common_tss + TSS_ESP0
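	/*
	 * Top of the supervisor stack, per the comment above (sketch):
	 *
	 *	(high addresses)  pcb		<- TD_PCB
	 *			  16 byte vm86 hole
	 *	tss_esp0 ->	  trap frame is built down from here
	 */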
	btrl	%esi,_private_tss
	movl	$gd_common_tssd,%edi
	movl	$_common_tssd,%edi
	 * Move the correct TSS descriptor into the GDT slot, then reload
	 * tr.  YYY not sure what is going on here
	movl	_tss_gdt,%ebx		/* entry in GDT */
	movl	$GPROC0_SEL*8,%esi	/* GSEL(entry, SEL_KPL) */
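	/*
	 * note: %tr must be reloaded whenever the descriptor in its GDT
	 * slot is replaced; ltr also insists on a non-busy descriptor,
	 * which the freshly copied one is.
	 */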
	 * Tell the pmap that our cpu is using the VMSPACE now.
	movl	P_VMSPACE(%ecx),%ebx
	btsl	%eax,VM_PMAP+PM_ACTIVE(%ebx)
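	/*
	 * note: mirror image of the btrl in cpu_heavy_switch(); setting
	 * our PM_ACTIVE bit re-enrolls this cpu in the new vmspace's
	 * TLB invalidations.
	 */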
	 * Restore general registers.
	movl	PCB_EBX(%edx),%ebx
	movl	PCB_ESP(%edx),%esp
	movl	PCB_EBP(%edx),%ebp
	movl	PCB_ESI(%edx),%esi
	movl	PCB_EDI(%edx),%edi
	movl	PCB_EIP(%edx),%eax
	 * SMP ickyness to direct interrupts.
#ifdef GRAB_LOPRIO			/* hold LOPRIO for INTs */
	andl	$~APIC_TPR_PRIO,lapic_tpr
#endif /** CHEAP_TPR */
#endif /** GRAB_LOPRIO */
	movb	%al,P_ONCPU(%ecx)
	 * Restore the BGL nesting count.  Note that the nesting count will
	movl	_cpu_lockid,%eax
	orl	PCB_MPNEST(%edx),%eax	/* add next count from PROC */
	movl	%eax,_mp_lock		/* load the mp_lock */
	/* XXX FIXME: we should be restoring the local APIC TPR */
	 * Restore the user LDT if we have one
	cmpl	$0,PCB_USERLDT(%edx)
	movl	__default_ldt,%eax
	cmpl	_currentldt,%eax
	movl	%eax,_currentldt
	 * Restore the %gs segment register, which must be done after
	 * loading the user LDT.  Since user processes can modify the
	 * register via procfs, this may result in a fault which is
	 * detected by checking the fault address against cpu_switch_load_gs
	 * in i386/i386/trap.c
	.globl	cpu_switch_load_gs
	movl	PCB_GS(%edx),%gs
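	/*
	 * note: the cpu_switch_load_gs label must immediately precede
	 * the %gs load so trap() can match the faulting %eip exactly
	 * and recover instead of panicking.
	 */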
	 * Restore the DEBUG register state if necessary.
	movb	PCB_FLAGS(%edx),%al
	jz	1f			/* no, skip over */
	movl	PCB_DR6(%edx),%eax	/* yes, do the restore */
	movl	PCB_DR3(%edx),%eax
	movl	PCB_DR2(%edx),%eax
	movl	PCB_DR1(%edx),%eax
	movl	PCB_DR0(%edx),%eax
	movl	%dr7,%eax		/* load dr7 so as not to disturb */
	andl	$0x0000fc00,%eax	/* reserved bits */
	movl	PCB_DR7(%edx),%ebx
	andl	$~0x0000fc00,%ebx
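	/*
	 * note: dr7 is merged rather than loaded outright: the reserved
	 * bits (mask 0x0000fc00) of the live register are preserved and
	 * only the saved watchpoint control bits are restored.
	 */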
	 * Remove the heavy weight process from the heavy weight queue.
	 * This will also have the side effect of removing the thread from
	 * the run queue.  YYY temporary?
	 * LWKT threads stay on the run queue until explicitly removed.
	CROSSJUMPTARGET(sw1a)
sw0_1:	.asciz	"cpu_switch: has wchan"
sw0_2:	.asciz	"cpu_switch: not SRUN"
#if defined(SMP) && defined(DIAGNOSTIC)
sw0_4:	.asciz	"cpu_switch: do not have lock"
#endif /* SMP && DIAGNOSTIC */
string:	.asciz	"SWITCHING\n"
 * Update pcb, saving current processor state.
	/* caller's return address - child won't execute this routine */
	movl	%eax,PCB_EIP(%ecx)
	movl	%eax,PCB_CR3(%ecx)
	movl	%ebx,PCB_EBX(%ecx)
	movl	%esp,PCB_ESP(%ecx)
	movl	%ebp,PCB_EBP(%ecx)
	movl	%esi,PCB_ESI(%ecx)
	movl	%edi,PCB_EDI(%ecx)
	movl	%gs,PCB_GS(%ecx)
	 * If npxthread == NULL, then the npx h/w state is irrelevant and the
	 * state had better already be in the pcb.  This is true for forks
	 * but not for dumps (the old book-keeping with FP flags in the pcb
	 * always lost for dumps because the dump pcb has 0 flags).
	 * If npxthread != NULL, then we have to save the npx h/w state to
	 * npxthread's pcb and copy it to the requested pcb, or save to the
	 * requested pcb and reload.  Copying is easier because we would
	 * have to handle h/w bugs for reloading.  We used to lose the
	 * parent's npx state for forks by forgetting to reload.
	movl	TD_PCB(%eax),%eax
	leal	PCB_SAVEFPU(%eax),%eax
	pushl	$PCB_SAVEFPU_SIZE
	leal	PCB_SAVEFPU(%ecx),%ecx
#endif	/* NNPX > 0 */
 * cpu_idle_restore()	(current thread in %eax on entry)
 *	Don't bother setting up any regs other than %ebp so backtraces
 *	don't die.  This restore function is used to bootstrap into the
 *	cpu_idle() LWKT only, after that cpu_lwkt_*() will be used for
ENTRY(cpu_idle_restore)
 * cpu_kthread_restore()	(current thread in %eax on entry)
 *	Don't bother setting up any regs other than %ebp so backtraces
 *	don't die.  This restore function is used to bootstrap into an
 *	LWKT based kernel thread only.  cpu_lwkt_switch() will be used
ENTRY(cpu_kthread_restore)
	movl	TD_PCB(%eax),%ebx
	popl	%edx			/* kthread exit function */
	pushl	PCB_EBX(%ebx)		/* argument to ESI function */
	pushl	%edx			/* set exit func as return address */
	movl	PCB_ESI(%ebx),%eax
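	/*
	 * The stack is now rigged so the thread function in %eax sees a
	 * normal C frame (sketch):
	 *
	 *	(%esp)  ->	kthread exit function (fake return address)
	 *	4(%esp) ->	argument from PCB_EBX
	 *
	 * so simply returning from the thread function runs the exit
	 * function.
	 */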
 * Standard LWKT switching function.  Only non-scratch registers are
 * saved and we don't bother with the MMU state or anything else.
ENTRY(cpu_lwkt_switch)
	pushl	$cpu_lwkt_restore
	movl	%esp,TD_SP(%ecx)
	movl	TD_SP(%eax),%esp
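	/*
	 * note: save and restore are symmetric.  We pushed
	 * $cpu_lwkt_restore on our own stack before saving TD_SP, so
	 * the 'ret' that presumably follows pops whatever restore
	 * function the incoming thread pushed when it was switched out.
	 */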
 * cpu_lwkt_restore()	(current thread in %eax on entry)
ENTRY(cpu_lwkt_restore)
	movl	TD_MACH+MTD_CPL(%eax),%ecx	/* unmasked cpl? YYY too complex */
	cmpl	$0,_intr_nesting_level		/* don't stack too deeply */
	call	splz				/* execute unmasked ints */