2 * Copyright (c) 1990 The Regents of the University of California.
5 * This code is derived from software contributed to Berkeley by
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * $FreeBSD: src/sys/i386/i386/swtch.s,v 1.89.2.10 2003/01/23 03:36:24 ps Exp $
37 * $DragonFly: src/sys/platform/pc32/i386/swtch.s,v 1.6 2003/06/18 18:29:55 dillon Exp $
41 #include "opt_user_ldt.h"
43 #include <sys/rtprio.h>
45 #include <machine/asmacros.h>
46 #include <machine/ipl.h>
49 #include <machine/pmap.h>
50 #include <machine/smptests.h> /** GRAB_LOPRIO */
51 #include <machine/apic.h>
52 #include <machine/lock.h>
58 /*****************************************************************************/
60 /*****************************************************************************/
65 _hlt_vector: .long _cpu_idle /* pointer to halt routine */
69 #if defined(SWTCH_OPTIM_STATS)
70 .globl _swtch_optim_stats, _tlb_flush_count
71 _swtch_optim_stats: .long 0 /* number of _swtch_optims */
72 _tlb_flush_count: .long 0
78 * When no processes are on the runq, cpu_switch() branches to _idle
79 * to wait for something to come ready.
89 /* when called, we have the mplock, intr disabled */
90 /* use our idleproc's "context" */
95 #if defined(SWTCH_OPTIM_STATS)
96 decl _swtch_optim_stats
101 /* Keep space for nonexistent return addr, or profiling bombs */
102 movl $gd_idlestack_top-4, %ecx
106 /* update common_tss.tss_esp0 pointer */
107 movl %ecx, _common_tss + TSS_ESP0
110 btrl %esi, _private_tss
113 movl $gd_common_tssd, %edi
116 /* move correct tss descriptor into GDT slot, then reload tr */
117 movl _tss_gdt, %ebx /* entry in GDT */
122 movl $GPROC0_SEL*8, %esi /* GSEL(entry, SEL_KPL) */
129 * XXX callers of cpu_switch() do a bogus splclock(). Locking should
130 * be left to cpu_switch().
132 * NOTE: spl*() may only be called while we hold the MP lock (which
140 * _REALLY_ free the lock, no matter how deep the prior nesting.
141 * We will recover the nesting on the way out when we have a new
144 * XXX: we had damn well better be sure we had it before doing this!
146 movl $FREE_LOCK, %eax
149 /* do NOT have lock, intrs disabled */
165 * Handle page-zeroing in the idle loop. Called with interrupts
166 * disabled and the MP lock released. Inside vm_page_zero_idle
167 * we enable interrupts and grab the mplock as required.
169 cmpl $0,_do_page_zero_idle
172 call _vm_page_zero_idle /* internal locking */
177 /* enable intrs for a halt */
178 movl $0, lapic_tpr /* 1st candidate for an INT */
179 call *_hlt_vector /* wait for interrupt */
184 * Note that interrupts must be enabled while obtaining the MP lock
185 * in order to be able to take IPI's while blocked.
189 movl $LOPRIO_LEVEL, lapic_tpr /* arbitrate for INTs */
196 CROSSJUMP(jnz, sw1a, jz)
202 movl $HIDENAME(tmpstk),%esp
203 #if defined(OVERLY_CONSERVATIVE_PTD_MGMT)
204 #if defined(SWTCH_OPTIM_STATS)
205 incl _swtch_optim_stats
211 #if defined(SWTCH_OPTIM_STATS)
212 decl _swtch_optim_stats
213 incl _tlb_flush_count
219 /* update common_tss.tss_esp0 pointer */
220 movl %esp, _common_tss + TSS_ESP0
223 btrl %esi, _private_tss
226 movl $_common_tssd, %edi
228 /* move correct tss descriptor into GDT slot, then reload tr */
229 movl _tss_gdt, %ebx /* entry in GDT */
234 movl $GPROC0_SEL*8, %esi /* GSEL(entry, SEL_KPL) */
241 * XXX callers of cpu_switch() do a bogus splclock(). Locking should
242 * be left to cpu_switch().
251 CROSSJUMP(jnz, sw1a, jz)
252 #ifdef DEVICE_POLLING
254 #else /* standard code */
255 call _vm_page_zero_idle
259 call *_hlt_vector /* wait for interrupt */
264 CROSSJUMPTARGET(_idle)
271 hlt /* XXX: until a wakeup IPI */
282 /* switch to new process. first, save context as needed */
284 movl TD_PROC(%ecx),%ecx
286 /* if no process to save, don't bother */
291 movb P_ONCPU(%ecx), %al /* save "last" cpu */
292 movb %al, P_LASTCPU(%ecx)
293 movb $0xff, P_ONCPU(%ecx) /* "leave" the cpu */
295 movl P_VMSPACE(%ecx), %edx
301 btrl %eax, VM_PMAP+PM_ACTIVE(%edx)
304 movl TD_PCB(%edx),%edx
306 movl (%esp),%eax /* Hardware registers */
307 movl %eax,PCB_EIP(%edx)
308 movl %ebx,PCB_EBX(%edx)
309 movl %esp,PCB_ESP(%edx)
310 movl %ebp,PCB_EBP(%edx)
311 movl %esi,PCB_ESI(%edx)
312 movl %edi,PCB_EDI(%edx)
313 movl %gs,PCB_GS(%edx)
315 /* test if debug registers should be saved */
316 movb PCB_FLAGS(%edx),%al
318 jz 1f /* no, skip over */
319 movl %dr7,%eax /* yes, do the save */
320 movl %eax,PCB_DR7(%edx)
321 andl $0x0000fc00, %eax /* disable all watchpoints */
324 movl %eax,PCB_DR6(%edx)
326 movl %eax,PCB_DR3(%edx)
328 movl %eax,PCB_DR2(%edx)
330 movl %eax,PCB_DR1(%edx)
332 movl %eax,PCB_DR0(%edx)
337 /* XXX FIXME: we should be saving the local APIC TPR */
339 cmpl $FREE_LOCK, %eax /* is it free? */
340 je badsw4 /* yes, bad medicine! */
341 #endif /* DIAGNOSTIC */
342 andl $COUNT_FIELD, %eax /* clear CPU portion */
343 movl %eax, PCB_MPNEST(%edx) /* store it */
347 /* have we used fp, and need a save? */
348 movl P_THREAD(%ecx),%ecx
351 addl $PCB_SAVEFPU,%edx /* h/w bugs make saving complicated */
353 call _npxsave /* do it in a big C function */
356 /* %ecx,%edx trashed */
357 #endif /* NNPX > 0 */
360 * out of processes, set curthread to the current cpu's
361 * idlethread. Note that idlethread.td_proc will be NULL.
364 movl $gd_idlethread, %edi
367 movl $_idlethread, %edi
371 /* save is done, now choose a new process or idle */
376 /* Stop scheduling if smp_active goes zero and we are not BSP */
380 CROSSJUMP(je, _idle, jne) /* wind down */
385 call _chooseproc /* trash ecx, edx, ret eax*/
387 CROSSJUMP(je, _idle, jne) /* if no proc, idle */
391 andl $~AST_RESCHED,_astpending
394 cmpl %eax,P_WCHAN(%ecx)
396 cmpb $SRUN,P_STAT(%ecx)
399 movl P_THREAD(%ecx),%edx
400 movl TD_PCB(%edx),%edx
402 #if defined(SWTCH_OPTIM_STATS)
403 incl _swtch_optim_stats
405 /* switch address space */
407 cmpl PCB_CR3(%edx),%ebx
409 #if defined(SWTCH_OPTIM_STATS)
410 decl _swtch_optim_stats
411 incl _tlb_flush_count
413 movl PCB_CR3(%edx),%ebx
422 cmpl $0, PCB_EXT(%edx) /* has pcb extension? */
424 btsl %esi, _private_tss /* mark use of private tss */
425 movl PCB_EXT(%edx), %edi /* new tss descriptor */
430 * update common_tss.tss_esp0 pointer. This is the supervisor
431 * stack pointer on entry from user mode. Since the pcb is
432 * at the top of the supervisor stack esp0 starts just below it.
433 * We leave enough space for vm86 (16 bytes).
436 movl %ebx, _common_tss + TSS_ESP0
438 btrl %esi, _private_tss
441 movl $gd_common_tssd, %edi
444 movl $_common_tssd, %edi
447 /* move correct tss descriptor into GDT slot, then reload tr */
448 movl _tss_gdt, %ebx /* entry in GDT */
453 movl $GPROC0_SEL*8, %esi /* GSEL(entry, SEL_KPL) */
456 movl P_VMSPACE(%ecx), %ebx
462 btsl %eax, VM_PMAP+PM_ACTIVE(%ebx)
464 /* restore context */
465 movl PCB_EBX(%edx),%ebx
466 movl PCB_ESP(%edx),%esp
467 movl PCB_EBP(%edx),%ebp
468 movl PCB_ESI(%edx),%esi
469 movl PCB_EDI(%edx),%edi
470 movl PCB_EIP(%edx),%eax
474 #ifdef GRAB_LOPRIO /* hold LOPRIO for INTs */
478 andl $~APIC_TPR_PRIO, lapic_tpr
479 #endif /** CHEAP_TPR */
480 #endif /** GRAB_LOPRIO */
482 movb %al, P_ONCPU(%ecx)
484 movl P_THREAD(%ecx),%ecx /* ecx = thread */
485 movl %ecx, _curthread
486 movl TD_PROC(%ecx),%ecx /* YYY does %ecx need to be restored? */
489 movl _cpu_lockid, %eax
490 orl PCB_MPNEST(%edx), %eax /* add next count from PROC */
491 movl %eax, _mp_lock /* load the mp_lock */
492 /* XXX FIXME: we should be restoring the local APIC TPR */
496 cmpl $0, PCB_USERLDT(%edx)
498 movl __default_ldt,%eax
499 cmpl _currentldt,%eax
502 movl %eax,_currentldt
510 /* This must be done after loading the user LDT. */
511 .globl cpu_switch_load_gs
513 movl PCB_GS(%edx),%gs
515 /* test if debug registers should be restored */
516 movb PCB_FLAGS(%edx),%al
518 jz 1f /* no, skip over */
519 movl PCB_DR6(%edx),%eax /* yes, do the restore */
521 movl PCB_DR3(%edx),%eax
523 movl PCB_DR2(%edx),%eax
525 movl PCB_DR1(%edx),%eax
527 movl PCB_DR0(%edx),%eax
529 movl %dr7,%eax /* load dr7 so as not to disturb */
530 andl $0x0000fc00,%eax /* reserved bits */
532 movl PCB_DR7(%edx),%ebx
533 andl $~0x0000fc00,%ebx
542 CROSSJUMPTARGET(sw1a)
549 sw0_1: .asciz "cpu_switch: has wchan"
555 sw0_2: .asciz "cpu_switch: not SRUN"
558 #if defined(SMP) && defined(DIAGNOSTIC)
563 sw0_4: .asciz "cpu_switch: do not have lock"
564 #endif /* SMP && DIAGNOSTIC */
568 * Update pcb, saving current processor state.
574 /* caller's return address - child won't execute this routine */
576 movl %eax,PCB_EIP(%ecx)
579 movl %eax,PCB_CR3(%ecx)
581 movl %ebx,PCB_EBX(%ecx)
582 movl %esp,PCB_ESP(%ecx)
583 movl %ebp,PCB_EBP(%ecx)
584 movl %esi,PCB_ESI(%ecx)
585 movl %edi,PCB_EDI(%ecx)
586 movl %gs,PCB_GS(%ecx)
590 * If npxthread == NULL, then the npx h/w state is irrelevant and the
591 * state had better already be in the pcb. This is true for forks
592 * but not for dumps (the old book-keeping with FP flags in the pcb
593 * always lost for dumps because the dump pcb has 0 flags).
595 * If npxthread != NULL, then we have to save the npx h/w state to
596 * npxthread's pcb and copy it to the requested pcb, or save to the
597 * requested pcb and reload. Copying is easier because we would
598 * have to handle h/w bugs for reloading. We used to lose the
599 * parent's npx state for forks by forgetting to reload.
606 movl TD_PCB(%eax),%eax
607 leal PCB_SAVEFPU(%eax),%eax
615 pushl $PCB_SAVEFPU_SIZE
616 leal PCB_SAVEFPU(%ecx),%ecx
621 #endif /* NNPX > 0 */