2 * Copyright (c) 1990 The Regents of the University of California.
5 * This code is derived from software contributed to Berkeley by
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * $FreeBSD: src/sys/i386/i386/swtch.s,v 1.89.2.10 2003/01/23 03:36:24 ps Exp $
40 #include "opt_user_ldt.h"
42 #include <sys/rtprio.h>
44 #include <machine/asmacros.h>
45 #include <machine/ipl.h>
48 #include <machine/pmap.h>
49 #include <machine/smptests.h> /** GRAB_LOPRIO */
50 #include <machine/apic.h>
51 #include <machine/lock.h>
57 /*****************************************************************************/
59 /*****************************************************************************/
64 _hlt_vector: .long _cpu_idle /* pointer to halt routine */
68 #if defined(SWTCH_OPTIM_STATS)
69 .globl _swtch_optim_stats, _tlb_flush_count
70 _swtch_optim_stats: .long 0 /* number of _swtch_optims */
71 _tlb_flush_count: .long 0
77 * When no processes are on the runq, cpu_switch() branches to _idle
78 * to wait for something to come ready.
88 /* when called, we have the mplock, intr disabled */
89 /* use our idleproc's "context" */
94 #if defined(SWTCH_OPTIM_STATS)
95 decl _swtch_optim_stats
100 /* Keep space for nonexisting return addr, or profiling bombs */
101 movl $gd_idlestack_top-4, %ecx
105 /* update common_tss.tss_esp0 pointer */
106 movl %ecx, _common_tss + TSS_ESP0
109 btrl %esi, _private_tss
112 movl $gd_common_tssd, %edi
115 /* move correct tss descriptor into GDT slot, then reload tr */
116 movl _tss_gdt, %ebx /* entry in GDT */
121 movl $GPROC0_SEL*8, %esi /* GSEL(entry, SEL_KPL) */
128 * XXX callers of cpu_switch() do a bogus splclock(). Locking should
129 * be left to cpu_switch().
131 * NOTE: spl*() may only be called while we hold the MP lock (which
139 * _REALLY_ free the lock, no matter how deep the prior nesting.
140 * We will recover the nesting on the way out when we have a new
143 * XXX: we had damn well better be sure we had it before doing this!
145 movl $FREE_LOCK, %eax
148 /* do NOT have lock, intrs disabled */
164 * Handle page-zeroing in the idle loop. Called with interrupts
165 * disabled and the MP lock released. Inside vm_page_zero_idle
166 * we enable interrupts and grab the mplock as required.
168 cmpl $0,_do_page_zero_idle
171 call _vm_page_zero_idle /* internal locking */
176 /* enable intrs for a halt */
177 movl $0, lapic_tpr /* 1st candidate for an INT */
178 call *_hlt_vector /* wait for interrupt */
183 * Note that interrupts must be enabled while obtaining the MP lock
184 * in order to be able to take IPI's while blocked.
188 movl $LOPRIO_LEVEL, lapic_tpr /* arbitrate for INTs */
195 CROSSJUMP(jnz, sw1a, jz)
201 movl $HIDENAME(tmpstk),%esp
202 #if defined(OVERLY_CONSERVATIVE_PTD_MGMT)
203 #if defined(SWTCH_OPTIM_STATS)
204 incl _swtch_optim_stats
210 #if defined(SWTCH_OPTIM_STATS)
211 decl _swtch_optim_stats
212 incl _tlb_flush_count
218 /* update common_tss.tss_esp0 pointer */
219 movl %esp, _common_tss + TSS_ESP0
222 btrl %esi, _private_tss
225 movl $_common_tssd, %edi
227 /* move correct tss descriptor into GDT slot, then reload tr */
228 movl _tss_gdt, %ebx /* entry in GDT */
233 movl $GPROC0_SEL*8, %esi /* GSEL(entry, SEL_KPL) */
240 * XXX callers of cpu_switch() do a bogus splclock(). Locking should
241 * be left to cpu_switch().
250 CROSSJUMP(jnz, sw1a, jz)
251 #ifdef DEVICE_POLLING
253 #else /* standard code */
254 call _vm_page_zero_idle
258 call *_hlt_vector /* wait for interrupt */
263 CROSSJUMPTARGET(_idle)
270 hlt /* XXX: until a wakeup IPI */
281 /* switch to new process. first, save context as needed */
284 /* if no process to save, don't bother */
289 movb P_ONCPU(%ecx), %al /* save "last" cpu */
290 movb %al, P_LASTCPU(%ecx)
291 movb $0xff, P_ONCPU(%ecx) /* "leave" the cpu */
293 movl P_VMSPACE(%ecx), %edx
299 btrl %eax, VM_PMAP+PM_ACTIVE(%edx)
301 movl P_ADDR(%ecx),%edx
303 movl (%esp),%eax /* Hardware registers */
304 movl %eax,PCB_EIP(%edx)
305 movl %ebx,PCB_EBX(%edx)
306 movl %esp,PCB_ESP(%edx)
307 movl %ebp,PCB_EBP(%edx)
308 movl %esi,PCB_ESI(%edx)
309 movl %edi,PCB_EDI(%edx)
310 movl %gs,PCB_GS(%edx)
312 /* test if debug registers should be saved */
313 movb PCB_FLAGS(%edx),%al
315 jz 1f /* no, skip over */
316 movl %dr7,%eax /* yes, do the save */
317 movl %eax,PCB_DR7(%edx)
318 andl $0x0000fc00, %eax /* disable all watchpoints */
321 movl %eax,PCB_DR6(%edx)
323 movl %eax,PCB_DR3(%edx)
325 movl %eax,PCB_DR2(%edx)
327 movl %eax,PCB_DR1(%edx)
329 movl %eax,PCB_DR0(%edx)
334 /* XXX FIXME: we should be saving the local APIC TPR */
336 cmpl $FREE_LOCK, %eax /* is it free? */
337 je badsw4 /* yes, bad medicine! */
338 #endif /* DIAGNOSTIC */
339 andl $COUNT_FIELD, %eax /* clear CPU portion */
340 movl %eax, PCB_MPNEST(%edx) /* store it */
344 /* have we used fp, and need a save? */
347 addl $PCB_SAVEFPU,%edx /* h/w bugs make saving complicated */
349 call _npxsave /* do it in a big C function */
352 #endif /* NNPX > 0 */
354 movl $0,_curproc /* out of process */
356 /* save is done, now choose a new process or idle */
361 /* Stop scheduling if smp_active goes zero and we are not BSP */
365 CROSSJUMP(je, _idle, jne) /* wind down */
370 call _chooseproc /* trash ecx, edx, ret eax*/
372 CROSSJUMP(je, _idle, jne) /* if no proc, idle */
376 andl $~AST_RESCHED,_astpending
379 cmpl %eax,P_WCHAN(%ecx)
381 cmpb $SRUN,P_STAT(%ecx)
385 movl P_ADDR(%ecx),%edx
387 #if defined(SWTCH_OPTIM_STATS)
388 incl _swtch_optim_stats
390 /* switch address space */
392 cmpl PCB_CR3(%edx),%ebx
394 #if defined(SWTCH_OPTIM_STATS)
395 decl _swtch_optim_stats
396 incl _tlb_flush_count
398 movl PCB_CR3(%edx),%ebx
407 cmpl $0, PCB_EXT(%edx) /* has pcb extension? */
409 btsl %esi, _private_tss /* mark use of private tss */
410 movl PCB_EXT(%edx), %edi /* new tss descriptor */
414 /* update common_tss.tss_esp0 pointer */
415 movl %edx, %ebx /* pcb */
416 addl $(UPAGES * PAGE_SIZE - 16), %ebx
417 movl %ebx, _common_tss + TSS_ESP0
419 btrl %esi, _private_tss
422 movl $gd_common_tssd, %edi
425 movl $_common_tssd, %edi
428 /* move correct tss descriptor into GDT slot, then reload tr */
429 movl _tss_gdt, %ebx /* entry in GDT */
434 movl $GPROC0_SEL*8, %esi /* GSEL(entry, SEL_KPL) */
437 movl P_VMSPACE(%ecx), %ebx
443 btsl %eax, VM_PMAP+PM_ACTIVE(%ebx)
445 /* restore context */
446 movl PCB_EBX(%edx),%ebx
447 movl PCB_ESP(%edx),%esp
448 movl PCB_EBP(%edx),%ebp
449 movl PCB_ESI(%edx),%esi
450 movl PCB_EDI(%edx),%edi
451 movl PCB_EIP(%edx),%eax
455 #ifdef GRAB_LOPRIO /* hold LOPRIO for INTs */
459 andl $~APIC_TPR_PRIO, lapic_tpr
460 #endif /** CHEAP_TPR */
461 #endif /** GRAB_LOPRIO */
463 movb %al, P_ONCPU(%ecx)
466 movl %ecx, _curproc /* into next process */
469 movl _cpu_lockid, %eax
470 orl PCB_MPNEST(%edx), %eax /* add next count from PROC */
471 movl %eax, _mp_lock /* load the mp_lock */
472 /* XXX FIXME: we should be restoring the local APIC TPR */
476 cmpl $0, PCB_USERLDT(%edx)
478 movl __default_ldt,%eax
479 cmpl _currentldt,%eax
482 movl %eax,_currentldt
490 /* This must be done after loading the user LDT. */
491 .globl cpu_switch_load_gs
493 movl PCB_GS(%edx),%gs
495 /* test if debug registers should be restored */
496 movb PCB_FLAGS(%edx),%al
498 jz 1f /* no, skip over */
499 movl PCB_DR6(%edx),%eax /* yes, do the restore */
501 movl PCB_DR3(%edx),%eax
503 movl PCB_DR2(%edx),%eax
505 movl PCB_DR1(%edx),%eax
507 movl PCB_DR0(%edx),%eax
509 movl %dr7,%eax /* load dr7 so as not to disturb */
510 andl $0x0000fc00,%eax /* reserved bits */
512 movl PCB_DR7(%edx),%ebx
513 andl $~0x0000fc00,%ebx
522 CROSSJUMPTARGET(sw1a)
529 sw0_1: .asciz "cpu_switch: has wchan"
535 sw0_2: .asciz "cpu_switch: not SRUN"
538 #if defined(SMP) && defined(DIAGNOSTIC)
543 sw0_4: .asciz "cpu_switch: do not have lock"
544 #endif /* SMP && DIAGNOSTIC */
548 * Update pcb, saving current processor state.
554 /* caller's return address - child won't execute this routine */
556 movl %eax,PCB_EIP(%ecx)
559 movl %eax,PCB_CR3(%ecx)
561 movl %ebx,PCB_EBX(%ecx)
562 movl %esp,PCB_ESP(%ecx)
563 movl %ebp,PCB_EBP(%ecx)
564 movl %esi,PCB_ESI(%ecx)
565 movl %edi,PCB_EDI(%ecx)
566 movl %gs,PCB_GS(%ecx)
570 * If npxproc == NULL, then the npx h/w state is irrelevant and the
571 * state had better already be in the pcb. This is true for forks
572 * but not for dumps (the old book-keeping with FP flags in the pcb
573 * always lost for dumps because the dump pcb has 0 flags).
575 * If npxproc != NULL, then we have to save the npx h/w state to
576 * npxproc's pcb and copy it to the requested pcb, or save to the
577 * requested pcb and reload. Copying is easier because we would
578 * have to handle h/w bugs for reloading. We used to lose the
579 * parent's npx state for forks by forgetting to reload.
586 movl P_ADDR(%eax),%eax
587 leal PCB_SAVEFPU(%eax),%eax
595 pushl $PCB_SAVEFPU_SIZE
596 leal PCB_SAVEFPU(%ecx),%ecx
601 #endif /* NNPX > 0 */