/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 * LWKT threads Copyright (c) 2003 Matthew Dillon
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/swtch.s,v 1.89.2.10 2003/01/23 03:36:24 ps Exp $
 * $DragonFly: src/sys/i386/i386/Attic/swtch.s,v 1.18 2003/07/01 20:30:40 dillon Exp $
 */

#include "npx.h"
#include "opt_user_ldt.h"

#include <sys/rtprio.h>

#include <machine/asmacros.h>
#include <machine/ipl.h>

#ifdef SMP
#include <machine/pmap.h>
#include <machine/smptests.h>		/** GRAB_LOPRIO */
#include <machine/apic.h>
#include <machine/lock.h>
#endif	/* SMP */

#include "assym.s"

	.data

	.globl	panic

#if defined(SWTCH_OPTIM_STATS)
	.globl	swtch_optim_stats, tlb_flush_count
swtch_optim_stats:	.long	0	/* number of _swtch_optims */
tlb_flush_count:	.long	0
#endif

	.text

/*
 * cpu_heavy_switch(next_thread)
 *
 *	Switch from the current thread to a new thread.  This entry
 *	is normally called via the thread->td_switch function, and will
 *	only be called when the current thread is a heavy weight process.
 *
 *	YYY disable interrupts once giant is removed.
 */
ENTRY(cpu_heavy_switch)
	movl	PCPU(curthread),%ecx
	movl	TD_PROC(%ecx),%ecx

	cli
#ifdef SMP
	movb	P_ONCPU(%ecx), %al	/* save "last" cpu */
	movb	%al, P_LASTCPU(%ecx)
	movb	$0xff, P_ONCPU(%ecx)	/* "leave" the cpu */
#endif	/* SMP */
	movl	P_VMSPACE(%ecx), %edx
#ifdef SMP
	movl	_cpuid, %eax
#else
	xorl	%eax, %eax
#endif	/* SMP */
	btrl	%eax, VM_PMAP+PM_ACTIVE(%edx)
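
	/*
	 * The btrl above clears our cpu's bit in the old vmspace's
	 * PM_ACTIVE mask; cpu_heavy_restore sets the corresponding bit
	 * for the incoming thread's vmspace.
	 */
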
	/*
	 * Save general regs
	 */
	movl	P_THREAD(%ecx),%edx
	movl	TD_PCB(%edx),%edx
	movl	(%esp),%eax			/* Hardware registers */
	movl	%eax,PCB_EIP(%edx)
	movl	%ebx,PCB_EBX(%edx)
	movl	%esp,PCB_ESP(%edx)
	movl	%ebp,PCB_EBP(%edx)
	movl	%esi,PCB_ESI(%edx)
	movl	%edi,PCB_EDI(%edx)
	movl	%gs,PCB_GS(%edx)

	/*
	 * Push the LWKT switch restore function, which resumes a heavy
	 * weight process.  Note that the LWKT switcher is based on
	 * TD_SP, while the heavy weight process switcher is based on
	 * PCB_ESP.  TD_SP is usually one pointer pushed relative to
	 * PCB_ESP.
	 */
	movl	P_THREAD(%ecx),%eax
	pushl	$cpu_heavy_restore
	movl	%esp,TD_SP(%eax)
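
	/*
	 * The stack now looks like (growing down):
	 *
	 *	TD_SP(%eax) ->	&cpu_heavy_restore
	 *			caller's return address (also in PCB_EIP)
	 *			caller's frame
	 *
	 * i.e. the 'ret' at the end of the LWKT switcher pops straight
	 * into cpu_heavy_restore, and PCB_ESP == TD_SP + 4.
	 */
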
	/*
	 * Save debug regs if necessary
	 */
	movb	PCB_FLAGS(%edx),%al
	andb	$PCB_DBREGS,%al
	jz	1f				/* no, skip over */
	movl	%dr7,%eax			/* yes, do the save */
	movl	%eax,PCB_DR7(%edx)
	andl	$0x0000fc00, %eax		/* disable all watchpoints */
	movl	%eax,%dr7
	movl	%dr6,%eax
	movl	%eax,PCB_DR6(%edx)
	movl	%dr3,%eax
	movl	%eax,PCB_DR3(%edx)
	movl	%dr2,%eax
	movl	%eax,PCB_DR2(%edx)
	movl	%dr1,%eax
	movl	%eax,PCB_DR1(%edx)
	movl	%dr0,%eax
	movl	%eax,PCB_DR0(%edx)
1:

	/*
	 * Save BGL nesting count.  Note that we hold the BGL with a
	 * count of at least 1 on entry to cpu_heavy_switch().
	 */
#ifdef SMP
	movl	_mp_lock, %eax
	/* XXX FIXME: we should be saving the local APIC TPR */
#ifdef DIAGNOSTIC
	cmpl	$FREE_LOCK, %eax		/* is it free? */
	je	badsw4				/* yes, bad medicine! */
#endif	/* DIAGNOSTIC */
	andl	$COUNT_FIELD, %eax		/* clear CPU portion */
	movl	%eax, PCB_MPNEST(%edx)		/* store it */
#endif	/* SMP */

	/*
	 * Save the FP state if we have used the FP.
	 */
#if NNPX > 0
	movl	P_THREAD(%ecx),%ecx
	cmpl	%ecx,PCPU(npxthread)
	jne	1f
	addl	$PCB_SAVEFPU,%edx		/* h/w bugs make saving complicated */
	pushl	%edx
	call	npxsave				/* do it in a big C function */
	popl	%eax
1:
	/* %ecx,%edx trashed */
#endif	/* NNPX > 0 */

	/*
	 * Switch to the next thread, which was passed as an argument
	 * to cpu_heavy_switch().  Due to the switch-restore function we
	 * pushed, the argument is at 8(%esp).  Set the current thread,
	 * load the stack pointer, and 'ret' into the switch-restore
	 * function.
	 */
	movl	8(%esp),%eax
	movl	%eax,PCPU(curthread)
	movl	TD_SP(%eax),%esp
	ret

/*
 * cpu_exit_switch()
 *
 *	The switch function is changed to this when a thread is going away
 *	for good.  We have to ensure that the MMU state is not cached, and
 *	we don't bother saving the existing thread state before switching.
 *
 *	At this point we are in a critical section and this cpu owns the
 *	thread's token, which serves as an interlock until the switchout is
 *	complete.
 */
ENTRY(cpu_exit_switch)
	/*
	 * Get us out of the vmspace
	 */
	movl	IdlePTD,%ecx
	movl	%cr3,%eax
	cmpl	%ecx,%eax
	je	1f
	movl	%ecx,%cr3
	movl	PCPU(curthread),%ecx
1:
	/*
	 * Switch to the next thread.
	 */
	cli
	movl	4(%esp),%eax
	movl	%eax,PCPU(curthread)
	movl	TD_SP(%eax),%esp

	/*
	 * We are now the next thread, set the exited flag and wakeup
	 * any waiters.
	 */
	orl	$TDF_EXITED,TD_FLAGS(%ecx)
	pushl	%eax
	pushl	%ecx				/* wakeup(oldthread) */
	call	wakeup
	addl	$4,%esp
	popl	%eax				/* note: next thread expects curthread in %eax */

	/*
	 * Restore the next thread's state and resume it.  Note: the
	 * restore function assumes that the next thread's address is
	 * in %eax.
	 */
	ret

/*
 * cpu_heavy_restore() (current thread in %eax on entry)
 *
 *	Restore the thread after an LWKT switch.  This entry is normally
 *	called via the LWKT switch restore function, which was pulled
 *	off the thread stack and jumped to.
 *
 *	This entry is only called if the thread was previously saved
 *	using cpu_heavy_switch() (the heavy weight process thread switcher).
 *
 *	YYY theoretically we do not have to restore everything here; a lot
 *	of this junk can wait until we return to usermode.  But for now
 *	we restore everything.
 *
 *	YYY STI/CLI sequencing.
 *
 *	YYY note: spl check is done in mi_switch when it splx()'s.
 */

ENTRY(cpu_heavy_restore)
	/* interrupts are disabled */
	movl	TD_PCB(%eax),%edx
	movl	TD_PROC(%eax),%ecx
#ifdef DIAGNOSTIC
	cmpb	$SRUN,P_STAT(%ecx)
	jne	badsw2
#endif

#if defined(SWTCH_OPTIM_STATS)
	incl	_swtch_optim_stats
#endif
	/*
	 * Restore the MMU address space
	 */
	movl	%cr3,%ebx
	cmpl	PCB_CR3(%edx),%ebx
	je	4f
#if defined(SWTCH_OPTIM_STATS)
	decl	_swtch_optim_stats
	incl	_tlb_flush_count
#endif
	movl	PCB_CR3(%edx),%ebx
	movl	%ebx,%cr3
4:

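	/*
	 * Writing %cr3 flushes the entire TLB, so the reload is skipped
	 * when the incoming pcb uses the same page directory; the net
	 * value of swtch_optim_stats counts those avoided flushes.
	 */
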
	/*
	 * Deal with the PCB extension, restore the private tss
	 */
#ifdef SMP
	movl	_cpuid, %esi
#else
	xorl	%esi, %esi
#endif
	cmpl	$0, PCB_EXT(%edx)		/* has pcb extension? */
	je	1f
	btsl	%esi, private_tss		/* mark use of private tss */
	movl	PCB_EXT(%edx), %edi		/* new tss descriptor */
	jmp	2f
1:

	/*
	 * Update the common_tss.tss_esp0 pointer.  This is the supervisor
	 * stack pointer on entry from user mode.  Since the pcb is
	 * at the top of the supervisor stack, esp0 starts just below it.
	 * We leave enough space for vm86 (16 bytes).
	 *
	 * common_tss.tss_esp0 is needed when user mode traps into the
	 * kernel.
	 */
	leal	-16(%edx),%ebx
	movl	%ebx, PCPU(common_tss) + TSS_ESP0

	btrl	%esi, private_tss
	jae	3f

	/*
	 * There is no way to get the address of a segment-accessed
	 * variable, so we store a self-referential pointer at the base
	 * of the per-cpu data area and add the appropriate offset.
	 */
	movl	$gd_common_tssd, %edi
	addl	%fs:0, %edi

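	/*
	 * That is: %fs:0 holds the flat (linear) address of this cpu's
	 * per-cpu data area, so %edi now holds the flat address of the
	 * same gd_common_tssd field that %fs-relative code accesses.
	 */
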
	/*
	 * Move the correct TSS descriptor into the GDT slot, then reload
	 * tr.  YYY not sure what is going on here
	 */
2:
	movl	PCPU(tss_gdt), %ebx		/* entry in GDT */
	movl	0(%edi), %eax
	movl	%eax, 0(%ebx)
	movl	4(%edi), %eax
	movl	%eax, 4(%ebx)
	movl	$GPROC0_SEL*8, %esi		/* GSEL(entry, SEL_KPL) */
	ltr	%si
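
	/*
	 * (A likely reason for the descriptor copy above: ltr marks the
	 * referenced GDT entry busy, and loading a busy TSS descriptor
	 * faults, so the 8-byte descriptor is rewritten as 'available'
	 * before each reload of %tr.)
	 */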

	/*
	 * Tell the pmap that our cpu is using the VMSPACE now.
	 */
3:
	movl	P_VMSPACE(%ecx), %ebx
	movl	PCPU(cpuid), %eax
	btsl	%eax, VM_PMAP+PM_ACTIVE(%ebx)

	/*
	 * Restore general registers.
	 */
	movl	PCB_EBX(%edx),%ebx
	movl	PCB_ESP(%edx),%esp
	movl	PCB_EBP(%edx),%ebp
	movl	PCB_ESI(%edx),%esi
	movl	PCB_EDI(%edx),%edi
	movl	PCB_EIP(%edx),%eax
	movl	%eax,(%esp)

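	/*
	 * Note that PCB_EIP is written into the slot %esp now points at,
	 * i.e. the return-address slot of the restored stack, so the
	 * final 'ret' below resumes where the thread switched out.
	 */
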
	/*
	 * SMP ickyness to direct interrupts.
	 */

#ifdef SMP
#ifdef GRAB_LOPRIO				/* hold LOPRIO for INTs */
#ifdef CHEAP_TPR
	movl	$0, lapic_tpr
#else
	andl	$~APIC_TPR_PRIO, lapic_tpr
#endif /** CHEAP_TPR */
#endif /** GRAB_LOPRIO */
	movl	_cpuid,%eax
	movb	%al, P_ONCPU(%ecx)
#endif	/* SMP */

	/*
	 * Restore the BGL nesting count.  Note that the nesting count will
	 * be at least 1.
	 */
#ifdef SMP
	movl	_cpu_lockid, %eax
	orl	PCB_MPNEST(%edx), %eax		/* add next count from PROC */
	movl	%eax, _mp_lock			/* load the mp_lock */
	/* XXX FIXME: we should be restoring the local APIC TPR */
#endif	/* SMP */

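	/*
	 * (The mp_lock word appears to combine an owner-cpu field with a
	 * recursion count: the save path stripped the cpu portion with
	 * COUNT_FIELD, and our own _cpu_lockid is or'd back in here.)
	 */
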
	/*
	 * Restore the user LDT if we have one
	 */
#ifdef USER_LDT
	cmpl	$0, PCB_USERLDT(%edx)
	jnz	1f
	movl	_default_ldt,%eax
	cmpl	PCPU(currentldt),%eax
	je	2f
	lldt	_default_ldt
	movl	%eax,PCPU(currentldt)
	jmp	2f
1:	pushl	%edx
	call	set_user_ldt
	popl	%edx
2:
#endif
	/*
	 * Restore the %gs segment register, which must be done after
	 * loading the user LDT.  Since user processes can modify the
	 * register via procfs, this may result in a fault which is
	 * detected by checking the fault address against cpu_switch_load_gs
	 * in i386/i386/trap.c
	 */
	.globl	cpu_switch_load_gs
cpu_switch_load_gs:
	movl	PCB_GS(%edx),%gs

	/*
	 * Restore the DEBUG register state if necessary.
	 */
	movb	PCB_FLAGS(%edx),%al
	andb	$PCB_DBREGS,%al
	jz	1f				/* no, skip over */
	movl	PCB_DR6(%edx),%eax		/* yes, do the restore */
	movl	%eax,%dr6
	movl	PCB_DR3(%edx),%eax
	movl	%eax,%dr3
	movl	PCB_DR2(%edx),%eax
	movl	%eax,%dr2
	movl	PCB_DR1(%edx),%eax
	movl	%eax,%dr1
	movl	PCB_DR0(%edx),%eax
	movl	%eax,%dr0
	movl	%dr7,%eax			/* load dr7 so as not to disturb */
	andl	$0x0000fc00,%eax		/* reserved bits */
	pushl	%ebx
	movl	PCB_DR7(%edx),%ebx
	andl	$~0x0000fc00,%ebx
	orl	%ebx,%eax
	popl	%ebx
	movl	%eax,%dr7
1:

	sti					/* XXX */
	ret

CROSSJUMPTARGET(sw1a)

badsw0:
	pushl	%eax
	pushl	$sw0_1
	call	panic

sw0_1:	.asciz	"cpu_switch: panic: %p"

#ifdef DIAGNOSTIC
badsw1:
	pushl	$sw0_3
	call	panic

sw0_3:	.asciz	"cpu_switch: has wchan"

badsw2:
	pushl	$sw0_2
	call	panic

sw0_2:	.asciz	"cpu_switch: not SRUN"
#endif

#if defined(SMP) && defined(DIAGNOSTIC)
badsw4:
	pushl	$sw0_4
	call	panic

sw0_4:	.asciz	"cpu_switch: do not have lock"
#endif	/* SMP && DIAGNOSTIC */

string:	.asciz	"SWITCHING\n"

/*
 * savectx(pcb)
 * Update pcb, saving current processor state.
 */
ENTRY(savectx)
	/* fetch PCB */
	movl	4(%esp),%ecx

	/* caller's return address - child won't execute this routine */
	movl	(%esp),%eax
	movl	%eax,PCB_EIP(%ecx)

	movl	%cr3,%eax
	movl	%eax,PCB_CR3(%ecx)

	movl	%ebx,PCB_EBX(%ecx)
	movl	%esp,PCB_ESP(%ecx)
	movl	%ebp,PCB_EBP(%ecx)
	movl	%esi,PCB_ESI(%ecx)
	movl	%edi,PCB_EDI(%ecx)
	movl	%gs,PCB_GS(%ecx)

#if NNPX > 0
	/*
	 * If npxthread == NULL, then the npx h/w state is irrelevant and the
	 * state had better already be in the pcb.  This is true for forks
	 * but not for dumps (the old book-keeping with FP flags in the pcb
	 * always lost for dumps because the dump pcb has 0 flags).
	 *
	 * If npxthread != NULL, then we have to save the npx h/w state to
	 * npxthread's pcb and copy it to the requested pcb, or save to the
	 * requested pcb and reload.  Copying is easier because we would
	 * have to handle h/w bugs for reloading.  We used to lose the
	 * parent's npx state for forks by forgetting to reload.
	 */
	movl	PCPU(npxthread),%eax
	testl	%eax,%eax
	je	1f

	pushl	%ecx
	movl	TD_PCB(%eax),%eax
	leal	PCB_SAVEFPU(%eax),%eax
	pushl	%eax
	pushl	%eax
	call	npxsave
	addl	$4,%esp
	popl	%eax
	popl	%ecx

	pushl	$PCB_SAVEFPU_SIZE
	leal	PCB_SAVEFPU(%ecx),%ecx
	pushl	%ecx
	pushl	%eax
	call	bcopy
	addl	$12,%esp
#endif	/* NNPX > 0 */

1:
	ret

/*
 * cpu_idle_restore() (current thread in %eax on entry)
 *
 *	Don't bother setting up any regs other than %ebp so backtraces
 *	don't die.  This restore function is used to bootstrap into the
 *	cpu_idle() LWKT only; after that cpu_lwkt_*() will be used for
 *	switching.
 */
ENTRY(cpu_idle_restore)
	movl	$0,%ebp
	pushl	$0
	sti
	jmp	cpu_idle

/*
 * cpu_kthread_restore() (current thread in %eax on entry)
 *
 *	Don't bother setting up any regs other than %ebp so backtraces
 *	don't die.  This restore function is used to bootstrap into an
 *	LWKT based kernel thread only.  cpu_lwkt_switch() will be used
 *	after this.
 *
 *	Since all of our context is on the stack we are reentrant and
 *	we can release our critical section and enable interrupts early.
 */
ENTRY(cpu_kthread_restore)
	movl	TD_PCB(%eax),%ebx
	movl	$0,%ebp
	subl	$TDPRI_CRIT,TD_PRI(%eax)
	sti
	popl	%edx				/* kthread exit function */
	pushl	PCB_EBX(%ebx)			/* argument to ESI function */
	pushl	%edx				/* set exit func as return address */
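	/*
	 * The stack now looks like a normal C call frame for the
	 * function in PCB_ESI: the exit function sits in the return
	 * address slot with the PCB_EBX argument at 4(%esp), so when
	 * the kthread's main function returns it falls directly into
	 * the exit function.
	 */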
	movl	PCB_ESI(%ebx),%eax
	jmp	*%eax

/*
 * cpu_lwkt_switch()
 *
 *	Standard LWKT switching function.  Only non-scratch registers are
 *	saved and we don't bother with the MMU state or anything else.
 *
 *	This function is always called while in a critical section.
 *
 *	YYY BGL, SPL
 */
ENTRY(cpu_lwkt_switch)
	movl	4(%esp),%eax
	pushl	%ebp
	pushl	%ebx
	pushl	%esi
	pushl	%edi
	pushfl
	movl	PCPU(curthread),%ecx
	pushl	$cpu_lwkt_restore
	cli
	movl	%esp,TD_SP(%ecx)
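	/*
	 * The saved LWKT frame, from TD_SP up:
	 *
	 *	&cpu_lwkt_restore
	 *	eflags
	 *	%edi, %esi, %ebx, %ebp
	 *	caller's return address
	 */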
	movl	%eax,PCPU(curthread)
	movl	TD_SP(%eax),%esp
	ret

/*
 * cpu_lwkt_restore() (current thread in %eax on entry)
 *
 *	Standard LWKT restore function.  This function is always called
 *	while in a critical section.
 *
 *	Warning: due to preemption the restore function can be used to
 *	'return' to the original thread.  Interrupt disablement must be
 *	protected through the switch so we cannot run splz here.
 */
ENTRY(cpu_lwkt_restore)
	popfl
	popl	%edi
	popl	%esi
	popl	%ebx
	popl	%ebp
	ret