rename td_token to td_xtoken to deal with conflict against sys/thread.h
[dragonfly.git] / sys / i386 / i386 / swtch.s
/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 * LWKT threads Copyright (c) 2003 Matthew Dillon
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/swtch.s,v 1.89.2.10 2003/01/23 03:36:24 ps Exp $
 * $DragonFly: src/sys/i386/i386/Attic/swtch.s,v 1.8 2003/06/21 07:54:56 dillon Exp $
 */

#include "npx.h"
#include "opt_user_ldt.h"

#include <sys/rtprio.h>

#include <machine/asmacros.h>
#include <machine/ipl.h>

#ifdef SMP
#include <machine/pmap.h>
#include <machine/smptests.h>		/** GRAB_LOPRIO */
#include <machine/apic.h>
#include <machine/lock.h>
#endif /* SMP */

#include "assym.s"

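/*
 * Note: assym.s is generated at kernel build time and supplies the
 * symbolic structure offsets (TD_*, PCB_*, P_*, VM_*, etc.) used
 * throughout this file.
 */
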
	.data

	.globl	_panic

#if defined(SWTCH_OPTIM_STATS)
	.globl	_swtch_optim_stats, _tlb_flush_count
_swtch_optim_stats:	.long	0	/* number of _swtch_optims */
_tlb_flush_count:	.long	0
#endif

	.text


/*
 * cpu_heavy_switch(next_thread)
 *
 *	Switch from the current thread to a new thread.  This entry
 *	is normally called via the thread->td_switch function, and will
 *	only be called when the current thread is a heavy weight process.
 *
 *	YYY disable interrupts once giant is removed.
 */
ENTRY(cpu_heavy_switch)
	movl	_curthread,%ecx
	movl	_cpl,%edx			/* YYY temporary */
	movl	%edx,TD_MACH+MTD_CPL(%ecx)	/* YYY temporary */
	movl	TD_PROC(%ecx),%ecx

	cli
#ifdef SMP
	movb	P_ONCPU(%ecx), %al	/* save "last" cpu */
	movb	%al, P_LASTCPU(%ecx)
	movb	$0xff, P_ONCPU(%ecx)	/* "leave" the cpu */
#endif /* SMP */
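	/*
	 * Clear our cpu's bit in the outgoing process's pmap active
	 * mask; this cpu will no longer be running in that address
	 * space.
	 */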
	movl	P_VMSPACE(%ecx), %edx
#ifdef SMP
	movl	_cpuid, %eax
#else
	xorl	%eax, %eax
#endif /* SMP */
	btrl	%eax, VM_PMAP+PM_ACTIVE(%edx)

	/*
	 * Save general regs
	 */
	movl	P_THREAD(%ecx),%edx
	movl	TD_PCB(%edx),%edx
	movl	(%esp),%eax		/* Hardware registers */
	movl	%eax,PCB_EIP(%edx)
	movl	%ebx,PCB_EBX(%edx)
	movl	%esp,PCB_ESP(%edx)
	movl	%ebp,PCB_EBP(%edx)
	movl	%esi,PCB_ESI(%edx)
	movl	%edi,PCB_EDI(%edx)
	movl	%gs,PCB_GS(%edx)

	/*
	 * Push the LWKT switch restore function, which resumes a heavy
	 * weight process.  Note that the LWKT switcher is based on
	 * TD_SP, while the heavy weight process switcher is based on
	 * PCB_ESP.  TD_SP is usually one pointer pushed relative to
	 * PCB_ESP.
	 */
	movl	P_THREAD(%ecx),%eax
	pushl	$cpu_heavy_restore
	movl	%esp,TD_SP(%eax)

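	/*
	 * Resulting stack (a sketch; lower addresses at the top):
	 *
	 *	TD_SP   -> [ &cpu_heavy_restore ]  ('ret' target)
	 *	PCB_ESP -> [ caller's return EIP ] (also saved in PCB_EIP)
	 *	           [ caller's frame ...   ]
	 *
	 * The LWKT switcher resumes this thread by loading TD_SP and
	 * executing 'ret', which lands in cpu_heavy_restore.
	 */
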
	/*
	 * Save debug regs if necessary
	 */
	movb	PCB_FLAGS(%edx),%al
	andb	$PCB_DBREGS,%al
	jz	1f			/* no, skip over */
	movl	%dr7,%eax		/* yes, do the save */
	movl	%eax,PCB_DR7(%edx)
	andl	$0x0000fc00, %eax	/* disable all watchpoints */
	movl	%eax,%dr7
	movl	%dr6,%eax
	movl	%eax,PCB_DR6(%edx)
	movl	%dr3,%eax
	movl	%eax,PCB_DR3(%edx)
	movl	%dr2,%eax
	movl	%eax,PCB_DR2(%edx)
	movl	%dr1,%eax
	movl	%eax,PCB_DR1(%edx)
	movl	%dr0,%eax
	movl	%eax,PCB_DR0(%edx)
1:

	/*
	 * Save BGL nesting count.  Note that we hold the BGL with a
	 * count of at least 1 on entry to cpu_heavy_switch().
	 */
#ifdef SMP
	movl	_mp_lock, %eax
	/* XXX FIXME: we should be saving the local APIC TPR */
#ifdef DIAGNOSTIC
	cmpl	$FREE_LOCK, %eax	/* is it free? */
	je	badsw4			/* yes, bad medicine! */
#endif /* DIAGNOSTIC */
	andl	$COUNT_FIELD, %eax	/* clear CPU portion */
	movl	%eax, PCB_MPNEST(%edx)	/* store it */
#endif /* SMP */

	/*
	 * Save the FP state if we have used the FP.
	 */
#if NNPX > 0
	movl	P_THREAD(%ecx),%ecx
	cmpl	%ecx,_npxthread
	jne	1f
	addl	$PCB_SAVEFPU,%edx	/* h/w bugs make saving complicated */
	pushl	%edx
	call	_npxsave		/* do it in a big C function */
	popl	%eax
1:
	/* %ecx,%edx trashed */
#endif /* NNPX > 0 */

	/*
	 * Switch to the next thread, which was passed as an argument
	 * to cpu_heavy_switch().  Due to the switch-restore function we
	 * pushed, the argument is at 8(%esp).  Set the current thread,
	 * load the stack pointer, and 'ret' into the switch-restore
	 * function.
	 */
	movl	8(%esp),%eax
	movl	%eax,_curthread
	movl	TD_SP(%eax),%esp
	ret
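
/*
 * At the C level the heavy-weight switch path looks roughly like the
 * sketch below (illustrative only; it restates the mechanism described
 * above and is not a literal kernel function -- the lwkt_switch() name
 * is an assumption, the td_switch hook is per the comment above):
 *
 *	lwkt_switch()                     // pick the next thread
 *	    curthread->td_switch(ntd);    // cpu_heavy_switch for processes
 *	        // saves regs into the pcb, pushes cpu_heavy_restore,
 *	        // stores %esp in TD_SP, then loads ntd's TD_SP and
 *	        // 'ret's into whatever restore function ntd pushed
 *	        // when it last switched away.
 */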

/*
 * cpu_exit_switch()
 *
 *	The switch function is changed to this when a thread is going away
 *	for good.  We have to ensure that the MMU state is not cached, and
 *	we don't bother saving the existing thread state before switching.
 */
ENTRY(cpu_exit_switch)
	movl	_IdlePTD,%ecx
	movl	%cr3,%eax
	cmpl	%ecx,%eax
	je	1f
	movl	%ecx,%cr3
1:
	cli
	movl	4(%esp),%eax
	movl	%eax,_curthread
	movl	TD_SP(%eax),%esp
	ret

/*
 * cpu_heavy_restore()	(current thread in %eax on entry)
 *
 *	Restore the thread after an LWKT switch.  This entry is normally
 *	called via the LWKT switch restore function, which was pulled
 *	off the thread stack and jumped to.
 *
 *	This entry is only called if the thread was previously saved
 *	using cpu_heavy_switch() (the heavy weight process thread switcher).
 *
 *	YYY theoretically we do not have to restore everything here, a lot
 *	of this junk can wait until we return to usermode.  But for now
 *	we restore everything.
 *
 *	YYY STI/CLI sequencing.
 */
ENTRY(cpu_heavy_restore)
	/* interrupts are disabled */
	movl	TD_MACH+MTD_CPL(%eax),%edx
	movl	%edx,_cpl		/* YYY temporary */
	movl	TD_PCB(%eax),%edx	/* YYY temporary */
	movl	TD_PROC(%eax),%ecx
#ifdef DIAGNOSTIC
	cmpb	$SRUN,P_STAT(%ecx)
	jne	badsw2
#endif

#if defined(SWTCH_OPTIM_STATS)
	incl	_swtch_optim_stats
#endif
	/*
	 * Restore the MMU address space
	 */
	movl	%cr3,%ebx
	cmpl	PCB_CR3(%edx),%ebx
	je	4f
#if defined(SWTCH_OPTIM_STATS)
	decl	_swtch_optim_stats
	incl	_tlb_flush_count
#endif
	movl	PCB_CR3(%edx),%ebx
	movl	%ebx,%cr3
4:

	/*
	 * Deal with the PCB extension, restore the private tss
	 */
#ifdef SMP
	movl	_cpuid, %esi
#else
	xorl	%esi, %esi
#endif
	cmpl	$0, PCB_EXT(%edx)	/* has pcb extension? */
	je	1f
	btsl	%esi, _private_tss	/* mark use of private tss */
	movl	PCB_EXT(%edx), %edi	/* new tss descriptor */
	jmp	2f
1:

	/*
	 * Update the common_tss.tss_esp0 pointer.  This is the supervisor
	 * stack pointer on entry from user mode.  Since the pcb is at the
	 * top of the supervisor stack, esp0 starts just below it.  We
	 * leave enough space for vm86 (16 bytes).
	 *
	 * common_tss.tss_esp0 is needed when user mode traps into the
	 * kernel.
	 */
	leal	-16(%edx),%ebx
	movl	%ebx, _common_tss + TSS_ESP0
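
	/*
	 * Supervisor stack layout implied above (a sketch; higher
	 * addresses to the right):
	 *
	 *	... trap frames grow down ... | 16 bytes (vm86) | pcb
	 *	                              ^ tss_esp0
	 */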

	btrl	%esi, _private_tss
	jae	3f
#ifdef SMP
	movl	$gd_common_tssd, %edi
	addl	%fs:0, %edi
#else
	movl	$_common_tssd, %edi
#endif
	/*
	 * Move the correct TSS descriptor into the GDT slot, then reload
	 * tr.  YYY not sure what is going on here
	 */
2:
	movl	_tss_gdt, %ebx		/* entry in GDT */
	movl	0(%edi), %eax
	movl	%eax, 0(%ebx)
	movl	4(%edi), %eax
	movl	%eax, 4(%ebx)
	movl	$GPROC0_SEL*8, %esi	/* GSEL(entry, SEL_KPL) */
	ltr	%si

	/*
	 * Tell the pmap that our cpu is using the VMSPACE now.
	 */
3:
	movl	P_VMSPACE(%ecx), %ebx
#ifdef SMP
	movl	_cpuid, %eax
#else
	xorl	%eax, %eax
#endif
	btsl	%eax, VM_PMAP+PM_ACTIVE(%ebx)

	/*
	 * Restore general registers.
	 */
	movl	PCB_EBX(%edx),%ebx
	movl	PCB_ESP(%edx),%esp
	movl	PCB_EBP(%edx),%ebp
	movl	PCB_ESI(%edx),%esi
	movl	PCB_EDI(%edx),%edi
	movl	PCB_EIP(%edx),%eax
	movl	%eax,(%esp)

	/*
	 * SMP ickyness to direct interrupts.
	 */

#ifdef SMP
#ifdef GRAB_LOPRIO			/* hold LOPRIO for INTs */
#ifdef CHEAP_TPR
	movl	$0, lapic_tpr
#else
	andl	$~APIC_TPR_PRIO, lapic_tpr
#endif /** CHEAP_TPR */
#endif /** GRAB_LOPRIO */
	movl	_cpuid,%eax
	movb	%al, P_ONCPU(%ecx)
#endif /* SMP */

	/*
	 * Restore the BGL nesting count.  Note that the nesting count will
	 * be at least 1.
	 */
#ifdef SMP
	movl	_cpu_lockid, %eax
	orl	PCB_MPNEST(%edx), %eax	/* add next count from PROC */
	movl	%eax, _mp_lock		/* load the mp_lock */
	/* XXX FIXME: we should be restoring the local APIC TPR */
#endif /* SMP */

	/*
	 * Restore the user LDT if we have one
	 */
#ifdef USER_LDT
	cmpl	$0, PCB_USERLDT(%edx)
	jnz	1f
	movl	__default_ldt,%eax
	cmpl	_currentldt,%eax
	je	2f
	lldt	__default_ldt
	movl	%eax,_currentldt
	jmp	2f
1:	pushl	%edx
	call	_set_user_ldt
	popl	%edx
2:
#endif
	/*
	 * Restore the %gs segment register, which must be done after
	 * loading the user LDT.  Since user processes can modify the
	 * register via procfs, this may result in a fault which is
	 * detected by checking the fault address against cpu_switch_load_gs
	 * in i386/i386/trap.c
	 */
	.globl	cpu_switch_load_gs
cpu_switch_load_gs:
	movl	PCB_GS(%edx),%gs

	/*
	 * Restore the DEBUG register state if necessary.
	 */
	movb	PCB_FLAGS(%edx),%al
	andb	$PCB_DBREGS,%al
	jz	1f			/* no, skip over */
	movl	PCB_DR6(%edx),%eax	/* yes, do the restore */
	movl	%eax,%dr6
	movl	PCB_DR3(%edx),%eax
	movl	%eax,%dr3
	movl	PCB_DR2(%edx),%eax
	movl	%eax,%dr2
	movl	PCB_DR1(%edx),%eax
	movl	%eax,%dr1
	movl	PCB_DR0(%edx),%eax
	movl	%eax,%dr0
	movl	%dr7,%eax		/* load dr7 so as not to disturb */
	andl	$0x0000fc00,%eax	/* reserved bits */
	pushl	%ebx
	movl	PCB_DR7(%edx),%ebx
	andl	$~0x0000fc00,%ebx
	orl	%ebx,%eax
	popl	%ebx
	movl	%eax,%dr7
1:
#if 0
	/*
	 * Remove the heavy weight process from the heavy weight queue.
	 * This will also have the side effect of removing the thread from
	 * the run queue.  YYY temporary?
	 *
	 * LWKT threads stay on the run queue until explicitly removed.
	 */
	pushl	%ecx
	call	remrunqueue
	addl	$4,%esp
#endif

	sti			/* XXX */
	ret

CROSSJUMPTARGET(sw1a)

#ifdef DIAGNOSTIC
badsw1:
	pushl	$sw0_1
	call	_panic

sw0_1:	.asciz	"cpu_switch: has wchan"

badsw2:
	pushl	$sw0_2
	call	_panic

sw0_2:	.asciz	"cpu_switch: not SRUN"
#endif

#if defined(SMP) && defined(DIAGNOSTIC)
badsw4:
	pushl	$sw0_4
	call	_panic

sw0_4:	.asciz	"cpu_switch: do not have lock"
#endif /* SMP && DIAGNOSTIC */

/*
 * savectx(pcb)
 *
 *	Update pcb, saving current processor state.
 */
ENTRY(savectx)
	/* fetch PCB */
	movl	4(%esp),%ecx

	/* caller's return address - child won't execute this routine */
	movl	(%esp),%eax
	movl	%eax,PCB_EIP(%ecx)

	movl	%cr3,%eax
	movl	%eax,PCB_CR3(%ecx)

	movl	%ebx,PCB_EBX(%ecx)
	movl	%esp,PCB_ESP(%ecx)
	movl	%ebp,PCB_EBP(%ecx)
	movl	%esi,PCB_ESI(%ecx)
	movl	%edi,PCB_EDI(%ecx)
	movl	%gs,PCB_GS(%ecx)

#if NNPX > 0
	/*
	 * If npxthread == NULL, then the npx h/w state is irrelevant and the
	 * state had better already be in the pcb.  This is true for forks
	 * but not for dumps (the old book-keeping with FP flags in the pcb
	 * always lost for dumps because the dump pcb has 0 flags).
	 *
	 * If npxthread != NULL, then we have to save the npx h/w state to
	 * npxthread's pcb and copy it to the requested pcb, or save to the
	 * requested pcb and reload.  Copying is easier because we would
	 * have to handle h/w bugs for reloading.  We used to lose the
	 * parent's npx state for forks by forgetting to reload.
	 */
	movl	_npxthread,%eax
	testl	%eax,%eax
	je	1f

	pushl	%ecx
	movl	TD_PCB(%eax),%eax
	leal	PCB_SAVEFPU(%eax),%eax
	pushl	%eax
	pushl	%eax
	call	_npxsave
	addl	$4,%esp
	popl	%eax
	popl	%ecx

	pushl	$PCB_SAVEFPU_SIZE
	leal	PCB_SAVEFPU(%ecx),%ecx
	pushl	%ecx
	pushl	%eax
	call	_bcopy
	addl	$12,%esp
#endif /* NNPX > 0 */

1:
	ret
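
/*
 * Rough C-level equivalent of savectx() (illustrative only; the field
 * names are approximations of the PCB_* offsets used above):
 *
 *	void savectx(struct pcb *pcb) {
 *		pcb->pcb_eip = <caller's return address>;
 *		pcb->pcb_cr3 = <current %cr3>;
 *		// ... save %ebx/%esp/%ebp/%esi/%edi/%gs ...
 *		if (npxthread != NULL) {
 *			npxsave(&npxthread->td_pcb->pcb_savefpu);
 *			bcopy(&npxthread->td_pcb->pcb_savefpu,
 *			    &pcb->pcb_savefpu, PCB_SAVEFPU_SIZE);
 *		}
 *	}
 */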

/*
 * cpu_idle_restore()	(current thread in %eax on entry)
 *
 *	Don't bother setting up any regs other than %ebp so backtraces
 *	don't die.  This restore function is used to bootstrap into the
 *	cpu_idle() LWKT only, after that cpu_lwkt_*() will be used for
 *	switching.
 */
ENTRY(cpu_idle_restore)
	movl	$0,%ebp
	pushl	$0
	jmp	cpu_idle

/*
 * cpu_lwkt_switch()
 *
 *	Standard LWKT switching function.  Only non-scratch registers are
 *	saved and we don't bother with the MMU state or anything else.
 *	YYY BGL, SPL
 */
ENTRY(cpu_lwkt_switch)
	movl	4(%esp),%eax
	pushl	%ebp
	pushl	%ebx
	pushl	%esi
	pushl	%edi
	pushfl
	movl	_curthread,%ecx
	movl	_cpl,%edx			/* YYY temporary */
	movl	%edx,TD_MACH+MTD_CPL(%ecx)	/* YYY temporary */
	pushl	$cpu_lwkt_restore
	cli
	movl	%esp,TD_SP(%ecx)
	movl	%eax,_curthread
	movl	TD_SP(%eax),%esp
	ret
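
/*
 * LWKT switch frame as built above (a sketch; top of stack on the
 * left, and TD_SP points at the top):
 *
 *	[ &cpu_lwkt_restore ][ eflags ][ edi ][ esi ][ ebx ][ ebp ][ ret eip ]
 *
 * cpu_lwkt_restore below unwinds it in exactly the reverse order.
 */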

/*
 * cpu_lwkt_restore()	(current thread in %eax on entry)
 *
 *	Standard LWKT restore function, the counterpart to
 *	cpu_lwkt_switch() above.  Pops the non-scratch registers and
 *	eflags saved on the LWKT stack, restores the saved cpl, and
 *	returns to the caller of cpu_lwkt_switch().
 */
ENTRY(cpu_lwkt_restore)
	popfl
	popl	%edi
	popl	%esi
	popl	%ebx
	popl	%ebp
	movl	TD_MACH+MTD_CPL(%eax),%ecx	/* YYY temporary */
	movl	%ecx,_cpl			/* YYY temporary */
	ret