format cleanup for readability. Tab out back-slashes.
[dragonfly.git] / sys / i386 / i386 / swtch.s
/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 * LWKT threads Copyright (c) 2003 Matthew Dillon
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/swtch.s,v 1.89.2.10 2003/01/23 03:36:24 ps Exp $
 * $DragonFly: src/sys/i386/i386/Attic/swtch.s,v 1.15 2003/06/28 02:09:47 dillon Exp $
 */

#include "npx.h"
#include "opt_user_ldt.h"

#include <sys/rtprio.h>

#include <machine/asmacros.h>
#include <machine/ipl.h>

#ifdef SMP
#include <machine/pmap.h>
#include <machine/smptests.h>		/** GRAB_LOPRIO */
#include <machine/apic.h>
#include <machine/lock.h>
#endif /* SMP */

#include "assym.s"

	.data

	.globl	_panic

#if defined(SWTCH_OPTIM_STATS)
	.globl	_swtch_optim_stats, _tlb_flush_count
_swtch_optim_stats:	.long	0		/* number of _swtch_optims */
_tlb_flush_count:	.long	0
#endif

	.text


/*
 * cpu_heavy_switch(next_thread)
 *
 *	Switch from the current thread to a new thread.  This entry
 *	is normally called via the thread->td_switch function, and will
 *	only be called when the current thread is a heavy weight process.
 *
 *	YYY disable interrupts once giant is removed.
 */
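/*
 * In rough C terms the switch path below looks like the following
 * sketch.  The struct shorthand and helper names are illustrative
 * only, not the kernel's actual declarations:
 *
 *	void
 *	cpu_heavy_switch(thread_t next)
 *	{
 *		struct proc *p = curthread->td_proc;
 *		struct pcb *pcb = p->p_thread->td_pcb;
 *
 *		cli();
 *		clear_bit(cpuid, &p->p_vmspace->vm_pmap.pm_active);
 *		save_regs(pcb);			// %eip %ebx %esp %ebp %esi %edi %gs
 *		push(cpu_heavy_restore);	// LWKT restore vector
 *		p->p_thread->td_sp = %esp;
 *		save_debug_regs_if_used(pcb);
 *		pcb->pcb_mpnest = mp_lock & COUNT_FIELD;	// SMP only
 *		if (npxthread == p->p_thread)
 *			npxsave(&pcb->pcb_savefpu);
 *		curthread = next;
 *		%esp = next->td_sp;
 *		return;				// 'ret' into next's restore function
 *	}
 */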
ENTRY(cpu_heavy_switch)
	movl	_curthread,%ecx
	movl	TD_PROC(%ecx),%ecx

	cli
#ifdef SMP
	movb	P_ONCPU(%ecx), %al	/* save "last" cpu */
	movb	%al, P_LASTCPU(%ecx)
	movb	$0xff, P_ONCPU(%ecx)	/* "leave" the cpu */
#endif /* SMP */
	movl	P_VMSPACE(%ecx), %edx
#ifdef SMP
	movl	_cpuid, %eax
#else
	xorl	%eax, %eax
#endif /* SMP */
	btrl	%eax, VM_PMAP+PM_ACTIVE(%edx)

	/*
	 * Save general regs
	 */
	movl	P_THREAD(%ecx),%edx
	movl	TD_PCB(%edx),%edx
	movl	(%esp),%eax		/* Hardware registers */
	movl	%eax,PCB_EIP(%edx)
	movl	%ebx,PCB_EBX(%edx)
	movl	%esp,PCB_ESP(%edx)
	movl	%ebp,PCB_EBP(%edx)
	movl	%esi,PCB_ESI(%edx)
	movl	%edi,PCB_EDI(%edx)
	movl	%gs,PCB_GS(%edx)

	/*
	 * Push the LWKT switch restore function, which resumes a heavy
	 * weight process.  Note that the LWKT switcher is based on
	 * TD_SP, while the heavy weight process switcher is based on
	 * PCB_ESP.  TD_SP is usually one pointer pushed relative to
	 * PCB_ESP.
	 */
	movl	P_THREAD(%ecx),%eax
	pushl	$cpu_heavy_restore
	movl	%esp,TD_SP(%eax)
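	/*
	 * The resulting stack layout, per the comment above (TD_SP ends
	 * up one pushed pointer below PCB_ESP); a sketch only:
	 *
	 *	PCB_ESP ->	caller's return address (saved in PCB_EIP)
	 *	TD_SP   ->	&cpu_heavy_restore
	 *
	 * The LWKT switcher thus 'ret's into cpu_heavy_restore, which
	 * eventually returns through PCB_EIP.
	 */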

	/*
	 * Save debug regs if necessary
	 */
	movb	PCB_FLAGS(%edx),%al
	andb	$PCB_DBREGS,%al
	jz	1f			/* no, skip over */
	movl	%dr7,%eax		/* yes, do the save */
	movl	%eax,PCB_DR7(%edx)
	andl	$0x0000fc00, %eax	/* disable all watchpoints */
	movl	%eax,%dr7
	movl	%dr6,%eax
	movl	%eax,PCB_DR6(%edx)
	movl	%dr3,%eax
	movl	%eax,PCB_DR3(%edx)
	movl	%dr2,%eax
	movl	%eax,PCB_DR2(%edx)
	movl	%dr1,%eax
	movl	%eax,PCB_DR1(%edx)
	movl	%dr0,%eax
	movl	%eax,PCB_DR0(%edx)
1:

	/*
	 * Save BGL nesting count.  Note that we hold the BGL with a
	 * count of at least 1 on entry to cpu_heavy_switch().
	 */
#ifdef SMP
	movl	_mp_lock, %eax
	/* XXX FIXME: we should be saving the local APIC TPR */
#ifdef DIAGNOSTIC
	cmpl	$FREE_LOCK, %eax	/* is it free? */
	je	badsw4			/* yes, bad medicine! */
#endif /* DIAGNOSTIC */
	andl	$COUNT_FIELD, %eax	/* clear CPU portion */
	movl	%eax, PCB_MPNEST(%edx)	/* store it */
#endif /* SMP */

	/*
	 * Save the FP state if we have used the FP.
	 */
#if NNPX > 0
	movl	P_THREAD(%ecx),%ecx
	cmpl	%ecx,_npxthread
	jne	1f
	addl	$PCB_SAVEFPU,%edx	/* h/w bugs make saving complicated */
	pushl	%edx
	call	_npxsave		/* do it in a big C function */
	popl	%eax
1:
	/* %ecx,%edx trashed */
#endif /* NNPX > 0 */

	/*
	 * Switch to the next thread, which was passed as an argument
	 * to cpu_heavy_switch().  Due to the switch-restore function we
	 * pushed, the argument is at 8(%esp).  Set the current thread,
	 * load the stack pointer, and 'ret' into the switch-restore
	 * function.
	 */
	movl	8(%esp),%eax
	movl	%eax,_curthread
	movl	TD_SP(%eax),%esp
	ret

/*
 * cpu_exit_switch()
 *
 *	The switch function is changed to this when a thread is going away
 *	for good.  We have to ensure that the MMU state is not cached, and
 *	we don't bother saving the existing thread state before switching.
 *
 *	At this point we are in a critical section and this cpu owns the
 *	thread's token, which serves as an interlock until the switchout is
 *	complete.
 */
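/*
 * A hedged C-level sketch of the switchout below; the helper names are
 * shorthand for the assembly, not real kernel calls:
 *
 *	void
 *	cpu_exit_switch(thread_t next)
 *	{
 *		if (rcr3() != IdlePTD)
 *			load_cr3(IdlePTD);	// stop using the old vmspace
 *		thread_t old = curthread;
 *		cli();
 *		curthread = next;
 *		%esp = next->td_sp;
 *		old->td_flags |= TDF_EXITED;
 *		wakeup(old);
 *		return;				// 'ret' into next's restore function
 *	}
 */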
ENTRY(cpu_exit_switch)
	/*
	 * Get us out of the vmspace
	 */
	movl	_IdlePTD,%ecx
	movl	%cr3,%eax
	cmpl	%ecx,%eax
	je	1f
	movl	%ecx,%cr3
1:
	movl	_curthread,%ecx
	/*
	 * Switch to the next thread.
	 */
	cli
	movl	4(%esp),%eax
	movl	%eax,_curthread
	movl	TD_SP(%eax),%esp

	/*
	 * We are now the next thread, set the exited flag and wakeup
	 * any waiters.
	 */
	orl	$TDF_EXITED,TD_FLAGS(%ecx)
	pushl	%eax
	pushl	%ecx		/* wakeup(oldthread) */
	call	wakeup
	addl	$4,%esp
	popl	%eax		/* note: next thread expects curthread in %eax */

	/*
	 * Restore the next thread's state and resume it.  Note: the
	 * restore function assumes that the next thread's address is
	 * in %eax.
	 */
	ret

/*
 * cpu_heavy_restore()	(current thread in %eax on entry)
 *
 *	Restore the thread after an LWKT switch.  This entry is normally
 *	called via the LWKT switch restore function, which was pulled
 *	off the thread stack and jumped to.
 *
 *	YYY theoretically we do not have to restore everything here; a lot
 *	of this junk can wait until we return to usermode.  But for now
 *	we restore everything.
 *
 *	This entry is only called if the thread was previously saved
 *	using cpu_heavy_switch() (the heavy weight process thread switcher).
 *
 *	YYY STI/CLI sequencing.
 *
 *	YYY note: spl check is done in mi_switch when it splx()'s.
 */
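/*
 * Summarized as a hedged C sketch (helper names illustrative; the
 * order follows the code below):
 *
 *	void
 *	cpu_heavy_restore(void)			// thread in %eax
 *	{
 *		struct pcb *pcb = thread->td_pcb;
 *		struct proc *p = thread->td_proc;
 *
 *		if (rcr3() != pcb->pcb_cr3)
 *			load_cr3(pcb->pcb_cr3);	// flushes the TLB
 *		update_tss_and_esp0(pcb);	// private tss / common_tss
 *		set_bit(cpuid, &p->p_vmspace->vm_pmap.pm_active);
 *		restore_regs(pcb);
 *		mp_lock = cpu_lockid | pcb->pcb_mpnest;	// SMP only
 *		restore_user_ldt(pcb);		// USER_LDT only
 *		%gs = pcb->pcb_gs;		// may fault; see below
 *		restore_debug_regs_if_used(pcb);
 *		sti();
 *		return;				// through PCB_EIP
 *	}
 */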
ENTRY(cpu_heavy_restore)
	/* interrupts are disabled */
	movl	TD_PCB(%eax),%edx
	movl	TD_PROC(%eax),%ecx
#ifdef DIAGNOSTIC
	cmpb	$SRUN,P_STAT(%ecx)
	jne	badsw2
#endif

#if defined(SWTCH_OPTIM_STATS)
	incl	_swtch_optim_stats
#endif
	/*
	 * Restore the MMU address space
	 */
	movl	%cr3,%ebx
	cmpl	PCB_CR3(%edx),%ebx
	je	4f
#if defined(SWTCH_OPTIM_STATS)
	decl	_swtch_optim_stats
	incl	_tlb_flush_count
#endif
	movl	PCB_CR3(%edx),%ebx
	movl	%ebx,%cr3
4:

	/*
	 * Deal with the PCB extension, restore the private tss
	 */
#ifdef SMP
	movl	_cpuid, %esi
#else
	xorl	%esi, %esi
#endif
	cmpl	$0, PCB_EXT(%edx)	/* has pcb extension? */
	je	1f
	btsl	%esi, _private_tss	/* mark use of private tss */
	movl	PCB_EXT(%edx), %edi	/* new tss descriptor */
	jmp	2f
1:

	/*
	 * Update the common_tss.tss_esp0 pointer.  This is the supervisor
	 * stack pointer on entry from user mode.  Since the pcb is at the
	 * top of the supervisor stack, esp0 starts just below it.  We
	 * leave enough space for vm86 (16 bytes).
	 *
	 * common_tss.tss_esp0 is needed when user mode traps into the
	 * kernel.
	 */
	leal	-16(%edx),%ebx
	movl	%ebx, _common_tss + TSS_ESP0
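	/*
	 * In C terms the two instructions above amount to (the 16 byte
	 * pad being the vm86 space mentioned above):
	 *
	 *	common_tss.tss_esp0 = (char *)pcb - 16;
	 */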

	btrl	%esi, _private_tss
	jae	3f

	/*
	 * There is no way to get the address of a segment-accessed variable,
	 * so we store a self-referential pointer at the base of the per-cpu
	 * data area and add the appropriate offset.
	 */
	movl	$gd_common_tssd, %edi
	addl	%fs:0, %edi
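	/*
	 * A hedged C rendering of the trick above ('fs_base' stands in
	 * for whatever linear address the %fs descriptor maps):
	 *
	 *	struct globaldata *gd = *(struct globaldata **)fs_base;
	 *	edi = (char *)gd + offsetof(struct globaldata, gd_common_tssd);
	 */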

	/*
	 * Move the correct TSS descriptor into the GDT slot, then reload
	 * tr.  YYY not sure what is going on here
	 */
2:
	movl	_tss_gdt, %ebx		/* entry in GDT */
	movl	0(%edi), %eax
	movl	%eax, 0(%ebx)
	movl	4(%edi), %eax
	movl	%eax, 4(%ebx)
	movl	$GPROC0_SEL*8, %esi	/* GSEL(entry, SEL_KPL) */
	ltr	%si
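	/*
	 * Re the YYY above: the CPU caches the TSS descriptor in the
	 * hidden part of %tr, so after rewriting the GDT slot the task
	 * register must be reloaded via ltr for the new descriptor to
	 * take effect.  Copying in a fresh descriptor also clears the
	 * busy bit set by the previous ltr; ltr faults on a TSS
	 * descriptor that is still marked busy.
	 */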

	/*
	 * Tell the pmap that our cpu is using the VMSPACE now.
	 */
3:
	movl	P_VMSPACE(%ecx), %ebx
	movl	_cpuid, %eax
	btsl	%eax, VM_PMAP+PM_ACTIVE(%ebx)

	/*
	 * Restore general registers.
	 */
	movl	PCB_EBX(%edx),%ebx
	movl	PCB_ESP(%edx),%esp
	movl	PCB_EBP(%edx),%ebp
	movl	PCB_ESI(%edx),%esi
	movl	PCB_EDI(%edx),%edi
	movl	PCB_EIP(%edx),%eax
	movl	%eax,(%esp)

	/*
	 * SMP ickiness to direct interrupts.
	 */

#ifdef SMP
#ifdef GRAB_LOPRIO			/* hold LOPRIO for INTs */
#ifdef CHEAP_TPR
	movl	$0, lapic_tpr
#else
	andl	$~APIC_TPR_PRIO, lapic_tpr
#endif /** CHEAP_TPR */
#endif /** GRAB_LOPRIO */
	movl	_cpuid,%eax
	movb	%al, P_ONCPU(%ecx)
#endif /* SMP */

	/*
	 * Restore the BGL nesting count.  Note that the nesting count will
	 * be at least 1.
	 */
#ifdef SMP
	movl	_cpu_lockid, %eax
	orl	PCB_MPNEST(%edx), %eax	/* add next count from PROC */
	movl	%eax, _mp_lock		/* load the mp_lock */
	/* XXX FIXME: we should be restoring the local APIC TPR */
#endif /* SMP */

	/*
	 * Restore the user LDT if we have one
	 */
#ifdef USER_LDT
	cmpl	$0, PCB_USERLDT(%edx)
	jnz	1f
	movl	__default_ldt,%eax
	cmpl	_currentldt,%eax
	je	2f
	lldt	__default_ldt
	movl	%eax,_currentldt
	jmp	2f
1:	pushl	%edx
	call	_set_user_ldt
	popl	%edx
2:
#endif
	/*
	 * Restore the %gs segment register, which must be done after
	 * loading the user LDT.  Since user processes can modify the
	 * register via procfs, this may result in a fault which is
	 * detected by checking the fault address against cpu_switch_load_gs
	 * in i386/i386/trap.c.
	 */
	.globl	cpu_switch_load_gs
cpu_switch_load_gs:
	movl	PCB_GS(%edx),%gs

	/*
	 * Restore the DEBUG register state if necessary.
	 */
	movb	PCB_FLAGS(%edx),%al
	andb	$PCB_DBREGS,%al
	jz	1f			/* no, skip over */
	movl	PCB_DR6(%edx),%eax	/* yes, do the restore */
	movl	%eax,%dr6
	movl	PCB_DR3(%edx),%eax
	movl	%eax,%dr3
	movl	PCB_DR2(%edx),%eax
	movl	%eax,%dr2
	movl	PCB_DR1(%edx),%eax
	movl	%eax,%dr1
	movl	PCB_DR0(%edx),%eax
	movl	%eax,%dr0
	movl	%dr7,%eax		/* load dr7 so as not to disturb */
	andl	$0x0000fc00,%eax	/* reserved bits */
	pushl	%ebx
	movl	PCB_DR7(%edx),%ebx
	andl	$~0x0000fc00,%ebx
	orl	%ebx,%eax
	popl	%ebx
	movl	%eax,%dr7
1:
#if 0
	/*
	 * Remove the heavy weight process from the heavy weight queue.
	 * This will also have the side effect of removing the thread from
	 * the run queue.  YYY temporary?
	 *
	 * LWKT threads stay on the run queue until explicitly removed.
	 */
	pushl	%ecx
	call	remrunqueue
	addl	$4,%esp
#endif

	sti			/* XXX */
	ret

CROSSJUMPTARGET(sw1a)

#ifdef DIAGNOSTIC
badsw1:
	pushl	$sw0_1
	call	_panic

sw0_1:	.asciz	"cpu_switch: has wchan"

badsw2:
	pushl	$sw0_2
	call	_panic

sw0_2:	.asciz	"cpu_switch: not SRUN"
#endif

#if defined(SMP) && defined(DIAGNOSTIC)
badsw4:
	pushl	$sw0_4
	call	_panic

sw0_4:	.asciz	"cpu_switch: do not have lock"
#endif /* SMP && DIAGNOSTIC */

string:	.asciz	"SWITCHING\n"

/*
 * savectx(pcb)
 *
 *	Update pcb, saving current processor state.
 */
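/*
 * savectx() behaves like a kernel setjmp over the pcb.  A hedged C
 * sketch of the body below (helper names illustrative only):
 *
 *	void
 *	savectx(struct pcb *pcb)
 *	{
 *		pcb->pcb_eip = return_address();	// caller resumes here
 *		pcb->pcb_cr3 = rcr3();
 *		save_regs(pcb);			// %ebx %esp %ebp %esi %edi %gs
 *		if (npxthread != NULL)
 *			save_and_copy_npx(pcb);	// see the comment below
 *	}
 */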
ENTRY(savectx)
	/* fetch PCB */
	movl	4(%esp),%ecx

	/* caller's return address - child won't execute this routine */
	movl	(%esp),%eax
	movl	%eax,PCB_EIP(%ecx)

	movl	%cr3,%eax
	movl	%eax,PCB_CR3(%ecx)

	movl	%ebx,PCB_EBX(%ecx)
	movl	%esp,PCB_ESP(%ecx)
	movl	%ebp,PCB_EBP(%ecx)
	movl	%esi,PCB_ESI(%ecx)
	movl	%edi,PCB_EDI(%ecx)
	movl	%gs,PCB_GS(%ecx)

#if NNPX > 0
	/*
	 * If npxthread == NULL, then the npx h/w state is irrelevant and the
	 * state had better already be in the pcb.  This is true for forks
	 * but not for dumps (the old book-keeping with FP flags in the pcb
	 * always lost for dumps because the dump pcb has 0 flags).
	 *
	 * If npxthread != NULL, then we have to save the npx h/w state to
	 * npxthread's pcb and copy it to the requested pcb, or save to the
	 * requested pcb and reload.  Copying is easier because we would
	 * have to handle h/w bugs for reloading.  We used to lose the
	 * parent's npx state for forks by forgetting to reload.
	 */
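	/*
	 * The save-and-copy path chosen above, sketched in C (the bcopy
	 * argument order is bcopy(src, dst, len), matching the pushes
	 * below):
	 *
	 *	union savefpu *fp = &npxthread->td_pcb->pcb_savefpu;
	 *	npxsave(fp);				// dump h/w state
	 *	bcopy(fp, &pcb->pcb_savefpu, PCB_SAVEFPU_SIZE);
	 */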
	movl	_npxthread,%eax
	testl	%eax,%eax
	je	1f

	pushl	%ecx
	movl	TD_PCB(%eax),%eax
	leal	PCB_SAVEFPU(%eax),%eax
	pushl	%eax
	pushl	%eax
	call	_npxsave
	addl	$4,%esp
	popl	%eax
	popl	%ecx

	pushl	$PCB_SAVEFPU_SIZE
	leal	PCB_SAVEFPU(%ecx),%ecx
	pushl	%ecx
	pushl	%eax
	call	_bcopy
	addl	$12,%esp
#endif /* NNPX > 0 */

1:
	ret

/*
 * cpu_idle_restore()	(current thread in %eax on entry)
 *
 *	Don't bother setting up any regs other than %ebp so backtraces
 *	don't die.  This restore function is used to bootstrap into the
 *	cpu_idle() LWKT only, after that cpu_lwkt_*() will be used for
 *	switching.
 */
ENTRY(cpu_idle_restore)
	movl	$0,%ebp
	pushl	$0
	jmp	cpu_idle

/*
 * cpu_kthread_restore()	(current thread in %eax on entry)
 *
 *	Don't bother setting up any regs other than %ebp so backtraces
 *	don't die.  This restore function is used to bootstrap into an
 *	LWKT based kernel thread only.  cpu_lwkt_switch() will be used
 *	after this.
 */
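/*
 * The bootstrap below assumes the thread's creator staged a frame in
 * the pcb and on the stack: PCB_ESI holds the thread function, PCB_EBX
 * its argument, and the word on top of the stack is the exit function.
 * In rough C terms:
 *
 *	void (*func)(void *) = (void *)pcb->pcb_esi;
 *	void (*exitf)(void) = pop();	// staged exit function
 *	push(pcb->pcb_ebx);		// argument to func
 *	push(exitf);			// becomes func's return address
 *	goto *func;
 */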
ENTRY(cpu_kthread_restore)
	movl	TD_PCB(%eax),%ebx
	movl	$0,%ebp
	popl	%edx			/* kthread exit function */
	pushl	PCB_EBX(%ebx)		/* argument to ESI function */
	pushl	%edx			/* set exit func as return address */
	movl	PCB_ESI(%ebx),%eax
	jmp	*%eax

/*
 * cpu_lwkt_switch()
 *
 *	Standard LWKT switching function.  Only non-scratch registers are
 *	saved and we don't bother with the MMU state or anything else.
 *	YYY BGL, SPL
 */
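/*
 * The fast path in C-flavored pseudocode; everything an LWKT switch
 * needs lives on the thread's own stack:
 *
 *	void
 *	cpu_lwkt_switch(thread_t next)
 *	{
 *		push(%ebp); push(%ebx); push(%esi); push(%edi);
 *		push(%eflags);
 *		push(cpu_lwkt_restore);		// restore vector
 *		cli();
 *		curthread->td_sp = %esp;
 *		curthread = next;
 *		%esp = next->td_sp;
 *		return;			// 'ret' into next's restore function
 *	}
 */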
ENTRY(cpu_lwkt_switch)
	movl	4(%esp),%eax
	pushl	%ebp
	pushl	%ebx
	pushl	%esi
	pushl	%edi
	pushfl
	movl	_curthread,%ecx
	pushl	$cpu_lwkt_restore
	cli
	movl	%esp,TD_SP(%ecx)
	movl	%eax,_curthread
	movl	TD_SP(%eax),%esp
	ret

/*
 * cpu_lwkt_restore()	(current thread in %eax on entry)
 *
 *	Standard LWKT restore function, the counterpart to
 *	cpu_lwkt_switch().  Pops the saved registers and runs any
 *	unmasked pending interrupts before returning.
 */
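/*
 * The spl handling at the tail, sketched in C (cpl is the saved
 * interrupt mask read from TD_MACH+MTD_CPL below):
 *
 *	pop_saved_regs_and_flags();
 *	if ((ipending & ~cpl) != 0 && intr_nesting_level == 0)
 *		splz();			// run unmasked pending interrupts
 */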
ENTRY(cpu_lwkt_restore)
	popfl
	popl	%edi
	popl	%esi
	popl	%ebx
	popl	%ebp
	movl	TD_MACH+MTD_CPL(%eax),%ecx	/* unmasked cpl? YYY too complex */
	notl	%ecx
	andl	_ipending,%ecx
	je	1f
	cmpl	$0,_intr_nesting_level		/* don't stack too deeply */
	jne	1f
	call	splz			/* execute unmasked ints */
1:
	ret
