thread stage 1: convert curproc to curthread, embed struct thread in proc.
[dragonfly.git] / sys / i386 / i386 / swtch.s
1/*-
2 * Copyright (c) 1990 The Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * William Jolitz.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * $FreeBSD: src/sys/i386/i386/swtch.s,v 1.89.2.10 2003/01/23 03:36:24 ps Exp $
37 * $DragonFly: src/sys/i386/i386/Attic/swtch.s,v 1.3 2003/06/18 06:33:24 dillon Exp $
38 */
39
40#include "npx.h"
41#include "opt_user_ldt.h"
42
43#include <sys/rtprio.h>
44
45#include <machine/asmacros.h>
46#include <machine/ipl.h>
47
48#ifdef SMP
49#include <machine/pmap.h>
50#include <machine/smptests.h> /** GRAB_LOPRIO */
51#include <machine/apic.h>
52#include <machine/lock.h>
53#endif /* SMP */
54
55#include "assym.s"
56
57
58/*****************************************************************************/
59/* Scheduling */
60/*****************************************************************************/
61
62 .data
63
64 .globl _hlt_vector
65_hlt_vector: .long _cpu_idle /* pointer to halt routine */
66
67 .globl _panic
68
69#if defined(SWTCH_OPTIM_STATS)
70 .globl _swtch_optim_stats, _tlb_flush_count
71_swtch_optim_stats: .long 0 /* number of _swtch_optims */
72_tlb_flush_count: .long 0
73#endif
74
75 .text
76
77/*
78 * When no processes are on the runq, cpu_switch() branches to _idle
79 * to wait for something to come ready.
80 */
81 ALIGN_TEXT
82 .type _idle,@function
83_idle:
84 xorl %ebp,%ebp
85 movl %ebp,_switchtime
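	/*
	 * Zero the frame pointer so stack backtraces stop here, and zero
	 * _switchtime so the time-accounting code knows the saved switch
	 * timestamp is stale and must re-read the clock.
	 */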
86
87#ifdef SMP
88
89 /* when called, we have the mplock, intr disabled */
90 /* use our idleproc's "context" */
91 movl _IdlePTD, %ecx
92 movl %cr3, %eax
93 cmpl %ecx, %eax
94 je 2f
95#if defined(SWTCH_OPTIM_STATS)
96 decl _swtch_optim_stats
97 incl _tlb_flush_count
98#endif
99 movl %ecx, %cr3
1002:
101	/* Keep space for a nonexistent return address, or profiling bombs */
102 movl $gd_idlestack_top-4, %ecx
103 addl %fs:0, %ecx
104 movl %ecx, %esp
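	/*
	 * %fs addresses this cpu's private data area and the word at %fs:0
	 * holds that area's base address, so the addl above converts the
	 * gd_idlestack_top offset into an absolute pointer for the
	 * per-cpu idle stack.
	 */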
105
106 /* update common_tss.tss_esp0 pointer */
107 movl %ecx, _common_tss + TSS_ESP0
108
109 movl _cpuid, %esi
110 btrl %esi, _private_tss
111 jae 1f
112
113 movl $gd_common_tssd, %edi
114 addl %fs:0, %edi
115
116 /* move correct tss descriptor into GDT slot, then reload tr */
117 movl _tss_gdt, %ebx /* entry in GDT */
118 movl 0(%edi), %eax
119 movl %eax, 0(%ebx)
120 movl 4(%edi), %eax
121 movl %eax, 4(%ebx)
122 movl $GPROC0_SEL*8, %esi /* GSEL(entry, SEL_KPL) */
123 ltr %si
1241:
125
126 sti
127
128 /*
129 * XXX callers of cpu_switch() do a bogus splclock(). Locking should
130 * be left to cpu_switch().
131 *
132 * NOTE: spl*() may only be called while we hold the MP lock (which
133 * we do).
134 */
135 call _spl0
136
137 cli
138
139 /*
140 * _REALLY_ free the lock, no matter how deep the prior nesting.
141 * We will recover the nesting on the way out when we have a new
142 * proc to load.
143 *
144 * XXX: we had damn well better be sure we had it before doing this!
145 */
146 movl $FREE_LOCK, %eax
147 movl %eax, _mp_lock
148
149 /* do NOT have lock, intrs disabled */
150 .globl idle_loop
151idle_loop:
152
153 cmpl $0,_smp_active
154 jne 1f
155 cmpl $0,_cpuid
156 je 1f
157 jmp 2f
158
1591:
160 call _procrunnable
161 testl %eax,%eax
162 jnz 3f
163
164 /*
165 * Handle page-zeroing in the idle loop. Called with interrupts
166 * disabled and the MP lock released. Inside vm_page_zero_idle
167 * we enable interrupts and grab the mplock as required.
168 */
169 cmpl $0,_do_page_zero_idle
170 je 2f
171
172 call _vm_page_zero_idle /* internal locking */
173 testl %eax, %eax
174 jnz idle_loop
1752:
176
177 /* enable intrs for a halt */
178 movl $0, lapic_tpr /* 1st candidate for an INT */
179 call *_hlt_vector /* wait for interrupt */
180 cli
181 jmp idle_loop
182
183 /*
184 * Note that interrupts must be enabled while obtaining the MP lock
185 * in order to be able to take IPI's while blocked.
186 */
1873:
188#ifdef GRAB_LOPRIO
189 movl $LOPRIO_LEVEL, lapic_tpr /* arbitrate for INTs */
190#endif
191 sti
192 call _get_mplock
193 cli
194 call _procrunnable
195 testl %eax,%eax
196 CROSSJUMP(jnz, sw1a, jz)
197 call _rel_mplock
198 jmp idle_loop
199
200#else /* !SMP */
201
202 movl $HIDENAME(tmpstk),%esp
203#if defined(OVERLY_CONSERVATIVE_PTD_MGMT)
204#if defined(SWTCH_OPTIM_STATS)
205 incl _swtch_optim_stats
206#endif
207 movl _IdlePTD, %ecx
208 movl %cr3, %eax
209 cmpl %ecx, %eax
210 je 2f
211#if defined(SWTCH_OPTIM_STATS)
212 decl _swtch_optim_stats
213 incl _tlb_flush_count
214#endif
215 movl %ecx, %cr3
2162:
217#endif
218
219 /* update common_tss.tss_esp0 pointer */
220 movl %esp, _common_tss + TSS_ESP0
221
222 movl $0, %esi
223 btrl %esi, _private_tss
224 jae 1f
225
226 movl $_common_tssd, %edi
227
228 /* move correct tss descriptor into GDT slot, then reload tr */
229 movl _tss_gdt, %ebx /* entry in GDT */
230 movl 0(%edi), %eax
231 movl %eax, 0(%ebx)
232 movl 4(%edi), %eax
233 movl %eax, 4(%ebx)
234 movl $GPROC0_SEL*8, %esi /* GSEL(entry, SEL_KPL) */
235 ltr %si
2361:
237
238 sti
239
240 /*
241 * XXX callers of cpu_switch() do a bogus splclock(). Locking should
242 * be left to cpu_switch().
243 */
244 call _spl0
245
246 ALIGN_TEXT
247idle_loop:
248 cli
249 call _procrunnable
250 testl %eax,%eax
251 CROSSJUMP(jnz, sw1a, jz)
252#ifdef DEVICE_POLLING
253 call _idle_poll
254#else /* standard code */
255 call _vm_page_zero_idle
256#endif
257 testl %eax, %eax
258 jnz idle_loop
259 call *_hlt_vector /* wait for interrupt */
260 jmp idle_loop
261
262#endif /* SMP */
263
264CROSSJUMPTARGET(_idle)
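/*
 * CROSSJUMP()/CROSSJUMPTARGET() come from <machine/asmacros.h>; they exist
 * so that conditional jumps between separate functions (here the switch
 * code and _idle) still work when profiling is enabled, and collapse to
 * ordinary conditional jumps otherwise.
 */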
265
266#if 0
267
268ENTRY(default_halt)
269 sti
270#ifndef SMP
271 hlt /* XXX: until a wakeup IPI */
272#endif
273 ret
274
275#endif
276
277/*
278 * cpu_switch()
279 */
280ENTRY(cpu_switch)
281
282 /* switch to new process. first, save context as needed */
283 movl _curthread,%ecx
284 movl TD_PROC(%ecx),%ecx
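	/* %ecx now holds curthread->td_proc, which is NULL for the idlethread */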
285
286 /* if no process to save, don't bother */
287 testl %ecx,%ecx
288 je sw1
289
290#ifdef SMP
291 movb P_ONCPU(%ecx), %al /* save "last" cpu */
292 movb %al, P_LASTCPU(%ecx)
293 movb $0xff, P_ONCPU(%ecx) /* "leave" the cpu */
294#endif /* SMP */
295 movl P_VMSPACE(%ecx), %edx
296#ifdef SMP
297 movl _cpuid, %eax
298#else
299 xorl %eax, %eax
300#endif /* SMP */
301 btrl %eax, VM_PMAP+PM_ACTIVE(%edx)
302
303 movl P_ADDR(%ecx),%edx
304
305 movl (%esp),%eax /* Hardware registers */
306 movl %eax,PCB_EIP(%edx)
307 movl %ebx,PCB_EBX(%edx)
308 movl %esp,PCB_ESP(%edx)
309 movl %ebp,PCB_EBP(%edx)
310 movl %esi,PCB_ESI(%edx)
311 movl %edi,PCB_EDI(%edx)
312 movl %gs,PCB_GS(%edx)
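	/*
	 * Only the return %eip, the stack and frame pointers, the
	 * callee-saved registers and %gs need to be preserved here; the C
	 * calling convention allows everything else to be clobbered across
	 * the cpu_switch() call.
	 */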
313
314	/* test if debug registers should be saved */
315 movb PCB_FLAGS(%edx),%al
316 andb $PCB_DBREGS,%al
317 jz 1f /* no, skip over */
318 movl %dr7,%eax /* yes, do the save */
319 movl %eax,PCB_DR7(%edx)
320 andl $0x0000fc00, %eax /* disable all watchpoints */
321 movl %eax,%dr7
322 movl %dr6,%eax
323 movl %eax,PCB_DR6(%edx)
324 movl %dr3,%eax
325 movl %eax,PCB_DR3(%edx)
326 movl %dr2,%eax
327 movl %eax,PCB_DR2(%edx)
328 movl %dr1,%eax
329 movl %eax,PCB_DR1(%edx)
330 movl %dr0,%eax
331 movl %eax,PCB_DR0(%edx)
3321:
333
334#ifdef SMP
335 movl _mp_lock, %eax
336 /* XXX FIXME: we should be saving the local APIC TPR */
337#ifdef DIAGNOSTIC
338 cmpl $FREE_LOCK, %eax /* is it free? */
339 je badsw4 /* yes, bad medicine! */
340#endif /* DIAGNOSTIC */
341 andl $COUNT_FIELD, %eax /* clear CPU portion */
342 movl %eax, PCB_MPNEST(%edx) /* store it */
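	/*
	 * The MP lock nesting count travels with the process in its pcb so
	 * the same nesting depth can be re-established (or'd back with this
	 * cpu's lock id) when a process is switched in again further below.
	 */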
343#endif /* SMP */
344
345#if NNPX > 0
346 /* have we used fp, and need a save? */
347 cmpl %ecx,_npxproc
348 jne 1f
349 addl $PCB_SAVEFPU,%edx /* h/w bugs make saving complicated */
350 pushl %edx
351 call _npxsave /* do it in a big C function */
352 popl %eax
3531:
354#endif /* NNPX > 0 */
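	/*
	 * The FPU save above is lazy: _npxsave is only called when the
	 * outgoing process currently owns the FPU (_npxproc); any other
	 * process either never used the FPU or already has its state in
	 * the pcb.
	 */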
355
356 /*
357 * out of processes, set curthread to the current cpu's
358 * idlethread. Note that idlethread.td_proc will be NULL.
359 */
360#ifdef SMP
361 movl $gd_idlethread, %edi
362 addl %fs:0, %edi
363#else
364 movl $_idlethread, %edi
365#endif
366 movl %edi,_curthread
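	/* in C terms, roughly:  curthread = &gd_idlethread  (per-cpu on SMP) */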
367
368 /* save is done, now choose a new process or idle */
369sw1:
370 cli
371
372#ifdef SMP
373 /* Stop scheduling if smp_active goes zero and we are not BSP */
374 cmpl $0,_smp_active
375 jne 1f
376 cmpl $0,_cpuid
377 CROSSJUMP(je, _idle, jne) /* wind down */
3781:
379#endif
380
381sw1a:
382 call _chooseproc /* trash ecx, edx, ret eax*/
383 testl %eax,%eax
384 CROSSJUMP(je, _idle, jne) /* if no proc, idle */
385 movl %eax,%ecx
386
387 xorl %eax,%eax
388 andl $~AST_RESCHED,_astpending
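	/* we are committing to a switch, so clear any pending reschedule AST */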
389
390#ifdef DIAGNOSTIC
391 cmpl %eax,P_WCHAN(%ecx)
392 jne badsw1
393 cmpb $SRUN,P_STAT(%ecx)
394 jne badsw2
395#endif
396
397 movl P_ADDR(%ecx),%edx
398
399#if defined(SWTCH_OPTIM_STATS)
400 incl _swtch_optim_stats
401#endif
402 /* switch address space */
403 movl %cr3,%ebx
404 cmpl PCB_CR3(%edx),%ebx
405 je 4f
406#if defined(SWTCH_OPTIM_STATS)
407 decl _swtch_optim_stats
408 incl _tlb_flush_count
409#endif
410 movl PCB_CR3(%edx),%ebx
411 movl %ebx,%cr3
4124:
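	/*
	 * Reloading %cr3 flushes the TLB, so the load is skipped above when
	 * the incoming process already uses the installed page directory.
	 */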
413
414#ifdef SMP
415 movl _cpuid, %esi
416#else
417 xorl %esi, %esi
418#endif
419 cmpl $0, PCB_EXT(%edx) /* has pcb extension? */
420 je 1f
421 btsl %esi, _private_tss /* mark use of private tss */
422 movl PCB_EXT(%edx), %edi /* new tss descriptor */
423 jmp 2f
4241:
425
426 /* update common_tss.tss_esp0 pointer */
427 movl %edx, %ebx /* pcb */
428 addl $(UPAGES * PAGE_SIZE - 16), %ebx
429 movl %ebx, _common_tss + TSS_ESP0
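	/*
	 * The pcb lives at the base of the process' UPAGES area and the
	 * kernel stack grows down from the top of it, so esp0 (the ring 0
	 * stack pointer loaded on kernel entry) is set just below the top,
	 * leaving 16 bytes of slack.
	 */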
430
431 btrl %esi, _private_tss
432 jae 3f
433#ifdef SMP
434 movl $gd_common_tssd, %edi
435 addl %fs:0, %edi
436#else
437 movl $_common_tssd, %edi
438#endif
4392:
440 /* move correct tss descriptor into GDT slot, then reload tr */
441 movl _tss_gdt, %ebx /* entry in GDT */
442 movl 0(%edi), %eax
443 movl %eax, 0(%ebx)
444 movl 4(%edi), %eax
445 movl %eax, 4(%ebx)
446 movl $GPROC0_SEL*8, %esi /* GSEL(entry, SEL_KPL) */
447 ltr %si
4483:
449 movl P_VMSPACE(%ecx), %ebx
450#ifdef SMP
451 movl _cpuid, %eax
452#else
453 xorl %eax, %eax
454#endif
455 btsl %eax, VM_PMAP+PM_ACTIVE(%ebx)
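	/*
	 * Mark this cpu active in the new vmspace's pmap (the btrl during
	 * the save above cleared it for the old one) so the pmap code knows
	 * which cpus may hold mappings for the address space.
	 */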
456
457 /* restore context */
458 movl PCB_EBX(%edx),%ebx
459 movl PCB_ESP(%edx),%esp
460 movl PCB_EBP(%edx),%ebp
461 movl PCB_ESI(%edx),%esi
462 movl PCB_EDI(%edx),%edi
463 movl PCB_EIP(%edx),%eax
464 movl %eax,(%esp)
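	/*
	 * %esp now points at the incoming process' saved kernel stack and
	 * its top word has been replaced with the saved %eip, so the ret at
	 * the end of cpu_switch resumes that process where it last called
	 * into the switch code.
	 */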
465
466#ifdef SMP
467#ifdef GRAB_LOPRIO /* hold LOPRIO for INTs */
468#ifdef CHEAP_TPR
469 movl $0, lapic_tpr
470#else
471 andl $~APIC_TPR_PRIO, lapic_tpr
472#endif /** CHEAP_TPR */
473#endif /** GRAB_LOPRIO */
474 movl _cpuid,%eax
475 movb %al, P_ONCPU(%ecx)
476#endif /* SMP */
477 movl %edx, _curpcb
478 addl $P_THREAD,%ecx /* set current thread */
479 movl %ecx, _curthread
480 subl $P_THREAD,%ecx /* YYY does %ecx need to be restored? */
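	/*
	 * struct thread is embedded in struct proc, so the new curthread is
	 * simply the proc pointer biased by the P_THREAD offset; %ecx is
	 * then restored to the proc pointer.
	 */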
481
482#ifdef SMP
483 movl _cpu_lockid, %eax
484 orl PCB_MPNEST(%edx), %eax /* add next count from PROC */
485 movl %eax, _mp_lock /* load the mp_lock */
486 /* XXX FIXME: we should be restoring the local APIC TPR */
487#endif /* SMP */
488
489#ifdef USER_LDT
490 cmpl $0, PCB_USERLDT(%edx)
491 jnz 1f
492 movl __default_ldt,%eax
493 cmpl _currentldt,%eax
494 je 2f
495 lldt __default_ldt
496 movl %eax,_currentldt
497 jmp 2f
4981: pushl %edx
499 call _set_user_ldt
500 popl %edx
5012:
502#endif
503
504 /* This must be done after loading the user LDT. */
505 .globl cpu_switch_load_gs
506cpu_switch_load_gs:
507 movl PCB_GS(%edx),%gs
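	/*
	 * The saved %gs may refer to an entry in the process' private LDT,
	 * so it can only be loaded once that LDT is installed; loading it
	 * any earlier could fault or pick up a stale descriptor.
	 */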
508
509	/* test if debug registers should be restored */
510 movb PCB_FLAGS(%edx),%al
511 andb $PCB_DBREGS,%al
512 jz 1f /* no, skip over */
513 movl PCB_DR6(%edx),%eax /* yes, do the restore */
514 movl %eax,%dr6
515 movl PCB_DR3(%edx),%eax
516 movl %eax,%dr3
517 movl PCB_DR2(%edx),%eax
518 movl %eax,%dr2
519 movl PCB_DR1(%edx),%eax
520 movl %eax,%dr1
521 movl PCB_DR0(%edx),%eax
522 movl %eax,%dr0
523 movl %dr7,%eax /* load dr7 so as not to disturb */
524 andl $0x0000fc00,%eax /* reserved bits */
525 pushl %ebx
526 movl PCB_DR7(%edx),%ebx
527 andl $~0x0000fc00,%ebx
528 orl %ebx,%eax
529 popl %ebx
530 movl %eax,%dr7
5311:
532
533 sti
534 ret
535
536CROSSJUMPTARGET(sw1a)
537
538#ifdef DIAGNOSTIC
539badsw1:
540 pushl $sw0_1
541 call _panic
542
543sw0_1: .asciz "cpu_switch: has wchan"
544
545badsw2:
546 pushl $sw0_2
547 call _panic
548
549sw0_2: .asciz "cpu_switch: not SRUN"
550#endif
551
552#if defined(SMP) && defined(DIAGNOSTIC)
553badsw4:
554 pushl $sw0_4
555 call _panic
556
557sw0_4: .asciz "cpu_switch: do not have lock"
558#endif /* SMP && DIAGNOSTIC */
559
560/*
561 * savectx(pcb)
562 * Update pcb, saving current processor state.
563 */
564ENTRY(savectx)
565 /* fetch PCB */
566 movl 4(%esp),%ecx
567
568 /* caller's return address - child won't execute this routine */
569 movl (%esp),%eax
570 movl %eax,PCB_EIP(%ecx)
571
572 movl %cr3,%eax
573 movl %eax,PCB_CR3(%ecx)
574
575 movl %ebx,PCB_EBX(%ecx)
576 movl %esp,PCB_ESP(%ecx)
577 movl %ebp,PCB_EBP(%ecx)
578 movl %esi,PCB_ESI(%ecx)
579 movl %edi,PCB_EDI(%ecx)
580 movl %gs,PCB_GS(%ecx)
581
582#if NNPX > 0
583 /*
584 * If npxproc == NULL, then the npx h/w state is irrelevant and the
585 * state had better already be in the pcb. This is true for forks
586 * but not for dumps (the old book-keeping with FP flags in the pcb
587 * always lost for dumps because the dump pcb has 0 flags).
588 *
589 * If npxproc != NULL, then we have to save the npx h/w state to
590 * npxproc's pcb and copy it to the requested pcb, or save to the
591 * requested pcb and reload. Copying is easier because we would
592 * have to handle h/w bugs for reloading. We used to lose the
593 * parent's npx state for forks by forgetting to reload.
594 */
595 movl _npxproc,%eax
596 testl %eax,%eax
597 je 1f
598
599 pushl %ecx
600 movl P_ADDR(%eax),%eax
601 leal PCB_SAVEFPU(%eax),%eax
602 pushl %eax
603 pushl %eax
604 call _npxsave
605 addl $4,%esp
606 popl %eax
607 popl %ecx
608
609 pushl $PCB_SAVEFPU_SIZE
610 leal PCB_SAVEFPU(%ecx),%ecx
611 pushl %ecx
612 pushl %eax
613 call _bcopy
614 addl $12,%esp
615#endif /* NNPX > 0 */
616
6171:
618 ret