/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/swtch.s,v 1.89.2.10 2003/01/23 03:36:24 ps Exp $
 */

#include "use_npx.h"

#include <sys/rtprio.h>

#include <machine/asmacros.h>
#include <machine/segments.h>

#include <machine/pmap.h>
#include <machine_base/apic/apicreg.h>
#include <machine/lock.h>

#include "assym.s"

#define MPLOCKED        lock ;

        .data

        .globl  panic
        .globl  lwkt_switch_return

#if defined(SWTCH_OPTIM_STATS)
        .globl  swtch_optim_stats, tlb_flush_count
swtch_optim_stats:      .long   0               /* number of _swtch_optims */
tlb_flush_count:        .long   0
#endif

        .text


/*
 * cpu_heavy_switch(next_thread)
 *
 * Switch from the current thread to a new thread.  This entry
 * is normally called via the thread->td_switch function, and will
 * only be called when the current thread is a heavy weight process.
 *
 * Some instructions have been reordered to reduce pipeline stalls.
 *
 * YYY disable interrupts once giant is removed.
 */
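/*
 * Call-path sketch (illustrative only): the LWKT scheduler picks the next
 * thread and switches through the td_switch function pointer, roughly
 *
 *      ntd = <next runnable thread chosen by lwkt_switch()>;
 *      (*curthread->td_switch)(ntd);   // lands here for heavy-weight threads
 *
 * The function-pointer form comes from the comment above; the caller shown
 * is an assumption for illustration, not the authoritative call site.
 */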
ENTRY(cpu_heavy_switch)
        /*
         * Save general regs
         */
        movl    PCPU(curthread),%ecx
        movl    (%esp),%eax                     /* (reorder optimization) */
        movl    TD_PCB(%ecx),%edx               /* EDX = PCB */
        movl    %eax,PCB_EIP(%edx)              /* return PC may be modified */
        movl    %ebx,PCB_EBX(%edx)
        movl    %esp,PCB_ESP(%edx)
        movl    %ebp,PCB_EBP(%edx)
        movl    %esi,PCB_ESI(%edx)
        movl    %edi,PCB_EDI(%edx)
        movl    4(%esp),%edi                    /* EDI = newthread */

        /*
         * Clear the cpu bit in the pmap active mask.  The restore
         * function will set the bit in the pmap active mask.
         *
         * Special case: when switching between threads sharing the
         * same vmspace if we avoid clearing the bit we do not have
         * to reload %cr3 (if we clear the bit we could race page
         * table ops done by other threads and would have to reload
         * %cr3, because those ops will not know to IPI us).
         */
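        /*
         * Roughly equivalent C for the test below (illustrative sketch;
         * field names assumed from the vmspace/pmap structures):
         *
         *      if (newlwp == NULL ||
         *          oldlwp->lwp_vmspace != newlwp->lwp_vmspace) {
         *              atomic_clear_int(&oldvm->vm_pmap.pm_active,
         *                               mycpu->gd_cpumask);
         *      }
         *      // same vmspace: leave the bit set so other cpus keep
         *      // IPIing us and the restore side can skip the %cr3 reload.
         */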
        movl    %ecx,%ebx                       /* EBX = oldthread */
        movl    TD_LWP(%ecx),%ecx               /* ECX = oldlwp */
        movl    TD_LWP(%edi),%esi               /* ESI = newlwp */
        movl    LWP_VMSPACE(%ecx),%ecx          /* ECX = oldvmspace */
        testl   %esi,%esi                       /* might not be a heavy */
        jz      1f
        cmpl    LWP_VMSPACE(%esi),%ecx          /* same vmspace? */
        je      2f
1:
        movl    PCPU(cpuid), %eax
        MPLOCKED btrl   %eax, VM_PMAP+PM_ACTIVE(%ecx)
2:
        /*
         * Push the LWKT switch restore function, which resumes a heavy
         * weight process.  Note that the LWKT switcher is based on
         * TD_SP, while the heavy weight process switcher is based on
         * PCB_ESP.  TD_SP is usually two ints pushed relative to
         * PCB_ESP.  We push the flags for later restore by cpu_heavy_restore.
         */
        pushfl
        pushl   $cpu_heavy_restore
        movl    %esp,TD_SP(%ebx)
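        /*
         * Stack shape produced above (sketch):
         *
         *      PCB_ESP ->      caller's return PC (also saved in PCB_EIP)
         *                      saved eflags            <- pushfl
         *      TD_SP   ->      &cpu_heavy_restore      <- pushl
         *
         * i.e. TD_SP = PCB_ESP - 8, the "two ints" mentioned above.  An LWKT
         * 'ret' through TD_SP enters cpu_heavy_restore, which pops the flags.
         */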

        /*
         * Save debug regs if necessary
         */
        movb    PCB_FLAGS(%edx),%al
        andb    $PCB_DBREGS,%al
        jz      1f                              /* no, skip over */
        movl    %dr7,%eax                       /* yes, do the save */
        movl    %eax,PCB_DR7(%edx)
        andl    $0x0000fc00, %eax               /* disable all watchpoints */
        movl    %eax,%dr7
        movl    %dr6,%eax
        movl    %eax,PCB_DR6(%edx)
        movl    %dr3,%eax
        movl    %eax,PCB_DR3(%edx)
        movl    %dr2,%eax
        movl    %eax,PCB_DR2(%edx)
        movl    %dr1,%eax
        movl    %eax,PCB_DR1(%edx)
        movl    %dr0,%eax
        movl    %eax,PCB_DR0(%edx)
1:

#if NNPX > 0
        /*
         * Save the FP state if we have used the FP.  Note that calling
         * npxsave will NULL out PCPU(npxthread).
         */
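        /*
         * Roughly, in C (illustrative):
         *
         *      if (mycpu->gd_npxthread == oldtd)
         *              npxsave(oldtd->td_savefpu);     // also clears gd_npxthread
         */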
        cmpl    %ebx,PCPU(npxthread)
        jne     1f
        pushl   TD_SAVEFPU(%ebx)
        call    npxsave                 /* do it in a big C function */
        addl    $4,%esp                 /* EAX, ECX, EDX trashed */
1:
#endif  /* NNPX > 0 */

        /*
         * Switch to the next thread, which was passed as an argument
         * to cpu_heavy_switch().  Due to the eflags and switch-restore
         * function we pushed, the argument is at 12(%esp).  Set the current
         * thread, load the stack pointer, and 'ret' into the switch-restore
         * function.
         *
         * The switch restore function expects the new thread to be in %eax
         * and the old one to be in %ebx.
         *
         * There is a one-instruction window where curthread is the new
         * thread but %esp still points to the old thread's stack, but
         * we are protected by a critical section so it is ok.
         */
        movl    %edi,%eax               /* EAX = newtd, EBX = oldtd */
        movl    %eax,PCPU(curthread)
        movl    TD_SP(%eax),%esp
        ret

/*
 * cpu_exit_switch()
 *
 * The switch function is changed to this when a thread is going away
 * for good.  We have to ensure that the MMU state is not cached, and
 * we don't bother saving the existing thread state before switching.
 *
 * At this point we are in a critical section and this cpu owns the
 * thread's token, which serves as an interlock until the switchout is
 * complete.
 */
ENTRY(cpu_exit_switch)
        /*
         * Get us out of the vmspace
         */
        movl    IdlePTD,%ecx
        movl    %cr3,%eax
        cmpl    %ecx,%eax
        je      1f
        movl    %ecx,%cr3
1:
        movl    PCPU(curthread),%ebx

        /*
         * If this is a process/lwp, deactivate the pmap after we've
         * switched it out.
         */
        movl    TD_LWP(%ebx),%ecx
        testl   %ecx,%ecx
        jz      2f
        movl    PCPU(cpuid), %eax
        movl    LWP_VMSPACE(%ecx), %ecx         /* ECX = vmspace */
        MPLOCKED btrl   %eax, VM_PMAP+PM_ACTIVE(%ecx)
2:
        /*
         * Switch to the next thread.  RET into the restore function, which
         * expects the new thread in EAX and the old in EBX.
         *
         * There is a one-instruction window where curthread is the new
         * thread but %esp still points to the old thread's stack, but
         * we are protected by a critical section so it is ok.
         */
        movl    4(%esp),%eax
        movl    %eax,PCPU(curthread)
        movl    TD_SP(%eax),%esp
        ret

/*
 * cpu_heavy_restore()  (current thread in %eax on entry)
 *
 * Restore the thread after an LWKT switch.  This entry is normally
 * called via the LWKT switch restore function, which was pulled
 * off the thread stack and jumped to.
 *
 * This entry is only called if the thread was previously saved
 * using cpu_heavy_switch() (the heavy weight process thread switcher),
 * or when a new process is initially scheduled.
 *
 * NOTE: The lwp may be in any state, not necessarily LSRUN, because
 * a preemption switch may interrupt the process and then return via
 * cpu_heavy_restore.
 *
 * YYY theoretically we do not have to restore everything here, a lot
 * of this junk can wait until we return to usermode.  But for now
 * we restore everything.
 *
 * YYY the PCB crap is really crap, it makes startup a bitch because
 * we can't switch away.
 *
 * YYY note: spl check is done in mi_switch when it splx()'s.
 */

ENTRY(cpu_heavy_restore)
        popfl
        movl    TD_LWP(%eax),%ecx

#if defined(SWTCH_OPTIM_STATS)
        incl    _swtch_optim_stats
#endif
        /*
         * Tell the pmap that our cpu is using the VMSPACE now.  We cannot
         * safely test/reload %cr3 until after we have set the bit in the
         * pmap (remember, we do not hold the MP lock in the switch code).
         *
         * Also note that when switching between two lwps sharing the
         * same vmspace we have already avoided clearing the cpu bit
         * in pm_active.  If we had cleared it other cpus would not know
         * to IPI us and we would have to unconditionally reload %cr3.
         *
         * Also note that if the pmap is undergoing an atomic inval/mod
         * that is unaware that our cpu has been added to it we have to
         * wait for it to complete before we can continue.
         */
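        /*
         * Illustrative C equivalent of the lock;cmpxchg loop and the
         * interlock check that follow (field names per the pmap headers;
         * sketch only):
         *
         *      do {
         *              old = vm->vm_pmap.pm_active;
         *              new = old | mycpu->gd_cpumask;
         *      } while (!atomic_cmpset_int(&vm->vm_pmap.pm_active, old, new));
         *
         *      if (old & CPUMASK_LOCK)         // pmap inval/mod in progress
         *              pmap_interlock_wait(vm);  // then reload %cr3 unconditionally
         */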
        movl    LWP_VMSPACE(%ecx), %ecx         /* ECX = vmspace */
        pushl   %eax                            /* save curthread */
1:
        movl    VM_PMAP+PM_ACTIVE(%ecx),%eax    /* old value for cmpxchgl */
        movl    PCPU(cpumask), %esi
        orl     %eax,%esi                       /* new value for cmpxchgl */
        MPLOCKED cmpxchgl %esi,VM_PMAP+PM_ACTIVE(%ecx)
        jnz     1b

        /*
         * Check CPUMASK_LOCK
         */
        testl   $CPUMASK_LOCK,%eax
        jz      1f
        pushl   %ecx                            /* call(stack:vmspace) */
        call    pmap_interlock_wait
        popl    %ecx

        /*
         * Needs unconditional load cr3
         */
        popl    %eax                            /* EAX = curthread */
        movl    TD_PCB(%eax),%edx               /* EDX = PCB */
        movl    PCB_CR3(%edx),%ecx
        jmp     2f
1:
        popl    %eax

        /*
         * Restore the MMU address space.  If it is the same as the last
         * thread we don't have to invalidate the tlb (i.e. reload cr3).
         * YYY which naturally also means that the PM_ACTIVE bit had better
         * already have been set before we set it above, check? YYY
         */
        movl    TD_PCB(%eax),%edx               /* EDX = PCB */
        movl    %cr3,%esi
        movl    PCB_CR3(%edx),%ecx
        cmpl    %esi,%ecx
        je      4f
2:
#if defined(SWTCH_OPTIM_STATS)
        decl    _swtch_optim_stats
        incl    _tlb_flush_count
#endif
        movl    %ecx,%cr3
4:

        /*
         * NOTE: %ebx is the previous thread and %eax is the new thread.
         *       %ebx is retained throughout so we can return it.
         *
         *       lwkt_switch[_return] is responsible for handling TDF_RUNNING.
         */

        /*
         * Deal with the PCB extension, restore the private tss
         */
        movl    PCB_EXT(%edx),%edi              /* check for a PCB extension */
        movl    $1,%ecx                         /* maybe mark use of a private tss */
        testl   %edi,%edi
        jnz     2f

        /*
         * Going back to the common_tss.  We may need to update TSS_ESP0
         * which sets the top of the supervisor stack when entering from
         * usermode.  The PCB is at the top of the stack but we need another
         * 16 bytes to take vm86 into account.
         */
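        /*
         * In C terms the two instructions below do roughly (sketch):
         *
         *      mycpu->gd_common_tss.tss_esp0 = (char *)pcb - 16;
         *
         * so ring-0 entries from usermode begin 16 bytes below the PCB,
         * leaving room for the extra segment registers a vm86 trapframe
         * pushes.  Field names are assumptions based on the common_tss and
         * TSS_ESP0 symbols used here.
         */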
        leal    -16(%edx),%ecx
        movl    %ecx, PCPU(common_tss) + TSS_ESP0

        cmpl    $0,PCPU(private_tss)            /* don't have to reload if */
        je      3f                              /* already using the common TSS */

        subl    %ecx,%ecx                       /* unmark use of private tss */

        /*
         * Get the address of the common TSS descriptor for the ltr.
         * There is no way to get the address of a segment-accessed variable
         * so we store a self-referential pointer at the base of the per-cpu
         * data area and add the appropriate offset.
         */
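        /*
         * Sketch of the address computation done below (identifier names
         * are illustrative only):
         *
         *      self = *(char **)0;             // fetched via %fs:0, the self-pointer
         *      tssd = self + gd_common_tssd;   // symbol used as a %fs-relative offset
         *
         * Adding the self-pointer stored at %fs:0 converts the segment-relative
         * location of gd_common_tssd into a linear address that can be copied
         * into the GDT and activated with ltr.
         */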
        movl    $gd_common_tssd, %edi
        addl    %fs:0, %edi

        /*
         * Move the correct TSS descriptor into the GDT slot, then reload
         * ltr.
         */
2:
        movl    %ecx,PCPU(private_tss)          /* mark/unmark private tss */
        movl    PCPU(tss_gdt), %ecx             /* entry in GDT */
        movl    0(%edi), %eax
        movl    %eax, 0(%ecx)
        movl    4(%edi), %eax
        movl    %eax, 4(%ecx)
        movl    $GPROC0_SEL*8, %esi             /* GSEL(entry, SEL_KPL) */
        ltr     %si

3:
        /*
         * Restore general registers.  %ebx is restored later.
         */
        movl    PCB_ESP(%edx),%esp
        movl    PCB_EBP(%edx),%ebp
        movl    PCB_ESI(%edx),%esi
        movl    PCB_EDI(%edx),%edi
        movl    PCB_EIP(%edx),%eax
        movl    %eax,(%esp)

        /*
         * Restore the user LDT if we have one
         */
        cmpl    $0, PCB_USERLDT(%edx)
        jnz     1f
        movl    _default_ldt,%eax
        cmpl    PCPU(currentldt),%eax
        je      2f
        lldt    _default_ldt
        movl    %eax,PCPU(currentldt)
        jmp     2f
1:      pushl   %edx
        call    set_user_ldt
        popl    %edx
2:
        /*
         * Restore the user TLS if we have one
         */
        pushl   %edx
        call    set_user_TLS
        popl    %edx

        /*
         * Restore the DEBUG register state if necessary.
         */
        movb    PCB_FLAGS(%edx),%al
        andb    $PCB_DBREGS,%al
        jz      1f                              /* no, skip over */
        movl    PCB_DR6(%edx),%eax              /* yes, do the restore */
        movl    %eax,%dr6
        movl    PCB_DR3(%edx),%eax
        movl    %eax,%dr3
        movl    PCB_DR2(%edx),%eax
        movl    %eax,%dr2
        movl    PCB_DR1(%edx),%eax
        movl    %eax,%dr1
        movl    PCB_DR0(%edx),%eax
        movl    %eax,%dr0
        movl    %dr7,%eax                       /* load dr7 so as not to disturb */
        andl    $0x0000fc00,%eax                /*   reserved bits               */
        movl    PCB_DR7(%edx),%ecx
        andl    $~0x0000fc00,%ecx
        orl     %ecx,%eax
        movl    %eax,%dr7
1:
        movl    %ebx,%eax                       /* return previous thread */
        movl    PCB_EBX(%edx),%ebx
        ret

/*
 * savectx(pcb)
 *
 * Update pcb, saving current processor state.
 */
ENTRY(savectx)
        /* fetch PCB */
        movl    4(%esp),%ecx

        /* caller's return address - child won't execute this routine */
        movl    (%esp),%eax
        movl    %eax,PCB_EIP(%ecx)

        movl    %cr3,%eax
        movl    %eax,PCB_CR3(%ecx)

        movl    %ebx,PCB_EBX(%ecx)
        movl    %esp,PCB_ESP(%ecx)
        movl    %ebp,PCB_EBP(%ecx)
        movl    %esi,PCB_ESI(%ecx)
        movl    %edi,PCB_EDI(%ecx)

#if NNPX > 0
        /*
         * If npxthread == NULL, then the npx h/w state is irrelevant and the
         * state had better already be in the pcb.  This is true for forks
         * but not for dumps (the old book-keeping with FP flags in the pcb
         * always lost for dumps because the dump pcb has 0 flags).
         *
         * If npxthread != NULL, then we have to save the npx h/w state to
         * npxthread's pcb and copy it to the requested pcb, or save to the
         * requested pcb and reload.  Copying is easier because we would
         * have to handle h/w bugs for reloading.  We used to lose the
         * parent's npx state for forks by forgetting to reload.
         */
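        /*
         * Roughly equivalent C for the save-and-copy path below (illustrative;
         * the pcb FP-area field name is an assumption, the size comes from the
         * assym constant used in the code):
         *
         *      if (mycpu->gd_npxthread != NULL) {
         *              union savefpu *src = mycpu->gd_npxthread->td_savefpu;
         *              npxsave(src);           // save h/w state, clears gd_npxthread
         *              bcopy(src, &pcb->pcb_save, PCB_SAVEFPU_SIZE);
         *      }
         */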
        movl    PCPU(npxthread),%eax
        testl   %eax,%eax
        je      1f

        pushl   %ecx                            /* target pcb */
        movl    TD_SAVEFPU(%eax),%eax           /* originating savefpu area */
        pushl   %eax

        pushl   %eax
        call    npxsave
        addl    $4,%esp

        popl    %eax
        popl    %ecx

        pushl   $PCB_SAVEFPU_SIZE
        leal    PCB_SAVEFPU(%ecx),%ecx
        pushl   %ecx
        pushl   %eax
        call    bcopy
        addl    $12,%esp
#endif  /* NNPX > 0 */

1:
        ret

/*
 * cpu_idle_restore()   (current thread in %eax on entry) (one-time execution)
 *
 * Don't bother setting up any regs other than %ebp so backtraces
 * don't die.  This restore function is used to bootstrap into the
 * cpu_idle() LWKT only, after that cpu_lwkt_*() will be used for
 * switching.
 *
 * Clear TDF_RUNNING in old thread only after we've cleaned up %cr3.
 * This only occurs during system boot so no special handling is
 * required for migration.
 *
 * If we are an AP we have to call ap_init() before jumping to
 * cpu_idle().  ap_init() will synchronize with the BP and finish
 * setting up various ncpu-dependent globaldata fields.  This may
 * happen on UP as well as SMP if we happen to be simulating multiple
 * cpus.
 */
ENTRY(cpu_idle_restore)
        /* cli */
        movl    IdlePTD,%ecx
        movl    $0,%ebp
        pushl   $0
        movl    %ecx,%cr3
        andl    $~TDF_RUNNING,TD_FLAGS(%ebx)
        orl     $TDF_RUNNING,TD_FLAGS(%eax)     /* manual, no switch_return */
        cmpl    $0,PCPU(cpuid)
        je      1f
        call    ap_init
1:
        /*
         * ap_init can decide to enable interrupts early, but otherwise, or if
         * we are UP, do it here.
         */
        sti
        jmp     cpu_idle

/*
 * cpu_kthread_restore()        (current thread is %eax on entry) (one-time execution)
 *
 * Don't bother setting up any regs other than %ebp so backtraces
 * don't die.  This restore function is used to bootstrap into an
 * LWKT based kernel thread only.  cpu_lwkt_switch() will be used
 * after this.
 *
 * Since all of our context is on the stack we are reentrant and
 * we can release our critical section and enable interrupts early.
 *
 * Because this switch target does not 'return' to lwkt_switch()
 * we have to call lwkt_switch_return(otd) to clean up otd.
 * otd is in %ebx.
 */
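/*
 * C-level sketch of the bootstrap below (illustrative; the one-time frame
 * consumed here is laid out by the kthread creation code):
 *
 *      lwkt_switch_return(otd);        // otd = previous thread, in %ebx
 *      crit_count--;                   // the TD_CRITCOUNT decrement
 *      func = pcb->pcb_esi;            // kthread entry point
 *      arg  = pcb->pcb_ebx;            // its argument
 *      func(arg);                      // a plain 'ret' from func falls into
 *                                      // the exit function popped off the stack
 */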
ENTRY(cpu_kthread_restore)
        sti
        movl    IdlePTD,%ecx
        movl    TD_PCB(%eax),%esi
        movl    $0,%ebp
        movl    %ecx,%cr3

        pushl   %eax
        pushl   %ebx            /* argument to lwkt_switch_return */
        call    lwkt_switch_return
        addl    $4,%esp
        popl    %eax
        decl    TD_CRITCOUNT(%eax)
        popl    %eax            /* kthread exit function */
        pushl   PCB_EBX(%esi)   /* argument to ESI function */
        pushl   %eax            /* set exit func as return address */
        movl    PCB_ESI(%esi),%eax
        jmp     *%eax

/*
 * cpu_lwkt_switch()
 *
 * Standard LWKT switching function.  Only non-scratch registers are
 * saved and we don't bother with the MMU state or anything else.
 *
 * This function is always called while in a critical section.
 *
 * There is a one-instruction window where curthread is the new
 * thread but %esp still points to the old thread's stack, but
 * we are protected by a critical section so it is ok.
 *
 * YYY BGL, SPL
 */
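/*
 * Shape of the frame saved at TD_SP by the code below (sketch, lowest
 * address first):
 *
 *      TD_SP -> &cpu_lwkt_restore      (target of the final 'ret')
 *               saved eflags           (pushfl)
 *               saved %edi
 *               saved %esi
 *               saved %ebx
 *               saved %ebp             (kept so backtraces work)
 *               caller's return EIP
 *               argument: next thread
 *
 * The new-thread argument is fetched as 4+20(%esp) before the restore
 * address is pushed; cpu_lwkt_restore unwinds the rest in reverse order.
 */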
ENTRY(cpu_lwkt_switch)
        pushl   %ebp            /* note: GDB hacked to locate ebp relative to td_sp */
        pushl   %ebx
        movl    PCPU(curthread),%ebx
        pushl   %esi
        pushl   %edi
        pushfl
        /* warning: adjust movl into %eax below if you change the pushes */

#if NNPX > 0
        /*
         * Save the FP state if we have used the FP.  Note that calling
         * npxsave will NULL out PCPU(npxthread).
         *
         * We have to deal with the FP state for LWKT threads in case they
         * happen to get preempted or block while doing an optimized
         * bzero/bcopy/memcpy.
         */
        cmpl    %ebx,PCPU(npxthread)
        jne     1f
        pushl   TD_SAVEFPU(%ebx)
        call    npxsave                 /* do it in a big C function */
        addl    $4,%esp                 /* EAX, ECX, EDX trashed */
1:
#endif  /* NNPX > 0 */

        movl    4+20(%esp),%eax         /* switch to this thread */
        pushl   $cpu_lwkt_restore
        movl    %esp,TD_SP(%ebx)
        movl    %eax,PCPU(curthread)
        movl    TD_SP(%eax),%esp

        /*
         * eax contains new thread, ebx contains old thread.
         */
        ret

/*
 * cpu_lwkt_restore()   (current thread in %eax on entry)
 *
 * Standard LWKT restore function.  This function is always called
 * while in a critical section.
 *
 * Warning: due to preemption the restore function can be used to
 * 'return' to the original thread.  Interrupt disablement must be
 * protected through the switch so we cannot run splz here.
 *
 * YYY we theoretically do not need to load IdlePTD into cr3, but if
 * so we need a way to detect when the PTD we are using is being
 * deleted due to a process exiting.
 */
ENTRY(cpu_lwkt_restore)
        movl    IdlePTD,%ecx    /* YYY borrow but beware desched/cpuchg/exit */
        movl    %cr3,%edx
        cmpl    %ecx,%edx
        je      1f
        movl    %ecx,%cr3
1:
        /*
         * NOTE: %ebx is the previous thread and %eax is the new thread.
         *       %ebx is retained throughout so we can return it.
         *
         *       lwkt_switch[_return] is responsible for handling TDF_RUNNING.
         */
        movl    %ebx,%eax
        popfl
        popl    %edi
        popl    %esi
        popl    %ebx
        popl    %ebp
        ret