/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 * LWKT threads Copyright (c) 2003 Matthew Dillon
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/swtch.s,v 1.89.2.10 2003/01/23 03:36:24 ps Exp $
 * $DragonFly: src/sys/i386/i386/Attic/swtch.s,v 1.26 2003/08/07 21:17:22 dillon Exp $
 */

#include "use_npx.h"
#include "opt_user_ldt.h"

#include <sys/rtprio.h>

#include <machine/asmacros.h>
#include <machine/ipl.h>

#ifdef SMP
#include <machine/pmap.h>
#include <machine/smptests.h>		/** GRAB_LOPRIO */
#include <machine/apic.h>
#include <machine/lock.h>
#endif /* SMP */

#include "assym.s"

#if defined(SMP)
#define MPLOCKED	lock ;
#else
#define MPLOCKED
#endif

        .data

        .globl  panic

#if defined(SWTCH_OPTIM_STATS)
        .globl  swtch_optim_stats, tlb_flush_count
swtch_optim_stats:      .long   0       /* number of _swtch_optims */
tlb_flush_count:        .long   0
#endif

        .text

/*
 * cpu_heavy_switch(next_thread)
 *
 *	Switch from the current thread to a new thread.  This entry
 *	is normally called via the thread->td_switch function, and will
 *	only be called when the current thread is a heavy weight process.
 *
 *	Some instructions have been reordered to reduce pipeline stalls.
 *
 *	YYY disable interrupts once giant is removed.
 */
ENTRY(cpu_heavy_switch)
        /*
         * Save general regs
         */
        movl    PCPU(curthread),%ecx
        movl    (%esp),%eax                     /* (reorder optimization) */
        movl    TD_PCB(%ecx),%edx               /* EDX = PCB */
        movl    %eax,PCB_EIP(%edx)              /* return PC may be modified */
        movl    %ebx,PCB_EBX(%edx)
        movl    %esp,PCB_ESP(%edx)
        movl    %ebp,PCB_EBP(%edx)
        movl    %esi,PCB_ESI(%edx)
        movl    %edi,PCB_EDI(%edx)
        movl    %gs,PCB_GS(%edx)

        movl    %ecx,%ebx                       /* EBX = curthread */
        movl    TD_PROC(%ecx),%ecx
        movl    PCPU(cpuid),%eax
        movl    P_VMSPACE(%ecx),%ecx            /* ECX = vmspace */
        MPLOCKED btrl %eax,VM_PMAP+PM_ACTIVE(%ecx)

        /*
         * Push the LWKT switch restore function, which resumes a heavy
         * weight process.  Note that the LWKT switcher is based on
         * TD_SP, while the heavy weight process switcher is based on
         * PCB_ESP.  TD_SP is usually two ints pushed relative to
         * PCB_ESP.  We push the flags for later restore by cpu_heavy_restore.
         */
        pushfl
        pushl   $cpu_heavy_restore
        movl    %esp,TD_SP(%ebx)
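        /*
         * Illustrative stack picture at this point (follows directly
         * from the two pushes above; lower addresses at the top):
         *
         *      TD_SP   ->      &cpu_heavy_restore      (restore function)
         *                      saved eflags            (from the pushfl)
         *      PCB_ESP ->      caller's return EIP
         *                      next_thread argument
         */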

        /*
         * Save debug regs if necessary
         */
        movb    PCB_FLAGS(%edx),%al
        andb    $PCB_DBREGS,%al
        jz      1f                              /* no, skip over */
        movl    %dr7,%eax                       /* yes, do the save */
        movl    %eax,PCB_DR7(%edx)
        andl    $0x0000fc00,%eax                /* disable all watchpoints */
        movl    %eax,%dr7
        movl    %dr6,%eax
        movl    %eax,PCB_DR6(%edx)
        movl    %dr3,%eax
        movl    %eax,PCB_DR3(%edx)
        movl    %dr2,%eax
        movl    %eax,PCB_DR2(%edx)
        movl    %dr1,%eax
        movl    %eax,PCB_DR1(%edx)
        movl    %dr0,%eax
        movl    %eax,PCB_DR0(%edx)
1:

        /*
         * Save the FP state if we have used the FP.  Note that calling
         * npxsave will NULL out PCPU(npxthread).
         */
#if NNPX > 0
        cmpl    %ebx,PCPU(npxthread)
        jne     1f
        addl    $PCB_SAVEFPU,%edx
        pushl   %edx
        call    npxsave                         /* do it in a big C function */
        addl    $4,%esp                         /* EAX, ECX, EDX trashed */
1:
#endif /* NNPX > 0 */

        /*
         * Switch to the next thread, which was passed as an argument
         * to cpu_heavy_switch().  Due to the eflags and switch-restore
         * function we pushed, the argument is at 12(%esp).  Set the current
         * thread, load the stack pointer, and 'ret' into the switch-restore
         * function.
         *
         * The switch restore function expects the new thread to be in %eax
         * and the old one to be in %ebx.
         */
        movl    12(%esp),%eax                   /* EAX = newtd, EBX = oldtd */
        movl    %eax,PCPU(curthread)
        movl    TD_SP(%eax),%esp
        ret
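        /*
         * Rough C-level sketch of the tail sequence above (illustrative
         * only; the real switch must be assembly because it swaps stacks
         * out from under the compiler, and the C-side names here are
         * assumptions based on the PCPU(curthread) and TD_SP offsets):
         *
         *      mycpu->gd_curthread = newtd;
         *      %esp = newtd->td_sp;
         *      return;         // 'ret' jumps into the restore function
         */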

/*
 * cpu_exit_switch()
 *
 *	The switch function is changed to this when a thread is going away
 *	for good.  We have to ensure that the MMU state is not cached, and
 *	we don't bother saving the existing thread state before switching.
 *
 *	At this point we are in a critical section and this cpu owns the
 *	thread's token, which serves as an interlock until the switchout is
 *	complete.
 */
ENTRY(cpu_exit_switch)
        /*
         * Get us out of the vmspace
         */
        movl    IdlePTD,%ecx
        movl    %cr3,%eax
        cmpl    %ecx,%eax
        je      1f
        movl    %ecx,%cr3
1:
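        /*
         * (The compare above is purely an optimization: writing %cr3,
         * even with an unchanged value, flushes the non-global TLB
         * entries, so we skip the reload when we are already on the
         * IdlePTD page directory.)
         */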
        movl    PCPU(curthread),%ebx
        /*
         * Switch to the next thread.  RET into the restore function, which
         * expects the new thread in EAX and the old in EBX.
         */
        movl    4(%esp),%eax
        movl    %eax,PCPU(curthread)
        movl    TD_SP(%eax),%esp
        ret

/*
 * cpu_heavy_restore()	(current thread in %eax on entry)
 *
 *	Restore the thread after an LWKT switch.  This entry is normally
 *	called via the LWKT switch restore function, which was pulled
 *	off the thread stack and jumped to.
 *
 *	This entry is only called if the thread was previously saved
 *	using cpu_heavy_switch() (the heavy weight process thread switcher),
 *	or when a new process is initially scheduled.  The first thing we
 *	do is clear the TDF_RUNNING bit in the old thread and set it in the
 *	new thread.
 *
 *	YYY theoretically we do not have to restore everything here, a lot
 *	of this junk can wait until we return to usermode.  But for now
 *	we restore everything.
 *
 *	YYY the PCB crap is really crap, it makes startup a bitch because
 *	we can't switch away.
 *
 *	YYY note: spl check is done in mi_switch when it splx()'s.
 */

ENTRY(cpu_heavy_restore)
        popfl
        movl    TD_PCB(%eax),%edx               /* EDX = PCB */
        movl    TD_PROC(%eax),%ecx
#ifdef DIAGNOSTIC
        cmpb    $SRUN,P_STAT(%ecx)
        jne     badsw2
#endif

#if defined(SWTCH_OPTIM_STATS)
        incl    swtch_optim_stats               /* no underscore: matches .globl above */
#endif
        /*
         * Tell the pmap that our cpu is using the VMSPACE now.  We cannot
         * safely test/reload %cr3 until after we have set the bit in the
         * pmap (remember, we do not hold the MP lock in the switch code).
         */
        movl    P_VMSPACE(%ecx),%ecx            /* ECX = vmspace */
        movl    PCPU(cpuid),%esi
        MPLOCKED btsl %esi,VM_PMAP+PM_ACTIVE(%ecx)
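        /*
         * On SMP kernels the MPLOCKED prefix (defined as 'lock' above)
         * makes the bit-set an atomic read-modify-write, i.e. the line
         * above assembles to roughly:
         *
         *      lock btsl %esi,VM_PMAP+PM_ACTIVE(%ecx)
         */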

        /*
         * Restore the MMU address space.  If it is the same as the last
         * thread we don't have to invalidate the tlb (i.e. reload cr3).
         * YYY which naturally also means that the PM_ACTIVE bit had better
         * already have been set before we set it above, check? YYY
         */
        movl    %cr3,%esi
        movl    PCB_CR3(%edx),%ecx
        cmpl    %esi,%ecx
        je      4f
#if defined(SWTCH_OPTIM_STATS)
        decl    swtch_optim_stats
        incl    tlb_flush_count
#endif
        movl    %ecx,%cr3
4:
        /*
         * Clear the TDF_RUNNING flag in the old thread only after cleaning
         * up %cr3.  The target thread is already protected by being TDF_RUNQ
         * so setting TDF_RUNNING isn't as big a deal.
         */
        andl    $~TDF_RUNNING,TD_FLAGS(%ebx)
        orl     $TDF_RUNNING,TD_FLAGS(%eax)

        /*
         * Deal with the PCB extension, restore the private tss
         */
        movl    PCB_EXT(%edx),%edi              /* check for a PCB extension */
        movl    $1,%ebx                         /* maybe mark use of a private tss */
        testl   %edi,%edi
        jnz     2f

        /*
         * Going back to the common_tss.  We may need to update TSS_ESP0
         * which sets the top of the supervisor stack when entering from
         * usermode.  The PCB is at the top of the stack but we need another
         * 16 bytes to take vm86 into account.
         */
        leal    -16(%edx),%ebx
        movl    %ebx,PCPU(common_tss) + TSS_ESP0
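        /*
         * i.e. esp0 = (char *)pcb - 16.  The 16 bytes of slop matches
         * the four vm86 segment registers (%gs, %fs, %ds, %es) that the
         * hardware pushes on a trap out of vm86 mode.
         */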

        cmpl    $0,PCPU(private_tss)            /* don't have to reload if */
        je      3f                              /* already using the common TSS */

        subl    %ebx,%ebx                       /* unmark use of private tss */

        /*
         * Get the address of the common TSS descriptor for the ltr.
         * There is no way to get the address of a segment-accessed variable
         * so we store a self-referential pointer at the base of the per-cpu
         * data area and add the appropriate offset.
         */
        movl    $gd_common_tssd, %edi
        addl    %fs:0, %edi
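        /*
         * %fs:0 is that self-referential pointer: it holds the linear
         * address of this cpu's globaldata area, so adding it to the
         * gd_common_tssd offset yields the descriptor's linear address
         * without needing to know the %fs base directly.
         */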

        /*
         * Move the correct TSS descriptor into the GDT slot, then reload
         * ltr.
         */
2:
        movl    %ebx,PCPU(private_tss)          /* mark/unmark private tss */
        movl    PCPU(tss_gdt), %ebx             /* entry in GDT */
        movl    0(%edi), %eax
        movl    %eax, 0(%ebx)
        movl    4(%edi), %eax
        movl    %eax, 4(%ebx)
        movl    $GPROC0_SEL*8, %esi             /* GSEL(entry, SEL_KPL) */
        ltr     %si
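        /*
         * The two movl pairs copy the selected (private or common) 8-byte
         * TSS descriptor into the GDT slot.  Rewriting the slot also
         * refreshes it to a non-busy state, which matters because ltr
         * marks the referenced descriptor busy and loading an
         * already-busy TSS descriptor would fault.
         */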

3:
        /*
         * Restore general registers.
         */
        movl    PCB_EBX(%edx),%ebx
        movl    PCB_ESP(%edx),%esp
        movl    PCB_EBP(%edx),%ebp
        movl    PCB_ESI(%edx),%esi
        movl    PCB_EDI(%edx),%edi
        movl    PCB_EIP(%edx),%eax
        movl    %eax,(%esp)

        /*
         * Restore the user LDT if we have one
         */
#ifdef USER_LDT
        cmpl    $0, PCB_USERLDT(%edx)
        jnz     1f
        movl    _default_ldt,%eax
        cmpl    PCPU(currentldt),%eax
        je      2f
        lldt    _default_ldt
        movl    %eax,PCPU(currentldt)
        jmp     2f
1:      pushl   %edx
        call    set_user_ldt
        popl    %edx
2:
#endif
        /*
         * Restore the %gs segment register, which must be done after
         * loading the user LDT.  Since user processes can modify the
         * register via procfs, this may result in a fault which is
         * detected by checking the fault address against cpu_switch_load_gs
         * in i386/i386/trap.c
         */
        .globl  cpu_switch_load_gs
cpu_switch_load_gs:
        movl    PCB_GS(%edx),%gs

        /*
         * Restore the DEBUG register state if necessary.
         */
        movb    PCB_FLAGS(%edx),%al
        andb    $PCB_DBREGS,%al
        jz      1f                              /* no, skip over */
        movl    PCB_DR6(%edx),%eax              /* yes, do the restore */
        movl    %eax,%dr6
        movl    PCB_DR3(%edx),%eax
        movl    %eax,%dr3
        movl    PCB_DR2(%edx),%eax
        movl    %eax,%dr2
        movl    PCB_DR1(%edx),%eax
        movl    %eax,%dr1
        movl    PCB_DR0(%edx),%eax
        movl    %eax,%dr0
        movl    %dr7,%eax                       /* load dr7 so as not to disturb */
        andl    $0x0000fc00,%eax                /*   reserved bits */
        pushl   %ebx
        movl    PCB_DR7(%edx),%ebx
        andl    $~0x0000fc00,%ebx
        orl     %ebx,%eax
        popl    %ebx
        movl    %eax,%dr7
1:

        ret

badsw2:
        pushl   $sw0_2
        call    panic

sw0_2:  .asciz  "cpu_switch: not SRUN"

/*
 * savectx(pcb)
 *
 *	Update pcb, saving current processor state.
 */
ENTRY(savectx)
        /* fetch PCB */
        movl    4(%esp),%ecx

        /* caller's return address - child won't execute this routine */
        movl    (%esp),%eax
        movl    %eax,PCB_EIP(%ecx)

        movl    %cr3,%eax
        movl    %eax,PCB_CR3(%ecx)

        movl    %ebx,PCB_EBX(%ecx)
        movl    %esp,PCB_ESP(%ecx)
        movl    %ebp,PCB_EBP(%ecx)
        movl    %esi,PCB_ESI(%ecx)
        movl    %edi,PCB_EDI(%ecx)
        movl    %gs,PCB_GS(%ecx)

#if NNPX > 0
        /*
         * If npxthread == NULL, then the npx h/w state is irrelevant and the
         * state had better already be in the pcb.  This is true for forks
         * but not for dumps (the old book-keeping with FP flags in the pcb
         * always lost for dumps because the dump pcb has 0 flags).
         *
         * If npxthread != NULL, then we have to save the npx h/w state to
         * npxthread's pcb and copy it to the requested pcb, or save to the
         * requested pcb and reload.  Copying is easier because we would
         * have to handle h/w bugs for reloading.  We used to lose the
         * parent's npx state for forks by forgetting to reload.
         */
        movl    PCPU(npxthread),%eax
        testl   %eax,%eax
        je      1f

        pushl   %ecx
        movl    TD_PCB(%eax),%eax
        leal    PCB_SAVEFPU(%eax),%eax
        pushl   %eax
        pushl   %eax
        call    npxsave
        addl    $4,%esp
        popl    %eax
        popl    %ecx

        pushl   $PCB_SAVEFPU_SIZE
        leal    PCB_SAVEFPU(%ecx),%ecx
        pushl   %ecx
        pushl   %eax
        call    bcopy
        addl    $12,%esp
#endif /* NNPX > 0 */

1:
        ret
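        /*
         * Illustrative C-side usage, e.g. saving the running cpu's
         * context into a dedicated pcb before taking a crash dump
         * ('dump_pcb' is a hypothetical variable name):
         *
         *      struct pcb dump_pcb;
         *      savectx(&dump_pcb);     // dump_pcb now holds this context
         */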

/*
 * cpu_idle_restore()	(current thread in %eax on entry) (one-time execution)
 *
 *	Don't bother setting up any regs other than %ebp so backtraces
 *	don't die.  This restore function is used to bootstrap into the
 *	cpu_idle() LWKT only, after that cpu_lwkt_*() will be used for
 *	switching.
 *
 *	Clear TDF_RUNNING in the old thread only after we've cleaned up %cr3.
 *
 *	If we are an AP we have to call ap_init() before jumping to
 *	cpu_idle().  ap_init() will synchronize with the BP and finish
 *	setting up various ncpu-dependent globaldata fields.  This may
 *	happen on UP as well as SMP if we happen to be simulating multiple
 *	cpus.
 */
ENTRY(cpu_idle_restore)
        /* cli */
        movl    IdlePTD,%ecx
        movl    $0,%ebp
        pushl   $0
        movl    %ecx,%cr3
        andl    $~TDF_RUNNING,TD_FLAGS(%ebx)
        orl     $TDF_RUNNING,TD_FLAGS(%eax)
#ifdef SMP
        cmpl    $0,PCPU(cpuid)
        je      1f
        call    ap_init
1:
#endif
        sti
        jmp     cpu_idle

/*
 * cpu_kthread_restore()	(current thread is %eax on entry) (one-time execution)
 *
 *	Don't bother setting up any regs other than %ebp so backtraces
 *	don't die.  This restore function is used to bootstrap into an
 *	LWKT based kernel thread only.  cpu_lwkt_switch() will be used
 *	after this.
 *
 *	Since all of our context is on the stack we are reentrant and
 *	we can release our critical section and enable interrupts early.
 */
ENTRY(cpu_kthread_restore)
        sti
        movl    IdlePTD,%ecx
        movl    TD_PCB(%eax),%edx
        movl    $0,%ebp
        movl    %ecx,%cr3
        andl    $~TDF_RUNNING,TD_FLAGS(%ebx)
        orl     $TDF_RUNNING,TD_FLAGS(%eax)
        subl    $TDPRI_CRIT,TD_PRI(%eax)
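        /*
         * The pop/push sequence below rearranges the bootstrap frame so
         * the thread function sees an ordinary C call frame (illustrative
         * picture at the final jmp):
         *
         *      0(%esp) exit function           (serves as return address)
         *      4(%esp) PCB_EBX value           (argument to the ESI function)
         *
         * so when the thread function returns it falls into the exit
         * function.
         */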
        popl    %eax                            /* kthread exit function */
        pushl   PCB_EBX(%edx)                   /* argument to ESI function */
        pushl   %eax                            /* set exit func as return address */
        movl    PCB_ESI(%edx),%eax
        jmp     *%eax

/*
 * cpu_lwkt_switch()
 *
 *	Standard LWKT switching function.  Only non-scratch registers are
 *	saved and we don't bother with the MMU state or anything else.
 *
 *	This function is always called while in a critical section.
 *
 *	YYY BGL, SPL
 */
ENTRY(cpu_lwkt_switch)
        movl    4(%esp),%eax
        pushl   %ebp
        pushl   %ebx
        pushl   %esi
        pushl   %edi
        pushfl
        movl    PCPU(curthread),%ebx
        pushl   $cpu_lwkt_restore
        movl    %esp,TD_SP(%ebx)
        movl    %eax,PCPU(curthread)
        movl    TD_SP(%eax),%esp

        /*
         * eax contains new thread, ebx contains old thread.
         */
        ret
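        /*
         * Illustrative picture of the frame left at TD_SP by the pushes
         * above (lower addresses at the top); cpu_lwkt_restore unwinds
         * it in exactly the reverse order:
         *
         *      TD_SP ->        &cpu_lwkt_restore
         *                      saved eflags
         *                      saved %edi, %esi, %ebx, %ebp
         *                      caller's return EIP
         */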

/*
 * cpu_lwkt_restore()	(current thread in %eax on entry)
 *
 *	Standard LWKT restore function.  This function is always called
 *	while in a critical section.
 *
 *	Warning: due to preemption the restore function can be used to
 *	'return' to the original thread.  Interrupt disablement must be
 *	protected through the switch so we cannot run splz here.
 *
 *	YYY we theoretically do not need to load IdlePTD into cr3, but if
 *	so we need a way to detect when the PTD we are using is being
 *	deleted due to a process exiting.
 */
ENTRY(cpu_lwkt_restore)
        movl    IdlePTD,%ecx    /* YYY borrow but beware desched/cpuchg/exit */
        movl    %cr3,%edx
        cmpl    %ecx,%edx
        je      1f
        movl    %ecx,%cr3
1:
        andl    $~TDF_RUNNING,TD_FLAGS(%ebx)
        orl     $TDF_RUNNING,TD_FLAGS(%eax)
        popfl
        popl    %edi
        popl    %esi
        popl    %ebx
        popl    %ebp
        ret