npx(4): Silence a warning in the SMP build.
sys/platform/pc32/isa/npx.c
/*-
 * Copyright (c) 1990 William Jolitz.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)npx.c	7.2 (Berkeley) 5/12/91
 * $FreeBSD: src/sys/i386/isa/npx.c,v 1.80.2.3 2001/10/20 19:04:38 tegge Exp $
 * $DragonFly: src/sys/platform/pc32/isa/npx.c,v 1.49 2008/08/02 01:14:43 dillon Exp $
 */

#include "opt_cpu.h"
#include "opt_debug_npx.h"
#include "opt_math_emulate.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/rman.h>
#ifdef NPX_DEBUG
#include <sys/syslog.h>
#endif
#include <sys/signalvar.h>
#include <sys/thread2.h>

#ifndef SMP
#include <machine/asmacros.h>
#endif
#include <machine/cputypes.h>
#include <machine/frame.h>
#include <machine/ipl.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#ifndef SMP
#include <machine/clock.h>
#endif
#include <machine/specialreg.h>
#include <machine/segments.h>
#include <machine/globaldata.h>

#ifndef SMP
#include <machine_base/icu/icu.h>
#include <machine_base/isa/intr_machdep.h>
#include <bus/isa/isa.h>
#endif

/*
 * 387 and 287 Numeric Coprocessor Extension (NPX) Driver.
 */

/* Configuration flags. */
#define NPX_DISABLE_I586_OPTIMIZED_BCOPY	(1 << 0)
#define NPX_DISABLE_I586_OPTIMIZED_BZERO	(1 << 1)
#define NPX_DISABLE_I586_OPTIMIZED_COPYIO	(1 << 2)
#define NPX_PREFER_EMULATOR			(1 << 3)

#ifdef __GNUC__

#define fldcw(addr)		__asm("fldcw %0" : : "m" (*(addr)))
#define fnclex()		__asm("fnclex")
#define fninit()		__asm("fninit")
#define fnop()			__asm("fnop")
#define fnsave(addr)		__asm __volatile("fnsave %0" : "=m" (*(addr)))
#define fnstcw(addr)		__asm __volatile("fnstcw %0" : "=m" (*(addr)))
#define fnstsw(addr)		__asm __volatile("fnstsw %0" : "=m" (*(addr)))
#define fp_divide_by_0()	__asm("fldz; fld1; fdiv %st,%st(1); fnop")
#define frstor(addr)		__asm("frstor %0" : : "m" (*(addr)))
#ifndef CPU_DISABLE_SSE
#define fxrstor(addr)		__asm("fxrstor %0" : : "m" (*(addr)))
#define fxsave(addr)		__asm __volatile("fxsave %0" : "=m" (*(addr)))
#endif
#define start_emulating()	__asm("smsw %%ax; orb %0,%%al; lmsw %%ax" \
				      : : "n" (CR0_TS) : "ax")
#define stop_emulating()	__asm("clts")

#else /* not __GNUC__ */

void fldcw (caddr_t addr);
void fnclex (void);
void fninit (void);
void fnop (void);
void fnsave (caddr_t addr);
void fnstcw (caddr_t addr);
void fnstsw (caddr_t addr);
void fp_divide_by_0 (void);
void frstor (caddr_t addr);
#ifndef CPU_DISABLE_SSE
void fxsave (caddr_t addr);
void fxrstor (caddr_t addr);
#endif
void start_emulating (void);
void stop_emulating (void);

#endif /* __GNUC__ */
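
/*
 * start_emulating() sets CR0_TS via the smsw/orb/lmsw sequence, which
 * makes the next FPU or WAIT instruction raise a device-not-available
 * (DNA) fault; stop_emulating() clears TS again with clts.  This pair
 * is the hardware hook behind the lazy FP context switching done below.
 */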

#ifndef CPU_DISABLE_SSE
#define GET_FPU_EXSW_PTR(td) \
	(cpu_fxsr ? \
	    &(td)->td_savefpu->sv_xmm.sv_ex_sw : \
	    &(td)->td_savefpu->sv_87.sv_ex_sw)
#else /* CPU_DISABLE_SSE */
#define GET_FPU_EXSW_PTR(td) \
	(&(td)->td_savefpu->sv_87.sv_ex_sw)
#endif /* CPU_DISABLE_SSE */
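
/*
 * GET_FPU_EXSW_PTR() yields the address of the sv_ex_sw scratch field in
 * the thread's FP save area, where npx_intr() and npxdna() record the FPU
 * status word for later exception decoding.  The field sits at different
 * offsets in the fxsave (sv_xmm) and fnsave (sv_87) layouts, hence the
 * cpu_fxsr test.
 */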

typedef u_char bool_t;
#ifndef CPU_DISABLE_SSE
static void fpu_clean_state(void);
#endif


static int npx_attach (device_t dev);
void npx_intr (void *);
static int npx_probe (device_t dev);
static int npx_probe1 (device_t dev);
static void fpusave (union savefpu *);
static void fpurstor (union savefpu *);

int hw_float;		/* XXX currently just alias for npx_exists */

SYSCTL_INT(_hw, HW_FLOATINGPT, floatingpoint,
	CTLFLAG_RD, &hw_float, 0,
156 "Floatingpoint instructions executed in hardware");
#if (defined(I586_CPU) || defined(I686_CPU)) && !defined(CPU_DISABLE_SSE)
int mmxopt = 1;
SYSCTL_INT(_kern, OID_AUTO, mmxopt, CTLFLAG_RD, &mmxopt, 0,
	"MMX/XMM optimized bcopy/copyin/copyout support");
#endif

#ifndef SMP
static u_int npx0_imask;
static struct gate_descriptor npx_idt_probeintr;
static int npx_intrno;
static volatile u_int npx_intrs_while_probing;
static volatile u_int npx_traps_while_probing;
#endif

static bool_t npx_ex16;
static bool_t npx_exists;
static bool_t npx_irq13;
static int npx_irq;	/* irq number */

#ifndef SMP
/*
 * Special interrupt handlers.  Someday intr0-intr15 will be used to count
 * interrupts.  We'll still need a special exception 16 handler.  The busy
 * latch stuff in probeintr() can be moved to npxprobe().
 */
inthand_t probeintr;
__asm("								\n\
	.text							\n\
	.p2align 2,0x90						\n\
	.type " __XSTRING(CNAME(probeintr)) ",@function		\n\
" __XSTRING(CNAME(probeintr)) ":				\n\
	ss							\n\
	incl	" __XSTRING(CNAME(npx_intrs_while_probing)) "	\n\
	pushl	%eax						\n\
	movb	$0x20,%al	# EOI (asm in strings loses cpp features) \n\
	outb	%al,$0xa0	# IO_ICU2			\n\
	outb	%al,$0x20	# IO_ICU1			\n\
	movb	$0,%al						\n\
	outb	%al,$0xf0	# clear BUSY# latch		\n\
	popl	%eax						\n\
	iret							\n\
");

inthand_t probetrap;
__asm("								\n\
	.text							\n\
	.p2align 2,0x90						\n\
	.type " __XSTRING(CNAME(probetrap)) ",@function		\n\
" __XSTRING(CNAME(probetrap)) ":				\n\
	ss							\n\
	incl	" __XSTRING(CNAME(npx_traps_while_probing)) "	\n\
	fnclex							\n\
	iret							\n\
");
#endif /* !SMP */

static struct krate badfprate = { 1 };

/*
 * Probe routine.  Initialize cr0 to give correct behaviour for [f]wait
 * whether the device exists or not (XXX should be elsewhere).  Set flags
 * to tell npxattach() what to do.  Modify device struct if npx doesn't
 * need to use interrupts.  Always returns 0 so that npx_attach() runs
 * even when no FPU is found.
 */
static int
npx_probe(device_t dev)
{
#ifdef SMP

	if (resource_int_value("npx", 0, "irq", &npx_irq) != 0)
		npx_irq = 13;
	return npx_probe1(dev);

#else /* SMP */

	int result;
	u_long save_eflags;
	u_char save_icu1_mask;
	u_char save_icu2_mask;
	struct gate_descriptor save_idt_npxintr;
	struct gate_descriptor save_idt_npxtrap;
	/*
	 * This routine is now just a wrapper for npxprobe1(), to install
	 * special npx interrupt and trap handlers, to enable npx interrupts
	 * and to disable other interrupts.  Someday isa_configure() will
	 * install suitable handlers and run with interrupts enabled so we
	 * won't need to do so much here.
	 */
	if (resource_int_value("npx", 0, "irq", &npx_irq) != 0)
		npx_irq = 13;
	npx_intrno = IDT_OFFSET + npx_irq;
	save_eflags = read_eflags();
	cpu_disable_intr();
	save_icu1_mask = inb(IO_ICU1 + 1);
	save_icu2_mask = inb(IO_ICU2 + 1);
	save_idt_npxintr = idt[npx_intrno];
	save_idt_npxtrap = idt[16];
	outb(IO_ICU1 + 1, ~(1 << ICU_IRQ_SLAVE));
	outb(IO_ICU2 + 1, ~(1 << (npx_irq - 8)));
	setidt(16, probetrap, SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(npx_intrno, probeintr, SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	npx_idt_probeintr = idt[npx_intrno];
	cpu_enable_intr();
	result = npx_probe1(dev);
	cpu_disable_intr();
	outb(IO_ICU1 + 1, save_icu1_mask);
	outb(IO_ICU2 + 1, save_icu2_mask);
	idt[npx_intrno] = save_idt_npxintr;
	idt[16] = save_idt_npxtrap;
	write_eflags(save_eflags);
	return (result);

#endif /* SMP */
}

static int
npx_probe1(device_t dev)
{
#ifndef SMP
	u_short control;
	u_short status;
#endif

	/*
	 * Partially reset the coprocessor, if any.  Some BIOS's don't reset
	 * it after a warm boot.
	 */
	outb(0xf1, 0);		/* full reset on some systems, NOP on others */
	outb(0xf0, 0);		/* clear BUSY# latch */
	/*
	 * Prepare to trap all ESC (i.e., NPX) instructions and all WAIT
	 * instructions.  We must set the CR0_MP bit and use the CR0_TS
	 * bit to control the trap, because setting the CR0_EM bit does
	 * not cause WAIT instructions to trap.  It's important to trap
	 * WAIT instructions - otherwise the "wait" variants of no-wait
	 * control instructions would degenerate to the "no-wait" variants
	 * after FP context switches but work correctly otherwise.  It's
	 * particularly important to trap WAITs when there is no NPX -
	 * otherwise the "wait" variants would always degenerate.
	 *
	 * Try setting CR0_NE to get correct error reporting on 486DX's.
	 * Setting it should fail or do nothing on lesser processors.
	 */
	load_cr0(rcr0() | CR0_MP | CR0_NE);
	/*
	 * But don't trap while we're probing.
	 */
	stop_emulating();
	/*
	 * Finish resetting the coprocessor, if any.  If there is an error
	 * pending, then we may get a bogus IRQ13, but probeintr() will handle
	 * it OK.  Bogus halts have never been observed, but we enabled
	 * IRQ13 and cleared the BUSY# latch early to handle them anyway.
	 */
	fninit();

	device_set_desc(dev, "math processor");
	/*
	 * Modern CPUs all have an FPU that uses the INT 16 interface
	 * and provide a simple way to verify that, so handle the
	 * common case right away.
	 */
	if (cpu_feature & CPUID_FPU) {
		npx_irq13 = 0;
		npx_ex16 = hw_float = npx_exists = 1;
		return (0);
	}

#ifndef SMP
	/*
	 * Don't use fwait here because it might hang.
	 * Don't use fnop here because it usually hangs if there is no FPU.
	 */
	DELAY(1000);		/* wait for any IRQ13 */
#ifdef DIAGNOSTIC
	if (npx_intrs_while_probing != 0)
		kprintf("fninit caused %u bogus npx interrupt(s)\n",
			npx_intrs_while_probing);
	if (npx_traps_while_probing != 0)
		kprintf("fninit caused %u bogus npx trap(s)\n",
			npx_traps_while_probing);
#endif
	/*
	 * Check for a status of mostly zero.
	 */
	status = 0x5a5a;
	fnstsw(&status);
	if ((status & 0xb8ff) == 0) {
		/*
		 * Good, now check for a proper control word.
		 */
		control = 0x5a5a;
		fnstcw(&control);
		if ((control & 0x1f3f) == 0x033f) {
			hw_float = npx_exists = 1;
			/*
			 * We have an npx, now divide by 0 to see if exception
			 * 16 works.
			 */
			control &= ~(1 << 2);	/* enable divide by 0 trap */
			fldcw(&control);
			npx_traps_while_probing = npx_intrs_while_probing = 0;
			fp_divide_by_0();
			if (npx_traps_while_probing != 0) {
				/*
				 * Good, exception 16 works.
				 */
				npx_ex16 = 1;
				return (0);
			}
			if (npx_intrs_while_probing != 0) {
				int rid;
				struct resource *r;
				void *intr;
				/*
				 * Bad, we are stuck with IRQ13.
				 */
				npx_irq13 = 1;
				/*
				 * npxattach would be too late to set npx0_imask
				 */
				npx0_imask |= (1 << npx_irq);

				/*
				 * We allocate these resources permanently,
				 * so there is no need to keep track of them.
				 */
				rid = 0;
				r = bus_alloc_resource(dev, SYS_RES_IOPORT,
						       &rid, IO_NPX, IO_NPX,
						       IO_NPXSIZE, RF_ACTIVE);
				if (r == NULL)
					panic("npx: can't get ports");
				rid = 0;
				r = bus_alloc_resource(dev, SYS_RES_IRQ,
						       &rid, npx_irq, npx_irq,
						       1, RF_ACTIVE);
				if (r == NULL)
					panic("npx: can't get IRQ");
				BUS_SETUP_INTR(device_get_parent(dev),
					       dev, r, 0,
					       npx_intr, 0, &intr, NULL);
				if (intr == NULL)
					panic("npx: can't create intr");

				return (0);
			}
			/*
			 * Worse, even IRQ13 is broken.  Use emulator.
			 */
		}
	}
#endif /* !SMP */
	/*
	 * Probe failed, but we want to get to npxattach to initialize the
	 * emulator and say that it has been installed.  XXX handle devices
	 * that aren't really devices better.
	 */
	return (0);
}

/*
 * Attach routine - announce which it is, and wire into system
 */
int
npx_attach(device_t dev)
{
	int flags;

	if (resource_int_value("npx", 0, "flags", &flags) != 0)
		flags = 0;

	if (flags)
		device_printf(dev, "flags 0x%x ", flags);
	if (npx_irq13) {
		device_printf(dev, "using IRQ 13 interface\n");
	} else {
#if defined(MATH_EMULATE)
		if (npx_ex16) {
			if (!(flags & NPX_PREFER_EMULATOR))
				device_printf(dev, "INT 16 interface\n");
			else {
				device_printf(dev, "FPU exists, but flags request "
					      "emulator\n");
				hw_float = npx_exists = 0;
			}
		} else if (npx_exists) {
			device_printf(dev, "error reporting broken; using 387 emulator\n");
			hw_float = npx_exists = 0;
		} else
			device_printf(dev, "387 emulator\n");
#else
		if (npx_ex16) {
			device_printf(dev, "INT 16 interface\n");
			if (flags & NPX_PREFER_EMULATOR) {
				device_printf(dev, "emulator requested, but none compiled "
					      "into kernel, using FPU\n");
			}
		} else
			device_printf(dev, "no 387 emulator in kernel and no FPU!\n");
#endif
	}
	npxinit(__INITIAL_NPXCW__);

#if (defined(I586_CPU) || defined(I686_CPU)) && !defined(CPU_DISABLE_SSE)
	/*
	 * The asm_mmx_*() routines actually use XMM as well, so only
	 * enable them if we have SSE2 and are using FXSR (fxsave/fxrstor).
	 */
	TUNABLE_INT_FETCH("kern.mmxopt", &mmxopt);
	if ((cpu_feature & CPUID_MMX) && (cpu_feature & CPUID_SSE) &&
	    (cpu_feature & CPUID_SSE2) &&
	    npx_ex16 && npx_exists && mmxopt && cpu_fxsr
	) {
		if ((flags & NPX_DISABLE_I586_OPTIMIZED_BCOPY) == 0) {
			bcopy_vector = (void **)asm_xmm_bcopy;
			ovbcopy_vector = (void **)asm_xmm_bcopy;
			memcpy_vector = (void **)asm_xmm_memcpy;
			kprintf("Using XMM optimized bcopy/copyin/copyout\n");
		}
		if ((flags & NPX_DISABLE_I586_OPTIMIZED_BZERO) == 0) {
			/* XXX */
		}
	} else if ((cpu_feature & CPUID_MMX) && (cpu_feature & CPUID_SSE) &&
		   npx_ex16 && npx_exists && mmxopt && cpu_fxsr
	) {
		if ((flags & NPX_DISABLE_I586_OPTIMIZED_BCOPY) == 0) {
			bcopy_vector = (void **)asm_mmx_bcopy;
			ovbcopy_vector = (void **)asm_mmx_bcopy;
			memcpy_vector = (void **)asm_mmx_memcpy;
			kprintf("Using MMX optimized bcopy/copyin/copyout\n");
		}
		if ((flags & NPX_DISABLE_I586_OPTIMIZED_BZERO) == 0) {
			/* XXX */
		}
	}
#endif
#if 0
	if (cpu_class == CPUCLASS_586 && npx_ex16 && npx_exists &&
	    timezero("i586_bzero()", i586_bzero) <
	    timezero("bzero()", bzero) * 4 / 5) {
		if (!(flags & NPX_DISABLE_I586_OPTIMIZED_BCOPY)) {
			bcopy_vector = i586_bcopy;
			ovbcopy_vector = i586_bcopy;
		}
		if (!(flags & NPX_DISABLE_I586_OPTIMIZED_BZERO))
			bzero_vector = i586_bzero;
		if (!(flags & NPX_DISABLE_I586_OPTIMIZED_COPYIO)) {
			copyin_vector = i586_copyin;
			copyout_vector = i586_copyout;
		}
	}
#endif
	return (0);		/* XXX unused */
}

/*
 * Initialize the floating point unit.
 */
void
npxinit(u_short control)
{
	static union savefpu dummy __aligned(16);

	if (!npx_exists)
		return;
	/*
	 * fninit has the same h/w bugs as fnsave.  Use the detoxified
	 * fnsave to throw away any junk in the fpu.  npxsave() initializes
	 * the fpu and sets npxthread = NULL as important side effects.
	 */
	npxsave(&dummy);
	crit_enter();
	stop_emulating();
	fldcw(&control);
	fpusave(curthread->td_savefpu);
	mdcpu->gd_npxthread = NULL;
	start_emulating();
	crit_exit();
}

/*
 * Free coprocessor (if we have it).
 */
void
npxexit(void)
{
	if (curthread == mdcpu->gd_npxthread)
		npxsave(curthread->td_savefpu);
#ifdef NPX_DEBUG
	if (npx_exists) {
		u_int masked_exceptions;

		masked_exceptions =
		    curthread->td_savefpu->sv_87.sv_env.en_cw
		    & curthread->td_savefpu->sv_87.sv_env.en_sw & 0x7f;
		/*
		 * Log exceptions that would have trapped with the old
		 * control word (overflow, divide by 0, and invalid operand).
		 */
		if (masked_exceptions & 0x0d)
			log(LOG_ERR,
			    "pid %d (%s) exited with masked floating point exceptions 0x%02x\n",
			    curproc->p_pid, curproc->p_comm, masked_exceptions);
	}
#endif
}

/*
 * The following mechanism is used to ensure that the FPE_... value
 * that is passed as a trapcode to the signal handler of the user
 * process does not have more than one bit set.
 *
 * Multiple bits may be set if the user process modifies the control
 * word while a status word bit is already set.  While this is a sign
 * of bad coding, we have no choice but to narrow them down to one
 * bit, since we must not send a trapcode that is not exactly one of
 * the FPE_ macros.
 *
 * The mechanism has a static table with 128 entries.  Each combination
 * of the 7 FPU status word exception bits directly translates to a
 * position in this table, where a single FPE_... value is stored.
 * This FPE_... value stored there is considered the "most important"
 * of the exception bits and will be sent as the signal code.  The
 * precedence of the bits is based upon Intel Document "Numerical
 * Applications", Chapter "Special Computational Situations".
 *
 * The macro to choose one of these values does these steps: 1) Throw
 * away status word bits that cannot be masked.  2) Throw away the bits
 * currently masked in the control word, assuming the user isn't
 * interested in them anymore.  3) Reinsert status word bit 7 (stack
 * fault) if it is set, which cannot be masked but must be preserved.
 * 4) Use the remaining bits to point into the trapcode table.
 *
 * The 6 maskable bits in order of their preference, as stated in the
 * above referenced Intel manual:
 *	1   Invalid operation (FP_X_INV)
 *	1a    Stack underflow
 *	1b    Stack overflow
 *	1c    Operand of unsupported format
 *	1d    SNaN operand.
 *	2   QNaN operand (not an exception, irrelevant here)
 *	3   Any other invalid-operation not mentioned above or zero divide
 *	      (FP_X_INV, FP_X_DZ)
 *	4   Denormal operand (FP_X_DNML)
 *	5   Numeric over/underflow (FP_X_OFL, FP_X_UFL)
 *	6   Inexact result (FP_X_IMP)
 */
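
/*
 * Worked example of the lookup npx_intr() performs below: with INV (bit 0)
 * and IMP (bit 5) set in the status word but only IMP masked in the
 * control word, (status & ~control & 0x3f) is 0x01 and the stack fault
 * bit (0x40) is clear, so the index is 1 and the signal code delivered
 * with SIGFPE is FPE_FLTINV.
 */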
static char fpetable[128] = {
	0,
	FPE_FLTINV,	/* 1 - INV */
	FPE_FLTUND,	/* 2 - DNML */
	FPE_FLTINV,	/* 3 - INV | DNML */
	FPE_FLTDIV,	/* 4 - DZ */
	FPE_FLTINV,	/* 5 - INV | DZ */
	FPE_FLTDIV,	/* 6 - DNML | DZ */
	FPE_FLTINV,	/* 7 - INV | DNML | DZ */
	FPE_FLTOVF,	/* 8 - OFL */
	FPE_FLTINV,	/* 9 - INV | OFL */
	FPE_FLTUND,	/* A - DNML | OFL */
	FPE_FLTINV,	/* B - INV | DNML | OFL */
	FPE_FLTDIV,	/* C - DZ | OFL */
	FPE_FLTINV,	/* D - INV | DZ | OFL */
	FPE_FLTDIV,	/* E - DNML | DZ | OFL */
	FPE_FLTINV,	/* F - INV | DNML | DZ | OFL */
	FPE_FLTUND,	/* 10 - UFL */
	FPE_FLTINV,	/* 11 - INV | UFL */
	FPE_FLTUND,	/* 12 - DNML | UFL */
	FPE_FLTINV,	/* 13 - INV | DNML | UFL */
	FPE_FLTDIV,	/* 14 - DZ | UFL */
	FPE_FLTINV,	/* 15 - INV | DZ | UFL */
	FPE_FLTDIV,	/* 16 - DNML | DZ | UFL */
	FPE_FLTINV,	/* 17 - INV | DNML | DZ | UFL */
	FPE_FLTOVF,	/* 18 - OFL | UFL */
	FPE_FLTINV,	/* 19 - INV | OFL | UFL */
	FPE_FLTUND,	/* 1A - DNML | OFL | UFL */
	FPE_FLTINV,	/* 1B - INV | DNML | OFL | UFL */
	FPE_FLTDIV,	/* 1C - DZ | OFL | UFL */
	FPE_FLTINV,	/* 1D - INV | DZ | OFL | UFL */
	FPE_FLTDIV,	/* 1E - DNML | DZ | OFL | UFL */
	FPE_FLTINV,	/* 1F - INV | DNML | DZ | OFL | UFL */
	FPE_FLTRES,	/* 20 - IMP */
	FPE_FLTINV,	/* 21 - INV | IMP */
	FPE_FLTUND,	/* 22 - DNML | IMP */
	FPE_FLTINV,	/* 23 - INV | DNML | IMP */
	FPE_FLTDIV,	/* 24 - DZ | IMP */
	FPE_FLTINV,	/* 25 - INV | DZ | IMP */
	FPE_FLTDIV,	/* 26 - DNML | DZ | IMP */
	FPE_FLTINV,	/* 27 - INV | DNML | DZ | IMP */
	FPE_FLTOVF,	/* 28 - OFL | IMP */
	FPE_FLTINV,	/* 29 - INV | OFL | IMP */
	FPE_FLTUND,	/* 2A - DNML | OFL | IMP */
	FPE_FLTINV,	/* 2B - INV | DNML | OFL | IMP */
	FPE_FLTDIV,	/* 2C - DZ | OFL | IMP */
	FPE_FLTINV,	/* 2D - INV | DZ | OFL | IMP */
	FPE_FLTDIV,	/* 2E - DNML | DZ | OFL | IMP */
	FPE_FLTINV,	/* 2F - INV | DNML | DZ | OFL | IMP */
	FPE_FLTUND,	/* 30 - UFL | IMP */
	FPE_FLTINV,	/* 31 - INV | UFL | IMP */
	FPE_FLTUND,	/* 32 - DNML | UFL | IMP */
	FPE_FLTINV,	/* 33 - INV | DNML | UFL | IMP */
	FPE_FLTDIV,	/* 34 - DZ | UFL | IMP */
	FPE_FLTINV,	/* 35 - INV | DZ | UFL | IMP */
	FPE_FLTDIV,	/* 36 - DNML | DZ | UFL | IMP */
	FPE_FLTINV,	/* 37 - INV | DNML | DZ | UFL | IMP */
	FPE_FLTOVF,	/* 38 - OFL | UFL | IMP */
	FPE_FLTINV,	/* 39 - INV | OFL | UFL | IMP */
	FPE_FLTUND,	/* 3A - DNML | OFL | UFL | IMP */
	FPE_FLTINV,	/* 3B - INV | DNML | OFL | UFL | IMP */
	FPE_FLTDIV,	/* 3C - DZ | OFL | UFL | IMP */
	FPE_FLTINV,	/* 3D - INV | DZ | OFL | UFL | IMP */
	FPE_FLTDIV,	/* 3E - DNML | DZ | OFL | UFL | IMP */
	FPE_FLTINV,	/* 3F - INV | DNML | DZ | OFL | UFL | IMP */
	FPE_FLTSUB,	/* 40 - STK */
	FPE_FLTSUB,	/* 41 - INV | STK */
	FPE_FLTUND,	/* 42 - DNML | STK */
	FPE_FLTSUB,	/* 43 - INV | DNML | STK */
	FPE_FLTDIV,	/* 44 - DZ | STK */
	FPE_FLTSUB,	/* 45 - INV | DZ | STK */
	FPE_FLTDIV,	/* 46 - DNML | DZ | STK */
	FPE_FLTSUB,	/* 47 - INV | DNML | DZ | STK */
	FPE_FLTOVF,	/* 48 - OFL | STK */
	FPE_FLTSUB,	/* 49 - INV | OFL | STK */
	FPE_FLTUND,	/* 4A - DNML | OFL | STK */
	FPE_FLTSUB,	/* 4B - INV | DNML | OFL | STK */
	FPE_FLTDIV,	/* 4C - DZ | OFL | STK */
	FPE_FLTSUB,	/* 4D - INV | DZ | OFL | STK */
	FPE_FLTDIV,	/* 4E - DNML | DZ | OFL | STK */
	FPE_FLTSUB,	/* 4F - INV | DNML | DZ | OFL | STK */
	FPE_FLTUND,	/* 50 - UFL | STK */
	FPE_FLTSUB,	/* 51 - INV | UFL | STK */
	FPE_FLTUND,	/* 52 - DNML | UFL | STK */
	FPE_FLTSUB,	/* 53 - INV | DNML | UFL | STK */
	FPE_FLTDIV,	/* 54 - DZ | UFL | STK */
	FPE_FLTSUB,	/* 55 - INV | DZ | UFL | STK */
	FPE_FLTDIV,	/* 56 - DNML | DZ | UFL | STK */
	FPE_FLTSUB,	/* 57 - INV | DNML | DZ | UFL | STK */
	FPE_FLTOVF,	/* 58 - OFL | UFL | STK */
	FPE_FLTSUB,	/* 59 - INV | OFL | UFL | STK */
	FPE_FLTUND,	/* 5A - DNML | OFL | UFL | STK */
	FPE_FLTSUB,	/* 5B - INV | DNML | OFL | UFL | STK */
	FPE_FLTDIV,	/* 5C - DZ | OFL | UFL | STK */
	FPE_FLTSUB,	/* 5D - INV | DZ | OFL | UFL | STK */
	FPE_FLTDIV,	/* 5E - DNML | DZ | OFL | UFL | STK */
	FPE_FLTSUB,	/* 5F - INV | DNML | DZ | OFL | UFL | STK */
	FPE_FLTRES,	/* 60 - IMP | STK */
	FPE_FLTSUB,	/* 61 - INV | IMP | STK */
	FPE_FLTUND,	/* 62 - DNML | IMP | STK */
	FPE_FLTSUB,	/* 63 - INV | DNML | IMP | STK */
	FPE_FLTDIV,	/* 64 - DZ | IMP | STK */
	FPE_FLTSUB,	/* 65 - INV | DZ | IMP | STK */
	FPE_FLTDIV,	/* 66 - DNML | DZ | IMP | STK */
	FPE_FLTSUB,	/* 67 - INV | DNML | DZ | IMP | STK */
	FPE_FLTOVF,	/* 68 - OFL | IMP | STK */
	FPE_FLTSUB,	/* 69 - INV | OFL | IMP | STK */
	FPE_FLTUND,	/* 6A - DNML | OFL | IMP | STK */
	FPE_FLTSUB,	/* 6B - INV | DNML | OFL | IMP | STK */
	FPE_FLTDIV,	/* 6C - DZ | OFL | IMP | STK */
	FPE_FLTSUB,	/* 6D - INV | DZ | OFL | IMP | STK */
	FPE_FLTDIV,	/* 6E - DNML | DZ | OFL | IMP | STK */
	FPE_FLTSUB,	/* 6F - INV | DNML | DZ | OFL | IMP | STK */
	FPE_FLTUND,	/* 70 - UFL | IMP | STK */
	FPE_FLTSUB,	/* 71 - INV | UFL | IMP | STK */
	FPE_FLTUND,	/* 72 - DNML | UFL | IMP | STK */
	FPE_FLTSUB,	/* 73 - INV | DNML | UFL | IMP | STK */
	FPE_FLTDIV,	/* 74 - DZ | UFL | IMP | STK */
	FPE_FLTSUB,	/* 75 - INV | DZ | UFL | IMP | STK */
	FPE_FLTDIV,	/* 76 - DNML | DZ | UFL | IMP | STK */
	FPE_FLTSUB,	/* 77 - INV | DNML | DZ | UFL | IMP | STK */
	FPE_FLTOVF,	/* 78 - OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 79 - INV | OFL | UFL | IMP | STK */
	FPE_FLTUND,	/* 7A - DNML | OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 7B - INV | DNML | OFL | UFL | IMP | STK */
	FPE_FLTDIV,	/* 7C - DZ | OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 7D - INV | DZ | OFL | UFL | IMP | STK */
	FPE_FLTDIV,	/* 7E - DNML | DZ | OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 7F - INV | DNML | DZ | OFL | UFL | IMP | STK */
};

/*
 * Preserve the FP status word, clear FP exceptions, then generate a SIGFPE.
 *
 * Clearing exceptions is necessary mainly to avoid IRQ13 bugs.  We now
 * depend on longjmp() restoring a usable state.  Restoring the state
 * or examining it might fail if we didn't clear exceptions.
 *
 * The error code chosen will be one of the FPE_... macros.  It will be
 * sent as the second argument to old BSD-style signal handlers and as
 * "siginfo_t->si_code" (second argument) to SA_SIGINFO signal handlers.
 *
 * XXX the FP state is not preserved across signal handlers.  So signal
 * handlers cannot afford to do FP unless they preserve the state or
 * longjmp() out.  Both preserving the state and longjmp()ing may be
 * destroyed by IRQ13 bugs.  Clearing FP exceptions is not an acceptable
 * solution for signals other than SIGFPE.
 *
 * The MP lock is not held on entry (see i386/i386/exception.s) and
 * should not be held on exit.  Interrupts are enabled.  We must enter
 * a critical section to stabilize the FP system and prevent an interrupt
 * or preemption from changing the FP state out from under us.
 */
void
npx_intr(void *dummy)
{
	int code;
	u_short control;
	struct intrframe *frame;
	u_long *exstat;

	crit_enter();

	/*
	 * This exception can only occur with CR0_TS clear, otherwise we
	 * would get a DNA exception.  However, since interrupts were
	 * enabled a preemption could have sneaked in and used the FP system
	 * before we entered our critical section.  If that occurred, the
	 * TS bit will be set and npxthread will be NULL.
	 */
	if (npx_exists && (rcr0() & CR0_TS)) {
		KASSERT(mdcpu->gd_npxthread == NULL,
			("gd_npxthread was %p with TS set!", mdcpu->gd_npxthread));
		npxdna();
		crit_exit();
		return;
	}
	if (mdcpu->gd_npxthread == NULL || !npx_exists) {
		get_mplock();
		kprintf("npxintr: npxthread = %p, curthread = %p, npx_exists = %d\n",
			mdcpu->gd_npxthread, curthread, npx_exists);
		panic("npxintr from nowhere");
	}
	if (mdcpu->gd_npxthread != curthread) {
		get_mplock();
		kprintf("npxintr: npxthread = %p, curthread = %p, npx_exists = %d\n",
			mdcpu->gd_npxthread, curthread, npx_exists);
		panic("npxintr from non-current process");
	}

	exstat = GET_FPU_EXSW_PTR(curthread);
	outb(0xf0, 0);
	fnstsw(exstat);
	fnstcw(&control);
	fnclex();

	get_mplock();

	/*
	 * Pass exception to process.
	 */
	frame = (struct intrframe *)&dummy;	/* XXX */
	if ((ISPL(frame->if_cs) == SEL_UPL) || (frame->if_eflags & PSL_VM)) {
		/*
		 * Interrupt is essentially a trap, so we can afford to call
		 * the SIGFPE handler (if any) as soon as the interrupt
		 * returns.
		 *
		 * XXX little or nothing is gained from this, and plenty is
		 * lost - the interrupt frame has to contain the trap frame
		 * (this is otherwise only necessary for the rescheduling trap
		 * in doreti, and the frame for that could easily be set up
		 * just before it is used).
		 */
		curthread->td_lwp->lwp_md.md_regs = INTR_TO_TRAPFRAME(frame);
		/*
		 * Encode the appropriate code for detailed information on
		 * this exception.
		 */
		code = fpetable[(*exstat & ~control & 0x3f) | (*exstat & 0x40)];
		trapsignal(curthread->td_lwp, SIGFPE, code);
	} else {
		/*
		 * Nested interrupt.  These losers occur when:
		 *	o an IRQ13 is bogusly generated at a bogus time, e.g.:
		 *		o immediately after an fnsave or frstor of an
		 *		  error state.
		 *		o a couple of 386 instructions after
		 *		  "fstpl _memvar" causes a stack overflow.
		 *	  These are especially nasty when combined with a
		 *	  trace trap.
		 *	o an IRQ13 occurs at the same time as another higher-
		 *	  priority interrupt.
		 *
		 * Treat them like a true async interrupt.
		 */
		lwpsignal(curproc, curthread->td_lwp, SIGFPE);
	}
	rel_mplock();
	crit_exit();
}

/*
 * Implement the device not available (DNA) exception.  gd_npxthread had
 * better be NULL.  Restore the current thread's FP state and set gd_npxthread
 * to curthread.
 *
 * Interrupts are enabled and preemption can occur.  Enter a critical
 * section to stabilize the FP state.
 */
int
npxdna(void)
{
	thread_t td = curthread;
	u_long *exstat;
	int didinit = 0;

	if (!npx_exists)
		return (0);
	if (mdcpu->gd_npxthread != NULL) {
		kprintf("npxdna: npxthread = %p, curthread = %p\n",
			mdcpu->gd_npxthread, td);
		panic("npxdna");
	}

	/*
	 * Setup the initial saved state if the thread has never before
	 * used the FP unit.  This also occurs when a thread pushes a
	 * signal handler and uses FP in the handler.
	 */
	if ((td->td_flags & (TDF_USINGFP | TDF_KERNELFP)) == 0) {
		td->td_flags |= TDF_USINGFP;
		npxinit(__INITIAL_NPXCW__);
		didinit = 1;
	}

	/*
	 * The setting of gd_npxthread and the call to fpurstor() must not
	 * be preempted by an interrupt thread or we will take an npxdna
	 * trap and potentially save our current fpstate (which is garbage)
	 * and then restore the garbage rather than the originally saved
	 * fpstate.
	 */
	crit_enter();
	stop_emulating();
	/*
	 * Record new context early in case frstor causes an IRQ13.
	 */
	mdcpu->gd_npxthread = td;
	exstat = GET_FPU_EXSW_PTR(td);
	*exstat = 0;
	/*
	 * The following frstor may cause an IRQ13 when the state being
	 * restored has a pending error.  The error will appear to have been
	 * triggered by the current (npx) user instruction even when that
	 * instruction is a no-wait instruction that should not trigger an
	 * error (e.g., fnclex).  On at least one 486 system all of the
	 * no-wait instructions are broken the same as frstor, so our
	 * treatment does not amplify the breakage.  On at least one
	 * 386/Cyrix 387 system, fnclex works correctly while frstor and
	 * fnsave are broken, so our treatment breaks fnclex if it is the
	 * first FPU instruction after a context switch.
	 */
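	/*
	 * Sanity check MXCSR before it reaches fxrstor: bits outside the
	 * 0xFFBF mask are the reserved high word plus the DAZ bit, which
	 * this mask treats as unsupported.  Restoring a save area with
	 * any of them set would raise a GP fault in the kernel, so clamp
	 * the value and post a SIGFPE instead.
	 */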
	if ((td->td_savefpu->sv_xmm.sv_env.en_mxcsr & ~0xFFBF)
#ifndef CPU_DISABLE_SSE
	    && cpu_fxsr
#endif
	) {
		krateprintf(&badfprate,
914 "FXRSTR: illegal FP MXCSR %08x didinit = %d\n",
			    td->td_savefpu->sv_xmm.sv_env.en_mxcsr, didinit);
		td->td_savefpu->sv_xmm.sv_env.en_mxcsr &= 0xFFBF;
		lwpsignal(curproc, curthread->td_lwp, SIGFPE);
	}
	fpurstor(td->td_savefpu);
	crit_exit();

	return (1);
}

/*
 * Wrapper for the fnsave instruction to handle h/w bugs.  If there is an error
 * pending, then fnsave generates a bogus IRQ13 on some systems.  Force
 * any IRQ13 to be handled immediately, and then ignore it.  This routine is
 * often called at splhigh so it must not use many system services.  In
 * particular, it's much easier to install a special handler than to
 * guarantee that it's safe to use npxintr() and its supporting code.
 *
 * WARNING!  This call is made during a switch and the MP lock will be
 * setup for the new target thread rather than the current thread, so we
 * cannot do anything here that depends on the *_mplock() functions as
 * we may trip over their assertions.
 *
 * WARNING!  When using fxsave we MUST fninit after saving the FP state.  The
 * kernel will always assume that the FP state is 'safe' (will not cause
 * exceptions) for mmx/xmm use if npxthread is NULL.  The kernel must still
 * setup a custom save area before actually using the FP unit, but it will
 * not bother calling fninit.  This greatly improves kernel performance when
 * it wishes to use the FP unit.
 */
void
npxsave(union savefpu *addr)
{
#if defined(SMP) || !defined(CPU_DISABLE_SSE)

	crit_enter();
	stop_emulating();
	fpusave(addr);
	mdcpu->gd_npxthread = NULL;
	fninit();
	start_emulating();
	crit_exit();

#else /* !SMP and CPU_DISABLE_SSE */

	u_char icu1_mask;
	u_char icu2_mask;
	u_char old_icu1_mask;
	u_char old_icu2_mask;
	struct gate_descriptor save_idt_npxintr;
	u_long save_eflags;

	save_eflags = read_eflags();
	cpu_disable_intr();
	old_icu1_mask = inb(IO_ICU1 + 1);
	old_icu2_mask = inb(IO_ICU2 + 1);
	save_idt_npxintr = idt[npx_intrno];
	outb(IO_ICU1 + 1, old_icu1_mask & ~((1 << ICU_IRQ_SLAVE) | npx0_imask));
	outb(IO_ICU2 + 1, old_icu2_mask & ~(npx0_imask >> 8));
	idt[npx_intrno] = npx_idt_probeintr;
	cpu_enable_intr();
	stop_emulating();
	fnsave(addr);
	fnop();
	cpu_disable_intr();
	mdcpu->gd_npxthread = NULL;
	start_emulating();
	icu1_mask = inb(IO_ICU1 + 1);	/* masks may have changed */
	icu2_mask = inb(IO_ICU2 + 1);
	outb(IO_ICU1 + 1,
	     (icu1_mask & ~npx0_imask) | (old_icu1_mask & npx0_imask));
	outb(IO_ICU2 + 1,
	     (icu2_mask & ~(npx0_imask >> 8))
	     | (old_icu2_mask & (npx0_imask >> 8)));
	idt[npx_intrno] = save_idt_npxintr;
	write_eflags(save_eflags);	/* back to usual state */

#endif /* SMP || !CPU_DISABLE_SSE */
}

static void
fpusave(union savefpu *addr)
{
#ifndef CPU_DISABLE_SSE
	if (cpu_fxsr)
		fxsave(addr);
	else
#endif
		fnsave(addr);
}

/*
 * Save the FP state to the mcontext structure.
 *
 * WARNING: If you want to try to npxsave() directly to mctx->mc_fpregs,
 * then it MUST be 16-byte aligned.  Currently this is not guaranteed.
 */
void
npxpush(mcontext_t *mctx)
{
	thread_t td = curthread;

	KKASSERT((td->td_flags & TDF_KERNELFP) == 0);

	if (td->td_flags & TDF_USINGFP) {
		if (mdcpu->gd_npxthread == td) {
			/*
			 * XXX Note: This is a bit inefficient if the signal
			 * handler uses floating point, extra faults will
			 * occur.
			 */
			mctx->mc_ownedfp = _MC_FPOWNED_FPU;
			npxsave(td->td_savefpu);
		} else {
			mctx->mc_ownedfp = _MC_FPOWNED_PCB;
		}
		bcopy(td->td_savefpu, mctx->mc_fpregs, sizeof(mctx->mc_fpregs));
		td->td_flags &= ~TDF_USINGFP;
		mctx->mc_fpformat =
#ifndef CPU_DISABLE_SSE
			(cpu_fxsr) ? _MC_FPFMT_XMM :
#endif
			_MC_FPFMT_387;
	} else {
		mctx->mc_ownedfp = _MC_FPOWNED_NONE;
		mctx->mc_fpformat = _MC_FPFMT_NODEV;
	}
}

/*
 * Restore the FP state from the mcontext structure.
 */
void
npxpop(mcontext_t *mctx)
{
	thread_t td = curthread;

	KKASSERT((td->td_flags & TDF_KERNELFP) == 0);

	switch(mctx->mc_ownedfp) {
	case _MC_FPOWNED_NONE:
		/*
		 * If the signal handler used the FP unit but the interrupted
		 * code did not, release the FP unit.  Clearing TDF_USINGFP will
		 * force the FP unit to reinit so the interrupted code sees
		 * a clean slate.
		 */
		if (td->td_flags & TDF_USINGFP) {
			if (td == mdcpu->gd_npxthread)
				npxsave(td->td_savefpu);
			td->td_flags &= ~TDF_USINGFP;
		}
		break;
	case _MC_FPOWNED_FPU:
	case _MC_FPOWNED_PCB:
		/*
		 * Clear ownership of the FP unit and restore our saved state.
		 *
		 * NOTE: The signal handler may have set up some FP state and
		 * enabled the FP unit, so we have to restore no matter what.
		 *
		 * XXX: This is a bit inefficient; if the code being returned
		 * to is actively using the FP this results in multiple
		 * kernel faults.
		 *
		 * WARNING: The saved state was exposed to userland and may
		 * have to be sanitized to avoid a GP fault in the kernel.
		 */
		if (td == mdcpu->gd_npxthread)
			npxsave(td->td_savefpu);
		bcopy(mctx->mc_fpregs, td->td_savefpu, sizeof(*td->td_savefpu));
		if ((td->td_savefpu->sv_xmm.sv_env.en_mxcsr & ~0xFFBF)
#ifndef CPU_DISABLE_SSE
		    && cpu_fxsr
#endif
		) {
			krateprintf(&badfprate,
				    "pid %d (%s) signal return from user: "
				    "illegal FP MXCSR %08x\n",
				    td->td_proc->p_pid,
				    td->td_proc->p_comm,
				    td->td_savefpu->sv_xmm.sv_env.en_mxcsr);
			td->td_savefpu->sv_xmm.sv_env.en_mxcsr &= 0xFFBF;
		}
		td->td_flags |= TDF_USINGFP;
		break;
	}
}

#ifndef CPU_DISABLE_SSE
/*
 * On AuthenticAMD processors, the fxrstor instruction does not restore
 * the x87's stored last instruction pointer, last data pointer, and last
 * opcode values, except in the rare case in which the exception summary
 * (ES) bit in the x87 status word is set to 1.
 *
 * In order to avoid leaking this information across processes, we clean
 * these values by performing a dummy load before executing fxrstor().
 */
static double dummy_variable = 0.0;
static void
fpu_clean_state(void)
{
	u_short status;

	/*
	 * Clear the ES bit in the x87 status word if it is currently
	 * set, in order to avoid causing a fault in the upcoming load.
	 */
	fnstsw(&status);
	if (status & 0x80)
		fnclex();

	/*
	 * Load the dummy variable into the x87 stack.  This mangles
	 * the x87 stack, but we don't care since we're about to call
	 * fxrstor() anyway.
	 */
	__asm __volatile("ffree %%st(7); fld %0" : : "m" (dummy_variable));
}
#endif /* !CPU_DISABLE_SSE */

static void
fpurstor(union savefpu *addr)
{
#ifndef CPU_DISABLE_SSE
	if (cpu_fxsr) {
		fpu_clean_state();
		fxrstor(addr);
	} else {
		frstor(addr);
	}
#else
	frstor(addr);
#endif
}

/*
 * Because npx is a static device that always exists under nexus,
 * and is not scanned by the nexus device, we need an identify
 * function to install the device.
 */
static device_method_t npx_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify,	bus_generic_identify),
	DEVMETHOD(device_probe,		npx_probe),
	DEVMETHOD(device_attach,	npx_attach),
	DEVMETHOD(device_detach,	bus_generic_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	bus_generic_suspend),
	DEVMETHOD(device_resume,	bus_generic_resume),

	{ 0, 0 }
};

static driver_t npx_driver = {
	"npx",
	npx_methods,
	1,			/* no softc */
};

static devclass_t npx_devclass;

/*
 * We prefer to attach to the root nexus so that the usual case (exception 16)
 * doesn't describe the processor as being `on isa'.
 */
DRIVER_MODULE(npx, nexus, npx_driver, npx_devclass, 0, 0);