| 1 | /*- |
| 2 | * Copyright (c) 1990 William Jolitz. |
| 3 | * Copyright (c) 1991 The Regents of the University of California. |
| 4 | * All rights reserved. |
| 5 | * |
| 6 | * Redistribution and use in source and binary forms, with or without |
| 7 | * modification, are permitted provided that the following conditions |
| 8 | * are met: |
| 9 | * 1. Redistributions of source code must retain the above copyright |
| 10 | * notice, this list of conditions and the following disclaimer. |
| 11 | * 2. Redistributions in binary form must reproduce the above copyright |
| 12 | * notice, this list of conditions and the following disclaimer in the |
| 13 | * documentation and/or other materials provided with the distribution. |
| 14 | * 3. All advertising materials mentioning features or use of this software |
| 15 | * must display the following acknowledgement: |
| 16 | * This product includes software developed by the University of |
| 17 | * California, Berkeley and its contributors. |
| 18 | * 4. Neither the name of the University nor the names of its contributors |
| 19 | * may be used to endorse or promote products derived from this software |
| 20 | * without specific prior written permission. |
| 21 | * |
| 22 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
| 23 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
| 24 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| 25 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
| 26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
| 27 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
| 28 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
| 29 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
| 30 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
| 31 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
| 32 | * SUCH DAMAGE. |
| 33 | * |
| 34 | * from: @(#)npx.c 7.2 (Berkeley) 5/12/91 |
| 35 | * $FreeBSD: src/sys/i386/isa/npx.c,v 1.80.2.3 2001/10/20 19:04:38 tegge Exp $ |
| 36 | * $DragonFly: src/sys/i386/isa/Attic/npx.c,v 1.32 2006/09/03 18:29:16 dillon Exp $ |
| 37 | */ |
| 38 | |
| 39 | #include "opt_cpu.h" |
| 40 | #include "opt_debug_npx.h" |
| 41 | #include "opt_math_emulate.h" |
| 42 | |
| 43 | #include <sys/param.h> |
| 44 | #include <sys/systm.h> |
| 45 | #include <sys/bus.h> |
| 46 | #include <sys/kernel.h> |
| 47 | #include <sys/malloc.h> |
| 48 | #include <sys/module.h> |
| 49 | #include <sys/sysctl.h> |
| 50 | #include <sys/proc.h> |
| 51 | #include <machine/bus.h> |
| 52 | #include <sys/rman.h> |
| 53 | #ifdef NPX_DEBUG |
| 54 | #include <sys/syslog.h> |
| 55 | #endif |
| 56 | #include <sys/signalvar.h> |
| 57 | #include <sys/thread2.h> |
| 58 | |
| 59 | #ifndef SMP |
| 60 | #include <machine/asmacros.h> |
| 61 | #endif |
| 62 | #include <machine/cputypes.h> |
| 63 | #include <machine/frame.h> |
| 64 | #include <machine/ipl.h> |
| 65 | #include <machine/md_var.h> |
| 66 | #include <machine/pcb.h> |
| 67 | #include <machine/psl.h> |
| 68 | #ifndef SMP |
| 69 | #include <machine/clock.h> |
| 70 | #endif |
| 71 | #include <machine/resource.h> |
| 72 | #include <machine/specialreg.h> |
| 73 | #include <machine/segments.h> |
| 74 | #include <machine/globaldata.h> |
| 75 | |
| 76 | #ifndef SMP |
| 77 | #include <i386/icu/icu.h> |
| 78 | #include <i386/isa/intr_machdep.h> |
| 79 | #include <bus/isa/i386/isa.h> |
| 80 | #endif |
| 81 | |
| 82 | /* |
| 83 | * 387 and 287 Numeric Coprocessor Extension (NPX) Driver. |
| 84 | */ |
| 85 | |
/* Configuration flags (bits of the "flags" hint for the npx device). */
#define NPX_DISABLE_I586_OPTIMIZED_BCOPY	(1 << 0)
#define NPX_DISABLE_I586_OPTIMIZED_BZERO	(1 << 1)
#define NPX_DISABLE_I586_OPTIMIZED_COPYIO	(1 << 2)
#define NPX_PREFER_EMULATOR			(1 << 3)

#ifdef __GNUC__

/*
 * Inline assembler for the x87 instructions used by this driver.  The
 * "fn*" opcodes are the no-wait forms, so they do not wait for (and thus
 * do not trap on) a pending FPU exception while we manipulate FPU state.
 */
#define	fldcw(addr)		__asm("fldcw %0" : : "m" (*(addr)))
#define	fnclex()		__asm("fnclex")
#define	fninit()		__asm("fninit")
#define	fnop()			__asm("fnop")
#define	fnsave(addr)		__asm __volatile("fnsave %0" : "=m" (*(addr)))
#define	fnstcw(addr)		__asm __volatile("fnstcw %0" : "=m" (*(addr)))
#define	fnstsw(addr)		__asm __volatile("fnstsw %0" : "=m" (*(addr)))
/* Provoke a divide-by-zero fault; the trailing fnop forces delivery. */
#define	fp_divide_by_0()	__asm("fldz; fld1; fdiv %st,%st(1); fnop")
#define	frstor(addr)		__asm("frstor %0" : : "m" (*(addr)))
#ifndef CPU_DISABLE_SSE
/* SSE-era save/restore of the extended (fxsave-format) FPU state. */
#define	fxrstor(addr)		__asm("fxrstor %0" : : "m" (*(addr)))
#define	fxsave(addr)		__asm __volatile("fxsave %0" : "=m" (*(addr)))
#endif
/*
 * start_emulating() sets CR0_TS (via smsw/or/lmsw) so the next FP
 * instruction raises a device-not-available fault; stop_emulating()
 * clears CR0_TS with clts.
 */
#define	start_emulating()	__asm("smsw %%ax; orb %0,%%al; lmsw %%ax" \
				      : : "n" (CR0_TS) : "ax")
#define	stop_emulating()	__asm("clts")

#else /* not __GNUC__ */

/* Out-of-line equivalents of the macros above for non-GNU compilers. */
void	fldcw		(caddr_t addr);
void	fnclex		(void);
void	fninit		(void);
void	fnop		(void);
void	fnsave		(caddr_t addr);
void	fnstcw		(caddr_t addr);
void	fnstsw		(caddr_t addr);
void	fp_divide_by_0	(void);
void	frstor		(caddr_t addr);
#ifndef CPU_DISABLE_SSE
void	fxsave		(caddr_t addr);
void	fxrstor		(caddr_t addr);
#endif
void	start_emulating	(void);
void	stop_emulating	(void);

#endif /* __GNUC__ */

/*
 * Return a pointer to the saved exception status word in a thread's FPU
 * save area, selecting the fxsave (sv_xmm) layout when the CPU uses
 * FXSR, and the classic fnsave (sv_87) layout otherwise.
 */
#ifndef CPU_DISABLE_SSE
#define GET_FPU_EXSW_PTR(td) \
	(cpu_fxsr ? \
		&(td)->td_savefpu->sv_xmm.sv_ex_sw : \
		&(td)->td_savefpu->sv_87.sv_ex_sw)
#else /* CPU_DISABLE_SSE */
#define GET_FPU_EXSW_PTR(td) \
	(&(td)->td_savefpu->sv_87.sv_ex_sw)
#endif /* CPU_DISABLE_SSE */

typedef u_char bool_t;		/* small boolean for the probe-result flags */
#ifndef CPU_DISABLE_SSE
static void fpu_clean_state(void);
#endif
| 145 | |

/* Driver entry points and local helpers. */
static	int	npx_attach	(device_t dev);
	void	npx_intr	(void *);	/* IRQ13 handler, installed in npx_probe1() */
static	int	npx_probe	(device_t dev);
static	int	npx_probe1	(device_t dev);
static	void	fpusave		(union savefpu *);
static	void	fpurstor	(union savefpu *);

int	hw_float;		/* XXX currently just alias for npx_exists */

/* Export hw.floatingpoint so userland can tell whether an FPU exists. */
SYSCTL_INT(_hw,HW_FLOATINGPT, floatingpoint,
	CTLFLAG_RD, &hw_float, 0,
	"Floatingpoint instructions executed in hardware");
#if (defined(I586_CPU) || defined(I686_CPU)) && !defined(CPU_DISABLE_SSE)
int mmxopt = 1;			/* kern.mmxopt: allow MMX/XMM copy routines */
SYSCTL_INT(_kern, OID_AUTO, mmxopt, CTLFLAG_RD, &mmxopt, 0,
	"MMX/XMM optimized bcopy/copyin/copyout support");
#endif

#ifndef SMP
/* Probe-time bookkeeping for the UP IRQ13 probe path (see npx_probe()). */
static	u_int			npx0_imask;
static	struct gate_descriptor	npx_idt_probeintr;
static	int			npx_intrno;
static	volatile u_int		npx_intrs_while_probing;
static	volatile u_int		npx_traps_while_probing;
#endif

/* Probe results: which exception-reporting mechanism the FPU supports. */
static	bool_t			npx_ex16;	/* exception 16 works */
static	bool_t			npx_exists;	/* an FPU is present */
static	bool_t			npx_irq13;	/* stuck with IRQ13 reporting */
static	int			npx_irq;	/* irq number */
| 177 | |
#ifndef SMP
/*
 * Special interrupt handlers.  Someday intr0-intr15 will be used to count
 * interrupts.  We'll still need a special exception 16 handler.  The busy
 * latch stuff in probeintr() can be moved to npxprobe().
 */

/*
 * probeintr: temporary IRQ13 handler used only while probing.  It counts
 * the interrupt in npx_intrs_while_probing, EOIs both ICUs, and clears
 * the BUSY# latch by writing port 0xf0.
 */
inthand_t probeintr;
__asm("								\n\
	.text							\n\
	.p2align 2,0x90						\n\
	.type " __XSTRING(CNAME(probeintr)) ",@function		\n\
" __XSTRING(CNAME(probeintr)) ":				\n\
	ss							\n\
	incl	" __XSTRING(CNAME(npx_intrs_while_probing)) "	\n\
	pushl	%eax						\n\
	movb	$0x20,%al	# EOI (asm in strings loses cpp features) \n\
	outb	%al,$0xa0	# IO_ICU2			\n\
	outb	%al,$0x20	# IO_ICU1			\n\
	movb	$0,%al						\n\
	outb	%al,$0xf0	# clear BUSY# latch		\n\
	popl	%eax						\n\
	iret							\n\
");

/*
 * probetrap: temporary exception-16 handler used only while probing.
 * It counts the trap in npx_traps_while_probing and clears the pending
 * exception with fnclex.
 */
inthand_t probetrap;
__asm("								\n\
	.text							\n\
	.p2align 2,0x90						\n\
	.type " __XSTRING(CNAME(probetrap)) ",@function		\n\
" __XSTRING(CNAME(probetrap)) ":				\n\
	ss							\n\
	incl	" __XSTRING(CNAME(npx_traps_while_probing)) "	\n\
	fnclex							\n\
	iret							\n\
");
#endif /* SMP */
| 214 | |
/*
 * Probe routine.  Initialize cr0 to give correct behaviour for [f]wait
 * whether the device exists or not (XXX should be elsewhere).  Set flags
 * to tell npxattach() what to do.  Modify device struct if npx doesn't
 * need to use interrupts.  Return 1 if device exists.
 *
 * On SMP kernels this is only a thin wrapper around npx_probe1(); on UP
 * kernels it temporarily installs probe-only IRQ13/exception-16 handlers
 * and masks all other interrupts for the duration of npx_probe1(), then
 * restores the saved ICU masks and IDT entries.
 */
static int
npx_probe(device_t dev)
{
#ifdef SMP

	if (resource_int_value("npx", 0, "irq", &npx_irq) != 0)
		npx_irq = 13;		/* default NPX irq */
	return npx_probe1(dev);

#else /* SMP */

	int	result;
	u_long	save_eflags;
	u_char	save_icu1_mask;
	u_char	save_icu2_mask;
	struct	gate_descriptor save_idt_npxintr;
	struct	gate_descriptor save_idt_npxtrap;
	/*
	 * This routine is now just a wrapper for npxprobe1(), to install
	 * special npx interrupt and trap handlers, to enable npx interrupts
	 * and to disable other interrupts.  Someday isa_configure() will
	 * install suitable handlers and run with interrupts enabled so we
	 * won't need to do so much here.
	 */
	if (resource_int_value("npx", 0, "irq", &npx_irq) != 0)
		npx_irq = 13;		/* default NPX irq */
	npx_intrno = IDT_OFFSET + npx_irq;
	save_eflags = read_eflags();
	cpu_disable_intr();
	/* Save ICU masks and the IDT entries we are about to replace. */
	save_icu1_mask = inb(IO_ICU1 + 1);
	save_icu2_mask = inb(IO_ICU2 + 1);
	save_idt_npxintr = idt[npx_intrno];
	save_idt_npxtrap = idt[16];
	/* Unmask only the cascade on ICU1 and the npx irq on ICU2. */
	outb(IO_ICU1 + 1, ~(1 << ICU_IRQ_SLAVE));
	outb(IO_ICU2 + 1, ~(1 << (npx_irq - 8)));
	/* Install the probe-only trap (exception 16) and intr handlers. */
	setidt(16, probetrap, SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(npx_intrno, probeintr, SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	npx_idt_probeintr = idt[npx_intrno];
	cpu_enable_intr();
	result = npx_probe1(dev);
	/* Restore the pre-probe interrupt configuration. */
	cpu_disable_intr();
	outb(IO_ICU1 + 1, save_icu1_mask);
	outb(IO_ICU2 + 1, save_icu2_mask);
	idt[npx_intrno] = save_idt_npxintr;
	idt[16] = save_idt_npxtrap;
	write_eflags(save_eflags);
	return (result);

#endif /* SMP */
}
| 271 | |
/*
 * Second-stage probe.  Resets the coprocessor, sets up CR0 for correct
 * trapping behaviour, then (on UP kernels) checks whether an FPU exists
 * and which exception-reporting mechanism works: exception 16 (preferred),
 * IRQ13 (legacy), or neither (fall back to the emulator).  Always returns
 * 0 so that npx_attach() runs even when no FPU was found.
 */
static int
npx_probe1(device_t dev)
{
#ifndef SMP
	u_short control;
	u_short status;
#endif

	/*
	 * Partially reset the coprocessor, if any.  Some BIOS's don't reset
	 * it after a warm boot.
	 */
	outb(0xf1, 0);		/* full reset on some systems, NOP on others */
	outb(0xf0, 0);		/* clear BUSY# latch */
	/*
	 * Prepare to trap all ESC (i.e., NPX) instructions and all WAIT
	 * instructions.  We must set the CR0_MP bit and use the CR0_TS
	 * bit to control the trap, because setting the CR0_EM bit does
	 * not cause WAIT instructions to trap.  It's important to trap
	 * WAIT instructions - otherwise the "wait" variants of no-wait
	 * control instructions would degenerate to the "no-wait" variants
	 * after FP context switches but work correctly otherwise.  It's
	 * particularly important to trap WAITs when there is no NPX -
	 * otherwise the "wait" variants would always degenerate.
	 *
	 * Try setting CR0_NE to get correct error reporting on 486DX's.
	 * Setting it should fail or do nothing on lesser processors.
	 */
	load_cr0(rcr0() | CR0_MP | CR0_NE);
	/*
	 * But don't trap while we're probing.
	 */
	stop_emulating();
	/*
	 * Finish resetting the coprocessor, if any.  If there is an error
	 * pending, then we may get a bogus IRQ13, but probeintr() will handle
	 * it OK.  Bogus halts have never been observed, but we enabled
	 * IRQ13 and cleared the BUSY# latch early to handle them anyway.
	 */
	fninit();

#ifdef SMP
	/*
	 * Exception 16 MUST work for SMP.
	 */
	npx_irq13 = 0;
	npx_ex16 = hw_float = npx_exists = 1;
	device_set_desc(dev, "math processor");
	return (0);

#else /* !SMP */
	device_set_desc(dev, "math processor");

	/*
	 * Don't use fwait here because it might hang.
	 * Don't use fnop here because it usually hangs if there is no FPU.
	 */
	DELAY(1000);		/* wait for any IRQ13 */
#ifdef DIAGNOSTIC
	if (npx_intrs_while_probing != 0)
		printf("fninit caused %u bogus npx interrupt(s)\n",
		       npx_intrs_while_probing);
	if (npx_traps_while_probing != 0)
		printf("fninit caused %u bogus npx trap(s)\n",
		       npx_traps_while_probing);
#endif
	/*
	 * Check for a status of mostly zero.  The 0x5a5a pre-load lets us
	 * detect the case where fnstsw stored nothing (no FPU present).
	 */
	status = 0x5a5a;
	fnstsw(&status);
	if ((status & 0xb8ff) == 0) {
		/*
		 * Good, now check for a proper control word.
		 */
		control = 0x5a5a;
		fnstcw(&control);
		if ((control & 0x1f3f) == 0x033f) {
			hw_float = npx_exists = 1;
			/*
			 * We have an npx, now divide by 0 to see if exception
			 * 16 works.
			 */
			control &= ~(1 << 2);	/* enable divide by 0 trap */
			fldcw(&control);
			npx_traps_while_probing = npx_intrs_while_probing = 0;
			fp_divide_by_0();
			if (npx_traps_while_probing != 0) {
				/*
				 * Good, exception 16 works.
				 */
				npx_ex16 = 1;
				return (0);
			}
			if (npx_intrs_while_probing != 0) {
				int	rid;
				struct	resource *r;
				void	*intr;
				/*
				 * Bad, we are stuck with IRQ13.
				 */
				npx_irq13 = 1;
				/*
				 * npxattach would be too late to set npx0_imask
				 */
				npx0_imask |= (1 << npx_irq);

				/*
				 * We allocate these resources permanently,
				 * so there is no need to keep track of them.
				 */
				rid = 0;
				r = bus_alloc_resource(dev, SYS_RES_IOPORT,
						       &rid, IO_NPX, IO_NPX,
						       IO_NPXSIZE, RF_ACTIVE);
				if (r == 0)
					panic("npx: can't get ports");
				rid = 0;
				r = bus_alloc_resource(dev, SYS_RES_IRQ,
						       &rid, npx_irq, npx_irq,
						       1, RF_ACTIVE);
				if (r == 0)
					panic("npx: can't get IRQ");
				BUS_SETUP_INTR(device_get_parent(dev),
					       dev, r, 0,
					       npx_intr, 0, &intr, NULL);
				if (intr == 0)
					panic("npx: can't create intr");

				return (0);
			}
			/*
			 * Worse, even IRQ13 is broken.  Use emulator.
			 */
		}
	}
	/*
	 * Probe failed, but we want to get to npxattach to initialize the
	 * emulator and say that it has been installed.  XXX handle devices
	 * that aren't really devices better.
	 */
	return (0);
#endif /* SMP */
}
| 416 | |
| 417 | /* |
| 418 | * Attach routine - announce which it is, and wire into system |
| 419 | */ |
| 420 | int |
| 421 | npx_attach(device_t dev) |
| 422 | { |
| 423 | int flags; |
| 424 | |
| 425 | if (resource_int_value("npx", 0, "flags", &flags) != 0) |
| 426 | flags = 0; |
| 427 | |
| 428 | if (flags) |
| 429 | device_printf(dev, "flags 0x%x ", flags); |
| 430 | if (npx_irq13) { |
| 431 | device_printf(dev, "using IRQ 13 interface\n"); |
| 432 | } else { |
| 433 | #if defined(MATH_EMULATE) |
| 434 | if (npx_ex16) { |
| 435 | if (!(flags & NPX_PREFER_EMULATOR)) |
| 436 | device_printf(dev, "INT 16 interface\n"); |
| 437 | else { |
| 438 | device_printf(dev, "FPU exists, but flags request " |
| 439 | "emulator\n"); |
| 440 | hw_float = npx_exists = 0; |
| 441 | } |
| 442 | } else if (npx_exists) { |
| 443 | device_printf(dev, "error reporting broken; using 387 emulator\n"); |
| 444 | hw_float = npx_exists = 0; |
| 445 | } else |
| 446 | device_printf(dev, "387 emulator\n"); |
| 447 | #else |
| 448 | if (npx_ex16) { |
| 449 | device_printf(dev, "INT 16 interface\n"); |
| 450 | if (flags & NPX_PREFER_EMULATOR) { |
| 451 | device_printf(dev, "emulator requested, but none compiled " |
| 452 | "into kernel, using FPU\n"); |
| 453 | } |
| 454 | } else |
| 455 | device_printf(dev, "no 387 emulator in kernel and no FPU!\n"); |
| 456 | #endif |
| 457 | } |
| 458 | npxinit(__INITIAL_NPXCW__); |
| 459 | |
| 460 | #if (defined(I586_CPU) || defined(I686_CPU)) && !defined(CPU_DISABLE_SSE) |
| 461 | /* |
| 462 | * The asm_mmx_*() routines actually use XMM as well, so only |
| 463 | * enable them if we have SSE2 and are using FXSR (fxsave/fxrstore). |
| 464 | */ |
| 465 | TUNABLE_INT_FETCH("kern.mmxopt", &mmxopt); |
| 466 | if ((cpu_feature & CPUID_MMX) && (cpu_feature & CPUID_SSE) && |
| 467 | (cpu_feature & CPUID_SSE2) && |
| 468 | npx_ex16 && npx_exists && mmxopt && cpu_fxsr |
| 469 | ) { |
| 470 | if ((flags & NPX_DISABLE_I586_OPTIMIZED_BCOPY) == 0) { |
| 471 | bcopy_vector = (void **)asm_xmm_bcopy; |
| 472 | ovbcopy_vector = (void **)asm_xmm_bcopy; |
| 473 | memcpy_vector = (void **)asm_xmm_memcpy; |
| 474 | printf("Using XMM optimized bcopy/copyin/copyout\n"); |
| 475 | } |
| 476 | if ((flags & NPX_DISABLE_I586_OPTIMIZED_BZERO) == 0) { |
| 477 | /* XXX */ |
| 478 | } |
| 479 | } else if ((cpu_feature & CPUID_MMX) && (cpu_feature & CPUID_SSE) && |
| 480 | npx_ex16 && npx_exists && mmxopt && cpu_fxsr |
| 481 | ) { |
| 482 | if ((flags & NPX_DISABLE_I586_OPTIMIZED_BCOPY) == 0) { |
| 483 | bcopy_vector = (void **)asm_mmx_bcopy; |
| 484 | ovbcopy_vector = (void **)asm_mmx_bcopy; |
| 485 | memcpy_vector = (void **)asm_mmx_memcpy; |
| 486 | printf("Using MMX optimized bcopy/copyin/copyout\n"); |
| 487 | } |
| 488 | if ((flags & NPX_DISABLE_I586_OPTIMIZED_BZERO) == 0) { |
| 489 | /* XXX */ |
| 490 | } |
| 491 | } |
| 492 | #endif |
| 493 | #if 0 |
| 494 | if (cpu_class == CPUCLASS_586 && npx_ex16 && npx_exists && |
| 495 | timezero("i586_bzero()", i586_bzero) < |
| 496 | timezero("bzero()", bzero) * 4 / 5) { |
| 497 | if (!(flags & NPX_DISABLE_I586_OPTIMIZED_BCOPY)) { |
| 498 | bcopy_vector = i586_bcopy; |
| 499 | ovbcopy_vector = i586_bcopy; |
| 500 | } |
| 501 | if (!(flags & NPX_DISABLE_I586_OPTIMIZED_BZERO)) |
| 502 | bzero = i586_bzero; |
| 503 | if (!(flags & NPX_DISABLE_I586_OPTIMIZED_COPYIO)) { |
| 504 | copyin_vector = i586_copyin; |
| 505 | copyout_vector = i586_copyout; |
| 506 | } |
| 507 | } |
| 508 | #endif |
| 509 | return (0); /* XXX unused */ |
| 510 | } |
| 511 | |
/*
 * Initialize the floating point unit with the given control word.
 * No-op when the probe found no FPU.  Leaves CR0_TS set (emulation
 * started) and gd_npxthread NULL, so the first FP use after this
 * will fault into the DNA handler.
 */
void
npxinit(u_short control)
{
	static union savefpu dummy;	/* scratch area for npxsave() below */

	if (!npx_exists)
		return;
	/*
	 * fninit has the same h/w bugs as fnsave.  Use the detoxified
	 * fnsave to throw away any junk in the fpu.  npxsave() initializes
	 * the fpu and sets npxthread = NULL as important side effects.
	 */
	npxsave(&dummy);
	crit_enter();
	stop_emulating();		/* clear CR0_TS so the FP ops below don't fault */
	fldcw(&control);		/* load the requested control word */
	fpusave(curthread->td_savefpu);	/* capture the freshly initialized state */
	mdcpu->gd_npxthread = NULL;
	start_emulating();		/* set CR0_TS again */
	crit_exit();
}
| 536 | |
/*
 * Free coprocessor (if we have it).  Called at process exit: if the
 * exiting process currently owns the FPU, save its state (which also
 * clears gd_npxthread as a side effect of npxsave()).
 */
void
npxexit(struct proc *p)
{
	if (p->p_thread == mdcpu->gd_npxthread)
		npxsave(curthread->td_savefpu);
#ifdef NPX_DEBUG
	if (npx_exists) {
		u_int	masked_exceptions;

		/* Exception bits that occurred but were masked in the cw. */
		masked_exceptions =
		    curthread->td_savefpu->sv_87.sv_env.en_cw
		    & curthread->td_savefpu->sv_87.sv_env.en_sw & 0x7f;
		/*
		 * Log exceptions that would have trapped with the old
		 * control word (overflow, divide by 0, and invalid operand).
		 */
		if (masked_exceptions & 0x0d)
			log(LOG_ERR,
	"pid %d (%s) exited with masked floating point exceptions 0x%02x\n",
			    p->p_pid, p->p_comm, masked_exceptions);
	}
#endif
}
| 563 | |
/*
 * The following mechanism is used to ensure that the FPE_... value
 * that is passed as a trapcode to the signal handler of the user
 * process does not have more than one bit set.
 *
 * Multiple bits may be set if the user process modifies the control
 * word while a status word bit is already set.  While this is a sign
 * of bad coding, we have no choice than to narrow them down to one
 * bit, since we must not send a trapcode that is not exactly one of
 * the FPE_ macros.
 *
 * The mechanism has a static table with 127 entries.  Each combination
 * of the 7 FPU status word exception bits directly translates to a
 * position in this table, where a single FPE_... value is stored.
 * This FPE_... value stored there is considered the "most important"
 * of the exception bits and will be sent as the signal code.  The
 * precedence of the bits is based upon Intel Document "Numerical
 * Applications", Chapter "Special Computational Situations".
 *
 * The macro to choose one of these values does these steps: 1) Throw
 * away status word bits that cannot be masked.  2) Throw away the bits
 * currently masked in the control word, assuming the user isn't
 * interested in them anymore.  3) Reinsert status word bit 7 (stack
 * fault) if it is set, which cannot be masked but must be preserved.
 * 4) Use the remaining bits to point into the trapcode table.
 *
 * The 6 maskable bits in order of their preference, as stated in the
 * above referenced Intel manual:
 *   1  Invalid operation (FP_X_INV)
 *     1a   Stack underflow
 *     1b   Stack overflow
 *     1c   Operand of unsupported format
 *     1d   SNaN operand.
 *   2  QNaN operand (not an exception, irrelevant here)
 *   3  Any other invalid-operation not mentioned above or zero divide
 *      (FP_X_INV, FP_X_DZ)
 *   4  Denormal operand (FP_X_DNML)
 *   5  Numeric over/underflow (FP_X_OFL, FP_X_UFL)
 *   6  Inexact result (FP_X_IMP)
 */

/*
 * Index = status-word exception bits (INV|DNML|DZ|OFL|UFL|IMP|STK);
 * value = the single highest-precedence FPE_* code for that combination.
 */
static char fpetable[128] = {
	0,		/* 0 - no exception bits set */
	FPE_FLTINV,	/*  1 - INV */
	FPE_FLTUND,	/*  2 - DNML */
	FPE_FLTINV,	/*  3 - INV | DNML */
	FPE_FLTDIV,	/*  4 - DZ */
	FPE_FLTINV,	/*  5 - INV | DZ */
	FPE_FLTDIV,	/*  6 - DNML | DZ */
	FPE_FLTINV,	/*  7 - INV | DNML | DZ */
	FPE_FLTOVF,	/*  8 - OFL */
	FPE_FLTINV,	/*  9 - INV | OFL */
	FPE_FLTUND,	/*  A - DNML | OFL */
	FPE_FLTINV,	/*  B - INV | DNML | OFL */
	FPE_FLTDIV,	/*  C - DZ | OFL */
	FPE_FLTINV,	/*  D - INV | DZ | OFL */
	FPE_FLTDIV,	/*  E - DNML | DZ | OFL */
	FPE_FLTINV,	/*  F - INV | DNML | DZ | OFL */
	FPE_FLTUND,	/* 10 - UFL */
	FPE_FLTINV,	/* 11 - INV | UFL */
	FPE_FLTUND,	/* 12 - DNML | UFL */
	FPE_FLTINV,	/* 13 - INV | DNML | UFL */
	FPE_FLTDIV,	/* 14 - DZ | UFL */
	FPE_FLTINV,	/* 15 - INV | DZ | UFL */
	FPE_FLTDIV,	/* 16 - DNML | DZ | UFL */
	FPE_FLTINV,	/* 17 - INV | DNML | DZ | UFL */
	FPE_FLTOVF,	/* 18 - OFL | UFL */
	FPE_FLTINV,	/* 19 - INV | OFL | UFL */
	FPE_FLTUND,	/* 1A - DNML | OFL | UFL */
	FPE_FLTINV,	/* 1B - INV | DNML | OFL | UFL */
	FPE_FLTDIV,	/* 1C - DZ | OFL | UFL */
	FPE_FLTINV,	/* 1D - INV | DZ | OFL | UFL */
	FPE_FLTDIV,	/* 1E - DNML | DZ | OFL | UFL */
	FPE_FLTINV,	/* 1F - INV | DNML | DZ | OFL | UFL */
	FPE_FLTRES,	/* 20 - IMP */
	FPE_FLTINV,	/* 21 - INV | IMP */
	FPE_FLTUND,	/* 22 - DNML | IMP */
	FPE_FLTINV,	/* 23 - INV | DNML | IMP */
	FPE_FLTDIV,	/* 24 - DZ | IMP */
	FPE_FLTINV,	/* 25 - INV | DZ | IMP */
	FPE_FLTDIV,	/* 26 - DNML | DZ | IMP */
	FPE_FLTINV,	/* 27 - INV | DNML | DZ | IMP */
	FPE_FLTOVF,	/* 28 - OFL | IMP */
	FPE_FLTINV,	/* 29 - INV | OFL | IMP */
	FPE_FLTUND,	/* 2A - DNML | OFL | IMP */
	FPE_FLTINV,	/* 2B - INV | DNML | OFL | IMP */
	FPE_FLTDIV,	/* 2C - DZ | OFL | IMP */
	FPE_FLTINV,	/* 2D - INV | DZ | OFL | IMP */
	FPE_FLTDIV,	/* 2E - DNML | DZ | OFL | IMP */
	FPE_FLTINV,	/* 2F - INV | DNML | DZ | OFL | IMP */
	FPE_FLTUND,	/* 30 - UFL | IMP */
	FPE_FLTINV,	/* 31 - INV | UFL | IMP */
	FPE_FLTUND,	/* 32 - DNML | UFL | IMP */
	FPE_FLTINV,	/* 33 - INV | DNML | UFL | IMP */
	FPE_FLTDIV,	/* 34 - DZ | UFL | IMP */
	FPE_FLTINV,	/* 35 - INV | DZ | UFL | IMP */
	FPE_FLTDIV,	/* 36 - DNML | DZ | UFL | IMP */
	FPE_FLTINV,	/* 37 - INV | DNML | DZ | UFL | IMP */
	FPE_FLTOVF,	/* 38 - OFL | UFL | IMP */
	FPE_FLTINV,	/* 39 - INV | OFL | UFL | IMP */
	FPE_FLTUND,	/* 3A - DNML | OFL | UFL | IMP */
	FPE_FLTINV,	/* 3B - INV | DNML | OFL | UFL | IMP */
	FPE_FLTDIV,	/* 3C - DZ | OFL | UFL | IMP */
	FPE_FLTINV,	/* 3D - INV | DZ | OFL | UFL | IMP */
	FPE_FLTDIV,	/* 3E - DNML | DZ | OFL | UFL | IMP */
	FPE_FLTINV,	/* 3F - INV | DNML | DZ | OFL | UFL | IMP */
	FPE_FLTSUB,	/* 40 - STK */
	FPE_FLTSUB,	/* 41 - INV | STK */
	FPE_FLTUND,	/* 42 - DNML | STK */
	FPE_FLTSUB,	/* 43 - INV | DNML | STK */
	FPE_FLTDIV,	/* 44 - DZ | STK */
	FPE_FLTSUB,	/* 45 - INV | DZ | STK */
	FPE_FLTDIV,	/* 46 - DNML | DZ | STK */
	FPE_FLTSUB,	/* 47 - INV | DNML | DZ | STK */
	FPE_FLTOVF,	/* 48 - OFL | STK */
	FPE_FLTSUB,	/* 49 - INV | OFL | STK */
	FPE_FLTUND,	/* 4A - DNML | OFL | STK */
	FPE_FLTSUB,	/* 4B - INV | DNML | OFL | STK */
	FPE_FLTDIV,	/* 4C - DZ | OFL | STK */
	FPE_FLTSUB,	/* 4D - INV | DZ | OFL | STK */
	FPE_FLTDIV,	/* 4E - DNML | DZ | OFL | STK */
	FPE_FLTSUB,	/* 4F - INV | DNML | DZ | OFL | STK */
	FPE_FLTUND,	/* 50 - UFL | STK */
	FPE_FLTSUB,	/* 51 - INV | UFL | STK */
	FPE_FLTUND,	/* 52 - DNML | UFL | STK */
	FPE_FLTSUB,	/* 53 - INV | DNML | UFL | STK */
	FPE_FLTDIV,	/* 54 - DZ | UFL | STK */
	FPE_FLTSUB,	/* 55 - INV | DZ | UFL | STK */
	FPE_FLTDIV,	/* 56 - DNML | DZ | UFL | STK */
	FPE_FLTSUB,	/* 57 - INV | DNML | DZ | UFL | STK */
	FPE_FLTOVF,	/* 58 - OFL | UFL | STK */
	FPE_FLTSUB,	/* 59 - INV | OFL | UFL | STK */
	FPE_FLTUND,	/* 5A - DNML | OFL | UFL | STK */
	FPE_FLTSUB,	/* 5B - INV | DNML | OFL | UFL | STK */
	FPE_FLTDIV,	/* 5C - DZ | OFL | UFL | STK */
	FPE_FLTSUB,	/* 5D - INV | DZ | OFL | UFL | STK */
	FPE_FLTDIV,	/* 5E - DNML | DZ | OFL | UFL | STK */
	FPE_FLTSUB,	/* 5F - INV | DNML | DZ | OFL | UFL | STK */
	FPE_FLTRES,	/* 60 - IMP | STK */
	FPE_FLTSUB,	/* 61 - INV | IMP | STK */
	FPE_FLTUND,	/* 62 - DNML | IMP | STK */
	FPE_FLTSUB,	/* 63 - INV | DNML | IMP | STK */
	FPE_FLTDIV,	/* 64 - DZ | IMP | STK */
	FPE_FLTSUB,	/* 65 - INV | DZ | IMP | STK */
	FPE_FLTDIV,	/* 66 - DNML | DZ | IMP | STK */
	FPE_FLTSUB,	/* 67 - INV | DNML | DZ | IMP | STK */
	FPE_FLTOVF,	/* 68 - OFL | IMP | STK */
	FPE_FLTSUB,	/* 69 - INV | OFL | IMP | STK */
	FPE_FLTUND,	/* 6A - DNML | OFL | IMP | STK */
	FPE_FLTSUB,	/* 6B - INV | DNML | OFL | IMP | STK */
	FPE_FLTDIV,	/* 6C - DZ | OFL | IMP | STK */
	FPE_FLTSUB,	/* 6D - INV | DZ | OFL | IMP | STK */
	FPE_FLTDIV,	/* 6E - DNML | DZ | OFL | IMP | STK */
	FPE_FLTSUB,	/* 6F - INV | DNML | DZ | OFL | IMP | STK */
	FPE_FLTUND,	/* 70 - UFL | IMP | STK */
	FPE_FLTSUB,	/* 71 - INV | UFL | IMP | STK */
	FPE_FLTUND,	/* 72 - DNML | UFL | IMP | STK */
	FPE_FLTSUB,	/* 73 - INV | DNML | UFL | IMP | STK */
	FPE_FLTDIV,	/* 74 - DZ | UFL | IMP | STK */
	FPE_FLTSUB,	/* 75 - INV | DZ | UFL | IMP | STK */
	FPE_FLTDIV,	/* 76 - DNML | DZ | UFL | IMP | STK */
	FPE_FLTSUB,	/* 77 - INV | DNML | DZ | UFL | IMP | STK */
	FPE_FLTOVF,	/* 78 - OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 79 - INV | OFL | UFL | IMP | STK */
	FPE_FLTUND,	/* 7A - DNML | OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 7B - INV | DNML | OFL | UFL | IMP | STK */
	FPE_FLTDIV,	/* 7C - DZ | OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 7D - INV | DZ | OFL | UFL | IMP | STK */
	FPE_FLTDIV,	/* 7E - DNML | DZ | OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 7F - INV | DNML | DZ | OFL | UFL | IMP | STK */
};
| 734 | |
| 735 | /* |
| 736 | * Preserve the FP status word, clear FP exceptions, then generate a SIGFPE. |
| 737 | * |
| 738 | * Clearing exceptions is necessary mainly to avoid IRQ13 bugs. We now |
| 739 | * depend on longjmp() restoring a usable state. Restoring the state |
| 740 | * or examining it might fail if we didn't clear exceptions. |
| 741 | * |
| 742 | * The error code chosen will be one of the FPE_... macros. It will be |
| 743 | * sent as the second argument to old BSD-style signal handlers and as |
| 744 | * "siginfo_t->si_code" (second argument) to SA_SIGINFO signal handlers. |
| 745 | * |
| 746 | * XXX the FP state is not preserved across signal handlers. So signal |
| 747 | * handlers cannot afford to do FP unless they preserve the state or |
| 748 | * longjmp() out. Both preserving the state and longjmp()ing may be |
| 749 | * destroyed by IRQ13 bugs. Clearing FP exceptions is not an acceptable |
| 750 | * solution for signals other than SIGFPE. |
| 751 | * |
| 752 | * The MP lock is not held on entry (see i386/i386/exception.s) and |
| 753 | * should not be held on exit. Interrupts are enabled. We must enter |
| 754 | * a critical section to stabilize the FP system and prevent an interrupt |
| 755 | * or preemption from changing the FP state out from under us. |
| 756 | */ |
| 757 | void |
| 758 | npx_intr(void *dummy) |
| 759 | { |
| 760 | int code; |
| 761 | u_short control; |
| 762 | struct intrframe *frame; |
| 763 | u_long *exstat; |
| 764 | |
| 765 | crit_enter(); |
| 766 | |
| 767 | /* |
| 768 | * This exception can only occur with CR0_TS clear, otherwise we |
| 769 | * would get a DNA exception. However, since interrupts were |
| 770 | * enabled a preemption could have sneaked in and used the FP system |
| 771 | * before we entered our critical section. If that occurred, the |
| 772 | * TS bit will be set and npxthread will be NULL. |
| 773 | */ |
| 774 | if (npx_exists && (rcr0() & CR0_TS)) { |
| 775 | KASSERT(mdcpu->gd_npxthread == NULL, ("gd_npxthread was %p with TS set!", mdcpu->gd_npxthread)); |
| 776 | npxdna(); |
| 777 | crit_exit(); |
| 778 | return; |
| 779 | } |
| 780 | if (mdcpu->gd_npxthread == NULL || !npx_exists) { |
| 781 | get_mplock(); |
| 782 | printf("npxintr: npxthread = %p, curthread = %p, npx_exists = %d\n", |
| 783 | mdcpu->gd_npxthread, curthread, npx_exists); |
| 784 | panic("npxintr from nowhere"); |
| 785 | } |
| 786 | if (mdcpu->gd_npxthread != curthread) { |
| 787 | get_mplock(); |
| 788 | printf("npxintr: npxthread = %p, curthread = %p, npx_exists = %d\n", |
| 789 | mdcpu->gd_npxthread, curthread, npx_exists); |
| 790 | panic("npxintr from non-current process"); |
| 791 | } |
| 792 | |
| 793 | exstat = GET_FPU_EXSW_PTR(curthread); |
| 794 | outb(0xf0, 0); |
| 795 | fnstsw(exstat); |
| 796 | fnstcw(&control); |
| 797 | fnclex(); |
| 798 | |
| 799 | get_mplock(); |
| 800 | |
| 801 | /* |
| 802 | * Pass exception to process. |
| 803 | */ |
| 804 | frame = (struct intrframe *)&dummy; /* XXX */ |
| 805 | if ((ISPL(frame->if_cs) == SEL_UPL) || (frame->if_eflags & PSL_VM)) { |
| 806 | /* |
| 807 | * Interrupt is essentially a trap, so we can afford to call |
| 808 | * the SIGFPE handler (if any) as soon as the interrupt |
| 809 | * returns. |
| 810 | * |
| 811 | * XXX little or nothing is gained from this, and plenty is |
| 812 | * lost - the interrupt frame has to contain the trap frame |
| 813 | * (this is otherwise only necessary for the rescheduling trap |
| 814 | * in doreti, and the frame for that could easily be set up |
| 815 | * just before it is used). |
| 816 | */ |
| 817 | curproc->p_md.md_regs = INTR_TO_TRAPFRAME(frame); |
| 818 | /* |
| 819 | * Encode the appropriate code for detailed information on |
| 820 | * this exception. |
| 821 | */ |
| 822 | code = |
| 823 | fpetable[(*exstat & ~control & 0x3f) | (*exstat & 0x40)]; |
| 824 | trapsignal(curproc, SIGFPE, code); |
| 825 | } else { |
| 826 | /* |
| 827 | * Nested interrupt. These losers occur when: |
| 828 | * o an IRQ13 is bogusly generated at a bogus time, e.g.: |
| 829 | * o immediately after an fnsave or frstor of an |
| 830 | * error state. |
| 831 | * o a couple of 386 instructions after |
| 832 | * "fstpl _memvar" causes a stack overflow. |
| 833 | * These are especially nasty when combined with a |
| 834 | * trace trap. |
| 835 | * o an IRQ13 occurs at the same time as another higher- |
| 836 | * priority interrupt. |
| 837 | * |
| 838 | * Treat them like a true async interrupt. |
| 839 | */ |
| 840 | ksignal(curproc, SIGFPE); |
| 841 | } |
| 842 | rel_mplock(); |
| 843 | crit_exit(); |
| 844 | } |
| 845 | |
| 846 | /* |
| 847 | * Implement the device not available (DNA) exception. gd_npxthread had |
| 848 | * better be NULL. Restore the current thread's FP state and set gd_npxthread |
| 849 | * to curthread. |
| 850 | * |
| 851 | * Interrupts are enabled and preemption can occur. Enter a critical |
| 852 | * section to stabilize the FP state. |
| 853 | */ |
int
npxdna(void)
{
	u_long *exstat;

	/* No coprocessor: let the caller fall back to emulation/trap. */
	if (!npx_exists)
		return (0);
	/* A DNA trap while some thread still owns the FPU is fatal. */
	if (mdcpu->gd_npxthread != NULL) {
		printf("npxdna: npxthread = %p, curthread = %p\n",
		       mdcpu->gd_npxthread, curthread);
		panic("npxdna");
	}
	/*
	 * The setting of gd_npxthread and the call to fpurstor() must not
	 * be preempted by an interrupt thread or we will take an npxdna
	 * trap and potentially save our current fpstate (which is garbage)
	 * and then restore the garbage rather then the originally saved
	 * fpstate.
	 */
	crit_enter();
	stop_emulating();	/* clear CR0_TS; FP insns stop faulting */
	/*
	 * Record new context early in case frstor causes an IRQ13.
	 */
	mdcpu->gd_npxthread = curthread;
	exstat = GET_FPU_EXSW_PTR(curthread);
	*exstat = 0;		/* no pending FP exception status yet */
	/*
	 * The following frstor may cause an IRQ13 when the state being
	 * restored has a pending error.  The error will appear to have been
	 * triggered by the current (npx) user instruction even when that
	 * instruction is a no-wait instruction that should not trigger an
	 * error (e.g., fnclex).  On at least one 486 system all of the
	 * no-wait instructions are broken the same as frstor, so our
	 * treatment does not amplify the breakage.  On at least one
	 * 386/Cyrix 387 system, fnclex works correctly while frstor and
	 * fnsave are broken, so our treatment breaks fnclex if it is the
	 * first FPU instruction after a context switch.
	 */
	fpurstor(curthread->td_savefpu);
	crit_exit();

	return (1);
}
| 898 | |
| 899 | /* |
| 900 | * Wrapper for the fnsave instruction to handle h/w bugs. If there is an error |
| 901 | * pending, then fnsave generates a bogus IRQ13 on some systems. Force |
| 902 | * any IRQ13 to be handled immediately, and then ignore it. This routine is |
| 903 | * often called at splhigh so it must not use many system services. In |
| 904 | * particular, it's much easier to install a special handler than to |
| 905 | * guarantee that it's safe to use npxintr() and its supporting code. |
| 906 | * |
| 907 | * WARNING! This call is made during a switch and the MP lock will be |
| 908 | * setup for the new target thread rather then the current thread, so we |
| 909 | * cannot do anything here that depends on the *_mplock() functions as |
| 910 | * we may trip over their assertions. |
| 911 | * |
| 912 | * WARNING! When using fxsave we MUST fninit after saving the FP state. The |
| 913 | * kernel will always assume that the FP state is 'safe' (will not cause |
| 914 | * exceptions) for mmx/xmm use if npxthread is NULL. The kernel must still |
| 915 | * setup a custom save area before actually using the FP unit, but it will |
| 916 | * not bother calling fninit. This greatly improves kernel performance when |
| 917 | * it wishes to use the FP unit. |
| 918 | */ |
void
npxsave(union savefpu *addr)
{
#if defined(SMP) || !defined(CPU_DISABLE_SSE)

	/*
	 * Modern path: save the FP state, release ownership, and fninit
	 * so the hardware state is 'safe' for kernel mmx/xmm use (see the
	 * WARNING above).  start_emulating() re-sets CR0_TS so the next
	 * user FP instruction takes a DNA trap.
	 */
	crit_enter();
	stop_emulating();
	fpusave(addr);
	mdcpu->gd_npxthread = NULL;
	fninit();
	start_emulating();
	crit_exit();

#else /* !SMP and CPU_DISABLE_SSE */

	/*
	 * Legacy IRQ13 path: fnsave of an error state may latch a bogus
	 * IRQ13 (see the comment above this function).  Temporarily unmask
	 * the npx IRQ on the 8259 PICs and point its IDT entry at the
	 * throwaway probe handler, so any such IRQ13 is delivered and
	 * discarded here rather than reaching npx_intr() later.
	 */
	u_char icu1_mask;
	u_char icu2_mask;
	u_char old_icu1_mask;
	u_char old_icu2_mask;
	struct gate_descriptor	save_idt_npxintr;
	u_long	save_eflags;

	save_eflags = read_eflags();
	cpu_disable_intr();
	/* IO_ICU[12]+1 is the PIC interrupt-mask register (OCW1). */
	old_icu1_mask = inb(IO_ICU1 + 1);
	old_icu2_mask = inb(IO_ICU2 + 1);
	save_idt_npxintr = idt[npx_intrno];
	outb(IO_ICU1 + 1, old_icu1_mask & ~((1 << ICU_IRQ_SLAVE) | npx0_imask));
	outb(IO_ICU2 + 1, old_icu2_mask & ~(npx0_imask >> 8));
	idt[npx_intrno] = npx_idt_probeintr;
	cpu_enable_intr();
	stop_emulating();
	fnsave(addr);
	/*
	 * fnop waits on the FPU, forcing any IRQ13 latched by the fnsave
	 * to be delivered now, where the probe handler swallows it.
	 */
	fnop();
	cpu_disable_intr();
	mdcpu->gd_npxthread = NULL;
	start_emulating();
	/* Restore only the npx bits of the masks; others may have changed. */
	icu1_mask = inb(IO_ICU1 + 1);	/* masks may have changed */
	icu2_mask = inb(IO_ICU2 + 1);
	outb(IO_ICU1 + 1,
	    (icu1_mask & ~npx0_imask) | (old_icu1_mask & npx0_imask));
	outb(IO_ICU2 + 1,
	    (icu2_mask & ~(npx0_imask >> 8))
	    | (old_icu2_mask & (npx0_imask >> 8)));
	idt[npx_intrno] = save_idt_npxintr;
	write_eflags(save_eflags);	/* back to usual state */

#endif /* SMP */
}
| 968 | |
| 969 | static void |
| 970 | fpusave(union savefpu *addr) |
| 971 | { |
| 972 | #ifndef CPU_DISABLE_SSE |
| 973 | if (cpu_fxsr) |
| 974 | fxsave(addr); |
| 975 | else |
| 976 | #endif |
| 977 | fnsave(addr); |
| 978 | } |
| 979 | |
| 980 | #ifndef CPU_DISABLE_SSE |
| 981 | /* |
| 982 | * On AuthenticAMD processors, the fxrstor instruction does not restore |
| 983 | * the x87's stored last instruction pointer, last data pointer, and last |
| 984 | * opcode values, except in the rare case in which the exception summary |
| 985 | * (ES) bit in the x87 status word is set to 1. |
| 986 | * |
| 987 | * In order to avoid leaking this information across processes, we clean |
| 988 | * these values by performing a dummy load before executing fxrstor(). |
| 989 | */ |
| 990 | static double dummy_variable = 0.0; |
| 991 | static void |
| 992 | fpu_clean_state(void) |
| 993 | { |
| 994 | u_short status; |
| 995 | |
| 996 | /* |
| 997 | * Clear the ES bit in the x87 status word if it is currently |
| 998 | * set, in order to avoid causing a fault in the upcoming load. |
| 999 | */ |
| 1000 | fnstsw(&status); |
| 1001 | if (status & 0x80) |
| 1002 | fnclex(); |
| 1003 | |
| 1004 | /* |
| 1005 | * Load the dummy variable into the x87 stack. This mangles |
| 1006 | * the x87 stack, but we don't care since we're about to call |
| 1007 | * fxrstor() anyway. |
| 1008 | */ |
| 1009 | __asm __volatile("ffree %%st(7); fld %0" : : "m" (dummy_variable)); |
| 1010 | } |
| 1011 | #endif /* CPU_DISABLE_SSE */ |
| 1012 | |
| 1013 | static void |
| 1014 | fpurstor(union savefpu *addr) |
| 1015 | { |
| 1016 | #ifndef CPU_DISABLE_SSE |
| 1017 | if (cpu_fxsr) { |
| 1018 | fpu_clean_state(); |
| 1019 | fxrstor(addr); |
| 1020 | } else { |
| 1021 | frstor(addr); |
| 1022 | } |
| 1023 | #else |
| 1024 | frstor(addr); |
| 1025 | #endif |
| 1026 | } |
| 1027 | |
| 1028 | /* |
| 1029 | * Because npx is a static device that always exists under nexus, |
| 1030 | * and is not scanned by the nexus device, we need an identify |
| 1031 | * function to install the device. |
| 1032 | */ |
static device_method_t npx_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify,	bus_generic_identify),
	DEVMETHOD(device_probe,		npx_probe),
	DEVMETHOD(device_attach,	npx_attach),
	DEVMETHOD(device_detach,	bus_generic_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	bus_generic_suspend),
	DEVMETHOD(device_resume,	bus_generic_resume),

	{ 0, 0 }			/* table terminator */
};
| 1045 | |
/* Driver description: name, method table, and softc size (none needed). */
static driver_t npx_driver = {
	"npx",
	npx_methods,
	1,			/* no softc */
};
| 1051 | |
static devclass_t npx_devclass;

/*
 * We prefer to attach to the root nexus so that the usual case (exception 16)
 * doesn't describe the processor as being `on isa'.
 */
DRIVER_MODULE(npx, nexus, npx_driver, npx_devclass, 0, 0);