2 * Copyright (c) KATO Takenori, 1997, 1998.
3 * Copyright (c) 2008 The DragonFly Project.
5 * All rights reserved. Unpublished rights reserved under the copyright
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer as
14 * the first lines of this file unmodified.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 #include <sys/param.h>
34 #include <sys/kernel.h>
35 #include <sys/systm.h>
36 #include <sys/sysctl.h>
38 #include <machine/cputypes.h>
39 #include <machine/md_var.h>
40 #include <machine/specialreg.h>
41 #include <machine/smp.h>
/*
 * CPU capability discovery results.  These globals are filled in by the
 * CPU-identification code and consumed throughout the kernel; several are
 * also exported read-only through sysctl below.
 */
static int hw_instruction_sse;
SYSCTL_INT(_hw, OID_AUTO, instruction_sse, CTLFLAG_RD,
    &hw_instruction_sse, 0, "SIMD/MMX2 instructions available in CPU");

int cpu;			/* Are we 386, 386sx, 486, etc? */
u_int cpu_feature;		/* Feature flags */
u_int cpu_feature2;		/* Feature flags */
u_int amd_feature;		/* AMD feature flags */
u_int amd_feature2;		/* AMD feature flags */
u_int via_feature_rng;		/* VIA RNG features */
u_int via_feature_xcrypt;	/* VIA ACE features */
u_int cpu_high;			/* Highest arg to CPUID */
u_int cpu_exthigh;		/* Highest arg to extended CPUID */
u_int cpu_id;			/* Stepping ID */
u_int cpu_procinfo;		/* HyperThreading Info / Brand Index / CLFUSH */
u_int cpu_procinfo2;		/* Multicore info */
char cpu_vendor[20];		/* CPU Origin code */
u_int cpu_vendor_id;		/* CPU vendor ID */
u_int cpu_fxsr;			/* SSE enabled */
u_int cpu_mxcsr_mask;		/* Valid bits in mxcsr */

/* Read-only sysctl mirrors of the VIA PadLock feature flags above. */
SYSCTL_UINT(_hw, OID_AUTO, via_feature_rng, CTLFLAG_RD,
    &via_feature_rng, 0, "VIA C3/C7 RNG feature available in CPU");
SYSCTL_UINT(_hw, OID_AUTO, via_feature_xcrypt, CTLFLAG_RD,
    &via_feature_xcrypt, 0, "VIA C3/C7 xcrypt feature available in CPU");
/*
 * Initialize special VIA C3/C7 features
 *
 * Probes the VIA/Centaur CPUID leaves (0xc0000000 reports the highest
 * supported Centaur leaf; 0xc0000001 reports the PadLock feature bits),
 * then for each engine that is reported present (HAS_*) but not yet
 * enabled (DO_*) flips its enable bit via wrmsr and records availability
 * in via_feature_rng / via_feature_xcrypt for the sysctl exports above.
 *
 * NOTE(review): this excerpt is fragmentary -- the function signature,
 * the declarations of regs/val/msreg, the RNG wrmsr, and most closing
 * braces fall outside the visible lines.  Comments below annotate only
 * what is shown; do not treat the brace structure here as complete.
 */
	/* Highest supported Centaur CPUID leaf. */
	do_cpuid(0xc0000000, regs);
	if (val >= 0xc0000001) {
		/* PadLock feature flags leaf. */
		do_cpuid(0xc0000001, regs);

	/* Enable RNG if present and disabled */
	if (val & VIA_CPUID_HAS_RNG) {
		if (!(val & VIA_CPUID_DO_RNG)) {
			/*
			 * MSR 0x110B: VIA RNG configuration register.
			 * NOTE(review): the bit-set and wrmsr that follow
			 * in the full source are missing from this view.
			 */
			msreg = rdmsr(0x110B);
		via_feature_rng = VIA_HAS_RNG;

	/* Enable AES engine if present and disabled */
	if (val & VIA_CPUID_HAS_ACE) {
		if (!(val & VIA_CPUID_DO_ACE)) {
			/*
			 * MSR 0x1107 is the VIA feature-control register;
			 * bit 28 presumably enables this engine -- verify
			 * against the VIA PadLock programming guide.
			 */
			msreg = rdmsr(0x1107);
			msreg |= (0x01 << 28);
			wrmsr(0x1107, msreg);
		via_feature_xcrypt |= VIA_HAS_AES;

	/* Enable ACE2 engine if present and disabled */
	if (val & VIA_CPUID_HAS_ACE2) {
		if (!(val & VIA_CPUID_DO_ACE2)) {
			msreg = rdmsr(0x1107);
			msreg |= (0x01 << 28);
			wrmsr(0x1107, msreg);
		via_feature_xcrypt |= VIA_HAS_AESCTR;

	/* Enable SHA engine if present and disabled */
	if (val & VIA_CPUID_HAS_PHE) {
		if (!(val & VIA_CPUID_DO_PHE)) {
			msreg = rdmsr(0x1107);
			msreg |= (0x01 << 28);
			wrmsr(0x1107, msreg);
		via_feature_xcrypt |= VIA_HAS_SHA;

	/* Enable MM engine if present and disabled */
	if (val & VIA_CPUID_HAS_PMM) {
		if (!(val & VIA_CPUID_DO_PMM)) {
			msreg = rdmsr(0x1107);
			msreg |= (0x01 << 28);
			wrmsr(0x1107, msreg);
		via_feature_xcrypt |= VIA_HAS_MM;
/*
 * Initialize CPU control registers
 *
 * Enables SSE via CR4 when the CPU advertises FXSR+XMM, optionally
 * configures HyperTransport interrupt routing on certain AMD parts,
 * and enables the NX (no-execute) page-protection bit in EFER.
 *
 * NOTE(review): fragmentary excerpt -- the function signature, local
 * declarations (tcr, msr), several statements, and the function tail
 * continue outside the visible lines; the final Centaur check at the
 * bottom is cut off mid-condition.
 */
	/*
	 * CR4_FXSR (OSFXSR) + CR4_XMM let the OS use fxsave/fxrstor and
	 * unmask SSE; record SSE availability for the sysctl above.
	 */
	if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
		load_cr4(rcr4() | CR4_FXSR | CR4_XMM);
		cpu_fxsr = hw_instruction_sse = 1;
#ifdef CPU_AMD64X2_INTR_SPAM
	/*
	 * Set the LINTEN bit in the HyperTransport Transaction
	 * control register.
	 *
	 * This will cause EXTINT and NMI interrupts routed over the
	 * hypertransport bus to be fed into the LAPIC LINT0/LINT1.  If
	 * the bit isn't set, the interrupts will go to the general cpu
	 * INTR/NMI pins.  On a multi-core cpus the interrupt winds up
	 * going to ALL cpus.  The first cpu that does the interrupt ack
	 * cycle will get the correct interrupt.  The second cpu that does
	 * it will get a spurious interrupt vector (typically IRQ 7).
	 */
	/* Only revision-f30 steppings need this workaround. */
	if ((cpu_id & 0xff0) == 0xf30) {
		/* PCI type-1 config address for the HT transaction control
		 * register (enclosing outl() to 0xcf8 not visible here). */
		(1 << 31) |	/* enable */
		(0 << 16) |	/* bus */
		(24 << 11) |	/* dev (cpu + 24) */
		(0 << 8) |	/* func */
		/* Set the LINTEN bit (0x00010000) only if not already set. */
		if ((tcr & 0x00010000) == 0) {
			outl(0xcfc, tcr|0x00010000);
	/* Turn on EFER.NXE so non-executable page mappings are enforced. */
	if ((amd_feature & AMDID_NX) != 0) {
		msr = rdmsr(MSR_EFER) | EFER_NXE;
		wrmsr(MSR_EFER, msr);
	/* VIA/Centaur C7 (family 6, model >= 0xf): condition continues
	 * past this excerpt -- presumably gating a call to init_via(). */
	if (cpu_vendor_id == CPU_VENDOR_CENTAUR &&
	    CPUID_TO_FAMILY(cpu_id) == 0x6 &&
	    CPUID_TO_MODEL(cpu_id) >= 0xf)