2 * Copyright (c) KATO Takenori, 1997, 1998.
4 * All rights reserved. Unpublished rights reserved under the copyright
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer as
13 * the first lines of this file unmodified.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 * $FreeBSD: src/sys/i386/i386/initcpu.c,v 1.19.2.9 2003/04/05 13:47:19 dwmalone Exp $
34 #include <sys/param.h>
35 #include <sys/kernel.h>
36 #include <sys/systm.h>
37 #include <sys/sysctl.h>
39 #include <machine/cputypes.h>
40 #include <machine/md_var.h>
41 #include <machine/specialreg.h>
/*
 * Forward declarations.  The init_* helpers below are static per-CPU-family
 * setup routines; the enable_K*_wt_alloc() functions are exported for the
 * AMD write-allocate support (only built with I586_CPU && CPU_WT_ALLOC).
 * NOTE(review): matching #endif lines for the #if/#ifdef guards are elided
 * in this view — confirm against the full file.
 */
43 void initializecpu(void);
44 #if defined(I586_CPU) && defined(CPU_WT_ALLOC)
45 void enable_K5_wt_alloc(void);
46 void enable_K6_wt_alloc(void);
47 void enable_K6_2_wt_alloc(void);
51 static void init_bluelightning(void);
52 #ifdef CPU_I486_ON_386
53 static void init_i486_on_386(void);
58 static void init_ppro(void);
59 static void init_mendocino(void);
/* Set to 1 by init_sse() when SSE is usable; exported read-only via sysctl. */
62 static int hw_instruction_sse;
63 SYSCTL_INT(_hw, OID_AUTO, instruction_sse, CTLFLAG_RD,
64 &hw_instruction_sse, 0, "SIMD/MMX2 instructions available in CPU");
66 /* Must *NOT* be BSS or locore will bzero these after setting them */
67 int cpu = 0; /* Are we 386, 386sx, 486, etc? */
68 u_int cpu_feature = 0; /* Feature flags */
69 u_int cpu_feature2 = 0; /* Feature flags (second CPUID feature word) */
70 u_int amd_feature = 0; /* AMD feature flags */
71 u_int amd_feature2 = 0; /* AMD feature flags */
72 u_int amd_pminfo = 0; /* AMD advanced power management info */
73 u_int via_feature_rng = 0; /* VIA RNG features */
74 u_int via_feature_xcrypt = 0; /* VIA ACE features */
75 u_int cpu_high = 0; /* Highest arg to CPUID */
76 u_int cpu_id = 0; /* Stepping ID */
77 u_int cpu_procinfo = 0; /* HyperThreading Info / Brand Index / CLFLUSH */
78 u_int cpu_procinfo2 = 0; /* Multicore info */
79 char cpu_vendor[20] = ""; /* CPU Origin code */
80 u_int cpu_vendor_id = 0; /* CPU vendor ID */
81 u_int cpu_clflush_line_size = 32; /* Default CLFLUSH line size */
/*
 * Tunable controlling whether CLFLUSH is advertised/used:
 */
84 * -1: automatic (enable on h/w, disable on VMs)
86 * 1: enable (where available)
88 static int hw_clflush_enable = -1;
90 SYSCTL_INT(_hw, OID_AUTO, clflush_enable, CTLFLAG_RD, &hw_clflush_enable, 0,
94 SYSCTL_UINT(_hw, OID_AUTO, via_feature_rng, CTLFLAG_RD,
95 &via_feature_rng, 0, "VIA C3/C7 RNG feature available in CPU");
96 SYSCTL_UINT(_hw, OID_AUTO, via_feature_xcrypt, CTLFLAG_RD,
97 &via_feature_xcrypt, 0, "VIA C3/C7 xcrypt feature available in CPU");
99 #ifndef CPU_DISABLE_SSE
100 u_int cpu_fxsr; /* SSE enabled */
/*
 * IBM Blue Lightning (Cyrix-built i486-class) setup: with interrupts/cache
 * quiesced (CR0.CD|CR0.NW set), program the Cyrix configuration MSRs
 * (0x1000-0x1002) for FPU-operand cacheability, cache regions, and clock
 * multiplier, then restore normal caching and EFLAGS.
 * NOTE(review): the #else/#endif lines pairing the #ifdefs below are elided
 * in this view — confirm against the full file.
 */
108 init_bluelightning(void)
112 eflags = read_eflags();
/* Disable caching while the configuration MSRs are reprogrammed. */
115 load_cr0(rcr0() | CR0_CD | CR0_NW);
118 #ifdef CPU_BLUELIGHTNING_FPU_OP_CACHE
119 wrmsr(0x1000, 0x9c92LL); /* FP operand can be cacheable on Cyrix FPU */
121 wrmsr(0x1000, 0x1c92LL); /* Intel FPU */
123 /* Enables 13MB and 0-640KB cache. */
124 wrmsr(0x1001, (0xd0LL << 32) | 0x3ff);
125 #ifdef CPU_BLUELIGHTNING_3X
126 wrmsr(0x1002, 0x04000000LL); /* Enables triple-clock mode. */
128 wrmsr(0x1002, 0x03000000LL); /* Enables double-clock mode. */
131 /* Enable caching in CR0. */
132 load_cr0(rcr0() & ~(CR0_CD | CR0_NW)); /* CD = 0 and NW = 0 */
134 write_eflags(eflags);
137 #ifdef CPU_I486_ON_386
/*
 * i486 upgrade CPUs installed in i386 boards: the board's BIOS does not
 * know about the on-chip cache, so enable it here by clearing CR0.CD/NW.
 */
139 * There are i486 based upgrade products for i386 machines.
140 * In this case, BIOS doesn't enable the CPU cache.
143 init_i486_on_386(void)
147 eflags = read_eflags();
150 load_cr0(rcr0() & ~(CR0_CD | CR0_NW)); /* CD = 0, NW = 0 */
152 write_eflags(eflags);
156 #endif /* I486_CPU */
166 * Initialize BBL_CR_CTL3 (Control register 3: used to configure the
 * L2 cache) on Mendocino Celeron / PPro-class parts.
172 #ifdef CPU_PPRO2CELERON
174 u_int64_t bbl_cr_ctl3;
176 eflags = read_eflags();
/* Disable caching while MSR 0x11e (BBL_CR_CTL3) is reprogrammed. */
179 load_cr0(rcr0() | CR0_CD | CR0_NW);
182 bbl_cr_ctl3 = rdmsr(0x11e);
184 /* If the L2 cache is configured, do nothing. */
185 if (!(bbl_cr_ctl3 & 1)) {
186 bbl_cr_ctl3 = 0x134052bLL;
188 /* Set L2 Cache Latency (Default: 5). */
189 #ifdef CPU_CELERON_L2_LATENCY
190 #if CPU_L2_LATENCY > 15
191 #error invalid CPU_L2_LATENCY.
193 bbl_cr_ctl3 |= CPU_L2_LATENCY << 1;
195 bbl_cr_ctl3 |= 5 << 1;
197 wrmsr(0x11e, bbl_cr_ctl3);
200 load_cr0(rcr0() & ~(CR0_CD | CR0_NW));
201 write_eflags(eflags);
202 #endif /* CPU_PPRO2CELERON */
206 * Initialize special VIA C3/C7 features
/*
 * Probe the VIA extended CPUID leaves (0xc0000000/0xc0000001) and, for each
 * PadLock engine that is present but not yet enabled, set the enable bit in
 * the relevant feature-control MSR (0x110B for RNG, 0x1107 for ACE/PHE/PMM),
 * then record the capability in via_feature_rng / via_feature_xcrypt.
 * NOTE(review): the lines that extract `val` from `regs` and the closing
 * braces are elided in this view.
 */
214 do_cpuid(0xc0000000, regs);
216 if (val >= 0xc0000001) {
217 do_cpuid(0xc0000001, regs);
222 /* Enable RNG if present and disabled */
223 if (val & VIA_CPUID_HAS_RNG) {
224 if (!(val & VIA_CPUID_DO_RNG)) {
225 msreg = rdmsr(0x110B);
227 wrmsr(0x110B, msreg);
229 via_feature_rng = VIA_HAS_RNG;
231 /* Enable AES engine if present and disabled */
232 if (val & VIA_CPUID_HAS_ACE) {
233 if (!(val & VIA_CPUID_DO_ACE)) {
234 msreg = rdmsr(0x1107);
235 msreg |= (0x01 << 28);
236 wrmsr(0x1107, msreg);
238 via_feature_xcrypt |= VIA_HAS_AES;
240 /* Enable ACE2 engine if present and disabled */
241 if (val & VIA_CPUID_HAS_ACE2) {
242 if (!(val & VIA_CPUID_DO_ACE2)) {
243 msreg = rdmsr(0x1107);
244 msreg |= (0x01 << 28);
245 wrmsr(0x1107, msreg);
247 via_feature_xcrypt |= VIA_HAS_AESCTR;
249 /* Enable SHA engine if present and disabled */
250 if (val & VIA_CPUID_HAS_PHE) {
251 if (!(val & VIA_CPUID_DO_PHE)) {
252 msreg = rdmsr(0x1107);
253 msreg |= (0x01 << 28);
254 wrmsr(0x1107, msreg);
256 via_feature_xcrypt |= VIA_HAS_SHA;
258 /* Enable MM engine if present and disabled */
259 if (val & VIA_CPUID_HAS_PMM) {
260 if (!(val & VIA_CPUID_DO_PMM)) {
261 msreg = rdmsr(0x1107);
262 msreg |= (0x01 << 28);
263 wrmsr(0x1107, msreg);
265 via_feature_xcrypt |= VIA_HAS_MM;
269 #endif /* I686_CPU */
272 * Initialize CR4 (Control register 4) to enable SSE instructions.
277 #ifndef CPU_DISABLE_SSE
/* FXSR + SSE both reported by CPUID: turn on OS support bits in CR4. */
278 if ((cpu_feature & CPUID_SSE) && (cpu_feature & CPUID_FXSR)) {
279 load_cr4(rcr4() | CR4_FXSR | CR4_XMM);
280 cpu_fxsr = hw_instruction_sse = 1;
290 #ifdef CPU_ATHLON_SSE_HACK
292 * Sometimes the BIOS doesn't enable SSE instructions.
293 * According to AMD document 20734, the mobile
294 * Duron, the (mobile) Athlon 4 and the Athlon MP
295 * support SSE. These correspond to cpu_id 0x66X
298 if ((cpu_feature & CPUID_XMM) == 0 &&
299 ((cpu_id & ~0xf) == 0x660 ||
300 (cpu_id & ~0xf) == 0x670 ||
301 (cpu_id & ~0xf) == 0x680)) {
/* Clear the BIOS-set disable bit in MSR 0xC0010015, then re-read CPUID. */
303 wrmsr(0xC0010015, rdmsr(0xC0010015) & ~0x08000);
305 cpu_feature = regs[3];
309 #endif /* I686_CPU */
/*
 * initializecpu() body (entry point declared above): dispatch per-vendor /
 * per-model setup, detect hypervisors, apply the AMD Errata 721 workaround,
 * and honor the hw.clflush_enable tunable.
 * NOTE(review): switch cases, braces and several statements are elided in
 * this view — confirm control flow against the full file.
 */
319 init_bluelightning();
321 #ifdef CPU_I486_ON_386
326 #endif /* I486_CPU */
329 if (cpu_vendor_id == CPU_VENDOR_INTEL) {
330 switch (cpu_id & 0xff0) {
338 } else if (cpu_vendor_id == CPU_VENDOR_AMD) {
340 } else if (cpu_vendor_id == CPU_VENDOR_CENTAUR) {
341 switch (cpu_id & 0xff0) {
343 if ((cpu_id & 0xf) < 3)
362 /* Check if we are running in a hypervisor. */
363 vmm_guest = detect_virtual();
364 if (vmm_guest == VMM_GUEST_NONE && (cpu_feature2 & CPUID2_VMM))
365 vmm_guest = VMM_GUEST_UNKNOWN;
367 if (cpu_vendor_id == CPU_VENDOR_AMD) {
368 switch((cpu_id & 0xFF0000)) {
372 * Errata 721 is the cpu bug found by yours truly
373 * (Matthew Dillon). It is a bug where a sequence
374 * of 5 or more popq's + a retq, under involved
375 * deep recursion circumstances, can cause the %rsp
376 * to not be properly updated, almost always
377 * resulting in a seg-fault soon after.
379 * While the errata is not documented as affecting
380 * 32-bit mode, install the workaround out of an
381 * abundance of caution.
383 * Do not install the workaround when we are running
384 * in a virtual machine.
389 msr = rdmsr(MSR_AMD_DE_CFG);
390 if ((msr & 1) == 0) {
391 kprintf("Errata 721 workaround installed\n");
393 wrmsr(MSR_AMD_DE_CFG, msr);
/* Derive the CLFLUSH line size from CPUID, then apply the tunable. */
399 TUNABLE_INT_FETCH("hw.clflush_enable", &hw_clflush_enable);
400 if (cpu_feature & CPUID_CLFSH) {
401 cpu_clflush_line_size = ((cpu_procinfo >> 8) & 0xff) * 8;
403 if (hw_clflush_enable == 0 ||
404 ((hw_clflush_enable == -1) && vmm_guest))
405 cpu_feature &= ~CPUID_CLFSH;
410 #if defined(I586_CPU) && defined(CPU_WT_ALLOC)
412 * Enable write allocate feature of AMD processors.
413 * Following two functions require the Maxmem variable being set.
/*
 * AMD K5: program the write-allocate registers.  The enable bit (0x10 in
 * HWCR, MSR 0x83) is cleared while the top-of-memory / hole registers are
 * reprogrammed, then set again at the end.
 */
416 enable_K5_wt_alloc(void)
421 * Write allocate is supported only on models 1, 2, and 3, with
422 * a stepping of 4 or greater.
424 if (((cpu_id & 0xf0) > 0) && ((cpu_id & 0x0f) > 3)) {
426 msr = rdmsr(0x83); /* HWCR */
/*
 * BUGFIX: was `msr & !(0x10)`, i.e. logical NOT — !(0x10) == 0, which
 * wiped the entire HWCR value instead of clearing just the
 * write-allocate enable bit.  Bitwise ~ clears only bit 4, matching
 * the re-enable via `msr|0x10` below.
 */
427 wrmsr(0x83, msr & ~(0x10));
430 * We have to tell the chip where the top of memory is,
431 * since video cards could have frame buffers there,
432 * memory-mapped I/O could be there, etc.
438 msr |= AMD_WT_ALLOC_TME | AMD_WT_ALLOC_FRE;
441 * There is no way to know whether the 15-16M hole exists or not.
442 * Therefore, we disable write allocate for this range.
444 wrmsr(0x86, 0x0ff00f0);
445 msr |= AMD_WT_ALLOC_PRE;
449 wrmsr(0x83, msr|0x10); /* enable write allocate */
/*
 * AMD K6: enable write allocation via WHCR (MSR 0xc0000082), sizing the
 * write-allocatable region from Maxmem in 4M units (limit 508M).
 * NOTE(review): declarations, #endif pairs and closing braces are elided in
 * this view — confirm against the full file.
 */
456 enable_K6_wt_alloc(void)
462 eflags = read_eflags();
466 #ifdef CPU_DISABLE_CACHE
468 * Certain K6-2 box becomes unstable when write allocation is
472 * The AMD-K6 processor provides the 64-bit Test Register 12(TR12),
473 * but only the Cache Inhibit(CI) (bit 3 of TR12) is supported.
474 * All other bits in TR12 have no effect on the processor's operation.
475 * The I/O Trap Restart function (bit 9 of TR12) is always enabled
478 wrmsr(0x0000000e, (u_int64_t)0x0008);
480 /* Don't assume that memory size is aligned with 4M. */
482 size = ((Maxmem >> 8) + 3) >> 2;
486 /* Limit is 508M bytes. */
/* WHCR bits 1-7 hold the write-allocate size on plain K6. */
489 whcr = (rdmsr(0xc0000082) & ~(0x7fLL << 1)) | (size << 1);
491 #if defined(NO_MEMORY_HOLE)
492 if (whcr & (0x7fLL << 1))
496 * There is no way to know whether the 15-16M hole exists or not.
497 * Therefore, we disable write allocate for this range.
501 wrmsr(0x0c0000082, whcr);
503 write_eflags(eflags);
/*
 * AMD K6-2/K6-III: same idea as enable_K6_wt_alloc(), but the newer WHCR
 * layout keeps the size in bits 22-31 (4M units, limit 4092M) and has a
 * dedicated 15-16M hole disable bit (bit 16).
 * NOTE(review): declarations, #endif pairs and closing braces are elided in
 * this view — confirm against the full file.
 */
507 enable_K6_2_wt_alloc(void)
513 eflags = read_eflags();
517 #ifdef CPU_DISABLE_CACHE
519 * Certain K6-2 box becomes unstable when write allocation is
523 * The AMD-K6 processor provides the 64-bit Test Register 12(TR12),
524 * but only the Cache Inhibit(CI) (bit 3 of TR12) is supported.
525 * All other bits in TR12 have no effect on the processor's operation.
526 * The I/O Trap Restart function (bit 9 of TR12) is always enabled
529 wrmsr(0x0000000e, (u_int64_t)0x0008);
531 /* Don't assume that memory size is aligned with 4M. */
533 size = ((Maxmem >> 8) + 3) >> 2;
537 /* Limit is 4092M bytes. */
540 whcr = (rdmsr(0xc0000082) & ~(0x3ffLL << 22)) | (size << 22);
542 #if defined(NO_MEMORY_HOLE)
543 if (whcr & (0x3ffLL << 22))
547 * There is no way to know whether the 15-16M hole exists or not.
548 * Therefore, we disable write allocate for this range.
550 whcr &= ~(1LL << 16);
552 wrmsr(0x0c0000082, whcr);
554 write_eflags(eflags);
556 #endif /* I586_CPU && CPU_WT_ALLOC */