kernel - Implement Errata 721 for 32-bit kernels too
[dragonfly.git] / sys / platform / pc32 / i386 / initcpu.c
/*
 * Copyright (c) KATO Takenori, 1997, 1998.
 *
 * All rights reserved. Unpublished rights reserved under the copyright
 * laws of Japan.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/initcpu.c,v 1.19.2.9 2003/04/05 13:47:19 dwmalone Exp $
 */

#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

void initializecpu(void);
#if defined(I586_CPU) && defined(CPU_WT_ALLOC)
void enable_K5_wt_alloc(void);
void enable_K6_wt_alloc(void);
void enable_K6_2_wt_alloc(void);
#endif

#ifdef I486_CPU
static void init_5x86(void);
static void init_bluelightning(void);
static void init_486dlc(void);
static void init_cy486dx(void);
#ifdef CPU_I486_ON_386
static void init_i486_on_386(void);
#endif
static void init_6x86(void);
#endif /* I486_CPU */

#ifdef I686_CPU
static void init_6x86MX(void);
static void init_ppro(void);
static void init_mendocino(void);
#endif

static int hw_instruction_sse;
SYSCTL_INT(_hw, OID_AUTO, instruction_sse, CTLFLAG_RD,
    &hw_instruction_sse, 0, "SIMD/MMX2 instructions available in CPU");
/* Must *NOT* be BSS or locore will bzero these after setting them */
int	cpu = 0;		/* Are we 386, 386sx, 486, etc? */
u_int	cpu_feature = 0;	/* Feature flags */
u_int	cpu_feature2 = 0;	/* Feature flags */
u_int	amd_feature = 0;	/* AMD feature flags */
u_int	amd_feature2 = 0;	/* AMD feature flags */
u_int	amd_pminfo = 0;		/* AMD advanced power management info */
u_int	via_feature_rng = 0;	/* VIA RNG features */
u_int	via_feature_xcrypt = 0;	/* VIA ACE features */
u_int	cpu_high = 0;		/* Highest arg to CPUID */
u_int	cpu_id = 0;		/* Stepping ID */
u_int	cpu_procinfo = 0;	/* HyperThreading Info / Brand Index / CLFLUSH */
u_int	cpu_procinfo2 = 0;	/* Multicore info */
char	cpu_vendor[20] = "";	/* CPU Origin code */
u_int	cpu_vendor_id = 0;	/* CPU vendor ID */
u_int	cpu_clflush_line_size = 32;	/* Default CLFLUSH line size */

/*
 * -1: automatic (enable on h/w, disable on VMs)
 *  0: disable
 *  1: enable (where available)
 */
static int hw_clflush_enable = -1;

SYSCTL_INT(_hw, OID_AUTO, clflush_enable, CTLFLAG_RD, &hw_clflush_enable, 0,
    "CLFLUSH usage (-1: automatic, 0: disable, 1: enable)");

SYSCTL_UINT(_hw, OID_AUTO, via_feature_rng, CTLFLAG_RD,
    &via_feature_rng, 0, "VIA C3/C7 RNG feature available in CPU");
SYSCTL_UINT(_hw, OID_AUTO, via_feature_xcrypt, CTLFLAG_RD,
    &via_feature_xcrypt, 0, "VIA C3/C7 xcrypt feature available in CPU");

#ifndef CPU_DISABLE_SSE
u_int	cpu_fxsr;		/* SSE enabled */
#endif

#ifdef I486_CPU
/*
 * IBM Blue Lightning
 */
static void
init_bluelightning(void)
{
	u_long	eflags;

	eflags = read_eflags();
	cpu_disable_intr();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	invd();

#ifdef CPU_BLUELIGHTNING_FPU_OP_CACHE
	wrmsr(0x1000, 0x9c92LL);	/* FP operand can be cacheable on Cyrix FPU */
#else
	wrmsr(0x1000, 0x1c92LL);	/* Intel FPU */
#endif
	/* Enables 13MB and 0-640KB cache. */
	wrmsr(0x1001, (0xd0LL << 32) | 0x3ff);
#ifdef CPU_BLUELIGHTNING_3X
	wrmsr(0x1002, 0x04000000LL);	/* Enables triple-clock mode. */
#else
	wrmsr(0x1002, 0x03000000LL);	/* Enables double-clock mode. */
#endif

	/* Enable caching in CR0. */
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	invd();
	write_eflags(eflags);
}

/*
 * Cyrix 486SLC/DLC/SR/DR series
 */
static void
init_486dlc(void)
{
	u_long	eflags;
	u_char	ccr0;

	eflags = read_eflags();
	cpu_disable_intr();
	invd();

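	/*
	 * The Cyrix configuration registers (CCRs) are reached through an
	 * index/data pair: the register index goes to I/O port 0x22 and
	 * the value is transferred via port 0x23, which is what the
	 * read_cyrix_reg()/write_cyrix_reg() helpers do.
	 */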
	ccr0 = read_cyrix_reg(CCR0);
#ifndef CYRIX_CACHE_WORKS
	ccr0 |= CCR0_NC1 | CCR0_BARB;
	write_cyrix_reg(CCR0, ccr0);
	invd();
#else
	ccr0 &= ~CCR0_NC0;
#ifndef CYRIX_CACHE_REALLY_WORKS
	ccr0 |= CCR0_NC1 | CCR0_BARB;
#else
	ccr0 |= CCR0_NC1;
#endif
#ifdef CPU_DIRECT_MAPPED_CACHE
	ccr0 |= CCR0_CO;	/* Direct mapped mode. */
#endif
	write_cyrix_reg(CCR0, ccr0);

	/* Clear non-cacheable region. */
	write_cyrix_reg(NCR1+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR2+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR3+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR4+2, NCR_SIZE_0K);

	write_cyrix_reg(0, 0);	/* dummy write */

	/* Enable caching in CR0. */
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	invd();
#endif /* !CYRIX_CACHE_WORKS */
	write_eflags(eflags);
}

/*
 * Cyrix 486S/DX series
 */
static void
init_cy486dx(void)
{
	u_long	eflags;
	u_char	ccr2;

	eflags = read_eflags();
	cpu_disable_intr();
	invd();

	ccr2 = read_cyrix_reg(CCR2);
#ifdef CPU_SUSP_HLT
	ccr2 |= CCR2_SUSP_HLT;
#endif

	write_cyrix_reg(CCR2, ccr2);
	write_eflags(eflags);
}

/*
 * Cyrix 5x86
 */
static void
init_5x86(void)
{
	u_long	eflags;
	u_char	ccr2, ccr3, ccr4, pcr0;

	eflags = read_eflags();
	cpu_disable_intr();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	read_cyrix_reg(CCR3);	/* dummy */

	/* Initialize CCR2. */
	ccr2 = read_cyrix_reg(CCR2);
	ccr2 |= CCR2_WB;
#ifdef CPU_SUSP_HLT
	ccr2 |= CCR2_SUSP_HLT;
#else
	ccr2 &= ~CCR2_SUSP_HLT;
#endif
	ccr2 |= CCR2_WT1;
	write_cyrix_reg(CCR2, ccr2);

	/* Initialize CCR4. */
	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);

	ccr4 = read_cyrix_reg(CCR4);
	ccr4 |= CCR4_DTE;
	ccr4 |= CCR4_MEM;
#ifdef CPU_FASTER_5X86_FPU
	ccr4 |= CCR4_FASTFPE;
#else
	ccr4 &= ~CCR4_FASTFPE;
#endif
	ccr4 &= ~CCR4_IOMASK;
	/********************************************************************
	 * WARNING: The "BIOS Writers Guide" mentions that I/O recovery time
	 * should be 0 for errata fix.
	 ********************************************************************/
#ifdef CPU_IORT
	ccr4 |= CPU_IORT & CCR4_IOMASK;
#endif
	write_cyrix_reg(CCR4, ccr4);

	/* Initialize PCR0. */
	/****************************************************************
	 * WARNING: RSTK_EN and LOOP_EN could make your system unstable.
	 * BTB_EN might make your system unstable.
	 ****************************************************************/
	pcr0 = read_cyrix_reg(PCR0);
#ifdef CPU_RSTK_EN
	pcr0 |= PCR0_RSTK;
#else
	pcr0 &= ~PCR0_RSTK;
#endif
#ifdef CPU_BTB_EN
	pcr0 |= PCR0_BTB;
#else
	pcr0 &= ~PCR0_BTB;
#endif
#ifdef CPU_LOOP_EN
	pcr0 |= PCR0_LOOP;
#else
	pcr0 &= ~PCR0_LOOP;
#endif

	/****************************************************************
	 * WARNING: if you use a memory mapped I/O device, don't use
	 * DISABLE_5X86_LSSER option, which may reorder memory mapped
	 * I/O access.
	 * IF YOUR MOTHERBOARD HAS PCI BUS, DON'T DISABLE LSSER.
	 ****************************************************************/
#ifdef CPU_DISABLE_5X86_LSSER
	pcr0 &= ~PCR0_LSSER;
#else
	pcr0 |= PCR0_LSSER;
#endif
	write_cyrix_reg(PCR0, pcr0);

	/* Restore CCR3. */
	write_cyrix_reg(CCR3, ccr3);

	read_cyrix_reg(0x80);	/* dummy */

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);
	load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0, NW = 1 */
	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	write_eflags(eflags);
}

#ifdef CPU_I486_ON_386
/*
 * There are i486-based upgrade products for i386 machines.
 * In this case, the BIOS doesn't enable the CPU cache.
 */
static void
init_i486_on_386(void)
{
	u_long	eflags;

	eflags = read_eflags();
	cpu_disable_intr();

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0, NW = 0 */

	write_eflags(eflags);
}
#endif

/*
 * Cyrix 6x86
 *
 * XXX - What should I do here? Please let me know.
 */
static void
init_6x86(void)
{
	u_long	eflags;
	u_char	ccr3, ccr4;

	eflags = read_eflags();
	cpu_disable_intr();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	/* Initialize CCR0. */
	write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);

	/* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
#else
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
#endif

	/* Initialize CCR2. */
#ifdef CPU_SUSP_HLT
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
#else
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
#endif

	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);

	/* Initialize CCR4. */
	ccr4 = read_cyrix_reg(CCR4);
	ccr4 |= CCR4_DTE;
	ccr4 &= ~CCR4_IOMASK;
#ifdef CPU_IORT
	write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
#else
	write_cyrix_reg(CCR4, ccr4 | 7);
#endif

	/* Initialize CCR5. */
#ifdef CPU_WT_ALLOC
	write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
#endif

	/* Restore CCR3. */
	write_cyrix_reg(CCR3, ccr3);

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);

	/*
	 * Earlier revisions of the 6x86 CPU could crash the system if
	 * the L1 cache is in write-back mode.
	 */
	if ((cyrix_did & 0xff00) > 0x1600)
		load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	else {
		/* Revision 2.6 and lower. */
#ifdef CYRIX_CACHE_REALLY_WORKS
		load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
#else
		load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0 and NW = 1 */
#endif
	}

	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	write_eflags(eflags);
}
#endif /* I486_CPU */

#ifdef I686_CPU
/*
 * Cyrix 6x86MX (code-named M2)
 *
 * XXX - What should I do here? Please let me know.
 */
static void
init_6x86MX(void)
{
	u_long	eflags;
	u_char	ccr3, ccr4;

	eflags = read_eflags();
	cpu_disable_intr();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	/* Initialize CCR0. */
	write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);

	/* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
#else
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
#endif

	/* Initialize CCR2. */
#ifdef CPU_SUSP_HLT
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
#else
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
#endif

	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);

	/* Initialize CCR4. */
	ccr4 = read_cyrix_reg(CCR4);
	ccr4 &= ~CCR4_IOMASK;
#ifdef CPU_IORT
	write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
#else
	write_cyrix_reg(CCR4, ccr4 | 7);
#endif

	/* Initialize CCR5. */
#ifdef CPU_WT_ALLOC
	write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
#endif

	/* Restore CCR3. */
	write_cyrix_reg(CCR3, ccr3);

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */

	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	write_eflags(eflags);
}

static void
init_ppro(void)
{
#ifndef SMP
	u_int64_t	apicbase;

	/*
	 * The local APIC should be disabled in a UP kernel.
	 */
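	/*
	 * MSR 0x1b is IA32_APIC_BASE; bit 11 (0x800) is the APIC global
	 * enable bit, which is cleared below to switch the local APIC off.
	 */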
	apicbase = rdmsr(0x1b);
	apicbase &= ~0x800LL;
	wrmsr(0x1b, apicbase);
#endif
}

/*
 * Initialize BBL_CR_CTL3 (Control register 3: used to configure the
 * L2 cache).
 */
static void
init_mendocino(void)
{
#ifdef CPU_PPRO2CELERON
	u_long	eflags;
	u_int64_t	bbl_cr_ctl3;

	eflags = read_eflags();
	cpu_disable_intr();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	bbl_cr_ctl3 = rdmsr(0x11e);
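	/*
	 * MSR 0x11e is BBL_CR_CTL3. Bit 0 indicates that the L2 cache has
	 * already been configured, and the latency value (at most 15, so
	 * a 4-bit field) is shifted into bits 4:1 below.
	 */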

	/* If the L2 cache is configured, do nothing. */
	if (!(bbl_cr_ctl3 & 1)) {
		bbl_cr_ctl3 = 0x134052bLL;

		/* Set L2 Cache Latency (Default: 5). */
#ifdef CPU_CELERON_L2_LATENCY
#if CPU_L2_LATENCY > 15
#error invalid CPU_L2_LATENCY.
#endif
		bbl_cr_ctl3 |= CPU_L2_LATENCY << 1;
#else
		bbl_cr_ctl3 |= 5 << 1;
#endif
		wrmsr(0x11e, bbl_cr_ctl3);
	}

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));
	write_eflags(eflags);
#endif /* CPU_PPRO2CELERON */
}

/*
 * Initialize special VIA C3/C7 features
 */
static void
init_via(void)
{
	u_int regs[4], val;
	u_int64_t msreg;

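	/*
	 * VIA/Centaur CPUs expose their feature flags in a vendor CPUID
	 * range: leaf 0xc0000000 returns the highest supported Centaur
	 * leaf in EAX, and leaf 0xc0000001 reports the PadLock feature
	 * bits in EDX.
	 */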
	do_cpuid(0xc0000000, regs);
	val = regs[0];
	if (val >= 0xc0000001) {
		do_cpuid(0xc0000001, regs);
		val = regs[3];
	} else
		val = 0;

	/* Enable RNG if present and disabled */
	if (val & VIA_CPUID_HAS_RNG) {
		if (!(val & VIA_CPUID_DO_RNG)) {
			msreg = rdmsr(0x110B);
			msreg |= 0x40;
			wrmsr(0x110B, msreg);
		}
		via_feature_rng = VIA_HAS_RNG;
	}
	/* Enable AES engine if present and disabled */
	if (val & VIA_CPUID_HAS_ACE) {
		if (!(val & VIA_CPUID_DO_ACE)) {
			msreg = rdmsr(0x1107);
			msreg |= (0x01 << 28);
			wrmsr(0x1107, msreg);
		}
		via_feature_xcrypt |= VIA_HAS_AES;
	}
	/* Enable ACE2 engine if present and disabled */
	if (val & VIA_CPUID_HAS_ACE2) {
		if (!(val & VIA_CPUID_DO_ACE2)) {
			msreg = rdmsr(0x1107);
			msreg |= (0x01 << 28);
			wrmsr(0x1107, msreg);
		}
		via_feature_xcrypt |= VIA_HAS_AESCTR;
	}
	/* Enable SHA engine if present and disabled */
	if (val & VIA_CPUID_HAS_PHE) {
		if (!(val & VIA_CPUID_DO_PHE)) {
			msreg = rdmsr(0x1107);
			msreg |= (0x01 << 28);
			wrmsr(0x1107, msreg);
		}
		via_feature_xcrypt |= VIA_HAS_SHA;
	}
	/* Enable MM engine if present and disabled */
	if (val & VIA_CPUID_HAS_PMM) {
		if (!(val & VIA_CPUID_DO_PMM)) {
			msreg = rdmsr(0x1107);
			msreg |= (0x01 << 28);
			wrmsr(0x1107, msreg);
		}
		via_feature_xcrypt |= VIA_HAS_MM;
	}
}

#endif /* I686_CPU */

/*
 * Initialize CR4 (Control register 4) to enable SSE instructions.
 */
void
enable_sse(void)
{
#ifndef CPU_DISABLE_SSE
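	/*
	 * CR4_FXSR (CR4.OSFXSR) enables the fxsave/fxrstor instructions
	 * for FPU/SSE context switching, and CR4_XMM (CR4.OSXMMEXCPT)
	 * unmasks SIMD floating point exceptions; both must be set
	 * before SSE instructions can be executed.
	 */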
	if ((cpu_feature & CPUID_SSE) && (cpu_feature & CPUID_FXSR)) {
		load_cr4(rcr4() | CR4_FXSR | CR4_XMM);
		cpu_fxsr = hw_instruction_sse = 1;
	}
#endif
}

#ifdef I686_CPU
static
void
init_686_amd(void)
{
#ifdef CPU_ATHLON_SSE_HACK
	/*
	 * Sometimes the BIOS doesn't enable SSE instructions.
	 * According to AMD document 20734, the mobile
	 * Duron, the (mobile) Athlon 4 and the Athlon MP
	 * support SSE. These correspond to cpu_id 0x66X,
	 * 0x67X, or 0x68X.
	 */
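	/*
	 * If the BIOS left SSE disabled, clear what appears to be the
	 * SSEDIS bit (bit 15) in HWCR (MSR 0xc0010015) and re-read the
	 * feature flags from CPUID leaf 1.
	 */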
	if ((cpu_feature & CPUID_XMM) == 0 &&
	    ((cpu_id & ~0xf) == 0x660 ||
	     (cpu_id & ~0xf) == 0x670 ||
	     (cpu_id & ~0xf) == 0x680)) {
		u_int regs[4];
		wrmsr(0xC0010015, rdmsr(0xC0010015) & ~0x08000);
		do_cpuid(1, regs);
		cpu_feature = regs[3];
	}
#endif
}
#endif /* I686_CPU */

void
initializecpu(void)
{
	uint64_t msr;

	switch (cpu) {
#ifdef I486_CPU
	case CPU_BLUE:
		init_bluelightning();
		break;
	case CPU_486DLC:
		init_486dlc();
		break;
	case CPU_CY486DX:
		init_cy486dx();
		break;
	case CPU_M1SC:
		init_5x86();
		break;
#ifdef CPU_I486_ON_386
	case CPU_486:
		init_i486_on_386();
		break;
#endif
	case CPU_M1:
		init_6x86();
		break;
#endif /* I486_CPU */
#ifdef I686_CPU
	case CPU_M2:
		init_6x86MX();
		break;
	case CPU_686:
		if (cpu_vendor_id == CPU_VENDOR_INTEL) {
			switch (cpu_id & 0xff0) {
			case 0x610:
				init_ppro();
				break;
			case 0x660:
				init_mendocino();
				break;
			}
		} else if (cpu_vendor_id == CPU_VENDOR_AMD) {
			init_686_amd();
		} else if (cpu_vendor_id == CPU_VENDOR_CENTAUR) {
			switch (cpu_id & 0xff0) {
			case 0x690:
				if ((cpu_id & 0xf) < 3)
					break;
				/* fall through. */
			case 0x6a0:
			case 0x6d0:
			case 0x6f0:
				init_via();
				break;
			default:
				break;
			}
		}
		break;
#endif
	default:
		break;
	}
	enable_sse();

	if (cpu_vendor_id == CPU_VENDOR_AMD) {
		switch ((cpu_id & 0xFF0000)) {
		case 0x100000:
		case 0x120000:
			/*
			 * Errata 721 is the cpu bug found by yours truly
			 * (Matthew Dillon). It is a bug where a sequence
			 * of 5 or more popq's + a retq, under involved
			 * deep recursion circumstances, can cause the %rsp
			 * to not be properly updated, almost always
			 * resulting in a seg-fault soon after.
			 */
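			/*
			 * The published workaround is believed to be
			 * setting bit 0 of MSR 0xc0011029 (see AMD's
			 * revision guides for the affected families),
			 * which is what the code below does.
			 */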
			msr = rdmsr(0xc0011029);
			if ((msr & 1) == 0) {
				kprintf("Errata 721 workaround installed\n");
				msr |= 1;
				wrmsr(0xc0011029, msr);
			}
			break;
		}
	}

	if (cpu_feature2 & CPUID2_VMM)
		vmm_guest = 1;

	TUNABLE_INT_FETCH("hw.clflush_enable", &hw_clflush_enable);
	if (cpu_feature & CPUID_CLFSH) {
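		/*
		 * CPUID leaf 1 reports the CLFLUSH line size in EBX bits
		 * 15:8 (stored in cpu_procinfo) as a count of 8-byte
		 * quadwords, hence the multiplication by 8 to get bytes.
		 */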
		cpu_clflush_line_size = ((cpu_procinfo >> 8) & 0xff) * 8;

		if (hw_clflush_enable == 0 ||
		    ((hw_clflush_enable == -1) && vmm_guest))
			cpu_feature &= ~CPUID_CLFSH;
	}
}

#if defined(I586_CPU) && defined(CPU_WT_ALLOC)
/*
 * Enable the write allocate feature of AMD processors.
 * The following two functions require the Maxmem variable to be set.
 */
void
enable_K5_wt_alloc(void)
{
	u_int64_t	msr;

	/*
	 * Write allocate is supported only on models 1, 2, and 3, with
	 * a stepping of 4 or greater.
	 */
	if (((cpu_id & 0xf0) > 0) && ((cpu_id & 0x0f) > 3)) {
		cpu_disable_intr();
		msr = rdmsr(0x83);		/* HWCR */
		wrmsr(0x83, msr & ~0x10);	/* clear the write allocate
						 * enable bit while we
						 * reconfigure */

		/*
		 * We have to tell the chip where the top of memory is,
		 * since video cards could have frame buffers there,
		 * memory-mapped I/O could be there, etc.
		 */
		if (Maxmem > 0)
			msr = Maxmem / 16;
		else
			msr = 0;
		msr |= AMD_WT_ALLOC_TME | AMD_WT_ALLOC_FRE;

		/*
		 * There is no way to know whether the 15-16M hole exists
		 * or not. Therefore, we disable write allocate for this
		 * range.
		 */
		wrmsr(0x86, 0x0ff00f0);
		msr |= AMD_WT_ALLOC_PRE;
		wrmsr(0x85, msr);

		msr = rdmsr(0x83);
		wrmsr(0x83, msr | 0x10);	/* enable write allocate */

		cpu_enable_intr();
	}
}

void
enable_K6_wt_alloc(void)
{
	quad_t	size;
	u_int64_t	whcr;
	u_long	eflags;

	eflags = read_eflags();
	cpu_disable_intr();
	wbinvd();

#ifdef CPU_DISABLE_CACHE
	/*
	 * Certain K6-2 boxes become unstable when write allocation is
	 * enabled.
	 */
	/*
	 * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
	 * but only the Cache Inhibit (CI) bit (bit 3 of TR12) is supported.
	 * All other bits in TR12 have no effect on the processor's operation.
	 * The I/O Trap Restart function (bit 9 of TR12) is always enabled
	 * on the AMD-K6.
	 */
	wrmsr(0x0000000e, (u_int64_t)0x0008);
#endif
	/* Don't assume that memory size is aligned to 4M. */
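	/*
	 * Maxmem is in 4 KB pages: >> 8 converts pages to megabytes, and
	 * (x + 3) >> 2 rounds up to the 4 MB granularity of the WHCR
	 * write-allocate limit field.
	 */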
	if (Maxmem > 0)
		size = ((Maxmem >> 8) + 3) >> 2;
	else
		size = 0;

	/* Limit is 508M bytes. */
	if (size > 0x7f)
		size = 0x7f;
	whcr = (rdmsr(0xc0000082) & ~(0x7fLL << 1)) | (size << 1);

#if defined(NO_MEMORY_HOLE)
	if (whcr & (0x7fLL << 1))
		whcr |= 0x0001LL;
#else
	/*
	 * There is no way to know whether the 15-16M hole exists or not.
	 * Therefore, we disable write allocate for this range.
	 */
	whcr &= ~0x0001LL;
#endif
	wrmsr(0x0c0000082, whcr);

	write_eflags(eflags);
}

void
enable_K6_2_wt_alloc(void)
{
	quad_t	size;
	u_int64_t	whcr;
	u_long	eflags;

	eflags = read_eflags();
	cpu_disable_intr();
	wbinvd();

#ifdef CPU_DISABLE_CACHE
	/*
	 * Certain K6-2 boxes become unstable when write allocation is
	 * enabled.
	 */
	/*
	 * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
	 * but only the Cache Inhibit (CI) bit (bit 3 of TR12) is supported.
	 * All other bits in TR12 have no effect on the processor's operation.
	 * The I/O Trap Restart function (bit 9 of TR12) is always enabled
	 * on the AMD-K6.
	 */
	wrmsr(0x0000000e, (u_int64_t)0x0008);
#endif
	/* Don't assume that memory size is aligned to 4M. */
	if (Maxmem > 0)
		size = ((Maxmem >> 8) + 3) >> 2;
	else
		size = 0;

	/* Limit is 4092M bytes. */
	if (size > 0x3ff)
		size = 0x3ff;
	whcr = (rdmsr(0xc0000082) & ~(0x3ffLL << 22)) | (size << 22);

#if defined(NO_MEMORY_HOLE)
	if (whcr & (0x3ffLL << 22))
		whcr |= 1LL << 16;
#else
	/*
	 * There is no way to know whether the 15-16M hole exists or not.
	 * Therefore, we disable write allocate for this range.
	 */
	whcr &= ~(1LL << 16);
#endif
	wrmsr(0x0c0000082, whcr);

	write_eflags(eflags);
}
#endif /* I586_CPU && CPU_WT_ALLOC */

#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

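/*
 * Dump the Cyrix configuration registers. Invoked from the DDB prompt
 * as "show cyrixreg".
 */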
DB_SHOW_COMMAND(cyrixreg, cyrixreg)
{
	u_long eflags;
	u_int cr0;
	u_char ccr1, ccr2, ccr3;
	u_char ccr0 = 0, ccr4 = 0, ccr5 = 0, pcr0 = 0;

	cr0 = rcr0();
	if (cpu_vendor_id == CPU_VENDOR_CYRIX) {
		eflags = read_eflags();
		cpu_disable_intr();

		if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX)) {
			ccr0 = read_cyrix_reg(CCR0);
		}
		ccr1 = read_cyrix_reg(CCR1);
		ccr2 = read_cyrix_reg(CCR2);
		ccr3 = read_cyrix_reg(CCR3);
		if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
			write_cyrix_reg(CCR3, CCR3_MAPEN0);
			ccr4 = read_cyrix_reg(CCR4);
			if ((cpu == CPU_M1) || (cpu == CPU_M2))
				ccr5 = read_cyrix_reg(CCR5);
			else
				pcr0 = read_cyrix_reg(PCR0);
			write_cyrix_reg(CCR3, ccr3);	/* Restore CCR3. */
		}
		write_eflags(eflags);

		if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX))
			kprintf("CCR0=%x, ", (u_int)ccr0);

		kprintf("CCR1=%x, CCR2=%x, CCR3=%x",
			(u_int)ccr1, (u_int)ccr2, (u_int)ccr3);
		if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
			kprintf(", CCR4=%x, ", (u_int)ccr4);
			if (cpu == CPU_M1SC)
				kprintf("PCR0=%x\n", pcr0);
			else
				kprintf("CCR5=%x\n", ccr5);
		}
	}
	kprintf("CR0=%x\n", cr0);
}
#endif /* DDB */