Remove i386 support.
[dragonfly.git] / sys / platform / pc32 / i386 / initcpu.c
CommitLineData
984263bc
MD
1/*
2 * Copyright (c) KATO Takenori, 1997, 1998.
3 *
4 * All rights reserved. Unpublished rights reserved under the copyright
5 * laws of Japan.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer as
13 * the first lines of this file unmodified.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * $FreeBSD: src/sys/i386/i386/initcpu.c,v 1.19.2.9 2003/04/05 13:47:19 dwmalone Exp $
26be20a0 30 * $DragonFly: src/sys/platform/pc32/i386/initcpu.c,v 1.10 2006/12/23 00:27:03 swildner Exp $
984263bc
MD
31 */
32
33#include "opt_cpu.h"
34
35#include <sys/param.h>
36#include <sys/kernel.h>
37#include <sys/systm.h>
38#include <sys/sysctl.h>
39
40#include <machine/cputypes.h>
41#include <machine/md_var.h>
42#include <machine/specialreg.h>
43
/*
 * Prototypes.  initializecpu() is the common entry point called from
 * machdep startup code; everything else is a model-specific helper.
 */
void initializecpu(void);
#if defined(I586_CPU) && defined(CPU_WT_ALLOC)
/* AMD write-allocate setup; these require the Maxmem variable to be set. */
void enable_K5_wt_alloc(void);
void enable_K6_wt_alloc(void);
void enable_K6_2_wt_alloc(void);
#endif

#ifdef I486_CPU
static void init_5x86(void);
static void init_bluelightning(void);
static void init_486dlc(void);
static void init_cy486dx(void);
#ifdef CPU_I486_ON_386
static void init_i486_on_386(void);
#endif
static void init_6x86(void);
#endif /* I486_CPU */

#ifdef I686_CPU
static void init_6x86MX(void);
static void init_ppro(void);
static void init_mendocino(void);
#endif

/* Set to 1 by enable_sse(); exported read-only as hw.instruction_sse. */
static int hw_instruction_sse;
SYSCTL_INT(_hw, OID_AUTO, instruction_sse, CTLFLAG_RD,
	&hw_instruction_sse, 0, "SIMD/MMX2 instructions available in CPU");

#ifndef CPU_DISABLE_SSE
u_int cpu_fxsr;	/* SSE enabled */
#endif
75
76#ifdef I486_CPU
/*
 * IBM Blue Lightning
 *
 * Program the Blue Lightning model-specific registers (FPU operand
 * caching, cache region enables, clock multiplier), then re-enable
 * the cache.  Interrupts are disabled for the duration; the caller's
 * interrupt state is restored via write_eflags().
 */
static void
init_bluelightning(void)
{
	u_long eflags;

	eflags = read_eflags();
	cpu_disable_intr();

	/* Disable and flush the cache before touching cache-control MSRs. */
	load_cr0(rcr0() | CR0_CD | CR0_NW);
	invd();

#ifdef CPU_BLUELIGHTNING_FPU_OP_CACHE
	wrmsr(0x1000, 0x9c92LL);	/* FP operand can be cacheable on Cyrix FPU */
#else
	wrmsr(0x1000, 0x1c92LL);	/* Intel FPU */
#endif
	/* Enables 13MB and 0-640KB cache. */
	wrmsr(0x1001, (0xd0LL << 32) | 0x3ff);
#ifdef CPU_BLUELIGHTNING_3X
	wrmsr(0x1002, 0x04000000LL);	/* Enables triple-clock mode. */
#else
	wrmsr(0x1002, 0x03000000LL);	/* Enables double-clock mode. */
#endif

	/* Enable caching in CR0. */
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	invd();
	write_eflags(eflags);
}
109
/*
 * Cyrix 486SLC/DLC/SR/DR series
 *
 * Configure the CCR0 cache-control register.  Without CYRIX_CACHE_WORKS
 * the cache is left in its conservative (NC1 | BARB) configuration;
 * with it, the non-cacheable regions are cleared and caching is
 * re-enabled in CR0.
 */
static void
init_486dlc(void)
{
	u_long eflags;
	u_char ccr0;

	eflags = read_eflags();
	cpu_disable_intr();
	invd();

	ccr0 = read_cyrix_reg(CCR0);
#ifndef CYRIX_CACHE_WORKS
	ccr0 |= CCR0_NC1 | CCR0_BARB;
	write_cyrix_reg(CCR0, ccr0);
	invd();
#else
	ccr0 &= ~CCR0_NC0;
#ifndef CYRIX_CACHE_REALLY_WORKS
	ccr0 |= CCR0_NC1 | CCR0_BARB;
#else
	ccr0 |= CCR0_NC1;
#endif
#ifdef CPU_DIRECT_MAPPED_CACHE
	ccr0 |= CCR0_CO;	/* Direct mapped mode. */
#endif
	write_cyrix_reg(CCR0, ccr0);

	/* Clear non-cacheable region. */
	write_cyrix_reg(NCR1+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR2+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR3+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR4+2, NCR_SIZE_0K);

	write_cyrix_reg(0, 0);	/* dummy write */

	/* Enable caching in CR0. */
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	invd();
#endif /* !CYRIX_CACHE_WORKS */
	write_eflags(eflags);
}
154
155
/*
 * Cyrix 486S/DX series
 *
 * Only touches CCR2: optionally sets the suspend-on-HLT bit when the
 * CPU_SUSP_HLT kernel option is configured.
 */
static void
init_cy486dx(void)
{
	u_long eflags;
	u_char ccr2;

	eflags = read_eflags();
	cpu_disable_intr();
	invd();

	ccr2 = read_cyrix_reg(CCR2);
#ifdef CPU_SUSP_HLT
	ccr2 |= CCR2_SUSP_HLT;
#endif

	write_cyrix_reg(CCR2, ccr2);
	write_eflags(eflags);
}
177
178
/*
 * Cyrix 5x86
 *
 * Full configuration of the 5x86 cache/performance registers: CCR2,
 * CCR4 and PCR0 are programmed (CCR3 is temporarily set to MAPEN0 to
 * expose the extended registers, then restored), and the cache is
 * re-enabled in write-back-disabled mode (CD=0, NW=1) with the NW bit
 * locked via CCR2_LOCK_NW.
 */
static void
init_5x86(void)
{
	u_long eflags;
	u_char ccr2, ccr3, ccr4, pcr0;

	eflags = read_eflags();
	cpu_disable_intr();

	/* Disable and flush the cache while reprogramming it. */
	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	read_cyrix_reg(CCR3);	/* dummy */

	/* Initialize CCR2. */
	ccr2 = read_cyrix_reg(CCR2);
	ccr2 |= CCR2_WB;
#ifdef CPU_SUSP_HLT
	ccr2 |= CCR2_SUSP_HLT;
#else
	ccr2 &= ~CCR2_SUSP_HLT;
#endif
	ccr2 |= CCR2_WT1;
	write_cyrix_reg(CCR2, ccr2);

	/* Initialize CCR4. */
	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);	/* expose CCR4/PCR0; restored below */

	ccr4 = read_cyrix_reg(CCR4);
	ccr4 |= CCR4_DTE;
	ccr4 |= CCR4_MEM;
#ifdef CPU_FASTER_5X86_FPU
	ccr4 |= CCR4_FASTFPE;
#else
	ccr4 &= ~CCR4_FASTFPE;
#endif
	ccr4 &= ~CCR4_IOMASK;
	/********************************************************************
	 * WARNING: The "BIOS Writers Guide" mentions that I/O recovery time
	 * should be 0 for errata fix.
	 ********************************************************************/
#ifdef CPU_IORT
	ccr4 |= CPU_IORT & CCR4_IOMASK;
#endif
	write_cyrix_reg(CCR4, ccr4);

	/* Initialize PCR0. */
	/****************************************************************
	 * WARNING: RSTK_EN and LOOP_EN could make your system unstable.
	 * BTB_EN might make your system unstable.
	 ****************************************************************/
	pcr0 = read_cyrix_reg(PCR0);
#ifdef CPU_RSTK_EN
	pcr0 |= PCR0_RSTK;
#else
	pcr0 &= ~PCR0_RSTK;
#endif
#ifdef CPU_BTB_EN
	pcr0 |= PCR0_BTB;
#else
	pcr0 &= ~PCR0_BTB;
#endif
#ifdef CPU_LOOP_EN
	pcr0 |= PCR0_LOOP;
#else
	pcr0 &= ~PCR0_LOOP;
#endif

	/****************************************************************
	 * WARNING: if you use a memory mapped I/O device, don't use
	 * DISABLE_5X86_LSSER option, which may reorder memory mapped
	 * I/O access.
	 * IF YOUR MOTHERBOARD HAS PCI BUS, DON'T DISABLE LSSER.
	 ****************************************************************/
#ifdef CPU_DISABLE_5X86_LSSER
	pcr0 &= ~PCR0_LSSER;
#else
	pcr0 |= PCR0_LSSER;
#endif
	write_cyrix_reg(PCR0, pcr0);

	/* Restore CCR3. */
	write_cyrix_reg(CCR3, ccr3);

	read_cyrix_reg(0x80);	/* dummy */

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);
	load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0, NW = 1 */
	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	write_eflags(eflags);
}
277
#ifdef CPU_I486_ON_386
/*
 * There are i486 based upgrade products for i386 machines.
 * In this case, the BIOS doesn't enable the CPU cache, so do it
 * here by clearing CD/NW in CR0.
 */
static void
init_i486_on_386(void)
{
	u_long eflags;

	eflags = read_eflags();
	cpu_disable_intr();

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0, NW = 0 */

	write_eflags(eflags);
}
#endif
296
/*
 * Cyrix 6x86
 *
 * Program the 6x86 configuration registers (CCR0/CCR1/CCR2/CCR4/CCR5),
 * then re-enable the cache.  Pre-2.7 revisions (identified via
 * cyrix_did) get the cache in write-through mode (NW=1) because
 * write-back L1 could crash the system on those parts.
 *
 * XXX - What should I do here? Please let me know.
 */
static void
init_6x86(void)
{
	u_long eflags;
	u_char ccr3, ccr4;

	eflags = read_eflags();
	cpu_disable_intr();

	/* Disable and flush the cache while reprogramming it. */
	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	/* Initialize CCR0. */
	write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);

	/* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
#else
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
#endif

	/* Initialize CCR2. */
#ifdef CPU_SUSP_HLT
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
#else
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
#endif

	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);	/* expose CCR4/CCR5; restored below */

	/* Initialize CCR4. */
	ccr4 = read_cyrix_reg(CCR4);
	ccr4 |= CCR4_DTE;
	ccr4 &= ~CCR4_IOMASK;
#ifdef CPU_IORT
	write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
#else
	write_cyrix_reg(CCR4, ccr4 | 7);	/* default I/O recovery time */
#endif

	/* Initialize CCR5. */
#ifdef CPU_WT_ALLOC
	write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
#endif

	/* Restore CCR3. */
	write_cyrix_reg(CCR3, ccr3);

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);

	/*
	 * Earlier revision of the 6x86 CPU could crash the system if
	 * L1 cache is in write-back mode.
	 */
	if ((cyrix_did & 0xff00) > 0x1600)
		load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	else {
		/* Revision 2.6 and lower. */
#ifdef CYRIX_CACHE_REALLY_WORKS
		load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
#else
		load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0 and NW = 1 */
#endif
	}

	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	write_eflags(eflags);
}
375#endif /* I486_CPU */
376
377#ifdef I686_CPU
/*
 * Cyrix 6x86MX (code-named M2)
 *
 * Same register programming sequence as the 6x86, minus the revision-
 * dependent write-back workaround: the cache is always re-enabled with
 * CD=0, NW=0 here.
 *
 * XXX - What should I do here? Please let me know.
 */
static void
init_6x86MX(void)
{
	u_long eflags;
	u_char ccr3, ccr4;

	eflags = read_eflags();
	cpu_disable_intr();

	/* Disable and flush the cache while reprogramming it. */
	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	/* Initialize CCR0. */
	write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);

	/* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
#else
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
#endif

	/* Initialize CCR2. */
#ifdef CPU_SUSP_HLT
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
#else
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
#endif

	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);	/* expose CCR4/CCR5; restored below */

	/* Initialize CCR4. */
	ccr4 = read_cyrix_reg(CCR4);
	ccr4 &= ~CCR4_IOMASK;
#ifdef CPU_IORT
	write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
#else
	write_cyrix_reg(CCR4, ccr4 | 7);	/* default I/O recovery time */
#endif

	/* Initialize CCR5. */
#ifdef CPU_WT_ALLOC
	write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
#endif

	/* Restore CCR3. */
	write_cyrix_reg(CCR3, ccr3);

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */

	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	write_eflags(eflags);
}
442
/*
 * Intel Pentium Pro setup.  On a UP kernel the local APIC is unused,
 * so turn it off via the APIC base MSR.
 */
static void
init_ppro(void)
{
#ifndef SMP
	u_int64_t apicbase;

	/*
	 * Local APIC should be disabled in UP kernel.
	 */
	apicbase = rdmsr(0x1b);		/* IA32_APIC_BASE MSR */
	apicbase &= ~0x800LL;		/* clear the APIC global enable bit */
	wrmsr(0x1b, apicbase);
#endif
}
457
/*
 * Initialize BBL_CR_CTL3 (Control register 3: used to configure the
 * L2 cache) on Mendocino-class Celerons when the CPU_PPRO2CELERON
 * option is set.  If the BIOS already configured the L2 cache
 * (bit 0 of the MSR set), nothing is changed.
 *
 * Note: the latency override consistently uses CPU_CELERON_L2_LATENCY;
 * the previous code guarded on CPU_CELERON_L2_LATENCY but then
 * referenced an undefined CPU_L2_LATENCY macro, which could not
 * compile when the option was enabled.
 */
static void
init_mendocino(void)
{
#ifdef CPU_PPRO2CELERON
	u_long eflags;
	u_int64_t bbl_cr_ctl3;

	eflags = read_eflags();
	cpu_disable_intr();

	/* Disable and flush the cache while reprogramming the L2. */
	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	bbl_cr_ctl3 = rdmsr(0x11e);	/* BBL_CR_CTL3 */

	/* If the L2 cache is configured, do nothing. */
	if (!(bbl_cr_ctl3 & 1)) {
		bbl_cr_ctl3 = 0x134052bLL;

		/* Set L2 Cache Latency (Default: 5). */
#ifdef CPU_CELERON_L2_LATENCY
#if CPU_CELERON_L2_LATENCY > 15
#error invalid CPU_CELERON_L2_LATENCY.
#endif
		bbl_cr_ctl3 |= CPU_CELERON_L2_LATENCY << 1;
#else
		bbl_cr_ctl3 |= 5 << 1;
#endif
		wrmsr(0x11e, bbl_cr_ctl3);
	}

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));
	write_eflags(eflags);
#endif /* CPU_PPRO2CELERON */
}
497
498#endif /* I686_CPU */
499
/*
 * Initialize CR4 (Control register 4) to enable SSE instructions.
 * Requires both the SSE and FXSR CPUID feature bits; records the
 * result in cpu_fxsr and the hw.instruction_sse sysctl.
 */
void
enable_sse(void)
{
#ifndef CPU_DISABLE_SSE
	if ((cpu_feature & CPUID_SSE) && (cpu_feature & CPUID_FXSR)) {
		load_cr4(rcr4() | CR4_FXSR | CR4_XMM);
		cpu_fxsr = hw_instruction_sse = 1;
	}
#endif
}
513
/*
 * AMD 686-class fixups, called from initializecpu() for AuthenticAMD
 * CPU_686 processors.  Both halves are compile-time optional.
 */
static
void
init_686_amd(void)
{
#if defined(I686_CPU) && defined(CPU_ATHLON_SSE_HACK)
	/*
	 * Sometimes the BIOS doesn't enable SSE instructions.
	 * According to AMD document 20734, the mobile
	 * Duron, the (mobile) Athlon 4 and the Athlon MP
	 * support SSE. These correspond to cpu_id 0x66X
	 * or 0x67X.
	 */
	if ((cpu_feature & CPUID_XMM) == 0 &&
	    ((cpu_id & ~0xf) == 0x660 ||
	     (cpu_id & ~0xf) == 0x670 ||
	     (cpu_id & ~0xf) == 0x680)) {
		u_int regs[4];
		/* Clear the bit hiding SSE, then re-read the feature flags. */
		wrmsr(0xC0010015, rdmsr(0xC0010015) & ~0x08000);
		do_cpuid(1, regs);
		cpu_feature = regs[3];	/* EDX feature word */
	}
#endif
#if defined(I686_CPU) && defined(CPU_AMD64X2_INTR_SPAM)
	/*
	 * Set the LINTEN bit in the HyperTransport Transaction
	 * Control Register.
	 *
	 * This will cause EXTINT and NMI interrupts routed over the
	 * hypertransport bus to be fed into the LAPIC LINT0/LINT1. If
	 * the bit isn't set, the interrupts will go to the general cpu
	 * INTR/NMI pins. On a dual-core cpu the interrupt winds up
	 * going to BOTH cpus. The first cpu that does the interrupt ack
	 * cycle will get the correct interrupt. The second cpu that does
	 * it will get a spurious interrupt vector (typically IRQ 7).
	 */
	if ((cpu_id & 0xff0) == 0xf30) {
		int32_t tcr;
		/* Select the HT transaction control register (0x68) via
		 * the PCI config address port. */
		outl(0x0cf8,
		    (1 << 31) |		/* enable */
		    (0 << 16) |		/* bus */
		    (24 << 11) |	/* dev (cpu + 24) */
		    (0 << 8) |		/* func */
		    0x68		/* reg */
		    );
		tcr = inl(0xcfc);
		if ((tcr & 0x00010000) == 0) {
			outl(0xcfc, tcr|0x00010000);
			additional_cpu_info("AMD: Rerouting HyperTransport "
					    "EXTINT/NMI to APIC");
		}
		outl(0x0cf8, 0);	/* deselect the config register */
	}
#endif
}
984263bc 568
20f0a2d3
MD
/*
 * Common CPU initialization entry point: dispatch to the appropriate
 * model-specific setup routine based on the detected cpu class, then
 * enable SSE if the CPU supports it.
 */
void
initializecpu(void)
{
	switch (cpu) {
#ifdef I486_CPU
	case CPU_BLUE:
		init_bluelightning();
		break;
	case CPU_486DLC:
		init_486dlc();
		break;
	case CPU_CY486DX:
		init_cy486dx();
		break;
	case CPU_M1SC:
		init_5x86();
		break;
#ifdef CPU_I486_ON_386
	case CPU_486:
		init_i486_on_386();
		break;
#endif
	case CPU_M1:
		init_6x86();
		break;
#endif /* I486_CPU */
#ifdef I686_CPU
	case CPU_M2:
		init_6x86MX();
		break;
	case CPU_686:
		if (strcmp(cpu_vendor, "GenuineIntel") == 0) {
			/* 0x61x: Pentium Pro; 0x66x: Mendocino Celeron */
			switch (cpu_id & 0xff0) {
			case 0x610:
				init_ppro();
				break;
			case 0x660:
				init_mendocino();
				break;
			}
		} else if (strcmp(cpu_vendor, "AuthenticAMD") == 0) {
			init_686_amd();
		}
		break;
#endif
	default:
		break;
	}
	enable_sse();
}
619
620#if defined(I586_CPU) && defined(CPU_WT_ALLOC)
621/*
622 * Enable write allocate feature of AMD processors.
623 * Following two functions require the Maxmem variable being set.
624 */
625void
626enable_K5_wt_alloc(void)
627{
628 u_int64_t msr;
629
630 /*
631 * Write allocate is supported only on models 1, 2, and 3, with
632 * a stepping of 4 or greater.
633 */
634 if (((cpu_id & 0xf0) > 0) && ((cpu_id & 0x0f) > 3)) {
8a8d5d85 635 cpu_disable_intr();
984263bc
MD
636 msr = rdmsr(0x83); /* HWCR */
637 wrmsr(0x83, msr & !(0x10));
638
639 /*
640 * We have to tell the chip where the top of memory is,
641 * since video cards could have frame bufferes there,
642 * memory-mapped I/O could be there, etc.
643 */
644 if(Maxmem > 0)
645 msr = Maxmem / 16;
646 else
647 msr = 0;
648 msr |= AMD_WT_ALLOC_TME | AMD_WT_ALLOC_FRE;
7d34994c 649
984263bc
MD
650 /*
651 * There is no way to know wheter 15-16M hole exists or not.
652 * Therefore, we disable write allocate for this range.
653 */
7d34994c
SW
654 wrmsr(0x86, 0x0ff00f0);
655 msr |= AMD_WT_ALLOC_PRE;
984263bc
MD
656 wrmsr(0x85, msr);
657
658 msr=rdmsr(0x83);
659 wrmsr(0x83, msr|0x10); /* enable write allocate */
660
7b95be2a 661 cpu_enable_intr();
984263bc
MD
662 }
663}
664
/*
 * Enable the write allocate feature on AMD K6 processors by
 * programming the WHCR MSR (0xc0000082).  Requires Maxmem to be set.
 */
void
enable_K6_wt_alloc(void)
{
	quad_t size;
	u_int64_t whcr;
	u_long eflags;

	eflags = read_eflags();
	cpu_disable_intr();
	wbinvd();

#ifdef CPU_DISABLE_CACHE
	/*
	 * Certain K6-2 box becomes unstable when write allocation is
	 * enabled.
	 */
	/*
	 * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
	 * but only the Cache Inhibit (CI) (bit 3 of TR12) is supported.
	 * All other bits in TR12 have no effect on the processor's operation.
	 * The I/O Trap Restart function (bit 9 of TR12) is always enabled
	 * on the AMD-K6.
	 */
	wrmsr(0x0000000e, (u_int64_t)0x0008);
#endif
	/* Don't assume that memory size is aligned with 4M. */
	if (Maxmem > 0)
		size = ((Maxmem >> 8) + 3) >> 2;	/* pages -> 4M units, rounded up */
	else
		size = 0;

	/* Limit is 508M bytes. */
	if (size > 0x7f)
		size = 0x7f;
	/* 7-bit size field at bit 1 of WHCR. */
	whcr = (rdmsr(0xc0000082) & ~(0x7fLL << 1)) | (size << 1);

#if defined(NO_MEMORY_HOLE)
	if (whcr & (0x7fLL << 1))
		whcr |= 0x0001LL;
#else
	/*
	 * There is no way to know whether the 15-16M hole exists or not.
	 * Therefore, we disable write allocate for this range.
	 */
	whcr &= ~0x0001LL;
#endif
	wrmsr(0x0c0000082, whcr);

	write_eflags(eflags);
}
715
716void
717enable_K6_2_wt_alloc(void)
718{
719 quad_t size;
720 u_int64_t whcr;
721 u_long eflags;
722
723 eflags = read_eflags();
8a8d5d85 724 cpu_disable_intr();
984263bc
MD
725 wbinvd();
726
727#ifdef CPU_DISABLE_CACHE
728 /*
729 * Certain K6-2 box becomes unstable when write allocation is
730 * enabled.
731 */
732 /*
733 * The AMD-K6 processer provides the 64-bit Test Register 12(TR12),
734 * but only the Cache Inhibit(CI) (bit 3 of TR12) is suppported.
735 * All other bits in TR12 have no effect on the processer's operation.
736 * The I/O Trap Restart function (bit 9 of TR12) is always enabled
737 * on the AMD-K6.
738 */
739 wrmsr(0x0000000e, (u_int64_t)0x0008);
740#endif
741 /* Don't assume that memory size is aligned with 4M. */
742 if (Maxmem > 0)
743 size = ((Maxmem >> 8) + 3) >> 2;
744 else
745 size = 0;
746
747 /* Limit is 4092M bytes. */
748 if (size > 0x3fff)
749 size = 0x3ff;
750 whcr = (rdmsr(0xc0000082) & ~(0x3ffLL << 22)) | (size << 22);
751
7d34994c
SW
752#if defined(NO_MEMORY_HOLE)
753 if (whcr & (0x3ffLL << 22))
754 whcr |= 1LL << 16;
984263bc
MD
755#else
756 /*
757 * There is no way to know wheter 15-16M hole exists or not.
758 * Therefore, we disable write allocate for this range.
759 */
760 whcr &= ~(1LL << 16);
761#endif
762 wrmsr(0x0c0000082, whcr);
763
764 write_eflags(eflags);
984263bc
MD
765}
#endif /* I586_CPU && CPU_WT_ALLOC */
767
768#include "opt_ddb.h"
769#ifdef DDB
770#include <ddb/ddb.h>
771
/*
 * DDB "show cyrixreg" command: dump the Cyrix configuration registers
 * (CCR0-CCR5/PCR0, as applicable for the detected model) and CR0.
 */
DB_SHOW_COMMAND(cyrixreg, cyrixreg)
{
	u_long eflags;
	u_int cr0;
	u_char ccr1, ccr2, ccr3;
	u_char ccr0 = 0, ccr4 = 0, ccr5 = 0, pcr0 = 0;

	cr0 = rcr0();
	if (strcmp(cpu_vendor,"CyrixInstead") == 0) {
		eflags = read_eflags();
		cpu_disable_intr();


		/* CCR0 is not read on the M1SC (5x86) and CY486DX models. */
		if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX)) {
			ccr0 = read_cyrix_reg(CCR0);
		}
		ccr1 = read_cyrix_reg(CCR1);
		ccr2 = read_cyrix_reg(CCR2);
		ccr3 = read_cyrix_reg(CCR3);
		if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
			/* Map in the extended registers to read CCR4/CCR5/PCR0. */
			write_cyrix_reg(CCR3, CCR3_MAPEN0);
			ccr4 = read_cyrix_reg(CCR4);
			if ((cpu == CPU_M1) || (cpu == CPU_M2))
				ccr5 = read_cyrix_reg(CCR5);
			else
				pcr0 = read_cyrix_reg(PCR0);
			write_cyrix_reg(CCR3, ccr3);	/* Restore CCR3. */
		}
		write_eflags(eflags);

		if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX))
			kprintf("CCR0=%x, ", (u_int)ccr0);

		kprintf("CCR1=%x, CCR2=%x, CCR3=%x",
			(u_int)ccr1, (u_int)ccr2, (u_int)ccr3);
		if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
			kprintf(", CCR4=%x, ", (u_int)ccr4);
			if (cpu == CPU_M1SC)
				kprintf("PCR0=%x\n", pcr0);
			else
				kprintf("CCR5=%x\n", ccr5);
		}
	}
	kprintf("CR0=%x\n", cr0);
}
817#endif /* DDB */