LINT build test. Aggregated source code adjustments to bring most of the
[dragonfly.git] / sys / platform / pc32 / i386 / initcpu.c
CommitLineData
984263bc
MD
1/*
2 * Copyright (c) KATO Takenori, 1997, 1998.
3 *
4 * All rights reserved. Unpublished rights reserved under the copyright
5 * laws of Japan.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer as
13 * the first lines of this file unmodified.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * $FreeBSD: src/sys/i386/i386/initcpu.c,v 1.19.2.9 2003/04/05 13:47:19 dwmalone Exp $
7b95be2a 30 * $DragonFly: src/sys/platform/pc32/i386/initcpu.c,v 1.4 2003/07/21 07:57:43 dillon Exp $
984263bc
MD
31 */
32
33#include "opt_cpu.h"
34
35#include <sys/param.h>
36#include <sys/kernel.h>
37#include <sys/systm.h>
38#include <sys/sysctl.h>
39
40#include <machine/cputypes.h>
41#include <machine/md_var.h>
42#include <machine/specialreg.h>
43
44void initializecpu(void);
45#if defined(I586_CPU) && defined(CPU_WT_ALLOC)
46void enable_K5_wt_alloc(void);
47void enable_K6_wt_alloc(void);
48void enable_K6_2_wt_alloc(void);
49#endif
50
51#ifdef I486_CPU
52static void init_5x86(void);
53static void init_bluelightning(void);
54static void init_486dlc(void);
55static void init_cy486dx(void);
56#ifdef CPU_I486_ON_386
57static void init_i486_on_386(void);
58#endif
59static void init_6x86(void);
60#endif /* I486_CPU */
61
62#ifdef I686_CPU
63static void init_6x86MX(void);
64static void init_ppro(void);
65static void init_mendocino(void);
66#endif
67
68static int hw_instruction_sse;
69SYSCTL_INT(_hw, OID_AUTO, instruction_sse, CTLFLAG_RD,
70 &hw_instruction_sse, 0, "SIMD/MMX2 instructions available in CPU");
71
72#ifdef CPU_ENABLE_SSE
73u_int cpu_fxsr; /* SSE enabled */
74#endif
75
76#ifdef I486_CPU
/*
 * IBM Blue Lightning
 *
 * Configure the on-chip cache and FPU-operand caching through the
 * Blue Lightning model-specific registers (0x1000-0x1002), then set
 * the clock-multiplier mode.  Interrupts are disabled and the cache
 * is turned off (CR0.CD/NW set + invd) while the MSRs are changed.
 */
static void
init_bluelightning(void)
{
	u_long	eflags;

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	/* PC-98 DMA needs an explicit invd after each transfer. */
	need_post_dma_flush = 1;
#endif

	eflags = read_eflags();
	cpu_disable_intr();

	/* Disable caching while the cache-configuration MSRs are written. */
	load_cr0(rcr0() | CR0_CD | CR0_NW);
	invd();

#ifdef CPU_BLUELIGHTNING_FPU_OP_CACHE
	wrmsr(0x1000, 0x9c92LL);	/* FP operand can be cacheable on Cyrix FPU */
#else
	wrmsr(0x1000, 0x1c92LL);	/* Intel FPU */
#endif
	/* Enables 13MB and 0-640KB cache. */
	wrmsr(0x1001, (0xd0LL << 32) | 0x3ff);
#ifdef CPU_BLUELIGHTNING_3X
	wrmsr(0x1002, 0x04000000LL);	/* Enables triple-clock mode. */
#else
	wrmsr(0x1002, 0x03000000LL);	/* Enables double-clock mode. */
#endif

	/* Enable caching in CR0. */
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	invd();
	write_eflags(eflags);
}
113
/*
 * Cyrix 486SLC/DLC/SR/DR series
 *
 * Program the Cyrix configuration registers: CCR0 cache-control bits
 * and (when the cache is trusted) the NCR1-NCR4 non-cacheable region
 * registers.  Runs with interrupts disabled and flushes the cache
 * around the register updates.
 */
static void
init_486dlc(void)
{
	u_long	eflags;
	u_char	ccr0;

	eflags = read_eflags();
	cpu_disable_intr();
	invd();

	ccr0 = read_cyrix_reg(CCR0);
#ifndef CYRIX_CACHE_WORKS
	/* Cache not fully usable: set NC1 and BARB and leave it at that. */
	ccr0 |= CCR0_NC1 | CCR0_BARB;
	write_cyrix_reg(CCR0, ccr0);
	invd();
#else
	ccr0 &= ~CCR0_NC0;
#ifndef CYRIX_CACHE_REALLY_WORKS
	ccr0 |= CCR0_NC1 | CCR0_BARB;
#else
	ccr0 |= CCR0_NC1;
#endif
#ifdef CPU_DIRECT_MAPPED_CACHE
	ccr0 |= CCR0_CO;			/* Direct mapped mode. */
#endif
	write_cyrix_reg(CCR0, ccr0);

	/* Clear non-cacheable region. */
	write_cyrix_reg(NCR1+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR2+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR3+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR4+2, NCR_SIZE_0K);

	write_cyrix_reg(0, 0);	/* dummy write */

	/* Enable caching in CR0. */
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	invd();
#endif /* !CYRIX_CACHE_WORKS */
	write_eflags(eflags);
}
158
159
/*
 * Cyrix 486S/DX series
 *
 * Adjust CCR2: optionally enable suspend-on-HLT, and on PC-98 enable
 * the write-back cache interface pin while locking the CR0.NW bit.
 */
static void
init_cy486dx(void)
{
	u_long	eflags;
	u_char	ccr2;

	eflags = read_eflags();
	cpu_disable_intr();
	invd();

	ccr2 = read_cyrix_reg(CCR2);
#ifdef CPU_SUSP_HLT
	/* Enter suspend mode on HLT. */
	ccr2 |= CCR2_SUSP_HLT;
#endif

#ifdef PC98
	/* Enables WB cache interface pin and Lock NW bit in CR0. */
	ccr2 |= CCR2_WB | CCR2_LOCK_NW;
	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, ccr2 & ~CCR2_LOCK_NW);
	load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0, NW = 1 */
#endif

	write_cyrix_reg(CCR2, ccr2);
	write_eflags(eflags);
}
189
190
/*
 * Cyrix 5x86
 *
 * Program CCR2 (write-back, suspend-on-HLT, weak write-through for
 * the 640K-1M region), CCR4 (directory table entries, I/O recovery
 * time) and PCR0 (performance features), then re-enable caching with
 * NW set and locked.  The CCR4/PCR0 writes are gated by the MAPEN0
 * window in CCR3, which is restored afterwards.
 */
static void
init_5x86(void)
{
	u_long	eflags;
	u_char	ccr2, ccr3, ccr4, pcr0;

	eflags = read_eflags();
	cpu_disable_intr();

	/* Disable caching while the configuration registers are changed. */
	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	(void)read_cyrix_reg(CCR3);		/* dummy */

	/* Initialize CCR2. */
	ccr2 = read_cyrix_reg(CCR2);
	ccr2 |= CCR2_WB;
#ifdef CPU_SUSP_HLT
	ccr2 |= CCR2_SUSP_HLT;
#else
	ccr2 &= ~CCR2_SUSP_HLT;
#endif
	ccr2 |= CCR2_WT1;
	write_cyrix_reg(CCR2, ccr2);

	/* Initialize CCR4 (accessible only while CCR3.MAPEN0 is set). */
	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);

	ccr4 = read_cyrix_reg(CCR4);
	ccr4 |= CCR4_DTE;
	ccr4 |= CCR4_MEM;
#ifdef CPU_FASTER_5X86_FPU
	ccr4 |= CCR4_FASTFPE;
#else
	ccr4 &= ~CCR4_FASTFPE;
#endif
	ccr4 &= ~CCR4_IOMASK;
	/********************************************************************
	 * WARNING: The "BIOS Writers Guide" mentions that I/O recovery time
	 * should be 0 for errata fix.
	 ********************************************************************/
#ifdef CPU_IORT
	ccr4 |= CPU_IORT & CCR4_IOMASK;
#endif
	write_cyrix_reg(CCR4, ccr4);

	/* Initialize PCR0. */
	/****************************************************************
	 * WARNING: RSTK_EN and LOOP_EN could make your system unstable.
	 * BTB_EN might make your system unstable.
	 ****************************************************************/
	pcr0 = read_cyrix_reg(PCR0);
#ifdef CPU_RSTK_EN
	pcr0 |= PCR0_RSTK;
#else
	pcr0 &= ~PCR0_RSTK;
#endif
#ifdef CPU_BTB_EN
	pcr0 |= PCR0_BTB;
#else
	pcr0 &= ~PCR0_BTB;
#endif
#ifdef CPU_LOOP_EN
	pcr0 |= PCR0_LOOP;
#else
	pcr0 &= ~PCR0_LOOP;
#endif

	/****************************************************************
	 * WARNING: if you use a memory mapped I/O device, don't use
	 * DISABLE_5X86_LSSER option, which may reorder memory mapped
	 * I/O access.
	 * IF YOUR MOTHERBOARD HAS PCI BUS, DON'T DISABLE LSSER.
	 ****************************************************************/
#ifdef CPU_DISABLE_5X86_LSSER
	pcr0 &= ~PCR0_LSSER;
#else
	pcr0 |= PCR0_LSSER;
#endif
	write_cyrix_reg(PCR0, pcr0);

	/* Restore CCR3 (closes the MAPEN0 window). */
	write_cyrix_reg(CCR3, ccr3);

	(void)read_cyrix_reg(0x80);		/* dummy */

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);
	load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0, NW = 1 */
	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	write_eflags(eflags);
}
289
290#ifdef CPU_I486_ON_386
291/*
292 * There are i486 based upgrade products for i386 machines.
293 * In this case, BIOS doesn't enables CPU cache.
294 */
295void
296init_i486_on_386(void)
297{
298 u_long eflags;
299
300#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
301 need_post_dma_flush = 1;
302#endif
303
304 eflags = read_eflags();
8a8d5d85 305 cpu_disable_intr();
984263bc
MD
306
307 load_cr0(rcr0() & ~(CR0_CD | CR0_NW)); /* CD = 0, NW = 0 */
308
309 write_eflags(eflags);
310}
311#endif
312
/*
 * Cyrix 6x86
 *
 * XXX - What should I do here? Please let me know.
 *
 * Programs CCR0-CCR5 (cache, locking, suspend-on-HLT, I/O recovery
 * time, write allocate), then re-enables caching.  Early revisions
 * (2.6 and lower, judged from cyrix_did) are left with the L1 cache
 * in write-through mode because write-back can crash the system.
 */
static void
init_6x86(void)
{
	u_long	eflags;
	u_char	ccr3, ccr4;

	eflags = read_eflags();
	cpu_disable_intr();

	/* Disable caching while the configuration registers are changed. */
	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	/* Initialize CCR0. */
	write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);

	/* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
#else
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
#endif

	/* Initialize CCR2. */
#ifdef CPU_SUSP_HLT
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
#else
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
#endif

	/* Open the MAPEN0 window so CCR4/CCR5 become accessible. */
	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);

	/* Initialize CCR4. */
	ccr4 = read_cyrix_reg(CCR4);
	ccr4 |= CCR4_DTE;
	ccr4 &= ~CCR4_IOMASK;
#ifdef CPU_IORT
	write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
#else
	write_cyrix_reg(CCR4, ccr4 | 7);
#endif

	/* Initialize CCR5. */
#ifdef CPU_WT_ALLOC
	write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
#endif

	/* Restore CCR3. */
	write_cyrix_reg(CCR3, ccr3);

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);

	/*
	 * Earlier revision of the 6x86 CPU could crash the system if
	 * L1 cache is in write-back mode.
	 */
	if ((cyrix_did & 0xff00) > 0x1600)
		load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	else {
		/* Revision 2.6 and lower. */
#ifdef CYRIX_CACHE_REALLY_WORKS
		load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
#else
		load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0 and NW = 1 */
#endif
	}

	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	write_eflags(eflags);
}
391#endif /* I486_CPU */
392
393#ifdef I686_CPU
/*
 * Cyrix 6x86MX (code-named M2)
 *
 * XXX - What should I do here? Please let me know.
 *
 * Same register-programming sequence as the 6x86, except the M2 can
 * always run with the L1 cache in write-back mode (no revision check
 * before clearing CR0.CD/NW).
 */
static void
init_6x86MX(void)
{
	u_long	eflags;
	u_char	ccr3, ccr4;

	eflags = read_eflags();
	cpu_disable_intr();

	/* Disable caching while the configuration registers are changed. */
	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	/* Initialize CCR0. */
	write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);

	/* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
#else
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
#endif

	/* Initialize CCR2. */
#ifdef CPU_SUSP_HLT
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
#else
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
#endif

	/* Open the MAPEN0 window so CCR4/CCR5 become accessible. */
	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);

	/* Initialize CCR4. */
	ccr4 = read_cyrix_reg(CCR4);
	ccr4 &= ~CCR4_IOMASK;
#ifdef CPU_IORT
	write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
#else
	write_cyrix_reg(CCR4, ccr4 | 7);
#endif

	/* Initialize CCR5. */
#ifdef CPU_WT_ALLOC
	write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
#endif

	/* Restore CCR3. */
	write_cyrix_reg(CCR3, ccr3);

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */

	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	write_eflags(eflags);
}
458
static void
init_ppro(void)
{
#ifndef SMP
	u_int64_t	apicbase;

	/*
	 * Local APIC should be disabled in a UP kernel.
	 * Clear the enable bit (bit 11) in the APIC base MSR (0x1b).
	 */
	apicbase = rdmsr(0x1b);
	apicbase &= ~0x800LL;
	wrmsr(0x1b, apicbase);
#endif
}
473
/*
 * Initialize BBL_CR_CTL3 (Control register 3: used to configure the
 * L2 cache).
 *
 * Only acts when the L2 cache has not already been configured (bit 0
 * of the MSR clear); the latency field defaults to 5 unless
 * CPU_CELERON_L2_LATENCY overrides it.
 *
 * Note: declared "static" at the top of the file; the definition now
 * matches the prototype's linkage (it previously omitted "static").
 */
static void
init_mendocino(void)
{
#ifdef CPU_PPRO2CELERON
	u_long	eflags;
	u_int64_t	bbl_cr_ctl3;

	eflags = read_eflags();
	cpu_disable_intr();

	/* Disable caching while the L2 configuration MSR is written. */
	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	bbl_cr_ctl3 = rdmsr(0x11e);

	/* If the L2 cache is configured, do nothing. */
	if (!(bbl_cr_ctl3 & 1)) {
		bbl_cr_ctl3 = 0x134052bLL;

		/* Set L2 Cache Latency (Default: 5). */
#ifdef CPU_CELERON_L2_LATENCY
#if CPU_L2_LATENCY > 15
#error invalid CPU_L2_LATENCY.
#endif
		bbl_cr_ctl3 |= CPU_L2_LATENCY << 1;
#else
		bbl_cr_ctl3 |= 5 << 1;
#endif
		wrmsr(0x11e, bbl_cr_ctl3);
	}

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));
	write_eflags(eflags);
#endif /* CPU_PPRO2CELERON */
}
513
514#endif /* I686_CPU */
515
/*
 * Initialize CR4 (Control register 4) to enable SSE instructions.
 *
 * Requires both the FXSR and XMM CPUID feature bits; when present,
 * sets CR4.OSFXSR/OSXMMEXCPT and records availability in cpu_fxsr
 * and the hw.instruction_sse sysctl backing variable.
 */
void
enable_sse(void)
{
#if defined(CPU_ENABLE_SSE)
	/* Both feature bits must be reported before SSE can be enabled. */
	if ((cpu_feature & CPUID_XMM) == 0 || (cpu_feature & CPUID_FXSR) == 0)
		return;

	load_cr4(rcr4() | CR4_FXSR | CR4_XMM);
	cpu_fxsr = 1;
	hw_instruction_sse = 1;
#endif
}
529
/*
 * Dispatch per-CPU-type initialization based on the "cpu" class
 * determined during identification, then enable SSE if available.
 * On PC-98 also record which DMA cache-flush workarounds are needed
 * for the detected vendor/model.
 */
void
initializecpu(void)
{

	switch (cpu) {
#ifdef I486_CPU
	case CPU_BLUE:
		init_bluelightning();
		break;
	case CPU_486DLC:
		init_486dlc();
		break;
	case CPU_CY486DX:
		init_cy486dx();
		break;
	case CPU_M1SC:
		init_5x86();
		break;
#ifdef CPU_I486_ON_386
	case CPU_486:
		init_i486_on_386();
		break;
#endif
	case CPU_M1:
		init_6x86();
		break;
#endif /* I486_CPU */
#ifdef I686_CPU
	case CPU_M2:
		init_6x86MX();
		break;
	case CPU_686:
		if (strcmp(cpu_vendor, "GenuineIntel") == 0) {
			/* Dispatch on family/model (mask out stepping). */
			switch (cpu_id & 0xff0) {
			case 0x610:
				init_ppro();
				break;
			case 0x660:
				init_mendocino();
				break;
			}
		} else if (strcmp(cpu_vendor, "AuthenticAMD") == 0) {
#if defined(I686_CPU) && defined(CPU_ATHLON_SSE_HACK)
			/*
			 * Sometimes the BIOS doesn't enable SSE instructions.
			 * According to AMD document 20734, the mobile
			 * Duron, the (mobile) Athlon 4 and the Athlon MP
			 * support SSE. These correspond to cpu_id 0x66X
			 * or 0x67X.
			 */
			if ((cpu_feature & CPUID_XMM) == 0 &&
			    ((cpu_id & ~0xf) == 0x660 ||
			     (cpu_id & ~0xf) == 0x670 ||
			     (cpu_id & ~0xf) == 0x680)) {
				u_int regs[4];
				/* Clear the SSE-disable bit, then re-read CPUID. */
				wrmsr(0xC0010015, rdmsr(0xC0010015) & ~0x08000);
				do_cpuid(1, regs);
				cpu_feature = regs[3];	/* EDX feature flags */
			}
#endif
		}
		break;
#endif
	default:
		break;
	}
	enable_sse();

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	/*
	 * OS should flush L1 cache by itself because no PC-98 supports
	 * non-Intel CPUs. Use wbinvd instruction before DMA transfer
	 * when need_pre_dma_flush = 1, use invd instruction after DMA
	 * transfer when need_post_dma_flush = 1. If your CPU upgrade
	 * product supports hardware cache control, you can add the
	 * CPU_UPGRADE_HW_CACHE option in your kernel configuration file.
	 * This option eliminates unneeded cache flush instruction(s).
	 */
	if (strcmp(cpu_vendor, "CyrixInstead") == 0) {
		switch (cpu) {
#ifdef I486_CPU
		case CPU_486DLC:
			need_post_dma_flush = 1;
			break;
		case CPU_M1SC:
			need_pre_dma_flush = 1;
			break;
		case CPU_CY486DX:
			need_pre_dma_flush = 1;
#ifdef CPU_I486_ON_386
			need_post_dma_flush = 1;
#endif
			break;
#endif
		default:
			break;
		}
	} else if (strcmp(cpu_vendor, "AuthenticAMD") == 0) {
		switch (cpu_id & 0xFF0) {
		case 0x470:		/* Enhanced Am486DX2 WB */
		case 0x490:		/* Enhanced Am486DX4 WB */
		case 0x4F0:		/* Am5x86 WB */
			need_pre_dma_flush = 1;
			break;
		}
	} else if (strcmp(cpu_vendor, "IBM") == 0) {
		need_post_dma_flush = 1;
	} else {
#ifdef CPU_I486_ON_386
		need_pre_dma_flush = 1;
#endif
	}
#endif /* PC98 && !CPU_UPGRADE_HW_CACHE */
}
644
645#if defined(I586_CPU) && defined(CPU_WT_ALLOC)
646/*
647 * Enable write allocate feature of AMD processors.
648 * Following two functions require the Maxmem variable being set.
649 */
650void
651enable_K5_wt_alloc(void)
652{
653 u_int64_t msr;
654
655 /*
656 * Write allocate is supported only on models 1, 2, and 3, with
657 * a stepping of 4 or greater.
658 */
659 if (((cpu_id & 0xf0) > 0) && ((cpu_id & 0x0f) > 3)) {
8a8d5d85 660 cpu_disable_intr();
984263bc
MD
661 msr = rdmsr(0x83); /* HWCR */
662 wrmsr(0x83, msr & !(0x10));
663
664 /*
665 * We have to tell the chip where the top of memory is,
666 * since video cards could have frame bufferes there,
667 * memory-mapped I/O could be there, etc.
668 */
669 if(Maxmem > 0)
670 msr = Maxmem / 16;
671 else
672 msr = 0;
673 msr |= AMD_WT_ALLOC_TME | AMD_WT_ALLOC_FRE;
674#ifdef PC98
675 if (!(inb(0x43b) & 4)) {
676 wrmsr(0x86, 0x0ff00f0);
677 msr |= AMD_WT_ALLOC_PRE;
678 }
679#else
680 /*
681 * There is no way to know wheter 15-16M hole exists or not.
682 * Therefore, we disable write allocate for this range.
683 */
684 wrmsr(0x86, 0x0ff00f0);
685 msr |= AMD_WT_ALLOC_PRE;
686#endif
687 wrmsr(0x85, msr);
688
689 msr=rdmsr(0x83);
690 wrmsr(0x83, msr|0x10); /* enable write allocate */
691
7b95be2a 692 cpu_enable_intr();
984263bc
MD
693 }
694}
695
/*
 * Enable the write-allocate feature of AMD K6 processors.
 * Requires the Maxmem variable to be set (top of physical memory in
 * pages); programs the WHCR MSR (0xc0000082) with the cacheable
 * memory size in 4M units.
 */
void
enable_K6_wt_alloc(void)
{
	quad_t	size;
	u_int64_t	whcr;
	u_long	eflags;

	eflags = read_eflags();
	cpu_disable_intr();
	wbinvd();

#ifdef CPU_DISABLE_CACHE
	/*
	 * Certain K6-2 box becomes unstable when write allocation is
	 * enabled.
	 */
	/*
	 * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
	 * but only the Cache Inhibit (CI) bit (bit 3 of TR12) is supported.
	 * All other bits in TR12 have no effect on the processor's
	 * operation.  The I/O Trap Restart function (bit 9 of TR12) is
	 * always enabled on the AMD-K6.
	 */
	wrmsr(0x0000000e, (u_int64_t)0x0008);
#endif
	/* Don't assume that memory size is aligned with 4M. */
	if (Maxmem > 0)
		size = ((Maxmem >> 8) + 3) >> 2;
	else
		size = 0;

	/* Limit is 508M bytes. */
	if (size > 0x7f)
		size = 0x7f;
	whcr = (rdmsr(0xc0000082) & ~(0x7fLL << 1)) | (size << 1);

#if defined(PC98) || defined(NO_MEMORY_HOLE)
	if (whcr & (0x7fLL << 1)) {
#ifdef PC98
		/*
		 * If bit 2 of port 0x43b is 0, disable write allocate for
		 * the 15-16M range.
		 */
		if (!(inb(0x43b) & 4))
			whcr &= ~0x0001LL;
		else
#endif
			whcr |= 0x0001LL;
	}
#else
	/*
	 * There is no way to know whether the 15-16M hole exists or not.
	 * Therefore, we disable write allocate for this range.
	 */
	whcr &= ~0x0001LL;
#endif
	wrmsr(0x0c0000082, whcr);

	write_eflags(eflags);
}
756
757void
758enable_K6_2_wt_alloc(void)
759{
760 quad_t size;
761 u_int64_t whcr;
762 u_long eflags;
763
764 eflags = read_eflags();
8a8d5d85 765 cpu_disable_intr();
984263bc
MD
766 wbinvd();
767
768#ifdef CPU_DISABLE_CACHE
769 /*
770 * Certain K6-2 box becomes unstable when write allocation is
771 * enabled.
772 */
773 /*
774 * The AMD-K6 processer provides the 64-bit Test Register 12(TR12),
775 * but only the Cache Inhibit(CI) (bit 3 of TR12) is suppported.
776 * All other bits in TR12 have no effect on the processer's operation.
777 * The I/O Trap Restart function (bit 9 of TR12) is always enabled
778 * on the AMD-K6.
779 */
780 wrmsr(0x0000000e, (u_int64_t)0x0008);
781#endif
782 /* Don't assume that memory size is aligned with 4M. */
783 if (Maxmem > 0)
784 size = ((Maxmem >> 8) + 3) >> 2;
785 else
786 size = 0;
787
788 /* Limit is 4092M bytes. */
789 if (size > 0x3fff)
790 size = 0x3ff;
791 whcr = (rdmsr(0xc0000082) & ~(0x3ffLL << 22)) | (size << 22);
792
793#if defined(PC98) || defined(NO_MEMORY_HOLE)
794 if (whcr & (0x3ffLL << 22)) {
795#ifdef PC98
796 /*
797 * If bit 2 of port 0x43b is 0, disable wrte allocate for the
798 * 15-16M range.
799 */
800 if (!(inb(0x43b) & 4))
801 whcr &= ~(1LL << 16);
802 else
803#endif
804 whcr |= 1LL << 16;
805 }
806#else
807 /*
808 * There is no way to know wheter 15-16M hole exists or not.
809 * Therefore, we disable write allocate for this range.
810 */
811 whcr &= ~(1LL << 16);
812#endif
813 wrmsr(0x0c0000082, whcr);
814
815 write_eflags(eflags);
984263bc
MD
816}
817#endif /* I585_CPU && CPU_WT_ALLOC */
818
819#include "opt_ddb.h"
820#ifdef DDB
821#include <ddb/ddb.h>
822
/*
 * DDB "show cyrixreg" command: dump CR0 and, on Cyrix CPUs, the
 * configuration registers (CCR0-CCR5 / PCR0) appropriate to the
 * detected model.  Registers behind the MAPEN0 window are read with
 * CCR3 temporarily switched and then restored.
 */
DB_SHOW_COMMAND(cyrixreg, cyrixreg)
{
	u_long	eflags;
	u_int	cr0;
	u_char	ccr1, ccr2, ccr3;
	u_char	ccr0 = 0, ccr4 = 0, ccr5 = 0, pcr0 = 0;

	cr0 = rcr0();
	if (strcmp(cpu_vendor,"CyrixInstead") == 0) {
		eflags = read_eflags();
		cpu_disable_intr();


		/* CCR0 is not present on the 5x86 and 486S/DX parts. */
		if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX)) {
			ccr0 = read_cyrix_reg(CCR0);
		}
		ccr1 = read_cyrix_reg(CCR1);
		ccr2 = read_cyrix_reg(CCR2);
		ccr3 = read_cyrix_reg(CCR3);
		if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
			/* CCR4/CCR5/PCR0 live behind the MAPEN0 window. */
			write_cyrix_reg(CCR3, CCR3_MAPEN0);
			ccr4 = read_cyrix_reg(CCR4);
			if ((cpu == CPU_M1) || (cpu == CPU_M2))
				ccr5 = read_cyrix_reg(CCR5);
			else
				pcr0 = read_cyrix_reg(PCR0);
			write_cyrix_reg(CCR3, ccr3);	/* Restore CCR3. */
		}
		write_eflags(eflags);

		if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX))
			printf("CCR0=%x, ", (u_int)ccr0);

		printf("CCR1=%x, CCR2=%x, CCR3=%x",
			(u_int)ccr1, (u_int)ccr2, (u_int)ccr3);
		if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
			printf(", CCR4=%x, ", (u_int)ccr4);
			if (cpu == CPU_M1SC)
				printf("PCR0=%x\n", pcr0);
			else
				printf("CCR5=%x\n", ccr5);
		}
	}
	printf("CR0=%x\n", cr0);
}
868#endif /* DDB */