kernel - Add workaround support for a probable AMD cpu bug related to cc1
[dragonfly.git] / sys / cpu / i386 / include / cpufunc.h
/*-
 * Copyright (c) 1993 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/include/cpufunc.h,v 1.96.2.3 2002/04/28 22:50:54 dwmalone Exp $
 */

/*
 * Functions to provide access to special i386 instructions.
 */

#ifndef _CPU_CPUFUNC_H_
#define _CPU_CPUFUNC_H_

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif
#ifndef _SYS_CDEFS_H_
#include <sys/cdefs.h>
#endif

__BEGIN_DECLS
#define readb(va)       (*(volatile u_int8_t *) (va))
#define readw(va)       (*(volatile u_int16_t *) (va))
#define readl(va)       (*(volatile u_int32_t *) (va))

#define writeb(va, d)   (*(volatile u_int8_t *) (va) = (d))
#define writew(va, d)   (*(volatile u_int16_t *) (va) = (d))
#define writel(va, d)   (*(volatile u_int32_t *) (va) = (d))

#ifdef __GNUC__

#ifdef SMP
#include <machine/lock.h>               /* XXX */
#endif

#ifdef SWTCH_OPTIM_STATS
extern int tlb_flush_count;             /* XXX */
#endif

static __inline void
breakpoint(void)
{
        __asm __volatile("int $3");
}

static __inline void
cpu_pause(void)
{
        __asm __volatile("pause");
}

/*
 * Find the first 1 in mask, starting with bit 0 and return the
 * bit number.  If mask is 0 the result is undefined.
 */
static __inline u_int
bsfl(u_int mask)
{
        u_int result;

        __asm __volatile("bsfl %0,%0" : "=r" (result) : "0" (mask));
        return (result);
}

static __inline u_int
bsflong(u_long mask)
{
        u_long result;

        __asm __volatile("bsfl %0,%0" : "=r" (result) : "0" (mask));
        return (result);
}

/*
 * Find the last 1 in mask, starting with bit 31 and return the
 * bit number.  If mask is 0 the result is undefined.
 */
static __inline u_int
bsrl(u_int mask)
{
        u_int result;

        __asm __volatile("bsrl %0,%0" : "=r" (result) : "0" (mask));
        return (result);
}

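/*
 * Illustrative sketch (added commentary, not part of the original header):
 * because the result of bsfl()/bsrl() is undefined for a zero mask, callers
 * must check for zero before scanning.  The hypothetical helper below walks
 * the set bits of a mask from lowest to highest, clearing each as it goes.
 */
static __inline u_int
example_count_set_bits(u_int mask)
{
        u_int count = 0;

        while (mask != 0) {
                u_int bit = bsfl(mask);         /* safe: mask is non-zero */

                mask &= ~(1u << bit);           /* clear the bit just found */
                ++count;
        }
        return (count);
}
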
/*
 * Test and set the specified bit (1 << bit) in the integer.  The
 * previous value of the bit is returned (0 or 1).
 */
static __inline int
btsl(u_int *mask, int bit)
{
        int result;

        __asm __volatile("btsl %2,%1; movl $0,%0; adcl $0,%0" :
                    "=r"(result), "=m"(*mask) : "r" (bit));
        return(result);
}

/*
 * Test and clear the specified bit (1 << bit) in the integer.  The
 * previous value of the bit is returned (0 or 1).
 */
static __inline int
btrl(u_int *mask, int bit)
{
        int result;

        __asm __volatile("btrl %2,%1; movl $0,%0; adcl $0,%0" :
                    "=r"(result), "=m"(*mask) : "r" (bit));
        return(result);
}

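/*
 * Illustrative sketch (added commentary, not part of the original header):
 * btsl()/btrl() return the previous bit value, so a caller can tell whether
 * it was the one that actually changed the bit.  Note that these forms are
 * not LOCK-prefixed and therefore do not synchronize against other cpus.
 */
static __inline int
example_claim_slot(u_int *bitmap, int slot)
{
        /* returns 1 if this call set the bit, 0 if it was already set */
        return (btsl(bitmap, slot) == 0);
}

static __inline void
example_release_slot(u_int *bitmap, int slot)
{
        btrl(bitmap, slot);                     /* previous value ignored */
}
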
static __inline void
do_cpuid(u_int ax, u_int *p)
{
        __asm __volatile("cpuid"
            : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
            : "0" (ax));
}

static __inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
        __asm __volatile("cpuid"
            : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
            : "0" (ax), "c" (cx));
}

#ifndef _CPU_DISABLE_INTR_DEFINED

static __inline void
cpu_disable_intr(void)
{
        __asm __volatile("cli" : : : "memory");
}

#endif

#ifndef _CPU_ENABLE_INTR_DEFINED

static __inline void
cpu_enable_intr(void)
{
        __asm __volatile("sti");
}

#endif

/*
 * Cpu and compiler memory ordering fence.  mfence ensures strong read and
 * write ordering.
 *
 * A serializing or fence instruction is required here.  A locked bus
 * cycle on data for which we already own cache mastership is the most
 * portable.
 */
static __inline void
cpu_mfence(void)
{
#ifdef SMP
#ifdef CPU_HAS_SSE2
        __asm __volatile("mfence" : : : "memory");
#else
        __asm __volatile("lock; addl $0,(%%esp)" : : : "memory");
#endif
#else
        __asm __volatile("" : : : "memory");
#endif
}

/*
 * cpu_lfence() ensures strong read ordering for reads issued prior
 * to the instruction versus reads issued afterwards.
 *
 * A serializing or fence instruction is required here.  A locked bus
 * cycle on data for which we already own cache mastership is the most
 * portable.
 */
static __inline void
cpu_lfence(void)
{
#ifdef SMP
#ifdef CPU_HAS_SSE2
        __asm __volatile("lfence" : : : "memory");
#else
        __asm __volatile("lock; addl $0,(%%esp)" : : : "memory");
#endif
#else
        __asm __volatile("" : : : "memory");
#endif
}

/*
 * cpu_sfence() ensures strong write ordering for writes issued prior
 * to the instruction versus writes issued afterwards.  Writes are
 * ordered on Intel cpus so we do not actually have to do anything.
 */
static __inline void
cpu_sfence(void)
{
        /*
         * NOTE:
         * Don't use 'sfence' here, as it will create a lot of
         * unnecessary stalls.
         */
        __asm __volatile("" : : : "memory");
}

/*
 * cpu_ccfence() prevents the compiler from reordering instructions, in
 * particular stores, relative to the current cpu.  Use cpu_sfence() if
 * you need to guarantee ordering by both the compiler and by the cpu.
 *
 * This also prevents the compiler from caching memory loads into local
 * variables across the routine.
 */
static __inline void
cpu_ccfence(void)
{
        __asm __volatile("" : : : "memory");
}

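/*
 * Illustrative sketch (added commentary, not part of the original header):
 * a minimal flag-and-payload handoff between a producer and a consumer.
 * The producer orders the payload store before the flag store with
 * cpu_sfence(); the consumer orders the flag load before the payload load
 * with cpu_lfence().  The pointer names are placeholders.
 */
static __inline void
example_publish(volatile u_int *payload, volatile u_int *flag, u_int val)
{
        *payload = val;
        cpu_sfence();           /* make the payload visible before the flag */
        *flag = 1;
}

static __inline u_int
example_consume(volatile u_int *payload, volatile u_int *flag)
{
        while (*flag == 0)
                cpu_pause();    /* spin politely until published */
        cpu_lfence();           /* order the flag read before the payload read */
        return (*payload);
}
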
/*
 * This is a horrible, horrible hack that might have to be put at the
 * end of certain procedures (on a case by case basis), just before they
 * return, to avoid what we believe to be an unreported AMD cpu bug.
 * Found to occur on both a Phenom II X4 820 (two of them), as well
 * as a 48-core built around an Opteron 6168 (Id = 0x100f91 Stepping = 1).
 * The problem does not appear to occur w/Intel cpus.
 *
 * The bug is likely related to either a write combining issue or the
 * Return Address Stack (RAS) hardware cache.
 *
 * In particular, we had to do this for GCC's fill_sons_in_loop() routine
 * which due to its deep recursion and stack flow appears to be able to
 * tickle the AMD cpu bug (w/ gcc-4.4.7).  Adding a single 'nop' to the
 * end of the routine just before it returns works around the bug.
 *
 * The bug appears to be extremely sensitive to %rip and %rsp values, to
 * the point where even just inserting an instruction in an unrelated
 * procedure (shifting the entire code base being run) affects the outcome.
 * DragonFly is probably able to more readily reproduce the bug due to
 * the stackgap randomization code.  We would expect OpenBSD (where we got
 * the stackgap randomization code from) to also be able to reproduce the
 * issue.  To date we have only reproduced the issue in DragonFly.
 */
#define __AMDCPUBUG_DFLY01_AVAILABLE__

static __inline void
cpu_amdcpubug_dfly01(void)
{
        __asm __volatile("nop" : : : "memory");
}

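/*
 * Illustrative sketch (added commentary, not part of the original header):
 * a deeply recursive routine suspected of triggering the issue would place
 * the workaround immediately before returning, mirroring what was done for
 * gcc's fill_sons_in_loop().  The recursion below is only a stand-in.
 */
static __inline int
example_recursion_depth(int n)
{
        int r;

        if (n <= 0)
                r = 0;
        else
                r = example_recursion_depth(n - 1) + 1;
        cpu_amdcpubug_dfly01();         /* workaround, just before return */
        return (r);
}
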
#ifdef _KERNEL

#define HAVE_INLINE_FFS

static __inline int
ffs(int mask)
{
        /*
         * Note that gcc-2's builtin ffs would be used if we didn't declare
         * this inline or turn off the builtin.  The builtin is faster but
         * broken in gcc-2.4.5 and slower but working in gcc-2.5 and later
         * versions.
         */
        return (mask == 0 ? mask : (int)bsfl((u_int)mask) + 1);
}

#define HAVE_INLINE_FLS

static __inline int
fls(int mask)
{
        return (mask == 0 ? mask : (int) bsrl((u_int)mask) + 1);
}

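/*
 * Illustrative sketch (added commentary, not part of the original header):
 * ffs()/fls() return 1-based bit positions and 0 for an empty mask, e.g.
 * ffs(0x18) == 4 and fls(0x18) == 5.  The hypothetical helper below
 * extracts the lowest set bit of a mask, or 0 if the mask is empty.
 */
static __inline u_int
example_lowest_set_bit(u_int mask)
{
        int bit = ffs((int)mask);       /* 1-based position, 0 if mask == 0 */

        return (bit == 0 ? 0 : 1u << (bit - 1));
}
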
#endif /* _KERNEL */

/*
 * The following complications are to get around gcc not having a
 * constraint letter for the range 0..255.  We still put "d" in the
 * constraint because "i" isn't a valid constraint when the port
 * isn't constant.  This only matters for -O0 because otherwise
 * the non-working version gets optimized away.
 *
 * Use an expression-statement instead of a conditional expression
 * because gcc-2.6.0 would promote the operands of the conditional
 * and produce poor code for "if ((inb(var) & const1) == const2)".
 *
 * The unnecessary test `(port) < 0x10000' is to generate a warning if
 * the `port' has type u_short or smaller.  Such types are pessimal.
 * This actually only works for signed types.  The range check is
 * careful to avoid generating warnings.
 */
#define inb(port) __extension__ ({                                      \
        u_char  _data;                                                  \
        if (__builtin_constant_p(port) && ((port) & 0xffff) < 0x100    \
            && (port) < 0x10000)                                        \
                _data = inbc(port);                                     \
        else                                                            \
                _data = inbv(port);                                     \
        _data; })

#define outb(port, data) (                                              \
        __builtin_constant_p(port) && ((port) & 0xffff) < 0x100        \
        && (port) < 0x10000                                             \
        ? outbc(port, data) : outbv(port, data))

static __inline u_char
inbc(u_int port)
{
        u_char data;

        __asm __volatile("inb %1,%0" : "=a" (data) : "id" ((u_short)(port)));
        return (data);
}

static __inline void
outbc(u_int port, u_char data)
{
        __asm __volatile("outb %0,%1" : : "a" (data), "id" ((u_short)(port)));
}

static __inline u_char
inbv(u_int port)
{
        u_char data;
        /*
         * We use %%dx and not %1 here because i/o is done at %dx and not at
         * %edx, while gcc generates inferior code (movw instead of movl)
         * if we tell it to load (u_short) port.
         */
        __asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
        return (data);
}

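/*
 * Illustrative sketch (added commentary, not part of the original header):
 * when the port is an in-range compile-time constant the inb() macro
 * expands to inbc() and the port is encoded as an immediate; otherwise it
 * expands to inbv() and the port travels in %dx.  Port 0x64 is used here
 * purely as an example value.
 */
static __inline u_char
example_inb_dispatch(u_int varport)
{
        u_char c, v;

        c = inb(0x64);          /* constant port -> inbc(), "inb $0x64,%al" */
        v = inb(varport);       /* variable port -> inbv(), "inb %dx,%al" */
        return (c | v);
}
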
static __inline u_int
inl(u_int port)
{
        u_int data;

        __asm __volatile("inl %%dx,%0" : "=a" (data) : "d" (port));
        return (data);
}

static __inline void
insb(u_int port, void *addr, size_t cnt)
{
        __asm __volatile("cld; rep; insb"
                         : "=D" (addr), "=c" (cnt)
                         : "0" (addr), "1" (cnt), "d" (port)
                         : "memory");
}

static __inline void
insw(u_int port, void *addr, size_t cnt)
{
        __asm __volatile("cld; rep; insw"
                         : "=D" (addr), "=c" (cnt)
                         : "0" (addr), "1" (cnt), "d" (port)
                         : "memory");
}

static __inline void
insl(u_int port, void *addr, size_t cnt)
{
        __asm __volatile("cld; rep; insl"
                         : "=D" (addr), "=c" (cnt)
                         : "0" (addr), "1" (cnt), "d" (port)
                         : "memory");
}

static __inline void
invd(void)
{
        __asm __volatile("invd");
}

#if defined(_KERNEL)

/*
 * If we are not a true-SMP box then smp_invltlb() is a NOP.  Note that this
 * will cause the invl*() functions to be equivalent to the cpu_invl*()
 * functions.
 */
#ifdef SMP
void smp_invltlb(void);
void smp_invltlb_intr(void);
#else
#define smp_invltlb()
#endif

#ifndef _CPU_INVLPG_DEFINED

/*
 * Invalidate a particular VA on this cpu only
 */
static __inline void
cpu_invlpg(void *addr)
{
        __asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}

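/*
 * Illustrative sketch (added commentary, not part of the original header):
 * after a hypothetical page-table entry is rewritten, the corresponding
 * virtual address must be flushed from this cpu's TLB before the new
 * translation can be relied upon.  The pte pointer is a placeholder, not
 * a real pmap structure.
 */
static __inline void
example_update_pte(volatile u_int *pte, u_int newval, void *va)
{
        *pte = newval;          /* hypothetical PTE store */
        cpu_invlpg(va);         /* discard any stale translation for va */
}
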
#endif

#ifndef _CPU_INVLTLB_DEFINED

/*
 * Invalidate the TLB on this cpu only
 */
static __inline void
cpu_invltlb(void)
{
        u_int temp;
        /*
         * This should be implemented as load_cr3(rcr3()) when load_cr3()
         * is inlined.
         */
        __asm __volatile("movl %%cr3, %0; movl %0, %%cr3" : "=r" (temp)
                         : : "memory");
#if defined(SWTCH_OPTIM_STATS)
        ++tlb_flush_count;
#endif
}

#endif

static __inline void
cpu_nop(void)
{
        __asm __volatile("rep; nop");
}

#endif  /* _KERNEL */

static __inline u_short
inw(u_int port)
{
        u_short data;

        __asm __volatile("inw %%dx,%0" : "=a" (data) : "d" (port));
        return (data);
}

static __inline u_int
loadandclear(volatile u_int *addr)
{
        u_int result;

        __asm __volatile("xorl %0,%0; xchgl %1,%0"
                        : "=&r" (result) : "m" (*addr));
        return (result);
}

static __inline void
outbv(u_int port, u_char data)
{
        u_char al;
        /*
         * Use an unnecessary assignment to help gcc's register allocator.
         * This makes a large difference for gcc-1.40 and a tiny difference
         * for gcc-2.6.0.  For gcc-1.40, al had to be ``asm("ax")'' for
         * best results.  gcc-2.6.0 can't handle this.
         */
        al = data;
        __asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
}

static __inline void
outl(u_int port, u_int data)
{
        /*
         * outl() and outw() aren't used much so we haven't looked at
         * possible micro-optimizations such as the unnecessary
         * assignment for them.
         */
        __asm __volatile("outl %0,%%dx" : : "a" (data), "d" (port));
}

static __inline void
outsb(u_int port, const void *addr, size_t cnt)
{
        __asm __volatile("cld; rep; outsb"
                         : "=S" (addr), "=c" (cnt)
                         : "0" (addr), "1" (cnt), "d" (port));
}

static __inline void
outsw(u_int port, const void *addr, size_t cnt)
{
        __asm __volatile("cld; rep; outsw"
                         : "=S" (addr), "=c" (cnt)
                         : "0" (addr), "1" (cnt), "d" (port));
}

static __inline void
outsl(u_int port, const void *addr, size_t cnt)
{
        __asm __volatile("cld; rep; outsl"
                         : "=S" (addr), "=c" (cnt)
                         : "0" (addr), "1" (cnt), "d" (port));
}

static __inline void
outw(u_int port, u_short data)
{
        __asm __volatile("outw %0,%%dx" : : "a" (data), "d" (port));
}

static __inline u_int
rcr2(void)
{
        u_int data;

        __asm __volatile("movl %%cr2,%0" : "=r" (data));
        return (data);
}

static __inline u_int
read_eflags(void)
{
        u_int ef;

        __asm __volatile("pushfl; popl %0" : "=r" (ef));
        return (ef);
}

static __inline u_int64_t
rdmsr(u_int msr)
{
        u_int64_t rv;

        __asm __volatile("rdmsr" : "=A" (rv) : "c" (msr));
        return (rv);
}

static __inline u_int64_t
rdpmc(u_int pmc)
{
        u_int64_t rv;

        __asm __volatile("rdpmc" : "=A" (rv) : "c" (pmc));
        return (rv);
}

#define _RDTSC_SUPPORTED_

static __inline u_int64_t
rdtsc(void)
{
        u_int64_t rv;

        __asm __volatile("rdtsc" : "=A" (rv));
        return (rv);
}

static __inline void
wbinvd(void)
{
        __asm __volatile("wbinvd");
}

static __inline void
write_eflags(u_int ef)
{
        __asm __volatile("pushl %0; popfl" : : "r" (ef));
}

static __inline void
wrmsr(u_int msr, u_int64_t newval)
{
        __asm __volatile("wrmsr" : : "A" (newval), "c" (msr));
}

static __inline u_short
rfs(void)
{
        u_short sel;
        __asm __volatile("movw %%fs,%0" : "=rm" (sel));
        return (sel);
}

static __inline u_short
rgs(void)
{
        u_short sel;
        __asm __volatile("movw %%gs,%0" : "=rm" (sel));
        return (sel);
}

static __inline void
load_fs(u_short sel)
{
        __asm __volatile("movw %0,%%fs" : : "rm" (sel));
}

static __inline void
load_gs(u_short sel)
{
        __asm __volatile("movw %0,%%gs" : : "rm" (sel));
}

static __inline u_int
rdr0(void)
{
        u_int data;
        __asm __volatile("movl %%dr0,%0" : "=r" (data));
        return (data);
}

static __inline void
load_dr0(u_int sel)
{
        __asm __volatile("movl %0,%%dr0" : : "r" (sel));
}

static __inline u_int
rdr1(void)
{
        u_int data;
        __asm __volatile("movl %%dr1,%0" : "=r" (data));
        return (data);
}

static __inline void
load_dr1(u_int sel)
{
        __asm __volatile("movl %0,%%dr1" : : "r" (sel));
}

static __inline u_int
rdr2(void)
{
        u_int data;
        __asm __volatile("movl %%dr2,%0" : "=r" (data));
        return (data);
}

static __inline void
load_dr2(u_int sel)
{
        __asm __volatile("movl %0,%%dr2" : : "r" (sel));
}

static __inline u_int
rdr3(void)
{
        u_int data;
        __asm __volatile("movl %%dr3,%0" : "=r" (data));
        return (data);
}

static __inline void
load_dr3(u_int sel)
{
        __asm __volatile("movl %0,%%dr3" : : "r" (sel));
}

static __inline u_int
rdr4(void)
{
        u_int data;
        __asm __volatile("movl %%dr4,%0" : "=r" (data));
        return (data);
}

static __inline void
load_dr4(u_int sel)
{
        __asm __volatile("movl %0,%%dr4" : : "r" (sel));
}

static __inline u_int
rdr5(void)
{
        u_int data;
        __asm __volatile("movl %%dr5,%0" : "=r" (data));
        return (data);
}

static __inline void
load_dr5(u_int sel)
{
        __asm __volatile("movl %0,%%dr5" : : "r" (sel));
}

static __inline u_int
rdr6(void)
{
        u_int data;
        __asm __volatile("movl %%dr6,%0" : "=r" (data));
        return (data);
}

static __inline void
load_dr6(u_int sel)
{
        __asm __volatile("movl %0,%%dr6" : : "r" (sel));
}

static __inline u_int
rdr7(void)
{
        u_int data;
        __asm __volatile("movl %%dr7,%0" : "=r" (data));
        return (data);
}

static __inline void
load_dr7(u_int sel)
{
        __asm __volatile("movl %0,%%dr7" : : "r" (sel));
}

#else /* !__GNUC__ */

int     breakpoint (void);
void    cpu_pause (void);
u_int   bsfl (u_int mask);
u_int   bsrl (u_int mask);
void    cpu_disable_intr (void);
void    do_cpuid (u_int ax, u_int *p);
void    cpu_enable_intr (void);
u_char  inb (u_int port);
u_int   inl (u_int port);
void    insb (u_int port, void *addr, size_t cnt);
void    insl (u_int port, void *addr, size_t cnt);
void    insw (u_int port, void *addr, size_t cnt);
void    invd (void);
u_short inw (u_int port);
u_int   loadandclear (u_int *addr);
void    outb (u_int port, u_char data);
void    outl (u_int port, u_int data);
void    outsb (u_int port, void *addr, size_t cnt);
void    outsl (u_int port, void *addr, size_t cnt);
void    outsw (u_int port, void *addr, size_t cnt);
void    outw (u_int port, u_short data);
u_int   rcr2 (void);
u_int64_t rdmsr (u_int msr);
u_int64_t rdpmc (u_int pmc);
u_int64_t rdtsc (void);
u_int   read_eflags (void);
void    wbinvd (void);
void    write_eflags (u_int ef);
void    wrmsr (u_int msr, u_int64_t newval);
u_short rfs (void);
u_short rgs (void);
void    load_fs (u_short sel);
void    load_gs (u_short sel);

#endif /* __GNUC__ */

void    load_cr0 (u_int cr0);
void    load_cr3 (u_int cr3);
void    load_cr4 (u_int cr4);
void    ltr (u_short sel);
u_int   rcr0 (void);
u_int   rcr3 (void);
u_int   rcr4 (void);
int     rdmsr_safe (u_int msr, uint64_t *val);
void    reset_dbregs (void);

__END_DECLS

#endif /* !_CPU_CPUFUNC_H_ */