/*
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1993 The Regents of the University of California.
 * Copyright (c) 2008 The DragonFly Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/amd64/include/cpufunc.h,v 1.139 2004/01/28 23:53:04 peter Exp $
 * $DragonFly: src/sys/cpu/amd64/include/cpufunc.h,v 1.3 2008/08/29 17:07:06 dillon Exp $
 */

/*
 * Functions to provide access to special amd64 instructions.
 * This is included in sys/systm.h, and that file should be
 * used in preference to this.
 */
#ifndef _CPU_CPUFUNC_H_
#define	_CPU_CPUFUNC_H_

#include <sys/cdefs.h>
#include <machine/psl.h>

struct region_descriptor;

#define	readb(va)	(*(volatile u_int8_t *) (va))
#define	readw(va)	(*(volatile u_int16_t *) (va))
#define	readl(va)	(*(volatile u_int32_t *) (va))
#define	readq(va)	(*(volatile u_int64_t *) (va))

#define	writeb(va, d)	(*(volatile u_int8_t *) (va) = (d))
#define	writew(va, d)	(*(volatile u_int16_t *) (va) = (d))
#define	writel(va, d)	(*(volatile u_int32_t *) (va) = (d))
#define	writeq(va, d)	(*(volatile u_int64_t *) (va) = (d))
#ifdef __GNUC__

static __inline void
breakpoint(void)
{
	__asm __volatile("int $3");
}

static __inline void
cpu_pause(void)
{
	__asm __volatile("pause");
}

static __inline u_int
bsfl(u_int mask)
{
	u_int	result;

	__asm __volatile("bsfl %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_long
bsfq(u_long mask)
{
	u_long	result;

	__asm __volatile("bsfq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_int
bsrl(u_int mask)
{
	u_int	result;

	__asm __volatile("bsrl %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_long
bsrq(u_long mask)
{
	u_long	result;

	__asm __volatile("bsrq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline void
cpu_disable_intr(void)
{
	__asm __volatile("cli" : : : "memory");
}

static __inline void
do_cpuid(u_int ax, u_int *p)
{
	__asm __volatile("cpuid"
			 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
			 :  "0" (ax));
}

static __inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
	__asm __volatile("cpuid"
			 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
			 :  "0" (ax), "c" (cx));
}
static __inline void
cpu_enable_intr(void)
{
	__asm __volatile("sti");
}
/*
 * Cpu and compiler memory ordering fence.  mfence ensures strong read and
 * write ordering.
 *
 * A serializing or fence instruction is required here.  A locked bus
 * cycle on data for which we already own cache mastership is the most
 * portable.
 */
static __inline void
cpu_mfence(void)
{
#ifdef SMP
	__asm __volatile("lock; addl $0,(%%rsp)" : : : "memory");
#else
	__asm __volatile("" : : : "memory");
#endif
}

/*
 * cpu_lfence() ensures strong read ordering for reads issued prior
 * to the instruction versus reads issued afterwards.
 *
 * A serializing or fence instruction is required here.  A locked bus
 * cycle on data for which we already own cache mastership is the most
 * portable.
 */
static __inline void
cpu_lfence(void)
{
#ifdef SMP
	__asm __volatile("lock; addl $0,(%%rsp)" : : : "memory");
#else
	__asm __volatile("" : : : "memory");
#endif
}

/*
 * cpu_sfence() ensures strong write ordering for writes issued prior
 * to the instruction versus writes issued afterwards.  Writes are
 * ordered on intel cpus so we do not actually have to do anything.
 */
static __inline void
cpu_sfence(void)
{
	__asm __volatile("" : : : "memory");
}

/*
 * cpu_ccfence() prevents the compiler from reordering instructions, in
 * particular stores, relative to the current cpu.  Use cpu_sfence() if
 * you need to guarantee ordering by both the compiler and by the cpu.
 *
 * This also prevents the compiler from caching memory loads into local
 * variables across the routine.
 */
static __inline void
cpu_ccfence(void)
{
	__asm __volatile("" : : : "memory");
}
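/*
 * Illustrative sketch (not part of the original header): cpu_ccfence()
 * in a polling loop forces the compiler to re-read the flag from memory
 * on every iteration instead of caching it in a register.  The function
 * and variable names are hypothetical.
 */
#if 0
static __inline void
example_spin_until_set(u_int *flagp)
{
	while ((*flagp & 1) == 0) {
		cpu_pause();	/* spin-wait hint to the cpu */
		cpu_ccfence();	/* compiler barrier: force *flagp reload */
	}
	cpu_lfence();		/* order later reads after the flag read */
}
#endif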
#define	HAVE_INLINE_FFS

static __inline int
ffs(int mask)
{
#if 0
	/*
	 * Note that gcc-2's builtin ffs would be used if we didn't declare
	 * this inline or turn off the builtin.  The builtin is faster but
	 * broken in gcc-2.4.5 and slower but working in gcc-2.5 and later
	 * versions.
	 */
	return (mask == 0 ? mask : (int)bsfl((u_int)mask) + 1);
#else
	/* Actually, the above is way out of date.  The builtins use cmov etc */
	return (__builtin_ffs(mask));
#endif
}

#define	HAVE_INLINE_FFSL

static __inline int
ffsl(long mask)
{
	return (mask == 0 ? mask : (int)bsfq((u_long)mask) + 1);
}

#define	HAVE_INLINE_FLS

static __inline int
fls(int mask)
{
	return (mask == 0 ? mask : (int)bsrl((u_int)mask) + 1);
}

#define	HAVE_INLINE_FLSL

static __inline int
flsl(long mask)
{
	return (mask == 0 ? mask : (int)bsrq((u_long)mask) + 1);
}
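/*
 * Illustrative examples (not part of the original header) of the 1-based
 * bit scan semantics implemented above:
 *
 *	ffs(0x00000000) == 0		fls(0x00000000) == 0
 *	ffs(0x00000018) == 4		fls(0x00000018) == 5
 *	ffsl(1L << 40)  == 41		flsl(1L << 40)  == 41
 */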
static __inline void
halt(void)
{
	__asm __volatile("hlt");
}

/*
 * The following complications are to get around gcc not having a
 * constraint letter for the range 0..255.  We still put "d" in the
 * constraint because "i" isn't a valid constraint when the port
 * isn't constant.  This only matters for -O0 because otherwise
 * the non-working version gets optimized away.
 *
 * Use an expression-statement instead of a conditional expression
 * because gcc-2.6.0 would promote the operands of the conditional
 * and produce poor code for "if ((inb(var) & const1) == const2)".
 *
 * The unnecessary test `(port) < 0x10000' is to generate a warning if
 * the `port' has type u_short or smaller.  Such types are pessimal.
 * This actually only works for signed types.  The range check is
 * careful to avoid generating warnings.
 */
#define	inb(port) __extension__ ({					\
	u_char	_data;							\
	if (__builtin_constant_p(port) && ((port) & 0xffff) < 0x100	\
	    && (port) < 0x10000)					\
		_data = inbc(port);					\
	else								\
		_data = inbv(port);					\
	_data; })

#define	outb(port, data) (						\
	__builtin_constant_p(port) && ((port) & 0xffff) < 0x100	\
	&& (port) < 0x10000						\
	? outbc(port, data) : outbv(port, data))
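/*
 * Illustrative usage (not part of the original header): with a
 * compile-time-constant port the macros resolve to inbc()/outbc(),
 * which can encode the port number as an immediate operand; a variable
 * port falls back to inbv()/outbv() through %dx.  The port value and
 * helper below are hypothetical.
 *
 *	u_char v = inb(0x60);		// constant:  "inb $0x60,%al"
 *	u_int port = lookup_port();	// hypothetical helper
 *	v = inb(port);			// variable:  "inb %dx,%al"
 */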
static __inline u_char
inbc(u_int port)
{
	u_char	data;

	__asm __volatile("inb %1,%0" : "=a" (data) : "id" ((u_short)(port)));
	return (data);
}

static __inline void
outbc(u_int port, u_char data)
{
	__asm __volatile("outb %0,%1" : : "a" (data), "id" ((u_short)(port)));
}

static __inline u_char
inbv(u_int port)
{
	u_char	data;
	/*
	 * We use %%dx and not %1 here because i/o is done at %dx and not at
	 * %edx, while gcc generates inferior code (movw instead of movl)
	 * if we tell it to load (u_short) port.
	 */
	__asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

static __inline u_int
inl(u_int port)
{
	u_int	data;

	__asm __volatile("inl %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

static __inline void
insb(u_int port, void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; insb"
			 : "+D" (addr), "+c" (cnt)
			 : "d" (port)
			 : "memory");
}

static __inline void
insw(u_int port, void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; insw"
			 : "+D" (addr), "+c" (cnt)
			 : "d" (port)
			 : "memory");
}

static __inline void
insl(u_int port, void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; insl"
			 : "+D" (addr), "+c" (cnt)
			 : "d" (port)
			 : "memory");
}

static __inline void
invd(void)
{
	__asm __volatile("invd");
}

/*
 * If we are not a true-SMP box then smp_invltlb() is a NOP.  Note that this
 * will cause the invl*() functions to be equivalent to the cpu_invl*()
 * functions.
 */
#ifdef SMP
void smp_invltlb(void);
#else
#define	smp_invltlb()
#endif

#ifndef _CPU_INVLPG_DEFINED

/*
 * Invalidate a particular VA on this cpu only
 */
static __inline void
cpu_invlpg(void *addr)
{
	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}

#endif

static __inline u_short
inw(u_int port)
{
	u_short	data;

	__asm __volatile("inw %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

static __inline u_int
loadandclear(volatile u_int *addr)
{
	u_int	result;

	__asm __volatile("xorl %0,%0; xchgl %1,%0"
			 : "=&r" (result) : "m" (*addr));
	return (result);
}
static __inline void
outbv(u_int port, u_char data)
{
	u_char	al;
	/*
	 * Use an unnecessary assignment to help gcc's register allocator.
	 * This makes a large difference for gcc-1.40 and a tiny difference
	 * for gcc-2.6.0.  For gcc-1.40, al had to be ``asm("ax")'' for
	 * best results.  gcc-2.6.0 can't handle this.
	 */
	al = data;
	__asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
}

static __inline void
outl(u_int port, u_int data)
{
	/*
	 * outl() and outw() aren't used much so we haven't looked at
	 * possible micro-optimizations such as the unnecessary
	 * assignment for them.
	 */
	__asm __volatile("outl %0,%%dx" : : "a" (data), "d" (port));
}

static __inline void
outsb(u_int port, const void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; outsb"
			 : "+S" (addr), "+c" (cnt)
			 : "d" (port));
}

static __inline void
outsw(u_int port, const void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; outsw"
			 : "+S" (addr), "+c" (cnt)
			 : "d" (port));
}

static __inline void
outsl(u_int port, const void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; outsl"
			 : "+S" (addr), "+c" (cnt)
			 : "d" (port));
}

static __inline void
outw(u_int port, u_short data)
{
	__asm __volatile("outw %0,%%dx" : : "a" (data), "d" (port));
}

static __inline void
ia32_pause(void)
{
	__asm __volatile("pause");
}

static __inline u_long
read_rflags(void)
{
	u_long	rf;

	__asm __volatile("pushfq; popq %0" : "=r" (rf));
	return (rf);
}

static __inline u_int64_t
rdmsr(u_int msr)
{
	u_int32_t low, high;

	__asm __volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
	return (low | ((u_int64_t)high << 32));
}

static __inline u_int64_t
rdpmc(u_int pmc)
{
	u_int32_t low, high;

	__asm __volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (pmc));
	return (low | ((u_int64_t)high << 32));
}

static __inline u_int64_t
rdtsc(void)
{
	u_int32_t low, high;

	__asm __volatile("rdtsc" : "=a" (low), "=d" (high));
	return (low | ((u_int64_t)high << 32));
}
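/*
 * Illustrative sketch (not part of the original header): measuring an
 * elapsed cycle count with rdtsc().  cpu_ccfence() keeps the compiler
 * from moving the measured work across the reads; rdtsc itself is not
 * a serializing instruction, so the result is only approximate.  The
 * function name is hypothetical.
 */
#if 0
static __inline u_int64_t
example_cycles(void (*fn)(void))
{
	u_int64_t t0, t1;

	t0 = rdtsc();
	cpu_ccfence();
	fn();			/* the work being timed */
	cpu_ccfence();
	t1 = rdtsc();
	return (t1 - t0);
}
#endif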
static __inline void
wbinvd(void)
{
	__asm __volatile("wbinvd");
}

static __inline void
write_rflags(u_long rf)
{
	__asm __volatile("pushq %0; popfq" : : "r" (rf));
}

static __inline void
wrmsr(u_int msr, u_int64_t newval)
{
	u_int32_t low, high;

	low = newval;
	high = newval >> 32;
	__asm __volatile("wrmsr" : : "a" (low), "d" (high), "c" (msr));
}

static __inline void
load_cr0(u_long data)
{
	__asm __volatile("movq %0,%%cr0" : : "r" (data));
}

static __inline u_long
rcr0(void)
{
	u_long	data;

	__asm __volatile("movq %%cr0,%0" : "=r" (data));
	return (data);
}

static __inline u_long
rcr2(void)
{
	u_long	data;

	__asm __volatile("movq %%cr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr3(u_long data)
{
	__asm __volatile("movq %0,%%cr3" : : "r" (data) : "memory");
}

static __inline u_long
rcr3(void)
{
	u_long	data;

	__asm __volatile("movq %%cr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr4(u_long data)
{
	__asm __volatile("movq %0,%%cr4" : : "r" (data));
}

static __inline u_long
rcr4(void)
{
	u_long	data;

	__asm __volatile("movq %%cr4,%0" : "=r" (data));
	return (data);
}

/*
 * Global TLB flush (except for those for pages marked PG_G)
 */
static __inline void
cpu_invltlb(void)
{
	load_cr3(rcr3());
}

/*
 * TLB flush for an individual page (even if it has PG_G).
 * Only works on 486+ CPUs (i386 does not have PG_G).
 */
static __inline void
invlpg(u_long addr)
{
	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}

static __inline u_int
rfs(void)
{
	u_int sel;
	__asm __volatile("movl %%fs,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_int
rgs(void)
{
	u_int sel;
	__asm __volatile("movl %%gs,%0" : "=rm" (sel));
	return (sel);
}

static __inline void
load_ds(u_int sel)
{
	__asm __volatile("movl %0,%%ds" : : "rm" (sel));
}

static __inline void
load_es(u_int sel)
{
	__asm __volatile("movl %0,%%es" : : "rm" (sel));
}

#ifdef _KERNEL
/* This is defined in <machine/specialreg.h> but is too painful to get to */
#ifndef MSR_FSBASE
#define	MSR_FSBASE	0xc0000100
#endif
static __inline void
load_fs(u_int sel)
{
	/* Preserve the fsbase value across the selector load */
	__asm __volatile("rdmsr; movl %0,%%fs; wrmsr"
	    : : "rm" (sel), "c" (MSR_FSBASE) : "eax", "edx");
}

#ifndef MSR_GSBASE
#define	MSR_GSBASE	0xc0000101
#endif
static __inline void
load_gs(u_int sel)
{
	/*
	 * Preserve the gsbase value across the selector load.
	 * Note that we have to disable interrupts because the gsbase
	 * being trashed happens to be the kernel gsbase at the time.
	 */
	__asm __volatile("pushfq; cli; rdmsr; movw %0,%%gs; wrmsr; popfq"
	    : : "rm" (sel), "c" (MSR_GSBASE) : "eax", "edx");
}
#else /* !_KERNEL */
/* Usable by userland */
static __inline void
load_fs(u_int sel)
{
	__asm __volatile("movl %0,%%fs" : : "rm" (sel));
}

static __inline void
load_gs(u_int sel)
{
	__asm __volatile("movl %0,%%gs" : : "rm" (sel));
}
#endif /* _KERNEL */

/* void lidt(struct region_descriptor *addr); */
static __inline void
lidt(struct region_descriptor *addr)
{
	__asm __volatile("lidt (%0)" : : "r" (addr));
}

/* void lldt(u_short sel); */
static __inline void
lldt(u_short sel)
{
	__asm __volatile("lldt %0" : : "r" (sel));
}

/* void ltr(u_short sel); */
static __inline void
ltr(u_short sel)
{
	__asm __volatile("ltr %0" : : "r" (sel));
}

static __inline u_int64_t
rdr0(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr0,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr0(u_int64_t dr0)
{
	__asm __volatile("movq %0,%%dr0" : : "r" (dr0));
}

static __inline u_int64_t
rdr1(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr1,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr1(u_int64_t dr1)
{
	__asm __volatile("movq %0,%%dr1" : : "r" (dr1));
}

static __inline u_int64_t
rdr2(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr2(u_int64_t dr2)
{
	__asm __volatile("movq %0,%%dr2" : : "r" (dr2));
}

static __inline u_int64_t
rdr3(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr3(u_int64_t dr3)
{
	__asm __volatile("movq %0,%%dr3" : : "r" (dr3));
}

static __inline u_int64_t
rdr4(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr4,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr4(u_int64_t dr4)
{
	__asm __volatile("movq %0,%%dr4" : : "r" (dr4));
}

static __inline u_int64_t
rdr5(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr5,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr5(u_int64_t dr5)
{
	__asm __volatile("movq %0,%%dr5" : : "r" (dr5));
}

static __inline u_int64_t
rdr6(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr6,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr6(u_int64_t dr6)
{
	__asm __volatile("movq %0,%%dr6" : : "r" (dr6));
}

static __inline u_int64_t
rdr7(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr7,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr7(u_int64_t dr7)
{
	__asm __volatile("movq %0,%%dr7" : : "r" (dr7));
}

static __inline register_t
intr_disable(void)
{
	register_t rflags;

	rflags = read_rflags();
	cpu_disable_intr();
	return (rflags);
}

static __inline void
intr_restore(register_t rflags)
{
	write_rflags(rflags);
}
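/*
 * Illustrative sketch (not part of the original header): the canonical
 * save/disable/restore pattern built from the two functions above.
 * intr_restore() puts rflags back exactly as saved, so interrupts are
 * re-enabled only if they were enabled on entry.
 */
#if 0
static __inline void
example_critical_section(void)
{
	register_t rf;

	rf = intr_disable();
	/* ... code that must run with interrupts disabled ... */
	intr_restore(rf);
}
#endif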
#else /* !__GNUC__ */

void	breakpoint(void);
void	cpu_pause(void);
u_int	bsfl(u_int mask);
u_int	bsrl(u_int mask);
void	cpu_disable_intr(void);
void	cpu_enable_intr(void);
void	cpu_invlpg(void *addr);
void	cpu_invlpg_range(u_long start, u_long end);
void	do_cpuid(u_int ax, u_int *p);
u_char	inb(u_int port);
u_int	inl(u_int port);
void	insb(u_int port, void *addr, size_t cnt);
void	insl(u_int port, void *addr, size_t cnt);
void	insw(u_int port, void *addr, size_t cnt);
void	invlpg(u_long addr);
void	invlpg_range(u_long start, u_long end);
void	cpu_invltlb(void);
u_short	inw(u_int port);
void	load_cr0(u_long cr0);
void	load_cr3(u_long cr3);
void	load_cr4(u_long cr4);
void	load_fs(u_int sel);
void	load_gs(u_int sel);
struct region_descriptor;
void	lidt(struct region_descriptor *addr);
void	lldt(u_short sel);
void	ltr(u_short sel);
void	outb(u_int port, u_char data);
void	outl(u_int port, u_int data);
void	outsb(u_int port, const void *addr, size_t cnt);
void	outsl(u_int port, const void *addr, size_t cnt);
void	outsw(u_int port, const void *addr, size_t cnt);
void	outw(u_int port, u_short data);
void	ia32_pause(void);
u_int64_t	rdmsr(u_int msr);
u_int64_t	rdpmc(u_int pmc);
u_int64_t	rdtsc(void);
u_long	read_rflags(void);
void	write_rflags(u_long rf);
void	wrmsr(u_int msr, u_int64_t newval);
u_int64_t	rdr0(void);
void	load_dr0(u_int64_t dr0);
u_int64_t	rdr1(void);
void	load_dr1(u_int64_t dr1);
u_int64_t	rdr2(void);
void	load_dr2(u_int64_t dr2);
u_int64_t	rdr3(void);
void	load_dr3(u_int64_t dr3);
u_int64_t	rdr4(void);
void	load_dr4(u_int64_t dr4);
u_int64_t	rdr5(void);
void	load_dr5(u_int64_t dr5);
u_int64_t	rdr6(void);
void	load_dr6(u_int64_t dr6);
u_int64_t	rdr7(void);
void	load_dr7(u_int64_t dr7);
register_t	intr_disable(void);
void	intr_restore(register_t rf);

#endif	/* __GNUC__ */

void	reset_dbregs(void);

#endif /* !_CPU_CPUFUNC_H_ */