2 * Copyright (c) 1993 The Regents of the University of California.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * $FreeBSD: src/sys/i386/include/cpufunc.h,v 1.96.2.3 2002/04/28 22:50:54 dwmalone Exp $
37 * Functions to provide access to special i386 instructions.
40 #ifndef _CPU_CPUFUNC_H_
41 #define _CPU_CPUFUNC_H_
44 #include <sys/types.h>
47 #include <sys/cdefs.h>
49 #include <sys/thread.h>
50 #include <machine/smp.h>
/*
 * Memory-mapped I/O accessors: read or write a byte, word, or doubleword
 * at virtual address 'va'.  The volatile qualifier forces a real memory
 * access on every use (no register caching, no elimination by the
 * optimizer), which is required for device registers.
 */
#define readb(va)       (*(volatile u_int8_t *) (va))
#define readw(va)       (*(volatile u_int16_t *) (va))
#define readl(va)       (*(volatile u_int32_t *) (va))

#define writeb(va, d)   (*(volatile u_int8_t *) (va) = (d))
#define writew(va, d)   (*(volatile u_int16_t *) (va) = (d))
#define writel(va, d)   (*(volatile u_int32_t *) (va) = (d))
63 #include <machine/lock.h> /* XXX */
65 #ifdef SWTCH_OPTIM_STATS
66 extern int tlb_flush_count; /* XXX */
/*
 * Trap into the debugger from C code.
 */
static __inline void
breakpoint(void)
{
        __asm __volatile("int $3");
}

/*
 * Busy-wait hint: tells the cpu we are spinning so it can save power
 * and avoid memory-order mis-speculation penalties.
 */
static __inline void
cpu_pause(void)
{
        __asm __volatile("pause");
}

/*
 * Find the first 1 in mask, starting with bit 0 and return the
 * bit number.  If mask is 0 the result is undefined (bsf leaves the
 * destination register unmodified when the source is zero).
 */
static __inline u_int
bsfl(u_int mask)
{
        u_int   result;

        __asm __volatile("bsfl %0,%0" : "=r" (result) : "0" (mask));
        return (result);
}
99 __asm __volatile("bsfl %0,%0" : "=r" (result) : "0" (mask));
/*
 * Find the last 1 in mask, starting with bit 31 and return the
 * bit number.  If mask is 0 the result is undefined.
 */
static __inline u_int
bsrl(u_int mask)
{
        u_int   result;

        __asm __volatile("bsrl %0,%0" : "=r" (result) : "0" (mask));
        return (result);
}

/*
 * Test and set the specified bit (1 << bit) in the integer.  The
 * previous value of the bit is returned (0 or 1).  The adcl trick
 * converts the carry flag (loaded by bts) into a 0/1 result.
 */
static __inline int
btsl(u_int *mask, int bit)
{
        int     result;

        __asm __volatile("btsl %2,%1; movl $0,%0; adcl $0,%0" :
                    "=r"(result), "=m"(*mask) : "r" (bit));
        return (result);
}

/*
 * Test and clear the specified bit (1 << bit) in the integer.  The
 * previous value of the bit is returned (0 or 1).
 */
static __inline int
btrl(u_int *mask, int bit)
{
        int     result;

        __asm __volatile("btrl %2,%1; movl $0,%0; adcl $0,%0" :
                    "=r"(result), "=m"(*mask) : "r" (bit));
        return (result);
}
/*
 * Write back the cache line containing 'addr' and invalidate it
 * (SSE2 clflush instruction).
 * NOTE(review): parameter type reconstructed as u_long — confirm.
 */
static __inline void
clflush(u_long addr)
{
        __asm __volatile("clflush %0" : : "m" (*(char *) addr));
}

/*
 * Execute CPUID leaf 'ax'; results land in p[0..3] = eax/ebx/ecx/edx.
 */
static __inline void
do_cpuid(u_int ax, u_int *p)
{
        __asm __volatile("cpuid"
                         : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
                         :  "0" (ax));
}

/*
 * Execute CPUID leaf 'ax' with sub-leaf 'cx' (for leaves that take an
 * ecx index, e.g. cache topology); results in p[0..3].
 */
static __inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
        __asm __volatile("cpuid"
                         : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
                         :  "0" (ax), "c" (cx));
}
#ifndef _CPU_DISABLE_INTR_DEFINED

/*
 * Disable interrupts on this cpu (privileged).  The "memory" clobber
 * keeps the compiler from migrating memory accesses across the cli.
 */
static __inline void
cpu_disable_intr(void)
{
        __asm __volatile("cli" : : : "memory");
}

#endif

#ifndef _CPU_ENABLE_INTR_DEFINED

/*
 * Enable interrupts on this cpu (privileged).
 */
static __inline void
cpu_enable_intr(void)
{
        __asm __volatile("sti");
}

#endif
187 * Cpu and compiler memory ordering fence. mfence ensures strong read and
190 * A serializing or fence instruction is required here. A locked bus
191 * cycle on data for which we already own cache mastership is the most
198 __asm __volatile("mfence" : : : "memory");
200 __asm __volatile("lock; addl $0,(%%esp)" : : : "memory");
205 * cpu_lfence() ensures strong read ordering for reads issued prior
206 * to the instruction verses reads issued afterwords.
208 * A serializing or fence instruction is required here. A locked bus
209 * cycle on data for which we already own cache mastership is the most
216 __asm __volatile("lfence" : : : "memory");
218 __asm __volatile("lock; addl $0,(%%esp)" : : : "memory");
/*
 * cpu_sfence() ensures strong write ordering for writes issued prior
 * to the instruction versus writes issued afterwards.  Writes are
 * ordered on intel cpus so we do not actually have to do anything.
 */
static __inline void
cpu_sfence(void)
{
        /*
         * Don't use 'sfence' here, as it will create a lot of
         * unnecessary stalls.  A compiler barrier is sufficient.
         */
        __asm __volatile("" : : : "memory");
}

/*
 * cpu_ccfence() prevents the compiler from reordering instructions, in
 * particular stores, relative to the current cpu.  Use cpu_sfence() if
 * you need to guarantee ordering by both the compiler and by the cpu.
 *
 * This also prevents the compiler from caching memory loads into local
 * variables across the routine.
 */
static __inline void
cpu_ccfence(void)
{
        __asm __volatile("" : : : "memory");
}
/*
 * This is a horrible, horrible hack that might have to be put at the
 * end of certain procedures (on a case by case basis), just before it
 * returns to avoid what we believe to be an unreported AMD cpu bug.
 * Found to occur on both a Phenom II X4 820 (two of them), as well
 * as a 48-core built around an Opteron 6168 (Id = 0x100f91 Stepping = 1).
 * The problem does not appear to occur w/Intel cpus.
 *
 * The bug is likely related to either a write combining issue or the
 * Return Address Stack (RAS) hardware cache.
 *
 * In particular, we had to do this for GCC's fill_sons_in_loop() routine
 * which due to its deep recursion and stack flow appears to be able to
 * tickle the amd cpu bug (w/ gcc-4.4.7).  Adding a single 'nop' to the
 * end of the routine just before it returns works around the bug.
 *
 * The bug appears to be extremely sensitive to %rip and %rsp values, to
 * the point where even just inserting an instruction in an unrelated
 * procedure (shifting the entire code base being run) effects the outcome.
 * DragonFly is probably able to more readily reproduce the bug due to
 * the stackgap randomization code.  We would expect OpenBSD (where we got
 * the stackgap randomization code from) to also be able to reproduce the
 * issue.  To date we have only reproduced the issue in DragonFly.
 */
#define __AMDCPUBUG_DFLY01_AVAILABLE__

static __inline void
cpu_amdcpubug_dfly01(void)
{
        __asm __volatile("nop" : : : "memory");
}
#define HAVE_INLINE_FFS

/*
 * Find the first set bit in mask, returning 1-based bit position,
 * or 0 if mask is 0 (standard ffs() contract, built on bsfl).
 */
static __inline int
ffs(int mask)
{
        /*
         * Note that gcc-2's builtin ffs would be used if we didn't declare
         * this inline or turn off the builtin.  The builtin is faster but
         * broken in gcc-2.4.5 and slower but working in gcc-2.5 and later
         * versions.
         */
        return (mask == 0 ? mask : (int)bsfl((u_int)mask) + 1);
}

#define HAVE_INLINE_FLS

/*
 * Find the last set bit in mask, returning 1-based bit position,
 * or 0 if mask is 0.
 */
static __inline int
fls(int mask)
{
        return (mask == 0 ? mask : (int)bsrl((u_int)mask) + 1);
}
/*
 * The following complications are to get around gcc not having a
 * constraint letter for the range 0..255.  We still put "d" in the
 * constraint because "i" isn't a valid constraint when the port
 * isn't constant.  This only matters for -O0 because otherwise
 * the non-working version gets optimized away.
 *
 * Use an expression-statement instead of a conditional expression
 * because gcc-2.6.0 would promote the operands of the conditional
 * and produce poor code for "if ((inb(var) & const1) == const2)".
 *
 * The unnecessary test `(port) < 0x10000' is to generate a warning if
 * the `port' has type u_short or smaller.  Such types are pessimal.
 * This actually only works for signed types.  The range check is
 * careful to avoid generating warnings.
 */
#define inb(port) __extension__ ({                                     \
        u_char  _data;                                                  \
        if (__builtin_constant_p(port) && ((port) & 0xffff) < 0x100    \
            && (port) < 0x10000)                                       \
                _data = inbc(port);                                    \
        else                                                           \
                _data = inbv(port);                                    \
        _data; })

#define outb(port, data) (                                             \
        __builtin_constant_p(port) && ((port) & 0xffff) < 0x100        \
        && (port) < 0x10000                                            \
        ? outbc(port, data) : outbv(port, data))
/*
 * Read one byte from I/O port 'port' — constant-port variant ("id"
 * lets gcc encode the port as an immediate when it fits in 0..255).
 */
static __inline u_char
inbc(u_int port)
{
        u_char  data;

        __asm __volatile("inb %1,%0" : "=a" (data) : "id" ((u_short)(port)));
        return (data);
}

/*
 * Write one byte to I/O port 'port' — constant-port variant.
 */
static __inline void
outbc(u_int port, u_char data)
{
        __asm __volatile("outb %0,%1" : : "a" (data), "id" ((u_short)(port)));
}

/*
 * Read one byte from I/O port 'port' — variable-port variant.
 */
static __inline u_char
inbv(u_int port)
{
        u_char  data;
        /*
         * We use %%dx and not %1 here because i/o is done at %dx and not at
         * %edx, while gcc generates inferior code (movw instead of movl)
         * if we tell it to load (u_short) port.
         */
        __asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
        return (data);
}

/*
 * Read one 32-bit doubleword from I/O port 'port'.
 */
static __inline u_int
inl(u_int port)
{
        u_int   data;

        __asm __volatile("inl %%dx,%0" : "=a" (data) : "d" (port));
        return (data);
}
/*
 * String input: read 'cnt' bytes/words/doublewords from I/O port 'port'
 * into the buffer at 'addr' (rep insb/insw/insl).  cld ensures the
 * direction flag is forward.  The "memory" clobber tells gcc the buffer
 * is written behind its back.
 */
static __inline void
insb(u_int port, void *addr, size_t cnt)
{
        __asm __volatile("cld; rep; insb"
                         : "=D" (addr), "=c" (cnt)
                         : "0" (addr), "1" (cnt), "d" (port)
                         : "memory");
}

static __inline void
insw(u_int port, void *addr, size_t cnt)
{
        __asm __volatile("cld; rep; insw"
                         : "=D" (addr), "=c" (cnt)
                         : "0" (addr), "1" (cnt), "d" (port)
                         : "memory");
}

static __inline void
insl(u_int port, void *addr, size_t cnt)
{
        __asm __volatile("cld; rep; insl"
                         : "=D" (addr), "=c" (cnt)
                         : "0" (addr), "1" (cnt), "d" (port)
                         : "memory");
}
407 __asm __volatile("invd");
413 * If we are not a true-SMP box then smp_invltlb() is a NOP. Note that this
414 * will cause the invl*() functions to be equivalent to the cpu_invl*()
417 void smp_invltlb(void);
418 void smp_invltlb_intr(void);
#ifndef _CPU_INVLPG_DEFINED

/*
 * Invalidate a particular VA on this cpu only
 */
static __inline void
cpu_invlpg(void *addr)
{
        __asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}

#endif
#ifndef _CPU_INVLTLB_DEFINED

/*
 * Invalidate the TLB on this cpu only by reloading %cr3.
 * NOTE(review): interior lines were lost in extraction; the temp
 * declaration and stats hook are reconstructed — confirm.
 */
static __inline void
cpu_invltlb(void)
{
        u_int   temp;
        /*
         * This should be implemented as load_cr3(rcr3()) when load_cr3()
         * is inlined.
         */
        __asm __volatile("movl %%cr3, %0; movl %0, %%cr3" : "=r" (temp)
                         : : "memory");
#if defined(SWTCH_OPTIM_STATS)
        ++tlb_flush_count;
#endif
}

#endif
458 __asm __volatile("rep; nop");
/*
 * Read one 16-bit word from I/O port 'port'.
 */
static __inline u_short
inw(u_int port)
{
        u_short data;

        __asm __volatile("inw %%dx,%0" : "=a" (data) : "d" (port));
        return (data);
}

/*
 * Atomically fetch *addr and replace it with zero.  xchgl with a
 * memory operand carries an implicit lock prefix, so this is safe
 * against other cpus.
 */
static __inline u_int
loadandclear(volatile u_int *addr)
{
        u_int   result;

        __asm __volatile("xorl %0,%0; xchgl %1,%0"
                         : "=&r" (result) : "m" (*addr));
        return (result);
}
/*
 * Write one byte to I/O port 'port' — variable-port variant.
 */
static __inline void
outbv(u_int port, u_char data)
{
        u_char  al;
        /*
         * Use an unnecessary assignment to help gcc's register allocator.
         * This make a large difference for gcc-1.40 and a tiny difference
         * for gcc-2.6.0.  For gcc-1.40, al had to be ``asm("ax")'' for
         * best results.  gcc-2.6.0 can't handle this.
         */
        al = data;
        __asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
}

static __inline void
outl(u_int port, u_int data)
{
        /*
         * outl() and outw() aren't used much so we haven't looked at
         * possible micro-optimizations such as the unnecessary
         * assignment for them.
         */
        __asm __volatile("outl %0,%%dx" : : "a" (data), "d" (port));
}

/*
 * String output: write 'cnt' bytes/words/doublewords from the buffer
 * at 'addr' to I/O port 'port' (rep outsb/outsw/outsl).
 */
static __inline void
outsb(u_int port, const void *addr, size_t cnt)
{
        __asm __volatile("cld; rep; outsb"
                         : "=S" (addr), "=c" (cnt)
                         : "0" (addr), "1" (cnt), "d" (port));
}

static __inline void
outsw(u_int port, const void *addr, size_t cnt)
{
        __asm __volatile("cld; rep; outsw"
                         : "=S" (addr), "=c" (cnt)
                         : "0" (addr), "1" (cnt), "d" (port));
}

static __inline void
outsl(u_int port, const void *addr, size_t cnt)
{
        __asm __volatile("cld; rep; outsl"
                         : "=S" (addr), "=c" (cnt)
                         : "0" (addr), "1" (cnt), "d" (port));
}

static __inline void
outw(u_int port, u_short data)
{
        __asm __volatile("outw %0,%%dx" : : "a" (data), "d" (port));
}
/*
 * Read %cr2, the page-fault linear address register (privileged).
 */
static __inline u_int
rcr2(void)
{
        u_int   data;

        __asm __volatile("movl %%cr2,%0" : "=r" (data));
        return (data);
}

/*
 * Read the eflags register via pushfl/popl.
 */
static __inline u_int
read_eflags(void)
{
        u_int   ef;

        __asm __volatile("pushfl; popl %0" : "=r" (ef));
        return (ef);
}

/*
 * Read model-specific register 'msr' (privileged).  The "=A"
 * constraint returns the edx:eax pair as a 64-bit value.
 */
static __inline u_int64_t
rdmsr(u_int msr)
{
        u_int64_t rv;

        __asm __volatile("rdmsr" : "=A" (rv) : "c" (msr));
        return (rv);
}

/*
 * Read performance-monitoring counter 'pmc'.
 */
static __inline u_int64_t
rdpmc(u_int pmc)
{
        u_int64_t rv;

        __asm __volatile("rdpmc" : "=A" (rv) : "c" (pmc));
        return (rv);
}

#define _RDTSC_SUPPORTED_

/*
 * Read the time stamp counter.
 */
static __inline u_int64_t
rdtsc(void)
{
        u_int64_t rv;

        __asm __volatile("rdtsc" : "=A" (rv));
        return (rv);
}
587 __asm __volatile("wbinvd");
591 void cpu_wbinvd_on_all_cpus_callback(void *arg);
594 cpu_wbinvd_on_all_cpus(void)
596 lwkt_cpusync_simple(smp_active_mask, cpu_wbinvd_on_all_cpus_callback, NULL);
601 write_eflags(u_int ef)
603 __asm __volatile("pushl %0; popfl" : : "r" (ef));
607 wrmsr(u_int msr, u_int64_t newval)
609 __asm __volatile("wrmsr" : : "A" (newval), "c" (msr));
/*
 * Read the %fs segment selector.
 */
static __inline u_short
rfs(void)
{
        u_short sel;

        __asm __volatile("movw %%fs,%0" : "=rm" (sel));
        return (sel);
}

/*
 * Read the %gs segment selector.
 */
static __inline u_short
rgs(void)
{
        u_short sel;

        __asm __volatile("movw %%gs,%0" : "=rm" (sel));
        return (sel);
}

/*
 * Load segment selector 'sel' into %fs.
 */
static __inline void
load_fs(u_short sel)
{
        __asm __volatile("movw %0,%%fs" : : "rm" (sel));
}

/*
 * Load segment selector 'sel' into %gs.
 */
static __inline void
load_gs(u_short sel)
{
        __asm __volatile("movw %0,%%gs" : : "rm" (sel));
}
/*
 * Accessors for the x86 debug registers %dr0-%dr7 (privileged).
 * rdrN() reads register N; load_drN() writes it.  dr0-dr3 hold
 * hardware breakpoint addresses, dr6 is status, dr7 is control;
 * dr4/dr5 are aliases of dr6/dr7 on most cpus.
 */
static __inline u_int
rdr0(void)
{
        u_int   data;

        __asm __volatile("movl %%dr0,%0" : "=r" (data));
        return (data);
}

static __inline void
load_dr0(u_int sel)
{
        __asm __volatile("movl %0,%%dr0" : : "r" (sel));
}

static __inline u_int
rdr1(void)
{
        u_int   data;

        __asm __volatile("movl %%dr1,%0" : "=r" (data));
        return (data);
}

static __inline void
load_dr1(u_int sel)
{
        __asm __volatile("movl %0,%%dr1" : : "r" (sel));
}

static __inline u_int
rdr2(void)
{
        u_int   data;

        __asm __volatile("movl %%dr2,%0" : "=r" (data));
        return (data);
}

static __inline void
load_dr2(u_int sel)
{
        __asm __volatile("movl %0,%%dr2" : : "r" (sel));
}

static __inline u_int
rdr3(void)
{
        u_int   data;

        __asm __volatile("movl %%dr3,%0" : "=r" (data));
        return (data);
}

static __inline void
load_dr3(u_int sel)
{
        __asm __volatile("movl %0,%%dr3" : : "r" (sel));
}

static __inline u_int
rdr4(void)
{
        u_int   data;

        __asm __volatile("movl %%dr4,%0" : "=r" (data));
        return (data);
}

static __inline void
load_dr4(u_int sel)
{
        __asm __volatile("movl %0,%%dr4" : : "r" (sel));
}

static __inline u_int
rdr5(void)
{
        u_int   data;

        __asm __volatile("movl %%dr5,%0" : "=r" (data));
        return (data);
}

static __inline void
load_dr5(u_int sel)
{
        __asm __volatile("movl %0,%%dr5" : : "r" (sel));
}

static __inline u_int
rdr6(void)
{
        u_int   data;

        __asm __volatile("movl %%dr6,%0" : "=r" (data));
        return (data);
}

static __inline void
load_dr6(u_int sel)
{
        __asm __volatile("movl %0,%%dr6" : : "r" (sel));
}

static __inline u_int
rdr7(void)
{
        u_int   data;

        __asm __volatile("movl %%dr7,%0" : "=r" (data));
        return (data);
}

static __inline void
load_dr7(u_int sel)
{
        __asm __volatile("movl %0,%%dr7" : : "r" (sel));
}
752 #else /* !__GNUC__ */
754 int breakpoint (void);
755 void cpu_pause (void);
756 u_int bsfl (u_int mask);
757 u_int bsrl (u_int mask);
758 void cpu_disable_intr (void);
759 void do_cpuid (u_int ax, u_int *p);
760 void cpu_enable_intr (void);
761 u_char inb (u_int port);
762 u_int inl (u_int port);
763 void insb (u_int port, void *addr, size_t cnt);
764 void insl (u_int port, void *addr, size_t cnt);
765 void insw (u_int port, void *addr, size_t cnt);
767 u_short inw (u_int port);
768 u_int loadandclear (u_int *addr);
769 void outb (u_int port, u_char data);
770 void outl (u_int port, u_int data);
771 void outsb (u_int port, void *addr, size_t cnt);
772 void outsl (u_int port, void *addr, size_t cnt);
773 void outsw (u_int port, void *addr, size_t cnt);
774 void outw (u_int port, u_short data);
776 u_int64_t rdmsr (u_int msr);
777 u_int64_t rdpmc (u_int pmc);
778 u_int64_t rdtsc (void);
779 u_int read_eflags (void);
781 void write_eflags (u_int ef);
782 void wrmsr (u_int msr, u_int64_t newval);
785 void load_fs (u_short sel);
786 void load_gs (u_short sel);
788 #endif /* __GNUC__ */
790 void load_cr0 (u_int cr0);
791 void load_cr3 (u_int cr3);
792 void load_cr4 (u_int cr4);
793 void ltr (u_short sel);
797 int rdmsr_safe (u_int msr, uint64_t *val);
798 void reset_dbregs (void);
801 #endif /* !_CPU_CPUFUNC_H_ */